hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4674db514b2fd6be335133809c409cc899a512 | 6,171 | py | Python | gpgrouper/containers.py | malovannaya-lab/gpgrouper | 45cb948bfa9ed256e450ad8f257ec24324f786ca | [
"BSD-3-Clause"
] | 5 | 2018-08-10T17:10:08.000Z | 2020-05-21T08:09:45.000Z | gpgrouper/containers.py | malovannaya-lab/gpgrouper | 45cb948bfa9ed256e450ad8f257ec24324f786ca | [
"BSD-3-Clause"
] | null | null | null | gpgrouper/containers.py | malovannaya-lab/gpgrouper | 45cb948bfa9ed256e450ad8f257ec24324f786ca | [
"BSD-3-Clause"
] | null | null | null | """Container for each experiment, has a dataframe and metadata"""
import os
import re
from datetime import datetime
import traceback
import pandas as pd
from . import _version
class UserData:
    """Container for a single experiment: holds the PSM dataframe plus run
    metadata, filter settings, and a per-experiment log file.

    :param recno: record number (required)
    :param datafile: input data file name (joined with ``indir`` for reads)
    :param runno/searchno: run and search numbers for this record
    :raises ValueError: if ``recno`` is not supplied
    """

    def __init__(self, recno=None, datafile=None, runno=1, searchno=1, no_taxa_redistrib=0,
                 addedby='', indir='.', outdir='.', rawfiledir='.',
                 labeltype='none', quant_source=None, phospho=False,
                 searchdb=None, taxonid=None, miscuts=2):
        # quant_source is accepted for interface compatibility but is
        # currently unused.
        if recno is None:
            raise ValueError('Must supply record number (recno)')
        self.recno = recno
        self.runno = runno
        self.searchno = searchno
        self.taxonid = taxonid
        self.added_by = addedby
        self.labeltype = labeltype
        self.no_taxa_redistrib = no_taxa_redistrib
        self.filtervalues = dict()
        self.indir = indir
        self.outdir = outdir
        self.rawfiledir = rawfiledir
        self.searchdb = searchdb  # file name for refseq
        self.datafile = datafile
        self.df = pd.DataFrame()
        self.pipeline = None
        self.original_columns = None
        # Strip any leading "recno_runno_searchno_" prefix from the data file
        # name so output files are not double-prefixed.
        rrs = '{}_{}_{}_'.format(recno, runno, searchno)
        basename = os.path.splitext(os.path.basename(datafile))[0]
        self.basename = basename.split(rrs)[-1]
        self.LOGFILE = os.path.join(outdir, self.output_name(ext='log'))
        self._LOGSTACK = list()  # messages queued by to_logq, flushed later
        self.EXIT_CODE = 0
        self.ERROR = None
        self.taxon_ratio_totals = dict()
        self.miscuts = miscuts
        self.phospho = phospho
        # Start a fresh log file for this run.
        with open(self.LOGFILE, 'w') as f:
            f.write('{} PyGrouper {}'.format(datetime.now(), _version.__version__))

    @property
    def taxon_miscut_id(self):
        """Hash-based identity combining taxon id and miscut setting."""
        return hash(self.taxonid) + hash(self.miscuts)

    def __repr__(self):
        return '{}_{}_{}'.format(self.recno, self.runno, self.searchno)

    def __bool__(self):
        # Truthy only when both a data file and a record number are present.
        if self.datafile is not None and self.recno is not None:
            return True
        return False

    def to_log(self, message):
        """Append *message* to the log file, flushing queued messages first.

        Bug fixes versus the original implementation: the flush branch
        referenced the undefined name ``messages`` (NameError) and tried to
        concatenate a list with a tuple (TypeError); the file was also
        opened with ``'w+'``, which truncated the log on every call.
        """
        if self._LOGSTACK:  # flush queued messages ahead of this one
            messages = tuple(self._LOGSTACK) + (message,)
            self._LOGSTACK = list()
        else:
            messages = (message,)
        # Append so earlier log entries are preserved.
        with open(self.LOGFILE, 'a') as f:
            for msg in messages:
                f.write(msg)
                f.write('\n')

    def to_logq(self, message):
        """Queue *message* for a later write (see flush_log / to_log)."""
        self._LOGSTACK.append(message + '\n')
        return self

    def flush_log(self):
        """Write any queued messages to the log file."""
        if self._LOGSTACK:
            # Swap the stack out first so to_log sees an empty queue.
            stack, self._LOGSTACK = self._LOGSTACK, list()
            self.to_log('\n'.join(stack))
        return self

    def full_path(self, in_or_out='in'):
        """Return the data file joined with the input or output directory."""
        if in_or_out == 'in':
            mydir = self.indir
        elif in_or_out == 'out':
            mydir = self.outdir
        else:
            mydir = '.'
        return os.path.join(mydir, self.datafile or '')

    def read_csv(self, *args, **kwargs):
        """Read the input file into ``self.df`` via pandas ``read_csv``.

        args and kwargs are passed through to ``pd.read_csv``.
        Returns 0 on success, 1 on a read error, 2 if the file was empty;
        nonzero results also set ``EXIT_CODE`` (and ``ERROR`` for reads).
        """
        try:
            self.df = pd.read_csv(self.full_path(), *args, **kwargs)
            self.original_columns = self.df.columns.values
        except Exception:
            self.to_log(traceback.format_exc())
            self.ERROR = traceback.format_exc()
            self.EXIT_CODE = 1
            return 1
        if len(self.df) == 0:
            self.EXIT_CODE = 1
            return 2
        return 0

    def output_name(self, suffix=None, ext='tab'):
        """Generate an output file name.

        Returns ``rec_run_search_labeltype_basename[_suffix].ext``.
        """
        return '{!r}_{}_{}{}.{}'.format(self,
                                        self.labeltype,
                                        self.basename,
                                        '_' + suffix if suffix else '',
                                        ext
        )

    def populate_base_data(self):
        """Populate the dataframe with base data prior to grouping."""
        self.categorical_assign('EXPRecNo', self.recno)
        self.categorical_assign('EXPRunNo', self.runno)
        self.categorical_assign('EXPSearchNo', self.searchno)
        # Note: removed a stray ')' that was embedded in the strftime format
        # string ("%m/%d/%Y) %H:%M:%S").
        self.categorical_assign('CreationTS', datetime.now().strftime("%m/%d/%Y %H:%M:%S"))
        self.categorical_assign('AddedBy', self.added_by)
        self.df['metadatainfo'] = ''
        if 'ion_score_bins' not in self.filtervalues:
            self.filtervalues['ion_score_bins'] = (10, 20, 30)  # default bins
        return self

    @property
    def filterstamp(self):
        """Compact string encoding of the active filter values."""
        s = 'is{ion_score}_qv{qvalue}_pep{pep}_idg{idg}_z{zmin}to{zmax}_mo{modi}_is_bins{ion_score_bins}'.format(**self.filtervalues)
        if self.phospho:
            s += '_phospho_only'
        return s

    def categorical_assign(self, name, value, **kwargs):
        """
        Assign a static value to a new column.

        Saves memory by using pandas Categorical dtype.

        :kwargs: passed to pd.Series.astype
        """
        self.df[name] = value
        self.df[name] = self.df[name].astype('category', **kwargs)
        return self
| 36.087719 | 133 | 0.577702 | import os
import re
from datetime import datetime
import traceback
import pandas as pd
from . import _version
class UserData:
def __init__(self, recno=None, datafile=None, runno=1, searchno=1, no_taxa_redistrib=0,
addedby='', indir = '.', outdir='.', rawfiledir='.',
labeltype='none', quant_source=None, phospho=False,
searchdb=None, taxonid=None, miscuts=2):
if recno is None:
raise ValueError('Must supply record number (recno)')
self.recno = recno
self.runno = runno
self.searchno = searchno
self.taxonid = taxonid
self.added_by = addedby
self.labeltype = labeltype
self.no_taxa_redistrib = no_taxa_redistrib
self.filtervalues = dict()
self.indir = indir
self.outdir = outdir
self.rawfiledir = rawfiledir
self.searchdb = searchdb
self.datafile = datafile
self.df = pd.DataFrame()
self.pipeline = None
self.original_columns = None
rrs = '{}_{}_{}_'.format(recno, runno, searchno)
basename = os.path.splitext(os.path.basename(datafile))[0]
self.basename = basename.split(rrs)[-1]
self.LOGFILE = os.path.join(outdir, self.output_name(ext='log'))
self._LOGSTACK = list()
self.EXIT_CODE = 0
self.ERROR = None
self.taxon_ratio_totals = dict()
self.miscuts = miscuts
self.phospho = phospho
with open(self.LOGFILE, 'w') as f:
f.write('{} PyGrouper {}'.format(datetime.now(), _version.__version__))
@property
def taxon_miscut_id(self):
return hash(self.taxonid) + hash(self.miscuts)
def __repr__(self):
return '{}_{}_{}'.format(self.recno, self.runno, self.searchno)
def __bool__(self):
if self.datafile is not None and self.recno is not None:
return True
return False
def to_log(self, message):
if self._LOGSTACK:
messages = self._LOGSTACK + (messages,)
else:
messages = (message,)
with open(self.LOGFILE, 'w+') as f:
for message in messages:
f.write(message)
f.write('\n')
def to_logq(self, message):
self._LOGSTACK.append(message+'\n')
return self
def flush_log(self):
if self._LOGSTACK:
stack, self._LOGSTACK = self._LOGSTACK, list()
self.to_log('\n'.join(stack))
return self
def full_path(self, in_or_out='in'):
if in_or_out == 'in':
mydir = self.indir
elif in_or_out == 'out':
mydir = self.outdir
else:
mydir = '.'
return os.path.join(mydir, self.datafile or '')
def read_csv(self, *args, **kwargs):
try:
self.df = pd.read_csv(self.full_path(), *args, **kwargs)
self.original_columns = self.df.columns.values
except Exception as e:
self.to_log(traceback.format_exc())
self.ERROR = traceback.format_exc()
self.EXIT_CODE = 1
return 1
if len(self.df) == 0:
self.EXIT_CODE = 1
return 2
return 0
def output_name(self, suffix=None, ext='tab'):
return '{!r}_{}_{}{}.{}'.format(self,
self.labeltype,
self.basename,
'_' + suffix if suffix else '',
ext
)
def populate_base_data(self):
self.categorical_assign('EXPRecNo', self.recno)
self.categorical_assign('EXPRunNo', self.runno)
self.categorical_assign('EXPSearchNo', self.searchno)
self.categorical_assign('CreationTS', datetime.now().strftime("%m/%d/%Y) %H:%M:%S"))
self.categorical_assign('AddedBy', self.added_by)
self.df['metadatainfo'] = ''
if not 'ion_score_bins' in self.filtervalues:
self.filtervalues['ion_score_bins'] = (10, 20, 30)
return self
@property
def filterstamp(self):
s = 'is{ion_score}_qv{qvalue}_pep{pep}_idg{idg}_z{zmin}to{zmax}_mo{modi}_is_bins{ion_score_bins}'.format(**self.filtervalues)
if self.phospho:
s += '_phospho_only'
return s
def categorical_assign(self, name, value, **kwargs):
self.df[name] = value
self.df[name] = self.df[name].astype('category', **kwargs)
return self
| true | true |
1c4675f61a8598c7f8758a842b5965aa6ce62daf | 1,319 | py | Python | Python Programs/The-Imvisible-Man/opcv.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | [
"MIT"
] | 77 | 2020-10-01T10:06:59.000Z | 2021-11-08T08:57:18.000Z | Python Programs/The-Imvisible-Man/opcv.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | [
"MIT"
] | 46 | 2020-09-27T04:55:36.000Z | 2021-05-14T18:49:06.000Z | Python Programs/The-Imvisible-Man/opcv.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | [
"MIT"
] | 327 | 2020-09-26T17:06:03.000Z | 2021-10-09T06:04:39.000Z | import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0)
time.sleep(2)
background=0
#capture the background
for i in range(30):
ret,background = cap.read()
while(cap.isOpened()):
ret , img = cap.read()
if not ret:
break
hsv = cv2.cvtColor(img , cv2.COLOR_BGR2HSV)
lower_red = np.array([0,120,70])
upper_red = np.array([10, 255, 255])
mask1 = cv2.inRange(hsv , lower_red , upper_red) #sepatreting the clock part
lower_red = np.array([170, 120, 70])
upper_red = np.array([180, 255, 255])
mask2 = cv2.inRange(hsv, lower_red, upper_red) # sepatreting the clock part
mask1 = mask1+mask2 #OR 1 or x
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN,
np.ones((3,3),np.uint8),iterations=2) #Noise Removal
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE,
np.ones((3,3), np.uint8), iterations=1) #smmoting the image
mask2 = cv2.bitwise_not(mask1) #Except the clock
res1=cv2.bitwise_and(background , background , mask = mask1)
res2 = cv2.bitwise_and(img , img , mask = mask2)
final_output = cv2.addWeighted(res1,1,res2,1,0)
cv2.imshow("Hey invisible..!", final_output)
k = cv2.waitKey(10)
if k == ord('s'):
break
cap.release()
cv2.destroyAllWindows() | 26.918367 | 88 | 0.634572 | import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0)
time.sleep(2)
background=0
for i in range(30):
ret,background = cap.read()
while(cap.isOpened()):
ret , img = cap.read()
if not ret:
break
hsv = cv2.cvtColor(img , cv2.COLOR_BGR2HSV)
lower_red = np.array([0,120,70])
upper_red = np.array([10, 255, 255])
mask1 = cv2.inRange(hsv , lower_red , upper_red)
lower_red = np.array([170, 120, 70])
upper_red = np.array([180, 255, 255])
mask2 = cv2.inRange(hsv, lower_red, upper_red)
mask1 = mask1+mask2
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN,
np.ones((3,3),np.uint8),iterations=2)
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE,
np.ones((3,3), np.uint8), iterations=1)
mask2 = cv2.bitwise_not(mask1)
res1=cv2.bitwise_and(background , background , mask = mask1)
res2 = cv2.bitwise_and(img , img , mask = mask2)
final_output = cv2.addWeighted(res1,1,res2,1,0)
cv2.imshow("Hey invisible..!", final_output)
k = cv2.waitKey(10)
if k == ord('s'):
break
cap.release()
cv2.destroyAllWindows() | true | true |
1c4676525bfb0f4f935ed4c6103fd5be72db5498 | 9,215 | py | Python | salt/modules/s3.py | herlo/salt | 10ffb8315559c0cfbc10b4adc26cd62ebc462851 | [
"Apache-2.0"
] | null | null | null | salt/modules/s3.py | herlo/salt | 10ffb8315559c0cfbc10b4adc26cd62ebc462851 | [
"Apache-2.0"
] | null | null | null | salt/modules/s3.py | herlo/salt | 10ffb8315559c0cfbc10b4adc26cd62ebc462851 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Connection module for Amazon S3
:configuration: This module accepts explicit s3 credentials but can also utilize
    IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at::
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
s3.keyid: GKTADJGHEIQSXMKKRBJ08H
s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A service_url may also be specified in the configuration::
s3.service_url: s3.amazonaws.com
A role_arn may also be specified in the configuration::
s3.role_arn: arn:aws:iam::111111111111:role/my-role-to-assume
If a service_url is not specified, the default is s3.amazonaws.com. This
may appear in various documentation as an "endpoint". A comprehensive list
for Amazon S3 may be found at::
http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
The service_url will form the basis for the final endpoint that is used to
query the service.
SSL verification may also be turned off in the configuration:
s3.verify_ssl: False
This is required if using S3 bucket names that contain a period, as
these will not match Amazon's S3 wildcard certificates. Certificate
verification is enabled by default.
AWS region may be specified in the configuration:
s3.location: eu-central-1
Default is us-east-1.
This module should be usable to query other S3-like services, such as
Eucalyptus.
:depends: requests
'''
from __future__ import absolute_import
# Import Python libs
import logging
# Import Salt libs
import salt.utils
import salt.utils.s3
log = logging.getLogger(__name__)
def __virtual__():
'''
Should work on any modern Python installation
'''
return True
def delete(bucket, path=None, action=None, key=None, keyid=None,
           service_url=None, verify_ssl=None, kms_keyid=None, location=None,
           role_arn=None):
    '''
    Delete a bucket, or delete an object from a bucket.

    CLI Example to delete a bucket:

    .. code-block:: bash

        salt myminion s3.delete mybucket

    CLI Example to delete an object from a bucket:

    .. code-block:: bash

        salt myminion s3.delete mybucket remoteobject
    '''
    # Fill in any connection parameters not passed explicitly from the
    # minion/pillar configuration.
    (key, keyid, service_url, verify_ssl,
     kms_keyid, location, role_arn) = _get_key(
         key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn)

    query_kwargs = dict(
        method='DELETE',
        bucket=bucket,
        path=path,
        action=action,
        key=key,
        keyid=keyid,
        kms_keyid=kms_keyid,
        service_url=service_url,
        verify_ssl=verify_ssl,
        location=location,
        role_arn=role_arn,
    )
    return salt.utils.s3.query(**query_kwargs)
def get(bucket=None, path=None, return_bin=False, action=None,
        local_file=None, key=None, keyid=None, service_url=None,
        verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
    '''
    List the contents of a bucket, or return an object from a bucket. Set
    return_bin to True in order to retrieve an object wholesale. Otherwise,
    Salt will attempt to parse an XML response.

    CLI Example to list buckets:

    .. code-block:: bash

        salt myminion s3.get

    CLI Example to list the contents of a bucket:

    .. code-block:: bash

        salt myminion s3.get mybucket

    CLI Example to return the binary contents of an object:

    .. code-block:: bash

        salt myminion s3.get mybucket myfile.png return_bin=True

    CLI Example to save the binary contents of an object to a local file:

    .. code-block:: bash

        salt myminion s3.get mybucket myfile.png local_file=/tmp/myfile.png

    It is also possible to perform an action on a bucket. Currently, S3
    supports the following actions::

        acl
        cors
        lifecycle
        policy
        location
        logging
        notification
        tagging
        versions
        requestPayment
        versioning
        website

    To perform an action on a bucket:

    .. code-block:: bash

        salt myminion s3.get mybucket myfile.png action=acl
    '''
    # Fill in any connection parameters not passed explicitly from the
    # minion/pillar configuration.
    (key, keyid, service_url, verify_ssl,
     kms_keyid, location, role_arn) = _get_key(
         key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn)

    query_kwargs = dict(
        method='GET',
        bucket=bucket,
        path=path,
        return_bin=return_bin,
        local_file=local_file,
        action=action,
        key=key,
        keyid=keyid,
        kms_keyid=kms_keyid,
        service_url=service_url,
        verify_ssl=verify_ssl,
        location=location,
        role_arn=role_arn,
    )
    return salt.utils.s3.query(**query_kwargs)
def head(bucket, path=None, key=None, keyid=None, service_url=None,
         verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
    '''
    Return the metadata for a bucket, or an object in a bucket.

    CLI Examples:

    .. code-block:: bash

        salt myminion s3.head mybucket
        salt myminion s3.head mybucket myfile.png
    '''
    # Fill in any connection parameters not passed explicitly from the
    # minion/pillar configuration.
    (key, keyid, service_url, verify_ssl,
     kms_keyid, location, role_arn) = _get_key(
         key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn)

    query_kwargs = dict(
        method='HEAD',
        bucket=bucket,
        path=path,
        key=key,
        keyid=keyid,
        kms_keyid=kms_keyid,
        service_url=service_url,
        verify_ssl=verify_ssl,
        location=location,
        full_headers=True,  # HEAD responses carry the metadata in headers
        role_arn=role_arn,
    )
    return salt.utils.s3.query(**query_kwargs)
def put(bucket, path=None, return_bin=False, action=None, local_file=None,
        key=None, keyid=None, service_url=None, verify_ssl=None,
        kms_keyid=None, location=None, role_arn=None):
    '''
    Create a new bucket, or upload an object to a bucket.

    CLI Example to create a bucket:

    .. code-block:: bash

        salt myminion s3.put mybucket

    CLI Example to upload an object to a bucket:

    .. code-block:: bash

        salt myminion s3.put mybucket remotepath local_file=/path/to/file
    '''
    # Fill in any connection parameters not passed explicitly from the
    # minion/pillar configuration.
    (key, keyid, service_url, verify_ssl,
     kms_keyid, location, role_arn) = _get_key(
         key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn)

    query_kwargs = dict(
        method='PUT',
        bucket=bucket,
        path=path,
        return_bin=return_bin,
        local_file=local_file,
        action=action,
        key=key,
        keyid=keyid,
        kms_keyid=kms_keyid,
        service_url=service_url,
        verify_ssl=verify_ssl,
        location=location,
        role_arn=role_arn,
    )
    return salt.utils.s3.query(**query_kwargs)
def _get_key(key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn):
    '''
    Examine the supplied connection parameters and fill in any that are
    unset from minion/pillar configuration, applying defaults where
    appropriate. Returns the completed 7-tuple.
    '''
    opt = __salt__['config.option']

    if not key and opt('s3.key'):
        key = opt('s3.key')
    if not keyid and opt('s3.keyid'):
        keyid = opt('s3.keyid')
    if not kms_keyid and opt('aws.kms.keyid'):
        kms_keyid = opt('aws.kms.keyid')
    if not service_url and opt('s3.service_url'):
        service_url = opt('s3.service_url')
    if not service_url:
        service_url = 's3.amazonaws.com'  # Amazon's default endpoint
    if verify_ssl is None and opt('s3.verify_ssl') is not None:
        verify_ssl = opt('s3.verify_ssl')
    if verify_ssl is None:
        verify_ssl = True  # verify certificates unless explicitly disabled
    if location is None and opt('s3.location') is not None:
        location = opt('s3.location')
    if role_arn is None and opt('s3.role_arn') is not None:
        role_arn = opt('s3.role_arn')

    return key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn
| 30.819398 | 87 | 0.58166 |
from __future__ import absolute_import
import logging
import salt.utils
import salt.utils.s3
log = logging.getLogger(__name__)
def __virtual__():
return True
def delete(bucket, path=None, action=None, key=None, keyid=None,
service_url=None, verify_ssl=None, kms_keyid=None, location=None,
role_arn=None):
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='DELETE',
bucket=bucket,
path=path,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
def get(bucket=None, path=None, return_bin=False, action=None,
local_file=None, key=None, keyid=None, service_url=None,
verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='GET',
bucket=bucket,
path=path,
return_bin=return_bin,
local_file=local_file,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
def head(bucket, path=None, key=None, keyid=None, service_url=None,
verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='HEAD',
bucket=bucket,
path=path,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
full_headers=True,
role_arn=role_arn)
def put(bucket, path=None, return_bin=False, action=None, local_file=None,
key=None, keyid=None, service_url=None, verify_ssl=None,
kms_keyid=None, location=None, role_arn=None):
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='PUT',
bucket=bucket,
path=path,
return_bin=return_bin,
local_file=local_file,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
def _get_key(key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn):
if not key and __salt__['config.option']('s3.key'):
key = __salt__['config.option']('s3.key')
if not keyid and __salt__['config.option']('s3.keyid'):
keyid = __salt__['config.option']('s3.keyid')
if not kms_keyid and __salt__['config.option']('aws.kms.keyid'):
kms_keyid = __salt__['config.option']('aws.kms.keyid')
if not service_url and __salt__['config.option']('s3.service_url'):
service_url = __salt__['config.option']('s3.service_url')
if not service_url:
service_url = 's3.amazonaws.com'
if verify_ssl is None and __salt__['config.option']('s3.verify_ssl') is not None:
verify_ssl = __salt__['config.option']('s3.verify_ssl')
if verify_ssl is None:
verify_ssl = True
if location is None and __salt__['config.option']('s3.location') is not None:
location = __salt__['config.option']('s3.location')
if role_arn is None and __salt__['config.option']('s3.role_arn') is not None:
role_arn = __salt__['config.option']('s3.role_arn')
return key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn
| true | true |
1c4676e95a1de5936f6b8382b6bdae4774211922 | 3,598 | py | Python | yt_dlp/extractor/joj.py | YuanHsing/yt-dlp | 38d86f4d45cf2b764f79141c602356fbb426a4b6 | [
"Unlicense"
] | 1 | 2021-12-13T14:12:47.000Z | 2021-12-13T14:12:47.000Z | yt_dlp/extractor/joj.py | YuanHsing/yt-dlp | 38d86f4d45cf2b764f79141c602356fbb426a4b6 | [
"Unlicense"
] | null | null | null | yt_dlp/extractor/joj.py | YuanHsing/yt-dlp | 38d86f4d45cf2b764f79141c602356fbb426a4b6 | [
"Unlicense"
] | null | null | null | import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
format_field,
int_or_none,
js_to_json,
try_get,
)
class JojIE(InfoExtractor):
    """Extractor for videos embedded from https://media.joj.sk (TV JOJ)."""

    _VALID_URL = r'''(?x)
                    (?:
                        joj:|
                        https?://media\.joj\.sk/embed/
                    )
                    (?P<id>[^/?#^]+)
                '''
    _TESTS = [{
        'url': 'https://media.joj.sk/embed/a388ec4c-6019-4a4a-9312-b1bee194e932',
        'info_dict': {
            'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932',
            'ext': 'mp4',
            'title': 'NOVÉ BÝVANIE',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 3118,
        }
    }, {
        'url': 'https://media.joj.sk/embed/9i1cxv',
        'only_matching': True,
    }, {
        'url': 'joj:a388ec4c-6019-4a4a-9312-b1bee194e932',
        'only_matching': True,
    }, {
        'url': 'joj:9i1cxv',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Find every <iframe> whose src points at a media.joj.sk embed so
        # that pages hosting JOJ players can delegate to this extractor.
        return [
            mobj.group('url')
            for mobj in re.finditer(
                r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//media\.joj\.sk/embed/(?:(?!\1).)+)\1',
                webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'https://media.joj.sk/embed/%s' % video_id, video_id)

        # Prefer the JS player title, fall back to <title>/OpenGraph.
        title = self._search_regex(
            (r'videoTitle\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',
             r'<title>(?P<title>[^<]+)'), webpage, 'title',
            default=None, group='title') or self._og_search_title(webpage)

        bitrates = self._parse_json(
            self._search_regex(
                r'(?s)(?:src|bitrates)\s*=\s*({.+?});', webpage, 'bitrates',
                default='{}'),
            video_id, transform_source=js_to_json, fatal=False)

        formats = []
        for format_url in try_get(bitrates, lambda x: x['mp4'], list) or []:
            if isinstance(format_url, compat_str):
                height = self._search_regex(
                    r'(\d+)[pP]\.', format_url, 'height', default=None)
                formats.append({
                    'url': format_url,
                    'format_id': format_field(height, None, '%sp'),
                    # int_or_none: the height regex may not match (it
                    # defaults to None), and int(None) would raise TypeError
                    'height': int_or_none(height),
                })
        if not formats:
            # Fall back to the legacy XML playlist service.
            playlist = self._download_xml(
                'https://media.joj.sk/services/Video.php?clip=%s' % video_id,
                video_id)
            for file_el in playlist.findall('./files/file'):
                path = file_el.get('path')
                if not path:
                    continue
                format_id = file_el.get('id') or file_el.get('label')
                formats.append({
                    'url': 'http://n16.joj.sk/storage/%s' % path.replace(
                        'dat/', '', 1),
                    'format_id': format_id,
                    'height': int_or_none(self._search_regex(
                        r'(\d+)[pP]', format_id or path, 'height',
                        default=None)),
                })
        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)

        duration = int_or_none(self._search_regex(
            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
| 33.626168 | 105 | 0.472763 | import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
format_field,
int_or_none,
js_to_json,
try_get,
)
class JojIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
joj:|
https?://media\.joj\.sk/embed/
)
(?P<id>[^/?#^]+)
'''
_TESTS = [{
'url': 'https://media.joj.sk/embed/a388ec4c-6019-4a4a-9312-b1bee194e932',
'info_dict': {
'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932',
'ext': 'mp4',
'title': 'NOVÉ BÝVANIE',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3118,
}
}, {
'url': 'https://media.joj.sk/embed/9i1cxv',
'only_matching': True,
}, {
'url': 'joj:a388ec4c-6019-4a4a-9312-b1bee194e932',
'only_matching': True,
}, {
'url': 'joj:9i1cxv',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [
mobj.group('url')
for mobj in re.finditer(
r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//media\.joj\.sk/embed/(?:(?!\1).)+)\1',
webpage)]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://media.joj.sk/embed/%s' % video_id, video_id)
title = self._search_regex(
(r'videoTitle\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',
r'<title>(?P<title>[^<]+)'), webpage, 'title',
default=None, group='title') or self._og_search_title(webpage)
bitrates = self._parse_json(
self._search_regex(
r'(?s)(?:src|bitrates)\s*=\s*({.+?});', webpage, 'bitrates',
default='{}'),
video_id, transform_source=js_to_json, fatal=False)
formats = []
for format_url in try_get(bitrates, lambda x: x['mp4'], list) or []:
if isinstance(format_url, compat_str):
height = self._search_regex(
r'(\d+)[pP]\.', format_url, 'height', default=None)
formats.append({
'url': format_url,
'format_id': format_field(height, None, '%sp'),
'height': int(height),
})
if not formats:
playlist = self._download_xml(
'https://media.joj.sk/services/Video.php?clip=%s' % video_id,
video_id)
for file_el in playlist.findall('./files/file'):
path = file_el.get('path')
if not path:
continue
format_id = file_el.get('id') or file_el.get('label')
formats.append({
'url': 'http://n16.joj.sk/storage/%s' % path.replace(
'dat/', '', 1),
'format_id': format_id,
'height': int_or_none(self._search_regex(
r'(\d+)[pP]', format_id or path, 'height',
default=None)),
})
self._sort_formats(formats)
thumbnail = self._og_search_thumbnail(webpage)
duration = int_or_none(self._search_regex(
r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
| true | true |
1c46778f48f486ad8a9dd24ae5c1193ee12576be | 246 | py | Python | manage.py | fndomariano/prototipo-rbc | 72b48a2cdc5d5072d09b20cb7311df50ea5161bb | [
"MIT"
] | null | null | null | manage.py | fndomariano/prototipo-rbc | 72b48a2cdc5d5072d09b20cb7311df50ea5161bb | [
"MIT"
] | 6 | 2021-03-19T01:32:29.000Z | 2021-09-22T18:50:40.000Z | manage.py | fndomariano/prototype-cbr | 72b48a2cdc5d5072d09b20cb7311df50ea5161bb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Select this project's Django settings unless the environment already
    # chose a settings module.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tcc.settings")

    # Imported here (standard Django boilerplate) so the settings variable
    # is in place before any framework code runs.
    from django.core.management import execute_from_command_line

    # Dispatch the CLI arguments (runserver, migrate, ...) to Django.
    execute_from_command_line(sys.argv)
| 22.363636 | 67 | 0.768293 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tcc.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true | true |
1c467887b365f7e063283d64aeac5d205b2dbc04 | 905 | py | Python | services/python/app/lib/modules/detections/keybase.py | seanmcfeely/eventsentry | afa4f7c3797a5b3cd96511064f58eb375ca73848 | [
"Apache-2.0"
] | 4 | 2018-08-17T16:51:46.000Z | 2020-05-05T21:27:18.000Z | services/python/app/lib/modules/detections/keybase.py | seanmcfeely/eventsentry | afa4f7c3797a5b3cd96511064f58eb375ca73848 | [
"Apache-2.0"
] | 6 | 2018-08-06T20:40:22.000Z | 2019-01-17T15:04:31.000Z | services/python/app/lib/modules/detections/keybase.py | seanmcfeely/eventsentry | afa4f7c3797a5b3cd96511064f58eb375ca73848 | [
"Apache-2.0"
] | 4 | 2018-08-06T14:59:09.000Z | 2019-08-30T18:03:45.000Z | from lib.modules.DetectionModule import *
class Module(DetectionModule):
    """Flags KeyBase keylogger activity in sandboxed process trees."""

    # Artifact files that KeyBase writes its harvested data to.
    _INDICATORS = ['C:\\ProgramData\\Mails.txt', 'C:\\ProgramData\\Browsers.txt']

    def __init__(self, name, event_json):
        super().__init__(name=name, event_json=event_json)

    def run(self):
        self.logger.debug('Running the {} detection module'.format(self.name))

        needles = [indicator.lower() for indicator in self._INDICATORS]
        for sample in self.event_json['sandbox']:
            # Check every process tree (raw and decoded) from this sample.
            for tree in sample['process_trees'] + sample['process_trees_decoded']:
                haystack = tree.lower()
                if all(needle in haystack for needle in needles):
                    self.detections.append(
                        'Detected KeyBase by the process tree: {}'.format(
                            ' AND '.join(self._INDICATORS)))
                    self.tags.append('keybase')
| 37.708333 | 116 | 0.609945 | from lib.modules.DetectionModule import *
class Module(DetectionModule):
    """Flags KeyBase keylogger activity inside sandboxed process trees."""
    def __init__(self, name, event_json):
        super().__init__(name=name, event_json=event_json)
    def run(self):
        self.logger.debug('Running the {} detection module'.format(self.name))
        # Check every sandboxed sample attached to the event.
        for sample in self.event_json['sandbox']:
            # Raw and decoded process trees are scanned the same way.
            trees = sample['process_trees'] + sample['process_trees_decoded']
            for tree in trees:
                tree = tree.lower()
                # Marker files KeyBase drops; a tree must mention BOTH.
                strings = ['C:\\ProgramData\\Mails.txt', 'C:\\ProgramData\\Browsers.txt']
                if all(string.lower() in tree for string in strings):
                    self.detections.append('Detected KeyBase by the process tree: {}'.format(' AND '.join(strings)))
                    self.tags.append('keybase')
| true | true |
1c46789dc107becd64aab237d7fbcb37ef2c7d33 | 3,658 | py | Python | assets/pinyin-python-server/server.py | admpub/AreaCity-JsSpider-StatsGov | c677db485d33a479ffb2bb28da8fc377a56d9ec9 | [
"MIT"
] | 3,332 | 2018-11-28T07:11:37.000Z | 2022-03-31T18:54:17.000Z | assets/pinyin-python-server/server.py | xlehehe/AreaCity-JsSpider-StatsGov | c36f95ed88bc24469a10fca7959d0350117e57dc | [
"MIT"
] | 21 | 2019-03-26T06:54:26.000Z | 2022-02-12T05:03:36.000Z | assets/pinyin-python-server/server.py | xlehehe/AreaCity-JsSpider-StatsGov | c36f95ed88bc24469a10fca7959d0350117e57dc | [
"MIT"
] | 663 | 2018-11-28T10:32:56.000Z | 2022-03-29T15:00:00.000Z | # -*- coding:utf-8 -*-
"""
GitHub: https://github.com/xiangyuecn/AreaCity-JsSpider-StatsGov/assets/pinyin-python-server
使用的HanLP (https://github.com/hankcs/HanLP) 语言处理库
【1】安装Miniconda
conda版本随意
【2】安装pyhanlp
https://github.com/hankcs/pyhanlp/wiki/Windows
测试发现python3.7.1 windows下ssl有问题无法安装,conda切换成python 3.6.4测试安装正常
安装好后运行一下hanlp命令,会提示下载,看第3步
【3】下载字典和jar
参考半自动配置: https://github.com/hankcs/pyhanlp/wiki/%E6%89%8B%E5%8A%A8%E9%85%8D%E7%BD%AE
字典和jar存放目录一般在Miniconda3[\envs\py36]\Lib\site-packages\pyhanlp\static
jar直接下载最新releases
字典最好直接clone仓库/data目录最新版本(用svn下载速度快很多,无需model数据),一样的在存储目录内放一个data文件夹,releases对bug处理稍微滞后一点。
另外需要修改hanlp.properties,给root赋值为当前目录完整路径。
svn: https://github.com/hankcs/HanLP/trunk/data
【4】运行
python server.py
【5】浏览器访问
http://127.0.0.1:9527/pinyin?txt=要拼的文字
"拼音。m" 返回结果 {c:0,m:"",v:["pin","yin","F。","Fm"]},c=0时代表正常,其他代表出错,m为错误原因,拼音如果是字母符号会用F打头
"""
import sys
if sys.version_info.major < 3:
print("Require python3 environment!")
exit(1)
from pyhanlp import *
import traceback
import time
import json
import urllib
from http.server import HTTPServer, BaseHTTPRequestHandler
class HttpHandler(BaseHTTPRequestHandler):
    """Minimal JSON-over-HTTP handler exposing HanLP pinyin conversion.

    Routes:
        /        -- liveness check.
        /pinyin  -- ?txt=... converts text to toneless pinyin syllables;
                    non-Chinese characters are echoed back prefixed with 'F'.

    Every response body is JSON: {c, m, v, T, D} where c != 0 signals an
    error, m carries the error text, v the payload, T the start timestamp
    in ms and D the elapsed time in ms.
    """

    def _response(self, path, args):
        """Dispatch *path* with query/body string *args*, write the JSON reply."""
        startTime = time.time()
        code = 200
        rtv = {'c': 0, 'm': '', 'v': ''}
        try:
            if args:
                # Parse "a=1&b=2" into a flat dict keeping only the first value.
                args = urllib.parse.parse_qs(args).items()
                args = dict([(k, v[0]) for k, v in args])
            else:
                args = {}
            if path == "/":
                rtv["v"] = "服务器已准备好"
            elif path == "/pinyin":
                txt = args.get("txt", "")
                pinyin_list = HanLP.convertToPinyinList(txt)
                syllables = []
                Pinyin = JClass("com.hankcs.hanlp.dictionary.py.Pinyin")
                for i in range(pinyin_list.size()):
                    pinyin = pinyin_list[i]
                    if pinyin == Pinyin.none5:
                        # Not a Chinese character: echo it with an 'F' marker.
                        syllables.append('F' + txt[i])
                    else:
                        syllables.append(pinyin.getPinyinWithoutTone())
                rtv["v"] = syllables
            else:
                code = 404
                rtv["c"] = 404
                rtv["m"] = "路径" + path + "不存在"
        except Exception as e:
            rtv["c"] = 1
            rtv["m"] = '服务器错误:' + str(e) + "\n" + traceback.format_exc()
        rtv["T"] = int(startTime * 1000)
        rtv["D"] = int((time.time() - startTime) * 1000)
        try:
            rtv = json.dumps(rtv, ensure_ascii=False)
        except Exception as e:
            rtv = {'c': 2, 'm': '服务器返回数据错误:' + str(e) + "\n" + traceback.format_exc(), 'v': ''}
            rtv = json.dumps(rtv, ensure_ascii=False)
        self.send_response(code)
        self.send_header('Content-type', 'text/json; charset=utf-8')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(rtv.encode())

    def do_GET(self):
        # urllib.parse.splitquery() was deprecated in 3.8 and later removed;
        # urlsplit() is the supported equivalent ('' query is falsy, like None).
        parts = urllib.parse.urlsplit(self.path)
        self._response(parts.path, parts.query)

    def do_POST(self):
        # A missing Content-Length header would make int() raise; treat it as
        # an empty body instead of answering 500.
        length = int(self.headers.get('content-length') or 0)
        args = self.rfile.read(length).decode("utf-8")
        self._response(self.path, args)
# Bind to localhost only; serve_forever() blocks until the process is killed.
httpd = HTTPServer(('127.0.0.1', 9527), HttpHandler)
httpd.serve_forever()
| 30.483333 | 92 | 0.52269 |
import sys
if sys.version_info.major < 3:
print("Require python3 environment!")
exit(1)
from pyhanlp import *
import traceback
import time
import json
import urllib
from http.server import HTTPServer, BaseHTTPRequestHandler
class HttpHandler(BaseHTTPRequestHandler):
    """Minimal JSON-over-HTTP handler exposing HanLP pinyin conversion.

    Routes:
        /        -- liveness check.
        /pinyin  -- ?txt=... converts text to toneless pinyin syllables;
                    non-Chinese characters are echoed back prefixed with 'F'.

    Every response body is JSON: {c, m, v, T, D} where c != 0 signals an
    error, m carries the error text, v the payload, T the start timestamp
    in ms and D the elapsed time in ms.
    """

    def _response(self, path, args):
        """Dispatch *path* with query/body string *args*, write the JSON reply."""
        startTime = time.time()
        code = 200
        rtv = {'c': 0, 'm': '', 'v': ''}
        try:
            if args:
                # Parse "a=1&b=2" into a flat dict keeping only the first value.
                args = urllib.parse.parse_qs(args).items()
                args = dict([(k, v[0]) for k, v in args])
            else:
                args = {}
            if path == "/":
                rtv["v"] = "服务器已准备好"
            elif path == "/pinyin":
                txt = args.get("txt", "")
                pinyin_list = HanLP.convertToPinyinList(txt)
                syllables = []
                Pinyin = JClass("com.hankcs.hanlp.dictionary.py.Pinyin")
                for i in range(pinyin_list.size()):
                    pinyin = pinyin_list[i]
                    if pinyin == Pinyin.none5:
                        # Not a Chinese character: echo it with an 'F' marker.
                        syllables.append('F' + txt[i])
                    else:
                        syllables.append(pinyin.getPinyinWithoutTone())
                rtv["v"] = syllables
            else:
                code = 404
                rtv["c"] = 404
                rtv["m"] = "路径" + path + "不存在"
        except Exception as e:
            rtv["c"] = 1
            rtv["m"] = '服务器错误:' + str(e) + "\n" + traceback.format_exc()
        rtv["T"] = int(startTime * 1000)
        rtv["D"] = int((time.time() - startTime) * 1000)
        try:
            rtv = json.dumps(rtv, ensure_ascii=False)
        except Exception as e:
            rtv = {'c': 2, 'm': '服务器返回数据错误:' + str(e) + "\n" + traceback.format_exc(), 'v': ''}
            rtv = json.dumps(rtv, ensure_ascii=False)
        self.send_response(code)
        self.send_header('Content-type', 'text/json; charset=utf-8')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(rtv.encode())

    def do_GET(self):
        # urllib.parse.splitquery() was deprecated in 3.8 and later removed;
        # urlsplit() is the supported equivalent ('' query is falsy, like None).
        parts = urllib.parse.urlsplit(self.path)
        self._response(parts.path, parts.query)

    def do_POST(self):
        # A missing Content-Length header would make int() raise; treat it as
        # an empty body instead of answering 500.
        length = int(self.headers.get('content-length') or 0)
        args = self.rfile.read(length).decode("utf-8")
        self._response(self.path, args)
# Bind to localhost only; serve_forever() blocks until the process is killed.
httpd = HTTPServer(('127.0.0.1', 9527), HttpHandler)
httpd.serve_forever()
| true | true |
1c467a246f902e6c48bf43554502bd3ba0323cb2 | 7,868 | py | Python | supervisor/bootstrap.py | pssc/supervisor | 2f09ee05c52e255fbdd8b795451d39b24278ef35 | [
"Apache-2.0"
] | 24 | 2020-03-08T21:13:00.000Z | 2020-03-11T06:18:43.000Z | supervisor/bootstrap.py | pssc/supervisor | 2f09ee05c52e255fbdd8b795451d39b24278ef35 | [
"Apache-2.0"
] | null | null | null | supervisor/bootstrap.py | pssc/supervisor | 2f09ee05c52e255fbdd8b795451d39b24278ef35 | [
"Apache-2.0"
] | null | null | null | """Bootstrap Supervisor."""
import logging
import os
from pathlib import Path
import shutil
import signal
from colorlog import ColoredFormatter
from .addons import AddonManager
from .api import RestAPI
from .arch import CpuArch
from .auth import Auth
from .audio import Audio
from .const import SOCKET_DOCKER, UpdateChannels
from .core import Core
from .coresys import CoreSys
from .dbus import DBusManager
from .discovery import Discovery
from .dns import CoreDNS
from .hassos import HassOS
from .homeassistant import HomeAssistant
from .host import HostManager
from .hwmon import HwMonitor
from .ingress import Ingress
from .services import ServiceManager
from .snapshots import SnapshotManager
from .store import StoreManager
from .supervisor import Supervisor
from .tasks import Tasks
from .updater import Updater
from .secrets import SecretsManager
from .utils.dt import fetch_timezone
_LOGGER: logging.Logger = logging.getLogger(__name__)
ENV_SHARE = "SUPERVISOR_SHARE"
ENV_NAME = "SUPERVISOR_NAME"
ENV_REPO = "HOMEASSISTANT_REPOSITORY"
MACHINE_ID = Path("/etc/machine-id")
async def initialize_coresys():
    """Initialize supervisor coresys/objects.

    Builds the CoreSys container, attaches every subsystem to it, creates
    the on-disk folder layout, reads the host machine-id and resolves the
    timezone. Returns the fully wired CoreSys instance.
    """
    coresys = CoreSys()
    # Initialize core objects; attachment order follows their dependencies,
    # so do not reorder without checking the constructors.
    coresys.core = Core(coresys)
    coresys.dns = CoreDNS(coresys)
    coresys.arch = CpuArch(coresys)
    coresys.audio = Audio(coresys)
    coresys.auth = Auth(coresys)
    coresys.updater = Updater(coresys)
    coresys.api = RestAPI(coresys)
    coresys.supervisor = Supervisor(coresys)
    coresys.homeassistant = HomeAssistant(coresys)
    coresys.addons = AddonManager(coresys)
    coresys.snapshots = SnapshotManager(coresys)
    coresys.host = HostManager(coresys)
    coresys.hwmonitor = HwMonitor(coresys)
    coresys.ingress = Ingress(coresys)
    coresys.tasks = Tasks(coresys)
    coresys.services = ServiceManager(coresys)
    coresys.store = StoreManager(coresys)
    coresys.discovery = Discovery(coresys)
    coresys.dbus = DBusManager(coresys)
    coresys.hassos = HassOS(coresys)
    coresys.secrets = SecretsManager(coresys)
    # bootstrap config: make sure all data folders exist
    initialize_system_data(coresys)
    # Set Machine/Host ID when the host exposes /etc/machine-id
    if MACHINE_ID.exists():
        coresys.machine_id = MACHINE_ID.read_text().strip()
    # Init TimeZone: resolve the real zone only while the default is still UTC
    if coresys.config.timezone == "UTC":
        coresys.config.timezone = await fetch_timezone(coresys.websession)
    return coresys
def initialize_system_data(coresys: CoreSys):
    """Set up the default configuration and create all data folders.

    Also applies the configured log level and, when SUPERVISOR_DEV is set,
    switches the updater to the dev channel with debug logging.
    """
    config = coresys.config

    # (folder, log message, create parent directories) — one entry per data
    # dir; messages and parents flags are unchanged from the original code.
    folders = (
        (config.path_homeassistant, "Create Home Assistant configuration folder %s", False),
        (config.path_ssl, "Create Supervisor SSL/TLS folder %s", False),
        (config.path_addons_data, "Create Supervisor Add-on data folder %s", True),
        (config.path_addons_local, "Create Supervisor Add-on local repository folder %s", True),
        (config.path_addons_git, "Create Supervisor Add-on git repositories folder %s", True),
        (config.path_tmp, "Create Supervisor temp folder %s", True),
        (config.path_backup, "Create Supervisor backup folder %s", False),
        (config.path_share, "Create Supervisor share folder %s", False),
        (config.path_apparmor, "Create Supervisor Apparmor folder %s", False),
        (config.path_dns, "Create Supervisor DNS folder %s", False),
        (config.path_audio, "Create Supervisor audio folder %s", False),
    )
    for folder, message, parents in folders:
        if not folder.is_dir():
            _LOGGER.info(message, folder)
            folder.mkdir(parents=parents)

    # Update log level
    coresys.config.modify_log_level()

    # Check if ENV is in development mode. NOTE: any non-empty value —
    # including "0" — enables dev mode, since bool("0") is True.
    if bool(os.environ.get("SUPERVISOR_DEV", 0)):
        _LOGGER.warning("SUPERVISOR_DEV is set")
        coresys.updater.channel = UpdateChannels.DEV
        coresys.config.logging = "debug"
        coresys.config.debug = True
def migrate_system_env(coresys: CoreSys):
    """Cleanup some stuff after update."""
    # hass.io 0.37 -> 0.38: the add-on build dir moved; drop the old, empty
    # location if it is still around.
    legacy_build_dir = Path(coresys.config.path_hassio, "addons/build")
    if not legacy_build_dir.is_dir():
        return
    try:
        legacy_build_dir.rmdir()
    except OSError:
        _LOGGER.warning("Can't cleanup old Add-on build directory")
def initialize_logging():
    """Set up root logging with colored output."""
    logging.basicConfig(level=logging.INFO)

    base_fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s"

    # suppress overly verbose logs from libraries that aren't helpful
    logging.getLogger("aiohttp.access").setLevel(logging.WARNING)

    root_handler = logging.getLogger().handlers[0]
    root_handler.setFormatter(
        ColoredFormatter(
            f"%(log_color)s{base_fmt}%(reset)s",
            datefmt="%y-%m-%d %H:%M:%S",
            reset=True,
            log_colors={
                "DEBUG": "cyan",
                "INFO": "green",
                "WARNING": "yellow",
                "ERROR": "red",
                "CRITICAL": "red",
            },
        )
    )
def check_environment() -> None:
    """Log a fatal message for every missing runtime prerequisite."""
    # Required environment variables.
    for env_key in (ENV_SHARE, ENV_NAME, ENV_REPO):
        if env_key not in os.environ:
            _LOGGER.fatal("Can't find %s in env!", env_key)

    # Docker socket must be mounted into the container.
    if not SOCKET_DOCKER.is_socket():
        _LOGGER.fatal("Can't find Docker socket!")

    # Required helper binaries on PATH.
    if not shutil.which("socat"):
        _LOGGER.fatal("Can't find socat!")
    if not shutil.which("gdbus"):
        _LOGGER.fatal("Can't find gdbus!")
def reg_signal(loop):
    """Register SIGTERM, SIGHUP and SIGINT handlers that stop *loop*.

    (The old docstring mentioned SIGKILL, which cannot be caught.)
    """
    # add_signal_handler raises ValueError (unsupported signal / not the main
    # thread) or RuntimeError (loop already closed); both only warrant a
    # warning, matching the original per-signal try/except blocks.
    for sig in (signal.SIGTERM, signal.SIGHUP, signal.SIGINT):
        try:
            loop.add_signal_handler(sig, lambda: loop.call_soon(loop.stop))
        except (ValueError, RuntimeError):
            _LOGGER.warning("Could not bind to %s", sig.name)
def supervisor_debugger(coresys: CoreSys) -> None:
    """Attach the ptvsd remote debugger when debug mode is enabled."""
    if not coresys.config.debug:
        return
    # Imported lazily so ptvsd is only required when debugging.
    # pylint: disable=import-outside-toplevel
    import ptvsd
    _LOGGER.info("Initialize Supervisor debugger")
    # Listen on all interfaces so an IDE can attach from outside the container.
    ptvsd.enable_attach(address=("0.0.0.0", 33333), redirect_output=True)
    if coresys.config.debug_block:
        # Optionally block startup until a debugger client connects.
        _LOGGER.info("Wait until debugger is attached")
        ptvsd.wait_for_attach()
| 31.098814 | 88 | 0.680859 | import logging
import os
from pathlib import Path
import shutil
import signal
from colorlog import ColoredFormatter
from .addons import AddonManager
from .api import RestAPI
from .arch import CpuArch
from .auth import Auth
from .audio import Audio
from .const import SOCKET_DOCKER, UpdateChannels
from .core import Core
from .coresys import CoreSys
from .dbus import DBusManager
from .discovery import Discovery
from .dns import CoreDNS
from .hassos import HassOS
from .homeassistant import HomeAssistant
from .host import HostManager
from .hwmon import HwMonitor
from .ingress import Ingress
from .services import ServiceManager
from .snapshots import SnapshotManager
from .store import StoreManager
from .supervisor import Supervisor
from .tasks import Tasks
from .updater import Updater
from .secrets import SecretsManager
from .utils.dt import fetch_timezone
_LOGGER: logging.Logger = logging.getLogger(__name__)
ENV_SHARE = "SUPERVISOR_SHARE"
ENV_NAME = "SUPERVISOR_NAME"
ENV_REPO = "HOMEASSISTANT_REPOSITORY"
MACHINE_ID = Path("/etc/machine-id")
async def initialize_coresys():
    """Initialize supervisor coresys/objects.

    Builds the CoreSys container, attaches every subsystem to it, creates
    the on-disk folder layout, reads the host machine-id and resolves the
    timezone. Returns the fully wired CoreSys instance.
    """
    coresys = CoreSys()
    # Attachment order follows the subsystems' dependencies; do not reorder
    # without checking the constructors.
    coresys.core = Core(coresys)
    coresys.dns = CoreDNS(coresys)
    coresys.arch = CpuArch(coresys)
    coresys.audio = Audio(coresys)
    coresys.auth = Auth(coresys)
    coresys.updater = Updater(coresys)
    coresys.api = RestAPI(coresys)
    coresys.supervisor = Supervisor(coresys)
    coresys.homeassistant = HomeAssistant(coresys)
    coresys.addons = AddonManager(coresys)
    coresys.snapshots = SnapshotManager(coresys)
    coresys.host = HostManager(coresys)
    coresys.hwmonitor = HwMonitor(coresys)
    coresys.ingress = Ingress(coresys)
    coresys.tasks = Tasks(coresys)
    coresys.services = ServiceManager(coresys)
    coresys.store = StoreManager(coresys)
    coresys.discovery = Discovery(coresys)
    coresys.dbus = DBusManager(coresys)
    coresys.hassos = HassOS(coresys)
    coresys.secrets = SecretsManager(coresys)
    # Bootstrap config: make sure all data folders exist.
    initialize_system_data(coresys)
    # Set the machine ID when the host exposes /etc/machine-id.
    if MACHINE_ID.exists():
        coresys.machine_id = MACHINE_ID.read_text().strip()
    # Resolve the real timezone only while the default is still UTC.
    if coresys.config.timezone == "UTC":
        coresys.config.timezone = await fetch_timezone(coresys.websession)
    return coresys
def initialize_system_data(coresys: CoreSys):
    """Set up the default configuration and create all data folders.

    Also applies the configured log level and, when SUPERVISOR_DEV is set,
    switches the updater to the dev channel with debug logging.
    """
    config = coresys.config

    # (folder, log message, create parent directories) — one entry per data
    # dir; messages and parents flags are unchanged from the original code.
    folders = (
        (config.path_homeassistant, "Create Home Assistant configuration folder %s", False),
        (config.path_ssl, "Create Supervisor SSL/TLS folder %s", False),
        (config.path_addons_data, "Create Supervisor Add-on data folder %s", True),
        (config.path_addons_local, "Create Supervisor Add-on local repository folder %s", True),
        (config.path_addons_git, "Create Supervisor Add-on git repositories folder %s", True),
        (config.path_tmp, "Create Supervisor temp folder %s", True),
        (config.path_backup, "Create Supervisor backup folder %s", False),
        (config.path_share, "Create Supervisor share folder %s", False),
        (config.path_apparmor, "Create Supervisor Apparmor folder %s", False),
        (config.path_dns, "Create Supervisor DNS folder %s", False),
        (config.path_audio, "Create Supervisor audio folder %s", False),
    )
    for folder, message, parents in folders:
        if not folder.is_dir():
            _LOGGER.info(message, folder)
            folder.mkdir(parents=parents)

    # Apply the configured log level.
    coresys.config.modify_log_level()

    # Development mode. NOTE: any non-empty value — including "0" — enables
    # it, since bool("0") is True.
    if bool(os.environ.get("SUPERVISOR_DEV", 0)):
        _LOGGER.warning("SUPERVISOR_DEV is set")
        coresys.updater.channel = UpdateChannels.DEV
        coresys.config.logging = "debug"
        coresys.config.debug = True
def migrate_system_env(coresys: CoreSys):
    """Cleanup some stuff after update."""
    # hass.io 0.37 -> 0.38: the add-on build dir moved; drop the old, empty
    # location if it is still around.
    legacy_build_dir = Path(coresys.config.path_hassio, "addons/build")
    if not legacy_build_dir.is_dir():
        return
    try:
        legacy_build_dir.rmdir()
    except OSError:
        _LOGGER.warning("Can't cleanup old Add-on build directory")
def initialize_logging():
    """Set up root logging with colored output."""
    logging.basicConfig(level=logging.INFO)

    base_fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s"

    # Suppress overly verbose logs from libraries that aren't helpful.
    logging.getLogger("aiohttp.access").setLevel(logging.WARNING)

    root_handler = logging.getLogger().handlers[0]
    root_handler.setFormatter(
        ColoredFormatter(
            f"%(log_color)s{base_fmt}%(reset)s",
            datefmt="%y-%m-%d %H:%M:%S",
            reset=True,
            log_colors={
                "DEBUG": "cyan",
                "INFO": "green",
                "WARNING": "yellow",
                "ERROR": "red",
                "CRITICAL": "red",
            },
        )
    )
def check_environment() -> None:
    """Log a fatal message for every missing runtime prerequisite."""
    # Required environment variables.
    for env_key in (ENV_SHARE, ENV_NAME, ENV_REPO):
        if env_key not in os.environ:
            _LOGGER.fatal("Can't find %s in env!", env_key)

    # Docker socket must be mounted into the container.
    if not SOCKET_DOCKER.is_socket():
        _LOGGER.fatal("Can't find Docker socket!")

    # Required helper binaries on PATH.
    if not shutil.which("socat"):
        _LOGGER.fatal("Can't find socat!")
    if not shutil.which("gdbus"):
        _LOGGER.fatal("Can't find gdbus!")
def reg_signal(loop):
    """Register SIGTERM, SIGHUP and SIGINT handlers that stop *loop*."""
    # add_signal_handler raises ValueError (unsupported signal / not the main
    # thread) or RuntimeError (loop already closed); both only warrant a
    # warning, matching the original per-signal try/except blocks.
    for sig in (signal.SIGTERM, signal.SIGHUP, signal.SIGINT):
        try:
            loop.add_signal_handler(sig, lambda: loop.call_soon(loop.stop))
        except (ValueError, RuntimeError):
            _LOGGER.warning("Could not bind to %s", sig.name)
def supervisor_debugger(coresys: CoreSys) -> None:
    """Attach the ptvsd remote debugger when debug mode is enabled."""
    if not coresys.config.debug:
        return
    # Imported lazily so ptvsd is only required when debugging.
    import ptvsd
    _LOGGER.info("Initialize Supervisor debugger")
    # Listen on all interfaces so an IDE can attach from outside the container.
    ptvsd.enable_attach(address=("0.0.0.0", 33333), redirect_output=True)
    if coresys.config.debug_block:
        # Optionally block startup until a debugger client connects.
        _LOGGER.info("Wait until debugger is attached")
        ptvsd.wait_for_attach()
| true | true |
1c467a33edb77eed8083f7b4d9221ac4d6a5fbee | 6,264 | py | Python | marshmallow_oneofschema/one_of_schema.py | misterflop/marshmallow-oneofschema | 64f836d7c332b515e3e4c956683366fa11d17966 | [
"MIT"
] | null | null | null | marshmallow_oneofschema/one_of_schema.py | misterflop/marshmallow-oneofschema | 64f836d7c332b515e3e4c956683366fa11d17966 | [
"MIT"
] | null | null | null | marshmallow_oneofschema/one_of_schema.py | misterflop/marshmallow-oneofschema | 64f836d7c332b515e3e4c956683366fa11d17966 | [
"MIT"
] | null | null | null | from marshmallow import Schema, ValidationError
class OneOfSchema(Schema):
    """
    This is a special kind of schema that actually multiplexes other schemas
    based on object type. When serializing values, it uses get_obj_type() method
    to get object type name. Then it uses `type_schemas` name-to-Schema mapping
    to get schema for that particular object type, serializes object using that
    schema and adds an extra "type" field with name of object type.
    Deserialization is reverse.

    Example:

        class Foo(object):
            def __init__(self, foo):
                self.foo = foo

        class Bar(object):
            def __init__(self, bar):
                self.bar = bar

        class FooSchema(marshmallow.Schema):
            foo = marshmallow.fields.String(required=True)

            @marshmallow.post_load
            def make_foo(self, data, **kwargs):
                return Foo(**data)

        class BarSchema(marshmallow.Schema):
            bar = marshmallow.fields.Integer(required=True)

            @marshmallow.post_load
            def make_bar(self, data, **kwargs):
                return Bar(**data)

        class MyUberSchema(marshmallow.OneOfSchema):
            type_schemas = {
                'foo': FooSchema,
                'bar': BarSchema,
            }

            def get_obj_type(self, obj):
                if isinstance(obj, Foo):
                    return 'foo'
                elif isinstance(obj, Bar):
                    return 'bar'
                else:
                    raise Exception('Unknown object type: %s' % repr(obj))

        MyUberSchema().dump([Foo(foo='hello'), Bar(bar=123)], many=True)
        # => [{'type': 'foo', 'foo': 'hello'}, {'type': 'bar', 'bar': 123}]

    You can control type field name added to serialized object representation by
    setting `type_field` class property.
    """

    type_field = "type"
    type_field_remove = True
    # name -> Schema class (or instance); subclasses override this mapping.
    type_schemas = []

    def get_obj_type(self, obj):
        """Return the type name used to pick a schema for *obj*."""
        return obj.__class__.__name__

    def dump(self, obj, *, many=None, **kwargs):
        """Serialize *obj*; with many, collect per-item errors by index."""
        many = self.many if many is None else bool(many)
        if not many:
            return self._dump(obj, **kwargs)
        result_data = []
        result_errors = {}
        for idx, item in enumerate(obj):
            try:
                result_data.append(self._dump(item, **kwargs))
            except ValidationError as error:
                result_errors[idx] = error.normalized_messages()
                result_data.append(error.valid_data)
        if not result_errors:
            return result_data
        raise ValidationError(result_errors, data=obj, valid_data=result_data)

    def _dump(self, obj, *, update_fields=True, **kwargs):
        """Serialize a single object with its type-specific schema.

        NOTE: unknown types are reported by returning a ``(None, errors)``
        tuple (kept for backward compatibility) rather than raising.
        """
        obj_type = self.get_obj_type(obj)
        if not obj_type:
            return (
                None,
                {"_schema": "Unknown object class: %s" % obj.__class__.__name__},
            )
        type_schema = self.type_schemas.get(obj_type)
        if not type_schema:
            return None, {"_schema": "Unsupported object type: %s" % obj_type}
        schema = type_schema if isinstance(type_schema, Schema) else type_schema()
        schema.context.update(getattr(self, "context", {}))
        result = schema.dump(obj, many=False, **kwargs)
        if result is not None:
            # Tag the serialized payload with its discriminator.
            result[self.type_field] = obj_type
        return result

    def load(self, data, *, many=None, partial=None, unknown=None, **kwargs):
        """Deserialize *data*; with many, collect per-item errors by index."""
        many = self.many if many is None else bool(many)
        if partial is None:
            partial = self.partial
        if not many:
            try:
                result = self._load(data, partial=partial, unknown=unknown, **kwargs)
                errors = {}
            except ValidationError as error:
                # Bug fix: the previous implementation left `result` unbound
                # and `errors` empty on this path, so a failing single-object
                # load crashed with NameError instead of raising
                # ValidationError.
                result = error.valid_data
                errors = error.normalized_messages()
        else:
            result_data = []
            result_errors = {}
            for idx, item in enumerate(data):
                try:
                    # Bug fix: `unknown` is now forwarded here, matching the
                    # single-object branch.
                    result_data.append(
                        self._load(item, partial=partial, unknown=unknown, **kwargs)
                    )
                except ValidationError as error:
                    result_errors[idx] = error.normalized_messages()
                    result_data.append(error.valid_data)
            result = result_data
            errors = result_errors
        if not errors:
            return result
        raise ValidationError(errors, data=data, valid_data=result)

    def _load(self, data, *, partial=None, unknown=None, **kwargs):
        """Deserialize a single mapping, dispatching on its type field."""
        if not isinstance(data, dict):
            raise ValidationError({"_schema": "Invalid data type: %s" % data})
        data = dict(data)
        unknown = unknown or self.unknown
        data_type = data.get(self.type_field)
        if self.type_field in data and self.type_field_remove:
            data.pop(self.type_field)
        if not data_type:
            raise ValidationError(
                {self.type_field: ["Missing data for required field."]}
            )
        try:
            type_schema = self.type_schemas.get(data_type)
        except TypeError:
            # data_type could be unhashable
            raise ValidationError({self.type_field: ["Invalid value: %s" % data_type]})
        if not type_schema:
            raise ValidationError(
                {self.type_field: ["Unsupported value: %s" % data_type]}
            )
        schema = type_schema if isinstance(type_schema, Schema) else type_schema()
        schema.context.update(getattr(self, "context", {}))
        return schema.load(data, many=False, partial=partial, unknown=unknown, **kwargs)

    def validate(self, data, *, many=None, partial=None):
        """Return normalized error messages instead of raising."""
        try:
            self.load(data, many=many, partial=partial)
        except ValidationError as ve:
            return ve.messages
        return {}
| 36.208092 | 88 | 0.566252 | from marshmallow import Schema, ValidationError
class OneOfSchema(Schema):
    """Schema that multiplexes other schemas based on object type.

    Serialization picks a schema from the ``type_schemas`` mapping via
    ``get_obj_type()`` and tags the result with a ``type_field`` entry;
    deserialization reads that field, strips it (when ``type_field_remove``)
    and delegates to the matching schema.
    """

    type_field = "type"
    type_field_remove = True
    # name -> Schema class (or instance); subclasses override this mapping.
    type_schemas = []

    def get_obj_type(self, obj):
        """Return the type name used to pick a schema for *obj*."""
        return obj.__class__.__name__

    def dump(self, obj, *, many=None, **kwargs):
        """Serialize *obj*; with many, collect per-item errors by index."""
        many = self.many if many is None else bool(many)
        if not many:
            return self._dump(obj, **kwargs)
        result_data = []
        result_errors = {}
        for idx, item in enumerate(obj):
            try:
                result_data.append(self._dump(item, **kwargs))
            except ValidationError as error:
                result_errors[idx] = error.normalized_messages()
                result_data.append(error.valid_data)
        if not result_errors:
            return result_data
        raise ValidationError(result_errors, data=obj, valid_data=result_data)

    def _dump(self, obj, *, update_fields=True, **kwargs):
        """Serialize a single object with its type-specific schema.

        NOTE: unknown types are reported by returning a ``(None, errors)``
        tuple (kept for backward compatibility) rather than raising.
        """
        obj_type = self.get_obj_type(obj)
        if not obj_type:
            return (
                None,
                {"_schema": "Unknown object class: %s" % obj.__class__.__name__},
            )
        type_schema = self.type_schemas.get(obj_type)
        if not type_schema:
            return None, {"_schema": "Unsupported object type: %s" % obj_type}
        schema = type_schema if isinstance(type_schema, Schema) else type_schema()
        schema.context.update(getattr(self, "context", {}))
        result = schema.dump(obj, many=False, **kwargs)
        if result is not None:
            result[self.type_field] = obj_type
        return result

    def load(self, data, *, many=None, partial=None, unknown=None, **kwargs):
        """Deserialize *data*; with many, collect per-item errors by index."""
        many = self.many if many is None else bool(many)
        if partial is None:
            partial = self.partial
        if not many:
            try:
                result = self._load(data, partial=partial, unknown=unknown, **kwargs)
                errors = {}
            except ValidationError as error:
                # Bug fix: the previous implementation left `result` unbound
                # and `errors` empty on this path, so a failing single-object
                # load crashed with NameError instead of raising
                # ValidationError.
                result = error.valid_data
                errors = error.normalized_messages()
        else:
            result_data = []
            result_errors = {}
            for idx, item in enumerate(data):
                try:
                    # Bug fix: `unknown` is now forwarded here, matching the
                    # single-object branch.
                    result_data.append(
                        self._load(item, partial=partial, unknown=unknown, **kwargs)
                    )
                except ValidationError as error:
                    result_errors[idx] = error.normalized_messages()
                    result_data.append(error.valid_data)
            result = result_data
            errors = result_errors
        if not errors:
            return result
        raise ValidationError(errors, data=data, valid_data=result)

    def _load(self, data, *, partial=None, unknown=None, **kwargs):
        """Deserialize a single mapping, dispatching on its type field."""
        if not isinstance(data, dict):
            raise ValidationError({"_schema": "Invalid data type: %s" % data})
        data = dict(data)
        unknown = unknown or self.unknown
        data_type = data.get(self.type_field)
        if self.type_field in data and self.type_field_remove:
            data.pop(self.type_field)
        if not data_type:
            raise ValidationError(
                {self.type_field: ["Missing data for required field."]}
            )
        try:
            type_schema = self.type_schemas.get(data_type)
        except TypeError:
            # data_type could be unhashable
            raise ValidationError({self.type_field: ["Invalid value: %s" % data_type]})
        if not type_schema:
            raise ValidationError(
                {self.type_field: ["Unsupported value: %s" % data_type]}
            )
        schema = type_schema if isinstance(type_schema, Schema) else type_schema()
        schema.context.update(getattr(self, "context", {}))
        return schema.load(data, many=False, partial=partial, unknown=unknown, **kwargs)

    def validate(self, data, *, many=None, partial=None):
        """Return normalized error messages instead of raising."""
        try:
            self.load(data, many=many, partial=partial)
        except ValidationError as ve:
            return ve.messages
        return {}
| true | true |
1c467bde363eec1247fd8544df7541fa713b274f | 197 | py | Python | code/example1.py | ThomasHuke/knowPython | 0a9d3c224b36416b7eaa9d664e145bc0f2a63df2 | [
"MIT"
] | 4 | 2017-06-20T10:42:45.000Z | 2017-10-24T09:19:27.000Z | code/example1.py | ThomasHuke/knowPython | 0a9d3c224b36416b7eaa9d664e145bc0f2a63df2 | [
"MIT"
] | null | null | null | code/example1.py | ThomasHuke/knowPython | 0a9d3c224b36416b7eaa9d664e145bc0f2a63df2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def file():
for x, y in [[1, 3], [1, 4]]:
print('x , y ', x, y)
file()
# x , y 1 3
# x , y 1 4
print([x + x for x in range(1, 12)])
| 14.071429 | 36 | 0.436548 |
def file():
    """Print each (x, y) pair taken from a small list of 2-element lists."""
    for first, second in [[1, 3], [1, 4]]:
        print('x , y ', first, second)


file()
print([n + n for n in range(1, 12)])
| true | true |
1c467e126304bcb3bd6860b417e50dc19d57f51d | 12,032 | py | Python | dpmModule/jobs/cannonshooter.py | Jeongwoo-KGI/maplestory_dpm_calc | c474419146e377a05a724e9975a047649b7effa7 | [
"MIT"
] | 2 | 2020-12-18T17:02:21.000Z | 2021-02-01T04:16:33.000Z | dpmModule/jobs/cannonshooter.py | Jeongwoo-KGI/maplestory_dpm_calc | c474419146e377a05a724e9975a047649b7effa7 | [
"MIT"
] | null | null | null | dpmModule/jobs/cannonshooter.py | Jeongwoo-KGI/maplestory_dpm_calc | c474419146e377a05a724e9975a047649b7effa7 | [
"MIT"
] | null | null | null | from ..kernel import core
from ..character import characterKernel as ck
from ..status.ability import Ability_tool
from ..execution.rules import RuleSet, ConditionRule
from . import globalSkill
from .jobbranch import pirates
from .jobclass import adventurer
from . import jobutils
from math import ceil
from typing import Any, Dict
class JobGenerator(ck.JobGenerator):
    def __init__(self):
        """Set the Cannon Shooter (캐논슈터) job constants used by the framework."""
        super(JobGenerator, self).__init__()
        # Primary stat and displayed job name.
        self.jobtype = "str"
        self.jobname = "캐논슈터"
        # Number of V-enhance entries — assumption from the name; verify
        # against the framework's use of vEnhanceNum.
        self.vEnhanceNum = 16
        self.ability_list = Ability_tool.get_ability_set(
            "boss_pdamage", "crit", "reuse"
        )
        self.preEmptiveSkills = 2
def get_ruleset(self):
def cannonball_rule(soul_contract):
if soul_contract.is_active():
return True
if soul_contract.is_cooltime_left(50000, -1):
return False
return True
ruleset = RuleSet()
ruleset.add_rule(
ConditionRule("빅 휴즈 기간틱 캐논볼", "소울 컨트랙트", cannonball_rule),
RuleSet.BASE,
)
return ruleset
    def get_modifier_optimization_hint(self):
        """Return the modifier assumed during optimization (66% damage,
        6 crit damage, 30 armor ignore)."""
        return core.CharacterModifier(pdamage=66, crit_damage=6, armor_ignore=30)
    def get_passive_skill_list(
        self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
    ):
        """Return the always-on passive modifiers for Cannon Shooter.

        Several entries scale with the character's passive level plus combat
        orders; skill names stay in Korean because they are display/lookup
        strings used elsewhere.
        """
        passive_level = chtr.get_base_modifier().passive_level + self.combat
        # Flat attack / crit / stat passives.
        BuildupCannon = core.InformedCharacterModifier("빌드업 캐논", att=20)
        CriticalFire = core.InformedCharacterModifier(
            "크리티컬 파이어", crit=20, crit_damage=5
        )
        PirateTraining = core.InformedCharacterModifier(
            "파이렛 트레이닝", stat_main=30, stat_sub=30
        )
        MonkeyWavePassive = core.InformedCharacterModifier("몽키 웨이브(패시브)", crit=20)
        OakRuletPassive = core.InformedCharacterModifier(
            "오크통 룰렛(패시브)", pdamage_indep=10
        )
        ReinforceCannon = core.InformedCharacterModifier("리인포스 캐논", att=40)
        # Boss damage scales with combat orders.
        PirateSpirit = core.InformedCharacterModifier(
            "파이렛 스피릿", boss_pdamage=40 + self.combat
        )
        # Damage and armor-ignore scale with the passive level.
        OverburningCannon = core.InformedCharacterModifier(
            "오버버닝 캐논",
            pdamage_indep=30 + passive_level,
            armor_ignore=20 + passive_level // 2,
        )
        # Shared pirate-branch passive (Loaded Dice).
        LoadedDicePassive = pirates.LoadedDicePassiveWrapper(vEhc, 3, 4)
        return [
            BuildupCannon,
            CriticalFire,
            PirateTraining,
            MonkeyWavePassive,
            OakRuletPassive,
            ReinforceCannon,
            PirateSpirit,
            OverburningCannon,
            LoadedDicePassive,
        ]
    def get_not_implied_skill_list(
        self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
    ):
        """Return modifiers not baked into character stats: the weapon
        constant (무기상수) and weapon mastery (숙련도), the latter scaling
        with passive level plus combat orders."""
        passive_level = chtr.get_base_modifier().passive_level + self.combat
        WeaponConstant = core.InformedCharacterModifier("무기상수", pdamage_indep=50)
        Mastery = core.InformedCharacterModifier(
            "숙련도", pdamage_indep=-7.5 + 0.5 * ceil(passive_level / 2)
        )
        return [WeaponConstant, Mastery]
def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
"""
하이퍼 : 몽키트윈스-스플릿, 인핸스, 캐논버스터 - 리인포스, 보너스 어택.
롤링캐논레인보우 26타
코코볼 6초
이씨밤 5타
코강 순서:
버스터-서포트-다수기-롤캐
"""
COCOBALLHIT = options.get("cocoball_hit", 27)
ICBMHIT = 6
passive_level = chtr.get_base_modifier().passive_level + self.combat
# Buff skills
Booster = core.BuffSkill("부스터", 0, 200 * 1000).wrap(core.BuffSkillWrapper)
Buckshot = core.BuffSkill("벅 샷", 0, 180000).wrap(core.BuffSkillWrapper)
LuckyDice = (
core.BuffSkill(
"로디드 다이스",
delay=0,
remain=180 * 1000,
pdamage=20 # 로디드 데미지 고정.
+ 10 / 6
+ 10 / 6 * (5 / 6 + 1 / 11) * (10 * (5 + passive_level) * 0.01),
)
.isV(vEhc, 3, 4)
.wrap(core.BuffSkillWrapper)
)
MonkeyWave = core.DamageSkill(
"몽키 웨이브",
delay=810,
damage=860,
hit=1,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyWaveBuff = core.BuffSkill(
"몽키 웨이브(버프)",
delay=0,
remain=30000,
cooltime=-1,
crit_damage=5,
).wrap(core.BuffSkillWrapper)
MonkeyFurious = core.DamageSkill(
"몽키 퓨리어스",
delay=720,
damage=180,
hit=3,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyFuriousBuff = core.BuffSkill(
"몽키 퓨리어스(버프)",
delay=0,
remain=30000,
cooltime=-1,
pdamage=40,
).wrap(core.BuffSkillWrapper)
MonkeyFuriousDot = core.DotSkill(
"몽키 퓨리어스(도트)",
summondelay=0,
delay=1000,
damage=200,
hit=1,
remain=30000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
OakRoulette = core.BuffSkill(
"오크통 룰렛",
delay=840,
remain=180000,
rem=True,
cooltime=180000,
crit_damage=1.25,
).wrap(core.BuffSkillWrapper)
OakRuletDOT = core.DotSkill(
"오크통 룰렛(도트)",
summondelay=0,
delay=1000,
damage=50,
hit=1,
remain=5000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
MonkeyMagic = core.BuffSkill(
"하이퍼 몽키 스펠",
delay=0,
remain=180000,
rem=True,
stat_main=60 + passive_level,
stat_sub=60 + passive_level,
).wrap(core.BuffSkillWrapper)
# Damage Skills
CannonBuster = (
core.DamageSkill(
"캐논 버스터",
delay=690,
damage=(750 + 5 * self.combat) * 0.45, # BuckShot
hit=3 * (4 + 1),
modifier=core.CharacterModifier(
crit=15 + ceil(self.combat / 2),
armor_ignore=20 + self.combat // 2,
pdamage=20,
),
)
.setV(vEhc, 0, 2, True)
.wrap(core.DamageSkillWrapper)
)
# Summon Skills
SupportMonkeyTwins = (
core.SummonSkill(
"서포트 몽키 트윈스",
summondelay=720,
delay=930,
damage=(295 + 8 * self.combat) * 0.6, # Split Damage
hit=(1 + 1) * (2 + 1), # Split Damage, Enhance
remain=60000 + 2000 * self.combat,
rem=True,
)
.setV(vEhc, 1, 2, False)
.wrap(core.SummonSkillWrapper)
)
# Hyper
RollingCannonRainbow = (
core.SummonSkill(
"롤링 캐논 레인보우",
summondelay=480,
delay=12000 / 26,
damage=600,
hit=3,
remain=12000,
cooltime=90000,
)
.setV(vEhc, 3, 2, True)
.wrap(core.SummonSkillWrapper)
)
EpicAdventure = core.BuffSkill(
"에픽 어드벤처",
delay=0,
remain=60000,
cooltime=120000,
pdamage=10,
).wrap(core.BuffSkillWrapper)
# V skills
WEAPON_ATT = jobutils.get_weapon_att(chtr)
Overdrive = pirates.OverdriveWrapper(vEhc, 5, 5, WEAPON_ATT)
PirateFlag = adventurer.PirateFlagWrapper(vEhc, 4, 3, chtr.level)
MirrorBreak, MirrorSpider = globalSkill.SpiderInMirrorBuilder(vEhc, 0, 0)
# 쿨타임마다 사용
# 허수아비 대상 27회 충돌
BFGCannonball = core.StackableSummonSkillWrapper(
core.SummonSkill(
"빅 휴즈 기간틱 캐논볼",
summondelay=600,
delay=210,
damage=(450 + 15 * vEhc.getV(0, 0)) * 0.45, # BuckShot
hit=4 * 3,
remain=210 * COCOBALLHIT,
cooltime=25000,
).isV(vEhc, 0, 0),
max_stack=3,
)
ICBM = (
core.DamageSkill(
"ICBM",
delay=1140,
damage=(800 + 32 * vEhc.getV(1, 1)) * 0.45, # BuckShot
hit=5 * ICBMHIT * 3,
cooltime=30000,
red=True,
)
.isV(vEhc, 1, 1)
.wrap(core.DamageSkillWrapper)
)
ICBMDOT = (
core.SummonSkill(
"ICBM(장판)",
summondelay=0,
delay=15000 / 27, # 27타
damage=(500 + 20 * vEhc.getV(1, 1)) * 0.45, # BuckShot
hit=1 * 3,
remain=15000,
cooltime=-1,
)
.isV(vEhc, 1, 1)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Cannon = (
core.SummonSkill(
"스페셜 몽키 에스코트",
summondelay=780,
delay=1500,
damage=300 + 12 * vEhc.getV(2, 2),
hit=4 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=120000,
red=True,
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Bomb = (
core.SummonSkill(
"스페셜 몽키 에스코트(폭탄)",
summondelay=0,
delay=5000,
damage=450 + 18 * vEhc.getV(2, 2),
hit=7 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=-1,
modifier=core.CharacterModifier(armor_ignore=100),
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
FullMaker = (
core.SummonSkill(
"풀 메이커",
summondelay=720,
delay=360,
damage=(700 + 28 * vEhc.getV(0, 0)) * 0.45, # BuckShot
hit=3 * 3,
remain=360 * 20 - 1,
cooltime=60000,
red=True,
)
.isV(vEhc, 0, 0)
.wrap(core.SummonSkillWrapper)
)
### build graph relationships
MonkeyWave.onAfter(MonkeyWaveBuff)
MonkeyFurious.onAfters([MonkeyFuriousBuff, MonkeyFuriousDot])
CannonBuster.onAfter(OakRuletDOT)
BFGCannonball.onAfter(OakRuletDOT)
ICBM.onAfter(OakRuletDOT)
ICBM.onAfter(ICBMDOT)
SpecialMonkeyEscort_Cannon.onJustAfter(SpecialMonkeyEscort_Bomb)
return (
CannonBuster,
[
globalSkill.maple_heros(chtr.level, combat_level=self.combat),
globalSkill.useful_sharp_eyes(),
globalSkill.useful_combat_orders(),
globalSkill.useful_wind_booster(),
Booster,
OakRoulette,
Buckshot,
MonkeyMagic,
LuckyDice,
globalSkill.MapleHeroes2Wrapper(vEhc, 0, 0, chtr.level, self.combat),
EpicAdventure,
Overdrive,
PirateFlag,
globalSkill.soul_contract(),
]
+ [
SpecialMonkeyEscort_Cannon,
BFGCannonball,
FullMaker,
RollingCannonRainbow,
SupportMonkeyTwins,
]
+ [MonkeyWave, MonkeyFurious, ICBM, MirrorBreak]
+ [
SpecialMonkeyEscort_Bomb,
MirrorSpider,
OakRuletDOT,
MonkeyFuriousDot,
MonkeyWaveBuff,
MonkeyFuriousBuff,
ICBMDOT,
] # Not used from scheduler
+ [CannonBuster],
)
| 31.170984 | 85 | 0.499668 | from ..kernel import core
from ..character import characterKernel as ck
from ..status.ability import Ability_tool
from ..execution.rules import RuleSet, ConditionRule
from . import globalSkill
from .jobbranch import pirates
from .jobclass import adventurer
from . import jobutils
from math import ceil
from typing import Any, Dict
class JobGenerator(ck.JobGenerator):
def __init__(self):
super(JobGenerator, self).__init__()
self.jobtype = "str"
self.jobname = "캐논슈터"
self.vEnhanceNum = 16
self.ability_list = Ability_tool.get_ability_set(
"boss_pdamage", "crit", "reuse"
)
self.preEmptiveSkills = 2
def get_ruleset(self):
def cannonball_rule(soul_contract):
if soul_contract.is_active():
return True
if soul_contract.is_cooltime_left(50000, -1):
return False
return True
ruleset = RuleSet()
ruleset.add_rule(
ConditionRule("빅 휴즈 기간틱 캐논볼", "소울 컨트랙트", cannonball_rule),
RuleSet.BASE,
)
return ruleset
def get_modifier_optimization_hint(self):
return core.CharacterModifier(pdamage=66, crit_damage=6, armor_ignore=30)
def get_passive_skill_list(
self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
):
passive_level = chtr.get_base_modifier().passive_level + self.combat
BuildupCannon = core.InformedCharacterModifier("빌드업 캐논", att=20)
CriticalFire = core.InformedCharacterModifier(
"크리티컬 파이어", crit=20, crit_damage=5
)
PirateTraining = core.InformedCharacterModifier(
"파이렛 트레이닝", stat_main=30, stat_sub=30
)
MonkeyWavePassive = core.InformedCharacterModifier("몽키 웨이브(패시브)", crit=20)
OakRuletPassive = core.InformedCharacterModifier(
"오크통 룰렛(패시브)", pdamage_indep=10
)
ReinforceCannon = core.InformedCharacterModifier("리인포스 캐논", att=40)
PirateSpirit = core.InformedCharacterModifier(
"파이렛 스피릿", boss_pdamage=40 + self.combat
)
OverburningCannon = core.InformedCharacterModifier(
"오버버닝 캐논",
pdamage_indep=30 + passive_level,
armor_ignore=20 + passive_level // 2,
)
LoadedDicePassive = pirates.LoadedDicePassiveWrapper(vEhc, 3, 4)
return [
BuildupCannon,
CriticalFire,
PirateTraining,
MonkeyWavePassive,
OakRuletPassive,
ReinforceCannon,
PirateSpirit,
OverburningCannon,
LoadedDicePassive,
]
def get_not_implied_skill_list(
self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
):
passive_level = chtr.get_base_modifier().passive_level + self.combat
WeaponConstant = core.InformedCharacterModifier("무기상수", pdamage_indep=50)
Mastery = core.InformedCharacterModifier(
"숙련도", pdamage_indep=-7.5 + 0.5 * ceil(passive_level / 2)
)
return [WeaponConstant, Mastery]
def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
COCOBALLHIT = options.get("cocoball_hit", 27)
ICBMHIT = 6
passive_level = chtr.get_base_modifier().passive_level + self.combat
Booster = core.BuffSkill("부스터", 0, 200 * 1000).wrap(core.BuffSkillWrapper)
Buckshot = core.BuffSkill("벅 샷", 0, 180000).wrap(core.BuffSkillWrapper)
LuckyDice = (
core.BuffSkill(
"로디드 다이스",
delay=0,
remain=180 * 1000,
pdamage=20
+ 10 / 6
+ 10 / 6 * (5 / 6 + 1 / 11) * (10 * (5 + passive_level) * 0.01),
)
.isV(vEhc, 3, 4)
.wrap(core.BuffSkillWrapper)
)
MonkeyWave = core.DamageSkill(
"몽키 웨이브",
delay=810,
damage=860,
hit=1,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyWaveBuff = core.BuffSkill(
"몽키 웨이브(버프)",
delay=0,
remain=30000,
cooltime=-1,
crit_damage=5,
).wrap(core.BuffSkillWrapper)
MonkeyFurious = core.DamageSkill(
"몽키 퓨리어스",
delay=720,
damage=180,
hit=3,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyFuriousBuff = core.BuffSkill(
"몽키 퓨리어스(버프)",
delay=0,
remain=30000,
cooltime=-1,
pdamage=40,
).wrap(core.BuffSkillWrapper)
MonkeyFuriousDot = core.DotSkill(
"몽키 퓨리어스(도트)",
summondelay=0,
delay=1000,
damage=200,
hit=1,
remain=30000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
OakRoulette = core.BuffSkill(
"오크통 룰렛",
delay=840,
remain=180000,
rem=True,
cooltime=180000,
crit_damage=1.25,
).wrap(core.BuffSkillWrapper)
OakRuletDOT = core.DotSkill(
"오크통 룰렛(도트)",
summondelay=0,
delay=1000,
damage=50,
hit=1,
remain=5000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
MonkeyMagic = core.BuffSkill(
"하이퍼 몽키 스펠",
delay=0,
remain=180000,
rem=True,
stat_main=60 + passive_level,
stat_sub=60 + passive_level,
).wrap(core.BuffSkillWrapper)
CannonBuster = (
core.DamageSkill(
"캐논 버스터",
delay=690,
damage=(750 + 5 * self.combat) * 0.45,
hit=3 * (4 + 1),
modifier=core.CharacterModifier(
crit=15 + ceil(self.combat / 2),
armor_ignore=20 + self.combat // 2,
pdamage=20,
),
)
.setV(vEhc, 0, 2, True)
.wrap(core.DamageSkillWrapper)
)
SupportMonkeyTwins = (
core.SummonSkill(
"서포트 몽키 트윈스",
summondelay=720,
delay=930,
damage=(295 + 8 * self.combat) * 0.6,
hit=(1 + 1) * (2 + 1),
remain=60000 + 2000 * self.combat,
rem=True,
)
.setV(vEhc, 1, 2, False)
.wrap(core.SummonSkillWrapper)
)
RollingCannonRainbow = (
core.SummonSkill(
"롤링 캐논 레인보우",
summondelay=480,
delay=12000 / 26,
damage=600,
hit=3,
remain=12000,
cooltime=90000,
)
.setV(vEhc, 3, 2, True)
.wrap(core.SummonSkillWrapper)
)
EpicAdventure = core.BuffSkill(
"에픽 어드벤처",
delay=0,
remain=60000,
cooltime=120000,
pdamage=10,
).wrap(core.BuffSkillWrapper)
WEAPON_ATT = jobutils.get_weapon_att(chtr)
Overdrive = pirates.OverdriveWrapper(vEhc, 5, 5, WEAPON_ATT)
PirateFlag = adventurer.PirateFlagWrapper(vEhc, 4, 3, chtr.level)
MirrorBreak, MirrorSpider = globalSkill.SpiderInMirrorBuilder(vEhc, 0, 0)
BFGCannonball = core.StackableSummonSkillWrapper(
core.SummonSkill(
"빅 휴즈 기간틱 캐논볼",
summondelay=600,
delay=210,
damage=(450 + 15 * vEhc.getV(0, 0)) * 0.45,
hit=4 * 3,
remain=210 * COCOBALLHIT,
cooltime=25000,
).isV(vEhc, 0, 0),
max_stack=3,
)
ICBM = (
core.DamageSkill(
"ICBM",
delay=1140,
damage=(800 + 32 * vEhc.getV(1, 1)) * 0.45,
hit=5 * ICBMHIT * 3,
cooltime=30000,
red=True,
)
.isV(vEhc, 1, 1)
.wrap(core.DamageSkillWrapper)
)
ICBMDOT = (
core.SummonSkill(
"ICBM(장판)",
summondelay=0,
delay=15000 / 27,
damage=(500 + 20 * vEhc.getV(1, 1)) * 0.45,
hit=1 * 3,
remain=15000,
cooltime=-1,
)
.isV(vEhc, 1, 1)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Cannon = (
core.SummonSkill(
"스페셜 몽키 에스코트",
summondelay=780,
delay=1500,
damage=300 + 12 * vEhc.getV(2, 2),
hit=4 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=120000,
red=True,
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Bomb = (
core.SummonSkill(
"스페셜 몽키 에스코트(폭탄)",
summondelay=0,
delay=5000,
damage=450 + 18 * vEhc.getV(2, 2),
hit=7 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=-1,
modifier=core.CharacterModifier(armor_ignore=100),
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
FullMaker = (
core.SummonSkill(
"풀 메이커",
summondelay=720,
delay=360,
damage=(700 + 28 * vEhc.getV(0, 0)) * 0.45,
hit=3 * 3,
remain=360 * 20 - 1,
cooltime=60000,
red=True,
)
.isV(vEhc, 0, 0)
.wrap(core.SummonSkillWrapper)
)
keyFurious.onAfters([MonkeyFuriousBuff, MonkeyFuriousDot])
CannonBuster.onAfter(OakRuletDOT)
BFGCannonball.onAfter(OakRuletDOT)
ICBM.onAfter(OakRuletDOT)
ICBM.onAfter(ICBMDOT)
SpecialMonkeyEscort_Cannon.onJustAfter(SpecialMonkeyEscort_Bomb)
return (
CannonBuster,
[
globalSkill.maple_heros(chtr.level, combat_level=self.combat),
globalSkill.useful_sharp_eyes(),
globalSkill.useful_combat_orders(),
globalSkill.useful_wind_booster(),
Booster,
OakRoulette,
Buckshot,
MonkeyMagic,
LuckyDice,
globalSkill.MapleHeroes2Wrapper(vEhc, 0, 0, chtr.level, self.combat),
EpicAdventure,
Overdrive,
PirateFlag,
globalSkill.soul_contract(),
]
+ [
SpecialMonkeyEscort_Cannon,
BFGCannonball,
FullMaker,
RollingCannonRainbow,
SupportMonkeyTwins,
]
+ [MonkeyWave, MonkeyFurious, ICBM, MirrorBreak]
+ [
SpecialMonkeyEscort_Bomb,
MirrorSpider,
OakRuletDOT,
MonkeyFuriousDot,
MonkeyWaveBuff,
MonkeyFuriousBuff,
ICBMDOT,
]
+ [CannonBuster],
)
| true | true |
1c4680a5dd30a51494fef98f8426f5bccd4e47a7 | 1,264 | py | Python | frontend/tool/a.py | yinzhangyue/PDF_tool | ff1c689478e0d40370724ad88da78ef8bd0bf3d1 | [
"MIT"
] | 3 | 2021-12-07T06:19:12.000Z | 2022-03-30T13:45:34.000Z | frontend/tool/a.py | yinzhangyue/PDF_tool | ff1c689478e0d40370724ad88da78ef8bd0bf3d1 | [
"MIT"
] | null | null | null | frontend/tool/a.py | yinzhangyue/PDF_tool | ff1c689478e0d40370724ad88da78ef8bd0bf3d1 | [
"MIT"
] | 2 | 2022-02-27T16:15:05.000Z | 2022-03-19T07:35:38.000Z | # -*- coding: utf-8 -*-
import numpy as np
import cv2.cv2 as cv2
from numpy import float32
if __name__ == "__main__":
# Read image
img = cv2.imread("./c73.png")
a = np.array([[
1458.4429931640625, 145.316650390625, 1554.5313720703125,
176.924560546875, 1
]],
dtype=float32)
b = np.array([[
1734.0457763671875, 191.89208984375, 1829.681640625, 222.283935546875,
1
]],
dtype=float32)
# Draw rectangle
j = 0
for i in a:
if i[4] > 0.85:
cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),
(50, 205, 50), 4)
# cut = img[int(i[0]):int(i[2]), int(i[1]):int(i[3])]
# cv2.imwrite('./pic/' + str(j) + '.png', cut)
# j += 1
for i in b:
if i[4] > 0.85:
cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),
(254, 67, 101), 4)
# Display cropped image
width = int(img.shape[1] / 4)
height = int(img.shape[0] / 4)
dim = (width, height)
# resize image
resized = cv2.resize(img, dim)
# save the image
cv2.imshow("Image", resized)
cv2.waitKey(0)
cv2.imwrite('./c73_.png', img)
| 28.727273 | 78 | 0.492089 |
import numpy as np
import cv2.cv2 as cv2
from numpy import float32
if __name__ == "__main__":
img = cv2.imread("./c73.png")
a = np.array([[
1458.4429931640625, 145.316650390625, 1554.5313720703125,
176.924560546875, 1
]],
dtype=float32)
b = np.array([[
1734.0457763671875, 191.89208984375, 1829.681640625, 222.283935546875,
1
]],
dtype=float32)
j = 0
for i in a:
if i[4] > 0.85:
cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),
(50, 205, 50), 4)
for i in b:
if i[4] > 0.85:
cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),
(254, 67, 101), 4)
width = int(img.shape[1] / 4)
height = int(img.shape[0] / 4)
dim = (width, height)
resized = cv2.resize(img, dim)
cv2.imshow("Image", resized)
cv2.waitKey(0)
cv2.imwrite('./c73_.png', img)
| true | true |
1c4680c58421b1abc945fead1a676d7387f98adb | 3,191 | py | Python | tests/test_roi_pooling.py | scilicet64/keras-spp | 23da20561fe92c585208af9bf3e0ef8f51bc5dcc | [
"MIT"
] | null | null | null | tests/test_roi_pooling.py | scilicet64/keras-spp | 23da20561fe92c585208af9bf3e0ef8f51bc5dcc | [
"MIT"
] | null | null | null | tests/test_roi_pooling.py | scilicet64/keras-spp | 23da20561fe92c585208af9bf3e0ef8f51bc5dcc | [
"MIT"
] | null | null | null | import keras.backend as K
import numpy as np
from keras.layers import Input
from keras.models import Model
from spp.RoiPooling import RoiPooling
dim_ordering = K.image_data_format()
assert dim_ordering in {'channels_last','channels_first'}, 'dim_ordering must be in {channels_last,channels_first}'
pooling_regions = [1, 2, 4]
num_rois = 2
num_channels = 3
if dim_ordering == 'channels_last':
in_img = Input(shape=(None, None, num_channels))
elif dim_ordering == 'channels_first':
in_img = Input(shape=(num_channels, None, None))
in_roi = Input(shape=(num_rois, 4))
out_roi_pool = RoiPooling(pooling_regions, num_rois)([in_img, in_roi])
model = Model([in_img, in_roi], out_roi_pool)
model.summary()
model.compile(loss='mse', optimizer='sgd')
for img_size in [8, 16, 32]:
if dim_ordering == 'channels_first':
X_img = np.random.rand(1, num_channels, img_size, img_size)
row_length = [float(X_img.shape[2]) / i for i in pooling_regions]
col_length = [float(X_img.shape[3]) / i for i in pooling_regions]
elif dim_ordering == 'channels_last':
X_img = np.random.rand(1, img_size, img_size, num_channels)
row_length = [float(X_img.shape[1]) / i for i in pooling_regions]
col_length = [float(X_img.shape[2]) / i for i in pooling_regions]
X_roi = np.array([[0, 0, img_size / 1, img_size / 1],
[0, 0, img_size / 2, img_size / 2]])
X_roi = np.reshape(X_roi, (1, num_rois, 4)).astype(int)
Y = model.predict([X_img, X_roi])
for roi in range(num_rois):
if dim_ordering == 'channels_first':
X_curr = X_img[0, :, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3]]
row_length = [float(X_curr.shape[1]) / i for i in pooling_regions]
col_length = [float(X_curr.shape[2]) / i for i in pooling_regions]
elif dim_ordering == 'channels_last':
X_curr = X_img[0, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3], :]
row_length = [float(X_curr.shape[0]) / i for i in pooling_regions]
col_length = [float(X_curr.shape[1]) / i for i in pooling_regions]
idx = 0
for pool_num, num_pool_regions in enumerate(pooling_regions):
for ix in range(num_pool_regions):
for jy in range(num_pool_regions):
for cn in range(num_channels):
x1 = int(round(ix * col_length[pool_num]))
x2 = int(round(ix * col_length[pool_num] + col_length[pool_num]))
y1 = int(round(jy * row_length[pool_num]))
y2 = int(round(jy * row_length[pool_num] + row_length[pool_num]))
if dim_ordering == 'channels_first':
m_val = np.max(X_curr[cn, y1:y2, x1:x2])
elif dim_ordering == 'channels_last':
m_val = np.max(X_curr[y1:y2, x1:x2, cn])
np.testing.assert_almost_equal(
m_val, Y[0, roi, idx], decimal=6)
idx += 1
print('Passed roi pooling test') | 40.392405 | 115 | 0.596365 | import keras.backend as K
import numpy as np
from keras.layers import Input
from keras.models import Model
from spp.RoiPooling import RoiPooling
dim_ordering = K.image_data_format()
assert dim_ordering in {'channels_last','channels_first'}, 'dim_ordering must be in {channels_last,channels_first}'
pooling_regions = [1, 2, 4]
num_rois = 2
num_channels = 3
if dim_ordering == 'channels_last':
in_img = Input(shape=(None, None, num_channels))
elif dim_ordering == 'channels_first':
in_img = Input(shape=(num_channels, None, None))
in_roi = Input(shape=(num_rois, 4))
out_roi_pool = RoiPooling(pooling_regions, num_rois)([in_img, in_roi])
model = Model([in_img, in_roi], out_roi_pool)
model.summary()
model.compile(loss='mse', optimizer='sgd')
for img_size in [8, 16, 32]:
if dim_ordering == 'channels_first':
X_img = np.random.rand(1, num_channels, img_size, img_size)
row_length = [float(X_img.shape[2]) / i for i in pooling_regions]
col_length = [float(X_img.shape[3]) / i for i in pooling_regions]
elif dim_ordering == 'channels_last':
X_img = np.random.rand(1, img_size, img_size, num_channels)
row_length = [float(X_img.shape[1]) / i for i in pooling_regions]
col_length = [float(X_img.shape[2]) / i for i in pooling_regions]
X_roi = np.array([[0, 0, img_size / 1, img_size / 1],
[0, 0, img_size / 2, img_size / 2]])
X_roi = np.reshape(X_roi, (1, num_rois, 4)).astype(int)
Y = model.predict([X_img, X_roi])
for roi in range(num_rois):
if dim_ordering == 'channels_first':
X_curr = X_img[0, :, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3]]
row_length = [float(X_curr.shape[1]) / i for i in pooling_regions]
col_length = [float(X_curr.shape[2]) / i for i in pooling_regions]
elif dim_ordering == 'channels_last':
X_curr = X_img[0, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3], :]
row_length = [float(X_curr.shape[0]) / i for i in pooling_regions]
col_length = [float(X_curr.shape[1]) / i for i in pooling_regions]
idx = 0
for pool_num, num_pool_regions in enumerate(pooling_regions):
for ix in range(num_pool_regions):
for jy in range(num_pool_regions):
for cn in range(num_channels):
x1 = int(round(ix * col_length[pool_num]))
x2 = int(round(ix * col_length[pool_num] + col_length[pool_num]))
y1 = int(round(jy * row_length[pool_num]))
y2 = int(round(jy * row_length[pool_num] + row_length[pool_num]))
if dim_ordering == 'channels_first':
m_val = np.max(X_curr[cn, y1:y2, x1:x2])
elif dim_ordering == 'channels_last':
m_val = np.max(X_curr[y1:y2, x1:x2, cn])
np.testing.assert_almost_equal(
m_val, Y[0, roi, idx], decimal=6)
idx += 1
print('Passed roi pooling test') | true | true |
1c46810f9df9a321d810630eb9522ae322eae9a2 | 2,102 | py | Python | tests/testproject/testapp/models.py | allenling/django-easy-fixtures | cf6e0abff83565e5bf106e388922a31feb288ee9 | [
"MIT"
] | 2 | 2016-09-19T12:53:44.000Z | 2016-09-25T05:14:15.000Z | tests/testproject/testapp/models.py | allenling/django-easy-fixture | cf6e0abff83565e5bf106e388922a31feb288ee9 | [
"MIT"
] | null | null | null | tests/testproject/testapp/models.py | allenling/django-easy-fixture | cf6e0abff83565e5bf106e388922a31feb288ee9 | [
"MIT"
] | null | null | null | from django.db import models
class TestAbstractModel(models.Model):
postive_integer = models.PositiveIntegerField()
postive_small_integer = models.PositiveSmallIntegerField()
file_path_field = models.FilePathField()
float_field = models.FloatField()
ip = models.GenericIPAddressField()
slug_field = models.SlugField()
small_integer = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
biginteger_field = models.BigIntegerField()
boolean_field = models.BooleanField()
non_boolean_field = models.NullBooleanField()
# decimal_field = models.DecimalField()
duration_field = models.DurationField()
email_field = models.EmailField()
char_field = models.CharField(max_length=20)
integer_field = models.IntegerField()
dete_field = models.DateField()
datetime_field = models.DateTimeField()
url = models.URLField()
bin = models.BinaryField()
uuid = models.UUIDField()
default_field = models.CharField(max_length=20, default='')
unique_field = models.CharField(max_length=20)
unique_together_field_a = models.CharField(max_length=20)
unique_together_field_b = models.CharField(max_length=20)
class Meta:
abstract = True
class OtherModel(TestAbstractModel):
pass
class FixtureForeignModel(TestAbstractModel):
foreign_field = models.ForeignKey(OtherModel, on_delete=models.CASCADE)
class Meta(object):
unique_together = (('char_field', ), ('unique_together_field_a', 'unique_together_field_b'))
class FixtureManyToManyModel(TestAbstractModel):
class Meta(object):
unique_together = (('float_field', ), ('integer_field', ), ('unique_together_field_a', 'unique_together_field_b'))
class FixtureModel(TestAbstractModel):
foreign_field = models.ForeignKey(FixtureForeignModel, on_delete=models.CASCADE)
many_to_many = models.ManyToManyField(FixtureManyToManyModel)
class Meta(object):
unique_together = (('char_field', 'foreign_field'), ('unique_together_field_a', 'unique_together_field_b'))
| 33.903226 | 122 | 0.744053 | from django.db import models
class TestAbstractModel(models.Model):
postive_integer = models.PositiveIntegerField()
postive_small_integer = models.PositiveSmallIntegerField()
file_path_field = models.FilePathField()
float_field = models.FloatField()
ip = models.GenericIPAddressField()
slug_field = models.SlugField()
small_integer = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
biginteger_field = models.BigIntegerField()
boolean_field = models.BooleanField()
non_boolean_field = models.NullBooleanField()
duration_field = models.DurationField()
email_field = models.EmailField()
char_field = models.CharField(max_length=20)
integer_field = models.IntegerField()
dete_field = models.DateField()
datetime_field = models.DateTimeField()
url = models.URLField()
bin = models.BinaryField()
uuid = models.UUIDField()
default_field = models.CharField(max_length=20, default='')
unique_field = models.CharField(max_length=20)
unique_together_field_a = models.CharField(max_length=20)
unique_together_field_b = models.CharField(max_length=20)
class Meta:
abstract = True
class OtherModel(TestAbstractModel):
pass
class FixtureForeignModel(TestAbstractModel):
foreign_field = models.ForeignKey(OtherModel, on_delete=models.CASCADE)
class Meta(object):
unique_together = (('char_field', ), ('unique_together_field_a', 'unique_together_field_b'))
class FixtureManyToManyModel(TestAbstractModel):
class Meta(object):
unique_together = (('float_field', ), ('integer_field', ), ('unique_together_field_a', 'unique_together_field_b'))
class FixtureModel(TestAbstractModel):
foreign_field = models.ForeignKey(FixtureForeignModel, on_delete=models.CASCADE)
many_to_many = models.ManyToManyField(FixtureManyToManyModel)
class Meta(object):
unique_together = (('char_field', 'foreign_field'), ('unique_together_field_a', 'unique_together_field_b'))
| true | true |
1c4681245bdc3eedb5787d6a299414db6b6e6a58 | 4,237 | py | Python | ch05/timsort.py | laszlokiraly/LearningAlgorithms | 032a3cc409546619cf41220821d081cde54bbcce | [
"MIT"
] | 74 | 2021-05-06T22:03:18.000Z | 2022-03-25T04:37:51.000Z | ch05/timsort.py | laszlokiraly/LearningAlgorithms | 032a3cc409546619cf41220821d081cde54bbcce | [
"MIT"
] | null | null | null | ch05/timsort.py | laszlokiraly/LearningAlgorithms | 032a3cc409546619cf41220821d081cde54bbcce | [
"MIT"
] | 19 | 2021-07-16T11:42:00.000Z | 2022-03-22T00:25:49.000Z | """
Simplistic non-optimized, native Python implementation showing the mechanics
of TimSort.
This code is designed to show how TimSort uses Insertion Sort and Merge Sort
as its constituent building blocks. It is not the actual sorting algorithm,
because of extra complexities that optimize this base algorithm even further.
Full details on the sorting algorithm are in the actual CPython code base,
but Tim Peters has provided documentation explaining reasons behind many
of the choices in Tim Sort.
https://hg.python.org/cpython/file/tip/Objects/listsort.txt
"""
import timeit
from algs.table import DataTable
def merge(A, lo, mid, hi, aux):
"""Merge two (consecutive) runs together."""
aux[lo:hi+1] = A[lo:hi+1]
left = lo
right = mid + 1
for i in range(lo, hi+1):
if left > mid:
A[i] = aux[right]
right += 1
elif right > hi:
A[i] = aux[left]
left += 1
elif aux[right] < aux[left]:
A[i] = aux[right]
right += 1
else:
A[i] = aux[left]
left += 1
# https://hg.python.org/cpython/file/tip/Objects/listsort.txt
# Instead we pick a minrun in range(32, 65) such that N/minrun is exactly a
# power of 2, or if that isn't possible, is close to, but strictly less than,
# a power of 2. This is easier to do than it may sound: take the first 6
# bits of N, and add 1 if any of the remaining bits are set. In fact, that
# rule covers every case in this section, including small N and exact powers
# of 2; merge_compute_minrun() is a deceptively simple function.
def compute_min_run(n):
"""Compute min_run to use when sorting n total values."""
# Used to add 1 if any remaining bits are set
r = 0
while n >= 64:
r |= n & 1
n >>= 1
return n + r
def insertion_sort(A, lo, hi):
"""Sort A[lo .. hi] using Insertion Sort. Stable sort demands Ai <= Aj. """
for i in range(lo+1,hi+1):
for j in range(i,lo,-1):
if A[j-1] <= A[j]:
break
A[j],A[j-1] = A[j-1],A[j]
def tim_sort(A):
"""Apply simplistic Tim Sort implementation on A."""
# Small arrays are sorted using insertion sort
N = len(A)
if N < 64:
insertion_sort(A,0,N-1)
return
# Insertion sort in strips of 'size'
size = compute_min_run(N)
for lo in range(0, N, size):
insertion_sort(A, lo, min(lo+size-1, N-1))
aux = [None]*N
while size < N:
# Merge all doubled ranges, taking care with last one
for lo in range(0, N, 2*size):
mid = min(lo + size - 1, N-1)
hi = min(lo + 2*size - 1, N-1)
merge(A, lo, mid, hi, aux)
size = 2 * size
def timing_nlogn_sorting_real_world(max_k=18, output=True):
"""
Confirm N Log N performance of Merge Sort, Heap Sort and Python's built-in sort
for n in 2**k for k up to (but not including) max_k=18.
Represents real-world case where Tim Sort shines, namely, where you are
adding random data to an already sorted set.
"""
# Build model
tbl = DataTable([12,10,10,10,10],['N','MergeSort', 'Quicksort', 'TimSort', 'PythonSort'],
output=output)
for n in [2**k for k in range(8, max_k)]:
t_ms = min(timeit.repeat(stmt='merge_sort(A)', setup='''
import random
from ch05.merge import merge_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
t_qs = min(timeit.repeat(stmt='quick_sort(A)', setup='''
import random
from ch05.sorting import quick_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
t_ps = min(timeit.repeat(stmt='A.sort()', setup='''
import random
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
t_ts = min(timeit.repeat(stmt='tim_sort(A)', setup='''
import random
from ch05.timsort import tim_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
tbl.row([n, t_ms, t_qs, t_ts, t_ps])
return tbl
| 32.098485 | 93 | 0.621194 | import timeit
from algs.table import DataTable
def merge(A, lo, mid, hi, aux):
aux[lo:hi+1] = A[lo:hi+1]
left = lo
right = mid + 1
for i in range(lo, hi+1):
if left > mid:
A[i] = aux[right]
right += 1
elif right > hi:
A[i] = aux[left]
left += 1
elif aux[right] < aux[left]:
A[i] = aux[right]
right += 1
else:
A[i] = aux[left]
left += 1
# a power of 2. This is easier to do than it may sound: take the first 6
# bits of N, and add 1 if any of the remaining bits are set. In fact, that
# rule covers every case in this section, including small N and exact powers
# of 2; merge_compute_minrun() is a deceptively simple function.
def compute_min_run(n):
# Used to add 1 if any remaining bits are set
r = 0
while n >= 64:
r |= n & 1
n >>= 1
return n + r
def insertion_sort(A, lo, hi):
for i in range(lo+1,hi+1):
for j in range(i,lo,-1):
if A[j-1] <= A[j]:
break
A[j],A[j-1] = A[j-1],A[j]
def tim_sort(A):
# Small arrays are sorted using insertion sort
N = len(A)
if N < 64:
insertion_sort(A,0,N-1)
return
# Insertion sort in strips of 'size'
size = compute_min_run(N)
for lo in range(0, N, size):
insertion_sort(A, lo, min(lo+size-1, N-1))
aux = [None]*N
while size < N:
# Merge all doubled ranges, taking care with last one
for lo in range(0, N, 2*size):
mid = min(lo + size - 1, N-1)
hi = min(lo + 2*size - 1, N-1)
merge(A, lo, mid, hi, aux)
size = 2 * size
def timing_nlogn_sorting_real_world(max_k=18, output=True):
    """Time merge sort, quicksort, tim_sort and list.sort on "real world" input.

    For each N = 2**k (k in 8..max_k-1) the fixture is 80% already-sorted
    values followed by a shuffled 20% tail — the mostly-ordered data timsort
    is designed for. Each timing is the min of 10 single runs (least noisy
    estimator). Returns the populated table; `output` controls printing.
    Assumes DataTable is the project's table-printing helper imported at
    module top — TODO confirm against the surrounding file.
    """
    # Build model
    tbl = DataTable([12,10,10,10,10],['N','MergeSort', 'Quicksort', 'TimSort', 'PythonSort'],
                    output=output)
    for n in [2**k for k in range(8, max_k)]:
        # Each setup string rebuilds the 80/20 partially-sorted fixture so
        # every timed call sorts a fresh, identically distributed list.
        t_ms = min(timeit.repeat(stmt='merge_sort(A)', setup='''
import random
from ch05.merge import merge_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
        t_qs = min(timeit.repeat(stmt='quick_sort(A)', setup='''
import random
from ch05.sorting import quick_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
        # Built-in list.sort needs no project import.
        t_ps = min(timeit.repeat(stmt='A.sort()', setup='''
import random
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
        t_ts = min(timeit.repeat(stmt='tim_sort(A)', setup='''
import random
from ch05.timsort import tim_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
        # Row order matches the column headers (TimSort before PythonSort),
        # which differs from the measurement order above.
        tbl.row([n, t_ms, t_qs, t_ts, t_ps])
    return tbl
| true | true |
1c4681db212aeac98d706be2f3405b80e5366d54 | 8,460 | py | Python | examples/create_freight_shipment.py | shivam1111/python-fedex | 597a6c1afa233a62dd03667c515372227ae16431 | [
"BSD-3-Clause"
] | null | null | null | examples/create_freight_shipment.py | shivam1111/python-fedex | 597a6c1afa233a62dd03667c515372227ae16431 | [
"BSD-3-Clause"
] | null | null | null | examples/create_freight_shipment.py | shivam1111/python-fedex | 597a6c1afa233a62dd03667c515372227ae16431 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
This example shows how to create shipments. The variables populated below
represents the minimum required values. You will need to fill all of these, or
risk seeing a SchemaValidationError exception thrown.
Near the bottom of the module, you'll see some different ways to handle the
label data that is returned with the reply.
"""
import logging
import binascii
from example_config import CONFIG_OBJ
from fedex.services.ship_service import FedexProcessShipmentRequest
# Set this to the INFO level to see the response from Fedex printed in stdout.
#logging.basicConfig(filename="suds.log", level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
# This is the object that will be handling our tracking request.
# We're using the FedexConfig object from example_config.py in this dir.
shipment = FedexProcessShipmentRequest(CONFIG_OBJ)
shipment.RequestedShipment.DropoffType = 'REGULAR_PICKUP'
shipment.RequestedShipment.ServiceType = 'FEDEX_FREIGHT_ECONOMY'
shipment.RequestedShipment.PackagingType = 'YOUR_PACKAGING'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightAccountNumber = CONFIG_OBJ.freight_account_number
# Shipper contact info.
shipment.RequestedShipment.Shipper.Contact.PersonName = 'Sender Name'
shipment.RequestedShipment.Shipper.Contact.CompanyName = 'Some Company'
shipment.RequestedShipment.Shipper.Contact.PhoneNumber = '9012638716'
# Shipper address.
shipment.RequestedShipment.Shipper.Address.StreetLines = ['1202 Chalet Ln']
shipment.RequestedShipment.Shipper.Address.City = 'Harrison'
shipment.RequestedShipment.Shipper.Address.StateOrProvinceCode = 'AR'
shipment.RequestedShipment.Shipper.Address.PostalCode = '72601'
shipment.RequestedShipment.Shipper.Address.CountryCode = 'US'
shipment.RequestedShipment.Shipper.Address.Residential = True
# Recipient contact info.
shipment.RequestedShipment.Recipient.Contact.PersonName = 'Recipient Name'
shipment.RequestedShipment.Recipient.Contact.CompanyName = 'Recipient Company'
shipment.RequestedShipment.Recipient.Contact.PhoneNumber = '9012637906'
# Recipient address
shipment.RequestedShipment.Recipient.Address.StreetLines = ['2000 Freight LTL Testing']
shipment.RequestedShipment.Recipient.Address.City = 'Harrison'
shipment.RequestedShipment.Recipient.Address.StateOrProvinceCode = 'AR'
shipment.RequestedShipment.Recipient.Address.PostalCode = '72601'
shipment.RequestedShipment.Recipient.Address.CountryCode = 'US'
# This is needed to ensure an accurate rate quote with the response.
shipment.RequestedShipment.Recipient.Address.Residential = False
shipment.RequestedShipment.FreightShipmentDetail.TotalHandlingUnits = 1
shipment.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.AccountNumber = CONFIG_OBJ.freight_account_number
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Contact.PersonName = 'Sender Name'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Contact.CompanyName = 'Some Company'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Contact.PhoneNumber = '9012638716'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.StreetLines = ['2000 Freight LTL Testing']
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.City = 'Harrison'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.StateOrProvinceCode = 'AR'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.PostalCode = '72601'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.CountryCode = 'US'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.Residential = False
spec = shipment.create_wsdl_object_of_type('ShippingDocumentSpecification')
spec.ShippingDocumentTypes = [spec.CertificateOfOrigin]
# shipment.RequestedShipment.ShippingDocumentSpecification = spec
role = shipment.create_wsdl_object_of_type('FreightShipmentRoleType')
shipment.RequestedShipment.FreightShipmentDetail.Role = role.SHIPPER
shipment.RequestedShipment.FreightShipmentDetail.CollectTermsType = 'STANDARD'
# Specifies the label type to be returned.
shipment.RequestedShipment.LabelSpecification.LabelFormatType = 'FEDEX_FREIGHT_STRAIGHT_BILL_OF_LADING'
# Specifies which format the label file will be sent to you in.
# DPL, EPL2, PDF, PNG, ZPLII
shipment.RequestedShipment.LabelSpecification.ImageType = 'PDF'
# To use doctab stocks, you must change ImageType above to one of the
# label printer formats (ZPLII, EPL2, DPL).
# See documentation for paper types, there quite a few.
shipment.RequestedShipment.LabelSpecification.LabelStockType = 'PAPER_LETTER'
# This indicates if the top or bottom of the label comes out of the
# printer first.
# BOTTOM_EDGE_OF_TEXT_FIRST or TOP_EDGE_OF_TEXT_FIRST
shipment.RequestedShipment.LabelSpecification.LabelPrintingOrientation = 'BOTTOM_EDGE_OF_TEXT_FIRST'
shipment.RequestedShipment.EdtRequestType = 'NONE'
package1_weight = shipment.create_wsdl_object_of_type('Weight')
package1_weight.Value = 500.0
package1_weight.Units = "LB"
shipment.RequestedShipment.FreightShipmentDetail.PalletWeight = package1_weight
package1 = shipment.create_wsdl_object_of_type('FreightShipmentLineItem')
package1.Weight = package1_weight
package1.Packaging = 'PALLET'
package1.Description = 'Products'
package1.FreightClass = 'CLASS_500'
package1.HazardousMaterials = None
package1.Pieces = 12
shipment.RequestedShipment.FreightShipmentDetail.LineItems = package1
# If you'd like to see some documentation on the ship service WSDL, un-comment
# this line. (Spammy).
#print shipment.client
# Un-comment this to see your complete, ready-to-send request as it stands
# before it is actually sent. This is useful for seeing what values you can
# change.
#print shipment.RequestedShipment
# If you want to make sure that all of your entered details are valid, you
# can call this and parse it just like you would via send_request(). If
# shipment.response.HighestSeverity == "SUCCESS", your shipment is valid.
#shipment.send_validation_request()
# Fires off the request, sets the 'response' attribute on the object.
shipment.send_request()

# This will show the reply to your shipment being sent. You can access the
# attributes through the response attribute on the request object. This is
# good to un-comment to see the variables returned by the Fedex reply.
# (Parenthesized call form works on both Python 2 and Python 3.)
print(shipment.response)

# Here is the overall end result of the query.
# print("HighestSeverity:", shipment.response.HighestSeverity)
# # Getting the tracking number from the new shipment.
# print("Tracking #:", shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].TrackingIds[0].TrackingNumber)
# # Net shipping costs.
# print("Net Shipping Cost (US$):", shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].PackageRating.PackageRateDetails[0].NetCharge.Amount)

# Get the label image in ASCII (base64) format from the reply. Note the list
# indices we're using. You'll need to adjust or iterate through these if your
# shipment has multiple packages.
ascii_label_data = shipment.response.CompletedShipmentDetail.ShipmentDocuments[0].Parts[0].Image

# Convert the base64 ASCII data to binary.
label_binary_data = binascii.a2b_base64(ascii_label_data)

# Example of dumping the label to a PDF file (ImageType above is 'PDF'; the
# original note said PNG). The context manager guarantees the file is closed
# even if the write raises.
with open('example_shipment_label.pdf', 'wb') as pdf_file:
    pdf_file.write(label_binary_data)
"""
This is an example of how to print the label to a serial printer. This will not
work for all label printers, consult your printer's documentation for more
details on what formats it can accept.
"""
# Pipe the binary directly to the label printer. Works under Linux
# without requiring PySerial. This WILL NOT work on other platforms.
#label_printer = open("/dev/ttyS0", "w")
#label_printer.write(label_binary_data)
#label_printer.close()
"""
This is a potential cross-platform solution using pySerial. This has not been
tested in a long time and may or may not work. For Windows, Mac, and other
platforms, you may want to go this route.
"""
#import serial
#label_printer = serial.Serial(0)
#print "SELECTED SERIAL PORT: "+ label_printer.portstr
#label_printer.write(label_binary_data)
#label_printer.close() | 47.52809 | 157 | 0.827305 |
"""
This example shows how to create shipments. The variables populated below
represents the minimum required values. You will need to fill all of these, or
risk seeing a SchemaValidationError exception thrown.
Near the bottom of the module, you'll see some different ways to handle the
label data that is returned with the reply.
"""
import logging
import binascii
from example_config import CONFIG_OBJ
from fedex.services.ship_service import FedexProcessShipmentRequest
# Set this to the INFO level to see the response from Fedex printed in stdout.
#logging.basicConfig(filename="suds.log", level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
# This is the object that will be handling our tracking request.
# We're using the FedexConfig object from example_config.py in this dir.
shipment = FedexProcessShipmentRequest(CONFIG_OBJ)
shipment.RequestedShipment.DropoffType = 'REGULAR_PICKUP'
shipment.RequestedShipment.ServiceType = 'FEDEX_FREIGHT_ECONOMY'
shipment.RequestedShipment.PackagingType = 'YOUR_PACKAGING'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightAccountNumber = CONFIG_OBJ.freight_account_number
shipment.RequestedShipment.Shipper.Contact.PersonName = 'Sender Name'
shipment.RequestedShipment.Shipper.Contact.CompanyName = 'Some Company'
shipment.RequestedShipment.Shipper.Contact.PhoneNumber = '9012638716'
shipment.RequestedShipment.Shipper.Address.StreetLines = ['1202 Chalet Ln']
shipment.RequestedShipment.Shipper.Address.City = 'Harrison'
shipment.RequestedShipment.Shipper.Address.StateOrProvinceCode = 'AR'
shipment.RequestedShipment.Shipper.Address.PostalCode = '72601'
shipment.RequestedShipment.Shipper.Address.CountryCode = 'US'
shipment.RequestedShipment.Shipper.Address.Residential = True
shipment.RequestedShipment.Recipient.Contact.PersonName = 'Recipient Name'
shipment.RequestedShipment.Recipient.Contact.CompanyName = 'Recipient Company'
shipment.RequestedShipment.Recipient.Contact.PhoneNumber = '9012637906'
shipment.RequestedShipment.Recipient.Address.StreetLines = ['2000 Freight LTL Testing']
shipment.RequestedShipment.Recipient.Address.City = 'Harrison'
shipment.RequestedShipment.Recipient.Address.StateOrProvinceCode = 'AR'
shipment.RequestedShipment.Recipient.Address.PostalCode = '72601'
shipment.RequestedShipment.Recipient.Address.CountryCode = 'US'
shipment.RequestedShipment.Recipient.Address.Residential = False
shipment.RequestedShipment.FreightShipmentDetail.TotalHandlingUnits = 1
shipment.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.AccountNumber = CONFIG_OBJ.freight_account_number
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Contact.PersonName = 'Sender Name'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Contact.CompanyName = 'Some Company'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Contact.PhoneNumber = '9012638716'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.StreetLines = ['2000 Freight LTL Testing']
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.City = 'Harrison'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.StateOrProvinceCode = 'AR'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.PostalCode = '72601'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.CountryCode = 'US'
shipment.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.Residential = False
spec = shipment.create_wsdl_object_of_type('ShippingDocumentSpecification')
spec.ShippingDocumentTypes = [spec.CertificateOfOrigin]
role = shipment.create_wsdl_object_of_type('FreightShipmentRoleType')
shipment.RequestedShipment.FreightShipmentDetail.Role = role.SHIPPER
shipment.RequestedShipment.FreightShipmentDetail.CollectTermsType = 'STANDARD'
shipment.RequestedShipment.LabelSpecification.LabelFormatType = 'FEDEX_FREIGHT_STRAIGHT_BILL_OF_LADING'
shipment.RequestedShipment.LabelSpecification.ImageType = 'PDF'
shipment.RequestedShipment.LabelSpecification.LabelStockType = 'PAPER_LETTER'
shipment.RequestedShipment.LabelSpecification.LabelPrintingOrientation = 'BOTTOM_EDGE_OF_TEXT_FIRST'
shipment.RequestedShipment.EdtRequestType = 'NONE'
package1_weight = shipment.create_wsdl_object_of_type('Weight')
package1_weight.Value = 500.0
package1_weight.Units = "LB"
shipment.RequestedShipment.FreightShipmentDetail.PalletWeight = package1_weight
package1 = shipment.create_wsdl_object_of_type('FreightShipmentLineItem')
package1.Weight = package1_weight
package1.Packaging = 'PALLET'
package1.Description = 'Products'
package1.FreightClass = 'CLASS_500'
package1.HazardousMaterials = None
package1.Pieces = 12
shipment.RequestedShipment.FreightShipmentDetail.LineItems = package1
# this line. (Spammy).
#print shipment.client
# Un-comment this to see your complete, ready-to-send request as it stands
# before it is actually sent. This is useful for seeing what values you can
# change.
#print shipment.RequestedShipment
# If you want to make sure that all of your entered details are valid, you
# can call this and parse it just like you would via send_request(). If
# shipment.response.HighestSeverity == "SUCCESS", your shipment is valid.
#shipment.send_validation_request()
# Fires off the request, sets the 'response' attribute on the object.
shipment.send_request()
# This will show the reply to your shipment being sent. You can access the
# attributes through the response attribute on the request object. This is
# good to un-comment to see the variables returned by the Fedex reply.
print shipment.response
# Here is the overall end result of the query.
# print "HighestSeverity:", shipment.response.HighestSeverity
# # Getting the tracking number from the new shipment.
# print "Tracking #:", shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].TrackingIds[0].TrackingNumber
# # Net shipping costs.
# print "Net Shipping Cost (US$):", shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].PackageRating.PackageRateDetails[0].NetCharge.Amount
# # Get the label image in ASCII format from the reply. Note the list indices
# we're using. You'll need to adjust or iterate through these if your shipment
# has multiple packages.
ascii_label_data = shipment.response.CompletedShipmentDetail.ShipmentDocuments[0].Parts[0].Image
# Convert the ASCII data to binary.
label_binary_data = binascii.a2b_base64(ascii_label_data)
"""
This is an example of how to dump a label to a PNG file.
"""
# This will be the file we write the label out to.
pdf_file = open('example_shipment_label.pdf', 'wb')
pdf_file.write(label_binary_data)
pdf_file.close()
"""
This is an example of how to print the label to a serial printer. This will not
work for all label printers, consult your printer's documentation for more
details on what formats it can accept.
"""
"""
This is a potential cross-platform solution using pySerial. This has not been
tested in a long time and may or may not work. For Windows, Mac, and other
platforms, you may want to go this route.
"""
| false | true |
1c46821305f403db023274e47c9b4a630caf9c77 | 4,807 | py | Python | src/azure-cli/azure/cli/command_modules/search/tests/latest/test_shared_private_link_resource.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | 4 | 2022-01-25T07:33:15.000Z | 2022-03-24T05:15:13.000Z | src/azure-cli/azure/cli/command_modules/search/tests/latest/test_shared_private_link_resource.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/search/tests/latest/test_shared_private_link_resource.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
import unittest
class AzureSearchServicesTests(ScenarioTest):
    """CRUD coverage for `az search shared-private-link-resource`."""

    @ResourceGroupPreparer(name_prefix='azure_search_cli_test')
    @StorageAccountPreparer(name_prefix='satest', kind='StorageV2')
    def test_shared_private_link_resource_crud(self, resource_group, storage_account):
        """Create, update, list, show and delete a shared private link resource."""
        # Substitution values for the CLI command templates below.
        self.kwargs.update({
            'sku_name': 'basic',
            'search_service_name': self.create_random_name(prefix='azstest', length=24),
            'public_network_access': 'Disabled',
            'shared_private_link_resource_name': self.create_random_name(prefix='spltest', length=24),
            'storage_account_name': storage_account,
            'shared_private_link_resource_group_id': 'blob',
            'shared_private_link_resource_request_provisioning_state_default': 'Succeeded',
            'shared_private_link_resource_request_status_default': 'Pending',
            'shared_private_link_resource_request_message_default': 'Please approve',
            'shared_private_link_resource_request_message': 'Please approve again'
        })
        # A search service must exist before private link resources can be
        # attached to it.
        self.cmd(
            'az search service create -n {search_service_name} -g {rg} --sku {sku_name} --public-network-access {public_network_access}',
            checks=[self.check('name', '{search_service_name}'),
                    self.check('sku.name', '{sku_name}'),
                    self.check('publicNetworkAccess', '{public_network_access}')])
        _account_resource_id = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}".format(self.get_subscription_id(), resource_group, storage_account)
        self.kwargs.update({'_account_resource_id': _account_resource_id})
        # create: a fresh shared private link resource starts out Pending
        # with the default request message.
        _tpe_resource = self.cmd('az search shared-private-link-resource create --service-name {search_service_name} -g {rg} --resource-id {_account_resource_id} --name {shared_private_link_resource_name} --group-id {shared_private_link_resource_group_id}',
                                 checks=[self.check('name', '{shared_private_link_resource_name}'),
                                         self.check('properties.provisioningState', '{shared_private_link_resource_request_provisioning_state_default}'),
                                         self.check('properties.requestMessage', '{shared_private_link_resource_request_message_default}'),
                                         self.check('properties.status', '{shared_private_link_resource_request_status_default}')]).get_output_in_json()
        # update: only the request message changes.
        self.cmd('az search shared-private-link-resource update --service-name {search_service_name} -g {rg} --resource-id {_account_resource_id} --name {shared_private_link_resource_name} --group-id {shared_private_link_resource_group_id} --request-message "{shared_private_link_resource_request_message}"',
                 checks=[self.check('properties.requestMessage', '{shared_private_link_resource_request_message}')])
        # list: exactly the one resource created above.
        # (assertEqual gives better failure output than assertTrue(len == 1).)
        _tpe_resources = self.cmd('az search shared-private-link-resource list --service-name {search_service_name} -g {rg}').get_output_in_json()
        self.assertEqual(len(_tpe_resources), 1)
        # show: the resource points at our storage account.
        _tpe_resource = self.cmd('az search shared-private-link-resource show --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name}').get_output_in_json()
        self.assertEqual(_tpe_resource['properties']['privateLinkResourceId'], _account_resource_id)
        # delete, then confirm the list is empty again.
        self.cmd('az search shared-private-link-resource delete --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name} -y')
        _tpe_resources = self.cmd('az search shared-private-link-resource list --service-name {search_service_name} -g {rg}').get_output_in_json()
        self.assertEqual(len(_tpe_resources), 0)
        # show on a deleted resource makes the CLI exit with code 3.
        with self.assertRaises(SystemExit) as ex:
            self.cmd('az search shared-private-link-resource show --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name}')
        self.assertEqual(ex.exception.code, 3)
# Allow running this test module directly with the stock unittest runner.
if __name__ == '__main__':
    unittest.main()
| 66.763889 | 308 | 0.68317 |
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
import unittest
class AzureSearchServicesTests(ScenarioTest):
@ResourceGroupPreparer(name_prefix='azure_search_cli_test')
@StorageAccountPreparer(name_prefix='satest', kind='StorageV2')
def test_shared_private_link_resource_crud(self, resource_group, storage_account):
self.kwargs.update({
'sku_name': 'basic',
'search_service_name': self.create_random_name(prefix='azstest', length=24),
'public_network_access': 'Disabled',
'shared_private_link_resource_name': self.create_random_name(prefix='spltest', length=24),
'storage_account_name': storage_account,
'shared_private_link_resource_group_id': 'blob',
'shared_private_link_resource_request_provisioning_state_default': 'Succeeded',
'shared_private_link_resource_request_status_default': 'Pending',
'shared_private_link_resource_request_message_default': 'Please approve',
'shared_private_link_resource_request_message': 'Please approve again'
})
self.cmd(
'az search service create -n {search_service_name} -g {rg} --sku {sku_name} --public-network-access {public_network_access}',
checks=[self.check('name', '{search_service_name}'),
self.check('sku.name', '{sku_name}'),
self.check('publicNetworkAccess', '{public_network_access}')])
_account_resource_id = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}".format(self.get_subscription_id(), resource_group, storage_account)
self.kwargs.update({'_account_resource_id': _account_resource_id})
_tpe_resource = self.cmd('az search shared-private-link-resource create --service-name {search_service_name} -g {rg} --resource-id {_account_resource_id} --name {shared_private_link_resource_name} --group-id {shared_private_link_resource_group_id}',
checks=[self.check('name', '{shared_private_link_resource_name}'),
self.check('properties.provisioningState', '{shared_private_link_resource_request_provisioning_state_default}'),
self.check('properties.requestMessage', '{shared_private_link_resource_request_message_default}'),
self.check('properties.status', '{shared_private_link_resource_request_status_default}')]).get_output_in_json()
self.cmd('az search shared-private-link-resource update --service-name {search_service_name} -g {rg} --resource-id {_account_resource_id} --name {shared_private_link_resource_name} --group-id {shared_private_link_resource_group_id} --request-message "{shared_private_link_resource_request_message}"',
checks=[self.check('properties.requestMessage', '{shared_private_link_resource_request_message}')])
_tpe_resources = self.cmd('az search shared-private-link-resource list --service-name {search_service_name} -g {rg}').get_output_in_json()
self.assertTrue(len(_tpe_resources) == 1)
_tpe_resource = self.cmd('az search shared-private-link-resource show --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name}').get_output_in_json()
self.assertTrue(_tpe_resource['properties']['privateLinkResourceId'] == _account_resource_id)
self.cmd('az search shared-private-link-resource delete --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name} -y')
_tpe_resources = self.cmd('az search shared-private-link-resource list --service-name {search_service_name} -g {rg}').get_output_in_json()
self.assertTrue(len(_tpe_resources) == 0)
with self.assertRaises(SystemExit) as ex:
self.cmd('az search shared-private-link-resource show --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name}')
self.assertEqual(ex.exception.code, 3)
if __name__ == '__main__':
unittest.main()
| true | true |
1c4682ad9a9b67315be210907787728e1c713657 | 61,319 | py | Python | tests/integration_tests/core_tests.py | akashkj/superset | 8a157d8446780e4e71550405cbedde8a4d64d92a | [
"Apache-2.0"
] | 1 | 2022-01-23T17:08:13.000Z | 2022-01-23T17:08:13.000Z | tests/integration_tests/core_tests.py | akashkj/superset | 8a157d8446780e4e71550405cbedde8a4d64d92a | [
"Apache-2.0"
] | 19 | 2022-01-29T03:16:22.000Z | 2022-03-25T23:50:16.000Z | tests/integration_tests/core_tests.py | akashkj/superset | 8a157d8446780e4e71550405cbedde8a4d64d92a | [
"Apache-2.0"
] | 1 | 2022-02-02T19:59:50.000Z | 2022-02-02T19:59:50.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import csv
import datetime
import doctest
import html
import io
import json
import logging
from typing import Dict, List
from urllib.parse import quote
import superset.utils.database
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
)
import pytest
import pytz
import random
import re
import unittest
from unittest import mock
import pandas as pd
import sqlalchemy as sqla
from sqlalchemy.exc import SQLAlchemyError
from superset.models.cache import CacheKey
from superset.utils.database import get_example_database
from tests.integration_tests.conftest import with_feature_flags
from tests.integration_tests.fixtures.energy_dashboard import (
load_energy_table_with_slice,
load_energy_table_data,
)
from tests.integration_tests.test_app import app
import superset.views.utils
from superset import (
dataframe,
db,
security_manager,
sql_lab,
)
from superset.common.db_query_status import QueryStatus
from superset.connectors.sqla.models import SqlaTable
from superset.db_engine_specs.base import BaseEngineSpec
from superset.db_engine_specs.mssql import MssqlEngineSpec
from superset.exceptions import SupersetException
from superset.extensions import async_query_manager
from superset.models import core as models
from superset.models.annotations import Annotation, AnnotationLayer
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query
from superset.result_set import SupersetResultSet
from superset.utils import core as utils
from superset.views import core as views
from superset.views.database.views import DatabaseView
from .base_tests import SupersetTestCase
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
load_world_bank_data,
)
logger = logging.getLogger(__name__)
class TestCore(SupersetTestCase):
    def setUp(self):
        # Integration tests share one database; clear the tables this suite
        # writes to so state from earlier runs cannot leak into assertions.
        db.session.query(Query).delete()
        db.session.query(DatasourceAccessRequest).delete()
        db.session.query(models.Log).delete()
        # Map of table_name -> id for every registered SqlaTable, handy for
        # building URLs and payloads inside individual tests.
        self.table_ids = {
            tbl.table_name: tbl.id for tbl in (db.session.query(SqlaTable).all())
        }
        # Remember the app setting so tearDown can restore it even when a
        # test mutates PREVENT_UNSAFE_DB_CONNECTIONS.
        self.original_unsafe_db_setting = app.config["PREVENT_UNSAFE_DB_CONNECTIONS"]
    def tearDown(self):
        # Drop queries created during the test and restore the config flag
        # saved in setUp so later tests see the original setting.
        db.session.query(Query).delete()
        app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = self.original_unsafe_db_setting
def test_login(self):
resp = self.get_resp("/login/", data=dict(username="admin", password="general"))
self.assertNotIn("User confirmation needed", resp)
resp = self.get_resp("/logout/", follow_redirects=True)
self.assertIn("User confirmation needed", resp)
resp = self.get_resp(
"/login/", data=dict(username="admin", password="wrongPassword")
)
self.assertIn("User confirmation needed", resp)
def test_dashboard_endpoint(self):
self.login()
resp = self.client.get("/superset/dashboard/-1/")
assert resp.status_code == 404
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_slice_endpoint(self):
        """The slice page renders for an existing chart and 404s for a bad id."""
        self.login(username="admin")
        slc = self.get_slice("Girls", db.session)
        resp = self.get_resp("/superset/slice/{}/".format(slc.id))
        assert "Original value" in resp
        assert "List Roles" in resp
        # Testing overrides: standalone mode must strip the navbar chrome.
        resp = self.get_resp("/superset/slice/{}/?standalone=true".format(slc.id))
        assert '<div class="navbar' not in resp
        # A nonexistent slice id yields a plain 404.
        resp = self.client.get("/superset/slice/-1/")
        assert resp.status_code == 404
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_viz_cache_key(self):
        # Cache keys must change when query-shaping inputs change and stay
        # stable for inputs that do not affect the cached payload.
        self.login(username="admin")
        slc = self.get_slice("Girls", db.session)
        viz = slc.viz
        qobj = viz.query_obj()
        cache_key = viz.cache_key(qobj)
        # Dropping the groupby produces a different query -> different key.
        qobj["groupby"] = []
        cache_key_with_groupby = viz.cache_key(qobj)
        self.assertNotEqual(cache_key, cache_key_with_groupby)
        # time_compare participates in the key, both vs. absent and between
        # two different comparison windows.
        self.assertNotEqual(
            viz.cache_key(qobj), viz.cache_key(qobj, time_compare="12 weeks")
        )
        self.assertNotEqual(
            viz.cache_key(qobj, time_compare="28 days"),
            viz.cache_key(qobj, time_compare="12 weeks"),
        )
        # inner_from_dttm does not participate: the key must be unchanged.
        qobj["inner_from_dttm"] = datetime.datetime(1901, 1, 1)
        self.assertEqual(cache_key_with_groupby, viz.cache_key(qobj))
def test_get_superset_tables_not_allowed(self):
example_db = superset.utils.database.get_example_database()
schema_name = self.default_schema_backend_map[example_db.backend]
self.login(username="gamma")
uri = f"superset/tables/{example_db.id}/{schema_name}/undefined/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
    def test_get_superset_tables_substr(self):
        """Substring search on the tables endpoint returns the matching table."""
        example_db = superset.utils.database.get_example_database()
        if example_db.backend in {"presto", "hive"}:
            # TODO: change table to the real table that is in examples.
            return
        self.login(username="admin")
        schema_name = self.default_schema_backend_map[example_db.backend]
        uri = f"superset/tables/{example_db.id}/{schema_name}/ab_role/"
        rv = self.client.get(uri)
        response = json.loads(rv.data.decode("utf-8"))
        self.assertEqual(rv.status_code, 200)
        # Exactly one hit: the metadata table "ab_role".
        expected_response = {
            "options": [
                {
                    "label": "ab_role",
                    "schema": schema_name,
                    "title": "ab_role",
                    "type": "table",
                    "value": "ab_role",
                    "extra": None,
                }
            ],
            "tableLength": 1,
        }
        self.assertEqual(response, expected_response)
def test_get_superset_tables_not_found(self):
self.login(username="admin")
uri = f"superset/tables/invalid/public/undefined/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
    def test_annotation_json_endpoint(self):
        """Annotations within the requested time range are returned by
        /superset/annotation_json, and the layer API exposes id and name."""
        # Set up an annotation layer and annotation
        layer = AnnotationLayer(name="foo", descr="bar")
        db.session.add(layer)
        db.session.commit()

        annotation = Annotation(
            layer_id=layer.id,
            short_descr="my_annotation",
            start_dttm=datetime.datetime(2020, 5, 20, 18, 21, 51),
            end_dttm=datetime.datetime(2020, 5, 20, 18, 31, 51),
        )

        db.session.add(annotation)
        db.session.commit()

        self.login()
        resp_annotations = json.loads(
            self.get_resp("annotationlayermodelview/api/read")
        )
        # the UI needs id and name to function
        self.assertIn("id", resp_annotations["result"][0])
        self.assertIn("name", resp_annotations["result"][0])

        response = self.get_resp(
            f"/superset/annotation_json/{layer.id}?form_data="
            + quote(json.dumps({"time_range": "100 years ago : now"}))
        )
        assert "my_annotation" in response

        # Rollback changes
        db.session.delete(annotation)
        db.session.delete(layer)
        db.session.commit()
    def test_admin_only_permissions(self):
        """Admin-only permissions exist on Admin but not on Alpha/Gamma roles."""

        def assert_admin_permission_in(role_name, assert_func):
            # collect all permission names attached to the role
            role = security_manager.find_role(role_name)
            permissions = [p.permission.name for p in role.permissions]
            assert_func("can_sync_druid_source", permissions)
            assert_func("can_approve", permissions)

        assert_admin_permission_in("Admin", self.assertIn)
        assert_admin_permission_in("Alpha", self.assertNotIn)
        assert_admin_permission_in("Gamma", self.assertNotIn)
    def test_admin_only_menu_views(self):
        """Admin-only menu views exist on Admin but not on Alpha/Gamma roles."""

        def assert_admin_view_menus_in(role_name, assert_func):
            # collect all view-menu names attached to the role
            role = security_manager.find_role(role_name)
            view_menus = [p.view_menu.name for p in role.permissions]
            assert_func("ResetPasswordView", view_menus)
            assert_func("RoleModelView", view_menus)
            assert_func("Security", view_menus)
            assert_func("SQL Lab", view_menus)

        assert_admin_view_menus_in("Admin", self.assertIn)
        assert_admin_view_menus_in("Alpha", self.assertNotIn)
        assert_admin_view_menus_in("Gamma", self.assertNotIn)
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_save_slice(self):
self.login(username="admin")
slice_name = f"Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
copy_name_prefix = "Test Sankey"
copy_name = f"{copy_name_prefix}[save]{random.random()}"
tbl_id = self.table_ids.get("energy_usage")
new_slice_name = f"{copy_name_prefix}[overwrite]{random.random()}"
url = (
"/superset/explore/table/{}/?slice_name={}&"
"action={}&datasource_name=energy_usage"
)
form_data = {
"adhoc_filters": [],
"viz_type": "sankey",
"groupby": ["target"],
"metric": "sum__value",
"row_limit": 5000,
"slice_id": slice_id,
"time_range_endpoints": ["inclusive", "exclusive"],
}
# Changing name and save as a new slice
resp = self.client.post(
url.format(tbl_id, copy_name, "saveas"),
data={"form_data": json.dumps(form_data)},
)
db.session.expunge_all()
new_slice_id = resp.json["form_data"]["slice_id"]
slc = db.session.query(Slice).filter_by(id=new_slice_id).one()
self.assertEqual(slc.slice_name, copy_name)
form_data.pop("slice_id") # We don't save the slice id when saving as
self.assertEqual(slc.viz.form_data, form_data)
form_data = {
"adhoc_filters": [],
"viz_type": "sankey",
"groupby": ["source"],
"metric": "sum__value",
"row_limit": 5000,
"slice_id": new_slice_id,
"time_range": "now",
"time_range_endpoints": ["inclusive", "exclusive"],
}
# Setting the name back to its original name by overwriting new slice
self.client.post(
url.format(tbl_id, new_slice_name, "overwrite"),
data={"form_data": json.dumps(form_data)},
)
db.session.expunge_all()
slc = db.session.query(Slice).filter_by(id=new_slice_id).one()
self.assertEqual(slc.slice_name, new_slice_name)
self.assertEqual(slc.viz.form_data, form_data)
# Cleanup
slices = (
db.session.query(Slice)
.filter(Slice.slice_name.like(copy_name_prefix + "%"))
.all()
)
for slc in slices:
db.session.delete(slc)
db.session.commit()
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_filter_endpoint(self):
self.login(username="admin")
slice_name = "Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
tbl_id = self.table_ids.get("energy_usage")
table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id)
table.filter_select_enabled = True
url = (
"/superset/filter/table/{}/target/?viz_type=sankey&groupby=source"
"&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&"
"slice_id={}&datasource_name=energy_usage&"
"datasource_id=1&datasource_type=table"
)
# Changing name
resp = self.get_resp(url.format(tbl_id, slice_id))
assert len(resp) > 0
assert "energy_target0" in resp
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_slice_data(self):
        """Slice ``data`` payloads expose the attributes the UI relies on."""
        # slice data should have some required attributes
        self.login(username="admin")
        slc = self.get_slice(
            slice_name="Girls", session=db.session, expunge_from_session=False
        )
        slc_data_attributes = slc.data.keys()
        assert "changed_on" in slc_data_attributes
        assert "modified" in slc_data_attributes
        assert "owners" in slc_data_attributes
    @pytest.mark.usefixtures("load_energy_table_with_slice")
    def test_slices(self):
        """Every slice's explore URL must render with a 200 for an admin."""
        # Testing by hitting the two supported end points for all slices
        self.login(username="admin")
        Slc = Slice
        urls = []
        for slc in db.session.query(Slc).all():
            urls += [
                (slc.slice_name, "explore", slc.slice_url),
            ]
        for name, method, url in urls:
            logger.info(f"[{name}]/[{method}]: {url}")
            print(f"[{name}]/[{method}]: {url}")
            resp = self.client.get(url)
            self.assertEqual(resp.status_code, 200)
    def test_tablemodelview_list(self):
        """The table list view shows a table name and links to its explore URL."""
        self.login(username="admin")

        url = "/tablemodelview/list/"
        resp = self.get_resp(url)

        # assert that a table is listed
        table = db.session.query(SqlaTable).first()
        assert table.name in resp
        assert "/superset/explore/table/{}".format(table.id) in resp
def test_add_slice(self):
self.login(username="admin")
# assert that /chart/add responds with 200
url = "/chart/add"
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_get_user_slices_for_owners(self):
        """/superset/user_slices returns exactly the slices a user owns."""
        self.login(username="alpha")
        user = security_manager.find_user("alpha")
        slice_name = "Girls"

        # ensure user is not owner of any slices
        url = f"/superset/user_slices/{user.id}/"
        resp = self.client.get(url)
        data = json.loads(resp.data)
        self.assertEqual(data, [])

        # make user owner of slice and verify that endpoint returns said slice
        slc = self.get_slice(
            slice_name=slice_name, session=db.session, expunge_from_session=False
        )
        slc.owners = [user]
        db.session.merge(slc)
        db.session.commit()
        url = f"/superset/user_slices/{user.id}/"
        resp = self.client.get(url)
        data = json.loads(resp.data)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0]["title"], slice_name)

        # remove ownership and ensure user no longer gets slice
        slc = self.get_slice(
            slice_name=slice_name, session=db.session, expunge_from_session=False
        )
        slc.owners = []
        db.session.merge(slc)
        db.session.commit()
        url = f"/superset/user_slices/{user.id}/"
        resp = self.client.get(url)
        data = json.loads(resp.data)
        self.assertEqual(data, [])
    def test_get_user_slices(self):
        """The async slice API filtered by creator responds with 200."""
        self.login(username="admin")
        userid = security_manager.find_user("admin").id
        url = f"/sliceasync/api/read?_flt_0_created_by={userid}"
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
    @pytest.mark.usefixtures("load_energy_table_with_slice")
    def test_slices_V2(self):
        """Slice URLs must be reachable for a user holding the
        explore-v2-beta role."""
        # Add explore-v2-beta role to admin user
        # Test all slice urls as a user with the explore-v2-beta role
        security_manager.add_role("explore-v2-beta")

        security_manager.add_user(
            "explore_beta",
            "explore_beta",
            " user",
            "explore_beta@airbnb.com",
            security_manager.find_role("explore-v2-beta"),
            password="general",
        )
        self.login(username="explore_beta", password="general")

        Slc = Slice
        urls = []
        for slc in db.session.query(Slc).all():
            urls += [(slc.slice_name, "slice_url", slc.slice_url)]
        for name, method, url in urls:
            print(f"[{name}]/[{method}]: {url}")
            self.client.get(url)
def test_doctests(self):
modules = [utils, models, sql_lab]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_misc(self):
assert self.get_resp("/health") == "OK"
assert self.get_resp("/healthcheck") == "OK"
assert self.get_resp("/ping") == "OK"
    def test_testconn(self, username="admin"):
        """/superset/testconn accepts both masked and decrypted SQLAlchemy URIs."""
        # need to temporarily allow sqlite dbs, teardown will undo this
        app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False
        self.login(username=username)
        database = superset.utils.database.get_example_database()

        # validate that the endpoint works with the password-masked sqlalchemy uri
        data = json.dumps(
            {
                "uri": database.safe_sqlalchemy_uri(),
                "name": "examples",
                "impersonate_user": False,
            }
        )
        response = self.client.post(
            "/superset/testconn", data=data, content_type="application/json"
        )
        assert response.status_code == 200
        assert response.headers["Content-Type"] == "application/json"

        # validate that the endpoint works with the decrypted sqlalchemy uri
        data = json.dumps(
            {
                "uri": database.sqlalchemy_uri_decrypted,
                "name": "examples",
                "impersonate_user": False,
            }
        )
        response = self.client.post(
            "/superset/testconn", data=data, content_type="application/json"
        )
        assert response.status_code == 200
        assert response.headers["Content-Type"] == "application/json"
    def test_testconn_failed_conn(self, username="admin"):
        """Unknown drivers make /superset/testconn return a 400 JSON error."""
        self.login(username=username)

        # completely unknown dialect
        data = json.dumps(
            {"uri": "broken://url", "name": "examples", "impersonate_user": False}
        )
        response = self.client.post(
            "/superset/testconn", data=data, content_type="application/json"
        )
        assert response.status_code == 400
        assert response.headers["Content-Type"] == "application/json"
        response_body = json.loads(response.data.decode("utf-8"))
        expected_body = {"error": "Could not load database driver: broken"}
        assert response_body == expected_body, "%s != %s" % (
            response_body,
            expected_body,
        )

        # known dialect whose driver package is not installed
        data = json.dumps(
            {
                "uri": "mssql+pymssql://url",
                "name": "examples",
                "impersonate_user": False,
            }
        )
        response = self.client.post(
            "/superset/testconn", data=data, content_type="application/json"
        )
        assert response.status_code == 400
        assert response.headers["Content-Type"] == "application/json"
        response_body = json.loads(response.data.decode("utf-8"))
        expected_body = {"error": "Could not load database driver: mssql+pymssql"}
        assert response_body == expected_body, "%s != %s" % (
            response_body,
            expected_body,
        )
    def test_testconn_unsafe_uri(self, username="admin"):
        """With PREVENT_UNSAFE_DB_CONNECTIONS on, sqlite URIs are rejected."""
        self.login(username=username)
        app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = True

        response = self.client.post(
            "/superset/testconn",
            data=json.dumps(
                {
                    "uri": "sqlite:///home/superset/unsafe.db",
                    "name": "unsafe",
                    "impersonate_user": False,
                }
            ),
            content_type="application/json",
        )
        self.assertEqual(400, response.status_code)
        response_body = json.loads(response.data.decode("utf-8"))
        expected_body = {
            "error": "SQLiteDialect_pysqlite cannot be used as a data source for security reasons."
        }
        self.assertEqual(expected_body, response_body)
    def test_custom_password_store(self):
        """A custom password store overrides the password used in the
        decrypted URI.  NOTE: this patches a module-level global and restores
        it at the end — statement order matters here."""
        database = superset.utils.database.get_example_database()
        conn_pre = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)

        def custom_password_store(uri):
            return "password_store_test"

        models.custom_password_store = custom_password_store
        conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
        # only meaningful when the example db actually has a password
        if conn_pre.password:
            assert conn.password == "password_store_test"
            assert conn.password != conn_pre.password
        # Disable for password store for later tests
        models.custom_password_store = None
    def test_databaseview_edit(self, username="admin"):
        """Posting a password-masked URI must not clobber the decrypted URI."""
        # validate that sending a password-masked uri does not over-write the decrypted
        # uri
        self.login(username=username)
        database = superset.utils.database.get_example_database()
        sqlalchemy_uri_decrypted = database.sqlalchemy_uri_decrypted
        url = "databaseview/edit/{}".format(database.id)
        data = {k: database.__getattribute__(k) for k in DatabaseView.add_columns}
        data["sqlalchemy_uri"] = database.safe_sqlalchemy_uri()
        self.client.post(url, data=data)
        database = superset.utils.database.get_example_database()
        self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)

        # Need to clean up after ourselves
        database.impersonate_user = False
        database.allow_dml = False
        database.allow_run_async = False
        db.session.commit()
    @pytest.mark.usefixtures(
        "load_energy_table_with_slice", "load_birth_names_dashboard_with_slices"
    )
    def test_warm_up_cache(self):
        """warm_up_cache works by slice id, by table name, and by dashboard id
        (optionally with extra filters)."""
        self.login()
        slc = self.get_slice("Girls", db.session)
        data = self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(slc.id))
        self.assertEqual(
            data, [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
        )

        data = self.get_json_resp(
            "/superset/warm_up_cache?table_name=energy_usage&db_name=main"
        )
        assert len(data) > 0

        dashboard = self.get_dash_by_slug("births")

        assert self.get_json_resp(
            f"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}"
        ) == [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]

        assert self.get_json_resp(
            f"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}&extra_filters="
            + quote(json.dumps([{"col": "name", "op": "in", "val": ["Jennifer"]}]))
        ) == [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_cache_logging(self):
        """When STORE_CACHE_KEYS_IN_METADATA_DB is on, warming the cache
        records a CacheKey row tagged with the datasource uid."""
        self.login("admin")
        store_cache_keys = app.config["STORE_CACHE_KEYS_IN_METADATA_DB"]
        app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = True
        girls_slice = self.get_slice("Girls", db.session)
        self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(girls_slice.id))
        ck = db.session.query(CacheKey).order_by(CacheKey.id.desc()).first()
        assert ck.datasource_uid == f"{girls_slice.table.id}__table"
        # restore the original config flag
        app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = store_cache_keys
    def test_shortner(self):
        """Posting an explore URL to the shortener yields an /r/<id> link."""
        self.login(username="admin")
        data = (
            "//superset/explore/table/1/?viz_type=sankey&groupby=source&"
            "groupby=target&metric=sum__value&row_limit=5000&where=&having=&"
            "flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name="
            "Energy+Sankey&collapsed_fieldsets=&action=&datasource_name="
            "energy_usage&datasource_id=1&datasource_type=table&"
            "previous_viz_type=sankey"
        )
        resp = self.client.post("/r/shortner/", data=dict(data=data))
        assert re.search(r"\/r\/[0-9]+", resp.data.decode("utf-8"))
def test_shortner_invalid(self):
self.login(username="admin")
invalid_urls = [
"hhttp://invalid.com",
"hhttps://invalid.com",
"www.invalid.com",
]
for invalid_url in invalid_urls:
resp = self.client.post("/r/shortner/", data=dict(data=invalid_url))
assert resp.status_code == 400
    def test_redirect_invalid(self):
        """Stored short-links with invalid URLs redirect to the site root."""
        model_url = models.Url(url="hhttp://invalid.com")
        db.session.add(model_url)
        db.session.commit()

        self.login(username="admin")
        response = self.client.get(f"/r/{model_url.id}")
        assert response.headers["Location"] == "http://localhost/"

        # clean up the row created above
        db.session.delete(model_url)
        db.session.commit()
    @with_feature_flags(KV_STORE=False)
    def test_kv_disabled(self):
        """With the KV_STORE feature flag off, both read and write 404."""
        self.login(username="admin")

        resp = self.client.get("/kv/10001/")
        self.assertEqual(404, resp.status_code)

        value = json.dumps({"data": "this is a test"})
        resp = self.client.post("/kv/store/", data=dict(data=value))
        self.assertEqual(resp.status_code, 404)
    @with_feature_flags(KV_STORE=True)
    def test_kv_enabled(self):
        """With KV_STORE on, stored values round-trip through the KV endpoints."""
        self.login(username="admin")

        # an id that was never stored still 404s
        resp = self.client.get("/kv/10001/")
        self.assertEqual(404, resp.status_code)

        value = json.dumps({"data": "this is a test"})
        resp = self.client.post("/kv/store/", data=dict(data=value))
        self.assertEqual(resp.status_code, 200)
        kv = db.session.query(models.KeyValue).first()
        kv_value = kv.value
        self.assertEqual(json.loads(value), json.loads(kv_value))

        resp = self.client.get("/kv/{}/".format(kv.id))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(json.loads(value), json.loads(resp.data.decode("utf-8")))
def test_gamma(self):
self.login(username="gamma")
assert "Charts" in self.get_resp("/chart/list/")
assert "Dashboards" in self.get_resp("/dashboard/list/")
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_csv_endpoint(self):
self.login()
client_id = "{}".format(random.getrandbits(64))[:10]
get_name_sql = """
SELECT name
FROM birth_names
LIMIT 1
"""
resp = self.run_sql(get_name_sql, client_id, raise_on_error=True)
name = resp["data"][0]["name"]
sql = f"""
SELECT name
FROM birth_names
WHERE name = '{name}'
LIMIT 1
"""
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO(f"name\n{name}\n"))
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO(f"name\n{name}\n"))
self.assertEqual(list(expected_data), list(data))
self.logout()
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_extra_table_metadata(self):
        """The extra_table_metadata endpoint responds for a known table.
        NOTE(review): only checks that the request succeeds — no payload
        assertions are made."""
        self.login()
        example_db = superset.utils.database.get_example_database()
        schema = "default" if example_db.backend in {"presto", "hive"} else "superset"
        self.get_json_resp(
            f"/superset/extra_table_metadata/{example_db.id}/birth_names/{schema}/"
        )
    def test_templated_sql_json(self):
        """Jinja templating is rendered before SQL Lab executes the query."""
        if superset.utils.database.get_example_database().backend == "presto":
            # TODO: make it work for presto
            return
        self.login()
        sql = "SELECT '{{ 1+1 }}' as test"
        data = self.run_sql(sql, "fdaklj3ws")
        self.assertEqual(data["data"][0]["test"], "2")
    @mock.patch(
        "tests.integration_tests.superset_test_custom_template_processors.datetime"
    )
    @mock.patch("superset.views.core.get_sql_results")
    def test_custom_templated_sql_json(self, sql_lab_mock, mock_dt) -> None:
        """Test sqllab receives macros expanded query."""
        # freeze "now" so the $DATE() macro expands deterministically
        mock_dt.utcnow = mock.Mock(return_value=datetime.datetime(1970, 1, 1))
        self.login()
        sql = "SELECT '$DATE()' as test"
        resp = {
            "status": QueryStatus.SUCCESS,
            "query": {"rows": 1},
            "data": [{"test": "'1970-01-01'"}],
        }
        sql_lab_mock.return_value = resp

        dbobj = self.create_fake_db_for_macros()
        json_payload = dict(database_id=dbobj.id, sql=sql)
        self.get_json_resp(
            "/superset/sql_json/", raise_on_error=False, json_=json_payload
        )
        assert sql_lab_mock.called
        # the macro must already be expanded in the SQL handed to sql_lab
        self.assertEqual(sql_lab_mock.call_args[0][1], "SELECT '1970-01-01' as test")

        self.delete_fake_db_for_macros()
    def test_fetch_datasource_metadata(self):
        """Datasource metadata payloads expose the keys the explore UI needs."""
        self.login(username="admin")
        # implicit string concatenation — single URL with a query string
        url = "/superset/fetch_datasource_metadata?" "datasourceKey=1__table"
        resp = self.get_json_resp(url)
        keys = [
            "name",
            "type",
            "order_by_choices",
            "granularity_sqla",
            "time_grain_sqla",
            "id",
        ]
        for k in keys:
            self.assertIn(k, resp.keys())
    @staticmethod
    def _get_user_activity_endpoints(user: str):
        """Return the tuple of per-user activity URLs for *user* (a username);
        all but the last are keyed by user id."""
        userid = security_manager.find_user(user).id
        return (
            f"/superset/recent_activity/{userid}/",
            f"/superset/created_slices/{userid}/",
            f"/superset/created_dashboards/{userid}/",
            f"/superset/fave_slices/{userid}/",
            f"/superset/fave_dashboards/{userid}/",
            f"/superset/user_slices/{userid}/",
            f"/superset/fave_dashboards_by_username/{user}/",
        )
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_user_profile(self, username="admin"):
        """The profile page and all activity endpoints respond without errors
        after favoriting a slice and a dashboard."""
        self.login(username=username)
        slc = self.get_slice("Girls", db.session)

        # Setting some faves
        url = f"/superset/favstar/Slice/{slc.id}/select/"
        resp = self.get_json_resp(url)
        self.assertEqual(resp["count"], 1)

        dash = db.session.query(Dashboard).filter_by(slug="births").first()
        url = f"/superset/favstar/Dashboard/{dash.id}/select/"
        resp = self.get_json_resp(url)
        self.assertEqual(resp["count"], 1)

        resp = self.get_resp(f"/superset/profile/{username}/")
        self.assertIn('"app"', resp)

        for endpoint in self._get_user_activity_endpoints(username):
            data = self.get_json_resp(endpoint)
            # an error payload would carry a "message" key
            self.assertNotIn("message", data)
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_user_activity_access(self, username="gamma"):
        """ENABLE_BROAD_ACTIVITY_ACCESS gates access to other users' activity."""
        self.login(username=username)

        # accessing own and other users' activity is allowed by default
        for user in ("admin", "gamma"):
            for endpoint in self._get_user_activity_endpoints(user):
                resp = self.client.get(endpoint)
                assert resp.status_code == 200

        # disabling flag will block access to other users' activity data
        access_flag = app.config["ENABLE_BROAD_ACTIVITY_ACCESS"]
        app.config["ENABLE_BROAD_ACTIVITY_ACCESS"] = False

        for user in ("admin", "gamma"):
            for endpoint in self._get_user_activity_endpoints(user):
                resp = self.client.get(endpoint)
                expected_status_code = 200 if user == username else 403
                assert resp.status_code == expected_status_code

        # restore flag
        app.config["ENABLE_BROAD_ACTIVITY_ACCESS"] = access_flag
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_slice_id_is_always_logged_correctly_on_web_request(self):
        """Viewing a slice creates exactly one Log row tagged with its id."""
        # superset/explore case
        self.login("admin")
        slc = db.session.query(Slice).filter_by(slice_name="Girls").one()
        qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
        self.get_resp(slc.slice_url, {"form_data": json.dumps(slc.form_data)})
        self.assertEqual(1, qry.count())
def create_sample_csvfile(self, filename: str, content: List[str]) -> None:
with open(filename, "w+") as test_file:
for l in content:
test_file.write(f"{l}\n")
    def create_sample_excelfile(self, filename: str, content: Dict[str, str]) -> None:
        """Write *content* as a single-sheet Excel workbook at *filename*.

        NOTE(review): relies on pandas' default Excel writer engine being
        available in the test environment — confirm openpyxl/xlsxwriter ships.
        """
        pd.DataFrame(content).to_excel(filename)
    def enable_csv_upload(self, database: models.Database) -> None:
        """Enables csv upload in the given database and verifies that the
        upload views render afterwards."""
        database.allow_file_upload = True
        db.session.commit()
        add_datasource_page = self.get_resp("/databaseview/list/")
        self.assertIn("Upload a CSV", add_datasource_page)

        form_get = self.get_resp("/csvtodatabaseview/form")
        self.assertIn("CSV to Database configuration", form_get)
    def test_dataframe_timezone(self):
        """Timezone-aware datetimes survive the result-set/records round trip
        and serialize to ISO-8601 with the original offset."""
        tz = pytz.FixedOffset(60)
        data = [
            (datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),
            (datetime.datetime(2017, 11, 18, 22, 6, 30, tzinfo=tz),),
        ]
        results = SupersetResultSet(list(data), [["data"]], BaseEngineSpec)
        df = results.to_pandas_df()
        data = dataframe.df_to_records(df)
        json_str = json.dumps(data, default=utils.pessimistic_json_iso_dttm_ser)
        self.assertDictEqual(
            data[0], {"data": pd.Timestamp("2017-11-18 21:53:00.219225+0100", tz=tz)}
        )
        self.assertDictEqual(
            data[1], {"data": pd.Timestamp("2017-11-18 22:06:30+0100", tz=tz)}
        )
        self.assertEqual(
            json_str,
            '[{"data": "2017-11-18T21:53:00.219225+01:00"}, {"data": "2017-11-18T22:06:30+01:00"}]',
        )
    def test_mssql_engine_spec_pymssql(self):
        """MSSQL result rows delivered as tuples (pymssql driver) convert to
        records with datetimes as pandas Timestamps."""
        # Test for case when tuple is returned (pymssql)
        data = [
            (1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000)),
            (2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000)),
        ]
        results = SupersetResultSet(
            list(data), [["col1"], ["col2"], ["col3"]], MssqlEngineSpec
        )
        df = results.to_pandas_df()
        data = dataframe.df_to_records(df)
        self.assertEqual(len(data), 2)
        self.assertEqual(
            data[0],
            {"col1": 1, "col2": 1, "col3": pd.Timestamp("2017-10-19 23:39:16.660000")},
        )
    def test_comments_in_sqlatable_query(self):
        """SQL comments are stripped from a virtual table's query, while
        comment-like text inside string literals is preserved."""
        clean_query = "SELECT '/* val 1 */' as c1, '-- val 2' as c2 FROM tbl"
        commented_query = "/* comment 1 */" + clean_query + "-- comment 2"
        table = SqlaTable(
            table_name="test_comments_in_sqlatable_query_table",
            sql=commented_query,
            database=get_example_database(),
        )
        rendered_query = str(table.get_from_clause())
        self.assertEqual(clean_query, rendered_query)
    def test_slice_payload_no_datasource(self):
        """explore_json without a datasource returns a clear error message."""
        self.login(username="admin")
        data = self.get_json_resp("/superset/explore_json/", raise_on_error=False)
        self.assertEqual(
            data["errors"][0]["message"],
            "The dataset associated with this chart no longer exists",
        )
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_explore_json(self):
        """explore_json returns a 200 and one row per gender group."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = {
            "datasource": f"{tbl_id}__table",
            "viz_type": "dist_bar",
            "time_range_endpoints": ["inclusive", "exclusive"],
            "granularity_sqla": "ds",
            "time_range": "No filter",
            "metrics": ["count"],
            "adhoc_filters": [],
            "groupby": ["gender"],
            "row_limit": 100,
        }
        self.login(username="admin")
        rv = self.client.post(
            "/superset/explore_json/", data={"form_data": json.dumps(form_data)},
        )
        data = json.loads(rv.data.decode("utf-8"))

        self.assertEqual(rv.status_code, 200)
        # the birth_names fixture has two genders
        self.assertEqual(data["rowcount"], 2)
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_explore_json_dist_bar_order(self):
        """The dist_bar series ordering matches the equivalent SQL with
        ORDER BY count_name DESC, for both metrics."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = {
            "datasource": f"{tbl_id}__table",
            "viz_type": "dist_bar",
            "url_params": {},
            "time_range_endpoints": ["inclusive", "exclusive"],
            "granularity_sqla": "ds",
            "time_range": 'DATEADD(DATETIME("2021-01-22T00:00:00"), -100, year) : 2021-01-22T00:00:00',
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "column": {
                        "id": 334,
                        "column_name": "name",
                        "verbose_name": "null",
                        "description": "null",
                        "expression": "",
                        "filterable": True,
                        "groupby": True,
                        "is_dttm": False,
                        "type": "VARCHAR(255)",
                        "python_date_format": "null",
                    },
                    "aggregate": "COUNT",
                    "sqlExpression": "null",
                    "isNew": False,
                    "hasCustomLabel": False,
                    "label": "COUNT(name)",
                    "optionName": "metric_xdzsijn42f9_khi4h3v3vci",
                },
                {
                    "expressionType": "SIMPLE",
                    "column": {
                        "id": 332,
                        "column_name": "ds",
                        "verbose_name": "null",
                        "description": "null",
                        "expression": "",
                        "filterable": True,
                        "groupby": True,
                        "is_dttm": True,
                        "type": "TIMESTAMP WITHOUT TIME ZONE",
                        "python_date_format": "null",
                    },
                    "aggregate": "COUNT",
                    "sqlExpression": "null",
                    "isNew": False,
                    "hasCustomLabel": False,
                    "label": "COUNT(ds)",
                    "optionName": "metric_80g1qb9b6o7_ci5vquydcbe",
                },
            ],
            "order_desc": True,
            "adhoc_filters": [],
            "groupby": ["name"],
            "columns": [],
            "row_limit": 10,
            "color_scheme": "supersetColors",
            "label_colors": {},
            "show_legend": True,
            "y_axis_format": "SMART_NUMBER",
            "bottom_margin": "auto",
            "x_ticks_layout": "auto",
        }

        self.login(username="admin")
        rv = self.client.post(
            "/superset/explore_json/", data={"form_data": json.dumps(form_data)},
        )
        data = json.loads(rv.data.decode("utf-8"))

        # reference result computed directly in SQL Lab
        resp = self.run_sql(
            """
            SELECT count(name) AS count_name, count(ds) AS count_ds
            FROM birth_names
            WHERE ds >= '1921-01-22 00:00:00.000000' AND ds < '2021-01-22 00:00:00.000000'
            GROUP BY name
            ORDER BY count_name DESC
            LIMIT 10;
            """,
            client_id="client_id_1",
            user_name="admin",
        )
        count_ds = []
        count_name = []
        for series in data["data"]:
            if series["key"] == "COUNT(ds)":
                count_ds = series["values"]
            if series["key"] == "COUNT(name)":
                count_name = series["values"]
        for expected, actual_ds, actual_name in zip(resp["data"], count_ds, count_name):
            assert expected["count_name"] == actual_name["y"]
            assert expected["count_ds"] == actual_ds["y"]
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    @mock.patch.dict(
        "superset.extensions.feature_flag_manager._feature_flags",
        GLOBAL_ASYNC_QUERIES=True,
    )
    def test_explore_json_async(self):
        """With GLOBAL_ASYNC_QUERIES on, explore_json responds 202 with an
        async job descriptor instead of query results."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = {
            "datasource": f"{tbl_id}__table",
            "viz_type": "dist_bar",
            "time_range_endpoints": ["inclusive", "exclusive"],
            "granularity_sqla": "ds",
            "time_range": "No filter",
            "metrics": ["count"],
            "adhoc_filters": [],
            "groupby": ["gender"],
            "row_limit": 100,
        }
        async_query_manager.init_app(app)
        self.login(username="admin")
        rv = self.client.post(
            "/superset/explore_json/", data={"form_data": json.dumps(form_data)},
        )
        data = json.loads(rv.data.decode("utf-8"))
        keys = list(data.keys())

        self.assertEqual(rv.status_code, 202)
        self.assertCountEqual(
            keys, ["channel_id", "job_id", "user_id", "status", "errors", "result_url"]
        )
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    @mock.patch.dict(
        "superset.extensions.feature_flag_manager._feature_flags",
        GLOBAL_ASYNC_QUERIES=True,
    )
    def test_explore_json_async_results_format(self):
        """Even with async queries on, ``?results=true`` returns synchronously."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = {
            "datasource": f"{tbl_id}__table",
            "viz_type": "dist_bar",
            "time_range_endpoints": ["inclusive", "exclusive"],
            "granularity_sqla": "ds",
            "time_range": "No filter",
            "metrics": ["count"],
            "adhoc_filters": [],
            "groupby": ["gender"],
            "row_limit": 100,
        }
        async_query_manager.init_app(app)
        self.login(username="admin")
        rv = self.client.post(
            "/superset/explore_json/?results=true",
            data={"form_data": json.dumps(form_data)},
        )
        self.assertEqual(rv.status_code, 200)
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    @mock.patch(
        "superset.utils.cache_manager.CacheManager.cache",
        new_callable=mock.PropertyMock,
    )
    @mock.patch("superset.viz.BaseViz.force_cached", new_callable=mock.PropertyMock)
    def test_explore_json_data(self, mock_force_cached, mock_cache):
        """explore_json/data serves results for a cached form_data key."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = dict(
            {
                "form_data": {
                    "datasource": f"{tbl_id}__table",
                    "viz_type": "dist_bar",
                    "time_range_endpoints": ["inclusive", "exclusive"],
                    "granularity_sqla": "ds",
                    "time_range": "No filter",
                    "metrics": ["count"],
                    "adhoc_filters": [],
                    "groupby": ["gender"],
                    "row_limit": 100,
                }
            }
        )

        # minimal cache stub: every key resolves to the form_data above
        class MockCache:
            def get(self, key):
                return form_data

            def set(self):
                return None

        mock_cache.return_value = MockCache()
        mock_force_cached.return_value = False

        self.login(username="admin")
        rv = self.client.get("/superset/explore_json/data/valid-cache-key")
        data = json.loads(rv.data.decode("utf-8"))

        self.assertEqual(rv.status_code, 200)
        self.assertEqual(data["rowcount"], 2)
    @mock.patch(
        "superset.utils.cache_manager.CacheManager.cache",
        new_callable=mock.PropertyMock,
    )
    def test_explore_json_data_no_login(self, mock_cache):
        """explore_json/data requires authentication — 401 when logged out."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = dict(
            {
                "form_data": {
                    "datasource": f"{tbl_id}__table",
                    "viz_type": "dist_bar",
                    "time_range_endpoints": ["inclusive", "exclusive"],
                    "granularity_sqla": "ds",
                    "time_range": "No filter",
                    "metrics": ["count"],
                    "adhoc_filters": [],
                    "groupby": ["gender"],
                    "row_limit": 100,
                }
            }
        )

        # minimal cache stub: every key resolves to the form_data above
        class MockCache:
            def get(self, key):
                return form_data

            def set(self):
                return None

        mock_cache.return_value = MockCache()

        rv = self.client.get("/superset/explore_json/data/valid-cache-key")
        self.assertEqual(rv.status_code, 401)
    def test_explore_json_data_invalid_cache_key(self):
        """An unknown cache key yields a 404 with an explanatory error."""
        self.login(username="admin")
        cache_key = "invalid-cache-key"
        rv = self.client.get(f"/superset/explore_json/data/{cache_key}")
        data = json.loads(rv.data.decode("utf-8"))

        self.assertEqual(rv.status_code, 404)
        self.assertEqual(data["error"], "Cached data not found")
    @mock.patch(
        "superset.security.SupersetSecurityManager.get_schemas_accessible_by_user"
    )
    @mock.patch("superset.security.SupersetSecurityManager.can_access_database")
    @mock.patch("superset.security.SupersetSecurityManager.can_access_all_datasources")
    def test_schemas_access_for_csv_upload_endpoint(
        self,
        mock_can_access_all_datasources,
        mock_can_access_database,
        mock_schemas_accessible,
    ):
        """The file-upload schemas endpoint returns only schemas the security
        manager reports as accessible."""
        self.login(username="admin")
        dbobj = self.create_fake_db()
        # deny blanket access so the per-schema path is exercised
        mock_can_access_all_datasources.return_value = False
        mock_can_access_database.return_value = False
        mock_schemas_accessible.return_value = ["this_schema_is_allowed_too"]
        data = self.get_json_resp(
            url="/superset/schemas_access_for_file_upload?db_id={db_id}".format(
                db_id=dbobj.id
            )
        )
        assert data == ["this_schema_is_allowed_too"]
        self.delete_fake_db()
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_select_star(self):
        """select_star renders a query mentioning the table's columns for admin."""
        self.login(username="admin")
        examples_db = superset.utils.database.get_example_database()
        resp = self.get_resp(f"/superset/select_star/{examples_db.id}/birth_names")
        self.assertIn("gender", resp)
    def test_get_select_star_not_allowed(self):
        """
        Database API: Test get select star not allowed

        A gamma user without table access gets a 403 from select_star.
        """
        self.login(username="gamma")
        example_db = superset.utils.database.get_example_database()
        resp = self.client.get(f"/superset/select_star/{example_db.id}/birth_names")
        self.assertEqual(resp.status_code, 403)
    @mock.patch("superset.views.core.results_backend_use_msgpack", False)
    @mock.patch("superset.views.core.results_backend")
    def test_display_limit(self, mock_results_backend):
        """The results endpoint honors the ``rows`` limit and sets
        ``displayLimitReached`` when results are truncated."""
        self.login()

        data = [{"col_0": i} for i in range(100)]
        payload = {
            "status": QueryStatus.SUCCESS,
            "query": {"rows": 100},
            "data": data,
        }
        # limit results to 1
        expected_key = {"status": "success", "query": {"rows": 100}, "data": data}
        limited_data = data[:1]
        expected_limited = {
            "status": "success",
            "query": {"rows": 100},
            "data": limited_data,
            "displayLimitReached": True,
        }

        query_mock = mock.Mock()
        query_mock.sql = "SELECT *"
        query_mock.database = 1
        query_mock.schema = "superset"

        # do not apply msgpack serialization
        use_msgpack = app.config["RESULTS_BACKEND_USE_MSGPACK"]
        app.config["RESULTS_BACKEND_USE_MSGPACK"] = False
        serialized_payload = sql_lab._serialize_payload(payload, False)
        compressed = utils.zlib_compress(serialized_payload)
        mock_results_backend.get.return_value = compressed

        with mock.patch("superset.views.core.db") as mock_superset_db:
            mock_superset_db.session.query().filter_by().one_or_none.return_value = (
                query_mock
            )
            # get all results
            result_key = json.loads(self.get_resp("/superset/results/key/"))
            result_limited = json.loads(self.get_resp("/superset/results/key/?rows=1"))

        self.assertEqual(result_key, expected_key)
        self.assertEqual(result_limited, expected_limited)

        # restore the original config flag
        app.config["RESULTS_BACKEND_USE_MSGPACK"] = use_msgpack
def test_results_default_deserialization(self):
    """Legacy (JSON) results payloads round-trip through serialize/deserialize."""
    use_new_deserialization = False
    data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
    cursor_descr = (
        ("a", "string"),
        ("b", "int"),
        ("c", "float"),
        ("d", "datetime"),
    )
    db_engine_spec = BaseEngineSpec()
    results = SupersetResultSet(data, cursor_descr, db_engine_spec)
    query = {
        "database_id": 1,
        "sql": "SELECT * FROM birth_names LIMIT 100",
        "status": QueryStatus.PENDING,
    }
    (
        serialized_data,
        selected_columns,
        all_columns,
        expanded_columns,
    ) = sql_lab._serialize_and_expand_data(
        results, db_engine_spec, use_new_deserialization
    )
    payload = {
        "query_id": 1,
        "status": QueryStatus.SUCCESS,
        "state": QueryStatus.SUCCESS,
        "data": serialized_data,
        "columns": all_columns,
        "selected_columns": selected_columns,
        "expanded_columns": expanded_columns,
        "query": query,
    }

    # the legacy path serializes to a JSON string, not bytes
    serialized_payload = sql_lab._serialize_payload(
        payload, use_new_deserialization
    )
    self.assertIsInstance(serialized_payload, str)

    query_mock = mock.Mock()
    deserialized_payload = superset.views.utils._deserialize_results_payload(
        serialized_payload, query_mock, use_new_deserialization
    )

    self.assertDictEqual(deserialized_payload, payload)
    # the query object (and its expand_data hook) must be untouched on this path
    query_mock.assert_not_called()
def test_results_msgpack_deserialization(self):
    """Msgpack results payloads round-trip and trigger expand_data exactly once."""
    use_new_deserialization = True
    data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
    cursor_descr = (
        ("a", "string"),
        ("b", "int"),
        ("c", "float"),
        ("d", "datetime"),
    )
    db_engine_spec = BaseEngineSpec()
    results = SupersetResultSet(data, cursor_descr, db_engine_spec)
    query = {
        "database_id": 1,
        "sql": "SELECT * FROM birth_names LIMIT 100",
        "status": QueryStatus.PENDING,
    }
    (
        serialized_data,
        selected_columns,
        all_columns,
        expanded_columns,
    ) = sql_lab._serialize_and_expand_data(
        results, db_engine_spec, use_new_deserialization
    )
    payload = {
        "query_id": 1,
        "status": QueryStatus.SUCCESS,
        "state": QueryStatus.SUCCESS,
        "data": serialized_data,
        "columns": all_columns,
        "selected_columns": selected_columns,
        "expanded_columns": expanded_columns,
        "query": query,
    }

    # the msgpack path serializes to bytes, not a JSON string
    serialized_payload = sql_lab._serialize_payload(
        payload, use_new_deserialization
    )
    self.assertIsInstance(serialized_payload, bytes)

    with mock.patch.object(
        db_engine_spec, "expand_data", wraps=db_engine_spec.expand_data
    ) as expand_data:
        query_mock = mock.Mock()
        query_mock.database.db_engine_spec.expand_data = expand_data
        deserialized_payload = superset.views.utils._deserialize_results_payload(
            serialized_payload, query_mock, use_new_deserialization
        )
        # deserialization re-expands the data, so compare against the
        # records produced from the original result set
        df = results.to_pandas_df()
        payload["data"] = dataframe.df_to_records(df)

        self.assertDictEqual(deserialized_payload, payload)
        expand_data.assert_called_once()
@mock.patch.dict(
    "superset.extensions.feature_flag_manager._feature_flags",
    {"FOO": lambda x: 1},
    clear=True,
)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_feature_flag_serialization(self):
    """
    Functions in feature flags don't break bootstrap data serialization.
    """
    self.login()
    # build the exact string the server should embed: the lambda is handled by
    # the pessimistic serializer, then HTML-escaped like the bootstrap payload
    encoded = json.dumps(
        {"FOO": lambda x: 1, "super": "set"},
        default=utils.pessimistic_json_iso_dttm_ser,
    )
    html_string = (
        html.escape(encoded, quote=False)
        .replace("'", "&#39;")
        .replace('"', "&#34;")
    )
    dash_id = db.session.query(Dashboard.id).first()[0]
    tbl_id = self.table_ids.get("wb_health_population")
    urls = [
        "/superset/sqllab",
        "/superset/welcome",
        f"/superset/dashboard/{dash_id}/",
        "/superset/profile/admin/",
        f"/superset/explore/table/{tbl_id}",
    ]
    for url in urls:
        data = self.get_resp(url)
        self.assertTrue(html_string in data)
@mock.patch.dict(
    "superset.extensions.feature_flag_manager._feature_flags",
    {"SQLLAB_BACKEND_PERSISTENCE": True},
    clear=True,
)
def test_sqllab_backend_persistence_payload(self):
    """Only queries tied to a persisted tab appear in the SQL Lab bootstrap."""
    username = "admin"
    self.login(username)
    user_id = security_manager.find_user(username).id

    # create a tab
    data = {
        "queryEditor": json.dumps(
            {
                "title": "Untitled Query 1",
                "dbId": 1,
                "schema": None,
                "autorun": False,
                "sql": "SELECT ...",
                "queryLimit": 1000,
            }
        )
    }
    resp = self.get_json_resp("/tabstateview/", data=data)
    tab_state_id = resp["id"]

    # run a query in the created tab
    self.run_sql(
        "SELECT name FROM birth_names",
        "client_id_1",
        user_name=username,
        raise_on_error=True,
        sql_editor_id=str(tab_state_id),
    )
    # run an orphan query (no tab)
    self.run_sql(
        "SELECT name FROM birth_names",
        "client_id_2",
        user_name=username,
        raise_on_error=True,
    )

    # we should have only 1 query returned, since the second one is not
    # associated with any tabs
    payload = views.Superset._get_sqllab_tabs(user_id=user_id)
    self.assertEqual(len(payload["queries"]), 1)
def test_virtual_table_explore_visibility(self):
    """allows_virtual_table_explore follows the extra flag, defaulting to True."""
    # test that default visibility it set to True
    database = superset.utils.database.get_example_database()
    self.assertEqual(database.allows_virtual_table_explore, True)

    # test that visibility is disabled when extra is set to False
    extra = database.get_extra()
    extra["allows_virtual_table_explore"] = False
    database.extra = json.dumps(extra)
    self.assertEqual(database.allows_virtual_table_explore, False)

    # test that visibility is enabled when extra is set to True
    extra = database.get_extra()
    extra["allows_virtual_table_explore"] = True
    database.extra = json.dumps(extra)
    self.assertEqual(database.allows_virtual_table_explore, True)

    # test that visibility is not broken with bad values
    extra = database.get_extra()
    extra["allows_virtual_table_explore"] = "trash value"
    database.extra = json.dumps(extra)
    self.assertEqual(database.allows_virtual_table_explore, True)
def test_explore_database_id(self):
    """explore_database_id defaults to the database's own id unless overridden."""
    database = superset.utils.database.get_example_database()
    explore_database = superset.utils.database.get_example_database()

    # test that explore_database_id is the regular database
    # id if none is set in the extra
    self.assertEqual(database.explore_database_id, database.id)

    # test that explore_database_id is correct if the extra is set
    extra = database.get_extra()
    extra["explore_database_id"] = explore_database.id
    database.extra = json.dumps(extra)
    self.assertEqual(database.explore_database_id, explore_database.id)
def test_get_column_names_from_metric(self):
    """Simple adhoc metrics expose their column name; SQL metrics do not."""
    metric_simple = {
        "expressionType": utils.AdhocMetricExpressionType.SIMPLE.value,
        "column": {"column_name": "my_col"},
        "aggregate": "SUM",
        "label": "My Simple Label",
    }
    metric_sql = {
        "expressionType": utils.AdhocMetricExpressionType.SQL.value,
        "sqlExpression": "SUM(my_label)",
        "label": "My SQL Label",
    }

    # a SIMPLE metric maps straight to its column; a SQL metric has none
    assert utils.get_column_name_from_metric(metric_simple) == "my_col"
    assert utils.get_column_name_from_metric(metric_sql) is None

    # the plural helper keeps only the resolvable column names
    assert utils.get_column_names_from_metrics([metric_simple, metric_sql]) == [
        "my_col"
    ]
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.models.core.DB_CONNECTION_MUTATOR")
def test_explore_injected_exceptions(self, mock_db_connection_mutator):
    """
    Handle injected exceptions from the db mutator

    Both a Superset-level and a driver-level exception raised by the
    DB_CONNECTION_MUTATOR must surface as a rendered error message in
    the explore view rather than an unhandled crash.
    """
    # renamed from `slice`, which shadowed the builtin of the same name
    slc = db.session.query(Slice).first()
    url = f"/superset/explore/?form_data=%7B%22slice_id%22%3A%20{slc.id}%7D"
    # the original duplicated this block verbatim for each exception type
    for exception in (
        SupersetException("Error message"),  # custom exception at the mutator level
        SQLAlchemyError("Error message"),  # driver exception at the mutator level
    ):
        mock_db_connection_mutator.side_effect = exception
        self.login()
        data = self.get_resp(url)
        self.assertIn("Error message", data)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.models.core.DB_CONNECTION_MUTATOR")
def test_dashboard_injected_exceptions(self, mock_db_connection_mutator):
    """
    Handle injected exceptions from the db mutator

    Both a Superset-level and a driver-level exception raised by the
    DB_CONNECTION_MUTATOR must surface as a rendered error message in
    the dashboard view rather than an unhandled crash.
    """
    dash = db.session.query(Dashboard).first()
    url = f"/superset/dashboard/{dash.id}/"
    # the original duplicated this block verbatim for each exception type
    for exception in (
        SupersetException("Error message"),  # custom exception at the mutator level
        SQLAlchemyError("Error message"),  # driver exception at the mutator level
    ):
        mock_db_connection_mutator.side_effect = exception
        self.login()
        data = self.get_resp(url)
        self.assertIn("Error message", data)
@mock.patch("superset.sql_lab.cancel_query")
@mock.patch("superset.views.core.db.session")
def test_stop_query_not_implemented(
    self, mock_superset_db_session, mock_sql_lab_cancel_query
):
    """
    Handles stop query when the DB engine spec does not
    have a cancel query method.

    A failed cancel (cancel_query returning False) must yield HTTP 422.
    """
    form_data = {"client_id": "foo"}
    query_mock = mock.Mock()
    query_mock.client_id = "foo"
    query_mock.status = QueryStatus.RUNNING
    self.login(username="admin")
    # NOTE(review): return_value is set on the result of .one(); the view
    # presumably calls .one() itself — confirm against views.core if changed
    mock_superset_db_session.query().filter_by().one().return_value = query_mock
    mock_sql_lab_cancel_query.return_value = False
    rv = self.client.post(
        "/superset/stop_query/", data={"form_data": json.dumps(form_data)},
    )
    assert rv.status_code == 422
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 38.228803 | 103 | 0.606566 |
import csv
import datetime
import doctest
import html
import io
import json
import logging
from typing import Dict, List
from urllib.parse import quote
import superset.utils.database
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
)
import pytest
import pytz
import random
import re
import unittest
from unittest import mock
import pandas as pd
import sqlalchemy as sqla
from sqlalchemy.exc import SQLAlchemyError
from superset.models.cache import CacheKey
from superset.utils.database import get_example_database
from tests.integration_tests.conftest import with_feature_flags
from tests.integration_tests.fixtures.energy_dashboard import (
load_energy_table_with_slice,
load_energy_table_data,
)
from tests.integration_tests.test_app import app
import superset.views.utils
from superset import (
dataframe,
db,
security_manager,
sql_lab,
)
from superset.common.db_query_status import QueryStatus
from superset.connectors.sqla.models import SqlaTable
from superset.db_engine_specs.base import BaseEngineSpec
from superset.db_engine_specs.mssql import MssqlEngineSpec
from superset.exceptions import SupersetException
from superset.extensions import async_query_manager
from superset.models import core as models
from superset.models.annotations import Annotation, AnnotationLayer
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query
from superset.result_set import SupersetResultSet
from superset.utils import core as utils
from superset.views import core as views
from superset.views.database.views import DatabaseView
from .base_tests import SupersetTestCase
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
load_world_bank_data,
)
# Module-level logger for this test module.
logger = logging.getLogger(__name__)
class TestCore(SupersetTestCase):
def setUp(self):
    """Reset query/log state and snapshot config before each test."""
    db.session.query(Query).delete()
    db.session.query(DatasourceAccessRequest).delete()
    db.session.query(models.Log).delete()
    # map table names to ids for quick lookup in tests
    self.table_ids = {
        tbl.table_name: tbl.id for tbl in (db.session.query(SqlaTable).all())
    }
    # saved so tearDown can restore the original value
    self.original_unsafe_db_setting = app.config["PREVENT_UNSAFE_DB_CONNECTIONS"]
def tearDown(self):
    """Clean up queries and restore the unsafe-DB-connection setting."""
    db.session.query(Query).delete()
    app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = self.original_unsafe_db_setting
def test_login(self):
    """Valid login succeeds; logout and bad credentials show the login page."""
    resp = self.get_resp("/login/", data=dict(username="admin", password="general"))
    self.assertNotIn("User confirmation needed", resp)

    resp = self.get_resp("/logout/", follow_redirects=True)
    self.assertIn("User confirmation needed", resp)

    resp = self.get_resp(
        "/login/", data=dict(username="admin", password="wrongPassword")
    )
    self.assertIn("User confirmation needed", resp)
def test_dashboard_endpoint(self):
    """Requesting a nonexistent dashboard id yields HTTP 404."""
    self.login()
    response = self.client.get("/superset/dashboard/-1/")
    self.assertEqual(response.status_code, 404)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_slice_endpoint(self):
    """/superset/slice/ renders, standalone mode hides the navbar, bad id 404s."""
    self.login(username="admin")
    slc = self.get_slice("Girls", db.session)
    resp = self.get_resp("/superset/slice/{}/".format(slc.id))
    assert "Original value" in resp
    assert "List Roles" in resp

    # standalone mode should omit the navbar chrome
    resp = self.get_resp("/superset/slice/{}/?standalone=true".format(slc.id))
    assert '<div class="navbar' not in resp

    resp = self.client.get("/superset/slice/-1/")
    assert resp.status_code == 404
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_viz_cache_key(self):
    """Cache keys vary with the query object and time_compare, but not
    with inner_from_dttm."""
    self.login(username="admin")
    slc = self.get_slice("Girls", db.session)

    viz = slc.viz
    qobj = viz.query_obj()
    cache_key = viz.cache_key(qobj)

    qobj["groupby"] = []
    cache_key_with_groupby = viz.cache_key(qobj)
    self.assertNotEqual(cache_key, cache_key_with_groupby)

    self.assertNotEqual(
        viz.cache_key(qobj), viz.cache_key(qobj, time_compare="12 weeks")
    )

    self.assertNotEqual(
        viz.cache_key(qobj, time_compare="28 days"),
        viz.cache_key(qobj, time_compare="12 weeks"),
    )

    # inner_from_dttm is excluded from the cache key computation
    qobj["inner_from_dttm"] = datetime.datetime(1901, 1, 1)
    self.assertEqual(cache_key_with_groupby, viz.cache_key(qobj))
def test_get_superset_tables_not_allowed(self):
    """A gamma user without schema access gets 404 from the tables endpoint."""
    example_db = superset.utils.database.get_example_database()
    schema_name = self.default_schema_backend_map[example_db.backend]
    self.login(username="gamma")
    uri = f"superset/tables/{example_db.id}/{schema_name}/undefined/"
    rv = self.client.get(uri)
    self.assertEqual(rv.status_code, 404)
def test_get_superset_tables_substr(self):
    """Substring search on the tables endpoint returns the matching table."""
    example_db = superset.utils.database.get_example_database()
    if example_db.backend in {"presto", "hive"}:
        # TODO: change table to the real table that is in examples.
        return
    self.login(username="admin")
    schema_name = self.default_schema_backend_map[example_db.backend]
    uri = f"superset/tables/{example_db.id}/{schema_name}/ab_role/"
    rv = self.client.get(uri)
    response = json.loads(rv.data.decode("utf-8"))
    self.assertEqual(rv.status_code, 200)

    expected_response = {
        "options": [
            {
                "label": "ab_role",
                "schema": schema_name,
                "title": "ab_role",
                "type": "table",
                "value": "ab_role",
                "extra": None,
            }
        ],
        "tableLength": 1,
    }
    self.assertEqual(response, expected_response)
def test_get_superset_tables_not_found(self):
    """An invalid database id yields 404 from the tables endpoint."""
    self.login(username="admin")
    # was an f-string with no placeholders (lint F541)
    uri = "superset/tables/invalid/public/undefined/"
    rv = self.client.get(uri)
    self.assertEqual(rv.status_code, 404)
def test_annotation_json_endpoint(self):
    """Annotations created in a layer are served by /superset/annotation_json."""
    # Set up an annotation layer and annotation
    layer = AnnotationLayer(name="foo", descr="bar")
    db.session.add(layer)
    db.session.commit()

    annotation = Annotation(
        layer_id=layer.id,
        short_descr="my_annotation",
        start_dttm=datetime.datetime(2020, 5, 20, 18, 21, 51),
        end_dttm=datetime.datetime(2020, 5, 20, 18, 31, 51),
    )

    db.session.add(annotation)
    db.session.commit()

    self.login()
    resp_annotations = json.loads(
        self.get_resp("annotationlayermodelview/api/read")
    )
    # the UI needs id and name to function
    self.assertIn("id", resp_annotations["result"][0])
    self.assertIn("name", resp_annotations["result"][0])

    response = self.get_resp(
        f"/superset/annotation_json/{layer.id}?form_data="
        + quote(json.dumps({"time_range": "100 years ago : now"}))
    )
    assert "my_annotation" in response

    # Rollback changes
    db.session.delete(annotation)
    db.session.delete(layer)
    db.session.commit()
def test_admin_only_permissions(self):
    """Admin-only permissions exist on Admin and are absent on Alpha/Gamma."""
    def assert_admin_permission_in(role_name, assert_func):
        # check presence/absence of admin-only permission names on a role
        role = security_manager.find_role(role_name)
        permissions = [p.permission.name for p in role.permissions]
        assert_func("can_sync_druid_source", permissions)
        assert_func("can_approve", permissions)

    assert_admin_permission_in("Admin", self.assertIn)
    assert_admin_permission_in("Alpha", self.assertNotIn)
    assert_admin_permission_in("Gamma", self.assertNotIn)
def test_admin_only_menu_views(self):
    """Admin-only menu views exist on Admin and are absent on Alpha/Gamma."""
    def assert_admin_view_menus_in(role_name, assert_func):
        # check presence/absence of admin-only view-menu names on a role
        role = security_manager.find_role(role_name)
        view_menus = [p.view_menu.name for p in role.permissions]
        assert_func("ResetPasswordView", view_menus)
        assert_func("RoleModelView", view_menus)
        assert_func("Security", view_menus)
        assert_func("SQL Lab", view_menus)

    assert_admin_view_menus_in("Admin", self.assertIn)
    assert_admin_view_menus_in("Alpha", self.assertNotIn)
    assert_admin_view_menus_in("Gamma", self.assertNotIn)
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_save_slice(self):
    """Save-as creates a copy slice, overwrite renames it, then clean up."""
    self.login(username="admin")
    slice_name = "Energy Sankey"  # was an f-string with no placeholders (F541)
    slice_id = self.get_slice(slice_name, db.session).id
    copy_name_prefix = "Test Sankey"
    copy_name = f"{copy_name_prefix}[save]{random.random()}"
    tbl_id = self.table_ids.get("energy_usage")
    new_slice_name = f"{copy_name_prefix}[overwrite]{random.random()}"

    url = (
        "/superset/explore/table/{}/?slice_name={}&"
        "action={}&datasource_name=energy_usage"
    )

    form_data = {
        "adhoc_filters": [],
        "viz_type": "sankey",
        "groupby": ["target"],
        "metric": "sum__value",
        "row_limit": 5000,
        "slice_id": slice_id,
        "time_range_endpoints": ["inclusive", "exclusive"],
    }
    # Changing name and save as a new slice
    resp = self.client.post(
        url.format(tbl_id, copy_name, "saveas"),
        data={"form_data": json.dumps(form_data)},
    )
    db.session.expunge_all()
    new_slice_id = resp.json["form_data"]["slice_id"]
    slc = db.session.query(Slice).filter_by(id=new_slice_id).one()

    self.assertEqual(slc.slice_name, copy_name)
    form_data.pop("slice_id")  # We don't save the slice id when saving as
    self.assertEqual(slc.viz.form_data, form_data)

    form_data = {
        "adhoc_filters": [],
        "viz_type": "sankey",
        "groupby": ["source"],
        "metric": "sum__value",
        "row_limit": 5000,
        "slice_id": new_slice_id,
        "time_range": "now",
        "time_range_endpoints": ["inclusive", "exclusive"],
    }
    # Setting the name back to its original name by overwriting new slice
    self.client.post(
        url.format(tbl_id, new_slice_name, "overwrite"),
        data={"form_data": json.dumps(form_data)},
    )
    db.session.expunge_all()
    slc = db.session.query(Slice).filter_by(id=new_slice_id).one()
    self.assertEqual(slc.slice_name, new_slice_name)
    self.assertEqual(slc.viz.form_data, form_data)

    # Cleanup
    slices = (
        db.session.query(Slice)
        .filter(Slice.slice_name.like(copy_name_prefix + "%"))
        .all()
    )
    for slc in slices:
        db.session.delete(slc)
    db.session.commit()
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_filter_endpoint(self):
    """The legacy /superset/filter/ endpoint returns filter values for a column."""
    self.login(username="admin")
    slice_name = "Energy Sankey"
    slice_id = self.get_slice(slice_name, db.session).id
    db.session.commit()
    tbl_id = self.table_ids.get("energy_usage")
    # BUG FIX: the original never executed the query (`.one()` missing), so
    # `filter_select_enabled` was set on the Query object — a silent no-op —
    # instead of on the SqlaTable instance.
    table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id).one()
    table.filter_select_enabled = True
    url = (
        "/superset/filter/table/{}/target/?viz_type=sankey&groupby=source"
        "&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&"
        "slice_id={}&datasource_name=energy_usage&"
        "datasource_id=1&datasource_type=table"
    )

    # Changing name
    resp = self.get_resp(url.format(tbl_id, slice_id))
    assert len(resp) > 0
    assert "energy_target0" in resp
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_slice_data(self):
    """Slice.data exposes the attributes the frontend requires."""
    # slice data should have some required attributes
    self.login(username="admin")
    slc = self.get_slice(
        slice_name="Girls", session=db.session, expunge_from_session=False
    )
    slc_data_attributes = slc.data.keys()
    assert "changed_on" in slc_data_attributes
    assert "modified" in slc_data_attributes
    assert "owners" in slc_data_attributes
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_slices(self):
    """Every slice's explore URL responds with HTTP 200 for an admin."""
    # Testing by hitting the two supported end points for all slices
    self.login(username="admin")
    Slc = Slice
    urls = []
    for slc in db.session.query(Slc).all():
        urls += [
            (slc.slice_name, "explore", slc.slice_url),
        ]
    for name, method, url in urls:
        # lazy %-style args instead of an eager f-string; the redundant
        # debug print() duplicating this message was removed
        logger.info("[%s]/[%s]: %s", name, method, url)
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
def test_tablemodelview_list(self):
    """The table list view shows a table's name and explore link."""
    self.login(username="admin")

    url = "/tablemodelview/list/"
    resp = self.get_resp(url)

    # assert that a table is listed
    table = db.session.query(SqlaTable).first()
    assert table.name in resp
    assert "/superset/explore/table/{}".format(table.id) in resp
def test_add_slice(self):
    """GET /chart/add renders successfully for an admin."""
    self.login(username="admin")
    # assert that /chart/add responds with 200
    response = self.client.get("/chart/add")
    self.assertEqual(response.status_code, 200)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_get_user_slices_for_owners(self):
    """/superset/user_slices/ reflects slice ownership changes."""
    self.login(username="alpha")
    user = security_manager.find_user("alpha")
    slice_name = "Girls"

    # ensure user is not owner of any slices
    url = f"/superset/user_slices/{user.id}/"
    resp = self.client.get(url)
    data = json.loads(resp.data)
    self.assertEqual(data, [])

    # make user owner of slice and verify that endpoint returns said slice
    slc = self.get_slice(
        slice_name=slice_name, session=db.session, expunge_from_session=False
    )
    slc.owners = [user]
    db.session.merge(slc)
    db.session.commit()
    url = f"/superset/user_slices/{user.id}/"
    resp = self.client.get(url)
    data = json.loads(resp.data)
    self.assertEqual(len(data), 1)
    self.assertEqual(data[0]["title"], slice_name)

    # remove ownership and ensure user no longer gets slice
    slc = self.get_slice(
        slice_name=slice_name, session=db.session, expunge_from_session=False
    )
    slc.owners = []
    db.session.merge(slc)
    db.session.commit()
    url = f"/superset/user_slices/{user.id}/"
    resp = self.client.get(url)
    data = json.loads(resp.data)
    self.assertEqual(data, [])
def test_get_user_slices(self):
    """The sliceasync read API filters by creator and responds 200."""
    self.login(username="admin")
    admin_id = security_manager.find_user("admin").id
    response = self.client.get(f"/sliceasync/api/read?_flt_0_created_by={admin_id}")
    self.assertEqual(response.status_code, 200)
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_slices_V2(self):
    """Slice URLs are reachable for a user holding the explore-v2-beta role."""
    # Add explore-v2-beta role to admin user
    # Test all slice urls as user with with explore-v2-beta role
    security_manager.add_role("explore-v2-beta")

    security_manager.add_user(
        "explore_beta",
        "explore_beta",
        " user",
        "explore_beta@airbnb.com",
        security_manager.find_role("explore-v2-beta"),
        password="general",
    )
    self.login(username="explore_beta", password="general")

    Slc = Slice
    urls = []
    for slc in db.session.query(Slc).all():
        urls += [(slc.slice_name, "slice_url", slc.slice_url)]
    for name, method, url in urls:
        print(f"[{name}]/[{method}]: {url}")
        self.client.get(url)
def test_doctests(self):
    """Run the doctests embedded in selected superset modules."""
    for module in (utils, models, sql_lab):
        failure_count, _test_count = doctest.testmod(module)
        if failure_count:
            raise Exception("Failed a doctest")
def test_misc(self):
    """The unauthenticated health-check endpoints all report OK."""
    for endpoint in ("/health", "/healthcheck", "/ping"):
        assert self.get_resp(endpoint) == "OK"
def test_testconn(self, username="admin"):
    """/superset/testconn accepts both masked and decrypted SQLAlchemy URIs."""
    # need to temporarily allow sqlite dbs, teardown will undo this
    app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False
    self.login(username=username)
    database = superset.utils.database.get_example_database()

    # validate that the endpoint works with the password-masked sqlalchemy uri
    data = json.dumps(
        {
            "uri": database.safe_sqlalchemy_uri(),
            "name": "examples",
            "impersonate_user": False,
        }
    )
    response = self.client.post(
        "/superset/testconn", data=data, content_type="application/json"
    )
    assert response.status_code == 200
    assert response.headers["Content-Type"] == "application/json"

    # validate that the endpoint works with the decrypted sqlalchemy uri
    data = json.dumps(
        {
            "uri": database.sqlalchemy_uri_decrypted,
            "name": "examples",
            "impersonate_user": False,
        }
    )
    response = self.client.post(
        "/superset/testconn", data=data, content_type="application/json"
    )
    assert response.status_code == 200
    assert response.headers["Content-Type"] == "application/json"
def test_testconn_failed_conn(self, username="admin"):
    """Unknown or missing drivers yield a 400 with a descriptive error body."""
    self.login(username=username)

    # a scheme with no registered driver at all
    data = json.dumps(
        {"uri": "broken://url", "name": "examples", "impersonate_user": False}
    )
    response = self.client.post(
        "/superset/testconn", data=data, content_type="application/json"
    )
    assert response.status_code == 400
    assert response.headers["Content-Type"] == "application/json"
    response_body = json.loads(response.data.decode("utf-8"))
    expected_body = {"error": "Could not load database driver: broken"}
    assert response_body == expected_body, "%s != %s" % (
        response_body,
        expected_body,
    )

    # a known dialect whose driver package is not installed
    data = json.dumps(
        {
            "uri": "mssql+pymssql://url",
            "name": "examples",
            "impersonate_user": False,
        }
    )
    response = self.client.post(
        "/superset/testconn", data=data, content_type="application/json"
    )
    assert response.status_code == 400
    assert response.headers["Content-Type"] == "application/json"
    response_body = json.loads(response.data.decode("utf-8"))
    expected_body = {"error": "Could not load database driver: mssql+pymssql"}
    assert response_body == expected_body, "%s != %s" % (
        response_body,
        expected_body,
    )
def test_testconn_unsafe_uri(self, username="admin"):
    """SQLite URIs are rejected when unsafe connections are prevented."""
    self.login(username=username)
    app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = True

    response = self.client.post(
        "/superset/testconn",
        data=json.dumps(
            {
                "uri": "sqlite:///home/superset/unsafe.db",
                "name": "unsafe",
                "impersonate_user": False,
            }
        ),
        content_type="application/json",
    )
    self.assertEqual(400, response.status_code)
    response_body = json.loads(response.data.decode("utf-8"))
    expected_body = {
        "error": "SQLiteDialect_pysqlite cannot be used as a data source for security reasons."
    }
    self.assertEqual(expected_body, response_body)
def test_custom_password_store(self):
    """A custom_password_store hook overrides the decrypted URI's password."""
    database = superset.utils.database.get_example_database()
    conn_pre = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)

    def custom_password_store(uri):
        # fixed password returned regardless of the URI
        return "password_store_test"

    models.custom_password_store = custom_password_store
    conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
    # only meaningful if the example database URI carries a password
    if conn_pre.password:
        assert conn.password == "password_store_test"
        assert conn.password != conn_pre.password

    # Disable for password store for later tests
    models.custom_password_store = None
def test_databaseview_edit(self, username="admin"):
    """Saving a password-masked URI must not clobber the decrypted URI."""
    # validate that sending a password-masked uri does not over-write the decrypted
    # uri
    self.login(username=username)
    database = superset.utils.database.get_example_database()
    sqlalchemy_uri_decrypted = database.sqlalchemy_uri_decrypted
    url = "databaseview/edit/{}".format(database.id)
    data = {k: database.__getattribute__(k) for k in DatabaseView.add_columns}
    data["sqlalchemy_uri"] = database.safe_sqlalchemy_uri()
    self.client.post(url, data=data)
    database = superset.utils.database.get_example_database()
    self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)

    # Need to clean up after ourselves
    database.impersonate_user = False
    database.allow_dml = False
    database.allow_run_async = False
    db.session.commit()
@pytest.mark.usefixtures(
    "load_energy_table_with_slice", "load_birth_names_dashboard_with_slices"
)
def test_warm_up_cache(self):
    """/superset/warm_up_cache works by slice id, table name, and dashboard."""
    self.login()
    slc = self.get_slice("Girls", db.session)
    data = self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(slc.id))
    self.assertEqual(
        data, [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
    )
    data = self.get_json_resp(
        "/superset/warm_up_cache?table_name=energy_usage&db_name=main"
    )
    assert len(data) > 0

    dashboard = self.get_dash_by_slug("births")

    assert self.get_json_resp(
        f"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}"
    ) == [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]

    # extra_filters are URL-encoded JSON applied on top of the slice
    assert self.get_json_resp(
        f"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}&extra_filters="
        + quote(json.dumps([{"col": "name", "op": "in", "val": ["Jennifer"]}]))
    ) == [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_cache_logging(self):
    """Warming the cache records a CacheKey row with the datasource uid."""
    self.login("admin")
    store_cache_keys = app.config["STORE_CACHE_KEYS_IN_METADATA_DB"]
    app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = True
    girls_slice = self.get_slice("Girls", db.session)
    self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(girls_slice.id))
    ck = db.session.query(CacheKey).order_by(CacheKey.id.desc()).first()
    assert ck.datasource_uid == f"{girls_slice.table.id}__table"
    # restore the original config value
    app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = store_cache_keys
def test_shortner(self):
    """Posting a long explore URL to /r/shortner/ returns a short /r/<id> link."""
    self.login(username="admin")
    data = (
        "//superset/explore/table/1/?viz_type=sankey&groupby=source&"
        "groupby=target&metric=sum__value&row_limit=5000&where=&having=&"
        "flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name="
        "Energy+Sankey&collapsed_fieldsets=&action=&datasource_name="
        "energy_usage&datasource_id=1&datasource_type=table&"
        "previous_viz_type=sankey"
    )
    resp = self.client.post("/r/shortner/", data=dict(data=data))
    assert re.search(r"\/r\/[0-9]+", resp.data.decode("utf-8"))
def test_shortner_invalid(self):
    """Malformed URLs are rejected by the shortner with HTTP 400."""
    self.login(username="admin")
    for bad_url in (
        "hhttp://invalid.com",
        "hhttps://invalid.com",
        "www.invalid.com",
    ):
        response = self.client.post("/r/shortner/", data=dict(data=bad_url))
        self.assertEqual(response.status_code, 400)
def test_redirect_invalid(self):
    """A stored invalid URL redirects to the app root instead of the target."""
    model_url = models.Url(url="hhttp://invalid.com")
    db.session.add(model_url)
    db.session.commit()

    self.login(username="admin")
    response = self.client.get(f"/r/{model_url.id}")
    assert response.headers["Location"] == "http://localhost/"

    # clean up the stored URL
    db.session.delete(model_url)
    db.session.commit()
@with_feature_flags(KV_STORE=False)
def test_kv_disabled(self):
    """With KV_STORE off, both read and write endpoints return 404."""
    self.login(username="admin")

    resp = self.client.get("/kv/10001/")
    self.assertEqual(404, resp.status_code)

    value = json.dumps({"data": "this is a test"})
    resp = self.client.post("/kv/store/", data=dict(data=value))
    self.assertEqual(resp.status_code, 404)
@with_feature_flags(KV_STORE=True)
def test_kv_enabled(self):
    """With KV_STORE on, stored values round-trip through the kv endpoints."""
    self.login(username="admin")

    # unknown key still 404s
    resp = self.client.get("/kv/10001/")
    self.assertEqual(404, resp.status_code)

    value = json.dumps({"data": "this is a test"})
    resp = self.client.post("/kv/store/", data=dict(data=value))
    self.assertEqual(resp.status_code, 200)
    kv = db.session.query(models.KeyValue).first()
    kv_value = kv.value
    self.assertEqual(json.loads(value), json.loads(kv_value))

    resp = self.client.get("/kv/{}/".format(kv.id))
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(value), json.loads(resp.data.decode("utf-8")))
def test_gamma(self):
    """A gamma user can see the chart and dashboard list views."""
    self.login(username="gamma")
    self.assertIn("Charts", self.get_resp("/chart/list/"))
    self.assertIn("Dashboards", self.get_resp("/dashboard/list/"))
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_csv_endpoint(self):
    """Round-trip a SQL Lab query through /superset/csv/ and check the CSV body.

    The original executed the run-query / fetch-CSV / parse sequence twice
    verbatim and discarded the first pass's results; the dead duplicate
    has been removed.
    """
    self.login()
    # look up a real name so the parameterized query returns a row
    client_id = "{}".format(random.getrandbits(64))[:10]
    get_name_sql = """
        SELECT name
        FROM birth_names
        LIMIT 1
    """
    resp = self.run_sql(get_name_sql, client_id, raise_on_error=True)
    name = resp["data"][0]["name"]

    sql = f"""
        SELECT name
        FROM birth_names
        WHERE name = '{name}'
        LIMIT 1
    """
    client_id = "{}".format(random.getrandbits(64))[:10]
    self.run_sql(sql, client_id, raise_on_error=True)

    resp = self.get_resp("/superset/csv/{}".format(client_id))
    data = csv.reader(io.StringIO(resp))
    expected_data = csv.reader(io.StringIO(f"name\n{name}\n"))

    self.assertEqual(list(expected_data), list(data))
    self.logout()
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_extra_table_metadata(self):
self.login()
example_db = superset.utils.database.get_example_database()
schema = "default" if example_db.backend in {"presto", "hive"} else "superset"
self.get_json_resp(
f"/superset/extra_table_metadata/{example_db.id}/birth_names/{schema}/"
)
def test_templated_sql_json(self):
if superset.utils.database.get_example_database().backend == "presto":
# TODO: make it work for presto
return
self.login()
sql = "SELECT '{{ 1+1 }}' as test"
data = self.run_sql(sql, "fdaklj3ws")
self.assertEqual(data["data"][0]["test"], "2")
    @mock.patch(
        "tests.integration_tests.superset_test_custom_template_processors.datetime"
    )
    @mock.patch("superset.views.core.get_sql_results")
    def test_custom_templated_sql_json(self, sql_lab_mock, mock_dt) -> None:
        """A custom template processor expands $DATE() before SQL Lab runs it."""
        # Freeze "now" so the macro expands deterministically.
        mock_dt.utcnow = mock.Mock(return_value=datetime.datetime(1970, 1, 1))
        self.login()
        sql = "SELECT '$DATE()' as test"
        resp = {
            "status": QueryStatus.SUCCESS,
            "query": {"rows": 1},
            "data": [{"test": "'1970-01-01'"}],
        }
        sql_lab_mock.return_value = resp
        dbobj = self.create_fake_db_for_macros()
        json_payload = dict(database_id=dbobj.id, sql=sql)
        self.get_json_resp(
            "/superset/sql_json/", raise_on_error=False, json_=json_payload
        )
        assert sql_lab_mock.called
        # The macro must already be expanded in the SQL handed to SQL Lab.
        self.assertEqual(sql_lab_mock.call_args[0][1], "SELECT '1970-01-01' as test")
        self.delete_fake_db_for_macros()
def test_fetch_datasource_metadata(self):
self.login(username="admin")
url = "/superset/fetch_datasource_metadata?" "datasourceKey=1__table"
resp = self.get_json_resp(url)
keys = [
"name",
"type",
"order_by_choices",
"granularity_sqla",
"time_grain_sqla",
"id",
]
for k in keys:
self.assertIn(k, resp.keys())
@staticmethod
def _get_user_activity_endpoints(user: str):
userid = security_manager.find_user(user).id
return (
f"/superset/recent_activity/{userid}/",
f"/superset/created_slices/{userid}/",
f"/superset/created_dashboards/{userid}/",
f"/superset/fave_slices/{userid}/",
f"/superset/fave_dashboards/{userid}/",
f"/superset/user_slices/{userid}/",
f"/superset/fave_dashboards_by_username/{user}/",
)
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_user_profile(self, username="admin"):
        """Profile page renders and the activity endpoints return clean JSON."""
        self.login(username=username)
        slc = self.get_slice("Girls", db.session)
        # Setting some faves
        url = f"/superset/favstar/Slice/{slc.id}/select/"
        resp = self.get_json_resp(url)
        self.assertEqual(resp["count"], 1)
        dash = db.session.query(Dashboard).filter_by(slug="births").first()
        url = f"/superset/favstar/Dashboard/{dash.id}/select/"
        resp = self.get_json_resp(url)
        self.assertEqual(resp["count"], 1)
        resp = self.get_resp(f"/superset/profile/{username}/")
        # The bootstrap payload should be embedded in the rendered page.
        self.assertIn('"app"', resp)
        for endpoint in self._get_user_activity_endpoints(username):
            data = self.get_json_resp(endpoint)
            # Error responses carry a "message" key; success payloads do not.
            self.assertNotIn("message", data)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_user_activity_access(self, username="gamma"):
self.login(username=username)
# accessing own and other users' activity is allowed by default
for user in ("admin", "gamma"):
for endpoint in self._get_user_activity_endpoints(user):
resp = self.client.get(endpoint)
assert resp.status_code == 200
# disabling flag will block access to other users' activity data
access_flag = app.config["ENABLE_BROAD_ACTIVITY_ACCESS"]
app.config["ENABLE_BROAD_ACTIVITY_ACCESS"] = False
for user in ("admin", "gamma"):
for endpoint in self._get_user_activity_endpoints(user):
resp = self.client.get(endpoint)
expected_status_code = 200 if user == username else 403
assert resp.status_code == expected_status_code
# restore flag
app.config["ENABLE_BROAD_ACTIVITY_ACCESS"] = access_flag
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_slice_id_is_always_logged_correctly_on_web_request(self):
# superset/explore case
self.login("admin")
slc = db.session.query(Slice).filter_by(slice_name="Girls").one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
self.get_resp(slc.slice_url, {"form_data": json.dumps(slc.form_data)})
self.assertEqual(1, qry.count())
def create_sample_csvfile(self, filename: str, content: List[str]) -> None:
with open(filename, "w+") as test_file:
for l in content:
test_file.write(f"{l}\n")
    def create_sample_excelfile(self, filename: str, content: Dict[str, str]) -> None:
        # Write the dict as a one-sheet Excel workbook (keys become columns).
        pd.DataFrame(content).to_excel(filename)
    def enable_csv_upload(self, database: models.Database) -> None:
        """Enable file upload on the database and verify the UI exposes it."""
        database.allow_file_upload = True
        db.session.commit()
        # Sanity-check that the upload entry points are now rendered.
        add_datasource_page = self.get_resp("/databaseview/list/")
        self.assertIn("Upload a CSV", add_datasource_page)
        form_get = self.get_resp("/csvtodatabaseview/form")
        self.assertIn("CSV to Database configuration", form_get)
    def test_dataframe_timezone(self):
        """TZ-aware datetimes survive the result-set -> records -> JSON path."""
        tz = pytz.FixedOffset(60)  # UTC+01:00
        data = [
            (datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),
            (datetime.datetime(2017, 11, 18, 22, 6, 30, tzinfo=tz),),
        ]
        results = SupersetResultSet(list(data), [["data"]], BaseEngineSpec)
        df = results.to_pandas_df()
        data = dataframe.df_to_records(df)
        json_str = json.dumps(data, default=utils.pessimistic_json_iso_dttm_ser)
        self.assertDictEqual(
            data[0], {"data": pd.Timestamp("2017-11-18 21:53:00.219225+0100", tz=tz)}
        )
        self.assertDictEqual(
            data[1], {"data": pd.Timestamp("2017-11-18 22:06:30+0100", tz=tz)}
        )
        # The serialized form must keep the +01:00 offset, not coerce to UTC.
        self.assertEqual(
            json_str,
            '[{"data": "2017-11-18T21:53:00.219225+01:00"}, {"data": "2017-11-18T22:06:30+01:00"}]',
        )
    def test_mssql_engine_spec_pymssql(self):
        """MSSQL tuple rows convert cleanly into records with Timestamps."""
        # Test for case when tuple is returned (pymssql)
        data = [
            (1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000)),
            (2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000)),
        ]
        results = SupersetResultSet(
            list(data), [["col1"], ["col2"], ["col3"]], MssqlEngineSpec
        )
        df = results.to_pandas_df()
        data = dataframe.df_to_records(df)
        self.assertEqual(len(data), 2)
        self.assertEqual(
            data[0],
            {"col1": 1, "col2": 1, "col3": pd.Timestamp("2017-10-19 23:39:16.660000")},
        )
def test_comments_in_sqlatable_query(self):
clean_query = "SELECT '/* val 1 */' as c1, '-- val 2' as c2 FROM tbl"
commented_query = "/* comment 1 */" + clean_query + "-- comment 2"
table = SqlaTable(
table_name="test_comments_in_sqlatable_query_table",
sql=commented_query,
database=get_example_database(),
)
rendered_query = str(table.get_from_clause())
self.assertEqual(clean_query, rendered_query)
def test_slice_payload_no_datasource(self):
self.login(username="admin")
data = self.get_json_resp("/superset/explore_json/", raise_on_error=False)
self.assertEqual(
data["errors"][0]["message"],
"The dataset associated with this chart no longer exists",
)
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_explore_json(self):
        """explore_json returns chart data for a minimal dist_bar form."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = {
            "datasource": f"{tbl_id}__table",
            "viz_type": "dist_bar",
            "time_range_endpoints": ["inclusive", "exclusive"],
            "granularity_sqla": "ds",
            "time_range": "No filter",
            "metrics": ["count"],
            "adhoc_filters": [],
            "groupby": ["gender"],
            "row_limit": 100,
        }
        self.login(username="admin")
        rv = self.client.post(
            "/superset/explore_json/", data={"form_data": json.dumps(form_data)},
        )
        data = json.loads(rv.data.decode("utf-8"))
        self.assertEqual(rv.status_code, 200)
        # One row per gender value (boy/girl) in the fixture data.
        self.assertEqual(data["rowcount"], 2)
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_explore_json_dist_bar_order(self):
        """dist_bar orders series by the first metric, descending, matching an
        equivalent hand-written SQL query."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = {
            "datasource": f"{tbl_id}__table",
            "viz_type": "dist_bar",
            "url_params": {},
            "time_range_endpoints": ["inclusive", "exclusive"],
            "granularity_sqla": "ds",
            "time_range": 'DATEADD(DATETIME("2021-01-22T00:00:00"), -100, year) : 2021-01-22T00:00:00',
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "column": {
                        "id": 334,
                        "column_name": "name",
                        "verbose_name": "null",
                        "description": "null",
                        "expression": "",
                        "filterable": True,
                        "groupby": True,
                        "is_dttm": False,
                        "type": "VARCHAR(255)",
                        "python_date_format": "null",
                    },
                    "aggregate": "COUNT",
                    "sqlExpression": "null",
                    "isNew": False,
                    "hasCustomLabel": False,
                    "label": "COUNT(name)",
                    "optionName": "metric_xdzsijn42f9_khi4h3v3vci",
                },
                {
                    "expressionType": "SIMPLE",
                    "column": {
                        "id": 332,
                        "column_name": "ds",
                        "verbose_name": "null",
                        "description": "null",
                        "expression": "",
                        "filterable": True,
                        "groupby": True,
                        "is_dttm": True,
                        "type": "TIMESTAMP WITHOUT TIME ZONE",
                        "python_date_format": "null",
                    },
                    "aggregate": "COUNT",
                    "sqlExpression": "null",
                    "isNew": False,
                    "hasCustomLabel": False,
                    "label": "COUNT(ds)",
                    "optionName": "metric_80g1qb9b6o7_ci5vquydcbe",
                },
            ],
            "order_desc": True,
            "adhoc_filters": [],
            "groupby": ["name"],
            "columns": [],
            "row_limit": 10,
            "color_scheme": "supersetColors",
            "label_colors": {},
            "show_legend": True,
            "y_axis_format": "SMART_NUMBER",
            "bottom_margin": "auto",
            "x_ticks_layout": "auto",
        }
        self.login(username="admin")
        rv = self.client.post(
            "/superset/explore_json/", data={"form_data": json.dumps(form_data)},
        )
        data = json.loads(rv.data.decode("utf-8"))
        # Reference result computed directly in SQL for comparison.
        resp = self.run_sql(
            """
            SELECT count(name) AS count_name, count(ds) AS count_ds
            FROM birth_names
            WHERE ds >= '1921-01-22 00:00:00.000000' AND ds < '2021-01-22 00:00:00.000000'
            GROUP BY name
            ORDER BY count_name DESC
            LIMIT 10;
            """,
            client_id="client_id_1",
            user_name="admin",
        )
        count_ds = []
        count_name = []
        for series in data["data"]:
            if series["key"] == "COUNT(ds)":
                count_ds = series["values"]
            if series["key"] == "COUNT(name)":
                count_name = series["values"]
        # The chart series must line up with the SQL rows, in the same order.
        for expected, actual_ds, actual_name in zip(resp["data"], count_ds, count_name):
            assert expected["count_name"] == actual_name["y"]
            assert expected["count_ds"] == actual_ds["y"]
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    @mock.patch.dict(
        "superset.extensions.feature_flag_manager._feature_flags",
        GLOBAL_ASYNC_QUERIES=True,
    )
    def test_explore_json_async(self):
        """With GLOBAL_ASYNC_QUERIES on, explore_json returns a 202 job handle."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = {
            "datasource": f"{tbl_id}__table",
            "viz_type": "dist_bar",
            "time_range_endpoints": ["inclusive", "exclusive"],
            "granularity_sqla": "ds",
            "time_range": "No filter",
            "metrics": ["count"],
            "adhoc_filters": [],
            "groupby": ["gender"],
            "row_limit": 100,
        }
        async_query_manager.init_app(app)
        self.login(username="admin")
        rv = self.client.post(
            "/superset/explore_json/", data={"form_data": json.dumps(form_data)},
        )
        data = json.loads(rv.data.decode("utf-8"))
        keys = list(data.keys())
        # 202 Accepted: the body describes the queued job, not the chart data.
        self.assertEqual(rv.status_code, 202)
        self.assertCountEqual(
            keys, ["channel_id", "job_id", "user_id", "status", "errors", "result_url"]
        )
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    @mock.patch.dict(
        "superset.extensions.feature_flag_manager._feature_flags",
        GLOBAL_ASYNC_QUERIES=True,
    )
    def test_explore_json_async_results_format(self):
        """``?results=true`` forces a synchronous 200 even in async mode."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = {
            "datasource": f"{tbl_id}__table",
            "viz_type": "dist_bar",
            "time_range_endpoints": ["inclusive", "exclusive"],
            "granularity_sqla": "ds",
            "time_range": "No filter",
            "metrics": ["count"],
            "adhoc_filters": [],
            "groupby": ["gender"],
            "row_limit": 100,
        }
        async_query_manager.init_app(app)
        self.login(username="admin")
        rv = self.client.post(
            "/superset/explore_json/?results=true",
            data={"form_data": json.dumps(form_data)},
        )
        self.assertEqual(rv.status_code, 200)
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    @mock.patch(
        "superset.utils.cache_manager.CacheManager.cache",
        new_callable=mock.PropertyMock,
    )
    @mock.patch("superset.viz.BaseViz.force_cached", new_callable=mock.PropertyMock)
    def test_explore_json_data(self, mock_force_cached, mock_cache):
        """explore_json/data serves chart data for a cached form_data key."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = dict(
            {
                "form_data": {
                    "datasource": f"{tbl_id}__table",
                    "viz_type": "dist_bar",
                    "time_range_endpoints": ["inclusive", "exclusive"],
                    "granularity_sqla": "ds",
                    "time_range": "No filter",
                    "metrics": ["count"],
                    "adhoc_filters": [],
                    "groupby": ["gender"],
                    "row_limit": 100,
                }
            }
        )

        class MockCache:
            # Minimal stand-in: every key resolves to the form_data above.
            def get(self, key):
                return form_data

            def set(self):
                return None

        mock_cache.return_value = MockCache()
        mock_force_cached.return_value = False

        self.login(username="admin")
        rv = self.client.get("/superset/explore_json/data/valid-cache-key")
        data = json.loads(rv.data.decode("utf-8"))
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(data["rowcount"], 2)
    @mock.patch(
        "superset.utils.cache_manager.CacheManager.cache",
        new_callable=mock.PropertyMock,
    )
    def test_explore_json_data_no_login(self, mock_cache):
        """Without a session, explore_json/data is rejected with 401."""
        tbl_id = self.table_ids.get("birth_names")
        form_data = dict(
            {
                "form_data": {
                    "datasource": f"{tbl_id}__table",
                    "viz_type": "dist_bar",
                    "time_range_endpoints": ["inclusive", "exclusive"],
                    "granularity_sqla": "ds",
                    "time_range": "No filter",
                    "metrics": ["count"],
                    "adhoc_filters": [],
                    "groupby": ["gender"],
                    "row_limit": 100,
                }
            }
        )

        class MockCache:
            # Every key resolves to the cached form_data above.
            def get(self, key):
                return form_data

            def set(self):
                return None

        mock_cache.return_value = MockCache()

        # Note: no self.login() here — the request is anonymous on purpose.
        rv = self.client.get("/superset/explore_json/data/valid-cache-key")
        self.assertEqual(rv.status_code, 401)
def test_explore_json_data_invalid_cache_key(self):
self.login(username="admin")
cache_key = "invalid-cache-key"
rv = self.client.get(f"/superset/explore_json/data/{cache_key}")
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 404)
self.assertEqual(data["error"], "Cached data not found")
    @mock.patch(
        "superset.security.SupersetSecurityManager.get_schemas_accessible_by_user"
    )
    @mock.patch("superset.security.SupersetSecurityManager.can_access_database")
    @mock.patch("superset.security.SupersetSecurityManager.can_access_all_datasources")
    def test_schemas_access_for_csv_upload_endpoint(
        self,
        mock_can_access_all_datasources,
        mock_can_access_database,
        mock_schemas_accessible,
    ):
        """Only schemas the user can access are offered for file upload."""
        self.login(username="admin")
        dbobj = self.create_fake_db()
        # Deny the broad permissions so the per-schema check is exercised.
        mock_can_access_all_datasources.return_value = False
        mock_can_access_database.return_value = False
        mock_schemas_accessible.return_value = ["this_schema_is_allowed_too"]
        data = self.get_json_resp(
            url="/superset/schemas_access_for_file_upload?db_id={db_id}".format(
                db_id=dbobj.id
            )
        )
        assert data == ["this_schema_is_allowed_too"]
        self.delete_fake_db()
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_select_star(self):
self.login(username="admin")
examples_db = superset.utils.database.get_example_database()
resp = self.get_resp(f"/superset/select_star/{examples_db.id}/birth_names")
self.assertIn("gender", resp)
def test_get_select_star_not_allowed(self):
self.login(username="gamma")
example_db = superset.utils.database.get_example_database()
resp = self.client.get(f"/superset/select_star/{example_db.id}/birth_names")
self.assertEqual(resp.status_code, 403)
@mock.patch("superset.views.core.results_backend_use_msgpack", False)
@mock.patch("superset.views.core.results_backend")
def test_display_limit(self, mock_results_backend):
self.login()
data = [{"col_0": i} for i in range(100)]
payload = {
"status": QueryStatus.SUCCESS,
"query": {"rows": 100},
"data": data,
}
# limit results to 1
expected_key = {"status": "success", "query": {"rows": 100}, "data": data}
limited_data = data[:1]
expected_limited = {
"status": "success",
"query": {"rows": 100},
"data": limited_data,
"displayLimitReached": True,
}
query_mock = mock.Mock()
query_mock.sql = "SELECT *"
query_mock.database = 1
query_mock.schema = "superset"
# do not apply msgpack serialization
use_msgpack = app.config["RESULTS_BACKEND_USE_MSGPACK"]
app.config["RESULTS_BACKEND_USE_MSGPACK"] = False
serialized_payload = sql_lab._serialize_payload(payload, False)
compressed = utils.zlib_compress(serialized_payload)
mock_results_backend.get.return_value = compressed
with mock.patch("superset.views.core.db") as mock_superset_db:
mock_superset_db.session.query().filter_by().one_or_none.return_value = (
query_mock
)
# get all results
result_key = json.loads(self.get_resp("/superset/results/key/"))
result_limited = json.loads(self.get_resp("/superset/results/key/?rows=1"))
self.assertEqual(result_key, expected_key)
self.assertEqual(result_limited, expected_limited)
app.config["RESULTS_BACKEND_USE_MSGPACK"] = use_msgpack
    def test_results_default_deserialization(self):
        """Legacy (JSON) results payloads round-trip without touching the DB."""
        use_new_deserialization = False
        data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
        cursor_descr = (
            ("a", "string"),
            ("b", "int"),
            ("c", "float"),
            ("d", "datetime"),
        )
        db_engine_spec = BaseEngineSpec()
        results = SupersetResultSet(data, cursor_descr, db_engine_spec)
        query = {
            "database_id": 1,
            "sql": "SELECT * FROM birth_names LIMIT 100",
            "status": QueryStatus.PENDING,
        }
        (
            serialized_data,
            selected_columns,
            all_columns,
            expanded_columns,
        ) = sql_lab._serialize_and_expand_data(
            results, db_engine_spec, use_new_deserialization
        )
        payload = {
            "query_id": 1,
            "status": QueryStatus.SUCCESS,
            "state": QueryStatus.SUCCESS,
            "data": serialized_data,
            "columns": all_columns,
            "selected_columns": selected_columns,
            "expanded_columns": expanded_columns,
            "query": query,
        }
        serialized_payload = sql_lab._serialize_payload(
            payload, use_new_deserialization
        )
        # The JSON path serializes to text, not bytes.
        self.assertIsInstance(serialized_payload, str)
        query_mock = mock.Mock()
        deserialized_payload = superset.views.utils._deserialize_results_payload(
            serialized_payload, query_mock, use_new_deserialization
        )
        self.assertDictEqual(deserialized_payload, payload)
        # The query object must not be consulted on the legacy path.
        query_mock.assert_not_called()
    def test_results_msgpack_deserialization(self):
        """Msgpack payloads round-trip and re-expand data via the engine spec."""
        use_new_deserialization = True
        data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
        cursor_descr = (
            ("a", "string"),
            ("b", "int"),
            ("c", "float"),
            ("d", "datetime"),
        )
        db_engine_spec = BaseEngineSpec()
        results = SupersetResultSet(data, cursor_descr, db_engine_spec)
        query = {
            "database_id": 1,
            "sql": "SELECT * FROM birth_names LIMIT 100",
            "status": QueryStatus.PENDING,
        }
        (
            serialized_data,
            selected_columns,
            all_columns,
            expanded_columns,
        ) = sql_lab._serialize_and_expand_data(
            results, db_engine_spec, use_new_deserialization
        )
        payload = {
            "query_id": 1,
            "status": QueryStatus.SUCCESS,
            "state": QueryStatus.SUCCESS,
            "data": serialized_data,
            "columns": all_columns,
            "selected_columns": selected_columns,
            "expanded_columns": expanded_columns,
            "query": query,
        }
        serialized_payload = sql_lab._serialize_payload(
            payload, use_new_deserialization
        )
        # The msgpack path serializes to bytes, not text.
        self.assertIsInstance(serialized_payload, bytes)
        with mock.patch.object(
            db_engine_spec, "expand_data", wraps=db_engine_spec.expand_data
        ) as expand_data:
            query_mock = mock.Mock()
            query_mock.database.db_engine_spec.expand_data = expand_data
            deserialized_payload = superset.views.utils._deserialize_results_payload(
                serialized_payload, query_mock, use_new_deserialization
            )
            df = results.to_pandas_df()
            payload["data"] = dataframe.df_to_records(df)
            self.assertDictEqual(deserialized_payload, payload)
            # Unlike the legacy path, expansion happens at deserialize time.
            expand_data.assert_called_once()
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"FOO": lambda x: 1},
clear=True,
)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_feature_flag_serialization(self):
self.login()
encoded = json.dumps(
{"FOO": lambda x: 1, "super": "set"},
default=utils.pessimistic_json_iso_dttm_ser,
)
html_string = (
html.escape(encoded, quote=False)
.replace("'", "&
.replace('"', """)
)
dash_id = db.session.query(Dashboard.id).first()[0]
tbl_id = self.table_ids.get("wb_health_population")
urls = [
"/superset/sqllab",
"/superset/welcome",
f"/superset/dashboard/{dash_id}/",
"/superset/profile/admin/",
f"/superset/explore/table/{tbl_id}",
]
for url in urls:
data = self.get_resp(url)
self.assertTrue(html_string in data)
    @mock.patch.dict(
        "superset.extensions.feature_flag_manager._feature_flags",
        {"SQLLAB_BACKEND_PERSISTENCE": True},
        clear=True,
    )
    def test_sqllab_backend_persistence_payload(self):
        """Only queries tied to a saved tab appear in the SQL Lab bootstrap."""
        username = "admin"
        self.login(username)
        user_id = security_manager.find_user(username).id
        # Create a tab via the tab-state endpoint.
        data = {
            "queryEditor": json.dumps(
                {
                    "title": "Untitled Query 1",
                    "dbId": 1,
                    "schema": None,
                    "autorun": False,
                    "sql": "SELECT ...",
                    "queryLimit": 1000,
                }
            )
        }
        resp = self.get_json_resp("/tabstateview/", data=data)
        tab_state_id = resp["id"]
        # Run one query bound to the tab...
        self.run_sql(
            "SELECT name FROM birth_names",
            "client_id_1",
            user_name=username,
            raise_on_error=True,
            sql_editor_id=str(tab_state_id),
        )
        # ...and one orphan query with no tab association.
        self.run_sql(
            "SELECT name FROM birth_names",
            "client_id_2",
            user_name=username,
            raise_on_error=True,
        )
        # Only the tab-bound query should be returned in the bootstrap payload.
        payload = views.Superset._get_sqllab_tabs(user_id=user_id)
        self.assertEqual(len(payload["queries"]), 1)
def test_virtual_table_explore_visibility(self):
database = superset.utils.database.get_example_database()
self.assertEqual(database.allows_virtual_table_explore, True)
extra = database.get_extra()
extra["allows_virtual_table_explore"] = False
database.extra = json.dumps(extra)
self.assertEqual(database.allows_virtual_table_explore, False)
extra = database.get_extra()
extra["allows_virtual_table_explore"] = True
database.extra = json.dumps(extra)
self.assertEqual(database.allows_virtual_table_explore, True)
extra = database.get_extra()
extra["allows_virtual_table_explore"] = "trash value"
database.extra = json.dumps(extra)
self.assertEqual(database.allows_virtual_table_explore, True)
def test_explore_database_id(self):
database = superset.utils.database.get_example_database()
explore_database = superset.utils.database.get_example_database()
self.assertEqual(database.explore_database_id, database.id)
extra = database.get_extra()
extra["explore_database_id"] = explore_database.id
database.extra = json.dumps(extra)
self.assertEqual(database.explore_database_id, explore_database.id)
def test_get_column_names_from_metric(self):
simple_metric = {
"expressionType": utils.AdhocMetricExpressionType.SIMPLE.value,
"column": {"column_name": "my_col"},
"aggregate": "SUM",
"label": "My Simple Label",
}
assert utils.get_column_name_from_metric(simple_metric) == "my_col"
sql_metric = {
"expressionType": utils.AdhocMetricExpressionType.SQL.value,
"sqlExpression": "SUM(my_label)",
"label": "My SQL Label",
}
assert utils.get_column_name_from_metric(sql_metric) is None
assert utils.get_column_names_from_metrics([simple_metric, sql_metric]) == [
"my_col"
]
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.models.core.DB_CONNECTION_MUTATOR")
def test_explore_injected_exceptions(self, mock_db_connection_mutator):
exception = SupersetException("Error message")
mock_db_connection_mutator.side_effect = exception
slice = db.session.query(Slice).first()
url = f"/superset/explore/?form_data=%7B%22slice_id%22%3A%20{slice.id}%7D"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
exception = SQLAlchemyError("Error message")
mock_db_connection_mutator.side_effect = exception
slice = db.session.query(Slice).first()
url = f"/superset/explore/?form_data=%7B%22slice_id%22%3A%20{slice.id}%7D"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.models.core.DB_CONNECTION_MUTATOR")
def test_dashboard_injected_exceptions(self, mock_db_connection_mutator):
exception = SupersetException("Error message")
mock_db_connection_mutator.side_effect = exception
dash = db.session.query(Dashboard).first()
url = f"/superset/dashboard/{dash.id}/"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
exception = SQLAlchemyError("Error message")
mock_db_connection_mutator.side_effect = exception
dash = db.session.query(Dashboard).first()
url = f"/superset/dashboard/{dash.id}/"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
    @mock.patch("superset.sql_lab.cancel_query")
    @mock.patch("superset.views.core.db.session")
    def test_stop_query_not_implemented(
        self, mock_superset_db_session, mock_sql_lab_cancel_query
    ):
        """stop_query responds 422 when the engine cannot cancel queries."""
        form_data = {"client_id": "foo"}
        query_mock = mock.Mock()
        query_mock.client_id = "foo"
        query_mock.status = QueryStatus.RUNNING
        self.login(username="admin")
        # NOTE(review): this sets `.return_value` on the object *returned by*
        # one(), not on `one` itself — the mocked session hands the view a
        # plain Mock either way; confirm whether `one.return_value` was meant.
        mock_superset_db_session.query().filter_by().one().return_value = query_mock
        # cancel_query returning False signals "not supported" to the view.
        mock_sql_lab_cancel_query.return_value = False
        rv = self.client.post(
            "/superset/stop_query/", data={"form_data": json.dumps(form_data)},
        )
        assert rv.status_code == 422
if __name__ == "__main__":
    # Allow running this test module directly, without pytest.
    unittest.main()
| true | true |
1c4683a2b04793b6517763584a3577a7502613a2 | 620 | py | Python | adv/pia.py | mattkw/dl | 45bfc28ad9ff827045a3734730deb893a2436c09 | [
"Apache-2.0"
] | null | null | null | adv/pia.py | mattkw/dl | 45bfc28ad9ff827045a3734730deb893a2436c09 | [
"Apache-2.0"
] | null | null | null | adv/pia.py | mattkw/dl | 45bfc28ad9ff827045a3734730deb893a2436c09 | [
"Apache-2.0"
] | null | null | null | import adv_test
from adv import *
from module import energy
def module():
    # Entry point the test harness uses to look up the adventurer class.
    return Pia
class Pia(Adv):
    def pre(this):
        # Swap in the energy-team variant of init before the sim configures
        # this adventurer, when run under the 'energy' condition.
        if this.condition('energy'):
            this.init = this.c_init
    def init(this):
        # Default (solo) rotation: cancel s1/s3 at the 5th combo step.
        this.conf['acl'] = """
        `s1, seq=5 and cancel
        `s3, seq=5 and cancel
        """
        energy.Energy(this,{},{})
    def c_init(this):
        # Energy-condition variant: s2 both generates and spends energy.
        energy.Energy(this,{'s2':1},{'s2':1})
if __name__ == '__main__':
    conf = {}
    # Baseline ACL used when running this file directly.
    conf['acl'] = """
        `s1
        `s2
        `s3
        `fs, seq=5
    """
    adv_test.test(module(), conf, verbose=0)
| 16.756757 | 45 | 0.485484 | import adv_test
from adv import *
from module import energy
def module():
return Pia
class Pia(Adv):
def pre(this):
if this.condition('energy'):
this.init = this.c_init
def init(this):
this.conf['acl'] = """
`s1, seq=5 and cancel
`s3, seq=5 and cancel
"""
energy.Energy(this,{},{})
def c_init(this):
energy.Energy(this,{'s2':1},{'s2':1})
if __name__ == '__main__':
    conf = {}
    # Baseline ACL used when running this file directly.
    conf['acl'] = """
        `s1
        `s2
        `s3
        `fs, seq=5
    """
    adv_test.test(module(), conf, verbose=0)
| true | true |
1c4683a648d2c1ac36ba542e426f889e72d0b473 | 1,925 | py | Python | medexbot/spiders/dosage_form_spider.py | ahmedshahriar/bd-medicine-scraper | ea97d929fc9cdcbdde2602827cdc3d12709e2ca9 | [
"Apache-2.0"
] | 1 | 2022-03-17T03:02:49.000Z | 2022-03-17T03:02:49.000Z | medexbot/spiders/dosage_form_spider.py | ahmedshahriar/bd-medicine-scraper | ea97d929fc9cdcbdde2602827cdc3d12709e2ca9 | [
"Apache-2.0"
] | null | null | null | medexbot/spiders/dosage_form_spider.py | ahmedshahriar/bd-medicine-scraper | ea97d929fc9cdcbdde2602827cdc3d12709e2ca9 | [
"Apache-2.0"
] | null | null | null | import re
import scrapy
from django.utils.text import slugify
from medexbot.items import DosageFormItem
class DosageFormSpider(scrapy.Spider):
name = "dosage"
allowed_domains = ['medex.com.bd']
start_urls = ['https://medex.com.bd/dosage-forms']
def parse(self, response, **kwargs):
for dosage_form_info in response.css('a.hoverable-block'):
dosage_form_link = dosage_form_info.css('a.hoverable-block ::attr("href") ').get()
dosage_form_id = re.findall("dosage-forms/(\S*)/", dosage_form_link)[0]
dosage_form_name = dosage_form_info.css('div.data-row-top img ::attr("title") ').get()
brand_names_counter = dosage_form_info.css('div.data-row-company ::text').re(r"(\d+)")
brand_names_count = 0 if len(brand_names_counter) == 0 else brand_names_counter[0]
yield from response.follow_all(dosage_form_info.css('a.hoverable-block ::attr("href") '),
self.parse_dosage_form,
meta={"dosage_form_id": dosage_form_id, "dosage_form_name": dosage_form_name,
"brand_names_count": brand_names_count})
def parse_dosage_form(self, response):
item = DosageFormItem()
item["dosage_form_id"] = response.request.meta['dosage_form_id']
item['dosage_form_name'] = response.request.meta['dosage_form_name']
item['brand_names_count'] = response.request.meta['brand_names_count']
item['slug'] = slugify(item['dosage_form_name'] + '-' + item['dosage_form_id'],
allow_unicode=True)
# todo brand ids mapping
# brand_name_links = response.css('a.hoverable-block ::attr(href)').extract()
# brand_name_ids = [re.findall("brands/(\S*)/", brand_name_link)[0] for brand_name_link in brand_name_links]
yield item
| 48.125 | 120 | 0.628571 | import re
import scrapy
from django.utils.text import slugify
from medexbot.items import DosageFormItem
class DosageFormSpider(scrapy.Spider):
name = "dosage"
allowed_domains = ['medex.com.bd']
start_urls = ['https://medex.com.bd/dosage-forms']
def parse(self, response, **kwargs):
for dosage_form_info in response.css('a.hoverable-block'):
dosage_form_link = dosage_form_info.css('a.hoverable-block ::attr("href") ').get()
dosage_form_id = re.findall("dosage-forms/(\S*)/", dosage_form_link)[0]
dosage_form_name = dosage_form_info.css('div.data-row-top img ::attr("title") ').get()
brand_names_counter = dosage_form_info.css('div.data-row-company ::text').re(r"(\d+)")
brand_names_count = 0 if len(brand_names_counter) == 0 else brand_names_counter[0]
yield from response.follow_all(dosage_form_info.css('a.hoverable-block ::attr("href") '),
self.parse_dosage_form,
meta={"dosage_form_id": dosage_form_id, "dosage_form_name": dosage_form_name,
"brand_names_count": brand_names_count})
def parse_dosage_form(self, response):
item = DosageFormItem()
item["dosage_form_id"] = response.request.meta['dosage_form_id']
item['dosage_form_name'] = response.request.meta['dosage_form_name']
item['brand_names_count'] = response.request.meta['brand_names_count']
item['slug'] = slugify(item['dosage_form_name'] + '-' + item['dosage_form_id'],
allow_unicode=True)
yield item
| true | true |
1c4684004d467ca7f02daba2bde3f7b30970341c | 2,502 | py | Python | velkoz_web_packages/objects_base/db_orm_models_base.py | MatthewTe/velkoz-web-data-extraction-library | d6acb8bd86106a6ab754be99488436eb37037e54 | [
"MIT"
] | null | null | null | velkoz_web_packages/objects_base/db_orm_models_base.py | MatthewTe/velkoz-web-data-extraction-library | d6acb8bd86106a6ab754be99488436eb37037e54 | [
"MIT"
] | 2 | 2021-03-31T20:12:25.000Z | 2021-12-13T20:48:22.000Z | velkoz_web_packages/objects_base/db_orm_models_base.py | MatthewTe/velkoz-web-data-extraction-library | d6acb8bd86106a6ab754be99488436eb37037e54 | [
"MIT"
] | null | null | null | # Importing the database orm management packages:
from sqlalchemy import Column, Integer, String, Text, DateTime, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
# Creating the declarative base object used to create base database orm models:
Base = declarative_base()
class BaseWebPageResponseModel(Base):
    """ORM mapping for rows produced from BaseWebPageResponse objects.

    Each row records one scrape performed by a BaseWebPageResponse object:
    the time the object was initialized, the HTTP status code it received,
    the URL it was pointed at, and the raw HTML payload it downloaded.
    WebObjects and Ingestion Engines that inherit from the base objects are
    expected to define their own ORM models instead of reusing this default
    table.
    """

    # All base web-object rows land in a single default table.
    __tablename__ = "default_web_obj_tbl"

    # When the BaseWebPageResponse object was initialized; also serves as
    # the primary key, i.e. one row per scrape timestamp.
    date = Column("date_initialized", DateTime, primary_key=True)

    # HTTP response code received from the object's URL.
    response_code = Column("response_code", Integer)

    # URL string the BaseWebPageResponse object was initialized with.
    url = Column("url", Text)

    # Raw HTML bytes scraped from the page; nullable because a scrape can
    # legitimately yield no content.
    html_content = Column("html_content", LargeBinary, nullable=True)

    def __repr__(self):
        return f"BaseWebPageResponse Model({self.url}_{self.date}_{self.response_code})"
| 36.26087 | 92 | 0.71303 |
from sqlalchemy import Column, Integer, String, Text, DateTime, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class BaseWebPageResponseModel(Base):
__tablename__ = "default_web_obj_tbl"
date = Column(
"date_initialized",
DateTime,
primary_key = True
)
response_code = Column(
"response_code",
Integer
)
url = Column(
"url",
Text
)
html_content = Column(
"html_content",
LargeBinary,
nullable = True
)
def __repr__(self):
return f"BaseWebPageResponse Model({self.url}_{self.date}_{self.response_code})"
| true | true |
1c46857cccde346eb4a78768c8061b11e696ca88 | 4,379 | py | Python | contrib/seeds/generate-seeds.py | behi11/vectorium-plus | fb9c68db8e2450c949d75bff9e737562f125be27 | [
"MIT"
] | 5 | 2019-07-09T02:06:22.000Z | 2021-08-08T18:48:03.000Z | contrib/seeds/generate-seeds.py | behi11/vectorium-plus | fb9c68db8e2450c949d75bff9e737562f125be27 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | behi11/vectorium-plus | fb9c68db8e2450c949d75bff9e737562f125be27 | [
"MIT"
] | 6 | 2019-07-09T02:02:14.000Z | 2021-08-06T16:01:01.000Z | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])

def name_to_ipv6(addr):
    '''Convert an address string to its 16-byte IPv6-mapped representation.

    Accepts <name>.onion, dotted IPv4, colon-separated IPv6, and the legacy
    0xDDBBCCAA little-endian IPv4 form; raises ValueError for anything else.
    '''
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # BUG FIX: previously raised with the undefined name 's', which
            # produced a NameError instead of the intended ValueError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    '''Split a "host[:port]" spec into (ipv6_bytes, port).

    Handles [ipv6]:port, bare ipv6, host:port, and bare host forms; the
    host is converted via name_to_ipv6 and *defaultport* fills in a
    missing port.
    '''
    # Raw string fixes the invalid escape sequence '\[' that raises a
    # DeprecationWarning (and eventually a SyntaxError) on modern Python.
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # ipv6
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1: # ipv6, no port
        host = s
        port = ''
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Emit a C SeedSpec6 array named *structname* for the specs listed in *f*.

    Each non-empty, non-comment line of *f* is parsed with parse_spec and
    written as a {{bytes}, port} initializer into *g*.
    """
    entries = []
    for raw in f:
        # Strip a trailing '#' comment (if any) and surrounding whitespace.
        spec = raw.split('#', 1)[0].strip()
        if not spec:
            continue
        host, port = parse_spec(spec, defaultport)
        octets = ','.join('0x%02x' % b for b in host)
        entries.append('    {{%s}, %i}' % (octets, port))
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    '''Write chainparamsseeds.h content to stdout from nodes_main.txt / nodes_test.txt.'''
    # Require the directory containing the two node list files.
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    # Output goes to stdout; the caller redirects it into
    # src/chainparamsseeds.h.
    g = sys.stdout
    indir = sys.argv[1]
    # Header guard and autogeneration banner.
    g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the bitcoin network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    g.write(' */\n')
    # Mainnet seeds default to port 18884, testnet seeds to 28884.
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 18884)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 28884)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
main()
| 31.503597 | 98 | 0.582553 |
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % s)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 18884)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 28884)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true | true |
1c468609ae8810d5516216a769cb965d04a1942a | 3,389 | py | Python | contrib/devtools/optimize-pngs.py | xraymemory/manna | b2f118bdce9b6a128ef171798ab3fac483517afa | [
"MIT"
] | null | null | null | contrib/devtools/optimize-pngs.py | xraymemory/manna | b2f118bdce9b6a128ef171798ab3fac483517afa | [
"MIT"
] | null | null | null | contrib/devtools/optimize-pngs.py | xraymemory/manna | b2f118bdce9b6a128ef171798ab3fac483517afa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2014-2016 The manna Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
    """Return the hex SHA-256 digest of the file's raw bytes."""
    with open(filename, 'rb') as fh:
        raw = fh.read()
    return hashlib.sha256(raw).hexdigest()
def content_hash(filename):
    """Return the hex SHA-256 digest of the image's RGBA pixel data.

    Unlike file_hash, this ignores file-level metadata (color profiles,
    text chunks): only the decoded pixel bytes are hashed.
    """
    rgba = Image.open(filename).convert('RGBA')
    return hashlib.sha256(rgba.tobytes()).hexdigest()
# NOTE: this script uses Python 2 print statements and will not run
# unmodified under Python 3.
# External tools resolved via PATH; pngcrush must be installed.
pngcrush = 'pngcrush'
git = 'git'
# Repo-relative directories containing the PNGs to optimize in place.
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True
outputArray = []
# Crush every .png in each folder, recording file size and hashes before
# and after so the summary below can report the savings per file.
for folder in folders:
    absFolder=os.path.join(basePath, folder)
    for file in os.listdir(absFolder):
        extension = os.path.splitext(file)[1]
        if extension.lower() == '.png':
            print("optimizing "+file+"..."),
            file_path = os.path.join(absFolder, file)
            fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)}
            fileMetaMap['contentHashPre'] = content_hash(file_path)
            pngCrushOutput = ""
            try:
                # -ow overwrites the file in place; the -rem flags strip
                # color profiles and ancillary/text chunks.
                pngCrushOutput = subprocess.check_output(
                    [pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
                    stderr=subprocess.STDOUT).rstrip('\n')
            except:
                print "pngcrush is not installed, aborting..."
                sys.exit(0)
            #verify
            if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
                print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
                sys.exit(1)
            fileMetaMap['sha256New'] = file_hash(file_path)
            fileMetaMap['contentHashPost'] = content_hash(file_path)
            # Pixel data must survive crushing byte-for-byte; only metadata
            # may change.
            if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
                print "Image contents of PNG file "+file+" before and after crushing don't match"
                sys.exit(1)
            fileMetaMap['psize'] = os.path.getsize(file_path)
            outputArray.append(fileMetaMap)
            print("done\n"),
# Per-file summary plus the overall checksum stability and byte savings.
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
    oldHash = fileDict['sha256Old']
    newHash = fileDict['sha256New']
    totalSaveBytes += fileDict['osize'] - fileDict['psize']
    noHashChange = noHashChange and (oldHash == newHash)
    print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"
print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
| 42.898734 | 193 | 0.623488 |
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
'''Return hash of raw file contents'''
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def content_hash(filename):
'''Return hash of RGBA contents of image'''
i = Image.open(filename)
i = i.convert('RGBA')
data = i.tobytes()
return hashlib.sha256(data).hexdigest()
pngcrush = 'pngcrush'
git = 'git'
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True
outputArray = []
for folder in folders:
absFolder=os.path.join(basePath, folder)
for file in os.listdir(absFolder):
extension = os.path.splitext(file)[1]
if extension.lower() == '.png':
print("optimizing "+file+"..."),
file_path = os.path.join(absFolder, file)
fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)}
fileMetaMap['contentHashPre'] = content_hash(file_path)
pngCrushOutput = ""
try:
pngCrushOutput = subprocess.check_output(
[pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
stderr=subprocess.STDOUT).rstrip('\n')
except:
print "pngcrush is not installed, aborting..."
sys.exit(0)
if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
sys.exit(1)
fileMetaMap['sha256New'] = file_hash(file_path)
fileMetaMap['contentHashPost'] = content_hash(file_path)
if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
print "Image contents of PNG file "+file+" before and after crushing don't match"
sys.exit(1)
fileMetaMap['psize'] = os.path.getsize(file_path)
outputArray.append(fileMetaMap)
print("done\n"),
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
oldHash = fileDict['sha256Old']
newHash = fileDict['sha256New']
totalSaveBytes += fileDict['osize'] - fileDict['psize']
noHashChange = noHashChange and (oldHash == newHash)
print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"
print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
| false | true |
1c46866d970d2c1b41f7f0473196477642241b48 | 5,200 | py | Python | src/python/dxpy/scripts/dx_reads_to_fastq.py | psung/dx-toolkit | f3a430c5e24184215eb4a9883a179edf07bfa08b | [
"Apache-2.0"
] | null | null | null | src/python/dxpy/scripts/dx_reads_to_fastq.py | psung/dx-toolkit | f3a430c5e24184215eb4a9883a179edf07bfa08b | [
"Apache-2.0"
] | null | null | null | src/python/dxpy/scripts/dx_reads_to_fastq.py | psung/dx-toolkit | f3a430c5e24184215eb4a9883a179edf07bfa08b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (C) 2013-2014 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys, argparse
import dxpy
# Module-level CLI definition; main() falls back to parsing sys.argv with
# this parser when called without keyword arguments.
arg_parser = argparse.ArgumentParser(description="Download a reads table into a FASTQ file")
arg_parser.add_argument("reads_table", help="ID of the reads GTable object")
arg_parser.add_argument("--output", help="Name of the output file", required=True)
arg_parser.add_argument("--output2", help="Name of the second output file (for paired reads)")
arg_parser.add_argument("--discard_names", help="Discard read names", type=bool, default=False)
arg_parser.add_argument("--output_FASTA", help="Output FASTA instead of FASTQ", type=bool, default=False)
arg_parser.add_argument("-s", "--start_row", help="Start at this table row", type=int, default=0)
arg_parser.add_argument("-e", "--end_row", help="End at this table row", type=int, default=None)
def main(**kwargs):
    """Export a DNAnexus reads GTable to a FASTQ (or FASTA) file.

    When called with no keyword arguments, parameters are parsed from
    sys.argv via arg_parser.  Writes kwargs['output'] and, for paired
    reads, kwargs['output2'].  Raises dxpy.AppError on invalid input.
    """
    if len(kwargs) == 0:
        kwargs = vars(arg_parser.parse_args(sys.argv[1:]))
    if "end_row" not in kwargs:
        kwargs["end_row"] = None
    if kwargs["end_row"] is not None and kwargs["end_row"] <= kwargs["start_row"]:
        arg_parser.error("End row %d must be greater than start row %d" % (kwargs["end_row"], kwargs["start_row"]))
    try:
        table = dxpy.DXGTable(kwargs['reads_table'])
    except Exception:
        # BUG FIX: narrowed from a bare 'except:' so SystemExit and
        # KeyboardInterrupt are no longer swallowed and rewrapped.
        raise dxpy.AppError("Failed to open table for export")
    existCols = table.get_col_names()
    ### sort out columns to download
    col = []
    col2 = []
    # if there's a second sequence, it's paired
    if "sequence2" in existCols:
        isPaired = True
    else:
        isPaired = False
    if "name" in existCols and kwargs['discard_names'] != True:
        hasName = True
        col.append("name")
        if isPaired == True:
            col2.append("name2")
    else:
        hasName = False
    col.append("sequence")
    if isPaired == True:
        col2.append("sequence2")
    if "quality" in existCols:
        hasQual = True
        col.append("quality")
        if isPaired == True:
            col2.append("quality2")
    else:
        hasQual = False
        # if we don't have quals we must output FASTA instead
        kwargs['output_FASTA'] = True
    if kwargs['output'] is None:
        raise dxpy.AppError("output parameter is required")
    # NOTE(review): files are opened in binary mode while exportToFile
    # writes str; this matches the original Python 2-era behavior --
    # confirm before running under Python 3.
    with open(kwargs['output'], 'wb') as out_fh:
        exportToFile(columns=col, table=table, output_file=out_fh, hasName=hasName, hasQual=hasQual, FASTA=kwargs['output_FASTA'], start_row=kwargs['start_row'], end_row=kwargs['end_row'])
    if isPaired == True:
        if kwargs['output2'] is None:
            raise dxpy.AppError("output2 parameter is required for paired reads")
        with open(kwargs['output2'], 'wb') as out_fh2:
            exportToFile(columns=col2, table=table, output_file=out_fh2, hasName=hasName, hasQual=hasQual, FASTA=kwargs['output_FASTA'], start_row=kwargs['start_row'], end_row=kwargs['end_row'])
def exportToFile(columns, table, output_file, hasName = True, hasQual = True, FASTA = False, start_row = 0, end_row = None):
    """Stream rows of *table* into *output_file* as FASTQ or FASTA records.

    columns lists the table columns in (name, sequence, quality) order when
    present; hasName/hasQual say which of those are included.  FASTA=True
    emits '>'-prefixed records and drops qualities.  Closes *output_file*
    and returns its name.

    NOTE(review): this closes the caller's file handle even though main()
    already manages it with a 'with' block -- harmless double close, but
    confirm if refactoring.
    """
    for row in table.iterate_rows(start=start_row, end=end_row, columns=columns):
        if FASTA == True:
            if hasName == True:
                # change comment character for FASTA
                if row[0][0] == '@':
                    row[0] = u'>' + row[0][1:]
                # if already has comment character (>)
                if row[0][0] == ">":
                    output_file.write('\n'.join([ row[0], row[1] ]))
                # otherwise, add it
                else:
                    output_file.write('\n'.join([">" + row[0], row[1] ]))
            else:
                # Nameless FASTA: bare '>' header, sequence is row[0].
                output_file.write('\n'.join([">", row[0]]))
        #output FASTQ
        else:
            if hasName == True:
                # if alread has comment character (@)
                if row[0][0] == "@":
                    output_file.write('\n'.join([ row[0], row[1] ]))
                # otherwise, add it
                else:
                    output_file.write('\n'.join(["@" + row[0], row[1] ]))
                # add qualities if they exist
                if hasQual == True:
                    output_file.write('\n'.join(["\n+", row[2] ]))
            # else add without name
            else:
                output_file.write('\n'.join(["@", row[0]]))
                if hasQual == True:
                    output_file.write('\n'.join(['', "+", row[1] ]))
        # end of current record
        output_file.write('\n')
    output_file.close()
    return output_file.name
main()
| 38.518519 | 194 | 0.603846 |
import sys, argparse
import dxpy
arg_parser = argparse.ArgumentParser(description="Download a reads table into a FASTQ file")
arg_parser.add_argument("reads_table", help="ID of the reads GTable object")
arg_parser.add_argument("--output", help="Name of the output file", required=True)
arg_parser.add_argument("--output2", help="Name of the second output file (for paired reads)")
arg_parser.add_argument("--discard_names", help="Discard read names", type=bool, default=False)
arg_parser.add_argument("--output_FASTA", help="Output FASTA instead of FASTQ", type=bool, default=False)
arg_parser.add_argument("-s", "--start_row", help="Start at this table row", type=int, default=0)
arg_parser.add_argument("-e", "--end_row", help="End at this table row", type=int, default=None)
def main(**kwargs):
if len(kwargs) == 0:
kwargs = vars(arg_parser.parse_args(sys.argv[1:]))
if "end_row" not in kwargs:
kwargs["end_row"] = None
if kwargs["end_row"] is not None and kwargs["end_row"] <= kwargs["start_row"]:
arg_parser.error("End row %d must be greater than start row %d" % (kwargs["end_row"], kwargs["start_row"]))
try:
table = dxpy.DXGTable(kwargs['reads_table'])
except:
raise dxpy.AppError("Failed to open table for export")
existCols = table.get_col_names()
tCols:
isPaired = True
else:
isPaired = False
if "name" in existCols and kwargs['discard_names'] != True:
hasName = True
col.append("name")
if isPaired == True:
col2.append("name2")
else:
hasName = False
col.append("sequence")
if isPaired == True:
col2.append("sequence2")
if "quality" in existCols:
hasQual = True
col.append("quality")
if isPaired == True:
col2.append("quality2")
else:
hasQual = False
kwargs['output_FASTA'] = True
if kwargs['output'] is None:
raise dxpy.AppError("output parameter is required")
with open(kwargs['output'], 'wb') as out_fh:
exportToFile(columns=col, table=table, output_file=out_fh, hasName=hasName, hasQual=hasQual, FASTA=kwargs['output_FASTA'], start_row=kwargs['start_row'], end_row=kwargs['end_row'])
if isPaired == True:
if kwargs['output2'] is None:
raise dxpy.AppError("output2 parameter is required for paired reads")
with open(kwargs['output2'], 'wb') as out_fh2:
exportToFile(columns=col2, table=table, output_file=out_fh2, hasName=hasName, hasQual=hasQual, FASTA=kwargs['output_FASTA'], start_row=kwargs['start_row'], end_row=kwargs['end_row'])
def exportToFile(columns, table, output_file, hasName = True, hasQual = True, FASTA = False, start_row = 0, end_row = None):
for row in table.iterate_rows(start=start_row, end=end_row, columns=columns):
if FASTA == True:
if hasName == True:
# change comment character for FASTA
if row[0][0] == '@':
row[0] = u'>' + row[0][1:]
# if already has comment character (>)
if row[0][0] == ">":
output_file.write('\n'.join([ row[0], row[1] ]))
# otherwise, add it
else:
output_file.write('\n'.join([">" + row[0], row[1] ]))
else:
output_file.write('\n'.join([">", row[0]]))
#output FASTQ
else:
if hasName == True:
# if alread has comment character (@)
if row[0][0] == "@":
output_file.write('\n'.join([ row[0], row[1] ]))
# otherwise, add it
else:
output_file.write('\n'.join(["@" + row[0], row[1] ]))
# add qualities if they exist
if hasQual == True:
output_file.write('\n'.join(["\n+", row[2] ]))
# else add without name
else:
output_file.write('\n'.join(["@", row[0]]))
if hasQual == True:
output_file.write('\n'.join(['', "+", row[1] ]))
# end of current record
output_file.write('\n')
output_file.close()
return output_file.name
if __name__ == '__main__':
main()
| true | true |
1c468a5f559d58aac8a33d2176a44891f8e19041 | 88 | py | Python | hibiapi/api/tieba/net.py | cleoold/HibiAPI | d997c5a2bf3cdbccc758d7036447e443c6b6f0ff | [
"Apache-2.0"
] | 394 | 2020-12-19T05:51:02.000Z | 2022-03-30T07:44:42.000Z | hibiapi/api/tieba/net.py | cleoold/HibiAPI | d997c5a2bf3cdbccc758d7036447e443c6b6f0ff | [
"Apache-2.0"
] | 208 | 2020-12-20T14:47:31.000Z | 2022-03-31T11:11:00.000Z | hibiapi/api/tieba/net.py | cleoold/HibiAPI | d997c5a2bf3cdbccc758d7036447e443c6b6f0ff | [
"Apache-2.0"
] | 93 | 2020-12-29T08:19:04.000Z | 2022-03-30T06:08:16.000Z | from hibiapi.utils.net import BaseNetClient
class NetRequest(BaseNetClient):
    """Tieba API network client; inherits all behavior from BaseNetClient."""

    pass
| 14.666667 | 43 | 0.795455 | from hibiapi.utils.net import BaseNetClient
class NetRequest(BaseNetClient):
pass
| true | true |
1c468a5f5e92b0751d052266b0d99570a56f5837 | 2,539 | py | Python | Normal_Equation/prep.py | Globe-Eater/Geographic-Duplicate-Detection- | ec467fc41cb456959da87fd913465dc9daa27d80 | [
"MIT"
] | null | null | null | Normal_Equation/prep.py | Globe-Eater/Geographic-Duplicate-Detection- | ec467fc41cb456959da87fd913465dc9daa27d80 | [
"MIT"
] | null | null | null | Normal_Equation/prep.py | Globe-Eater/Geographic-Duplicate-Detection- | ec467fc41cb456959da87fd913465dc9daa27d80 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
def start():
    """Prompt the user for a filename and load it from
    datasets/unprepared_data/ as a pandas DataFrame."""
    filename = input("Please enter the path for the data:")
    return pd.read_excel("datasets/unprepared_data/" + filename)
def fill_empty(df):
    """Replace empty / whitespace-only cells and NaNs with "No Data".

    Returns a new DataFrame; the input frame is not modified in place.
    Example: df = fill_empty(df)
    """
    # Whitespace-only strings become NaN first, so a single fillna pass
    # covers both genuinely missing values and blank cells.
    blanked = df.replace(r'^\s*$', np.nan, regex=True)
    return blanked.fillna(value="No Data")
def check_numbers(lat, long):
    '''Validate that coordinates fall inside Oklahoma (not yet implemented).

    Intended inputs: the df['Lat'] and df['Long'] columns.
    TODO: implement the bounding-box check (roughly lat 33.6 to 37.0,
    long -103.0 to -94.4 -- confirm exact bounds) and report whether all
    values are within Oklahoma or which rows fall outside it.
    '''
    # TODO: assert the coordinates are within Oklahoma's bounds.
    pass
def prep(df):
    """Project the frame down to the columns used by the duplicate-detection
    model, in a fixed order.

    Dev note (original): label fields such as duplicate_check are converted
    to 1/0 separately via labels().
    """
    wanted = ['OBJECTID', 'PROPNAME', 'ADDRESS', 'RESNAME', 'Lat', 'Long',
              'duplicate_check']
    return df.loc[:, wanted]
def labels(x):
    """Encode a duplicate_check value: 1 for 'pos_dup', 0 for anything else
    (including 'good' and 'No Data')."""
    return 1 if x == 'pos_dup' else 0
def saver(df):
    """Interactively offer to save *df* to an Excel file.

    Asks whether to save; on 'y', prompts for a path until the write
    succeeds. 'n' exits without saving.
    """
    while True:
        # BUG FIX: the original read the answer once *before* the loop, so
        # any reply other than 'y'/'n' spun the loop forever without ever
        # re-prompting. The prompt now lives inside the loop.
        user_input = input("Would you like to save y/n?: ")
        if user_input == 'n':
            break
        elif user_input == 'y':
            path = input('Please input a valid path and filename such as /path/to/file/.xlsx : ')
            try:
                df.to_excel(path)
                print("File successfully saved.")
                break
            except FileNotFoundError:
                print("Path was not found please try again")
        else:
            print("Please answer 'y' or 'n'.")
if __name__ == '__main__':
    # Pipeline: load -> normalize empty cells -> select model columns ->
    # encode duplicate_check labels -> interactively save.
    dataframe = start()
    dataframe = fill_empty(dataframe)
    dataframe = prep(dataframe)
    dataframe['duplicate_check'] = dataframe['duplicate_check'].apply(labels)
    # NOTE(review): head() returns a preview that is discarded here; it has
    # no effect outside an interactive session -- confirm it is intentional.
    dataframe.head()
    saver(dataframe)
| 34.310811 | 98 | 0.633714 | import numpy as np
import pandas as pd
def start():
df = pd.read_excel("datasets/unprepared_data/" + input("Please enter the path for the data:"))
return df
def fill_empty(df):
df = df.replace(r'^\s*$', np.nan, regex=True)
df = df.fillna(value="No Data")
return df
def check_numbers(lat, long):
'RESNAME', 'Lat', 'Long', 'duplicate_check']]
return df
def labels(x):
if x == 'pos_dup':
return 1
elif x == 'good':
return 0
elif x == 'No Data':
return 0
else:
return 0
def saver(df):
user_input = input("Would you like to save y/n?: ")
question = True
while question:
if user_input == 'n':
break
elif user_input == 'y':
path = input('Please input a valid path and filename such as /path/to/file/.xlsx : ')
try:
df.to_excel(path)
print("File successfully saved.")
question = False
except FileNotFoundError:
print("Path was not found please try again")
if __name__ == '__main__':
dataframe = start()
dataframe = fill_empty(dataframe)
dataframe = prep(dataframe)
dataframe['duplicate_check'] = dataframe['duplicate_check'].apply(labels)
dataframe.head()
saver(dataframe)
| true | true |
1c468cbad038e07db05af79f02c42510983d4d81 | 5,341 | py | Python | sdk/python/pulumi_kubernetes/rbac/v1/ClusterRole.py | Carlangueitor/pulumi-kubernetes | 859ccaaeb8291de49128dbc202fbac1358b2a25a | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/rbac/v1/ClusterRole.py | Carlangueitor/pulumi-kubernetes | 859ccaaeb8291de49128dbc202fbac1358b2a25a | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/rbac/v1/ClusterRole.py | Carlangueitor/pulumi-kubernetes | 859ccaaeb8291de49128dbc202fbac1358b2a25a | [
"Apache-2.0"
] | null | null | null | # *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class ClusterRole(pulumi.CustomResource):
    """
    ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit
    by a RoleBinding or ClusterRoleBinding.

    NOTE: this class lives in a codegen'd file (see the file header) -- changes
    here will be overwritten the next time the Pulumi Kubernetes codegen runs.
    """
    apiVersion: pulumi.Output[str]
    """
    APIVersion defines the versioned schema of this representation of an object. Servers should
    convert recognized schemas to the latest internal value, and may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
    """
    kind: pulumi.Output[str]
    """
    Kind is a string value representing the REST resource this object represents. Servers may infer
    this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More
    info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
    """
    aggregation_rule: pulumi.Output[dict]
    """
    AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
    If AggregationRule is set, then the Rules are controller managed and direct changes to Rules
    will be stomped by the controller.
    """
    metadata: pulumi.Output[dict]
    """
    Standard object's metadata.
    """
    rules: pulumi.Output[list]
    """
    Rules holds all the PolicyRules for this ClusterRole
    """
    def __init__(self, resource_name, opts=None, aggregation_rule=None, metadata=None, rules=None, __name__=None, __opts__=None):
        """
        Create a ClusterRole resource with the given unique name, arguments, and options.
        :param str resource_name: The _unique_ name of the resource.
        :param pulumi.ResourceOptions opts: A bag of options that control this resource's behavior.
        :param pulumi.Input[dict] aggregation_rule: AggregationRule is an optional field that describes how to build the Rules for this
               ClusterRole. If AggregationRule is set, then the Rules are controller managed and
               direct changes to Rules will be stomped by the controller.
        :param pulumi.Input[dict] metadata: Standard object's metadata.
        :param pulumi.Input[list] rules: Rules holds all the PolicyRules for this ClusterRole
        """
        # Back-compat shims for the deprecated __name__/__opts__ keyword args.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        # Resource input properties; apiVersion/kind are fixed for this type.
        __props__ = dict()
        __props__['apiVersion'] = 'rbac.authorization.k8s.io/v1'
        __props__['kind'] = 'ClusterRole'
        __props__['aggregationRule'] = aggregation_rule
        __props__['metadata'] = metadata
        __props__['rules'] = rules
        __props__['status'] = None
        parent = opts.parent if opts and opts.parent else None
        # Aliases tell the Pulumi engine that the same resource registered
        # under older RBAC API versions is this resource, avoiding a
        # delete/recreate when a program upgrades the apiVersion.
        aliases = [
            pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1alpha1:ClusterRole"),
            pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRole"),
        ]
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
            version=version.get_version(),
            aliases=aliases,
        ))
        super(ClusterRole, self).__init__(
            "kubernetes:rbac.authorization.k8s.io/v1:ClusterRole",
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None):
        """
        Get the state of an existing `ClusterRole` resource, as identified by `id`.
        The ID is of the form `[namespace]/[name]`; if `[namespace]` is omitted,
        then (per Kubernetes convention) the ID becomes `default/[name]`.
        Pulumi will keep track of this resource using `resource_name` as the Pulumi ID.
        :param str resource_name: _Unique_ name used to register this resource with Pulumi.
        :param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
               Takes the form `[namespace]/[name]` or `[name]`.
        :param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
               resource's behavior.
        """
        # Setting opts.id makes the constructor *read* existing state instead
        # of creating a new resource.
        opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        return ClusterRole(resource_name, opts)
    def translate_output_property(self, prop: str) -> str:
        # Map camelCase engine outputs to snake_case Python attribute names.
        return tables._CASING_FORWARD_TABLE.get(prop) or prop
    def translate_input_property(self, prop: str) -> str:
        # Map snake_case Python inputs back to camelCase Kubernetes fields.
        return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| 42.728 | 135 | 0.683767 |
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class ClusterRole(pulumi.CustomResource):
apiVersion: pulumi.Output[str]
kind: pulumi.Output[str]
aggregation_rule: pulumi.Output[dict]
metadata: pulumi.Output[dict]
rules: pulumi.Output[list]
def __init__(self, resource_name, opts=None, aggregation_rule=None, metadata=None, rules=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'rbac.authorization.k8s.io/v1'
__props__['kind'] = 'ClusterRole'
__props__['aggregationRule'] = aggregation_rule
__props__['metadata'] = metadata
__props__['rules'] = rules
__props__['status'] = None
parent = opts.parent if opts and opts.parent else None
aliases = [
pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1alpha1:ClusterRole"),
pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRole"),
]
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
version=version.get_version(),
aliases=aliases,
))
super(ClusterRole, self).__init__(
"kubernetes:rbac.authorization.k8s.io/v1:ClusterRole",
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None):
opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
return ClusterRole(resource_name, opts)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| true | true |
1c468d4a46a3aba4597155860cda3864d9364bfc | 3,145 | py | Python | test/functional/mining_getblocktemplate_longpoll.py | denofdevscrypto/Topaz2.0 | 34ca0e644a6b5d9524a06156568fc11c89dcffed | [
"MIT"
] | null | null | null | test/functional/mining_getblocktemplate_longpoll.py | denofdevscrypto/Topaz2.0 | 34ca0e644a6b5d9524a06156568fc11c89dcffed | [
"MIT"
] | null | null | null | test/functional/mining_getblocktemplate_longpoll.py | denofdevscrypto/Topaz2.0 | 34ca0e644a6b5d9524a06156568fc11c89dcffed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
from test_framework.test_framework import TOPAZTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
    """Background worker that issues one blocking getblocktemplate longpoll."""

    def __init__(self, node):
        super().__init__()
        # Capture the longpollid the node advertises right now; the request
        # issued in run() blocks until that template goes stale.
        self.longpollid = node.getblocktemplate()['longpollid']
        # RPC connections must not be shared between threads, so this worker
        # opens its own proxy to the same node.
        self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)

    def run(self):
        """Block until the node reports that a newer template is available."""
        self.node.getblocktemplate({'longpollid': self.longpollid})
class GetBlockTemplateLPTest(TOPAZTestFramework):
    """Functional test for getblocktemplate long-polling.

    Checks that a longpolling getblocktemplate call blocks while nothing
    changes, and returns when the template becomes stale: a block arriving
    from a peer, a block mined locally, or a new mempool transaction.
    """
    def set_test_params(self):
        # Two nodes: node 1 mines a block that node 0's longpoll must notice.
        self.num_nodes = 2
    def run_test(self):
        self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
        self.nodes[0].generate(10)
        templat = self.nodes[0].getblocktemplate()
        longpollid = templat['longpollid']
        # longpollid should not change between successive invocations if nothing else happens
        templat2 = self.nodes[0].getblocktemplate()
        assert(templat2['longpollid'] == longpollid)
        # Test 1: the longpoll must keep waiting while we do nothing
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5) # wait 5 seconds or until thread exits
        assert(thr.is_alive())
        # Test 2: longpoll must terminate when another node generates a block
        self.nodes[1].generate(1) # generate a block on another node
        # check that thread will exit now that a new block invalidated the template
        thr.join(5) # wait 5 seconds or until thread exits
        assert(not thr.is_alive())
        # Test 3: longpoll must terminate when we generate a block ourselves
        thr = LongpollThread(self.nodes[0])
        thr.start()
        self.nodes[0].generate(1) # generate a block on this node
        thr.join(5) # wait 5 seconds or until thread exits
        assert(not thr.is_alive())
        # Test 4: a new transaction entering the mempool must terminate the longpoll
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it
        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
        # min_relay_fee is fee per 1000 bytes, which should be more than enough.
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert(not thr.is_alive())
# Standard entry point so the test can be run directly from the command line.
if __name__ == '__main__':
    GetBlockTemplateLPTest().main()
| 42.5 | 112 | 0.686169 |
from decimal import Decimal
from test_framework.test_framework import TOPAZTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(TOPAZTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
self.nodes[0].generate(1) # generate a block on another node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| true | true |
1c468d8b2783656346e7be58ba7b95f781ed035f | 4,896 | py | Python | 2019_noe_deep_boltzmann_tfv2/deep_boltzmann/networks/noninvertible.py | zierenberg/machine_learning_muca | 6fcca12ccda7680ea4cb0e1f10bb53a68b6b0a02 | [
"CC0-1.0"
] | null | null | null | 2019_noe_deep_boltzmann_tfv2/deep_boltzmann/networks/noninvertible.py | zierenberg/machine_learning_muca | 6fcca12ccda7680ea4cb0e1f10bb53a68b6b0a02 | [
"CC0-1.0"
] | null | null | null | 2019_noe_deep_boltzmann_tfv2/deep_boltzmann/networks/noninvertible.py | zierenberg/machine_learning_muca | 6fcca12ccda7680ea4cb0e1f10bb53a68b6b0a02 | [
"CC0-1.0"
] | null | null | null | import keras
import tensorflow as tf
import numpy as np
from deep_boltzmann.networks import nonlinear_transform
from deep_boltzmann.networks import connect as _connect
class NormalTransformer(object):
    """Conditional Gaussian layer: x1 = mu(x0) + exp(log_sigma(x0)) * w1.

    mu and log_sigma are each parameterized by a stack of Keras layers that
    is applied to the previous variable x0.
    """
    def __init__(self, mu_layers, sigma_layers):
        self.mu_layers = mu_layers
        self.sigma_layers = sigma_layers

    def _compute_x1(self, mu, log_sigma, w1):
        """Reparameterized sample from N(mu, exp(log_sigma)^2)."""
        sigma = tf.exp(log_sigma)
        return mu + sigma * w1

    def _compute_log_p1(self, mu, log_sigma, x1):
        """Per-sample Gaussian log-density of x1 (up to the constant term)."""
        z = (x1 - mu) / (tf.exp(log_sigma))
        log_det = tf.reduce_sum(input_tensor=log_sigma, axis=1)
        return -log_det - 0.5 * tf.reduce_sum(input_tensor=z ** 2, axis=1)

    def connect(self, x0, w1):
        """Wire the layer into the graph.

        :param x0: previous variable tensor.
        :param w1: standard-normal noise tensor.
        :returns: (x1, log_p1) -- the transformed variable and its
                  conditional log-density given x0.
        """
        mu = _connect(x0, self.mu_layers)
        log_sigma = _connect(x0, self.sigma_layers)
        self.x1 = keras.layers.Lambda(
            lambda args: self._compute_x1(*args))([mu, log_sigma, w1])
        self.log_p1 = keras.layers.Lambda(
            lambda args: self._compute_log_p1(*args))([mu, log_sigma, self.x1])
        return self.x1, self.log_p1
class NormalResidualTransformer(object):
    """Residual conditional Gaussian layer: x1 = x0 + mu(x0) + exp(log_sigma(x0)) * w1.

    Like NormalTransformer, but the Gaussian is centered on the previous
    variable x0 so each layer only learns an increment.
    """
    def __init__(self, mu_layers, sigma_layers):
        self.mu_layers = mu_layers
        self.sigma_layers = sigma_layers

    def _compute_x1(self, x0, mu, log_sigma, w1):
        """Reparameterized sample from N(x0 + mu, exp(log_sigma)^2)."""
        sigma = tf.exp(log_sigma)
        return x0 + mu + sigma * w1

    def _compute_log_p1(self, x0, mu, log_sigma, x1):
        """Per-sample Gaussian log-density of x1 (up to the constant term)."""
        z = (x1 - x0 - mu) / (tf.exp(log_sigma))
        log_det = tf.reduce_sum(input_tensor=log_sigma, axis=1)
        return -log_det - 0.5 * tf.reduce_sum(input_tensor=z ** 2, axis=1)

    def connect(self, x0, w1):
        """Wire the layer into the graph.

        :param x0: previous variable tensor.
        :param w1: standard-normal noise tensor.
        :returns: (x1, log_p1) -- the transformed variable and its
                  conditional log-density given x0.
        """
        mu = _connect(x0, self.mu_layers)
        log_sigma = _connect(x0, self.sigma_layers)
        self.x1 = keras.layers.Lambda(
            lambda args: self._compute_x1(*args))([x0, mu, log_sigma, w1])
        self.log_p1 = keras.layers.Lambda(
            lambda args: self._compute_log_p1(*args))([x0, mu, log_sigma, self.x1])
        return self.x1, self.log_p1
class NoninvNet(object):
    """Non-invertible generator network built from a stack of transformer layers.

    Each layer maps the previous variable x_{i-1} plus fresh noise w_i to a
    new variable x_i and reports the conditional log-density
    log p(x_i | x_{i-1}).
    """
    def __init__(self, dim, layers):
        # dim: dimensionality of the generated variable.
        # layers: objects exposing connect(x_prev, w) -> (x_new, log_p),
        #         e.g. NormalTransformer / NormalResidualTransformer.
        self.dim = dim
        self.layers = layers
        self.log_p_total = None
    def connect(self):
        """Build the Keras graph: chain all layers and collect densities."""
        # Initial input x0 -- presumably fed with zeros by the caller
        # (original author note read "x0 = 0"); TODO confirm against usage.
        self.x0 = keras.layers.Input(shape=(self.dim,)) # current noise input
        x_last = self.x0
        self.xs = []
        self.ws = []
        self.log_ps = []
        for layer in self.layers:
            # one fresh noise input per layer
            w = keras.layers.Input(shape=(self.dim,)) # current noise input
            self.ws.append(w)
            # transform the previous variable and get its conditional density
            x, log_p = layer.connect(x_last, w)
            self.xs.append(x) # new state
            self.log_ps.append(log_p) # conditional generation probability
            # chain: this layer's output feeds the next layer
            x_last = x
        # final network output is the last layer's variable
        self.x_out = self.xs[-1]
        # total log-probability: sum of the per-layer conditionals.
        # NOTE(review): the Lambda receives the *list* self.log_ps and
        # reduces over axis=0 -- verify this stacks as intended in the
        # Keras/TF version in use.
        self.log_p_total = keras.layers.Lambda(lambda arg: tf.reduce_sum(input_tensor=arg, axis=0))(self.log_ps)
    def log_probability(self):
        """ Computes the total log probability of the current sample"""
        return tf.reduce_sum(input_tensor=self.log_ps, axis=0)
def normal_transnet(dim, nlayers, mu_shape=(100, 100), mu_activation='relu',
                    sigma_shape=(100, 100), sigma_activation='tanh', residual=False,
                    **layer_args):
    """Build and connect a NoninvNet of stacked conditional-Gaussian layers.

    Parameters
    ----------
    dim : int
        Dimension of the variables.
    nlayers : int
        Number of transformer layers to stack.
    mu_shape : tuple of int
        Hidden-layer widths of each mu network.
    mu_activation : str
        Hidden-neuron activation used in the mu networks.
    sigma_shape : tuple of int
        Hidden-layer widths of each log-sigma network.
    sigma_activation : str
        Hidden-neuron activation used in the log-sigma networks.
    residual : bool
        If True use NormalResidualTransformer layers, otherwise
        NormalTransformer layers.
    **layer_args
        Extra keyword arguments forwarded to nonlinear_transform.

    Returns
    -------
    NoninvNet
        The assembled network with its Keras graph already connected.
    """
    transformer_cls = NormalResidualTransformer if residual else NormalTransformer
    layers = []
    for _ in range(nlayers):
        mu_net = nonlinear_transform(dim, nlayers=len(mu_shape) + 1, nhidden=mu_shape,
                                     activation=mu_activation, **layer_args)
        # log-sigma nets start with zero outputs, i.e. unit initial variance
        sigma_net = nonlinear_transform(dim, nlayers=len(sigma_shape) + 1, nhidden=sigma_shape,
                                        activation=sigma_activation, init_outputs=0, **layer_args)
        layers.append(transformer_cls(mu_net, sigma_net))
    net = NoninvNet(dim, layers)
    net.connect()
    return net
| 39.168 | 145 | 0.62643 | import keras
import tensorflow as tf
import numpy as np
from deep_boltzmann.networks import nonlinear_transform
from deep_boltzmann.networks import connect as _connect
class NormalTransformer(object):
def __init__(self, mu_layers, sigma_layers):
self.mu_layers = mu_layers
self.sigma_layers = sigma_layers
def _compute_x1(self, mu, log_sigma, w1):
return mu + tf.exp(log_sigma) * w1
def _compute_log_p1(self, mu, log_sigma, x1):
return -tf.reduce_sum(input_tensor=log_sigma, axis=1) - 0.5 * tf.reduce_sum(input_tensor=((x1 - mu)/(tf.exp(log_sigma)))**2, axis=1)
def connect(self, x0, w1):
mu = _connect(x0, self.mu_layers)
log_sigma = _connect(x0, self.sigma_layers)
self.x1 = keras.layers.Lambda(lambda args: self._compute_x1(args[0], args[1], args[2]))([mu, log_sigma, w1])
self.log_p1 = keras.layers.Lambda(lambda args: self._compute_log_p1(args[0], args[1], args[2]))([mu, log_sigma, self.x1])
return self.x1, self.log_p1
class NormalResidualTransformer(object):
def __init__(self, mu_layers, sigma_layers):
self.mu_layers = mu_layers
self.sigma_layers = sigma_layers
def _compute_x1(self, x0, mu, log_sigma, w1):
return x0 + mu + tf.exp(log_sigma) * w1
def _compute_log_p1(self, x0, mu, log_sigma, x1):
return -tf.reduce_sum(input_tensor=log_sigma, axis=1) - 0.5 * tf.reduce_sum(input_tensor=((x1 - x0 - mu)/(tf.exp(log_sigma)))**2, axis=1)
def connect(self, x0, w1):
mu = _connect(x0, self.mu_layers)
log_sigma = _connect(x0, self.sigma_layers)
self.x1 = keras.layers.Lambda(lambda args: self._compute_x1(args[0], args[1], args[2], args[3]))([x0, mu, log_sigma, w1])
self.log_p1 = keras.layers.Lambda(lambda args: self._compute_log_p1(args[0], args[1], args[2], args[3]))([x0, mu, log_sigma, self.x1])
return self.x1, self.log_p1
class NoninvNet(object):
def __init__(self, dim, layers):
self.dim = dim
self.layers = layers
self.log_p_total = None
def connect(self):
self.x0 = keras.layers.Input(shape=(self.dim,))
x_last = self.x0
self.xs = []
self.ws = []
self.log_ps = []
for layer in self.layers:
w = keras.layers.Input(shape=(self.dim,))
self.ws.append(w)
x, log_p = layer.connect(x_last, w)
self.xs.append(x)
self.log_ps.append(log_p)
x_last = x
self.x_out = self.xs[-1]
self.log_p_total = keras.layers.Lambda(lambda arg: tf.reduce_sum(input_tensor=arg, axis=0))(self.log_ps)
def log_probability(self):
return tf.reduce_sum(input_tensor=self.log_ps, axis=0)
def normal_transnet(dim, nlayers, mu_shape=(100, 100), mu_activation='relu',
sigma_shape=(100, 100), sigma_activation='tanh', residual=False,
**layer_args):
layers = []
for l in range(nlayers):
mu_net = nonlinear_transform(dim, nlayers=len(mu_shape)+1, nhidden=mu_shape,
activation=mu_activation, **layer_args)
sigma_net = nonlinear_transform(dim, nlayers=len(sigma_shape)+1, nhidden=sigma_shape,
activation=sigma_activation, init_outputs=0, **layer_args)
if residual:
layer = NormalResidualTransformer(mu_net, sigma_net)
else:
layer = NormalTransformer(mu_net, sigma_net)
layers.append(layer)
ninvnet = NoninvNet(dim, layers)
ninvnet.connect()
return ninvnet
| true | true |
1c468d98816f3a6edf4390c586235b73703529fd | 2,298 | py | Python | src/07 - Blurring And Smoothing/01-img_analysis.py | hritik5102/Awesome-Computer-Vision-Guide | 005cd96f6d6c7dacdf1b9b5f5bf56cae3d6cea18 | [
"MIT"
] | null | null | null | src/07 - Blurring And Smoothing/01-img_analysis.py | hritik5102/Awesome-Computer-Vision-Guide | 005cd96f6d6c7dacdf1b9b5f5bf56cae3d6cea18 | [
"MIT"
] | null | null | null | src/07 - Blurring And Smoothing/01-img_analysis.py | hritik5102/Awesome-Computer-Vision-Guide | 005cd96f6d6c7dacdf1b9b5f5bf56cae3d6cea18 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
def nothing(x):
pass
#img = cv2.imread('img.jpeg',-1)
cap=cv2.VideoCapture(0)
cv2.namedWindow('image')
cv2.resizeWindow('image',600,350)
#Creating trackbar
cv2.createTrackbar('lh','image',0,255,nothing)
cv2.createTrackbar('uh','image',0,255,nothing)
cv2.createTrackbar('ls','image',0,255,nothing)
cv2.createTrackbar('us','image',0,255,nothing)
cv2.createTrackbar('lv','image',0,255,nothing)
cv2.createTrackbar('uv','image',0,255,nothing)
#cv2.createTrackbar('switch','image',0,1,nothing)
#set track bar
cv2.setTrackbarPos('lh','image',0)
cv2.setTrackbarPos('uh','image',58)
cv2.setTrackbarPos('ls','image',45)
cv2.setTrackbarPos('us','image',255)
cv2.setTrackbarPos('lv','image',54)
cv2.setTrackbarPos('uv','image',168)
while True:
_,img=cap.read()
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
#while 1 :
#reading trackbar
lh=cv2.getTrackbarPos('lh','image')
uh=cv2.getTrackbarPos('uh','image')
ls=cv2.getTrackbarPos('ls','image')
us=cv2.getTrackbarPos('us','image')
lv=cv2.getTrackbarPos('lv','image')
uv=cv2.getTrackbarPos('uv','image')
#switch = cv2.getTrackbarPos('switch','image')
l_r=np.array([lh,ls,lv])
u_r=np.array([uh,us,uv])
mask = cv2.inRange(hsv,l_r,u_r)
res=cv2.bitwise_and(img,img,mask=mask)
#blur
k=np.ones((15,15),np.float32)/225
s= cv2.filter2D(res,-1,k)
b= cv2.GaussianBlur(res,(15,15),0)
m= cv2.medianBlur(res,15)
bb =cv2.bilateralFilter(res , 15 , 75, 75)#useless
#morphology
k2= np.ones((5,5) , np.uint8)
e=cv2.erode(mask,k2,1)
d=cv2.dilate(mask,k2,1)
o=cv2.morphologyEx(mask,cv2.MORPH_OPEN,k2)
c=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,k2)
oc=cv2.morphologyEx(o,cv2.MORPH_CLOSE,k2)#same as close+open
#output
#cv2.imshow('img',img)
#cv2.imshow('mask',mask)
#cv2.waitKey(1000)
cv2.imshow('res',res)
#cv2.imshow('blur',s)
#cv2.imshow('Gblur',b)
#cv2.imshow('medblur',m)
#cv2.imshow('bilateralblur',bb)
#cv2.imshow('erode',e)
#cv2.imshow('dillate',d)
#cv2.imshow('openM',o)
#cv2.imshow('closeM',c)
#cv2.imshow('OnC_M',oc)
if cv2.waitKey(1) & 0xFF==ord('q'):
break
#cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
| 21.679245 | 64 | 0.64839 | import cv2
import numpy as np
def nothing(x):
pass
cap=cv2.VideoCapture(0)
cv2.namedWindow('image')
cv2.resizeWindow('image',600,350)
cv2.createTrackbar('lh','image',0,255,nothing)
cv2.createTrackbar('uh','image',0,255,nothing)
cv2.createTrackbar('ls','image',0,255,nothing)
cv2.createTrackbar('us','image',0,255,nothing)
cv2.createTrackbar('lv','image',0,255,nothing)
cv2.createTrackbar('uv','image',0,255,nothing)
cv2.setTrackbarPos('lh','image',0)
cv2.setTrackbarPos('uh','image',58)
cv2.setTrackbarPos('ls','image',45)
cv2.setTrackbarPos('us','image',255)
cv2.setTrackbarPos('lv','image',54)
cv2.setTrackbarPos('uv','image',168)
while True:
_,img=cap.read()
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
lh=cv2.getTrackbarPos('lh','image')
uh=cv2.getTrackbarPos('uh','image')
ls=cv2.getTrackbarPos('ls','image')
us=cv2.getTrackbarPos('us','image')
lv=cv2.getTrackbarPos('lv','image')
uv=cv2.getTrackbarPos('uv','image')
l_r=np.array([lh,ls,lv])
u_r=np.array([uh,us,uv])
mask = cv2.inRange(hsv,l_r,u_r)
res=cv2.bitwise_and(img,img,mask=mask)
k=np.ones((15,15),np.float32)/225
s= cv2.filter2D(res,-1,k)
b= cv2.GaussianBlur(res,(15,15),0)
m= cv2.medianBlur(res,15)
bb =cv2.bilateralFilter(res , 15 , 75, 75)
k2= np.ones((5,5) , np.uint8)
e=cv2.erode(mask,k2,1)
d=cv2.dilate(mask,k2,1)
o=cv2.morphologyEx(mask,cv2.MORPH_OPEN,k2)
c=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,k2)
oc=cv2.morphologyEx(o,cv2.MORPH_CLOSE,k2)
cv2.imshow('res',res)
if cv2.waitKey(1) & 0xFF==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| true | true |
1c468dca7335ecf4d19068d904ca06e6efdee798 | 18,712 | py | Python | leanerp/helpdesk/management/commands/get_email.py | seLain/Leaf | f02e15576071429a29f76a06328d024b58a2d69e | [
"Apache-2.0"
] | null | null | null | leanerp/helpdesk/management/commands/get_email.py | seLain/Leaf | f02e15576071429a29f76a06328d024b58a2d69e | [
"Apache-2.0"
] | 6 | 2018-02-20T13:59:07.000Z | 2018-03-06T17:35:41.000Z | leanerp/helpdesk/management/commands/get_email.py | seLain/Leaf | f02e15576071429a29f76a06328d024b58a2d69e | [
"Apache-2.0"
] | 1 | 2018-03-06T17:28:07.000Z | 2018-03-06T17:28:07.000Z | #!/usr/bin/python
"""
Jutda Helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
scripts/get_email.py - Designed to be run from cron, this script checks the
POP and IMAP boxes, or a local mailbox directory,
defined for the queues within a
helpdesk, creating tickets from the new messages (or
adding to existing tickets if needed)
"""
from __future__ import unicode_literals
from datetime import timedelta
import email
import imaplib
import mimetypes
from os import listdir, unlink
from os.path import isfile, join
import poplib
import re
import socket
from time import ctime
from email_reply_parser import EmailReplyParser
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.utils import encoding, six, timezone
from helpdesk import settings
from helpdesk.lib import send_templated_mail, safe_template_context, process_attachments
from helpdesk.models import Queue, Ticket, TicketCC, FollowUp, IgnoreEmail
from django.contrib.auth.models import User
import logging
# Mail-client prefixes stripped from subjects before matching them against
# the "[queue-id]" tracking tag, so replies/forwards attach to the
# originating ticket instead of opening a new one.
STRIPPED_SUBJECT_STRINGS = [
    "Re: ",
    "Fw: ",
    "RE: ",
    "FW: ",
    "Automatic reply: ",
]
class Command(BaseCommand):
    """Management command: poll each queue's mailbox and ingest new e-mail.

    Designed to be run from cron; delegates all real work to process_email().
    """

    # BUG FIX: the original assigned this text to a *local* variable named
    # `help` inside __init__, so Django never displayed any help for the
    # command.  It must be a class attribute.
    help = 'Process django-helpdesk queues and process e-mails via POP3/IMAP or ' \
           'from a local mailbox directory as required, feeding them into the helpdesk.'

    def add_arguments(self, parser):
        """Register command-line options on the argparse parser."""
        parser.add_argument(
            '--quiet',
            action='store_true',
            dest='quiet',
            default=False,
            help='Hide details about each queue/message as they are processed',
        )

    def handle(self, *args, **options):
        """Django entry point; forwards the --quiet flag to process_email()."""
        quiet = options.get('quiet', False)
        process_email(quiet=quiet)
def process_email(quiet=False):
    """Poll every e-mail-enabled queue and ingest new mail into the helpdesk.

    A queue is only polled once its ``email_box_interval`` (minutes) has
    elapsed since ``email_box_last_check``; the timestamp is updated after a
    successful poll.

    :param quiet: when True, per-queue log records are not propagated to the
                  root logger (i.e. nothing is printed to the console).
    """
    for q in Queue.objects.filter(
            email_box_type__isnull=False,
            allow_email_submission=True):
        # One named logger per queue so each queue logs to its own file.
        logger = logging.getLogger('django.helpdesk.queue.' + q.slug)
        if not q.logging_type or q.logging_type == 'none':
            # NOTE(review): logging.disable() is process-wide, so a single
            # queue configured with 'none' also silences every queue
            # processed after it in this run -- confirm this is intended.
            logging.disable(logging.CRITICAL)  # disable all messages
        elif q.logging_type == 'info':
            logger.setLevel(logging.INFO)
        elif q.logging_type == 'warn':
            logger.setLevel(logging.WARN)
        elif q.logging_type == 'error':
            logger.setLevel(logging.ERROR)
        elif q.logging_type == 'crit':
            logger.setLevel(logging.CRITICAL)
        elif q.logging_type == 'debug':
            logger.setLevel(logging.DEBUG)
        if quiet:
            logger.propagate = False  # do not propagate to root logger that would log to console
        logdir = q.logging_dir or '/var/log/helpdesk/'
        # NOTE(review): a new FileHandler is attached on every call and never
        # removed, so repeated invocations inside one long-lived process will
        # duplicate log lines -- fine for the intended one-shot cron usage.
        handler = logging.FileHandler(join(logdir, q.slug + '_get_email.log'))
        logger.addHandler(handler)
        if not q.email_box_last_check:
            # First run for this queue: look back 30 minutes.
            q.email_box_last_check = timezone.now() - timedelta(minutes=30)
        queue_time_delta = timedelta(minutes=q.email_box_interval or 0)
        if (q.email_box_last_check + queue_time_delta) < timezone.now():
            process_queue(q, logger=logger)
            q.email_box_last_check = timezone.now()
            q.save()
def process_queue(q, logger):
    """Fetch and ingest mail for a single queue.

    Depending on ``q.email_box_type`` (optionally overridden by
    ``settings.QUEUE_EMAIL_BOX_TYPE``) this polls a POP3 server, an IMAP
    server, or a local mailbox directory.  Every message is handed to
    ``ticket_from_message``; messages ingested successfully are deleted from
    the mailbox, the rest are left in place for the next run.

    :param q: the helpdesk Queue being processed.
    :param logger: per-queue logger configured by process_email().
    :raises ImportError: if SOCKS proxying is configured but PySocks is
        not installed.
    """
    logger.info("***** %s: Begin processing mail for django-helpdesk" % ctime())
    if q.socks_proxy_type and q.socks_proxy_host and q.socks_proxy_port:
        try:
            import socks
        except ImportError:
            no_socks_msg = "Queue has been configured with proxy settings, " \
                           "but no socks library was installed. Try to " \
                           "install PySocks via PyPI."
            logger.error(no_socks_msg)
            raise ImportError(no_socks_msg)
        proxy_type = {
            'socks4': socks.SOCKS4,
            'socks5': socks.SOCKS5,
        }.get(q.socks_proxy_type)
        # Route every socket created below through the configured SOCKS proxy.
        socks.set_default_proxy(proxy_type=proxy_type,
                                addr=q.socks_proxy_host,
                                port=q.socks_proxy_port)
        socket.socket = socks.socksocket
    elif six.PY2:
        socket.socket = socket._socketobject
    email_box_type = settings.QUEUE_EMAIL_BOX_TYPE or q.email_box_type
    if email_box_type == 'pop3':
        if q.email_box_ssl or settings.QUEUE_EMAIL_BOX_SSL:
            if not q.email_box_port:
                q.email_box_port = 995
            server = poplib.POP3_SSL(q.email_box_host or
                                     settings.QUEUE_EMAIL_BOX_HOST,
                                     int(q.email_box_port))
        else:
            if not q.email_box_port:
                q.email_box_port = 110
            server = poplib.POP3(q.email_box_host or
                                 settings.QUEUE_EMAIL_BOX_HOST,
                                 int(q.email_box_port))
        logger.info("Attempting POP3 server login")
        server.getwelcome()
        server.user(q.email_box_user or settings.QUEUE_EMAIL_BOX_USER)
        server.pass_(q.email_box_pass or settings.QUEUE_EMAIL_BOX_PASSWORD)
        messagesInfo = server.list()[1]
        logger.info("Received %d messages from POP3 server" % len(messagesInfo))
        for msg in messagesInfo:
            # LIST lines are "<number> <size>"; we only need the number.
            msgNum = msg.split(" ")[0]
            logger.info("Processing message %s" % msgNum)
            full_message = encoding.force_text("\n".join(server.retr(msgNum)[1]), errors='replace')
            ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
            if ticket:
                server.dele(msgNum)
                logger.info("Successfully processed message %s, deleted from POP3 server" % msgNum)
            else:
                logger.warning("Message %s was not successfully processed, and will be left on POP3 server" % msgNum)
        server.quit()
    elif email_box_type == 'imap':
        if q.email_box_ssl or settings.QUEUE_EMAIL_BOX_SSL:
            if not q.email_box_port:
                q.email_box_port = 993
            server = imaplib.IMAP4_SSL(q.email_box_host or
                                       settings.QUEUE_EMAIL_BOX_HOST,
                                       int(q.email_box_port))
        else:
            if not q.email_box_port:
                q.email_box_port = 143
            server = imaplib.IMAP4(q.email_box_host or
                                   settings.QUEUE_EMAIL_BOX_HOST,
                                   int(q.email_box_port))
        logger.info("Attempting IMAP server login")
        server.login(q.email_box_user or
                     settings.QUEUE_EMAIL_BOX_USER,
                     q.email_box_pass or
                     settings.QUEUE_EMAIL_BOX_PASSWORD)
        server.select(q.email_box_imap_folder)
        status, data = server.search(None, 'NOT', 'DELETED')
        if data:
            msgnums = data[0].split()
            logger.info("Received %d messages from IMAP server" % len(msgnums))
            for num in msgnums:
                logger.info("Processing message %s" % num)
                status, data = server.fetch(num, '(RFC822)')
                full_message = encoding.force_text(data[0][1], errors='replace')
                ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
                if ticket:
                    # Flag for deletion; actually removed by expunge() below.
                    server.store(num, '+FLAGS', '\\Deleted')
                    logger.info("Successfully processed message %s, deleted from IMAP server" % num)
                else:
                    logger.warning("Message %s was not successfully processed, and will be left on IMAP server" % num)
        server.expunge()
        server.close()
        server.logout()
    elif email_box_type == 'local':
        mail_dir = q.email_box_local_dir or '/var/lib/mail/helpdesk/'
        mail = [join(mail_dir, f) for f in listdir(mail_dir) if isfile(join(mail_dir, f))]
        # BUG FIX: this line was duplicated in the original, logging the
        # count twice per run.
        logger.info("Found %d messages in local mailbox directory" % len(mail))
        for i, m in enumerate(mail, 1):
            logger.info("Processing message %d" % i)
            with open(m, 'r') as f:
                full_message = encoding.force_text(f.read(), errors='replace')
            ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
            if ticket:
                logger.info("Successfully processed message %d, ticket/comment created." % i)
                try:
                    unlink(m)  # delete message file if ticket was successful
                except OSError:
                    # Narrowed from a bare except: unlink() failures are OSError.
                    logger.error("Unable to delete message %d." % i)
                else:
                    logger.info("Successfully deleted message %d." % i)
            else:
                logger.warning("Message %d was not successfully processed, and will be left in local directory" % i)
def decodeUnknown(charset, string):
    """Decode raw message bytes to text, tolerating bad or missing charsets.

    :param charset: charset name advertised by the message, or None/''.
    :param string: bytes to decode (text passes through unchanged on PY3).
    :returns: decoded text; undecodable bytes are replaced, and unknown
              advertised charsets fall back to utf-8 instead of raising.
    """
    if six.PY2:
        if not charset:
            try:
                return string.decode('utf-8', 'replace')
            except (UnicodeError, LookupError):
                # Narrowed from a bare except: decoding failures only.
                return string.decode('iso8859-1', 'replace')
        return unicode(string, charset)
    elif six.PY3:
        if type(string) is not str:
            if not charset:
                try:
                    return str(string, encoding='utf-8', errors='replace')
                except (UnicodeError, LookupError):
                    return str(string, encoding='iso8859-1', errors='replace')
            try:
                return str(string, encoding=charset, errors='replace')
            except LookupError:
                # BUG FIX: messages sometimes advertise bogus charsets
                # (e.g. "x-unknown"); previously this raised LookupError and
                # aborted the whole mail run.  Fall back to utf-8/replace.
                return str(string, encoding='utf-8', errors='replace')
        return string
def decode_mail_headers(string):
    """Decode RFC 2047 encoded-word header fragments into one text string.

    :param string: raw header value (e.g. Subject or From).
    :returns: space-joined decoded fragments.
    """
    decoded = email.header.decode_header(string) if six.PY3 else email.header.decode_header(string.encode('utf-8'))
    if six.PY2:
        return u' '.join([unicode(msg, charset or 'utf-8') for msg, charset in decoded])
    elif six.PY3:
        parts = []
        for msg, charset in decoded:
            if charset:
                parts.append(str(msg, encoding=charset, errors='replace'))
            elif isinstance(msg, bytes):
                # BUG FIX: decode_header() can return undecoded fragments as
                # bytes with charset None; the old str(msg) produced a
                # literal "b'...'" in subjects/senders.  Decode instead.
                parts.append(str(msg, encoding='utf-8', errors='replace'))
            else:
                parts.append(msg)
        return u' '.join(parts)
def ticket_from_message(message, queue, logger):
    """Create or update a helpdesk Ticket from a raw RFC822 message string.

    Returns the Ticket (new or updated) on success. For senders matched by an
    IgnoreEmail rule, returns False when the message must be kept in the
    mailbox and True when it may be deleted.
    """
    # 'message' must be an RFC822 formatted message.
    message = email.message_from_string(message) if six.PY3 else email.message_from_string(message.encode('utf-8'))
    subject = message.get('subject', _('Comment from e-mail'))
    subject = decode_mail_headers(decodeUnknown(message.get_charset(), subject))
    for affix in STRIPPED_SUBJECT_STRINGS:
        subject = subject.replace(affix, "")
    subject = subject.strip()
    sender = message.get('from', _('Unknown Sender'))
    sender = decode_mail_headers(decodeUnknown(message.get_charset(), sender))
    sender_email = email.utils.parseaddr(sender)[1]
    cc = message.get_all('cc', None)
    if cc:
        # first, fixup the encoding if necessary
        cc = [decode_mail_headers(decodeUnknown(message.get_charset(), x)) for x in cc]
        # get_all checks if multiple CC headers, but individual emails may be comma separated too
        tempcc = []
        for hdr in cc:
            tempcc.extend(hdr.split(','))
        # use a set to ensure no duplicates
        cc = set([x.strip() for x in tempcc])
    for ignore in IgnoreEmail.objects.filter(Q(queues=queue) | Q(queues__isnull=True)):
        if ignore.test(sender_email):
            if ignore.keep_in_mailbox:
                # By returning 'False' the message will be kept in the mailbox,
                # and the 'True' will cause the message to be deleted.
                return False
            return True
    # Bug fix: raw string for the ID suffix so '\d' is not an (invalid) string
    # escape on newer Python versions. The pattern itself is unchanged.
    matchobj = re.match(r".*\[" + queue.slug + r"-(?P<id>\d+)\]", subject)
    if matchobj:
        # This is a reply or forward.
        ticket = matchobj.group('id')
        logger.info("Matched tracking ID %s-%s" % (queue.slug, ticket))
    else:
        logger.info("No tracking ID matched.")
        ticket = None
    body = None
    counter = 0
    files = []
    for part in message.walk():
        if part.get_content_maintype() == 'multipart':
            continue
        name = part.get_param("name")
        if name:
            name = email.utils.collapse_rfc2231_value(name)
        if part.get_content_maintype() == 'text' and name is None:
            if part.get_content_subtype() == 'plain':
                body = EmailReplyParser.parse_reply(
                    decodeUnknown(part.get_content_charset(), part.get_payload(decode=True))
                )
                # workaround to get unicode text out rather than escaped text
                # NOTE(review): .encode('ascii') raises on non-ASCII bodies — confirm upstream.
                body = body.encode('ascii').decode('unicode_escape') if six.PY3 else body.encode('utf-8')
                logger.debug("Discovered plain text MIME part")
            else:
                files.append(
                    SimpleUploadedFile(_("email_html_body.html"), encoding.smart_bytes(part.get_payload()), 'text/html')
                )
                logger.debug("Discovered HTML MIME part")
        else:
            if not name:
                # NOTE(review): guess_extension may return None, producing "part-0None".
                ext = mimetypes.guess_extension(part.get_content_type())
                name = "part-%i%s" % (counter, ext)
            files.append(SimpleUploadedFile(name, encoding.smart_bytes(part.get_payload()), part.get_content_type()))
            logger.debug("Found MIME attachment %s" % name)
        counter += 1
    if not body:
        body = _('No plain-text email body available. Please see attachment "email_html_body.html".')
    if ticket:
        try:
            t = Ticket.objects.get(id=ticket)
        except Ticket.DoesNotExist:
            logger.info("Tracking ID %s-%s not associated with existing ticket. Creating new ticket." % (queue.slug, ticket))
            ticket = None
        else:
            logger.info("Found existing ticket with Tracking ID %s-%s" % (t.queue.slug, t.id))
            if t.status == Ticket.CLOSED_STATUS:
                t.status = Ticket.REOPENED_STATUS
                t.save()
            new = False
    # Map SMTP priority/importance headers onto the helpdesk priority scale.
    smtp_priority = message.get('priority', '')
    smtp_importance = message.get('importance', '')
    high_priority_types = {'high', 'important', '1', 'urgent'}
    priority = 2 if high_priority_types & {smtp_priority, smtp_importance} else 3
    if ticket is None:
        new = True
        t = Ticket.objects.create(
            title=subject,
            queue=queue,
            submitter_email=sender_email,
            created=timezone.now(),
            description=body,
            priority=priority,
        )
        logger.debug("Created new ticket %s-%s" % (t.queue.slug, t.id))
    if cc:
        # get list of currently CC'd emails
        # Bug fix: filter by the Ticket instance 't' (always bound by now); the
        # original used the 'ticket' id variable, which is None for new tickets.
        current_cc = TicketCC.objects.filter(ticket=t)
        current_cc_emails = [x.email for x in current_cc if x.email]
        # get emails of any Users CC'd to email
        # Bug fix: email-only CC rows have no user; skip them instead of
        # raising AttributeError on x.user.email.
        current_cc_users = [x.user.email for x in current_cc if x.user]
        # ensure submitter, assigned user, queue email not added
        other_emails = [queue.email_address]
        if t.submitter_email:
            other_emails.append(t.submitter_email)
        if t.assigned_to:
            other_emails.append(t.assigned_to.email)
        current_cc = set(current_cc_emails + current_cc_users + other_emails)
        # first, add any User not previously CC'd (as identified by User's email)
        all_users = User.objects.all()
        all_user_emails = set([x.email for x in all_users])
        users_not_currently_ccd = all_user_emails.difference(set(current_cc))
        users_to_cc = cc.intersection(users_not_currently_ccd)
        for user in users_to_cc:
            tcc = TicketCC.objects.create(
                ticket=t,
                user=User.objects.get(email=user),
                can_view=True,
                can_update=False
            )
            tcc.save()
        # then add remaining emails alphabetically, makes testing easy
        new_cc = cc.difference(current_cc).difference(all_user_emails)
        new_cc = sorted(list(new_cc))
        for ccemail in new_cc:
            tcc = TicketCC.objects.create(
                ticket=t,
                email=ccemail,
                can_view=True,
                can_update=False
            )
            tcc.save()
    f = FollowUp(
        ticket=t,
        title=_('E-Mail Received from %(sender_email)s' % {'sender_email': sender_email}),
        date=timezone.now(),
        public=True,
        comment=body,
    )
    if t.status == Ticket.REOPENED_STATUS:
        f.new_status = Ticket.REOPENED_STATUS
        f.title = _('Ticket Re-Opened by E-Mail Received from %(sender_email)s' % {'sender_email': sender_email})
    f.save()
    logger.debug("Created new FollowUp for Ticket")
    if six.PY2:
        logger.info(("[%s-%s] %s" % (t.queue.slug, t.id, t.title,)).encode('ascii', 'replace'))
    elif six.PY3:
        logger.info("[%s-%s] %s" % (t.queue.slug, t.id, t.title,))
    attached = process_attachments(f, files)
    for att_file in attached:
        logger.info("Attachment '%s' successfully added to ticket from email." % att_file[0])
    context = safe_template_context(t)
    if new:
        # Notify submitter and queue CC addresses about the new ticket.
        if sender_email:
            send_templated_mail(
                'newticket_submitter',
                context,
                recipients=sender_email,
                sender=queue.from_address,
                fail_silently=True,
            )
        if queue.new_ticket_cc:
            send_templated_mail(
                'newticket_cc',
                context,
                recipients=queue.new_ticket_cc,
                sender=queue.from_address,
                fail_silently=True,
            )
        if queue.updated_ticket_cc and queue.updated_ticket_cc != queue.new_ticket_cc:
            send_templated_mail(
                'newticket_cc',
                context,
                recipients=queue.updated_ticket_cc,
                sender=queue.from_address,
                fail_silently=True,
            )
    else:
        # Existing ticket: notify the owner and the update CC list.
        context.update(comment=f.comment)
        if t.assigned_to:
            send_templated_mail(
                'updated_owner',
                context,
                recipients=t.assigned_to.email,
                sender=queue.from_address,
                fail_silently=True,
            )
        if queue.updated_ticket_cc:
            send_templated_mail(
                'updated_cc',
                context,
                recipients=queue.updated_ticket_cc,
                sender=queue.from_address,
                fail_silently=True,
            )
    return t
if __name__ == '__main__':
process_email()
| 38.502058 | 125 | 0.595981 |
from __future__ import unicode_literals
from datetime import timedelta
import email
import imaplib
import mimetypes
from os import listdir, unlink
from os.path import isfile, join
import poplib
import re
import socket
from time import ctime
from email_reply_parser import EmailReplyParser
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.utils import encoding, six, timezone
from helpdesk import settings
from helpdesk.lib import send_templated_mail, safe_template_context, process_attachments
from helpdesk.models import Queue, Ticket, TicketCC, FollowUp, IgnoreEmail
from django.contrib.auth.models import User
import logging
STRIPPED_SUBJECT_STRINGS = [
"Re: ",
"Fw: ",
"RE: ",
"FW: ",
"Automatic reply: ",
]
class Command(BaseCommand):
def __init__(self):
BaseCommand.__init__(self)
help = 'Process django-helpdesk queues and process e-mails via POP3/IMAP or ' \
'from a local mailbox directory as required, feeding them into the helpdesk.'
def add_arguments(self, parser):
parser.add_argument(
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='Hide details about each queue/message as they are processed',
)
def handle(self, *args, **options):
quiet = options.get('quiet', False)
process_email(quiet=quiet)
def process_email(quiet=False):
for q in Queue.objects.filter(
email_box_type__isnull=False,
allow_email_submission=True):
logger = logging.getLogger('django.helpdesk.queue.' + q.slug)
if not q.logging_type or q.logging_type == 'none':
logging.disable(logging.CRITICAL)
elif q.logging_type == 'info':
logger.setLevel(logging.INFO)
elif q.logging_type == 'warn':
logger.setLevel(logging.WARN)
elif q.logging_type == 'error':
logger.setLevel(logging.ERROR)
elif q.logging_type == 'crit':
logger.setLevel(logging.CRITICAL)
elif q.logging_type == 'debug':
logger.setLevel(logging.DEBUG)
if quiet:
logger.propagate = False
logdir = q.logging_dir or '/var/log/helpdesk/'
handler = logging.FileHandler(join(logdir, q.slug + '_get_email.log'))
logger.addHandler(handler)
if not q.email_box_last_check:
q.email_box_last_check = timezone.now() - timedelta(minutes=30)
queue_time_delta = timedelta(minutes=q.email_box_interval or 0)
if (q.email_box_last_check + queue_time_delta) < timezone.now():
process_queue(q, logger=logger)
q.email_box_last_check = timezone.now()
q.save()
def process_queue(q, logger):
logger.info("***** %s: Begin processing mail for django-helpdesk" % ctime())
if q.socks_proxy_type and q.socks_proxy_host and q.socks_proxy_port:
try:
import socks
except ImportError:
no_socks_msg = "Queue has been configured with proxy settings, " \
"but no socks library was installed. Try to " \
"install PySocks via PyPI."
logger.error(no_socks_msg)
raise ImportError(no_socks_msg)
proxy_type = {
'socks4': socks.SOCKS4,
'socks5': socks.SOCKS5,
}.get(q.socks_proxy_type)
socks.set_default_proxy(proxy_type=proxy_type,
addr=q.socks_proxy_host,
port=q.socks_proxy_port)
socket.socket = socks.socksocket
elif six.PY2:
socket.socket = socket._socketobject
email_box_type = settings.QUEUE_EMAIL_BOX_TYPE or q.email_box_type
if email_box_type == 'pop3':
if q.email_box_ssl or settings.QUEUE_EMAIL_BOX_SSL:
if not q.email_box_port:
q.email_box_port = 995
server = poplib.POP3_SSL(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
else:
if not q.email_box_port:
q.email_box_port = 110
server = poplib.POP3(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
logger.info("Attempting POP3 server login")
server.getwelcome()
server.user(q.email_box_user or settings.QUEUE_EMAIL_BOX_USER)
server.pass_(q.email_box_pass or settings.QUEUE_EMAIL_BOX_PASSWORD)
messagesInfo = server.list()[1]
logger.info("Received %d messages from POP3 server" % len(messagesInfo))
for msg in messagesInfo:
msgNum = msg.split(" ")[0]
logger.info("Processing message %s" % msgNum)
full_message = encoding.force_text("\n".join(server.retr(msgNum)[1]), errors='replace')
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
if ticket:
server.dele(msgNum)
logger.info("Successfully processed message %s, deleted from POP3 server" % msgNum)
else:
logger.warn("Message %s was not successfully processed, and will be left on POP3 server" % msgNum)
server.quit()
elif email_box_type == 'imap':
if q.email_box_ssl or settings.QUEUE_EMAIL_BOX_SSL:
if not q.email_box_port:
q.email_box_port = 993
server = imaplib.IMAP4_SSL(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
else:
if not q.email_box_port:
q.email_box_port = 143
server = imaplib.IMAP4(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
logger.info("Attempting IMAP server login")
server.login(q.email_box_user or
settings.QUEUE_EMAIL_BOX_USER,
q.email_box_pass or
settings.QUEUE_EMAIL_BOX_PASSWORD)
server.select(q.email_box_imap_folder)
status, data = server.search(None, 'NOT', 'DELETED')
if data:
msgnums = data[0].split()
logger.info("Received %d messages from IMAP server" % len(msgnums))
for num in msgnums:
logger.info("Processing message %s" % num)
status, data = server.fetch(num, '(RFC822)')
full_message = encoding.force_text(data[0][1], errors='replace')
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
if ticket:
server.store(num, '+FLAGS', '\\Deleted')
logger.info("Successfully processed message %s, deleted from IMAP server" % num)
else:
logger.warn("Message %s was not successfully processed, and will be left on IMAP server" % num)
server.expunge()
server.close()
server.logout()
elif email_box_type == 'local':
mail_dir = q.email_box_local_dir or '/var/lib/mail/helpdesk/'
mail = [join(mail_dir, f) for f in listdir(mail_dir) if isfile(join(mail_dir, f))]
logger.info("Found %d messages in local mailbox directory" % len(mail))
logger.info("Found %d messages in local mailbox directory" % len(mail))
for i, m in enumerate(mail, 1):
logger.info("Processing message %d" % i)
with open(m, 'r') as f:
full_message = encoding.force_text(f.read(), errors='replace')
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
if ticket:
logger.info("Successfully processed message %d, ticket/comment created." % i)
try:
unlink(m)
except:
logger.error("Unable to delete message %d." % i)
else:
logger.info("Successfully deleted message %d." % i)
else:
logger.warn("Message %d was not successfully processed, and will be left in local directory" % i)
def decodeUnknown(charset, string):
if six.PY2:
if not charset:
try:
return string.decode('utf-8', 'replace')
except:
return string.decode('iso8859-1', 'replace')
return unicode(string, charset)
elif six.PY3:
if type(string) is not str:
if not charset:
try:
return str(string, encoding='utf-8', errors='replace')
except:
return str(string, encoding='iso8859-1', errors='replace')
return str(string, encoding=charset, errors='replace')
return string
def decode_mail_headers(string):
decoded = email.header.decode_header(string) if six.PY3 else email.header.decode_header(string.encode('utf-8'))
if six.PY2:
return u' '.join([unicode(msg, charset or 'utf-8') for msg, charset in decoded])
elif six.PY3:
return u' '.join([str(msg, encoding=charset, errors='replace') if charset else str(msg) for msg, charset in decoded])
def ticket_from_message(message, queue, logger):
message = email.message_from_string(message) if six.PY3 else email.message_from_string(message.encode('utf-8'))
subject = message.get('subject', _('Comment from e-mail'))
subject = decode_mail_headers(decodeUnknown(message.get_charset(), subject))
for affix in STRIPPED_SUBJECT_STRINGS:
subject = subject.replace(affix, "")
subject = subject.strip()
sender = message.get('from', _('Unknown Sender'))
sender = decode_mail_headers(decodeUnknown(message.get_charset(), sender))
sender_email = email.utils.parseaddr(sender)[1]
cc = message.get_all('cc', None)
if cc:
cc = [decode_mail_headers(decodeUnknown(message.get_charset(), x)) for x in cc]
tempcc = []
for hdr in cc:
tempcc.extend(hdr.split(','))
cc = set([x.strip() for x in tempcc])
for ignore in IgnoreEmail.objects.filter(Q(queues=queue) | Q(queues__isnull=True)):
if ignore.test(sender_email):
if ignore.keep_in_mailbox:
return False
return True
matchobj = re.match(r".*\[" + queue.slug + "-(?P<id>\d+)\]", subject)
if matchobj:
ticket = matchobj.group('id')
logger.info("Matched tracking ID %s-%s" % (queue.slug, ticket))
else:
logger.info("No tracking ID matched.")
ticket = None
body = None
counter = 0
files = []
for part in message.walk():
if part.get_content_maintype() == 'multipart':
continue
name = part.get_param("name")
if name:
name = email.utils.collapse_rfc2231_value(name)
if part.get_content_maintype() == 'text' and name is None:
if part.get_content_subtype() == 'plain':
body = EmailReplyParser.parse_reply(
decodeUnknown(part.get_content_charset(), part.get_payload(decode=True))
)
body = body.encode('ascii').decode('unicode_escape') if six.PY3 else body.encode('utf-8')
logger.debug("Discovered plain text MIME part")
else:
files.append(
SimpleUploadedFile(_("email_html_body.html"), encoding.smart_bytes(part.get_payload()), 'text/html')
)
logger.debug("Discovered HTML MIME part")
else:
if not name:
ext = mimetypes.guess_extension(part.get_content_type())
name = "part-%i%s" % (counter, ext)
files.append(SimpleUploadedFile(name, encoding.smart_bytes(part.get_payload()), part.get_content_type()))
logger.debug("Found MIME attachment %s" % name)
counter += 1
if not body:
body = _('No plain-text email body available. Please see attachment "email_html_body.html".')
if ticket:
try:
t = Ticket.objects.get(id=ticket)
except Ticket.DoesNotExist:
logger.info("Tracking ID %s-%s not associated with existing ticket. Creating new ticket." % (queue.slug, ticket))
ticket = None
else:
logger.info("Found existing ticket with Tracking ID %s-%s" % (t.queue.slug, t.id))
if t.status == Ticket.CLOSED_STATUS:
t.status = Ticket.REOPENED_STATUS
t.save()
new = False
smtp_priority = message.get('priority', '')
smtp_importance = message.get('importance', '')
high_priority_types = {'high', 'important', '1', 'urgent'}
priority = 2 if high_priority_types & {smtp_priority, smtp_importance} else 3
if ticket is None:
new = True
t = Ticket.objects.create(
title=subject,
queue=queue,
submitter_email=sender_email,
created=timezone.now(),
description=body,
priority=priority,
)
logger.debug("Created new ticket %s-%s" % (t.queue.slug, t.id))
if cc:
current_cc = TicketCC.objects.filter(ticket=ticket)
current_cc_emails = [x.email for x in current_cc]
# get emails of any Users CC'd to email
current_cc_users = [x.user.email for x in current_cc]
other_emails = [queue.email_address]
if t.submitter_email:
other_emails.append(t.submitter_email)
if t.assigned_to:
other_emails.append(t.assigned_to.email)
current_cc = set(current_cc_emails + current_cc_users + other_emails)
all_users = User.objects.all()
all_user_emails = set([x.email for x in all_users])
users_not_currently_ccd = all_user_emails.difference(set(current_cc))
users_to_cc = cc.intersection(users_not_currently_ccd)
for user in users_to_cc:
tcc = TicketCC.objects.create(
ticket=t,
user=User.objects.get(email=user),
can_view=True,
can_update=False
)
tcc.save()
new_cc = cc.difference(current_cc).difference(all_user_emails)
new_cc = sorted(list(new_cc))
for ccemail in new_cc:
tcc = TicketCC.objects.create(
ticket=t,
email=ccemail,
can_view=True,
can_update=False
)
tcc.save()
f = FollowUp(
ticket=t,
title=_('E-Mail Received from %(sender_email)s' % {'sender_email': sender_email}),
date=timezone.now(),
public=True,
comment=body,
)
if t.status == Ticket.REOPENED_STATUS:
f.new_status = Ticket.REOPENED_STATUS
f.title = _('Ticket Re-Opened by E-Mail Received from %(sender_email)s' % {'sender_email': sender_email})
f.save()
logger.debug("Created new FollowUp for Ticket")
if six.PY2:
logger.info(("[%s-%s] %s" % (t.queue.slug, t.id, t.title,)).encode('ascii', 'replace'))
elif six.PY3:
logger.info("[%s-%s] %s" % (t.queue.slug, t.id, t.title,))
attached = process_attachments(f, files)
for att_file in attached:
logger.info("Attachment '%s' successfully added to ticket from email." % att_file[0])
context = safe_template_context(t)
if new:
if sender_email:
send_templated_mail(
'newticket_submitter',
context,
recipients=sender_email,
sender=queue.from_address,
fail_silently=True,
)
if queue.new_ticket_cc:
send_templated_mail(
'newticket_cc',
context,
recipients=queue.new_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc and queue.updated_ticket_cc != queue.new_ticket_cc:
send_templated_mail(
'newticket_cc',
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
else:
context.update(comment=f.comment)
if t.assigned_to:
send_templated_mail(
'updated_owner',
context,
recipients=t.assigned_to.email,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc:
send_templated_mail(
'updated_cc',
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
return t
if __name__ == '__main__':
process_email()
| true | true |
1c468e275b37e8f12a979296a79fb4db9882c0cd | 838 | py | Python | lib/colors.py | Saveurian/Cleartext_Scanner | c54828fc6321b8549245f7914b4749b9114df7e0 | [
"Unlicense"
] | null | null | null | lib/colors.py | Saveurian/Cleartext_Scanner | c54828fc6321b8549245f7914b4749b9114df7e0 | [
"Unlicense"
] | null | null | null | lib/colors.py | Saveurian/Cleartext_Scanner | c54828fc6321b8549245f7914b4749b9114df7e0 | [
"Unlicense"
] | null | null | null | class Colors:
"""
Provides ANSI terminal colors helping the eyes identify
potential clear-text passwords.
"""
NONE = "\033[0m"
RED = "\033[31m"
GREEN = "\033[32m"
LIGHT_GRAY = "\033[37m"
LIGHT_BLUE = "\033[34m"
YELLOW = "\033[33m"
def __init__(self):
return
# Red terminal color
def red(self, text):
return self.RED + text + self.NONE
# Green terminal color
def green(self, text):
return self.GREEN + text + self.NONE
# Light gray terminal color
def light_gray(self, text):
return self.LIGHT_GRAY + text + self.NONE
# Light blue terminal color
def light_blue(self, text):
return self.LIGHT_BLUE + text + self.NONE
# Yellow terminal color
def yellow(self, text):
return self.YELLOW + text + self.NONE
| 23.942857 | 59 | 0.608592 | class Colors:
NONE = "\033[0m"
RED = "\033[31m"
GREEN = "\033[32m"
LIGHT_GRAY = "\033[37m"
LIGHT_BLUE = "\033[34m"
YELLOW = "\033[33m"
def __init__(self):
return
def red(self, text):
return self.RED + text + self.NONE
def green(self, text):
return self.GREEN + text + self.NONE
def light_gray(self, text):
return self.LIGHT_GRAY + text + self.NONE
def light_blue(self, text):
return self.LIGHT_BLUE + text + self.NONE
def yellow(self, text):
return self.YELLOW + text + self.NONE
| true | true |
1c468f73f1203314d42997ed056bf5f884a64d3b | 2,065 | py | Python | pytorch/poly/polygonize.py | IUResearchApplications/BuildingFootprints | 97dc2ba9303bb5fdfd1c357c94b9e1e903a52ebe | [
"MIT"
] | 2 | 2020-05-01T15:41:14.000Z | 2020-05-27T20:49:09.000Z | pytorch/poly/polygonize.py | IUResearchApplications/BuildingFootprints | 97dc2ba9303bb5fdfd1c357c94b9e1e903a52ebe | [
"MIT"
] | null | null | null | pytorch/poly/polygonize.py | IUResearchApplications/BuildingFootprints | 97dc2ba9303bb5fdfd1c357c94b9e1e903a52ebe | [
"MIT"
] | 1 | 2020-05-01T15:41:15.000Z | 2020-05-01T15:41:15.000Z | import sys
import subprocess
import os
import ogr
import glob
from setup import setup_run
def call_gdal_polygonize(input_file, output_file):
    """Polygonize one GeoTIFF band into a GeoJSON file via gdal_polygonize.py.

    Deletes any pre-existing output first, and removes the freshly written
    GeoJSON again when it contains no features (gdal_polygonize emits an
    invalid file for empty rasters).
    """
    # If the file already exists, delete it first or additional polygons will be saved to the
    # files if this is ran more than once.
    if os.path.isfile(output_file):
        os.remove(output_file)
    # Call gdal_polygonize.py on band 1, quietly, writing GeoJSON.
    subprocess.call(['gdal_polygonize.py', input_file, '-b', '1', '-q', '-f','GeoJSON',
                     output_file])
    # Open the image with OGR
    src = ogr.Open(output_file)
    # If the GeoTIFF has no shapes to polygonize then gdal_polygonize outputs a GeoJSON in an
    # incorrect format, so delete it.
    layer = src.GetLayer(0)
    # The GeoJSON that needs to be deleted will have no features
    # NOTE(review): 'src' is never released/closed before the os.remove below —
    # verify this doesn't hold a lock on platforms like Windows.
    count = layer.GetFeatureCount()
    if count == 0:
        os.remove(output_file)
        print ('Removed ' + os.path.basename(output_file))
def run_polygonize(main_path):
    """Polygonize every GeoTIFF under *main_path* into GeoJSON files.

    Results go to the 'original_geojson' subdirectory (created on demand).
    Returns the path of that subdirectory.
    """
    tif_paths = glob.glob(os.path.join(main_path, '*.tif'))
    geojson_path = os.path.join(main_path, 'original_geojson')
    # Create the output directory on first use.
    if not os.path.isdir(geojson_path):
        os.mkdir(geojson_path)
        print ("Created folder 'original_geojson'")
    print ('Polygonizing the predictions...')
    for tif_path in tif_paths:
        # Same base name as the raster, with a .geojson extension.
        base_name = os.path.splitext(os.path.basename(tif_path))[0]
        geojson_name = os.path.join(geojson_path, base_name + '.geojson')
        # NOTE: joining with main_path again reproduces the original behavior;
        # when main_path is absolute the second (absolute) component wins.
        call_gdal_polygonize(tif_path, os.path.join(main_path, geojson_name))
    print ('Done.')
    return geojson_path
def main():
    """Resolve the run's working directory, then polygonize everything in it."""
    main_path = setup_run('polygonize')
    run_polygonize(main_path)
if __name__ == '__main__':
main()
| 30.820896 | 94 | 0.678935 | import sys
import subprocess
import os
import ogr
import glob
from setup import setup_run
def call_gdal_polygonize(input_file, output_file):
if os.path.isfile(output_file):
os.remove(output_file)
subprocess.call(['gdal_polygonize.py', input_file, '-b', '1', '-q', '-f','GeoJSON',
output_file])
src = ogr.Open(output_file)
layer = src.GetLayer(0)
count = layer.GetFeatureCount()
if count == 0:
os.remove(output_file)
print ('Removed ' + os.path.basename(output_file))
def run_polygonize(main_path):
poly_fp = glob.glob(os.path.join(main_path, '*.tif'))
geojson_path = os.path.join(main_path, 'original_geojson')
if not os.path.isdir(geojson_path):
os.mkdir(geojson_path)
print ("Created folder 'original_geojson'")
print ('Polygonizing the predictions...')
for tif_fp in poly_fp:
file_name = os.path.splitext(os.path.basename(tif_fp))[0]
geojson_name = os.path.join(geojson_path, file_name + '.geojson')
json_fp = os.path.join(main_path, geojson_name)
call_gdal_polygonize(tif_fp, json_fp)
print ('Done.')
return geojson_path
def main():
main_path = setup_run('polygonize')
run_polygonize(main_path)
if __name__ == '__main__':
main()
| true | true |
1c46914d8a6cc3062ee889cf79e110b85a8762f3 | 938 | py | Python | pycrypt.py | o0void0o/pycrypt | 46ea779f2de983a9d6caa974a3b932590af5c156 | [
"MIT"
] | null | null | null | pycrypt.py | o0void0o/pycrypt | 46ea779f2de983a9d6caa974a3b932590af5c156 | [
"MIT"
] | null | null | null | pycrypt.py | o0void0o/pycrypt | 46ea779f2de983a9d6caa974a3b932590af5c156 | [
"MIT"
] | null | null | null | from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES
key = get_random_bytes(32) # 32 bytes * 8 = 256 bits (1 byte = 8 bits)
print(key)
output_file = 'enc/encrypted.bin'
file = open("supernoooichFile.adoc", "rb")
data = file.read(-1)
cipher = AES.new(key, AES.MODE_CFB) # CFB mode
ciphered_data = cipher.encrypt(data) # Only need to encrypt the data, no padding required for this mode
file_out = open(output_file, "wb")
file_out.write(cipher.iv)
file_out.write(ciphered_data)
file_out.close()
input_file= 'enc/encrypted.bin'
file_in = open(input_file, 'rb')
iv = file_in.read(16)
ciphered_data = file_in.read()
file_in.close()
cipher = AES.new(key, AES.MODE_CFB, iv=iv)
original_data = cipher.decrypt(ciphered_data) # No need to un-pad
print(original_data)
f = open('dec/lkjl.adoc', 'wb')
f.write(original_data)
f.close()
#https://nitratine.net/blog/post/python-encryption-and-decryption-with-pycryptodome/
| 24.684211 | 103 | 0.744136 | from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES
key = get_random_bytes(32)
print(key)
output_file = 'enc/encrypted.bin'
file = open("supernoooichFile.adoc", "rb")
data = file.read(-1)
cipher = AES.new(key, AES.MODE_CFB)
ciphered_data = cipher.encrypt(data)
file_out = open(output_file, "wb")
file_out.write(cipher.iv)
file_out.write(ciphered_data)
file_out.close()
input_file= 'enc/encrypted.bin'
file_in = open(input_file, 'rb')
iv = file_in.read(16)
ciphered_data = file_in.read()
file_in.close()
cipher = AES.new(key, AES.MODE_CFB, iv=iv)
original_data = cipher.decrypt(ciphered_data)
print(original_data)
f = open('dec/lkjl.adoc', 'wb')
f.write(original_data)
f.close()
| true | true |
1c4691903b60395ec59f186203e784ee1996ad0a | 1,966 | py | Python | ex35.py | Zinmarlwin711/python-exercises | 361cb426a8bc03760906e25b6cb6a4a458260bfc | [
"MIT"
] | null | null | null | ex35.py | Zinmarlwin711/python-exercises | 361cb426a8bc03760906e25b6cb6a4a458260bfc | [
"MIT"
] | null | null | null | ex35.py | Zinmarlwin711/python-exercises | 361cb426a8bc03760906e25b6cb6a4a458260bfc | [
"MIT"
] | null | null | null | from sys import exit
def gold_room():
    """Gold room: taking fewer than 50 pieces wins; anything else kills you."""
    print("This room is full of gold. How much do you take?")
    choice = input("> ")
    # Quirk kept from the original exercise: any answer containing the digit
    # '0' or '1' is treated as numeric. int() can still raise ValueError for
    # inputs like "10 coins".
    if "0" in choice or "1" in choice:
        how_much = int (choice)
    else:
        # dead() exits the process, so how_much is never read unbound.
        dead("Man, learn to type a number.")
    if how_much < 50:
        print("Nice, you're not greedy, you win!")
        exit(0)
    else:
        dead("You greedy bastard!")
def bear_room():
    """Bear room: taunt the bear once to clear the door, then open it.

    Taking the honey, or taunting a second time, ends the game.
    """
    print("There is a bear here.")
    print("The bear has bunch of honey.")
    print("The fat bear is in front of another door.")
    print("How are you going to move the bear?")
    bear_moved = False
    while True:
        choice = input("> ")
        if choice == "take honey":
            dead ("The bear looks at you then slaps your face.")
        elif choice == "taunt bear" and not bear_moved:
            print("The bear has moved from the door.")
            print("You can go through it now.")
            bear_moved = True
        elif choice == "taunt bear" and bear_moved:
            # Second taunt is fatal.
            dead("The bear gets pissed off and chews your legs.")
        elif choice == "open door" and bear_moved:
            gold_room()
        else:
            print("I got no idea what that means.")
def cthulhu_room():
    """Cthulhu room: 'flee' restarts the game, 'head' kills you, else retry."""
    print("Here you see the great evil Cthulhu.")
    print("He, it, whatever stares at you and you go insane.")
    print("Do you flee for your life or eat your head?")
    choice = input("> ")
    if "flee" in choice:
        start()
    elif "head" in choice:
        dead ("Well that was tasty!")
    else:
        # Anything else re-prompts via recursion.
        cthulhu_room()
def dead(why):
    """Print the reason for death plus a sarcastic sign-off, then quit."""
    sign_off = "Good job!"
    print(why, sign_off)
    exit(0)
def start():
    """Entry room: 'left' leads to the bear, 'right' to Cthulhu, else death."""
    print("You are in a dark room.")
    print("There is a door to your right and left.")
    print("Which one do you take?")
    choice = input("> ")
    if choice == "left":
        bear_room()
    elif choice == "right":
        cthulhu_room()
    else:
        dead("You stumble around the room until you starve.")
start()
| 26.931507 | 65 | 0.571719 | from sys import exit
def gold_room():
print("This room is full of gold. How much do you take?")
choice = input("> ")
if "0" in choice or "1" in choice:
how_much = int (choice)
else:
dead("Man, learn to type a number.")
if how_much < 50:
print("Nice, you're not greedy, you win!")
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print("There is a bear here.")
print("The bear has bunch of honey.")
print("The fat bear is in front of another door.")
print("How are you going to move the bear?")
bear_moved = False
while True:
choice = input("> ")
if choice == "take honey":
dead ("The bear looks at you then slaps your face.")
elif choice == "taunt bear" and not bear_moved:
print("The bear has moved from the door.")
print("You can go through it now.")
bear_moved = True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your legs.")
elif choice == "open door" and bear_moved:
gold_room()
else:
print("I got no idea what that means.")
def cthulhu_room():
print("Here you see the great evil Cthulhu.")
print("He, it, whatever stares at you and you go insane.")
print("Do you flee for your life or eat your head?")
choice = input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead ("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print(why, "Good job!")
exit(0)
def start():
print("You are in a dark room.")
print("There is a door to your right and left.")
print("Which one do you take?")
choice = input("> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
| true | true |
1c4691faed7347e61b1a3bcb8447db2c3d16ec2e | 801 | py | Python | DataMining/Stats/coord_bounds.py | CKPalk/SeattleCrime_DM | 0bfbf597ef7c4e87a4030e1c03f62b2f4c9f3c5b | [
"MIT"
] | null | null | null | DataMining/Stats/coord_bounds.py | CKPalk/SeattleCrime_DM | 0bfbf597ef7c4e87a4030e1c03f62b2f4c9f3c5b | [
"MIT"
] | null | null | null | DataMining/Stats/coord_bounds.py | CKPalk/SeattleCrime_DM | 0bfbf597ef7c4e87a4030e1c03f62b2f4c9f3c5b | [
"MIT"
] | null | null | null | ''' Work of Cameron Palk '''
import sys
import pandas as pd
def main( argv ):
    """Load a crime CSV and print the lat/long bounds of the cleaned rows.

    argv[0] is the input CSV path (must have Latitude/Longitude columns);
    argv[1] is accepted for compatibility but currently unused. Prints a
    usage message and returns if either argument is missing.
    """
    try:
        csv_filepath = argv[ 0 ]
        output_filepath = argv[ 1 ]  # NOTE(review): parsed but never written to
    except IndexError:
        print( "Error, usage: \"python3 coord_bounds.py <CSV> <output_file>\"" )
        return
    training_data = pd.read_csv( csv_filepath )
    # Copy only plausible Seattle coordinates; rows outside either bound get NaN.
    training_data[ 'clean_Latitude' ] = training_data[ training_data.Latitude > 47 ].Latitude
    training_data[ 'clean_Longitude' ] = training_data[ training_data.Longitude < -122 ].Longitude
    # Bug fix: dropna() returns a new frame — the original discarded the
    # result, so NaN rows survived. Keep the cleaned frame for min()/max().
    training_data = training_data.dropna()
    print( training_data[ 'clean_Latitude' ] )
    for axis in [ 'clean_Longitude', 'clean_Latitude' ]:
        print( "{:16} min: {:16} max: {:16}".format(
            axis,
            min( training_data[ axis ] ),
            max( training_data[ axis ] )
        ) )
#
if __name__=='__main__':
main( sys.argv[ 1: ] )
| 23.558824 | 95 | 0.66417 |
import sys
import pandas as pd
def main( argv ):
try:
csv_filepath = argv[ 0 ]
output_filepath = argv[ 1 ]
except IndexError:
print( "Error, usage: \"python3 coord_bounds.py <CSV> <output_file>\"" )
return
training_data = pd.read_csv( csv_filepath )
training_data[ 'clean_Latitude' ] = training_data[ training_data.Latitude > 47 ].Latitude
training_data[ 'clean_Longitude' ] = training_data[ training_data.Longitude < -122 ].Longitude
training_data.dropna()
print( training_data[ 'clean_Latitude' ] )
for axis in [ 'clean_Longitude', 'clean_Latitude' ]:
print( "{:16} min: {:16} max: {:16}".format(
axis,
min( training_data[ axis ] ),
max( training_data[ axis ] )
) )
if __name__=='__main__':
main( sys.argv[ 1: ] )
| true | true |
1c4691ff67ef169df40aa3167b4cbb97f94211d2 | 1,425 | py | Python | dufi/gui/boxes/ttkstyles.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | dufi/gui/boxes/ttkstyles.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | dufi/gui/boxes/ttkstyles.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | # [SublimeLinter @python:3]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
try:
from tkinter import ttk
from tkinter import font as tk_font
except ImportError:
import ttk
import tkFont as tk_font
_ttk_styles = set()
_ttk_style_customizers = set()
def create_ttk_styles():
for func in _ttk_style_customizers:
func()
def _ttk_style_customizer(func):
_ttk_style_customizers.add(func)
def wrapper():
if func.__name__ not in _ttk_styles:
func()
_ttk_styles.add(func.__name__)
return wrapper
@_ttk_style_customizer
def create_ttk_style_white_frame():
    """Define white-background variants of Frame, Label and Checkbutton."""
    style = ttk.Style()
    for style_name in ("WhiteFrame.TFrame",
                       "WhiteLabel.TLabel",
                       "WhiteCheckbutton.TCheckbutton"):
        style.configure(style_name, background="White")
@_ttk_style_customizer
def create_ttk_style_plain_notebook():
    """Define a borderless notebook style whose tabs are not drawn."""
    style = ttk.Style()
    style.configure("Plain.TNotebook", borderwidth=0)
    # An empty layout removes the tab element entirely.
    style.layout("Plain.TNotebook.Tab", [])
@_ttk_style_customizer
def create_ttk_style_bold_label():
    """Define bold label styles (plain and white-background variants)."""
    # A literal font spec replaces the removed commented-out code that
    # copied and mutated TkDefaultFont.
    bold_font = ("Segoe UI", 9, "bold")
    ttk.Style().configure("Bold.TLabel", font=bold_font)
    ttk.Style().configure("WhiteLabelBold.TLabel", font=bold_font, background="White")
| 25.909091 | 86 | 0.716491 |
from __future__ import unicode_literals, division, print_function, absolute_import
try:
from tkinter import ttk
from tkinter import font as tk_font
except ImportError:
import ttk
import tkFont as tk_font
_ttk_styles = set()
_ttk_style_customizers = set()
def create_ttk_styles():
for func in _ttk_style_customizers:
func()
def _ttk_style_customizer(func):
_ttk_style_customizers.add(func)
def wrapper():
if func.__name__ not in _ttk_styles:
func()
_ttk_styles.add(func.__name__)
return wrapper
@_ttk_style_customizer
def create_ttk_style_white_frame():
ttk.Style().configure("WhiteFrame.TFrame", background="White")
ttk.Style().configure("WhiteLabel.TLabel", background="White")
ttk.Style().configure("WhiteCheckbutton.TCheckbutton", background="White")
@_ttk_style_customizer
def create_ttk_style_plain_notebook():
s = ttk.Style()
s.configure("Plain.TNotebook", borderwidth=0)
s.layout("Plain.TNotebook.Tab", [])
@_ttk_style_customizer
def create_ttk_style_bold_label():
bold_font = ("Segoe UI", 9, "bold")
ttk.Style().configure("Bold.TLabel", font=bold_font)
ttk.Style().configure("WhiteLabelBold.TLabel", font=bold_font, background="White")
| true | true |
1c4692b085f29de50a0f9046a066367f90b43390 | 3,641 | py | Python | learn/cnn/extractCNNFeaturesOnRegions.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | [
"MIT"
] | 8 | 2016-11-20T19:43:45.000Z | 2020-12-09T04:58:05.000Z | learn/cnn/extractCNNFeaturesOnRegions.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | [
"MIT"
] | 45 | 2015-05-04T20:41:05.000Z | 2017-07-17T12:04:13.000Z | learn/cnn/extractCNNFeaturesOnRegions.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | [
"MIT"
] | 9 | 2016-11-20T19:43:46.000Z | 2020-09-01T21:01:54.000Z | # https://github.com/UCB-ICSI-Vision-Group/decaf-release/wiki/imagenet
import os, sys
from utils import tic, toc
import numpy as np
import time
##################################
# Parameter checking
#################################
if len(sys.argv) < 6:
print 'Use: extractCNNFeatures.py bboxes imgsDir modelFile pretrainedModel outputDir'
sys.exit()
bboxes = [ (x,x.split()) for x in open(sys.argv[1])]
imgsDir = sys.argv[2]
MODEL_FILE = sys.argv[3]
PRETRAINED = sys.argv[4]
outDir = sys.argv[5]
from caffe import wrapperv0
IMG_DIM = 256
CROP_SIZE = 227
CONTEXT_PAD = 16
batch = 50
meanImage = '/u/sciteam/caicedor/scratch/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy'
#meanImage = '/home/caicedo/workspace/sync/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy'
net = wrapperv0.ImageNetClassifier(MODEL_FILE, PRETRAINED, IMAGE_DIM=IMG_DIM, CROPPED_DIM=CROP_SIZE, MEAN_IMAGE=meanImage)
net.caffenet.set_mode_gpu()
net.caffenet.set_phase_test()
ImageNetMean = net._IMAGENET_MEAN.swapaxes(1, 2).swapaxes(0, 1).astype('float32')
##################################
# Functions
#################################
def processImg(info, filename, idx, batchSize, layers, output):
    """Extract CNN features for all boxes of one image, saving one matrix
    per requested layer.

    Args:
        info: list of [x1, y1, x2, y2, source_line] box records.
        filename: path to the image file.
        idx: open file handle; each box's source line is written to it.
        batchSize: number of boxes per network forward pass.
        layers: dict name -> {'dim': feature dimension, 'idx': blob name}.
        output: output path prefix; writes '<output>.<layer>' per layer.
    """
    startTime = tic()
    allFeat = {}
    n = len(info)
    for l in layers.keys():
        allFeat[l] = emptyMatrix([n, layers[l]['dim']])
    # Ceil division; '//' keeps this an int under Python 3 as well.
    numBatches = (n + batchSize - 1) // batchSize
    # Write the index file (plain loop, not a throwaway list comprehension).
    for b in info:
        idx.write(b[4])
    # Prepare boxes; pad with dummy rows so the last batch is full.
    boxes = [x[:-1] for x in info] + [[0, 0, 0, 0] for _ in range(numBatches * batchSize - n)]
    # Initialize the image
    net.caffenet.InitializeImage(filename, IMG_DIM, ImageNetMean, CROP_SIZE)
    for k in range(numBatches):
        s, f = k * batchSize, (k + 1) * batchSize
        e = batchSize if f <= n else n - s
        # Forward this batch
        net.caffenet.ForwardRegions(boxes[s:f], CONTEXT_PAD)
        outputs = net.caffenet.blobs
        f = n if f > n else f
        # Collect outputs for every requested layer (drop the padded rows).
        for l in layers.keys():
            allFeat[l][s:f, :] = outputs[layers[l]['idx']].data[0:e, :, :, :].reshape([e, layers[l]['dim']])
    # Release image data
    net.caffenet.ReleaseImageData()
    # Save one feature matrix per layer for this image.
    for l in layers.keys():
        saveMatrix(allFeat[l][0:n, :], output + '.' + l)
    lap = toc('GPU is done with ' + str(len(info)) + ' boxes in:', startTime)
def emptyMatrix(size):
    """Return a zero-filled float32 array of the given shape.

    Passing dtype directly to np.zeros avoids the extra allocation and copy
    that zeros(...).astype(float32) performed.
    """
    return np.zeros(size, dtype=np.float32)
def saveMatrix(matrix, outFile):
    """Save *matrix* to *outFile* in compressed .npz format.

    The file is opened in binary mode ('wb' - npz is a binary format) and a
    with-block guarantees it is closed even if savez_compressed raises.
    """
    with open(outFile, 'wb') as outf:
        np.savez_compressed(outf, matrix)
##################################
# Organize boxes by source image
#################################
startTime = tic()
images = {}
for s,box in bboxes:
# Subtract 1 because RCNN proposals have 1-based indexes for Matlab
b = map(lambda x: int(x)-1,box[1:]) + [s]
#b = map(int,box[1:]) + [s]
try:
images[ box[0] ].append(b)
except:
images[ box[0] ] = [b]
lap = toc('Reading boxes file:',startTime)
#################################
# Extract Features
#################################
totalItems = len(bboxes)
del(bboxes)
#layers = {'fc7': {'dim':4096,'idx':'fc7'}}
layers = {'prob': {'dim':21, 'idx':'prob'}}
print 'Extracting features for',totalItems,'total images'
for name in images.keys():
# Check if files already exist
processed = 0
for l in layers.keys():
if os.path.isfile(outDir+'/'+name+'.'+l):
processed += 1
if processed == len(layers):
continue
# Get window proposals
indexFile = open(outDir+'/'+name+'.idx','w')
processImg(images[name], imgsDir+'/'+name+'.jpg', indexFile, batch, layers, outDir+'/'+name)
indexFile.close()
toc('Total processing time:',startTime)
| 30.855932 | 122 | 0.632519 |
import os, sys
from utils import tic, toc
import numpy as np
import time
| false | true |
1c4692d5cb27e6291a7c4ae6d099c67074bc40ee | 5,735 | py | Python | src/web-application/app.py | hitesh009911/thrain | 2535cddb8908772cbac3ba9fed194623aa9334d6 | [
"MIT"
] | null | null | null | src/web-application/app.py | hitesh009911/thrain | 2535cddb8908772cbac3ba9fed194623aa9334d6 | [
"MIT"
] | null | null | null | src/web-application/app.py | hitesh009911/thrain | 2535cddb8908772cbac3ba9fed194623aa9334d6 | [
"MIT"
] | null | null | null | import os
import os
import os.path
import pickle
import random

from flask import (Flask, flash, redirect, render_template, request,
                   send_file, send_from_directory, session, url_for)
from werkzeug.utils import secure_filename

import DH
UPLOAD_KEY = './media/public-keys/'
ALLOWED_EXTENSIONS = set(['txt'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
'''
-----------------------------------------------------------
PAGE REDIRECTS
-----------------------------------------------------------
'''
def post_upload_redirect():
    """Render the confirmation page shown after a successful upload."""
    return render_template('post-upload.html')
@app.route('/register')
def call_page_register_user():
    """Serve the user-registration form."""
    return render_template('register.html')
@app.route('/home')
def back_home():
    """Serve the landing page (alias of '/')."""
    return render_template('index.html')
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/upload-file')
def call_page_upload():
    """Serve the file-upload form."""
    return render_template('upload.html')
'''
-----------------------------------------------------------
DOWNLOAD KEY-FILE
-----------------------------------------------------------
'''
@app.route('/public-key-directory/retrieve/key/<username>')
def download_public_key(username):
    """Send the public-key file whose name starts with *username*.

    Returns an explicit "not found" page instead of implicitly returning
    None, which Flask would turn into a 500 error. The local name that
    shadowed the ``list`` builtin is also gone.
    """
    for root, dirs, files in os.walk(UPLOAD_KEY):
        for entry in files:
            # Key files are named '<username>-<Name>-PublicKey.pem'.
            if entry.split('-')[0] == username:
                return send_file(UPLOAD_KEY + entry,
                                 attachment_filename='publicKey.pem',
                                 as_attachment=True)
    return render_template('public-key-list.html',
                           msg='Aww snap! No public key found in the database')
@app.route('/file-directory/retrieve/file/<filename>')
def download_file(filename):
    """Send an uploaded text file by name.

    The URL segment is untrusted input, so it is sanitized with
    secure_filename to prevent path traversal outside UPLOAD_FOLDER
    (the original joined the raw name directly).
    """
    filepath = os.path.join(UPLOAD_FOLDER, secure_filename(filename))
    if os.path.isfile(filepath):
        return send_file(filepath,
                         attachment_filename='fileMessage-thrainSecurity.txt',
                         as_attachment=True)
    return render_template('file-list.html',
                           msg='An issue encountered, our team is working on that')
'''
-----------------------------------------------------------
BUILD - DISPLAY FILE - KEY DIRECTORY
-----------------------------------------------------------
'''
# Build public key directory
@app.route('/public-key-directory/')
def downloads_pk():
    """List every registered username so their public keys can be fetched.

    Usernames come from the pickled registry; a with-block replaces the
    manual open/close so the handle is released even if load() raises.
    """
    usernames = []
    if os.path.isfile("./media/database/database_1.pickle"):
        with open("./media/database/database_1.pickle", "rb") as pickleObj:
            usernames = pickle.load(pickleObj)
    if not usernames:
        return render_template('public-key-list.html', msg='Aww snap! No public key found in the database')
    return render_template('public-key-list.html', msg='', itr=0, length=len(usernames), directory=usernames)
# Build file directory
@app.route('/file-directory/')
def download_f():
    """List the uploaded files in UPLOAD_FOLDER.

    Only the top level of the directory is listed (the original returned on
    the first os.walk() tuple). An explicit empty-message fallback replaces
    the implicit None that was returned when the directory did not exist.
    """
    for root, dirs, files in os.walk(UPLOAD_FOLDER):
        if files:
            return render_template('file-list.html', msg='', itr=0, length=len(files), list=files)
        break
    return render_template('file-list.html', msg='Aww snap! No file found in directory')
'''
-----------------------------------------------------------
UPLOAD ENCRYPTED FILE
-----------------------------------------------------------
'''
@app.route('/data', methods=['GET', 'POST'])
def upload_file():
    """Accept an encrypted text-file upload and store it in UPLOAD_FOLDER.

    Fixes relative to the original:
    * flash() is now actually imported (see the flask import line); it was
      called without an import and raised NameError at runtime.
    * the sanitized ``filename`` is used for saving - the original computed
      secure_filename() but then saved the raw, attacker-controlled
      file.filename, allowing path traversal.
    * the extension whitelist (allowed_file) is enforced, so the final
      'Invalid File Format !' branch is reachable as intended.
    """
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # An unselected file arrives as an empty-named part.
        if file.filename == '':
            flash('No selected file')
            return 'NO FILE SELECTED'
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return post_upload_redirect()
    return 'Invalid File Format !'
'''
-----------------------------------------------------------
REGISTER UNIQUE USERNAME AND GENERATE PUBLIC KEY WITH FILE
-----------------------------------------------------------
'''
@app.route('/register-new-user', methods=['GET', 'POST'])
def register_user():
    """Register a unique username, generate a DH key pair, and show the
    private key to the user.

    The pickled registries keep usernames and private keys unique. All file
    handles now use with-blocks; the original never closed the public-key
    file, so the key could be lost on interpreter exit.
    """
    privatekeylist = []
    usernamelist = []
    # Load existing registries so uniqueness can be enforced.
    if os.path.isfile("./media/database/database.pickle"):
        with open("./media/database/database.pickle", "rb") as pickleObj:
            privatekeylist = pickle.load(pickleObj)
    if os.path.isfile("./media/database/database_1.pickle"):
        with open("./media/database/database_1.pickle", "rb") as pickleObj:
            usernamelist = pickle.load(pickleObj)
    if request.form['username'] in usernamelist:
        return render_template('register.html', name='Username already exists')
    username = request.form['username']
    firstname = request.form['first-name']
    secondname = request.form['last-name']
    # pin in [0, 63], seeds private-key generation.
    # NOTE(review): ``random`` is not cryptographically secure; consider
    # the ``secrets`` module if this pin has security weight - confirm
    # against DH.generate_private_key's contract.
    pin = int(random.randint(1, 128)) % 64
    # Generate a private key that is unique across all registered users.
    # The registry stores strings, so compare as str - the original compared
    # the raw key object against strings and never detected a collision.
    privatekey = DH.generate_private_key(pin)
    while str(privatekey) in privatekeylist:
        privatekey = DH.generate_private_key(pin)
    privatekeylist.append(str(privatekey))
    usernamelist.append(username)
    # Persist the updated registries.
    with open("./media/database/database.pickle", "wb") as pickleObj:
        pickle.dump(privatekeylist, pickleObj)
    with open("./media/database/database_1.pickle", "wb") as pickleObj:
        pickle.dump(usernamelist, pickleObj)
    # Write the public key for the new user.
    filename = UPLOAD_KEY + username + '-' + secondname.upper() + firstname.lower() + '-PublicKey.pem'
    publickey = DH.generate_public_key(privatekey)
    with open(filename, "w") as fileObject:
        fileObject.write(str(publickey))
    return render_template('key-display.html', privatekey=str(privatekey))
if __name__ == '__main__':
    # NOTE(review): binds the builtin dev server to all interfaces on
    # privileged port 80; use a production WSGI server for deployment.
    app.run(host="0.0.0.0", port=80)
    #app.run(debug=True)
| 33.735294 | 109 | 0.6551 | import os
import os.path
from flask import Flask, request, redirect, url_for, render_template, session, send_from_directory, send_file
from werkzeug.utils import secure_filename
import DH
import pickle
import random
UPLOAD_FOLDER = './media/text-files/'
UPLOAD_KEY = './media/public-keys/'
ALLOWED_EXTENSIONS = set(['txt'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def post_upload_redirect():
return render_template('post-upload.html')
@app.route('/register')
def call_page_register_user():
return render_template('register.html')
@app.route('/home')
def back_home():
return render_template('index.html')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/upload-file')
def call_page_upload():
return render_template('upload.html')
@app.route('/public-key-directory/retrieve/key/<username>')
def download_public_key(username):
for root,dirs,files in os.walk('./media/public-keys/'):
for file in files:
list = file.split('-')
if list[0] == username:
filename = UPLOAD_KEY+file
return send_file(filename, attachment_filename='publicKey.pem',as_attachment=True)
@app.route('/file-directory/retrieve/file/<filename>')
def download_file(filename):
filepath = UPLOAD_FOLDER+filename
if(os.path.isfile(filepath)):
return send_file(filepath, attachment_filename='fileMessage-thrainSecurity.txt',as_attachment=True)
else:
return render_template('file-list.html',msg='An issue encountered, our team is working on that')
@app.route('/public-key-directory/')
def downloads_pk():
username = []
if(os.path.isfile("./media/database/database_1.pickle")):
pickleObj = open("./media/database/database_1.pickle","rb")
username = pickle.load(pickleObj)
pickleObj.close()
if len(username) == 0:
return render_template('public-key-list.html',msg='Aww snap! No public key found in the database')
else:
return render_template('public-key-list.html',msg='',itr = 0, length = len(username),directory=username)
@app.route('/file-directory/')
def download_f():
for root,dirs,files in os.walk(UPLOAD_FOLDER):
if(len(files) == 0):
return render_template('file-list.html',msg='Aww snap! No file found in directory')
else:
return render_template('file-list.html',msg='',itr=0,length=len(files),list=files)
@app.route('/data', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return 'NO FILE SELECTED'
if file:
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))
return post_upload_redirect()
return 'Invalid File Format !'
@app.route('/register-new-user', methods = ['GET', 'POST'])
def register_user():
files = []
privatekeylist = []
usernamelist = []
if(os.path.isfile("./media/database/database.pickle")):
pickleObj = open("./media/database/database.pickle","rb")
privatekeylist = pickle.load(pickleObj)
pickleObj.close()
if(os.path.isfile("./media/database/database_1.pickle")):
pickleObj = open("./media/database/database_1.pickle","rb")
usernamelist = pickle.load(pickleObj)
pickleObj.close()
if request.form['username'] in usernamelist:
return render_template('register.html', name='Username already exists')
username = request.form['username']
firstname = request.form['first-name']
secondname = request.form['last-name']
pin = int(random.randint(1,128))
pin = pin % 64
privatekey = DH.generate_private_key(pin)
while privatekey in privatekeylist:
privatekey = DH.generate_private_key(pin)
privatekeylist.append(str(privatekey))
usernamelist.append(username)
pickleObj = open("./media/database/database.pickle","wb")
pickle.dump(privatekeylist,pickleObj)
pickleObj.close()
pickleObj = open("./media/database/database_1.pickle","wb")
pickle.dump(usernamelist,pickleObj)
pickleObj.close()
filename = UPLOAD_KEY+username+'-'+secondname.upper()+firstname.lower()+'-PublicKey.pem'
publickey = DH.generate_public_key(privatekey)
fileObject = open(filename,"w")
fileObject.write(str(publickey))
return render_template('key-display.html',privatekey=str(privatekey))
if __name__ == '__main__':
app.run(host="0.0.0.0", port=80)
| true | true |
1c4694493a461d612c26de35ae341c0d3a012bea | 4,594 | py | Python | plasmapy/utils/roman/tests/test_roman.py | KhalilBryant/PlasmaPy | 05f7cb60348c7048fb3b8fbaf25985f2fba47fb7 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2020-02-14T16:35:02.000Z | 2020-02-14T16:35:02.000Z | plasmapy/utils/roman/tests/test_roman.py | KhalilBryant/PlasmaPy | 05f7cb60348c7048fb3b8fbaf25985f2fba47fb7 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | plasmapy/utils/roman/tests/test_roman.py | KhalilBryant/PlasmaPy | 05f7cb60348c7048fb3b8fbaf25985f2fba47fb7 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | import pytest
import numpy as np
import plasmapy.utils.roman as roman
from plasmapy.utils.pytest_helpers import run_test
# (integer, expected Roman numeral) pairs covering 1-122, some boundary
# values, and NumPy-integer cases. np.int_ replaces the deprecated np.int
# alias, which was removed in NumPy 1.24.
ints_and_roman_numerals = [
    (1, "I"),
    (2, "II"),
    (3, "III"),
    (4, "IV"),
    (5, "V"),
    (6, "VI"),
    (7, "VII"),
    (8, "VIII"),
    (9, "IX"),
    (10, "X"),
    (11, "XI"),
    (12, "XII"),
    (13, "XIII"),
    (14, "XIV"),
    (15, "XV"),
    (16, "XVI"),
    (17, "XVII"),
    (18, "XVIII"),
    (19, "XIX"),
    (20, "XX"),
    (21, "XXI"),
    (22, "XXII"),
    (23, "XXIII"),
    (24, "XXIV"),
    (25, "XXV"),
    (26, "XXVI"),
    (27, "XXVII"),
    (28, "XXVIII"),
    (29, "XXIX"),
    (30, "XXX"),
    (31, "XXXI"),
    (32, "XXXII"),
    (33, "XXXIII"),
    (34, "XXXIV"),
    (35, "XXXV"),
    (36, "XXXVI"),
    (37, "XXXVII"),
    (38, "XXXVIII"),
    (39, "XXXIX"),
    (40, "XL"),
    (41, "XLI"),
    (42, "XLII"),
    (43, "XLIII"),
    (44, "XLIV"),
    (45, "XLV"),
    (46, "XLVI"),
    (47, "XLVII"),
    (48, "XLVIII"),
    (49, "XLIX"),
    (50, "L"),
    (51, "LI"),
    (52, "LII"),
    (53, "LIII"),
    (54, "LIV"),
    (55, "LV"),
    (56, "LVI"),
    (57, "LVII"),
    (58, "LVIII"),
    (59, "LIX"),
    (60, "LX"),
    (61, "LXI"),
    (62, "LXII"),
    (63, "LXIII"),
    (64, "LXIV"),
    (65, "LXV"),
    (66, "LXVI"),
    (67, "LXVII"),
    (68, "LXVIII"),
    (69, "LXIX"),
    (70, "LXX"),
    (71, "LXXI"),
    (72, "LXXII"),
    (73, "LXXIII"),
    (74, "LXXIV"),
    (75, "LXXV"),
    (76, "LXXVI"),
    (77, "LXXVII"),
    (78, "LXXVIII"),
    (79, "LXXIX"),
    (80, "LXXX"),
    (81, "LXXXI"),
    (82, "LXXXII"),
    (83, "LXXXIII"),
    (84, "LXXXIV"),
    (85, "LXXXV"),
    (86, "LXXXVI"),
    (87, "LXXXVII"),
    (88, "LXXXVIII"),
    (89, "LXXXIX"),
    (90, "XC"),
    (91, "XCI"),
    (92, "XCII"),
    (93, "XCIII"),
    (94, "XCIV"),
    (95, "XCV"),
    (96, "XCVI"),
    (97, "XCVII"),
    (98, "XCVIII"),
    (99, "XCIX"),
    (100, "C"),
    (101, "CI"),
    (102, "CII"),
    (103, "CIII"),
    (104, "CIV"),
    (105, "CV"),
    (106, "CVI"),
    (107, "CVII"),
    (108, "CVIII"),
    (109, "CIX"),
    (110, "CX"),
    (111, "CXI"),
    (112, "CXII"),
    (113, "CXIII"),
    (114, "CXIV"),
    (115, "CXV"),
    (116, "CXVI"),
    (117, "CXVII"),
    (118, "CXVIII"),
    (119, "CXIX"),
    (120, "CXX"),
    (121, "CXXI"),
    (122, "CXXII"),
    (188, "CLXXXVIII"),
    (189, "CLXXXIX"),
    (198, "CXCVIII"),
    (199, "CXCIX"),
    (200, "CC"),
    (np.int_(9), "IX"),
    (np.int16(10), "X"),
    (np.int32(11), "XI"),
    (np.int64(14), "XIV"),
]
toRoman_exceptions_table = [
("X", TypeError),
(-1, roman.OutOfRangeError),
(0, roman.OutOfRangeError),
(5000, roman.OutOfRangeError),
]
fromRoman_exceptions_table = [
("asdfasd", roman.InvalidRomanNumeralError),
(1, TypeError),
("xi", roman.InvalidRomanNumeralError),
]
@pytest.mark.parametrize("integer, roman_numeral", ints_and_roman_numerals)
def test_to_roman(integer, roman_numeral):
"""
Test that `~plasmapy.utils.roman.to_roman` correctly converts
integers to Roman numerals.
"""
run_test(func=roman.to_roman, args=integer, expected_outcome=roman_numeral)
@pytest.mark.parametrize("integer, roman_numeral", ints_and_roman_numerals)
def test_from_roman(integer, roman_numeral):
"""
Test that `~plasmapy.utils.roman.from_roman` correctly converts
Roman numerals to integers.
"""
run_test(func=roman.from_roman, args=roman_numeral, expected_outcome=int(integer))
@pytest.mark.parametrize("input, expected_exception", toRoman_exceptions_table)
def test_to_roman_exceptions(input, expected_exception):
"""
Test that `~plasmapy.utils.roman.to_roman` raises the correct
exceptions when necessary.
"""
run_test(func=roman.to_roman, args=input, expected_outcome=expected_exception)
@pytest.mark.parametrize("input, expected_exception", fromRoman_exceptions_table)
def test_from_roman_exceptions(input, expected_exception):
"""
Test that `~plasmapy.utils.roman.from_roman` raises the correct
exceptions when necessary.
"""
run_test(func=roman.from_roman, args=input, expected_outcome=expected_exception)
test_is_roman_numeral_table = [
("I", True),
("i", False),
("CLXXXVIII", True),
(1, TypeError),
("khjfda", False),
("VIIII", False),
("IXX", False),
(("I", "II"), TypeError),
]
@pytest.mark.parametrize("input, expected", test_is_roman_numeral_table)
def test_is_roman_numeral(input, expected):
run_test(func=roman.is_roman_numeral, args=input, expected_outcome=expected)
| 22.300971 | 86 | 0.529822 | import pytest
import numpy as np
import plasmapy.utils.roman as roman
from plasmapy.utils.pytest_helpers import run_test
ints_and_roman_numerals = [
(1, "I"),
(2, "II"),
(3, "III"),
(4, "IV"),
(5, "V"),
(6, "VI"),
(7, "VII"),
(8, "VIII"),
(9, "IX"),
(10, "X"),
(11, "XI"),
(12, "XII"),
(13, "XIII"),
(14, "XIV"),
(15, "XV"),
(16, "XVI"),
(17, "XVII"),
(18, "XVIII"),
(19, "XIX"),
(20, "XX"),
(21, "XXI"),
(22, "XXII"),
(23, "XXIII"),
(24, "XXIV"),
(25, "XXV"),
(26, "XXVI"),
(27, "XXVII"),
(28, "XXVIII"),
(29, "XXIX"),
(30, "XXX"),
(31, "XXXI"),
(32, "XXXII"),
(33, "XXXIII"),
(34, "XXXIV"),
(35, "XXXV"),
(36, "XXXVI"),
(37, "XXXVII"),
(38, "XXXVIII"),
(39, "XXXIX"),
(40, "XL"),
(41, "XLI"),
(42, "XLII"),
(43, "XLIII"),
(44, "XLIV"),
(45, "XLV"),
(46, "XLVI"),
(47, "XLVII"),
(48, "XLVIII"),
(49, "XLIX"),
(50, "L"),
(51, "LI"),
(52, "LII"),
(53, "LIII"),
(54, "LIV"),
(55, "LV"),
(56, "LVI"),
(57, "LVII"),
(58, "LVIII"),
(59, "LIX"),
(60, "LX"),
(61, "LXI"),
(62, "LXII"),
(63, "LXIII"),
(64, "LXIV"),
(65, "LXV"),
(66, "LXVI"),
(67, "LXVII"),
(68, "LXVIII"),
(69, "LXIX"),
(70, "LXX"),
(71, "LXXI"),
(72, "LXXII"),
(73, "LXXIII"),
(74, "LXXIV"),
(75, "LXXV"),
(76, "LXXVI"),
(77, "LXXVII"),
(78, "LXXVIII"),
(79, "LXXIX"),
(80, "LXXX"),
(81, "LXXXI"),
(82, "LXXXII"),
(83, "LXXXIII"),
(84, "LXXXIV"),
(85, "LXXXV"),
(86, "LXXXVI"),
(87, "LXXXVII"),
(88, "LXXXVIII"),
(89, "LXXXIX"),
(90, "XC"),
(91, "XCI"),
(92, "XCII"),
(93, "XCIII"),
(94, "XCIV"),
(95, "XCV"),
(96, "XCVI"),
(97, "XCVII"),
(98, "XCVIII"),
(99, "XCIX"),
(100, "C"),
(101, "CI"),
(102, "CII"),
(103, "CIII"),
(104, "CIV"),
(105, "CV"),
(106, "CVI"),
(107, "CVII"),
(108, "CVIII"),
(109, "CIX"),
(110, "CX"),
(111, "CXI"),
(112, "CXII"),
(113, "CXIII"),
(114, "CXIV"),
(115, "CXV"),
(116, "CXVI"),
(117, "CXVII"),
(118, "CXVIII"),
(119, "CXIX"),
(120, "CXX"),
(121, "CXXI"),
(122, "CXXII"),
(188, "CLXXXVIII"),
(189, "CLXXXIX"),
(198, "CXCVIII"),
(199, "CXCIX"),
(200, "CC"),
(np.int(9), "IX"),
(np.int16(10), "X"),
(np.int32(11), "XI"),
(np.int64(14), "XIV"),
]
toRoman_exceptions_table = [
("X", TypeError),
(-1, roman.OutOfRangeError),
(0, roman.OutOfRangeError),
(5000, roman.OutOfRangeError),
]
fromRoman_exceptions_table = [
("asdfasd", roman.InvalidRomanNumeralError),
(1, TypeError),
("xi", roman.InvalidRomanNumeralError),
]
@pytest.mark.parametrize("integer, roman_numeral", ints_and_roman_numerals)
def test_to_roman(integer, roman_numeral):
run_test(func=roman.to_roman, args=integer, expected_outcome=roman_numeral)
@pytest.mark.parametrize("integer, roman_numeral", ints_and_roman_numerals)
def test_from_roman(integer, roman_numeral):
run_test(func=roman.from_roman, args=roman_numeral, expected_outcome=int(integer))
@pytest.mark.parametrize("input, expected_exception", toRoman_exceptions_table)
def test_to_roman_exceptions(input, expected_exception):
run_test(func=roman.to_roman, args=input, expected_outcome=expected_exception)
@pytest.mark.parametrize("input, expected_exception", fromRoman_exceptions_table)
def test_from_roman_exceptions(input, expected_exception):
run_test(func=roman.from_roman, args=input, expected_outcome=expected_exception)
test_is_roman_numeral_table = [
("I", True),
("i", False),
("CLXXXVIII", True),
(1, TypeError),
("khjfda", False),
("VIIII", False),
("IXX", False),
(("I", "II"), TypeError),
]
@pytest.mark.parametrize("input, expected", test_is_roman_numeral_table)
def test_is_roman_numeral(input, expected):
run_test(func=roman.is_roman_numeral, args=input, expected_outcome=expected)
| true | true |
1c46946534565ded99af86b7fdb56b8971f0ef04 | 3,778 | py | Python | apps/Todo/tests/TodoModelTests.py | Eduardo-RFarias/DjangoReactBackend | b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad | [
"MIT"
] | null | null | null | apps/Todo/tests/TodoModelTests.py | Eduardo-RFarias/DjangoReactBackend | b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad | [
"MIT"
] | null | null | null | apps/Todo/tests/TodoModelTests.py | Eduardo-RFarias/DjangoReactBackend | b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad | [
"MIT"
] | null | null | null | from apps.User.models import User
from django.test import RequestFactory
from django.utils import timezone
from rest_framework import status
from ..models import Todo
from ..serializers import TodoSerializer
from .BaseAuthenticatedTest import BaseAuthenticatedTest
name = "Test todo"
description = "This is an auto generated Todo"
class TodoModelTests(BaseAuthenticatedTest):
    """CRUD tests for the Todo model and its REST endpoints."""

    def setUp(self) -> None:
        # Authenticate once per test; keep both the serialized user (for
        # hyperlink comparisons) and the model instance (for FK assignment).
        loginResponse = self.login_and_set("03699132137", "123456")
        self.userJson = loginResponse.get("user")
        self.user = User.objects.get(cpf=self.userJson.get("cpf"))

    def _serialize(self, todo):
        """Serialize *todo* the same way the API does; hyperlinked fields
        need a request in the serializer context."""
        request = RequestFactory().post("/")
        return TodoSerializer(todo, context={"request": request}).data

    def _create_todo(self):
        """Create a Todo owned by the logged-in user."""
        return Todo.objects.create(name=name, description=description, owner=self.user)

    def test_todo_model(self):
        todo = Todo.objects.create(name=name, description=description)
        self.assertGreater(todo.id, 0)
        self.assertEqual(todo.name, name)
        self.assertEqual(todo.description, description)
        self.assertFalse(todo.done)
        self.assertLessEqual(todo.created_at, timezone.now())
        self.assertIsNone(todo.owner)
        todo.delete()

    def test_todo_list(self):
        todo = self._create_todo()
        todoJson = self._serialize(todo)
        response = self.client.get("/api/todo/")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for field in ("name", "description", "owner"):
            self.assertEqual(response.data[0].get(field), todoJson.get(field))

    def test_todo_detail(self):
        todo = self._create_todo()
        todoJson = self._serialize(todo)
        response = self.client.get(f"/api/todo/{todo.pk}/")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for field in ("name", "description", "owner"):
            self.assertEqual(response.data.get(field), todoJson.get(field))

    def test_todo_create(self):
        response = self.client.post(
            "/api/todo/",
            {"name": name, "description": description},
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data.get("name"), name)
        self.assertEqual(response.data.get("description"), description)
        # The API must assign ownership to the authenticated user.
        self.assertEqual(response.data.get("owner"), self.userJson.get("url"))

    def test_todo_update(self):
        todo = self._create_todo()
        todoJson = self._serialize(todo)
        new_description = "Updated description for testing"
        todoJson["description"] = new_description
        response = self.client.put(f"/api/todo/{todo.pk}/", todoJson)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get("description"), new_description)

    def test_todo_delete(self):
        todo = self._create_todo()
        response = self.client.delete(f"/api/todo/{todo.pk}/")
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        # Bug fix: assert on the deleted todo's own pk, not the hard-coded
        # pk=1, which passed vacuously whenever no row with pk 1 existed.
        with self.assertRaises(Todo.DoesNotExist):
            Todo.objects.get(pk=todo.pk)
| 34.345455 | 87 | 0.647697 | from apps.User.models import User
from django.test import RequestFactory
from django.utils import timezone
from rest_framework import status
from ..models import Todo
from ..serializers import TodoSerializer
from .BaseAuthenticatedTest import BaseAuthenticatedTest
name = "Test todo"
description = "This is an auto generated Todo"
class TodoModelTests(BaseAuthenticatedTest):
    """Integration tests for the Todo model and its /api/todo/ endpoints."""
    def setUp(self) -> None:
        # Authenticate once per test; keep both the serialized user returned
        # by the login endpoint and the matching ORM instance.
        loginResponse = self.login_and_set("03699132137", "123456")
        self.userJson = loginResponse.get("user")
        self.user = User.objects.get(cpf=self.userJson.get("cpf"))
    def test_todo_model(self):
        """A freshly created Todo gets sane defaults (not done, no owner)."""
        todo = Todo.objects.create(name=name, description=description)
        self.assertGreater(todo.id, 0)
        self.assertEqual(todo.name, name)
        self.assertEqual(todo.description, description)
        self.assertFalse(todo.done)
        self.assertLessEqual(todo.created_at, timezone.now())
        self.assertIsNone(todo.owner)
        todo.delete()
    def test_todo_list(self):
        """GET /api/todo/ lists todos serialized like TodoSerializer."""
        todo = Todo.objects.create(name=name, description=description, owner=self.user)
        request = RequestFactory().post("/")
        todoJson = TodoSerializer(todo, context={"request": request}).data
        response = self.client.get("/api/todo/")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Compare field by field against the reference serializer output.
        for field in ("name", "description", "owner"):
            self.assertEqual(response.data[0].get(field), todoJson.get(field))
    def test_todo_detail(self):
        """GET /api/todo/<pk>/ returns the single serialized todo."""
        todo = Todo.objects.create(name=name, description=description, owner=self.user)
        request = RequestFactory().post("/")
        todoJson = TodoSerializer(todo, context={"request": request}).data
        response = self.client.get(f"/api/todo/{todo.pk}/")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for field in ("name", "description", "owner"):
            self.assertEqual(response.data.get(field), todoJson.get(field))
    def test_todo_create(self):
        """POST /api/todo/ creates a todo owned by the authenticated user."""
        response = self.client.post(
            "/api/todo/",
            {"name": name, "description": description},
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data.get("name"), name)
        self.assertEqual(response.data.get("description"), description)
        self.assertEqual(response.data.get("owner"), self.userJson.get("url"))
    def test_todo_update(self):
        """PUT /api/todo/<pk>/ persists a changed description."""
        todo = Todo.objects.create(name=name, description=description, owner=self.user)
        request = RequestFactory().post("/")
        todoJson = TodoSerializer(todo, context={"request": request}).data
        new_description = "Updated description for testing"
        todoJson["description"] = new_description
        response = self.client.put(f"/api/todo/{todo.pk}/", todoJson)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get("description"), new_description)
    def test_todo_delete(self):
        """DELETE /api/todo/<pk>/ removes the row from the database."""
        todo = Todo.objects.create(name=name, description=description, owner=self.user)
        response = self.client.delete(f"/api/todo/{todo.pk}/")
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        with self.assertRaises(Todo.DoesNotExist):
            # Bug fix: look up the deleted row's own primary key instead of a
            # hard-coded pk=1, which only matches when this row happens to be
            # the first one ever inserted into the test database.
            Todo.objects.get(pk=todo.pk)
| true | true |
1c469501b5b017b977afe49a3917b281287e2777 | 1,611 | py | Python | src/web/monitorforms/heroku-dyno-status-single/__init__.py | anderson-attilio/runbook | 7b68622f75ef09f654046f0394540025f3ee7445 | [
"Apache-2.0"
] | 155 | 2015-07-15T14:06:06.000Z | 2021-03-31T01:41:44.000Z | src/web/monitorforms/heroku-dyno-status-single/__init__.py | anderson-attilio/runbook | 7b68622f75ef09f654046f0394540025f3ee7445 | [
"Apache-2.0"
] | 78 | 2015-01-01T05:49:20.000Z | 2015-07-12T01:48:44.000Z | src/web/monitorforms/heroku-dyno-status-single/__init__.py | Runbook/runbook | 7b68622f75ef09f654046f0394540025f3ee7445 | [
"Apache-2.0"
] | 36 | 2015-07-20T22:42:23.000Z | 2021-12-05T10:00:44.000Z | ######################################################################
# Cloud Routes Web Application
# -------------------------------------------------------------------
# Health Check - Forms Class
######################################################################
from wtforms import TextField
from wtforms.validators import DataRequired
from ..datacenter import DatacenterCheckForm
class CheckForm(DatacenterCheckForm):
    ''' Creates a wtforms form object for monitors '''
    title = "Heroku: Dyno Status"
    description = """
    This monitor will query the status of a specified Dyno within the specified Application. If the Dyno is not in an "up" or "idle" state this monitor will return False. If the Dyno is in a healthy status this monitor will return True.
    """
    # Bug fix: copy the parent's placeholder map before mutating it.  The
    # original code aliased DatacenterCheckForm.placeholders and called
    # update() on it, which modified the shared class attribute in place and
    # leaked these Heroku-specific placeholders into every other monitor
    # form built on DatacenterCheckForm.
    placeholders = dict(DatacenterCheckForm.placeholders)
    placeholders.update({
        'appname': 'Application Name',
        'dynoname': 'web.1',
    })
    # Heroku API key used to authenticate the status query.
    apikey = TextField(
        "API Key",
        description=DatacenterCheckForm.descriptions['apikey'],
        validators=[DataRequired(
            message='API Key is a required field')])
    # Heroku application that owns the dyno being monitored.
    appname = TextField(
        "Application Name",
        description=DatacenterCheckForm.descriptions['heroku']['appname'],
        validators=[DataRequired(
            message='Application Name is a required field')])
    # Dyno identifier within the application, e.g. "web.1".
    dynoname = TextField(
        "Dyno Name",
        description=DatacenterCheckForm.descriptions['heroku']['dynoname'],
        validators=[DataRequired(
            message='Dyno Name is a required field')])
if __name__ == '__main__':
    # Module is import-only; nothing to do when executed as a script.
    pass
| 38.357143 | 236 | 0.590317 | true | true | |
1c469515024b2146bc7f69544490c4e70ded4e10 | 15,622 | py | Python | tests/admin_checks/tests.py | PirosB3/django | 9b729ddd8f2040722971ccfb3b12f7d8162633d1 | [
"BSD-3-Clause"
] | 1 | 2015-05-16T13:13:06.000Z | 2015-05-16T13:13:06.000Z | tests/admin_checks/tests.py | PirosB3/django | 9b729ddd8f2040722971ccfb3b12f7d8162633d1 | [
"BSD-3-Clause"
] | null | null | null | tests/admin_checks/tests.py | PirosB3/django | 9b729ddd8f2040722971ccfb3b12f7d8162633d1 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import warnings
from django import forms
from django.contrib import admin
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from .models import Song, Book, Album, TwoAlbumFKAndAnE, City, State
class SongForm(forms.ModelForm):
    # Minimal ModelForm used as the ``form`` attribute of the test ModelAdmins.
    pass
class ValidFields(admin.ModelAdmin):
    # ModelAdmin whose 'fields' are all provided by the form/model, so the
    # admin system checks should report no errors (regression #8027 fixture).
    form = SongForm
    fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
    def get_form(self, request, obj=None, **kwargs):
        """Return a form with an extra non-model 'name' field; because
        get_form() is overridden, fieldsets checks must skip field
        validation (regression #19445 fixture)."""
        class ExtraFieldForm(SongForm):
            name = forms.CharField(max_length=50)
        return ExtraFieldForm
    fieldsets = (
        (None, {
            'fields': ('name',),
        }),
    )
class SystemChecksTestCase(TestCase):
    def test_checks_are_performed(self):
        """Registering a ModelAdmin wires its check() into checks.run_checks()."""
        class MyAdmin(admin.ModelAdmin):
            @classmethod
            def check(self, model, **kwargs):
                return ['error!']
        admin.site.register(Song, MyAdmin)
        try:
            errors = checks.run_checks()
            expected = ['error!']
            self.assertEqual(errors, expected)
        finally:
            # Always unregister so a failing assertion does not leak the
            # registration into other tests using the global admin site.
            admin.site.unregister(Song)
    def test_readonly_and_editable(self):
        """A read-only field cannot also appear in list_editable (admin.E125)."""
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = ["original_release"]
            list_display = ["pk", "original_release"]
            list_editable = ["original_release"]
            fieldsets = [
                (None, {
                    "fields": ["title", "original_release"],
                }),
            ]
        errors = SongAdmin.check(model=Song)
        expected = [
            checks.Error(
                ("The value of 'list_editable[0]' refers to 'original_release', "
                 "which is not editable through the admin."),
                hint=None,
                obj=SongAdmin,
                id='admin.E125',
            )
        ]
        self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
errors = ValidFields.check(model=Song)
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
Ensure that the fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
Refs #19445.
"""
errors = ValidFormFieldsets.check(model=Song)
self.assertEqual(errors, [])
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = 'foo'
errors = ExcludedFields1.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFields1,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
errors = ExcludedFields2.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint=None,
obj=ExcludedFields2,
id='admin.E015',
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = 'foo'
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin.check(model=Album)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFieldsInline,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
Regression test for #9932 - exclude in InlineModelAdmin should not
contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin.check(model=Album)
expected = [
checks.Error(
("Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'."),
hint=None,
obj=SongInline,
id='admin.E201',
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
"""
Regression test for #15669 - Include app label in admin system check messages
"""
class RawIdNonexistingAdmin(admin.ModelAdmin):
raw_id_fields = ('nonexisting',)
errors = RawIdNonexistingAdmin.check(model=Album)
expected = [
checks.Error(
("The value of 'raw_id_fields[0]' refers to 'nonexisting', which is "
"not an attribute of 'admin_checks.Album'."),
hint=None,
obj=RawIdNonexistingAdmin,
id='admin.E002',
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
"""
Regression test for #11709 - when testing for fk excluding (when exclude is
given) make sure fk_name is honored or things blow up when there is more
than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.",
hint=None,
obj=TwoAlbumFKAndAnEInline,
id='admin.E202',
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_method(self):
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_nonexistant_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistant")
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
("The value of 'readonly_fields[1]' is not a callable, an attribute "
"of 'SongAdmin', or an attribute of 'admin_checks.Song'."),
hint=None,
obj=SongAdmin,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
    def test_nonexistant_field_on_inline(self):
        """readonly_fields on an inline is validated too (admin.E035)."""
        class CityInline(admin.TabularInline):
            model = City
            readonly_fields = ['i_dont_exist']  # Missing attribute
        errors = CityInline.check(State)
        expected = [
            checks.Error(
                ("The value of 'readonly_fields[0]' is not a callable, an attribute "
                 "of 'CityInline', or an attribute of 'admin_checks.City'."),
                hint=None,
                obj=CityInline,
                id='admin.E035',
            )
        ]
        self.assertEqual(errors, expected)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
specifies the 'through' option is included in the 'fields' or the 'fieldsets'
ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ['authors']
errors = BookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model."),
hint=None,
obj=BookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
('Header 1', {'fields': ('name',)}),
('Header 2', {'fields': ('authors',)}),
)
errors = FieldsetBookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
"'authors', because that field manually specifies a relationship model."),
hint=None,
obj=FieldsetBookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ('price', ('name', 'subtitle'))
errors = NestedFieldsAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (
('Main', {'fields': ('price', ('name', 'subtitle'))}),
)
errors = NestedFieldsetAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
is specified as a string, the admin should still be able use
Model.m2m_field.through
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_non_model_fields(self):
"""
Regression for ensuring ModelAdmin.fields can contain non-model fields
that broke with r11737
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['title', 'extra_data']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_non_model_first_field(self):
"""
Regression for ensuring ModelAdmin.field can handle first elem being a
non-model field (test fix for UnboundLocalError introduced with r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = '__all__'
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['extra_data', 'title']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_validator_compatibility(self):
class MyValidator(object):
def validate(self, cls, model):
raise ImproperlyConfigured("error!")
class MyModelAdmin(admin.ModelAdmin):
validator_class = MyValidator
with warnings.catch_warnings(record=True):
warnings.filterwarnings('ignore', module='django.contrib.admin.options')
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
'error!',
hint=None,
obj=MyModelAdmin,
)
]
self.assertEqual(errors, expected)
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ['state', ['state']]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=MyModelAdmin,
id='admin.E006'
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
'fields': ['title', 'album', ('title', 'album')]
}),
]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
hint=None,
obj=MyModelAdmin,
id='admin.E012'
)
]
self.assertEqual(errors, expected)
| 31.244 | 104 | 0.563692 | from __future__ import unicode_literals
import warnings
from django import forms
from django.contrib import admin
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from .models import Song, Book, Album, TwoAlbumFKAndAnE, City, State
class SongForm(forms.ModelForm):
pass
class ValidFields(admin.ModelAdmin):
form = SongForm
fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
def get_form(self, request, obj=None, **kwargs):
class ExtraFieldForm(SongForm):
name = forms.CharField(max_length=50)
return ExtraFieldForm
fieldsets = (
(None, {
'fields': ('name',),
}),
)
class SystemChecksTestCase(TestCase):
def test_checks_are_performed(self):
class MyAdmin(admin.ModelAdmin):
@classmethod
def check(self, model, **kwargs):
return ['error!']
admin.site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
admin.site.unregister(Song)
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
("The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin."),
hint=None,
obj=SongAdmin,
id='admin.E125',
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
errors = ValidFields.check(model=Song)
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
errors = ValidFormFieldsets.check(model=Song)
self.assertEqual(errors, [])
def test_exclude_values(self):
class ExcludedFields1(admin.ModelAdmin):
exclude = 'foo'
errors = ExcludedFields1.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFields1,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
errors = ExcludedFields2.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint=None,
obj=ExcludedFields2,
id='admin.E015',
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = 'foo'
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin.check(model=Album)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFieldsInline,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin.check(model=Album)
expected = [
checks.Error(
("Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'."),
hint=None,
obj=SongInline,
id='admin.E201',
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
class RawIdNonexistingAdmin(admin.ModelAdmin):
raw_id_fields = ('nonexisting',)
errors = RawIdNonexistingAdmin.check(model=Album)
expected = [
checks.Error(
("The value of 'raw_id_fields[0]' refers to 'nonexisting', which is "
"not an attribute of 'admin_checks.Album'."),
hint=None,
obj=RawIdNonexistingAdmin,
id='admin.E002',
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.",
hint=None,
obj=TwoAlbumFKAndAnEInline,
id='admin.E202',
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_method(self):
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_nonexistant_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistant")
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
("The value of 'readonly_fields[1]' is not a callable, an attribute "
"of 'SongAdmin', or an attribute of 'admin_checks.Song'."),
hint=None,
obj=SongAdmin,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_nonexistant_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields = ['i_dont_exist']
errors = CityInline.check(State)
expected = [
checks.Error(
("The value of 'readonly_fields[0]' is not a callable, an attribute "
"of 'CityInline', or an attribute of 'admin_checks.City'."),
hint=None,
obj=CityInline,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
class BookAdmin(admin.ModelAdmin):
fields = ['authors']
errors = BookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model."),
hint=None,
obj=BookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
('Header 1', {'fields': ('name',)}),
('Header 2', {'fields': ('authors',)}),
)
errors = FieldsetBookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
"'authors', because that field manually specifies a relationship model."),
hint=None,
obj=FieldsetBookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ('price', ('name', 'subtitle'))
errors = NestedFieldsAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (
('Main', {'fields': ('price', ('name', 'subtitle'))}),
)
errors = NestedFieldsetAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_explicit_through_override(self):
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_non_model_fields(self):
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['title', 'extra_data']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_non_model_first_field(self):
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = '__all__'
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['extra_data', 'title']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_validator_compatibility(self):
class MyValidator(object):
def validate(self, cls, model):
raise ImproperlyConfigured("error!")
class MyModelAdmin(admin.ModelAdmin):
validator_class = MyValidator
with warnings.catch_warnings(record=True):
warnings.filterwarnings('ignore', module='django.contrib.admin.options')
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
'error!',
hint=None,
obj=MyModelAdmin,
)
]
self.assertEqual(errors, expected)
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ['state', ['state']]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=MyModelAdmin,
id='admin.E006'
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
'fields': ['title', 'album', ('title', 'album')]
}),
]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
hint=None,
obj=MyModelAdmin,
id='admin.E012'
)
]
self.assertEqual(errors, expected)
| true | true |
1c4695708c7b41b299a783d8c3a65e327eb9a2a5 | 2,240 | py | Python | cekit/test/behave_tester.py | stephengaito/cekit | d6d254af6bb6820e5c725680bd77b6c195636cf6 | [
"MIT"
] | 59 | 2018-03-01T14:32:17.000Z | 2022-03-31T12:18:05.000Z | cekit/test/behave_tester.py | stephengaito/cekit | d6d254af6bb6820e5c725680bd77b6c195636cf6 | [
"MIT"
] | 446 | 2018-03-02T08:20:49.000Z | 2022-03-20T10:10:42.000Z | cekit/test/behave_tester.py | stephengaito/cekit | d6d254af6bb6820e5c725680bd77b6c195636cf6 | [
"MIT"
] | 29 | 2018-03-01T13:27:55.000Z | 2022-02-08T08:15:39.000Z | import logging
import os
from cekit.builder import Command
from cekit.generator.base import Generator
from cekit.test.collector import BehaveTestCollector
from cekit.test.behave_runner import BehaveTestRunner
LOGGER = logging.getLogger('cekit')
class BehaveTester(Command):
    """
    Tester implementation for the Behave framework.

    Lifecycle: construct with parsed CLI params, call prepare() to build the
    generator and collect tests, then run() to execute the collected tests.
    """
    def __init__(self, params):
        super(BehaveTester, self).__init__('behave', Command.TYPE_TESTER)
        self.params = params
        # Set to the collector's result by prepare(); run() refuses to start
        # while this is still False.
        self.collected = False
        self.test_collector = BehaveTestCollector(os.path.dirname(self.params.descriptor), self.params.target)
        self.test_runner = BehaveTestRunner(self.params.target)
        # Created lazily in prepare(); None until then.
        self.generator = None
    def prepare(self):
        """Build the image generator, resolve dependencies and collect tests."""
        self.generator = Generator(self.params.descriptor,
                                   self.params.target,
                                   self.params.overrides)
        # Handle dependencies for selected generator, if any
        LOGGER.debug("Checking CEKit generate dependencies...")
        self.dependency_handler.handle(self.generator, self.params)
        self.generator.init()
        # TODO: investigate if we can improve handling different schema versions
        self.collected = self.test_collector.collect(
            self.generator.image.get('schema_version'), self.params.steps_url)
        if self.collected:
            # Handle test dependencies, if any
            LOGGER.debug("Checking CEKit test collector dependencies...")
            self.dependency_handler.handle(self.test_collector, self.params)
            LOGGER.debug("Checking CEKit test runner dependencies...")
            self.dependency_handler.handle(self.test_runner, self.params)
    def run(self):
        """Run collected tests; no-op with a warning when none were collected."""
        if not self.collected:
            LOGGER.warning("No test collected, test can't be run.")
            return
        # Default to filtering scenarios by the image's primary tag.
        test_tags = [self.generator.get_tags()[0]]
        # If wip is specified set tags to @wip
        if self.params.wip:
            test_tags = ['@wip']
        image = self.params.image
        if not image:
            image = self.generator.get_tags()[0]
        self.test_runner.run(image, test_tags,
                             test_names=self.params.names)
| 32.941176 | 110 | 0.647768 | import logging
import os
from cekit.builder import Command
from cekit.generator.base import Generator
from cekit.test.collector import BehaveTestCollector
from cekit.test.behave_runner import BehaveTestRunner
LOGGER = logging.getLogger('cekit')
class BehaveTester(Command):
def __init__(self, params):
super(BehaveTester, self).__init__('behave', Command.TYPE_TESTER)
self.params = params
self.collected = False
self.test_collector = BehaveTestCollector(os.path.dirname(self.params.descriptor), self.params.target)
self.test_runner = BehaveTestRunner(self.params.target)
self.generator = None
def prepare(self):
self.generator = Generator(self.params.descriptor,
self.params.target,
self.params.overrides)
LOGGER.debug("Checking CEKit generate dependencies...")
self.dependency_handler.handle(self.generator, self.params)
self.generator.init()
self.collected = self.test_collector.collect(
self.generator.image.get('schema_version'), self.params.steps_url)
if self.collected:
LOGGER.debug("Checking CEKit test collector dependencies...")
self.dependency_handler.handle(self.test_collector, self.params)
LOGGER.debug("Checking CEKit test runner dependencies...")
self.dependency_handler.handle(self.test_runner, self.params)
def run(self):
if not self.collected:
LOGGER.warning("No test collected, test can't be run.")
return
test_tags = [self.generator.get_tags()[0]]
# If wip is specified set tags to @wip
if self.params.wip:
test_tags = ['@wip']
image = self.params.image
if not image:
image = self.generator.get_tags()[0]
self.test_runner.run(image, test_tags,
test_names=self.params.names)
| true | true |
1c4696b02290e4a4e51e7a76e7e5bf7ddffbc1f9 | 2,567 | py | Python | tests/test_io.py | nthndy/BayesianTracker | 443f984ce830373e140f744a27179debdf34ae58 | [
"MIT"
] | null | null | null | tests/test_io.py | nthndy/BayesianTracker | 443f984ce830373e140f744a27179debdf34ae58 | [
"MIT"
] | null | null | null | tests/test_io.py | nthndy/BayesianTracker | 443f984ce830373e140f744a27179debdf34ae58 | [
"MIT"
] | null | null | null | import os
from pathlib import Path
import numpy as np
import pytest
from _utils import (
create_test_object,
create_test_properties,
simple_tracker_example,
)
import btrack
def test_hdf5_write(tmp_path):
    """Round-trip: objects written to an HDF5 file read back with matching fields."""
    fn = os.path.join(tmp_path, "test.h5")
    objects = [create_test_object(id=i)[0] for i in range(10)]
    with btrack.dataio.HDF5FileHandler(fn, "w") as h:
        h.write_objects(objects)
    # Read the objects back and compare them with what was written.
    with btrack.dataio.HDF5FileHandler(fn, "r") as h:
        objects_from_file = h.objects
    for orig, read in zip(objects, objects_from_file):
        for attr in ("x", "y", "z", "t", "label", "ID"):
            # allclose, since the h5 file stores float32 by default
            np.testing.assert_allclose(getattr(orig, attr), getattr(read, attr))
def test_hdf5_write_with_properties(tmp_path):
    """Round-trip of objects carrying extra per-object properties."""
    fn = os.path.join(tmp_path, "test.h5")
    objects = []
    for idx in range(10):
        obj, _ = create_test_object(id=idx)
        obj.properties = create_test_properties()
        objects.append(obj)
    with btrack.dataio.HDF5FileHandler(fn, "w") as h:
        h.write_objects(objects)
    # Read the objects back and compare them with what was written.
    with btrack.dataio.HDF5FileHandler(fn, "r") as h:
        objects_from_file = h.objects
    extra_props = list(create_test_properties().keys())
    for orig, read in zip(objects, objects_from_file):
        for attr in ("x", "y", "z", "t", "label", "ID"):
            # allclose, since the h5 file stores float32 by default
            np.testing.assert_allclose(getattr(orig, attr), getattr(read, attr))
        for prop in extra_props:
            np.testing.assert_allclose(orig.properties[prop], read.properties[prop])
@pytest.mark.parametrize("export_format", ["", ".csv", ".h5"])
def test_tracker_export(tmp_path, export_format):
    """File export via the `export_delegator` accepts both str and Path targets."""
    tracker, _ = simple_tracker_example()
    export_filename = f"test{export_format}"
    # Exercise the plain-string destination first, then the pathlib.Path one.
    for fn in (os.path.join(tmp_path, export_filename),
               Path(tmp_path) / export_filename):
        tracker.export(fn, obj_type="obj_type_1")
    if export_format:
        assert os.path.exists(fn)
| 29.848837 | 81 | 0.66342 | import os
from pathlib import Path
import numpy as np
import pytest
from _utils import (
create_test_object,
create_test_properties,
simple_tracker_example,
)
import btrack
def test_hdf5_write(tmp_path):
fn = os.path.join(tmp_path, "test.h5")
objects = []
for i in range(10):
obj, _ = create_test_object(id=i)
objects.append(obj)
with btrack.dataio.HDF5FileHandler(fn, "w") as h:
h.write_objects(objects)
with btrack.dataio.HDF5FileHandler(fn, "r") as h:
objects_from_file = h.objects
properties = ["x", "y", "z", "t", "label", "ID"]
for orig, read in zip(objects, objects_from_file):
for p in properties:
np.testing.assert_allclose(getattr(orig, p), getattr(read, p))
def test_hdf5_write_with_properties(tmp_path):
fn = os.path.join(tmp_path, "test.h5")
objects = []
for i in range(10):
obj, _ = create_test_object(id=i)
obj.properties = create_test_properties()
objects.append(obj)
with btrack.dataio.HDF5FileHandler(fn, "w") as h:
h.write_objects(objects)
with btrack.dataio.HDF5FileHandler(fn, "r") as h:
objects_from_file = h.objects
extra_props = list(create_test_properties().keys())
properties = ["x", "y", "z", "t", "label", "ID"]
for orig, read in zip(objects, objects_from_file):
for p in properties:
np.testing.assert_allclose(getattr(orig, p), getattr(read, p))
for p in extra_props:
np.testing.assert_allclose(orig.properties[p], read.properties[p])
@pytest.mark.parametrize("export_format", ["", ".csv", ".h5"])
def test_tracker_export(tmp_path, export_format):
tracker, _ = simple_tracker_example()
export_filename = f"test{export_format}"
fn = os.path.join(tmp_path, export_filename)
tracker.export(fn, obj_type="obj_type_1")
fn = Path(tmp_path) / export_filename
tracker.export(fn, obj_type="obj_type_1")
if export_format:
assert os.path.exists(fn)
| true | true |
1c46971c776696ac88b0afdf08de5e2e9aa0b53e | 4,999 | py | Python | modules/tools/rosbag/extract_images.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | [
"Apache-2.0"
] | 27 | 2019-04-06T02:27:14.000Z | 2021-11-27T13:47:06.000Z | modules/tools/rosbag/extract_images.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | [
"Apache-2.0"
] | 5 | 2021-10-06T22:57:52.000Z | 2022-02-27T14:04:05.000Z | modules/tools/rosbag/extract_images.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | [
"Apache-2.0"
] | 38 | 2019-04-15T10:58:37.000Z | 2022-01-27T08:52:39.000Z | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Extract images from a recorded bag.
Usage:
extract_images.py --input_bag=a.bag
See the gflags for more optional args.
"""
import os
import sys
import cv2
import cv_bridge
import gflags
import glog
import rosbag
import yaml
# Requried flags.
gflags.DEFINE_string('input_bag', None, 'Input bag path.')
# Optional flags.
gflags.DEFINE_string('output_path', './', 'Output path.')
gflags.DEFINE_string('weather', 'CLEAR', 'Options: CLEAR, SUNNY, RAINY.')
gflags.DEFINE_string('scene', 'CITY', 'Options: CITY, HIGHWAY.')
gflags.DEFINE_string('time_interval', 'DAYTIME', 'Options: DAYTIME, NIGHT.')
gflags.DEFINE_float('extract_rate', 3, 'Rate to extract image, in seconds.')
# Stable flags which rarely change.
gflags.DEFINE_string('topic', '/apollo/sensor/camera/obstacle/front_6mm',
'Source topic.')
gflags.DEFINE_integer('sensor_id', 436, 'Source sensor ID.')
gflags.DEFINE_string('capture_place', 'Multiple', 'E.g.: Multiple, Sunnyvale.')
def extract_meta_info(bag):
    """Extract information from a bag file, return an info dict.

    Args:
        bag: an opened rosbag.Bag instance.

    Returns:
        dict with keys 'car_id', 'driver', 'start' and 'end'.
    """
    # Extract from bag info. Use safe_load: calling yaml.load without an
    # explicit Loader is deprecated since PyYAML 5.1 and can construct
    # arbitrary Python objects from the document.
    info_dict = yaml.safe_load(bag._get_yaml_info())
    meta_info = {
        # Fallback values; overridden below if the bag carries static info.
        'car_id': 'MKZ056',
        'driver': 'UNKNOWN',
        'start': int(info_dict['start']),
        'end': int(info_dict['end']),
    }
    # Extract from the first static-info message in the bag, if any.
    kStaticInfoTopic = '/apollo/monitor/static_info'
    static_info = next(
        (msg for _, msg, _ in bag.read_messages(topics=[kStaticInfoTopic])),
        None)
    if static_info is not None:
        if static_info.vehicle.name:
            meta_info['car_id'] = static_info.vehicle.name.upper()
        if static_info.user.driver:
            meta_info['driver'] = static_info.user.driver
    return meta_info
def extract_images(bag, dst_dir, args):
    """Extract images to the destination dir, return saved timestamps (ns)."""
    saved_stamps_ns = []
    last_saved_sec = 0
    bridge = cv_bridge.CvBridge()
    seq = 0
    for _, msg, t in bag.read_messages(topics=args.topic):
        # Throttle: skip frames closer than extract_rate seconds to the last
        # saved one.
        stamp_sec = msg.header.stamp.to_sec()
        if stamp_sec - last_saved_sec < args.extract_rate:
            continue
        last_saved_sec = stamp_sec
        saved_stamps_ns.append(msg.header.stamp.to_nsec())
        # Convert YUV422 to BGR and save as a sequentially numbered JPEG.
        seq += 1
        msg.encoding = 'yuv422'
        image = cv2.cvtColor(bridge.imgmsg_to_cv2(msg, 'yuv422'),
                             cv2.COLOR_YUV2BGR_YUYV)
        img_file = os.path.join(dst_dir, '{}.jpg'.format(seq))
        cv2.imwrite(img_file, image)
        glog.info('#{}: header.seq={}, header.stamp={}, saved as {}'.format(
            seq, msg.header.seq, stamp_sec, img_file))
    return saved_stamps_ns
def process_bag(bag, args):
    """Process a bag: write its meta file, images and timestamp listing."""
    meta_info = extract_meta_info(bag)
    dst_dir_name = '{}_{}_{}_{}'.format(meta_info['car_id'], args.sensor_id,
                                        meta_info['start'], meta_info['end'])
    dst_dir = os.path.join(args.output_path, dst_dir_name)
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    # Generate meta file describing the capture.
    meta_lines = [
        'car_id:{}\n'.format(meta_info['car_id']),
        'driver:{}\n'.format(meta_info['driver']),
        'capture_place:{}\n'.format(args.capture_place),
        'weather:{}\n'.format(args.weather),
        'topic:{}\n'.format(args.topic),
        'scene:{}\n'.format(args.scene),
        'time_interval:{}\n'.format(args.time_interval),
    ]
    with open(os.path.join(dst_dir, dst_dir_name + '.meta'), 'w') as meta_w:
        meta_w.writelines(meta_lines)
    # Generate images, then one timestamp line per saved image.
    time_nsecs = extract_images(bag, dst_dir, args)
    with open(os.path.join(dst_dir, 'timestamp.txt'), 'w') as timestamp_w:
        timestamp_w.write('seq\ttimestamp_ns\n')
        for seq, timestamp_ns in enumerate(time_nsecs, start=1):
            timestamp_w.write('{}\t{}\n'.format(seq, timestamp_ns))
def main():
    """Entry point: parse gflags from argv, then process the input bag."""
    gflags.FLAGS(sys.argv)
    # rosbag.Bag supports the context-manager protocol, so the bag is closed
    # automatically when processing finishes or raises.
    with rosbag.Bag(gflags.FLAGS.input_bag) as bag:
        process_bag(bag, gflags.FLAGS)

if __name__ == '__main__':
    main()
| 34.475862 | 79 | 0.640928 | true | true | |
1c46987bff6123b37edc08ade402cc724f07010b | 9,073 | py | Python | weechat/python/grep_filter.py | TyranicMoron/dotfiles | 277b85c84cc2d0ed542175db218fc6313b3d85c0 | [
"MIT"
] | 1 | 2017-04-18T20:05:22.000Z | 2017-04-18T20:05:22.000Z | weechat/python/grep_filter.py | TyranicMoron/dotfiles | 277b85c84cc2d0ed542175db218fc6313b3d85c0 | [
"MIT"
] | 2 | 2015-06-26T10:53:57.000Z | 2015-06-26T11:22:56.000Z | weechat/python/grep_filter.py | MatthewCox/dotfiles | 277b85c84cc2d0ed542175db218fc6313b3d85c0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 by Simmo Saan <simmo.saan@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# History:
#
# 2019-06-07, Trygve Aaberge <trygveaa@gmail.com>
# version 0.10: remove newlines from command completion
# 2015-10-04, Simmo Saan <simmo.saan@gmail.com>
# version 0.9: fix text search imitation in filter
# 2015-08-27, Simmo Saan <simmo.saan@gmail.com>
# version 0.8: add documentation
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.7: mute filter add/del
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.6: imitate search settings in filter
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.5: option for bar item text
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.4: option for default state
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.3: allow toggling during search
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.2: add bar item for indication
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.1: initial script
#
"""
Filter buffers automatically while searching them
"""
from __future__ import print_function
SCRIPT_NAME = "grep_filter"
SCRIPT_AUTHOR = "Simmo Saan <simmo.saan@gmail.com>"
SCRIPT_VERSION = "0.10"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Filter buffers automatically while searching them"
SCRIPT_REPO = "https://github.com/sim642/grep_filter"
SCRIPT_COMMAND = SCRIPT_NAME
SCRIPT_BAR_ITEM = SCRIPT_NAME
SCRIPT_LOCALVAR = SCRIPT_NAME
IMPORT_OK = True
try:
import weechat
except ImportError:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: http://www.weechat.org/")
IMPORT_OK = False
import re # re.escape
SETTINGS = {
"enable": (
"off",
"enable automatically start filtering when searching"),
"bar_item": (
"grep",
"text to show in bar item when filtering")
}
KEYS = {
"ctrl-G": "/%s toggle" % SCRIPT_COMMAND
}
def get_merged_buffers(ptr):
    """
    Get a list of buffers which are merged with "ptr".

    Returns the full names of every buffer sharing "ptr"'s buffer number
    (merged buffers share a single number in WeeChat).
    """
    hdata = weechat.hdata_get("buffer")
    buffers = weechat.hdata_get_list(hdata, "gui_buffers")
    # Jump to the first buffer with the same number as "ptr"; the buffer list
    # is ordered by number, so all merged buffers follow contiguously.
    buffer = weechat.hdata_search(hdata, buffers, "${buffer.number} == %i" % weechat.hdata_integer(hdata, ptr, "number"), 1)
    nbuffer = weechat.hdata_move(hdata, buffer, 1)
    ret = []
    while buffer:
        ret.append(weechat.hdata_string(hdata, buffer, "full_name"))
        # Keep walking while the next buffer still shares the same number.
        if (weechat.hdata_integer(hdata, buffer, "number") == weechat.hdata_integer(hdata, nbuffer, "number")):
            buffer = nbuffer
            nbuffer = weechat.hdata_move(hdata, nbuffer, 1)
        else:
            buffer = None
    return ret
def filter_exists(name):
    """Tell whether a filter called "name" is currently defined."""
    hdata = weechat.hdata_get("filter")
    filters = weechat.hdata_get_list(hdata, "gui_filters")
    # hdata_search returns a null pointer (empty string) when nothing matches.
    expr = "${filter.name} == %s" % name
    return bool(weechat.hdata_search(hdata, filters, expr, 1))
def filter_del(name):
    """
    Delete a filter named "name".
    """
    # /mute suppresses the confirmation message that /filter del would print.
    weechat.command(weechat.buffer_search_main(), "/mute filter del %s" % name)
def filter_addreplace(name, buffers, tags, regex):
    """Create the filter "name", replacing any existing filter of that name."""
    # /filter add fails on duplicate names, so remove the old one first.
    if filter_exists(name):
        filter_del(name)
    command = "/mute filter add %s %s %s %s" % (name, buffers, tags, regex)
    weechat.command(weechat.buffer_search_main(), command)
def buffer_searching(buffer):
    """Tell whether "buffer" is currently in text-search mode."""
    hdata = weechat.hdata_get("buffer")
    # text_search is a non-zero integer while a search is active.
    searching = weechat.hdata_integer(hdata, buffer, "text_search")
    return searching != 0
def buffer_filtering(buffer):
    """Return "buffer"'s filtering state: True, False, or None when unset."""
    states = {"": None, "0": False, "1": True}
    current = weechat.buffer_get_string(buffer, "localvar_%s" % SCRIPT_LOCALVAR)
    return states[current]
def buffer_build_regex(buffer):
    """
    Build a regex according to "buffer"'s search settings.

    The returned string is a /filter regex; the leading "!" negates it, so
    lines matching the search stay visible and everything else is hidden.
    """
    hdata = weechat.hdata_get("buffer")
    input = weechat.hdata_string(hdata, buffer, "input_buffer")
    exact = weechat.hdata_integer(hdata, buffer, "text_search_exact")
    where = weechat.hdata_integer(hdata, buffer, "text_search_where")
    regex = weechat.hdata_integer(hdata, buffer, "text_search_regex")
    if not regex:
        # Plain-text search: treat the typed input literally.
        input = re.escape(input)
    if exact:
        # Case-sensitive search: disable regex case-insensitivity inline.
        input = "(?-i)%s" % input
    filter_regex = None
    if where == 1: # message
        filter_regex = input
    elif where == 2: # prefix
        filter_regex = "%s\\t" % input
    else: # prefix | message
        filter_regex = input # TODO: impossible with current filter regex
    return "!%s" % filter_regex
def buffer_update(buffer):
    """
    Refresh filtering in "buffer" by updating (or removing) the filter and update the bar item.
    """
    hdata = weechat.hdata_get("buffer")
    buffers = ",".join(get_merged_buffers(buffer))
    # One filter per merged-buffer group, named after the script + buffer list.
    name = "%s_%s" % (SCRIPT_NAME, buffers)
    if buffer_searching(buffer):
        if buffer_filtering(buffer):
            filter_addreplace(name, buffers, "*", buffer_build_regex(buffer))
        elif not buffer_filtering(buffer) and filter_exists(name):
            filter_del(name)
    elif filter_exists(name):
        # Not searching any more: remove the leftover filter, if any.
        filter_del(name)
    where = weechat.hdata_integer(hdata, buffer, "text_search_where")
    # where == 3 is "pre|msg", which the filter regex cannot express exactly;
    # remember that so the bar item can be drawn in a warning color.
    weechat.buffer_set(buffer, "localvar_set_%s_warn" % SCRIPT_LOCALVAR, "1" if where == 3 else "0") # warn about incorrect filter
    weechat.bar_item_update(SCRIPT_BAR_ITEM)
def input_search_cb(data, signal, buffer):
    """
    Handle "input_search" signal.

    Fired when search mode is entered or left; initialises or clears the
    per-buffer local variables tracking the filtering state.
    """
    if buffer_searching(buffer) and buffer_filtering(buffer) is None:
        # Search just started: seed the filtering state from the plugin option.
        enable = weechat.config_string_to_boolean(weechat.config_get_plugin("enable"))
        weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "1" if enable else "0")
        weechat.buffer_set(buffer, "localvar_set_%s_warn" % SCRIPT_LOCALVAR, "0")
    elif not buffer_searching(buffer):
        # Search ended: drop the state so the next search starts fresh.
        weechat.buffer_set(buffer, "localvar_del_%s" % SCRIPT_LOCALVAR, "")
        weechat.buffer_set(buffer, "localvar_del_%s_warn" % SCRIPT_LOCALVAR, "")
    buffer_update(buffer)
    return weechat.WEECHAT_RC_OK
def input_text_changed_cb(data, signal, buffer):
    """Handle the "input_text_changed" signal: re-filter on every keystroke."""
    # Only react while the user is actively searching with filtering enabled.
    if buffer_searching(buffer) and buffer_filtering(buffer):
        merged = ",".join(get_merged_buffers(buffer))
        filter_addreplace("%s_%s" % (SCRIPT_NAME, merged), merged, "*",
                          buffer_build_regex(buffer))
    return weechat.WEECHAT_RC_OK
def command_cb(data, buffer, args):
    """Handle the /grep_filter command (enable, disable or toggle)."""
    new_state = None
    if args == "enable":
        new_state = "1"
    elif args == "disable":
        new_state = "0"
    elif args == "toggle":
        new_state = "0" if buffer_filtering(buffer) else "1"
    if new_state is not None:
        weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, new_state)
    # Apply (or remove) the filter to match the possibly-changed state.
    buffer_update(buffer)
    return weechat.WEECHAT_RC_OK
def bar_item_cb(data, item, window, buffer, extra_info):
    """Build the bar item's content for "buffer"."""
    merged = ",".join(get_merged_buffers(buffer))
    # Only show the item while our filter for this buffer group exists.
    if not filter_exists("%s_%s" % (SCRIPT_NAME, merged)):
        return ""
    warn = int(weechat.buffer_get_string(buffer, "localvar_%s_warn" % SCRIPT_LOCALVAR))
    # Warning color flags search modes the filter cannot reproduce exactly.
    color = "input_text_not_found" if warn else "bar_fg"
    return "%s%s%s" % (weechat.color(color),
                       weechat.config_get_plugin("bar_item"),
                       weechat.color("reset"))
if __name__ == "__main__" and IMPORT_OK:
    # Running inside WeeChat: register the script and wire everything up.
    if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, "", ""):
        # Track entering/leaving search mode and edits to the search input.
        weechat.hook_signal("input_search", "input_search_cb", "")
        weechat.hook_signal("input_text_changed", "input_text_changed_cb", "")
        weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC,
            """enable || disable || toggle""",
            """ enable: enable {0} in current buffer
disable: disable {0} in current buffer
toggle: toggle {0} in current buffer
By default a bind in "search" context is added to toggle with "ctrl-G".
To see {0} status during search, add "{1}" item to some bar. On default configuration you can do it with:
/set weechat.bar.input.items "[input_prompt]+(away),[{1}],[input_search],[input_paste],input_text"
Due to technical reasons with /filter it is not possible to exactly {0} in "pre|msg" search mode, thus the bar item is shown in warning color.""".format(SCRIPT_NAME, SCRIPT_BAR_ITEM),
            """enable || disable || toggle""",
            "command_cb", "")
        # NOTE(review): the "(extra)" prefix presumably selects the bar item
        # callback variant that receives extra_info -- confirm against the
        # WeeChat plugin API docs for bar_item_new.
        weechat.bar_item_new("(extra)%s" % SCRIPT_BAR_ITEM, "bar_item_cb", "")
        # Create missing plugin options with defaults and set descriptions.
        for option, value in SETTINGS.items():
            if not weechat.config_is_set_plugin(option):
                weechat.config_set_plugin(option, value[0])
            weechat.config_set_desc_plugin(option, "%s (default: \"%s\")" % (value[1], value[0]))
        # Default key binding (ctrl-G in search context).
        weechat.key_bind("search", KEYS)
| 30.548822 | 183 | 0.721922 |
from __future__ import print_function
SCRIPT_NAME = "grep_filter"
SCRIPT_AUTHOR = "Simmo Saan <simmo.saan@gmail.com>"
SCRIPT_VERSION = "0.10"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Filter buffers automatically while searching them"
SCRIPT_REPO = "https://github.com/sim642/grep_filter"
SCRIPT_COMMAND = SCRIPT_NAME
SCRIPT_BAR_ITEM = SCRIPT_NAME
SCRIPT_LOCALVAR = SCRIPT_NAME
IMPORT_OK = True
try:
import weechat
except ImportError:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: http://www.weechat.org/")
IMPORT_OK = False
import re
SETTINGS = {
"enable": (
"off",
"enable automatically start filtering when searching"),
"bar_item": (
"grep",
"text to show in bar item when filtering")
}
KEYS = {
"ctrl-G": "/%s toggle" % SCRIPT_COMMAND
}
def get_merged_buffers(ptr):
hdata = weechat.hdata_get("buffer")
buffers = weechat.hdata_get_list(hdata, "gui_buffers")
buffer = weechat.hdata_search(hdata, buffers, "${buffer.number} == %i" % weechat.hdata_integer(hdata, ptr, "number"), 1)
nbuffer = weechat.hdata_move(hdata, buffer, 1)
ret = []
while buffer:
ret.append(weechat.hdata_string(hdata, buffer, "full_name"))
if (weechat.hdata_integer(hdata, buffer, "number") == weechat.hdata_integer(hdata, nbuffer, "number")):
buffer = nbuffer
nbuffer = weechat.hdata_move(hdata, nbuffer, 1)
else:
buffer = None
return ret
def filter_exists(name):
hdata = weechat.hdata_get("filter")
filters = weechat.hdata_get_list(hdata, "gui_filters")
filter = weechat.hdata_search(hdata, filters, "${filter.name} == %s" % name, 1)
return bool(filter)
def filter_del(name):
weechat.command(weechat.buffer_search_main(), "/mute filter del %s" % name)
def filter_addreplace(name, buffers, tags, regex):
if filter_exists(name):
filter_del(name)
weechat.command(weechat.buffer_search_main(), "/mute filter add %s %s %s %s" % (name, buffers, tags, regex))
def buffer_searching(buffer):
hdata = weechat.hdata_get("buffer")
return bool(weechat.hdata_integer(hdata, buffer, "text_search"))
def buffer_filtering(buffer):
local = weechat.buffer_get_string(buffer, "localvar_%s" % SCRIPT_LOCALVAR)
return {"": None, "0": False, "1": True}[local]
def buffer_build_regex(buffer):
hdata = weechat.hdata_get("buffer")
input = weechat.hdata_string(hdata, buffer, "input_buffer")
exact = weechat.hdata_integer(hdata, buffer, "text_search_exact")
where = weechat.hdata_integer(hdata, buffer, "text_search_where")
regex = weechat.hdata_integer(hdata, buffer, "text_search_regex")
if not regex:
input = re.escape(input)
if exact:
input = "(?-i)%s" % input
filter_regex = None
if where == 1:
filter_regex = input
elif where == 2:
filter_regex = "%s\\t" % input
else:
filter_regex = input
return "!%s" % filter_regex
def buffer_update(buffer):
hdata = weechat.hdata_get("buffer")
buffers = ",".join(get_merged_buffers(buffer))
name = "%s_%s" % (SCRIPT_NAME, buffers)
if buffer_searching(buffer):
if buffer_filtering(buffer):
filter_addreplace(name, buffers, "*", buffer_build_regex(buffer))
elif not buffer_filtering(buffer) and filter_exists(name):
filter_del(name)
elif filter_exists(name):
filter_del(name)
where = weechat.hdata_integer(hdata, buffer, "text_search_where")
weechat.buffer_set(buffer, "localvar_set_%s_warn" % SCRIPT_LOCALVAR, "1" if where == 3 else "0")
weechat.bar_item_update(SCRIPT_BAR_ITEM)
def input_search_cb(data, signal, buffer):
if buffer_searching(buffer) and buffer_filtering(buffer) is None:
enable = weechat.config_string_to_boolean(weechat.config_get_plugin("enable"))
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "1" if enable else "0")
weechat.buffer_set(buffer, "localvar_set_%s_warn" % SCRIPT_LOCALVAR, "0")
elif not buffer_searching(buffer):
weechat.buffer_set(buffer, "localvar_del_%s" % SCRIPT_LOCALVAR, "")
weechat.buffer_set(buffer, "localvar_del_%s_warn" % SCRIPT_LOCALVAR, "")
buffer_update(buffer)
return weechat.WEECHAT_RC_OK
def input_text_changed_cb(data, signal, buffer):
if buffer_searching(buffer) and buffer_filtering(buffer):
buffers = ",".join(get_merged_buffers(buffer))
name = "%s_%s" % (SCRIPT_NAME, buffers)
filter_addreplace(name, buffers, "*", buffer_build_regex(buffer))
return weechat.WEECHAT_RC_OK
def command_cb(data, buffer, args):
if args == "enable":
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "1")
elif args == "disable":
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "0")
elif args == "toggle":
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "0" if buffer_filtering(buffer) else "1")
else:
pass
buffer_update(buffer)
return weechat.WEECHAT_RC_OK
def bar_item_cb(data, item, window, buffer, extra_info):
buffers = ",".join(get_merged_buffers(buffer))
name = "%s_%s" % (SCRIPT_NAME, buffers)
if filter_exists(name):
warn = int(weechat.buffer_get_string(buffer, "localvar_%s_warn" % SCRIPT_LOCALVAR))
return "%s%s%s" % (
weechat.color("input_text_not_found" if warn else "bar_fg"),
weechat.config_get_plugin("bar_item"),
weechat.color("reset"))
else:
return ""
if __name__ == "__main__" and IMPORT_OK:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, "", ""):
weechat.hook_signal("input_search", "input_search_cb", "")
weechat.hook_signal("input_text_changed", "input_text_changed_cb", "")
weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC,
"""enable || disable || toggle""",
""" enable: enable {0} in current buffer
disable: disable {0} in current buffer
toggle: toggle {0} in current buffer
By default a bind in "search" context is added to toggle with "ctrl-G".
To see {0} status during search, add "{1}" item to some bar. On default configuration you can do it with:
/set weechat.bar.input.items "[input_prompt]+(away),[{1}],[input_search],[input_paste],input_text"
Due to technical reasons with /filter it is not possible to exactly {0} in "pre|msg" search mode, thus the bar item is shown in warning color.""".format(SCRIPT_NAME, SCRIPT_BAR_ITEM),
"""enable || disable || toggle""",
"command_cb", "")
weechat.bar_item_new("(extra)%s" % SCRIPT_BAR_ITEM, "bar_item_cb", "")
for option, value in SETTINGS.items():
if not weechat.config_is_set_plugin(option):
weechat.config_set_plugin(option, value[0])
weechat.config_set_desc_plugin(option, "%s (default: \"%s\")" % (value[1], value[0]))
weechat.key_bind("search", KEYS)
| true | true |
1c46996f63d32c290afa2e3cc34a753d12d8719d | 3,601 | py | Python | tricks/lsh_pp_pretaining.py | yanzhoupan/dlrm_ssm | 49ca1e4487ff0e148065c0a133acb078835a9b86 | [
"MIT"
] | 3 | 2021-03-16T03:33:44.000Z | 2022-03-14T08:48:01.000Z | tricks/lsh_pp_pretaining.py | yanzhoupan/dlrm_ssm | 49ca1e4487ff0e148065c0a133acb078835a9b86 | [
"MIT"
] | 2 | 2021-03-25T08:19:25.000Z | 2021-04-10T16:43:45.000Z | tricks/lsh_pp_pretaining.py | yanzhoupan/dlrm_ssm | 49ca1e4487ff0e148065c0a133acb078835a9b86 | [
"MIT"
] | 1 | 2021-09-08T21:47:06.000Z | 2021-09-08T21:47:06.000Z | # data preprocessing for LSH embedding
import numpy as np
import torch
from min_hash_generator import SparseBitVectorMinHashGenerator
from collections import defaultdict
# import multiprocessing
from tqdm import tqdm
import time
import random
import concurrent.futures
import pdb
seed = 123
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# use partial data set to get minhash table.
min_hash_gen = None
val_indices = None
import sys
# All three positional arguments are required: the script indexes
# argv[1..3] below, so check for all of them, not just "at least one"
# (the original `len(sys.argv) <= 1` let 2- or 3-argument invocations
# fall through to an IndexError instead of the usage message).
if len(sys.argv) < 4:
    print("Usage: <script> embedding hash num_pt")
    assert(False)
EMBEDDING = int(sys.argv[1])  # embedding dimension (signature length)
NUM_HASH = int(sys.argv[2])   # hash count passed to the MinHash generator
NUM_PT = int(sys.argv[3])     # number of data points sampled for MinHash
print("EMB:",EMBEDDING, "NUMH",NUM_HASH, "NUM_PT",NUM_PT)
def compute(start, end):
    """Compute MinHash signatures for category value ids in [start, end).

    Reads the module-level ``min_hash_gen`` and ``val_indices`` set up by
    getBigMinHashTable() before farming work out to the process pool.
    (The original declared ``global min_hash_table`` here, but the function
    never touches that global -- each worker returns its partial table and
    the parent process stitches the results together -- so the misleading
    declaration is removed.)

    Returns:
        (start, end, table): ``table`` is an (end - start, EMBEDDING) array
        with one MinHash signature row per category value id.
    """
    partial = np.zeros((end - start, EMBEDDING))
    for val_id in range(start, end):
        partial[val_id - start] = min_hash_gen.generate(val_indices[val_id])
    return start, end, partial
def getBigMinHashTable():
    """Build and save the MinHash signature table for all category values.

    Samples NUM_PT rows from the preprocessed Criteo data, builds a
    value -> document-index inverted list per categorical feature, then
    computes an EMBEDDING-wide MinHash signature for every category value
    in parallel and saves the full table to ./input/.
    """
    # Shared with the pool workers (see compute()); must be module globals so
    # forked processes inherit them.
    global min_hash_gen, min_hash_table, val_indices
    data = np.load('./input/kaggleAdDisplayChallenge_processed.npz')
    data_num, cat_num = data["X_cat"].shape # (45840617, 26) for criteo
    partial_idx = np.random.choice(np.arange(data_num), size=NUM_PT, replace=False)
    partial_cat_data = data['X_cat'][partial_idx]
    print(partial_cat_data.shape)
    start_time = time.time()
    np.savez(r'./cat_counts.npz', cat_counts = data['counts'])
    # Category values of different features share one flat id space;
    # "base" is the running offset of the current feature.
    base = 0
    val_indices = defaultdict(lambda:[])
    # generate signature matrix for category values (partial data)
    for fea_id in tqdm(range(cat_num)):
        cat_fea = partial_cat_data[:, fea_id]
        for doc_id in range(len(cat_fea)): # loop over docs
            val_indices[cat_fea[doc_id] + base].append(doc_id)
        for val in range(data['counts'][fea_id]):
            if val_indices[val+base] == []:
                # NOTE(review): randint(0, data_num+1) can exceed the
                # generator's input_size (= NUM_PT) set below -- confirm
                # SparseBitVectorMinHashGenerator tolerates out-of-range
                # indices; randint(0, NUM_PT - 1) looks intended.
                val_indices[val+base] = [random.randint(0, data_num+1)] # set val_indices to a random place if never seen it
        base += data['counts'][fea_id]
    embedding_dim = EMBEDDING
    min_hash_table = np.zeros((len(val_indices), embedding_dim))
    input_size = len(cat_fea) # number of the data items
    min_hash_gen = SparseBitVectorMinHashGenerator(input_size, embedding_dim, NUM_HASH)
    batch_size=1000
    # Fan the value ids out to the pool in contiguous batches; each worker
    # returns (start, end, partial_table) which is copied into place here.
    with concurrent.futures.ProcessPoolExecutor(50) as executor:
        print("submitting jobs")
        futures = []
        print ("total", len(val_indices))
        total = len(val_indices)
        num_batches = int(np.ceil(len(val_indices) / batch_size))
        for i in tqdm(range(num_batches)):
            start = i * batch_size
            end = min(total, start + batch_size)
            if end > start:
                futures.append(executor.submit(compute, start, end))
                #compute(start, end)
        ip = 0
        for res in tqdm(concurrent.futures.as_completed(futures), total = num_batches):
            st,ed,output = res.result()
            ip = ip + 1
            min_hash_table[st:ed,:] = output
            #print(st, ed, np.sum(min_hash_table[st:ed]))
    np.savez(r'./input/bigMinHashTable_H'+ str(NUM_HASH) + '_E' + str(EMBEDDING)+ '_P' + str(NUM_PT) + '.npz', big_min_hash_table = min_hash_table.astype(int))
    end_time = time.time()
    print(end_time - start_time)
if __name__ == "__main__":
    # getMinHashTable()
    # Build and save the full MinHash signature table for all category values.
    getBigMinHashTable()
    # Ad-hoc verification snippets kept for reference:
    # bigMinHashTable = np.load('./input/bigMinHashTable.npz')
    # minHashTables = np.load('./input/minHashTables.npz')
    # print(len(minHashTables['arr_0'][:, 0]))
    # print(len(bigMinHashTable['big_min_hash_table'][:, 0]))
| 35.303922 | 159 | 0.67759 |
import numpy as np
import torch
from min_hash_generator import SparseBitVectorMinHashGenerator
from collections import defaultdict
from tqdm import tqdm
import time
import random
import concurrent.futures
import pdb
seed = 123
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
min_hash_gen = None
val_indices = None
import sys
if len(sys.argv) <=1:
print("Usage: <script> embedding hash num_pt")
assert(False)
EMBEDDING = int(sys.argv[1])
NUM_HASH = int(sys.argv[2])
NUM_PT = int(sys.argv[3])
print("EMB:",EMBEDDING, "NUMH",NUM_HASH, "NUM_PT",NUM_PT)
def compute(start, end):
global min_hash_table
p_min_hash_table = np.zeros((end-start, EMBEDDING))
for val_id in range(start, end):
p_min_hash_table[val_id-start] = min_hash_gen.generate(val_indices[val_id])
return start,end ,p_min_hash_table
def getBigMinHashTable():
global min_hash_gen, min_hash_table, val_indices
data = np.load('./input/kaggleAdDisplayChallenge_processed.npz')
data_num, cat_num = data["X_cat"].shape
partial_idx = np.random.choice(np.arange(data_num), size=NUM_PT, replace=False)
partial_cat_data = data['X_cat'][partial_idx]
print(partial_cat_data.shape)
start_time = time.time()
np.savez(r'./cat_counts.npz', cat_counts = data['counts'])
base = 0
val_indices = defaultdict(lambda:[])
for fea_id in tqdm(range(cat_num)):
cat_fea = partial_cat_data[:, fea_id]
for doc_id in range(len(cat_fea)):
val_indices[cat_fea[doc_id] + base].append(doc_id)
for val in range(data['counts'][fea_id]):
if val_indices[val+base] == []:
val_indices[val+base] = [random.randint(0, data_num+1)]
base += data['counts'][fea_id]
embedding_dim = EMBEDDING
min_hash_table = np.zeros((len(val_indices), embedding_dim))
input_size = len(cat_fea)
min_hash_gen = SparseBitVectorMinHashGenerator(input_size, embedding_dim, NUM_HASH)
batch_size=1000
with concurrent.futures.ProcessPoolExecutor(50) as executor:
print("submitting jobs")
futures = []
print ("total", len(val_indices))
total = len(val_indices)
num_batches = int(np.ceil(len(val_indices) / batch_size))
for i in tqdm(range(num_batches)):
start = i * batch_size
end = min(total, start + batch_size)
if end > start:
futures.append(executor.submit(compute, start, end))
ip = 0
for res in tqdm(concurrent.futures.as_completed(futures), total = num_batches):
st,ed,output = res.result()
ip = ip + 1
min_hash_table[st:ed,:] = output
np.savez(r'./input/bigMinHashTable_H'+ str(NUM_HASH) + '_E' + str(EMBEDDING)+ '_P' + str(NUM_PT) + '.npz', big_min_hash_table = min_hash_table.astype(int))
end_time = time.time()
print(end_time - start_time)
if __name__ == "__main__":
getBigMinHashTable()
| true | true |
1c469984e0ce27e7f993f3a5fceabf990b93bb2c | 1,686 | py | Python | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/maf_split_by_src.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | null | null | null | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/maf_split_by_src.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | null | null | null | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/maf_split_by_src.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 1 | 2020-07-25T21:03:18.000Z | 2020-07-25T21:03:18.000Z | #!/afs/bx.psu.edu/project/pythons/linux-i686-ucs4/bin/python2.7
"""
Read a MAF from stdin and break into several mafs based on the source of
each block. If the `component` option is provided then only that component
will be used to determine the new file for each block, otherwise the src
for *all* components will be used.
TODO: Should be able to specify component by species/prefix?
usage: %prog [options] < maf
-o, --outprefix: prepend this to the name of each generate maf
-c, --component: use only this component (by index!) to split
"""
import sys, string
import bx.align.maf
from optparse import OptionParser
import psyco_full
INF="inf"
def __main__():
    """Split a MAF stream from stdin into one output file per source key.

    NOTE: Python 2 only -- relies on `dict.has_key`, the `file` builtin and
    `string.join`, none of which exist in Python 3.
    """
    # Parse command line arguments
    parser = OptionParser()
    parser.add_option( "-o", "--outprefix", action="store", default="" )
    parser.add_option( "-c", "--component", action="store", default=None )
    ( options, args ) = parser.parse_args()
    out_prefix = options.outprefix
    comp = options.component
    if comp is not None:
        comp = int( comp )
    maf_reader = bx.align.maf.Reader( sys.stdin )
    # One writer per distinct key, opened lazily on first use.
    writers = {}
    for m in maf_reader:
        if comp is None:
            # Key on the src of *all* components, joined with underscores.
            writer_key = string.join( [ c.src for c in m.components ], '_' )
        else:
            # Key on the src of the selected component only (by index).
            writer_key = m.components[ comp ].src
        if not writers.has_key( writer_key ):
            writer = bx.align.maf.Writer( file( "%s%s.maf" % ( out_prefix, writer_key ), "w" ) )
            writers[ writer_key ] = writer
        else:
            writer = writers[ writer_key ]
        writer.write( m )
    # Close every output file once the input stream is exhausted.
    for key in writers:
        writers[ key ].close()

if __name__ == "__main__": __main__()
| 27.639344 | 96 | 0.640569 |
import sys, string
import bx.align.maf
from optparse import OptionParser
import psyco_full
INF="inf"
def __main__():
parser = OptionParser()
parser.add_option( "-o", "--outprefix", action="store", default="" )
parser.add_option( "-c", "--component", action="store", default=None )
( options, args ) = parser.parse_args()
out_prefix = options.outprefix
comp = options.component
if comp is not None:
comp = int( comp )
maf_reader = bx.align.maf.Reader( sys.stdin )
writers = {}
for m in maf_reader:
if comp is None:
writer_key = string.join( [ c.src for c in m.components ], '_' )
else:
writer_key = m.components[ comp ].src
if not writers.has_key( writer_key ):
writer = bx.align.maf.Writer( file( "%s%s.maf" % ( out_prefix, writer_key ), "w" ) )
writers[ writer_key ] = writer
else:
writer = writers[ writer_key ]
writer.write( m )
for key in writers:
writers[ key ].close()
if __name__ == "__main__": __main__()
| true | true |
1c469a53f45b615fde86d33a3918d754e428abba | 1,474 | py | Python | src/simgnn.py | pulkit1joshi/SimGNN | 199b6014482a1dc8719394de4fc17f03c1b7192c | [
"MIT"
] | 22 | 2020-10-09T13:36:57.000Z | 2022-02-10T04:07:54.000Z | src/simgnn.py | kartiklucky9n/SimGNN | 199b6014482a1dc8719394de4fc17f03c1b7192c | [
"MIT"
] | 8 | 2020-10-10T11:02:39.000Z | 2021-12-29T17:45:05.000Z | src/simgnn.py | kartiklucky9n/SimGNN | 199b6014482a1dc8719394de4fc17f03c1b7192c | [
"MIT"
] | 11 | 2020-10-11T03:58:36.000Z | 2022-03-30T09:54:55.000Z | from tensorflow import keras
from tensorflow.keras import layers
from keras_gcn import GraphConv
from keras.models import Model
from keras.layers import Input
from custom_layers import Attention, NeuralTensorLayer
"""
Main model : Node-to-Node interaction not implemented.
Functional API :
Shared layers are shared_gcn1, shared_gcn2, shard_gcn3, shared_attention
"""
def simgnn(parser):
    """Build the SimGNN similarity model (node-to-node interaction omitted).

    Three GraphConv layers and one Attention layer are shared between both
    input graphs; the two graph-level embeddings are combined by a neural
    tensor layer and scored by a small dense head ending in a sigmoid.
    """
    feats_a = Input(shape=(None, 16))
    adj_a = Input(shape=(None, None))
    feats_b = Input(shape=(None, 16))
    adj_b = Input(shape=(None, None))
    # Shared (weight-tied) graph-convolution stack and attention pooling.
    gcn_1 = GraphConv(units=parser.filters_1, step_num=3, activation="relu")
    gcn_2 = GraphConv(units=parser.filters_2, step_num=3, activation="relu")
    gcn_3 = GraphConv(units=parser.filters_3, step_num=3, activation="relu")
    att = Attention(parser)

    def embed(feats, adj):
        # Apply the shared stack to one graph and pool to a single embedding.
        hidden = gcn_1([feats, adj])
        hidden = gcn_2([hidden, adj])
        hidden = gcn_3([hidden, adj])
        return att(hidden[0])

    emb_a = embed(feats_a, adj_a)
    emb_b = embed(feats_b, adj_b)
    score = NeuralTensorLayer(output_dim=16, input_dim=16)([emb_a, emb_b])
    score = keras.layers.Dense(16, activation="relu")(score)
    score = keras.layers.Dense(8, activation="relu")(score)
    score = keras.layers.Dense(4, activation="relu")(score)
    score = keras.layers.Dense(1)(score)
    score = keras.activations.sigmoid(score)
    return Model(inputs=[feats_a, adj_a, feats_b, adj_b], outputs=score)
from tensorflow.keras import layers
from keras_gcn import GraphConv
from keras.models import Model
from keras.layers import Input
from custom_layers import Attention, NeuralTensorLayer
def simgnn(parser):
inputA = Input(shape=(None,16))
GinputA = Input(shape=(None,None))
inputB = Input(shape=(None,16))
GinputB = Input(shape=(None,None))
shared_gcn1 = GraphConv(units=parser.filters_1,step_num=3, activation="relu")
shared_gcn2 = GraphConv(units=parser.filters_2,step_num=3, activation="relu")
shared_gcn3 = GraphConv(units=parser.filters_3,step_num=3, activation="relu")
shared_attention = Attention(parser)
x = shared_gcn1([inputA, GinputA])
x = shared_gcn2([x, GinputA])
x = shared_gcn3([x, GinputA])
x = shared_attention(x[0])
y = shared_gcn1([inputB, GinputB])
y = shared_gcn2([y, GinputB])
y = shared_gcn3([y, GinputB])
y = shared_attention(y[0])
z = NeuralTensorLayer(output_dim=16, input_dim=16)([x, y])
z = keras.layers.Dense(16, activation="relu")(z)
z = keras.layers.Dense(8, activation="relu")(z)
z = keras.layers.Dense(4, activation="relu")(z)
z = keras.layers.Dense(1)(z)
z = keras.activations.sigmoid(z)
return Model(inputs=[inputA, GinputA, inputB, GinputB], outputs=z) | true | true |
1c469b82d550b35023a31690c11f6d79e68fe635 | 3,452 | py | Python | testing/modules/args.py | marshallmidden/m4 | 8ff1cb050efdefe6963c6d7f459fd6f3d25eea94 | [
"BSD-2-Clause"
] | null | null | null | testing/modules/args.py | marshallmidden/m4 | 8ff1cb050efdefe6963c6d7f459fd6f3d25eea94 | [
"BSD-2-Clause"
] | null | null | null | testing/modules/args.py | marshallmidden/m4 | 8ff1cb050efdefe6963c6d7f459fd6f3d25eea94 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
#-----------------------------------------------------------------------------
import argparse
import sys
#-----------------------------------------------------------------------------
# Global option variables follow.
#-----------------------------------------------------------------------------
def parse_args(values):
    """Parse *values* as the command line and set the module globals.

    Sets ``args`` (the argparse namespace) and the booleans ``initiator``,
    ``target`` and ``qla2xxx`` derived from the positional arguments.
    Prints help and exits with status 1 on an unrecognized positional.
    """
    global args, initiator, target, qla2xxx
    # Every accepted abbreviation of each positional keyword.
    initiator_words = ('i', 'in', 'ini', 'init', 'initi', 'initia', 'initiat', 'initiato', 'initiator', 'initiators')
    target_words = ('t', 'ta', 'tar', 'targ', 'targe', 'target', 'targets')
    qla_words = ('q', 'ql', 'qla', 'qla2', 'qla2x', 'qla2xx', 'qla2xxx')
    initiator = target = qla2xxx = False
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=("List Linux host numbers, Fibre Channel WWPNs, Card type and Firmware versions,\n"
                "PCI slot numbers, Speed and Supported Speeds, and Link States.\n"
                "May be useful for setting up the file: /etc/modprobe.d/qla2xxx.conf\n"))
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Print each line of qla2xxx.conf file.')
    parser.add_argument('--vv', '-vv', action='store_true',
                        help="Print each Fibre line of 'lspci -vmmD' output.")
    parser.add_argument('--seen', '-s', action='store_true',
                        help='Print rports seen on target ports. (Positional arguments should not be present. See (nothing) above.)')
    parser.add_argument('rest', nargs='*',
                        metavar="initiator|target|qla2xxx",
                        help='Optional output or format limiting.')
    args = parser.parse_args(values)
    bad_argument = False
    for word in args.rest:
        if word in initiator_words:
            initiator = True
        elif word in target_words:
            target = True
        elif word in qla_words:
            qla2xxx = True
        elif not bad_argument:
            # Report only the first unrecognized positional, like before.
            print('-' * 78, file=sys.stderr)
            print("ERROR - unrecognized argument '{}'".format(word), file=sys.stderr)
            bad_argument = True
    if bad_argument:
        print('-' * 78, file=sys.stderr)
        parser.print_help()
        print('-' * 78, file=sys.stderr)
        print("ERROR - read line(s) above help message!", file=sys.stderr)
        exit(1)
# End of parse_args
#-----------------------------------------------------------------------------
def print_args():
    """Dump the type and value of every parsed option global (debug aid)."""
    global args, initiator, target, qla2xxx
    # One (label, value) pair per line, in the original order.
    for label, value in (("args.verbose", args.verbose),
                         ("args.vv", args.vv),
                         ("args.seen", args.seen),
                         ("args.rest", args.rest),
                         ("initiator", initiator),
                         ("target", target),
                         ("qla2xxx", qla2xxx)):
        print(type(value), label + "=", value)
# End of print_args
#-----------------------------------------------------------------------------
# Main script processing.
def main(values):
    """Echo the raw argument list, parse it, then dump the parsed options."""
    print("values=", values)
    parse_args(values)
    print_args()
# End of main
#-----------------------------------------------------------------------------
# Execute the main routine.
if __name__ == "__main__":
main(sys.argv[1:])
exit(0)
#-----------------------------------------------------------------------------
# End of file args.py
| 41.095238 | 135 | 0.503476 |
import argparse
import sys
def parse_args(values):
global args, initiator, target, qla2xxx
init = ('i', 'in', 'ini', 'init', 'initi', 'initia', 'initiat', 'initiato', 'initiator', 'initiators')
targ = ('t', 'ta', 'tar', 'targ', 'targe', 'target', 'targets')
qla2 = ('q', 'ql', 'qla', 'qla2', 'qla2x', 'qla2xx', 'qla2xxx')
initiator = False
target = False
qla2xxx = False
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog= "List Linux host numbers, Fibre Channel WWPNs, Card type and Firmware versions,\n" +
"PCI slot numbers, Speed and Supported Speeds, and Link States.\n" +
"May be useful for setting up the file: /etc/modprobe.d/qla2xxx.conf\n")
parser.add_argument('--verbose', '-v', action='store_true',
help = 'Print each line of qla2xxx.conf file.')
parser.add_argument('--vv', '-vv', action='store_true',
help = 'Print each Fibre line of \'lspci -vmmD\' output.')
parser.add_argument('--seen', '-s', action='store_true',
help = 'Print rports seen on target ports. (Positional arguments should not be present. See (nothing) above.)')
parser.add_argument('rest', nargs='*',
metavar="initiator|target|qla2xxx",
help='Optional output or format limiting.')
args = parser.parse_args(values)
error = False
for a in args.rest:
if a in init:
initiator = True
elif a in targ:
target = True
elif a in qla2:
qla2xxx = True
else:
if not error:
print('-' * 78, file=sys.stderr)
print("ERROR - unrecognized argument '{}'".format(a), file=sys.stderr)
error = True
if error:
print('-' * 78, file=sys.stderr)
parser.print_help()
print('-' * 78, file=sys.stderr)
print("ERROR - read line(s) above help message!", file=sys.stderr)
exit(1)
def print_args():
global args, initiator, target, qla2xxx
print(type(args.verbose), "args.verbose=", args.verbose)
print(type(args.vv), "args.vv=", args.vv)
print(type(args.seen), "args.seen=", args.seen)
print(type(args.rest), "args.rest=", args.rest)
print(type(initiator), "initiator=", initiator)
print(type(target), "target=", target)
print(type(qla2xxx), "qla2xxx=", qla2xxx)
def main(values):
print("values=", values)
parse_args(values)
print_args()
if __name__ == "__main__":
main(sys.argv[1:])
exit(0)
| true | true |
1c469ce9ba3bc866b5d62e0d55297ba6728a69f8 | 275 | py | Python | flaskblog/App/models/__init__.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | flaskblog/App/models/__init__.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | flaskblog/App/models/__init__.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | from .user import User
from .posts import Posts
from App.extensions import db
#创建一个关联模型和用户 存储id 多对多的个关系模型中间表
collections = db.Table('collections',
db.Column('user_id',db.Integer,db.ForeignKey('user.id')),
db.Column('posts_id',db.Integer,db.ForeignKey('posts.id'))
) | 30.555556 | 62 | 0.745455 | from .user import User
from .posts import Posts
from App.extensions import db
collections = db.Table('collections',
db.Column('user_id',db.Integer,db.ForeignKey('user.id')),
db.Column('posts_id',db.Integer,db.ForeignKey('posts.id'))
) | true | true |
1c469d46abeed2c741846f66801fcc1ae85fbd0c | 1,407 | py | Python | pyproc/views/message.py | cmin764/pyproc | be69b5a35fbe3818accea472735effec0825f17c | [
"MIT"
] | null | null | null | pyproc/views/message.py | cmin764/pyproc | be69b5a35fbe3818accea472735effec0825f17c | [
"MIT"
] | null | null | null | pyproc/views/message.py | cmin764/pyproc | be69b5a35fbe3818accea472735effec0825f17c | [
"MIT"
] | null | null | null | """Handle /message page."""
from flask import (
abort,
request,
)
from pyproc import app, tasks
from pyproc.views.base import responsify
@app.route("/message", methods=["POST"])
@responsify
def message():
    """Accept a client message and defer it as an asynchronous task."""
    # Merge the JSON body with any query/form parameters.
    payload = request.get_json() or {}
    payload.update(dict(request.values))
    msg = payload.get("msg")
    if not msg:
        raise abort(400, "missing 'msg' data")
    # Queue the message for deferred processing.
    async_result = tasks.process_message.delay(msg, delta=10)
    if not async_result.task_id or async_result.failed():
        raise abort(400, "task failed")
    # Hand the task ID back so the client can poll /result.
    return {"task_id": async_result.id}
@app.route("/result", methods=["GET"])
@responsify
def result():
    """Report the status (and value, once ready) of a submitted task."""
    # Merge the JSON body with any query/form parameters.
    payload = request.get_json() or {}
    payload.update(dict(request.values))
    tid = payload.get("tid")
    if not tid:
        raise abort(400, "missing 'tid' data")
    # Look the task up by ID; the result is only fetched when finished.
    async_result = tasks.process_message.AsyncResult(tid)
    response = {"status": async_result.status, "result": None}
    if async_result.ready():
        response["result"] = async_result.get()
    return response
| 25.125 | 71 | 0.633262 |
from flask import (
abort,
request,
)
from pyproc import app, tasks
from pyproc.views.base import responsify
@app.route("/message", methods=["POST"])
@responsify
def message():
data = request.get_json() or {}
data.update(dict(request.values))
msg = data.get("msg")
if not msg:
raise abort(400, "missing 'msg' data")
result = tasks.process_message.delay(msg, delta=10)
task_id = result.task_id
if not task_id or result.failed():
raise abort(400, "task failed")
return {
"task_id": result.id
}
@app.route("/result", methods=["GET"])
@responsify
def result():
data = request.get_json() or {}
data.update(dict(request.values))
tid = data.get("tid")
if not tid:
raise abort(400, "missing 'tid' data")
result = tasks.process_message.AsyncResult(tid)
resp = {
"status": result.status,
"result": None,
}
if result.ready():
resp["result"] = result.get()
return resp
| true | true |
1c469feaecafc257a8d704b752cfef5883265ac6 | 291 | py | Python | random/agent.py | iejMac/TTTArena | 056636f064769c3251fb2448e7487b4fa8394733 | [
"MIT"
] | 3 | 2021-05-23T23:55:03.000Z | 2021-07-09T16:01:10.000Z | random/agent.py | iejMac/TTTArena | 056636f064769c3251fb2448e7487b4fa8394733 | [
"MIT"
] | null | null | null | random/agent.py | iejMac/TTTArena | 056636f064769c3251fb2448e7487b4fa8394733 | [
"MIT"
] | 2 | 2021-07-09T11:44:09.000Z | 2021-07-11T12:32:58.000Z | from agent import Agent
from numpy.random import randint
class RandomAgent(Agent):
    """Agent that plays a uniformly random cell of the board."""

    def __init__(self, name):
        super().__init__(name)

    def make_action(self, state):
        """Return a random (row, col) position within *state*'s bounds."""
        # Keep the original randint call order (column first, then row) so
        # seeded RNG sequences are unchanged.
        col = randint(0, state.shape[1])
        row = randint(0, state.shape[0])
        return (row, col)
| 20.785714 | 39 | 0.659794 | from agent import Agent
from numpy.random import randint
class RandomAgent(Agent):
def __init__(self, name):
super().__init__(name)
def make_action(self, state):
movex = randint(0, state.shape[1])
movey = randint(0, state.shape[0])
return (movey, movex)
| true | true |
1c46a09640f355fa068c98b298841bc96a9474b0 | 1,533 | py | Python | count_zeroavg.py | Renata1995/Topic-Distance-and-Coherence | d567d5b3ef71ea5654f214aa3736add7f3ac94bc | [
"Apache-2.0"
] | 5 | 2018-08-25T07:16:31.000Z | 2020-11-12T00:36:15.000Z | count_zeroavg.py | Renata1995/Topic-Distance-and-Coherence | d567d5b3ef71ea5654f214aa3736add7f3ac94bc | [
"Apache-2.0"
] | 1 | 2018-09-24T16:17:47.000Z | 2018-09-24T16:17:47.000Z | count_zeroavg.py | Renata1995/Topic-Distance-and-Coherence | d567d5b3ef71ea5654f214aa3736add7f3ac94bc | [
"Apache-2.0"
] | 4 | 2018-05-07T07:52:10.000Z | 2020-11-12T00:36:18.000Z | import sys
# Summarize WordNet-coverage statistics across topic-model runs into
# wn_zeros_summary.txt, one line per similarity metric.
# argv: [1] corpus-type flag (t=tfidf, b=binary, else bow)  [2] topics count
#       [3] source corpus name  [4] similarity metric  [5] words per topic
# NOTE(review): corpus_type, topics_count and tc parsed from argv below are
# overwritten by the loop variables further down, so only `src` and
# `words_count` actually influence the output -- confirm this is intended.
import utils.name_convention as name
import numpy as np
if len(sys.argv) <= 1:
    corpus_type = "bow"
else:
    if sys.argv[1] == "t":
        corpus_type = "tfidf"
    elif sys.argv[1] == "b":
        corpus_type = "binary"
    else:
        corpus_type = "bow"
if len(sys.argv) <= 2:
    topics_count = 3
else:
    topics_count = int(sys.argv[2])
if len(sys.argv) <= 3:
    src = "pp_reuters"
else:
    src = sys.argv[3]
if len(sys.argv) <= 4:
    tc = "path"
else:
    tc = sys.argv[4]
if len(sys.argv) <= 5:
    words_count = 150
else:
    words_count = int(sys.argv[5])
# Unordered word pairs per topic (a float under Python 3 true division).
word_pairs = words_count*(words_count - 1)/2
# NOTE(review): ofile and the zfile handles below are never closed explicitly.
ofile = open("wn_zeros_summary.txt", "w")
for tc in "path wup lch lin res jcn".split():
    ofile.write(tc + ": ")
    avgwn_list = []
    avgdist_list = []
    for corpus_type in ["tfidf", "bow","binary"]:
        for topics_count in [5,10,15,20]:
            # Per-run summary file: line 1 counts words missing from WordNet,
            # line 2 counts pairs without a distance; both formatted "label: N".
            dname = name.get_output_dir(corpus_type, topics_count, src)
            zfile = open(dname + "/zeros_sum_" + tc + "_w" + str(words_count) + ".txt")
            not_in_wn = int(zfile.readline().split(":")[1])
            no_distance = int(zfile.readline().split(":")[1])
            # Normalize counts to per-pair averages across all topics.
            avg_wn = float(not_in_wn)/(topics_count * word_pairs)
            avgwn_list.append(avg_wn)
            avg_dis = float(no_distance)/(topics_count * word_pairs)
            avgdist_list.append(avg_dis)
    ofile.write("not in wn: " + str(np.average(avgwn_list))+ " no distance: " + str(np.average(avgdist_list))+"\n")
| 27.375 | 116 | 0.580561 | import sys
import utils.name_convention as name
import numpy as np
if len(sys.argv) <= 1:
corpus_type = "bow"
else:
if sys.argv[1] == "t":
corpus_type = "tfidf"
elif sys.argv[1] == "b":
corpus_type = "binary"
else:
corpus_type = "bow"
if len(sys.argv) <= 2:
topics_count = 3
else:
topics_count = int(sys.argv[2])
if len(sys.argv) <= 3:
src = "pp_reuters"
else:
src = sys.argv[3]
if len(sys.argv) <= 4:
tc = "path"
else:
tc = sys.argv[4]
if len(sys.argv) <= 5:
words_count = 150
else:
words_count = int(sys.argv[5])
word_pairs = words_count*(words_count - 1)/2
ofile = open("wn_zeros_summary.txt", "w")
for tc in "path wup lch lin res jcn".split():
ofile.write(tc + ": ")
avgwn_list = []
avgdist_list = []
for corpus_type in ["tfidf", "bow","binary"]:
for topics_count in [5,10,15,20]:
dname = name.get_output_dir(corpus_type, topics_count, src)
zfile = open(dname + "/zeros_sum_" + tc + "_w" + str(words_count) + ".txt")
not_in_wn = int(zfile.readline().split(":")[1])
no_distance = int(zfile.readline().split(":")[1])
avg_wn = float(not_in_wn)/(topics_count * word_pairs)
avgwn_list.append(avg_wn)
avg_dis = float(no_distance)/(topics_count * word_pairs)
avgdist_list.append(avg_dis)
ofile.write("not in wn: " + str(np.average(avgwn_list))+ " no distance: " + str(np.average(avgdist_list))+"\n")
| true | true |
1c46a1195f1820aed9bcbc13e7c2b5fa70a3462e | 2,788 | py | Python | 06-About_json/qq_geci.py | jiaxiaochu/Crawler | bb54d515dc217c27574b36124e16fd5b993775bd | [
"MIT"
] | null | null | null | 06-About_json/qq_geci.py | jiaxiaochu/Crawler | bb54d515dc217c27574b36124e16fd5b993775bd | [
"MIT"
] | 1 | 2020-08-27T10:25:38.000Z | 2020-08-27T10:25:38.000Z | 06-About_json/qq_geci.py | jiaxiaochu/Crawler | bb54d515dc217c27574b36124e16fd5b993775bd | [
"MIT"
] | null | null | null | # !/Library/Frameworks/Python.framework/Versions/3.7/bin/python3
# @blog : www.jiazhixiang.xyz
# @Author : Jiazhixiang
# -*- coding:utf-8 -*-
# Scrape QQ Music: for the first three pages of search results for the
# artist, fetch every song's lyric and print "<title> <lyric>".
import requests
url_song = 'https://c.y.qq.com/soso/fcgi-bin/client_search_cp'
for x in range(1, 4):
    # Search-API query parameters for result page `x` (10 songs per page).
    params_song = {
        'ct': '24',
        'qqmusic_ver': '1298',
        'new_json': '1',
        'remoteplace': 'sizer.yqq.song_next',
        'searchid': '64405487069162918',
        't': '0',
        'aggr': '1',
        'cr': '1',
        'catZhida': '1',
        'lossless': '0',
        'flag_qc': '0',
        'p': x,
        'n': '10',
        'w': '五月天',
        'g_tk': '5381',
        'loginUin': '0',
        'hostUin': '0',
        'format': 'json',
        'inCharset': 'utf8',
        'outCharset': 'utf-8',
        'notice': '0',
        'platform': 'yqq.json',
        'needNewCode': '0'
    }
    # Request parameters packed into a dict for requests.get.
    headers_song = {
        'origin': 'https://y.qq.com',
        # Origin of the request.
        'referer': 'https://y.qq.com/n/yqq/song/004Z8Ihr0JIu5s.html',
        # Referer of the request.
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        # Identifies the device/browser the request pretends to come from.
    }
    res_music = requests.get(url_song, params=params_song, headers=headers_song)
    # Issue the GET request to download this result page.
    json_music = res_music.json()
    # Parse the response body into a list/dict with .json().
    list_music = json_music['data']['song']['list']
    # Drill down through the nested dicts to the list of songs.
    url_lyric = 'https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_yqq.fcg'
    for music in list_music:
        name = music['name']
        # The 'name' key holds the song title.
        params_lyric = {
            'nobase64': '1',
            'musicid': str(music['id']),
            '-': 'jsonp1',
            'g_tk': '5381',
            'loginUin': '0',
            'hostUin': '0',
            'format': 'json',
            'inCharset': 'utf8',
            'outCharset': 'utf-8',
            'notice': '0',
            'platform': 'yqq.json',
            'needNewCode': '0'
        }
        headers_lyric = {
            'origin': 'https://y.qq.com',
            # Origin of the request.
            'referer': 'https://y.qq.com/n/yqq/song/{0}.html'.format(music['mid']),
            # Referer of the request (points at this song's page).
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
            # Identifies the device/browser the request pretends to come from.
        }
        res_lyric = requests.get(url_lyric, params=params_lyric, headers=headers_lyric)
        # Issue the GET request for this song's lyric.
        json_lyric = res_lyric.json()
        # Parse the response body into a list/dict with .json().
        lyric = json_lyric['lyric']
        # The 'lyric' field holds the lyric text.
        print(name, lyric)
| 33.190476 | 142 | 0.51901 |
import requests
url_song = 'https://c.y.qq.com/soso/fcgi-bin/client_search_cp'
for x in range(1, 4):
params_song = {
'ct': '24',
'qqmusic_ver': '1298',
'new_json': '1',
'remoteplace': 'sizer.yqq.song_next',
'searchid': '64405487069162918',
't': '0',
'aggr': '1',
'cr': '1',
'catZhida': '1',
'lossless': '0',
'flag_qc': '0',
'p': x,
'n': '10',
'w': '五月天',
'g_tk': '5381',
'loginUin': '0',
'hostUin': '0',
'format': 'json',
'inCharset': 'utf8',
'outCharset': 'utf-8',
'notice': '0',
'platform': 'yqq.json',
'needNewCode': '0'
}
headers_song = {
'origin': 'https://y.qq.com',
'referer': 'https://y.qq.com/n/yqq/song/004Z8Ihr0JIu5s.html',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
}
res_music = requests.get(url_song, params=params_song, headers=headers_song)
json_music = res_music.json()
list_music = json_music['data']['song']['list']
url_lyric = 'https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_yqq.fcg'
for music in list_music:
name = music['name']
params_lyric = {
'nobase64': '1',
'musicid': str(music['id']),
'-': 'jsonp1',
'g_tk': '5381',
'loginUin': '0',
'hostUin': '0',
'format': 'json',
'inCharset': 'utf8',
'outCharset': 'utf-8',
'notice': '0',
'platform': 'yqq.json',
'needNewCode': '0'
}
headers_lyric = {
'origin': 'https://y.qq.com',
'referer': 'https://y.qq.com/n/yqq/song/{0}.html'.format(music['mid']),
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
}
res_lyric = requests.get(url_lyric, params=params_lyric, headers=headers_lyric)
json_lyric = res_lyric.json()
lyric = json_lyric['lyric']
print(name, lyric)
| true | true |
1c46a149c4c1484cee731d07530a6c9bf4e29a18 | 2,633 | py | Python | dstream_excel/tracker/files.py | nickderobertis/datastream-excel-downloader-py | 3407decdf27da117758ce5ecc538d9f65c6ad5f6 | [
"MIT"
] | 1 | 2019-10-14T10:36:18.000Z | 2019-10-14T10:36:18.000Z | dstream_excel/tracker/files.py | whoopnip/datastream-excel-downloader-py | 3407decdf27da117758ce5ecc538d9f65c6ad5f6 | [
"MIT"
] | 4 | 2020-03-24T17:45:15.000Z | 2021-06-02T00:20:24.000Z | dstream_excel/tracker/files.py | whoopnip/datastream-excel-downloader-py | 3407decdf27da117758ce5ecc538d9f65c6ad5f6 | [
"MIT"
] | null | null | null | import ast
import os
import time
from dstream_excel.tracker.timing import TimeTracker
class FileProcessTracker:
    """Track which files in a folder have already been processed.

    Progress is persisted to ``completed.txt`` inside the folder so an
    interrupted run can resume where it left off.
    """

    def __init__(self, folder=None, restart=False, file_types=('csv',)):
        self.folder = os.getcwd() if folder is None else os.path.abspath(folder)
        self.completed_list_path = os.path.join(self.folder, 'completed.txt')
        if restart:
            # Forget any previously recorded progress before loading.
            self.delete_completed_files()
        self.restart = restart
        self.load_completed_files()
        self.load_process_files(file_types=file_types)

    def file_generator(self):
        """Yield the full path of each unprocessed file, recording it as done."""
        timer = TimeTracker(self.folder, restart=self.restart)
        total = len(self.process_list)
        for entry in self.process_list:
            yield os.path.join(self.folder, entry)
            # Control returns here once the consumer finished with the file.
            self.add_file_to_completed(entry)
            timer.time_estimate(total)

    def add_file_to_completed(self, file):
        """Append *file* to the completed list and persist it."""
        self.completed_list.append(file)
        _update_completed_files(self.completed_list_path, self.completed_list)

    def load_completed_files(self):
        """Load the persisted completed list (empty when absent)."""
        self.completed_list = _load_completed_files(self.completed_list_path)

    def load_process_files(self, file_types):
        """Compute the list of files still to be processed."""
        self.process_list = _load_to_process_files(self.folder, self.completed_list, file_types)

    def delete_completed_files(self):
        """Remove the persisted completed list, if any."""
        _delete_completed_files(self.completed_list_path)
def _load_to_process_files(folder, completed_list, file_types):
    """Return matching file names in *folder* not already in *completed_list*."""
    candidates = _load_initial_file_list(folder, file_types)
    return [entry for entry in candidates if entry not in completed_list]
def _update_completed_files(completed_list_path, completed_list):
    # Persist the completed list; retry handling is delegated to the writer.
    _write_to_file_with_retries(completed_list_path, completed_list)
def _write_to_file_with_retries(*args, retries_remaining=10, **kwargs):
try:
with open(args[0], 'w') as f:
f.write(f'{args[1]}')
except (OSError, PermissionError):
time.sleep(.1)
_write_to_file_with_retries(*args, retries_remaining=retries_remaining-1, **kwargs)
def _load_completed_files(completed_list_path):
# Not started yet, none completed
if not os.path.exists(completed_list_path):
return []
with open(completed_list_path, 'r') as f:
completed_list = ast.literal_eval(f.read())
return completed_list
def _load_initial_file_list(folder, file_types):
return [file for file in next(os.walk(folder))[2] if any([file.endswith(ending) for ending in file_types])]
def _delete_completed_files(completed_list_path):
if os.path.exists(completed_list_path):
os.remove(completed_list_path) | 30.976471 | 111 | 0.713635 | import ast
import os
import time
from dstream_excel.tracker.timing import TimeTracker
class FileProcessTracker:
def __init__(self, folder=None, restart=False, file_types=('csv',)):
if folder is None:
self.folder = os.getcwd()
else:
self.folder = os.path.abspath(folder)
self.completed_list_path = os.path.join(self.folder, 'completed.txt')
if restart:
self.delete_completed_files()
self.restart = restart
self.load_completed_files()
self.load_process_files(file_types=file_types)
def file_generator(self):
timer = TimeTracker(self.folder, restart=self.restart)
num_items = len(self.process_list)
for file in self.process_list:
yield os.path.join(self.folder, file)
self.add_file_to_completed(file)
timer.time_estimate(num_items)
def add_file_to_completed(self, file):
self.completed_list.extend([file])
_update_completed_files(self.completed_list_path, self.completed_list)
def load_completed_files(self):
self.completed_list = _load_completed_files(self.completed_list_path)
def load_process_files(self, file_types):
self.process_list = _load_to_process_files(self.folder, self.completed_list, file_types)
def delete_completed_files(self):
_delete_completed_files(self.completed_list_path)
def _load_to_process_files(folder, completed_list, file_types):
files = _load_initial_file_list(folder, file_types)
return [file for file in files if file not in completed_list]
def _update_completed_files(completed_list_path, completed_list):
_write_to_file_with_retries(completed_list_path, completed_list)
def _write_to_file_with_retries(*args, retries_remaining=10, **kwargs):
try:
with open(args[0], 'w') as f:
f.write(f'{args[1]}')
except (OSError, PermissionError):
time.sleep(.1)
_write_to_file_with_retries(*args, retries_remaining=retries_remaining-1, **kwargs)
def _load_completed_files(completed_list_path):
if not os.path.exists(completed_list_path):
return []
with open(completed_list_path, 'r') as f:
completed_list = ast.literal_eval(f.read())
return completed_list
def _load_initial_file_list(folder, file_types):
return [file for file in next(os.walk(folder))[2] if any([file.endswith(ending) for ending in file_types])]
def _delete_completed_files(completed_list_path):
if os.path.exists(completed_list_path):
os.remove(completed_list_path) | true | true |
1c46a17bd0d5d84ec22deeb07d1811a1fdd110c1 | 37,607 | py | Python | msgraph-cli-extensions/v1_0/notes_v1_0/azext_notes_v1_0/vendored_sdks/notes/aio/operations/_sites_onenote_sections_parent_section_group_parent_notebook_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | msgraph-cli-extensions/v1_0/notes_v1_0/azext_notes_v1_0/vendored_sdks/notes/aio/operations/_sites_onenote_sections_parent_section_group_parent_notebook_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | 22 | 2022-03-29T22:54:37.000Z | 2022-03-29T22:55:27.000Z | msgraph-cli-extensions/v1_0/notes_v1_0/azext_notes_v1_0/vendored_sdks/notes/aio/operations/_sites_onenote_sections_parent_section_group_parent_notebook_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SitesOnenoteSectionsParentSectionGroupParentNotebookOperations:
"""SitesOnenoteSectionsParentSectionGroupParentNotebookOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~notes.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Store the shared pipeline client, configuration and (de)serializers
        # injected by the service client; all operation methods use these.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_section_groups(
        self,
        site_id: str,
        onenote_section_id: str,
        orderby: Optional[List[Union[str, "models.Enum708"]]] = None,
        select: Optional[List[Union[str, "models.Enum709"]]] = None,
        expand: Optional[List[Union[str, "models.Enum710"]]] = None,
        **kwargs
    ) -> AsyncIterable["models.CollectionOfSectionGroup39"]:
        """Get sectionGroups from sites.
        Get sectionGroups from sites.
        :param site_id: key: id of site.
        :type site_id: str
        :param onenote_section_id: key: id of onenoteSection.
        :type onenote_section_id: str
        :param orderby: Order items by property values.
        :type orderby: list[str or ~notes.models.Enum708]
        :param select: Select properties to be returned.
        :type select: list[str or ~notes.models.Enum709]
        :param expand: Expand related entities.
        :type expand: list[str or ~notes.models.Enum710]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CollectionOfSectionGroup39 or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~notes.models.CollectionOfSectionGroup39]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfSectionGroup39"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Build the GET request: either the first page (URL template plus the
        # OData query options) or a continuation link returned by the service.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_section_groups.metadata['url']  # type: ignore
                path_format_arguments = {
                    'site-id': self._serialize.url("site_id", site_id, 'str'),
                    'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation links already embed the query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page into (next-link, items) for the pager.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('CollectionOfSectionGroup39', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)
        # Fetch one page through the pipeline, mapping error status codes.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups'}  # type: ignore
async def create_section_groups(
self,
site_id: str,
onenote_section_id: str,
body: "models.MicrosoftGraphSectionGroup",
**kwargs
) -> "models.MicrosoftGraphSectionGroup":
"""Create new navigation property to sectionGroups for sites.
Create new navigation property to sectionGroups for sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param body: New navigation property.
:type body: ~notes.models.MicrosoftGraphSectionGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphSectionGroup, or the result of cls(response)
:rtype: ~notes.models.MicrosoftGraphSectionGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphSectionGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_section_groups.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphSectionGroup')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups'} # type: ignore
async def get_section_groups(
self,
site_id: str,
onenote_section_id: str,
section_group_id: str,
select: Optional[List[Union[str, "models.Enum711"]]] = None,
expand: Optional[List[Union[str, "models.Enum712"]]] = None,
**kwargs
) -> "models.MicrosoftGraphSectionGroup":
"""Get sectionGroups from sites.
Get sectionGroups from sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param select: Select properties to be returned.
:type select: list[str or ~notes.models.Enum711]
:param expand: Expand related entities.
:type expand: list[str or ~notes.models.Enum712]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphSectionGroup, or the result of cls(response)
:rtype: ~notes.models.MicrosoftGraphSectionGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphSectionGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_section_groups.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}'} # type: ignore
async def update_section_groups(
self,
site_id: str,
onenote_section_id: str,
section_group_id: str,
body: "models.MicrosoftGraphSectionGroup",
**kwargs
) -> None:
"""Update the navigation property sectionGroups in sites.
Update the navigation property sectionGroups in sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param body: New navigation property values.
:type body: ~notes.models.MicrosoftGraphSectionGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_section_groups.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphSectionGroup')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}'} # type: ignore
async def delete_section_groups(
self,
site_id: str,
onenote_section_id: str,
section_group_id: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property sectionGroups for sites.
Delete navigation property sectionGroups for sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_section_groups.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}'} # type: ignore
    def list_sections(
        self,
        site_id: str,
        onenote_section_id: str,
        orderby: Optional[List[Union[str, "models.Enum713"]]] = None,
        select: Optional[List[Union[str, "models.Enum714"]]] = None,
        expand: Optional[List[Union[str, "models.Enum715"]]] = None,
        **kwargs
    ) -> AsyncIterable["models.CollectionOfOnenoteSection39"]:
        """Get sections from sites.
        Get sections from sites.
        :param site_id: key: id of site.
        :type site_id: str
        :param onenote_section_id: key: id of onenoteSection.
        :type onenote_section_id: str
        :param orderby: Order items by property values.
        :type orderby: list[str or ~notes.models.Enum713]
        :param select: Select properties to be returned.
        :type select: list[str or ~notes.models.Enum714]
        :param expand: Expand related entities.
        :type expand: list[str or ~notes.models.Enum715]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CollectionOfOnenoteSection39 or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~notes.models.CollectionOfOnenoteSection39]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfOnenoteSection39"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Builds the GET request for one page. The first page uses the templated URL
        # plus the OData query options; subsequent pages reuse the server-provided
        # next_link verbatim, which already embeds every query parameter.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_sections.metadata['url']  # type: ignore
                path_format_arguments = {
                    'site-id': self._serialize.url("site_id", site_id, 'str'),
                    'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters ($top/$skip/$search/$filter/$count come from the
                # client configuration; $orderby/$select/$expand from this call).
                query_parameters = {}  # type: Dict[str, Any]
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation: the next_link URL is complete, so no query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Unpacks one page into (continuation token or None, async list of items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('CollectionOfOnenoteSection39', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)
        # Fetches one page through the pipeline and maps error status codes.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections'}  # type: ignore
async def create_sections(
self,
site_id: str,
onenote_section_id: str,
body: "models.MicrosoftGraphOnenoteSection",
**kwargs
) -> "models.MicrosoftGraphOnenoteSection":
"""Create new navigation property to sections for sites.
Create new navigation property to sections for sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param body: New navigation property.
:type body: ~notes.models.MicrosoftGraphOnenoteSection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenoteSection, or the result of cls(response)
:rtype: ~notes.models.MicrosoftGraphOnenoteSection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteSection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_sections.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphOnenoteSection')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenoteSection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections'} # type: ignore
async def get_sections(
self,
site_id: str,
onenote_section_id: str,
onenote_section_id1: str,
select: Optional[List[Union[str, "models.Enum716"]]] = None,
expand: Optional[List[Union[str, "models.Enum717"]]] = None,
**kwargs
) -> "models.MicrosoftGraphOnenoteSection":
"""Get sections from sites.
Get sections from sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_section_id1: key: id of onenoteSection.
:type onenote_section_id1: str
:param select: Select properties to be returned.
:type select: list[str or ~notes.models.Enum716]
:param expand: Expand related entities.
:type expand: list[str or ~notes.models.Enum717]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenoteSection, or the result of cls(response)
:rtype: ~notes.models.MicrosoftGraphOnenoteSection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteSection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_sections.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenoteSection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections/{onenoteSection-id1}'} # type: ignore
async def update_sections(
self,
site_id: str,
onenote_section_id: str,
onenote_section_id1: str,
body: "models.MicrosoftGraphOnenoteSection",
**kwargs
) -> None:
"""Update the navigation property sections in sites.
Update the navigation property sections in sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_section_id1: key: id of onenoteSection.
:type onenote_section_id1: str
:param body: New navigation property values.
:type body: ~notes.models.MicrosoftGraphOnenoteSection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_sections.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphOnenoteSection')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections/{onenoteSection-id1}'} # type: ignore
async def delete_sections(
self,
site_id: str,
onenote_section_id: str,
onenote_section_id1: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property sections for sites.
Delete navigation property sections for sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_section_id1: key: id of onenoteSection.
:type onenote_section_id1: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_sections.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections/{onenoteSection-id1}'} # type: ignore
| 48.275995 | 183 | 0.653416 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
# Generic return type threaded through the per-operation `cls` response hooks.
T = TypeVar('T')
# Signature of the optional `cls` callback each operation accepts:
# (pipeline_response, deserialized_result, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SitesOnenoteSectionsParentSectionGroupParentNotebookOperations:
    """Async operations on the parentSectionGroup/parentNotebook navigation paths of site sections.

    Instances of this class are created by the service client; you should not
    instantiate it directly.
    """
    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the shared pipeline client, configuration and (de)serializers."""
        self._client = client
        self._config = config
        self._serialize = serializer
        self._deserialize = deserializer
    def list_section_groups(
        self,
        site_id: str,
        onenote_section_id: str,
        orderby: Optional[List[Union[str, "models.Enum708"]]] = None,
        select: Optional[List[Union[str, "models.Enum709"]]] = None,
        expand: Optional[List[Union[str, "models.Enum710"]]] = None,
        **kwargs
    ) -> AsyncIterable["models.CollectionOfSectionGroup39"]:
        """Get sectionGroups from sites.

        :param site_id: key: id of site.
        :type site_id: str
        :param onenote_section_id: key: id of onenoteSection.
        :type onenote_section_id: str
        :param orderby: Order items by property values.
        :type orderby: list[str or ~notes.models.Enum708]
        :param select: Select properties to be returned.
        :type select: list[str or ~notes.models.Enum709]
        :param expand: Expand related entities.
        :type expand: list[str or ~notes.models.Enum710]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CollectionOfSectionGroup39 or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~notes.models.CollectionOfSectionGroup39]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfSectionGroup39"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Builds the GET request for one page: the first page uses the templated URL
        # plus OData query options; later pages reuse the server-supplied next_link.
        def prepare_request(next_link=None):
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_section_groups.metadata['url']
                path_format_arguments = {
                    'site-id': self._serialize.url("site_id", site_id, 'str'),
                    'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # $top/$skip/$search/$filter/$count come from the client configuration;
                # $orderby/$select/$expand from this call's arguments.
                query_parameters = {}  # type: Dict[str, Any]
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation URL already carries every query parameter.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Unpacks one page into (continuation token or None, async list of items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('CollectionOfSectionGroup39', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)
        # Fetches one page through the pipeline and maps error status codes.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups'}
async def create_section_groups(
self,
site_id: str,
onenote_section_id: str,
body: "models.MicrosoftGraphSectionGroup",
**kwargs
) -> "models.MicrosoftGraphSectionGroup":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self.create_section_groups.metadata['url']
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(body, 'MicrosoftGraphSectionGroup')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups'}
    async def get_section_groups(
        self,
        site_id: str,
        onenote_section_id: str,
        section_group_id: str,
        select: Optional[List[Union[str, "models.Enum711"]]] = None,
        expand: Optional[List[Union[str, "models.Enum712"]]] = None,
        **kwargs
    ) -> "models.MicrosoftGraphSectionGroup":
        """Retrieve one sectionGroup from the section's parent notebook.

        Issues ``GET .../parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}``.

        :param site_id: Key of the site entity.
        :param onenote_section_id: Key of the onenoteSection entity.
        :param section_group_id: Key of the sectionGroup entity.
        :param select: Properties to include (``$select``).
        :param expand: Related entities to expand (``$expand``).
        :return: The requested ``MicrosoftGraphSectionGroup`` (or the result of
            the optional ``cls`` response callback passed via ``kwargs``).
        :raises HttpResponseError: If the service returns anything but 200.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        url = self.get_section_groups.metadata['url']
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
            'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        # OData projection/expansion parameters are only sent when provided.
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MicrosoftGraphSectionGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}'}
    async def update_section_groups(
        self,
        site_id: str,
        onenote_section_id: str,
        section_group_id: str,
        body: "models.MicrosoftGraphSectionGroup",
        **kwargs
    ) -> None:
        """Update (PATCH) one sectionGroup in the section's parent notebook.

        :param site_id: Key of the site entity.
        :param onenote_section_id: Key of the onenoteSection entity.
        :param section_group_id: Key of the sectionGroup entity.
        :param body: New navigation property values to apply.
        :return: ``None`` (service replies 204 No Content), or the result of
            the optional ``cls`` response callback passed via ``kwargs``.
        :raises HttpResponseError: If the service returns anything but 204.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        url = self.update_section_groups.metadata['url']
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
            'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}
        body_content = self._serialize.body(body, 'MicrosoftGraphSectionGroup')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    update_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}'}
    async def delete_section_groups(
        self,
        site_id: str,
        onenote_section_id: str,
        section_group_id: str,
        if_match: Optional[str] = None,
        **kwargs
    ) -> None:
        """Delete one sectionGroup from the section's parent notebook.

        :param site_id: Key of the site entity.
        :param onenote_section_id: Key of the onenoteSection entity.
        :param section_group_id: Key of the sectionGroup entity.
        :param if_match: ETag for optimistic concurrency; sent as ``If-Match``
            only when supplied.
        :return: ``None`` (service replies 204 No Content), or the result of
            the optional ``cls`` response callback passed via ``kwargs``.
        :raises HttpResponseError: If the service returns anything but 204.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        url = self.delete_section_groups.metadata['url']
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
            'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        header_parameters = {}
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}'}
    def list_sections(
        self,
        site_id: str,
        onenote_section_id: str,
        orderby: Optional[List[Union[str, "models.Enum713"]]] = None,
        select: Optional[List[Union[str, "models.Enum714"]]] = None,
        expand: Optional[List[Union[str, "models.Enum715"]]] = None,
        **kwargs
    ) -> AsyncIterable["models.CollectionOfOnenoteSection39"]:
        """List the sections of the section's parent notebook, as an async pager.

        Returns immediately with an ``AsyncItemPaged``; each page is fetched
        lazily via ``GET .../parentSectionGroup/parentNotebook/sections`` and
        follow-up ``@odata.nextLink`` URLs.

        :param site_id: Key of the site entity.
        :param onenote_section_id: Key of the onenoteSection entity.
        :param orderby: Sort order for the results (``$orderby``).
        :param select: Properties to include (``$select``).
        :param expand: Related entities to expand (``$expand``).
        :return: An async iterator of ``MicrosoftGraphOnenoteSection`` pages.
        :raises HttpResponseError: Raised during iteration for non-200 pages.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the first-page request (full URL + query string from client
            # config and the caller's OData options) or, for later pages, a bare
            # GET of the server-provided next link.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_sections.metadata['url']
                path_format_arguments = {
                    'site-id': self._serialize.url("site_id", site_id, 'str'),
                    'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                # Client-level paging/filtering defaults come from self._config.
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand (next-link, items) back to the pager.
            deserialized = self._deserialize('CollectionOfOnenoteSection39', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page through the pipeline, raising on non-200 responses.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections'}
    async def create_sections(
        self,
        site_id: str,
        onenote_section_id: str,
        body: "models.MicrosoftGraphOnenoteSection",
        **kwargs
    ) -> "models.MicrosoftGraphOnenoteSection":
        """Create a new section in the section's parent notebook.

        Issues ``POST .../parentSectionGroup/parentNotebook/sections``.

        :param site_id: Key of the site entity.
        :param onenote_section_id: Key of the onenoteSection entity.
        :param body: New navigation property values to create.
        :return: The created ``MicrosoftGraphOnenoteSection`` (or the result of
            the optional ``cls`` response callback passed via ``kwargs``).
        :raises HttpResponseError: If the service returns anything but 201.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        url = self.create_sections.metadata['url']
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}
        body_content = self._serialize.body(body, 'MicrosoftGraphOnenoteSection')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MicrosoftGraphOnenoteSection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections'}
    async def get_sections(
        self,
        site_id: str,
        onenote_section_id: str,
        onenote_section_id1: str,
        select: Optional[List[Union[str, "models.Enum716"]]] = None,
        expand: Optional[List[Union[str, "models.Enum717"]]] = None,
        **kwargs
    ) -> "models.MicrosoftGraphOnenoteSection":
        """Retrieve one section from the section's parent notebook.

        :param site_id: Key of the site entity.
        :param onenote_section_id: Key of the (outer) onenoteSection entity.
        :param onenote_section_id1: Key of the target onenoteSection entity.
        :param select: Properties to include (``$select``).
        :param expand: Related entities to expand (``$expand``).
        :return: The requested ``MicrosoftGraphOnenoteSection`` (or the result
            of the optional ``cls`` response callback passed via ``kwargs``).
        :raises HttpResponseError: If the service returns anything but 200.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        url = self.get_sections.metadata['url']
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
            'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MicrosoftGraphOnenoteSection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections/{onenoteSection-id1}'}
    async def update_sections(
        self,
        site_id: str,
        onenote_section_id: str,
        onenote_section_id1: str,
        body: "models.MicrosoftGraphOnenoteSection",
        **kwargs
    ) -> None:
        """Update (PATCH) one section in the section's parent notebook.

        :param site_id: Key of the site entity.
        :param onenote_section_id: Key of the (outer) onenoteSection entity.
        :param onenote_section_id1: Key of the target onenoteSection entity.
        :param body: New navigation property values to apply.
        :return: ``None`` (service replies 204 No Content), or the result of
            the optional ``cls`` response callback passed via ``kwargs``.
        :raises HttpResponseError: If the service returns anything but 204.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        url = self.update_sections.metadata['url']
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
            'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}
        body_content = self._serialize.body(body, 'MicrosoftGraphOnenoteSection')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    update_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections/{onenoteSection-id1}'}
    async def delete_sections(
        self,
        site_id: str,
        onenote_section_id: str,
        onenote_section_id1: str,
        if_match: Optional[str] = None,
        **kwargs
    ) -> None:
        """Delete one section from the section's parent notebook.

        :param site_id: Key of the site entity.
        :param onenote_section_id: Key of the (outer) onenoteSection entity.
        :param onenote_section_id1: Key of the target onenoteSection entity.
        :param if_match: ETag for optimistic concurrency; sent as ``If-Match``
            only when supplied.
        :return: ``None`` (service replies 204 No Content), or the result of
            the optional ``cls`` response callback passed via ``kwargs``.
        :raises HttpResponseError: If the service returns anything but 204.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        url = self.delete_sections.metadata['url']
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
            'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        header_parameters = {}
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections/{onenoteSection-id1}'}
| true | true |
1c46a26fe64a116c11f1bc95d480deebd0f970c0 | 5,517 | py | Python | docs/source/conf.py | zeromake/restful-model | f2bed56a2aa23ade4a7882296c41222a64dc24f2 | [
"MIT"
] | 8 | 2018-08-09T10:03:53.000Z | 2020-03-03T11:02:11.000Z | docs/source/conf.py | zeromake/restful-model | f2bed56a2aa23ade4a7882296c41222a64dc24f2 | [
"MIT"
] | null | null | null | docs/source/conf.py | zeromake/restful-model | f2bed56a2aa23ade4a7882296c41222a64dc24f2 | [
"MIT"
] | 2 | 2019-08-12T20:53:46.000Z | 2021-11-04T06:01:23.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'restful_model'
copyright = '2018, zeromake <a390720046@gmail.com>'
author = 'zeromake <a390720046@gmail.com>'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'restful_modeldoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'restful_model.tex', 'restful\\_model Documentation',
'zeromake \\textless{}a390720046@gmail.com\\textgreater{}', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'restful_model', 'restful_model Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'restful_model', 'restful_model Documentation',
author, 'restful_model', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 31.169492 | 79 | 0.64691 |
project = 'restful_model'
copyright = '2018, zeromake <a390720046@gmail.com>'
author = 'zeromake <a390720046@gmail.com>'
version = ''
release = ''
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = 'zh'
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'restful_modeldoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'restful_model.tex', 'restful\\_model Documentation',
'zeromake \\textless{}a390720046@gmail.com\\textgreater{}', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'restful_model', 'restful_model Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'restful_model', 'restful_model Documentation',
author, 'restful_model', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| true | true |
1c46a40c89f7ba576f27e1b22089cb2513eaa2c5 | 31,354 | py | Python | usaspending_api/spending_explorer/tests/integration/test_spending_explorer.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | null | null | null | usaspending_api/spending_explorer/tests/integration/test_spending_explorer.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | null | null | null | usaspending_api/spending_explorer/tests/integration/test_spending_explorer.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | null | null | null | import copy
import json
import pytest
from datetime import datetime, timezone
from model_mommy import mommy
from rest_framework import status
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.financial_activities.models import (
FinancialAccountsByProgramActivityObjectClass,
SubmissionAttributes,
TreasuryAppropriationAccount,
)
from usaspending_api.accounts.models import FederalAccount
from usaspending_api.references.models import Agency, GTASSF133Balances, ToptierAgency, ObjectClass
from usaspending_api.submissions.models import DABSSubmissionWindowSchedule
# Spending Explorer endpoint under test and the content type used for every POST.
ENDPOINT_URL = "/api/v2/spending/"
CONTENT_TYPE = "application/json"
# Shared model fixtures for FY1600/Q1: each entry names a Django model plus the
# field values to persist via mommy.make(). Tests deepcopy this list because
# they pop() the "model" key out of each dict before creating the row.
GLOBAL_MOCK_DICT = [
    # A revealed quarterly submission window for FY1600 Q1 (period 3).
    {
        "model": DABSSubmissionWindowSchedule,
        "id": 1600031,
        "period_end_date": datetime(1599, 12, 31, tzinfo=timezone.utc),
        "submission_fiscal_year": 1600,
        "submission_fiscal_quarter": 1,
        "submission_fiscal_month": 3,
        "submission_reveal_date": datetime(1600, 1, 28, tzinfo=timezone.utc),
        "is_quarter": True,
    },
    {"model": ObjectClass, "id": 1},
    # GTAS total (-10) intentionally differs from the sum of File B rows below
    # (-16) so the explorer must surface an "Unreported Data" adjustment.
    {"model": GTASSF133Balances, "fiscal_year": 1600, "fiscal_period": 3, "obligations_incurred_total_cpe": -10},
    {
        "model": SubmissionAttributes,
        "submission_id": -1,
        "reporting_fiscal_year": 1600,
        "reporting_fiscal_period": 3,
        "reporting_fiscal_quarter": 1,
    },
    # Two funding toptier agencies, each with a TAS linked to federal account 1.
    {
        "model": ToptierAgency,
        "toptier_agency_id": -1,
        "name": "random_funding_name_1",
        "toptier_code": "random_funding_code_1",
    },
    {
        "model": ToptierAgency,
        "toptier_agency_id": -2,
        "name": "random_funding_name_2",
        "toptier_code": "random_funding_code_2",
    },
    {"model": Agency, "toptier_agency_id": -1, "toptier_flag": True},
    {"model": Agency, "toptier_agency_id": -2, "toptier_flag": True},
    {
        "model": TreasuryAppropriationAccount,
        "treasury_account_identifier": -1,
        "funding_toptier_agency_id": -1,
        "federal_account_id": 1,
    },
    {
        "model": FederalAccount,
        "id": 1,
        "account_title": "Tommy Two-Tone",
        "agency_identifier": "867",
        "main_account_code": "5309",
        "federal_account_code": "867-5309",
    },
    {
        "model": TreasuryAppropriationAccount,
        "treasury_account_identifier": -2,
        "funding_toptier_agency_id": -2,
        "federal_account_id": 1,
    },
    # File B rows: agency -1 obligates -5 and -10 (total -15); agency -2 obligates -1.
    {
        "model": FinancialAccountsByProgramActivityObjectClass,
        "financial_accounts_by_program_activity_object_class_id": -1,
        "submission_id": -1,
        "treasury_account_id": -1,
        "obligations_incurred_by_program_object_class_cpe": -5,
    },
    {
        "model": FinancialAccountsByProgramActivityObjectClass,
        "financial_accounts_by_program_activity_object_class_id": -2,
        "submission_id": -1,
        "treasury_account_id": -1,
        "obligations_incurred_by_program_object_class_cpe": -10,
    },
    {
        "model": FinancialAccountsByProgramActivityObjectClass,
        "financial_accounts_by_program_activity_object_class_id": -3,
        "submission_id": -1,
        "treasury_account_id": -2,
        "obligations_incurred_by_program_object_class_cpe": -1,
        "object_class_id": 1,
    },
]
@pytest.mark.django_db
def test_unreported_data_actual_value_file_b(client):
    """Agency-level explorer results should include an 'Unreported Data' bucket.

    Loads the shared FY1600/Q1 fixtures, posts an agency-type query, and checks
    the overall total plus the ordered per-agency names and amounts.
    """
    # Persist the shared fixtures; deepcopy first because pop() mutates each dict.
    for record in copy.deepcopy(GLOBAL_MOCK_DICT):
        model_class = record.pop("model")
        mommy.make(model_class, **record)
    request_payload = json.dumps({"type": "agency", "filters": {"fy": "1600", "quarter": "1"}})
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=request_payload)
    assert response.status_code == status.HTTP_200_OK
    payload = response.json()
    result_rows = payload["results"]
    assert payload["total"] == -10
    assert [row["name"] for row in result_rows] == ["Unreported Data", "random_funding_name_2", "random_funding_name_1"]
    assert [row["amount"] for row in result_rows] == [6, -1, -15]
@pytest.mark.django_db
def test_unreported_data_actual_value_file_c(client):
    """Recipient-level explorer results should aggregate File C rows per recipient.

    Builds a local fixture set (File C award rows instead of the shared File B
    fixtures), queries agency -1 for FY1600/Q1, and checks the total plus the
    ordered recipient names and amounts.
    """
    models_to_mock = [
        # Revealed quarterly submission window for FY1600 Q1 (period 3).
        {
            "model": DABSSubmissionWindowSchedule,
            "id": 1600031,
            "period_end_date": datetime(1599, 12, 31, tzinfo=timezone.utc),
            "submission_fiscal_year": 1600,
            "submission_fiscal_quarter": 1,
            "submission_fiscal_month": 3,
            "submission_reveal_date": datetime(1600, 1, 28, tzinfo=timezone.utc),
            "is_quarter": True,
        },
        {"model": GTASSF133Balances, "fiscal_year": 1600, "fiscal_period": 3, "obligations_incurred_total_cpe": -10},
        {
            "model": SubmissionAttributes,
            "submission_id": -1,
            "reporting_fiscal_year": 1600,
            "reporting_fiscal_quarter": 1,
            "reporting_fiscal_period": 3,
        },
        {
            "model": ToptierAgency,
            "toptier_agency_id": -1,
            "name": "random_funding_name_1",
            "toptier_code": "random_funding_code_1",
        },
        {
            "model": ToptierAgency,
            "toptier_agency_id": -2,
            "name": "random_funding_name_2",
            "toptier_code": "random_funding_code_2",
        },
        {"model": Agency, "id": -1, "toptier_agency_id": -1, "toptier_flag": True},
        {"model": Agency, "id": -2, "toptier_agency_id": -2, "toptier_flag": True},
        {"model": TreasuryAppropriationAccount, "treasury_account_identifier": -1, "funding_toptier_agency_id": -1},
        {"model": TreasuryAppropriationAccount, "treasury_account_identifier": -2, "funding_toptier_agency_id": -2},
        # File C award rows; only rows on TAS -1 belong to the queried agency (-1):
        # recipient_1 gets -2 (assistance) and -7 (contract) = -9; recipient_2 gets -3.
        {
            "model": FinancialAccountsByAwards,
            "financial_accounts_by_awards_id": -1,
            "submission_id": -1,
            "award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_1",
            "treasury_account_id": -1,
            "transaction_obligated_amount": -2,
        },
        {
            "model": FinancialAccountsByAwards,
            "financial_accounts_by_awards_id": -2,
            "submission_id": -1,
            "award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_2",
            "treasury_account_id": -1,
            "transaction_obligated_amount": -3,
        },
        {
            "model": FinancialAccountsByAwards,
            "financial_accounts_by_awards_id": -3,
            "submission_id": -1,
            "award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_1",
            "treasury_account_id": -2,
            "transaction_obligated_amount": -5,
        },
        {
            "model": FinancialAccountsByAwards,
            "financial_accounts_by_awards_id": -4,
            "submission_id": -1,
            "award__latest_transaction__contract_data__awardee_or_recipient_legal": "random_recipient_name_1",
            "treasury_account_id": -1,
            "transaction_obligated_amount": -7,
        },
        {
            "model": FinancialAccountsByAwards,
            "financial_accounts_by_awards_id": -5,
            "submission_id": -1,
            "award__latest_transaction__contract_data__awardee_or_recipient_legal": "random_recipient_name_4",
            "treasury_account_id": -2,
            "transaction_obligated_amount": -11,
        },
    ]
    for entry in models_to_mock:
        mommy.make(entry.pop("model"), **entry)
    json_request = {"type": "recipient", "filters": {"agency": "-1", "fy": "1600", "quarter": "1"}}
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(json_request))
    assert response.status_code == status.HTTP_200_OK
    json_response = response.json()
    expected_results = {
        "total": -12,
        "agencies": ["random_recipient_name_2", "random_recipient_name_1"],
        "amounts": [-3, -9],
    }
    actual_results = {
        "total": json_response["total"],
        "agencies": [entry["name"] for entry in json_response["results"]],
        "amounts": [entry["amount"] for entry in json_response["results"]],
    }
    assert expected_results == actual_results
@pytest.mark.django_db
def test_unreported_data_no_data_available(client):
    """With no submissions loaded for the requested year, the total comes back null."""
    payload = {"type": "agency", "filters": {"fy": "1700", "quarter": "1"}}
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(payload))
    assert response.status_code == status.HTTP_200_OK
    # No fixtures exist for FY1700, so the endpoint should report a null total.
    assert response.json()["total"] is None
@pytest.mark.django_db
def test_federal_account_linkage(client):
    """The federal_account rollup exposes the FederalAccount code as account_number."""
    for fixture in copy.deepcopy(GLOBAL_MOCK_DICT):
        mommy.make(fixture.pop("model"), **fixture)
    payload = {"type": "federal_account", "filters": {"fy": "1600", "quarter": "1"}}
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(payload))
    top_result = response.json()["results"][0]
    # "867-5309" is the federal_account_code seeded in GLOBAL_MOCK_DICT.
    assert top_result["account_number"] == "867-5309"
@pytest.mark.django_db
def test_budget_function_filter_success(client):
    """Drill down the budget-function hierarchy; every level should return HTTP 200.

    Each step adds one more filter on top of the previous step's filters, mirroring
    how the UI narrows from budget function all the way down to a single award.
    """
    filters = {"fy": "2017", "quarter": 1}
    # (spending type requested, filter added at this level)
    drilldown = [
        ("budget_function", {}),
        ("federal_account", {"budget_function": "050"}),
        ("federal_account", {"budget_subfunction": "053"}),
        ("program_activity", {"federal_account": 2715}),
        ("object_class", {"program_activity": 17863}),
        ("recipient", {"object_class": "20"}),
        ("award", {"recipient": 13916}),
    ]
    for spending_type, extra_filter in drilldown:
        filters.update(extra_filter)  # filters accumulate level by level
        resp = client.post(
            "/api/v2/spending/",
            content_type="application/json",
            data=json.dumps({"type": spending_type, "filters": dict(filters)}),
        )
        assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_budget_function_failure(client):
    """An empty request body is rejected as unprocessable."""
    response = client.post(
        "/api/v2/search/spending_over_time/", content_type="application/json", data=json.dumps({})
    )
    assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_object_class_filter_success(client):
    """Drill down starting from an object-class filter; every level should return HTTP 200."""
    filters = {"fy": "2017", "quarter": 1}
    # (spending type requested, filter added at this level)
    drilldown = [
        ("object_class", {}),
        ("agency", {"object_class": "20"}),
        ("federal_account", {}),
        ("program_activity", {"federal_account": 2358}),
        ("recipient", {"program_activity": 15103}),
        ("award", {"recipient": 301773}),
    ]
    for spending_type, extra_filter in drilldown:
        filters.update(extra_filter)  # filters accumulate level by level
        resp = client.post(
            "/api/v2/spending/",
            content_type="application/json",
            data=json.dumps({"type": spending_type, "filters": dict(filters)}),
        )
        assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_object_class_failure(client):
    """An empty request body is rejected as unprocessable (object-class variant)."""
    response = client.post(
        "/api/v2/search/spending_over_time/", content_type="application/json", data=json.dumps({})
    )
    assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_agency_filter_success(client):
    """Drill down from the agency level; every request should return HTTP 200.

    The second agency request exercises a string-typed quarter ("3"); the rest
    progressively narrow from federal account down to a single award.
    """
    base = {"fy": "2017", "quarter": 1}
    payloads = [
        {"type": "agency", "filters": dict(base)},
        {"type": "agency", "filters": {"fy": "2017", "quarter": "3"}},
        {"type": "federal_account", "filters": dict(base)},
        {"type": "program_activity", "filters": {**base, "federal_account": 1500}},
        {"type": "object_class", "filters": {**base, "federal_account": 1500, "program_activity": 12697}},
        {
            "type": "recipient",
            "filters": {**base, "federal_account": 1500, "program_activity": 12697, "object_class": "40"},
        },
        {
            "type": "award",
            "filters": {
                **base,
                "federal_account": 1500,
                "program_activity": 12697,
                "object_class": "40",
                "recipient": 792917,
            },
        },
    ]
    for payload in payloads:
        resp = client.post("/api/v2/spending/", content_type="application/json", data=json.dumps(payload))
        assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_agency_failure(client):
    """Malformed agency requests are rejected."""
    # An empty body against spending_over_time is unprocessable.
    response = client.post(
        "/api/v2/search/spending_over_time/", content_type="application/json", data=json.dumps({})
    )
    assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
    # A two-digit fiscal year fails validation outright.
    response = client.post(
        "/api/v2/spending/",
        content_type="application/json",
        data=json.dumps({"type": "agency", "filters": {"fy": "23", "quarter": "3"}}),
    )
    assert response.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_object_budget_match(client):
    """The top budget_function amount should equal the top object_class amount."""
    for fixture in copy.deepcopy(GLOBAL_MOCK_DICT):
        mommy.make(fixture.pop("model"), **fixture)
    # Extra File B row so object class 1 has spending in both rollups.
    mommy.make(
        FinancialAccountsByProgramActivityObjectClass,
        **{
            "financial_accounts_by_program_activity_object_class_id": -4,
            "submission_id": -1,
            "treasury_account_id": -1,
            "obligations_incurred_by_program_object_class_cpe": -5,
            "object_class_id": 1,
        },
    )

    def fetch(spending_type):
        body = {"type": spending_type, "filters": {"fy": "1600", "quarter": "1"}}
        return client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(body))

    budget_function_resp = fetch("budget_function")
    assert budget_function_resp.status_code == status.HTTP_200_OK
    object_class_resp = fetch("object_class")
    assert (
        budget_function_resp.json()["results"][0]["amount"]
        == object_class_resp.json()["results"][0]["amount"]
    )
@pytest.mark.django_db
def test_period(client):
    """A `period` filter must behave identically to the equivalent `quarter` filter.

    For each spending type (and drill-down filter set), the same request is issued
    once with `quarter` and once with the corresponding `period`; both must return
    HTTP 200 and byte-for-byte identical JSON bodies.
    """
    # (spending type, quarter value, equivalent period value, extra drill-down filters)
    cases = [
        ("agency", 1, 3, {}),
        ("agency", "3", "9", {}),  # string-typed values are accepted too
        ("federal_account", 1, 3, {}),
        ("program_activity", 1, 3, {"federal_account": 1500}),
        ("object_class", 1, 3, {"federal_account": 1500, "program_activity": 12697}),
        ("recipient", 1, 3, {"federal_account": 1500, "program_activity": 12697, "object_class": "40"}),
        (
            "award",
            1,
            3,
            {"federal_account": 1500, "program_activity": 12697, "object_class": "40", "recipient": 792917},
        ),
    ]
    for spending_type, quarter, period, extra in cases:
        by_quarter = client.post(
            "/api/v2/spending/",
            content_type="application/json",
            data=json.dumps({"type": spending_type, "filters": {"fy": "2017", "quarter": quarter, **extra}}),
        )
        assert by_quarter.status_code == status.HTTP_200_OK
        by_period = client.post(
            "/api/v2/spending/",
            content_type="application/json",
            data=json.dumps({"type": spending_type, "filters": {"fy": "2017", "period": period, **extra}}),
        )
        assert by_period.status_code == status.HTTP_200_OK
        assert by_quarter.json() == by_period.json()
@pytest.mark.django_db
def test_unreported_file_c(client):
    """File C recipient rollup includes a 'Non-Award Spending' bucket and its
    total matches the object_class rollup total for the same filters."""
    assistance = "award__latest_transaction__assistance_data__awardee_or_recipient_legal"
    contract = "award__latest_transaction__contract_data__awardee_or_recipient_legal"

    def file_b_row(pk):
        # One File B record: -5 obligations against treasury account -1.
        return {
            "model": FinancialAccountsByProgramActivityObjectClass,
            "financial_accounts_by_program_activity_object_class_id": pk,
            "submission_id": -1,
            "treasury_account_id": -1,
            "obligations_incurred_by_program_object_class_cpe": -5,
        }

    def award_row(pk, recipient_key, recipient, treasury_account, amount):
        # One File C (award-linked) record tied to a recipient name.
        return {
            "model": FinancialAccountsByAwards,
            "financial_accounts_by_awards_id": pk,
            "submission_id": -1,
            recipient_key: recipient,
            "treasury_account_id": treasury_account,
            "transaction_obligated_amount": amount,
        }

    fixtures = [
        {
            "model": DABSSubmissionWindowSchedule,
            "id": 1600031,
            "period_end_date": datetime(1599, 12, 31, tzinfo=timezone.utc),
            "submission_fiscal_year": 1600,
            "submission_fiscal_quarter": 1,
            "submission_fiscal_month": 3,
            "submission_reveal_date": datetime(1600, 1, 28, tzinfo=timezone.utc),
            "is_quarter": True,
        },
        {"model": GTASSF133Balances, "fiscal_year": 1600, "fiscal_period": 3, "obligations_incurred_total_cpe": -10},
        {
            "model": SubmissionAttributes,
            "submission_id": -1,
            "reporting_fiscal_year": 1600,
            "reporting_fiscal_quarter": 1,
            "reporting_fiscal_period": 3,
        },
        {
            "model": ToptierAgency,
            "toptier_agency_id": -1,
            "name": "random_funding_name_1",
            "toptier_code": "random_funding_code_1",
        },
        {
            "model": ToptierAgency,
            "toptier_agency_id": -2,
            "name": "random_funding_name_2",
            "toptier_code": "random_funding_code_2",
        },
        {"model": Agency, "id": -1, "toptier_agency_id": -1, "toptier_flag": True},
        {"model": Agency, "id": -2, "toptier_agency_id": -2, "toptier_flag": True},
        {"model": TreasuryAppropriationAccount, "treasury_account_identifier": -1, "funding_toptier_agency_id": -1},
        {"model": TreasuryAppropriationAccount, "treasury_account_identifier": -2, "funding_toptier_agency_id": -2},
        file_b_row(-1),
        file_b_row(-2),
        file_b_row(-3),
        award_row(-1, assistance, "random_recipient_name_1", -1, -2),
        award_row(-2, assistance, "random_recipient_name_2", -1, -3),
        award_row(-3, assistance, "random_recipient_name_1", -2, -5),
        award_row(-4, contract, "random_recipient_name_1", -1, -7),
        award_row(-5, contract, "random_recipient_name_4", -2, -11),
    ]
    for fixture in fixtures:
        mommy.make(fixture.pop("model"), **fixture)

    # dict payloads are serialized by the Django test client when the
    # content type is application/json.
    recipient_resp = client.post(
        "/api/v2/spending/",
        content_type="application/json",
        data={"type": "recipient", "filters": {"agency": "-1", "fy": "1600", "quarter": "1"}},
    )
    object_class_resp = client.post(
        "/api/v2/spending/",
        content_type="application/json",
        data={"type": "object_class", "filters": {"agency": "-1", "fy": "1600", "quarter": "1"}},
    )
    assert recipient_resp.status_code == status.HTTP_200_OK
    assert object_class_resp.status_code == status.HTTP_200_OK
    recipient_json = recipient_resp.json()
    # File B spending that is not tied to any award surfaces as "Non-Award Spending".
    assert recipient_json["total"] == -15
    assert [row["name"] for row in recipient_json["results"]] == [
        "random_recipient_name_2",
        "Non-Award Spending",
        "random_recipient_name_1",
    ]
    assert [row["amount"] for row in recipient_json["results"]] == [-3, -3, -9]
    assert recipient_json["total"] == object_class_resp.json()["total"]
| 34.607064 | 120 | 0.574919 | import copy
import json
import pytest
from datetime import datetime, timezone
from model_mommy import mommy
from rest_framework import status
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.financial_activities.models import (
FinancialAccountsByProgramActivityObjectClass,
SubmissionAttributes,
TreasuryAppropriationAccount,
)
from usaspending_api.accounts.models import FederalAccount
from usaspending_api.references.models import Agency, GTASSF133Balances, ToptierAgency, ObjectClass
from usaspending_api.submissions.models import DABSSubmissionWindowSchedule
# Endpoint under test for the whole module.
ENDPOINT_URL = "/api/v2/spending/"
CONTENT_TYPE = "application/json"
# Shared fixture specs: each dict names a model plus the field values to seed via
# mommy.make. Tests deep-copy this list because mommy.make pops the "model" key.
GLOBAL_MOCK_DICT = [
    {
        "model": DABSSubmissionWindowSchedule,
        "id": 1600031,
        "period_end_date": datetime(1599, 12, 31, tzinfo=timezone.utc),
        "submission_fiscal_year": 1600,
        "submission_fiscal_quarter": 1,
        "submission_fiscal_month": 3,
        "submission_reveal_date": datetime(1600, 1, 28, tzinfo=timezone.utc),
        "is_quarter": True,
    },
    {"model": ObjectClass, "id": 1},
    # GTAS total used to derive the "Unreported Data" gap.
    {"model": GTASSF133Balances, "fiscal_year": 1600, "fiscal_period": 3, "obligations_incurred_total_cpe": -10},
    {
        "model": SubmissionAttributes,
        "submission_id": -1,
        "reporting_fiscal_year": 1600,
        "reporting_fiscal_period": 3,
        "reporting_fiscal_quarter": 1,
    },
    {
        "model": ToptierAgency,
        "toptier_agency_id": -1,
        "name": "random_funding_name_1",
        "toptier_code": "random_funding_code_1",
    },
    {
        "model": ToptierAgency,
        "toptier_agency_id": -2,
        "name": "random_funding_name_2",
        "toptier_code": "random_funding_code_2",
    },
    {"model": Agency, "toptier_agency_id": -1, "toptier_flag": True},
    {"model": Agency, "toptier_agency_id": -2, "toptier_flag": True},
    {
        "model": TreasuryAppropriationAccount,
        "treasury_account_identifier": -1,
        "funding_toptier_agency_id": -1,
        "federal_account_id": 1,
    },
    # Federal account whose code ("867-5309") is asserted in the linkage test.
    {
        "model": FederalAccount,
        "id": 1,
        "account_title": "Tommy Two-Tone",
        "agency_identifier": "867",
        "main_account_code": "5309",
        "federal_account_code": "867-5309",
    },
    {
        "model": TreasuryAppropriationAccount,
        "treasury_account_identifier": -2,
        "funding_toptier_agency_id": -2,
        "federal_account_id": 1,
    },
    # File B rows: two against treasury account -1, one against -2.
    {
        "model": FinancialAccountsByProgramActivityObjectClass,
        "financial_accounts_by_program_activity_object_class_id": -1,
        "submission_id": -1,
        "treasury_account_id": -1,
        "obligations_incurred_by_program_object_class_cpe": -5,
    },
    {
        "model": FinancialAccountsByProgramActivityObjectClass,
        "financial_accounts_by_program_activity_object_class_id": -2,
        "submission_id": -1,
        "treasury_account_id": -1,
        "obligations_incurred_by_program_object_class_cpe": -10,
    },
    {
        "model": FinancialAccountsByProgramActivityObjectClass,
        "financial_accounts_by_program_activity_object_class_id": -3,
        "submission_id": -1,
        "treasury_account_id": -2,
        "obligations_incurred_by_program_object_class_cpe": -1,
        "object_class_id": 1,
    },
]
@pytest.mark.django_db
def test_unreported_data_actual_value_file_b(client):
    """Agency rollup: the gap between the GTAS total and reported File B spending
    surfaces as an 'Unreported Data' row."""
    for fixture in copy.deepcopy(GLOBAL_MOCK_DICT):
        mommy.make(fixture.pop("model"), **fixture)
    payload = {"type": "agency", "filters": {"fy": "1600", "quarter": "1"}}
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(payload))
    assert response.status_code == status.HTTP_200_OK
    body = response.json()
    # GTAS total is -10; agencies report -15 and -1, so +6 is "Unreported Data".
    assert body["total"] == -10
    assert [row["name"] for row in body["results"]] == [
        "Unreported Data",
        "random_funding_name_2",
        "random_funding_name_1",
    ]
    assert [row["amount"] for row in body["results"]] == [6, -1, -15]
@pytest.mark.django_db
def test_unreported_data_actual_value_file_c(client):
    """Recipient rollup for one funding agency: names and amounts aggregate
    across File C rows regardless of assistance/contract linkage."""
    assistance = "award__latest_transaction__assistance_data__awardee_or_recipient_legal"
    contract = "award__latest_transaction__contract_data__awardee_or_recipient_legal"

    def award_row(pk, recipient_key, recipient, treasury_account, amount):
        # One File C (award-linked) record tied to a recipient name.
        return {
            "model": FinancialAccountsByAwards,
            "financial_accounts_by_awards_id": pk,
            "submission_id": -1,
            recipient_key: recipient,
            "treasury_account_id": treasury_account,
            "transaction_obligated_amount": amount,
        }

    fixtures = [
        {
            "model": DABSSubmissionWindowSchedule,
            "id": 1600031,
            "period_end_date": datetime(1599, 12, 31, tzinfo=timezone.utc),
            "submission_fiscal_year": 1600,
            "submission_fiscal_quarter": 1,
            "submission_fiscal_month": 3,
            "submission_reveal_date": datetime(1600, 1, 28, tzinfo=timezone.utc),
            "is_quarter": True,
        },
        {"model": GTASSF133Balances, "fiscal_year": 1600, "fiscal_period": 3, "obligations_incurred_total_cpe": -10},
        {
            "model": SubmissionAttributes,
            "submission_id": -1,
            "reporting_fiscal_year": 1600,
            "reporting_fiscal_quarter": 1,
            "reporting_fiscal_period": 3,
        },
        {
            "model": ToptierAgency,
            "toptier_agency_id": -1,
            "name": "random_funding_name_1",
            "toptier_code": "random_funding_code_1",
        },
        {
            "model": ToptierAgency,
            "toptier_agency_id": -2,
            "name": "random_funding_name_2",
            "toptier_code": "random_funding_code_2",
        },
        {"model": Agency, "id": -1, "toptier_agency_id": -1, "toptier_flag": True},
        {"model": Agency, "id": -2, "toptier_agency_id": -2, "toptier_flag": True},
        {"model": TreasuryAppropriationAccount, "treasury_account_identifier": -1, "funding_toptier_agency_id": -1},
        {"model": TreasuryAppropriationAccount, "treasury_account_identifier": -2, "funding_toptier_agency_id": -2},
        award_row(-1, assistance, "random_recipient_name_1", -1, -2),
        award_row(-2, assistance, "random_recipient_name_2", -1, -3),
        award_row(-3, assistance, "random_recipient_name_1", -2, -5),
        award_row(-4, contract, "random_recipient_name_1", -1, -7),
        award_row(-5, contract, "random_recipient_name_4", -2, -11),
    ]
    for fixture in fixtures:
        mommy.make(fixture.pop("model"), **fixture)

    payload = {"type": "recipient", "filters": {"agency": "-1", "fy": "1600", "quarter": "1"}}
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(payload))
    assert response.status_code == status.HTTP_200_OK
    body = response.json()
    # Agency -1 funds treasury account -1 only: -2 + -3 + -7 = -12.
    assert body["total"] == -12
    assert [row["name"] for row in body["results"]] == ["random_recipient_name_2", "random_recipient_name_1"]
    assert [row["amount"] for row in body["results"]] == [-3, -9]
@pytest.mark.django_db
def test_unreported_data_no_data_available(client):
    """With no submissions loaded for the requested year, the total comes back null."""
    payload = {"type": "agency", "filters": {"fy": "1700", "quarter": "1"}}
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(payload))
    assert response.status_code == status.HTTP_200_OK
    # No fixtures exist for FY1700, so the endpoint should report a null total.
    assert response.json()["total"] is None
@pytest.mark.django_db
def test_federal_account_linkage(client):
    """The federal_account rollup exposes the FederalAccount code as account_number."""
    for fixture in copy.deepcopy(GLOBAL_MOCK_DICT):
        mommy.make(fixture.pop("model"), **fixture)
    payload = {"type": "federal_account", "filters": {"fy": "1600", "quarter": "1"}}
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(payload))
    top_result = response.json()["results"][0]
    # "867-5309" is the federal_account_code seeded in GLOBAL_MOCK_DICT.
    assert top_result["account_number"] == "867-5309"
@pytest.mark.django_db
def test_budget_function_filter_success(client):
    """Drill down the budget-function hierarchy; every level should return HTTP 200.

    Each step adds one more filter on top of the previous step's filters, mirroring
    how the UI narrows from budget function all the way down to a single award.
    """
    filters = {"fy": "2017", "quarter": 1}
    # (spending type requested, filter added at this level)
    drilldown = [
        ("budget_function", {}),
        ("federal_account", {"budget_function": "050"}),
        ("federal_account", {"budget_subfunction": "053"}),
        ("program_activity", {"federal_account": 2715}),
        ("object_class", {"program_activity": 17863}),
        ("recipient", {"object_class": "20"}),
        ("award", {"recipient": 13916}),
    ]
    for spending_type, extra_filter in drilldown:
        filters.update(extra_filter)  # filters accumulate level by level
        resp = client.post(
            "/api/v2/spending/",
            content_type="application/json",
            data=json.dumps({"type": spending_type, "filters": dict(filters)}),
        )
        assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_budget_function_failure(client):
    """An empty request body is rejected as unprocessable."""
    response = client.post(
        "/api/v2/search/spending_over_time/", content_type="application/json", data=json.dumps({})
    )
    assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_object_class_filter_success(client):
    """Drill down starting from an object-class filter; every level should return HTTP 200."""
    filters = {"fy": "2017", "quarter": 1}
    # (spending type requested, filter added at this level)
    drilldown = [
        ("object_class", {}),
        ("agency", {"object_class": "20"}),
        ("federal_account", {}),
        ("program_activity", {"federal_account": 2358}),
        ("recipient", {"program_activity": 15103}),
        ("award", {"recipient": 301773}),
    ]
    for spending_type, extra_filter in drilldown:
        filters.update(extra_filter)  # filters accumulate level by level
        resp = client.post(
            "/api/v2/spending/",
            content_type="application/json",
            data=json.dumps({"type": spending_type, "filters": dict(filters)}),
        )
        assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_object_class_failure(client):
    """An empty request body is rejected as unprocessable (object-class variant)."""
    response = client.post(
        "/api/v2/search/spending_over_time/", content_type="application/json", data=json.dumps({})
    )
    assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_agency_filter_success(client):
    """Drill down from the agency level; every request should return HTTP 200.

    The second agency request exercises a string-typed quarter ("3"); the rest
    progressively narrow from federal account down to a single award.
    """
    base = {"fy": "2017", "quarter": 1}
    payloads = [
        {"type": "agency", "filters": dict(base)},
        {"type": "agency", "filters": {"fy": "2017", "quarter": "3"}},
        {"type": "federal_account", "filters": dict(base)},
        {"type": "program_activity", "filters": {**base, "federal_account": 1500}},
        {"type": "object_class", "filters": {**base, "federal_account": 1500, "program_activity": 12697}},
        {
            "type": "recipient",
            "filters": {**base, "federal_account": 1500, "program_activity": 12697, "object_class": "40"},
        },
        {
            "type": "award",
            "filters": {
                **base,
                "federal_account": 1500,
                "program_activity": 12697,
                "object_class": "40",
                "recipient": 792917,
            },
        },
    ]
    for payload in payloads:
        resp = client.post("/api/v2/spending/", content_type="application/json", data=json.dumps(payload))
        assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_agency_failure(client):
    """Malformed agency requests are rejected."""
    # An empty body against spending_over_time is unprocessable.
    response = client.post(
        "/api/v2/search/spending_over_time/", content_type="application/json", data=json.dumps({})
    )
    assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
    # A two-digit fiscal year fails validation outright.
    response = client.post(
        "/api/v2/spending/",
        content_type="application/json",
        data=json.dumps({"type": "agency", "filters": {"fy": "23", "quarter": "3"}}),
    )
    assert response.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_object_budget_match(client):
    """The top budget_function amount should equal the top object_class amount."""
    for fixture in copy.deepcopy(GLOBAL_MOCK_DICT):
        mommy.make(fixture.pop("model"), **fixture)
    # Extra File B row so object class 1 has spending in both rollups.
    mommy.make(
        FinancialAccountsByProgramActivityObjectClass,
        **{
            "financial_accounts_by_program_activity_object_class_id": -4,
            "submission_id": -1,
            "treasury_account_id": -1,
            "obligations_incurred_by_program_object_class_cpe": -5,
            "object_class_id": 1,
        },
    )

    def fetch(spending_type):
        body = {"type": spending_type, "filters": {"fy": "1600", "quarter": "1"}}
        return client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(body))

    budget_function_resp = fetch("budget_function")
    assert budget_function_resp.status_code == status.HTTP_200_OK
    object_class_resp = fetch("object_class")
    assert (
        budget_function_resp.json()["results"][0]["amount"]
        == object_class_resp.json()["results"][0]["amount"]
    )
@pytest.mark.django_db
def test_period(client):
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "quarter": 1}}),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "period": 3}}),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "quarter": "3"}}),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "period": "9"}}),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "federal_account", "filters": {"fy": "2017", "quarter": 1}}),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "federal_account", "filters": {"fy": "2017", "period": 3}}),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "program_activity", "filters": {"fy": "2017", "quarter": 1, "federal_account": 1500}}),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "program_activity", "filters": {"fy": "2017", "period": 3, "federal_account": 1500}}),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "object_class",
"filters": {"fy": "2017", "quarter": 1, "federal_account": 1500, "program_activity": 12697},
}
),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "object_class",
"filters": {"fy": "2017", "period": 3, "federal_account": 1500, "program_activity": 12697},
}
),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "recipient",
"filters": {
"fy": "2017",
"quarter": 1,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "recipient",
"filters": {
"fy": "2017",
"period": 3,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
},
}
),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "award",
"filters": {
"fy": "2017",
"quarter": 1,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
"recipient": 792917,
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "award",
"filters": {
"fy": "2017",
"period": 3,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
"recipient": 792917,
},
}
),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
@pytest.mark.django_db
def test_unreported_file_c(client):
models_to_mock = [
{
"model": DABSSubmissionWindowSchedule,
"id": 1600031,
"period_end_date": datetime(1599, 12, 31, tzinfo=timezone.utc),
"submission_fiscal_year": 1600,
"submission_fiscal_quarter": 1,
"submission_fiscal_month": 3,
"submission_reveal_date": datetime(1600, 1, 28, tzinfo=timezone.utc),
"is_quarter": True,
},
{"model": GTASSF133Balances, "fiscal_year": 1600, "fiscal_period": 3, "obligations_incurred_total_cpe": -10},
{
"model": SubmissionAttributes,
"submission_id": -1,
"reporting_fiscal_year": 1600,
"reporting_fiscal_quarter": 1,
"reporting_fiscal_period": 3,
},
{
"model": ToptierAgency,
"toptier_agency_id": -1,
"name": "random_funding_name_1",
"toptier_code": "random_funding_code_1",
},
{
"model": ToptierAgency,
"toptier_agency_id": -2,
"name": "random_funding_name_2",
"toptier_code": "random_funding_code_2",
},
{"model": Agency, "id": -1, "toptier_agency_id": -1, "toptier_flag": True},
{"model": Agency, "id": -2, "toptier_agency_id": -2, "toptier_flag": True},
{"model": TreasuryAppropriationAccount, "treasury_account_identifier": -1, "funding_toptier_agency_id": -1},
{"model": TreasuryAppropriationAccount, "treasury_account_identifier": -2, "funding_toptier_agency_id": -2},
{
"model": FinancialAccountsByProgramActivityObjectClass,
"financial_accounts_by_program_activity_object_class_id": -1,
"submission_id": -1,
"treasury_account_id": -1,
"obligations_incurred_by_program_object_class_cpe": -5,
},
{
"model": FinancialAccountsByProgramActivityObjectClass,
"financial_accounts_by_program_activity_object_class_id": -2,
"submission_id": -1,
"treasury_account_id": -1,
"obligations_incurred_by_program_object_class_cpe": -5,
},
{
"model": FinancialAccountsByProgramActivityObjectClass,
"financial_accounts_by_program_activity_object_class_id": -3,
"submission_id": -1,
"treasury_account_id": -1,
"obligations_incurred_by_program_object_class_cpe": -5,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -1,
"submission_id": -1,
"award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_1",
"treasury_account_id": -1,
"transaction_obligated_amount": -2,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -2,
"submission_id": -1,
"award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_2",
"treasury_account_id": -1,
"transaction_obligated_amount": -3,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -3,
"submission_id": -1,
"award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_1",
"treasury_account_id": -2,
"transaction_obligated_amount": -5,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -4,
"submission_id": -1,
"award__latest_transaction__contract_data__awardee_or_recipient_legal": "random_recipient_name_1",
"treasury_account_id": -1,
"transaction_obligated_amount": -7,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -5,
"submission_id": -1,
"award__latest_transaction__contract_data__awardee_or_recipient_legal": "random_recipient_name_4",
"treasury_account_id": -2,
"transaction_obligated_amount": -11,
},
]
for entry in models_to_mock:
mommy.make(entry.pop("model"), **entry)
json_request = {"type": "recipient", "filters": {"agency": "-1", "fy": "1600", "quarter": "1"}}
resp = client.post("/api/v2/spending/", content_type="application/json", data=json_request)
json_request2 = {"type": "object_class", "filters": {"agency": "-1", "fy": "1600", "quarter": "1"}}
resp2 = client.post("/api/v2/spending/", content_type="application/json", data=json_request2)
assert resp.status_code == status.HTTP_200_OK
assert resp2.status_code == status.HTTP_200_OK
response = resp.json()
response2 = resp2.json()
expected_results = {
"total": -15,
"agencies": ["random_recipient_name_2", "Non-Award Spending", "random_recipient_name_1"],
"amounts": [-3, -3, -9],
}
actual_results = {
"total": response["total"],
"agencies": [entry["name"] for entry in response["results"]],
"amounts": [entry["amount"] for entry in response["results"]],
}
assert expected_results == actual_results
assert response["total"] == response2["total"]
| true | true |
1c46a526cd71a1a644839ed9cdd68a4e1e211d57 | 15,561 | py | Python | examples/tsunami/eta_init_force_dry/setrun.py | AsianHam/geoclaw | b5f9ee8cd6e64d107ba8bba1e6d588aa7bf6d417 | [
"BSD-3-Clause"
] | 51 | 2015-07-01T13:39:17.000Z | 2022-03-07T16:13:17.000Z | examples/tsunami/eta_init_force_dry/setrun.py | AsianHam/geoclaw | b5f9ee8cd6e64d107ba8bba1e6d588aa7bf6d417 | [
"BSD-3-Clause"
] | 274 | 2015-02-20T18:25:04.000Z | 2022-03-09T23:51:47.000Z | examples/tsunami/eta_init_force_dry/setrun.py | AsianHam/geoclaw | b5f9ee8cd6e64d107ba8bba1e6d588aa7bf6d417 | [
"BSD-3-Clause"
] | 66 | 2015-01-10T00:05:00.000Z | 2022-02-24T22:05:16.000Z | """
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
from __future__ import absolute_import
from __future__ import print_function
import os, sys
import numpy as np
try:
CLAW = os.environ['CLAW']
except:
raise Exception("*** Must first set CLAW enviornment variable")
from clawpack.geoclaw.data import ForceDry
from clawpack.amrclaw.data import FlagRegion
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "geoclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
#probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
#probdata.add_param('variable_eta_init', True) # now in qinit info
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
# (or to amr2ez.data for AMR)
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# Set single grid parameters first.
# See below for AMR parameters.
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
# x values should be integer multipes of 1/3"
# y values should be integer multipes of 1/3"
# Note: always satisfied if limits are multiples of 0.01 degree
arcsec16 = 1./(6*3600.)
# choose domain and offset edges by half a 1/3" cell so
# cell centers are exactly at DEM grid points:
clawdata.lower[0] = -1.9 - arcsec16 # west longitude
clawdata.upper[0] = 0.1 - arcsec16 # east longitude
clawdata.lower[1] = -1.9 - arcsec16 # south latitude
clawdata.upper[1] = 1.9 - arcsec16 # north latitude
# choose mx and my so coarsest grid has 2 minute resolution:
clawdata.num_cells[0] = 60
clawdata.num_cells[1] = 114
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 3
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 2
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.0
# Restart from checkpoint file of a previous run?
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = ''
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
# The solution at initial time t0 is always written in addition.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output nout frames at equally spaced times up to tfinal:
clawdata.num_output_times = 15
clawdata.tfinal = 30*60.
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list of output times.
clawdata.output_times = [0.5, 1.0]
elif clawdata.output_style == 3:
# Output every iout timesteps with a total of ntot time steps:
clawdata.output_step_interval = 1
clawdata.total_steps = 20
clawdata.output_t0 = True
clawdata.output_format = 'binary'
clawdata.output_q_components = 'all' # need all
clawdata.output_aux_components = 'none' # eta=h+B is in q
clawdata.output_aux_onlyonce = False # output aux arrays each frame
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 1
# --------------
# Time stepping:
# --------------
# if dt_variable==1: variable time steps used based on cfl_desired,
# if dt_variable==0: fixed time steps dt = dt_initial will always be used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# If dt_variable==0 then dt=dt_initial for all steps:
clawdata.dt_initial = 0.2
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used, and max to allow without
# retaking step with a smaller dt:
clawdata.cfl_desired = 0.8
clawdata.cfl_max = 1.0
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 5000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'mc' ==> MC limiter
# 4 or 'vanleer' ==> van Leer
clawdata.limiter = ['mc', 'mc', 'mc']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 'godunov'
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 => user specified (must modify bcN.f to use this option)
# 1 => extrapolation (non-reflecting outflow)
# 2 => periodic (must specify this at both boundaries)
# 3 => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap'
clawdata.bc_upper[0] = 'extrap'
clawdata.bc_lower[1] = 'extrap'
clawdata.bc_upper[1] = 'extrap'
# --------------
# Checkpointing:
# --------------
# Specify when checkpoint files should be created that can be
# used to restart a computation.
# negative checkpoint_style means alternate between aaaaa and bbbbb files
# so that at most 2 checkpoint files exist at any time, useful when
# doing frequent checkpoints of large problems.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif abs(clawdata.checkpt_style) == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = 3600.*np.arange(1,16,1)
elif abs(clawdata.checkpt_style) == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 4
# List of refinement ratios at each level (length at least mxnest-1)
# dx = dy = 2', 10", 2", 1/3":
amrdata.refinement_ratios_x = [12,5,6]
amrdata.refinement_ratios_y = [12,5,6]
amrdata.refinement_ratios_t = [12,5,6]
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length maux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center','capacity','yleft']
# Flag using refinement routine flag2refine rather than richardson error
amrdata.flag_richardson = False # use Richardson?
amrdata.flag2refine = True
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 3
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
# clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
# (closer to 1.0 => more small grids may be needed to cover flagged cells)
amrdata.clustering_cutoff = 0.700000
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 1
# ---------------
# Regions:
# ---------------
#rundata.regiondata.regions = []
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
# NO OLD STYLE REGIONS USED HERE
# ---------------
# NEW flagregions
# ---------------
flagregions = rundata.flagregiondata.flagregions # initialized to []
# now append as many flagregions as desired to this list:
# The entire domain restricted to level 1 for illustration:
# Note that this is a rectangle specified in the new way:
# (other regions below will force/allow more refinement)
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_domain'
flagregion.minlevel = 1
flagregion.maxlevel = 1
flagregion.t1 = 0.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1 # Rectangle
# domain plus a bit so kml files look nicer:
flagregion.spatial_region = [clawdata.lower[0] - 0.1,
clawdata.upper[0] + 0.1,
clawdata.lower[1] - 0.1,
clawdata.upper[1] + 0.1]
flagregions.append(flagregion)
# force 2 levels around dtopo source region for short time:
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level2_dtopo'
flagregion.minlevel = 2
flagregion.maxlevel = 2
flagregion.t1 = 0.
flagregion.t2 = 2.
flagregion.spatial_region_type = 1 # Rectangle
flagregion.spatial_region = [-2,1,-1,1]
flagregions.append(flagregion)
# allow 3 levels around coastal region for all times:
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level3'
flagregion.minlevel = 1
flagregion.maxlevel = 3
flagregion.t1 = 0.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1 # Rectangle
flagregion.spatial_region = [-0.01,0.01,-0.01,0.01]
flagregions.append(flagregion)
# force 4 levels around coastal region starting at 5 minutes:
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level4'
flagregion.minlevel = 4
flagregion.maxlevel = 4
flagregion.t1 = 5*60.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1 # Rectangle
flagregion.spatial_region = [-0.005, 0.01, -0.011, 0.011]
flagregions.append(flagregion)
# ---------------
# Gauges:
# ---------------
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
rundata.gaugedata.gauges = []
# Set GeoClaw specific runtime parameters.
try:
geo_data = rundata.geo_data
except:
print("*** Error, this rundata has no geo_data attribute")
raise AttributeError("Missing geo_data attribute")
# == Physics ==
geo_data.gravity = 9.81
geo_data.coordinate_system = 2
geo_data.earth_radius = 6367.5e3
# == Forcing Options
geo_data.coriolis_forcing = False
# == Algorithm and Initial Conditions ==
geo_data.sea_level = 0.0
geo_data.dry_tolerance = 1.e-3
geo_data.friction_forcing = True
geo_data.manning_coefficient =.025
geo_data.friction_depth = 1e6
# Refinement settings
refinement_data = rundata.refinement_data
refinement_data.variable_dt_refinement_ratios = True
refinement_data.wave_tolerance = 0.2
# == settopo.data values ==
topofiles = rundata.topo_data.topofiles
# for topography, append lines of the form
# [topotype, fname]
topodir = 'input_files'
topofiles.append([3, topodir + '/topo_ocean.tt3'])
topofiles.append([3, topodir + '/topo_shore.tt3'])
# == setdtopo.data values ==
dtopo_data = rundata.dtopo_data
# for moving topography, append lines of the form : (<= 1 allowed for now!)
# [topotype, fname]
dtopodir = 'input_files'
dtopo_data.dtopofiles.append([3, dtopodir + '/dtopo_test.tt3'])
dtopo_data.dt_max_dtopo = 1.0
# == setqinit.data values ==
rundata.qinit_data.qinit_type = 0
rundata.qinit_data.qinitfiles = []
# for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
# [fname]
# NEW feature to adjust sea level by dtopo:
rundata.qinit_data.variable_eta_init = True
# NEW feature to force dry land some locations below sea level:
force_dry = ForceDry()
force_dry.tend = 7*60.
force_dry.fname = 'input_files/force_dry_init.tt3'
rundata.qinit_data.force_dry_list.append(force_dry)
# == fgmax.data values ==
#fgmax_files = rundata.fgmax_data.fgmax_files
# for fixed grids append to this list names of any fgmax input files
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
# More AMR parameters can be set -- see the defaults in pyclaw/data.py
return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
| 30.998008 | 92 | 0.620911 |
from __future__ import absolute_import
from __future__ import print_function
import os, sys
import numpy as np
try:
CLAW = os.environ['CLAW']
except:
raise Exception("*** Must first set CLAW enviornment variable")
from clawpack.geoclaw.data import ForceDry
from clawpack.amrclaw.data import FlagRegion
def setrun(claw_pkg='geoclaw'):
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
clawdata = rundata.clawdata
clawdata.num_dim = num_dim
# y values should be integer multipes of 1/3"
arcsec16 = 1./(6*3600.)
# cell centers are exactly at DEM grid points:
clawdata.lower[0] = -1.9 - arcsec16 # west longitude
clawdata.upper[0] = 0.1 - arcsec16 # east longitude
clawdata.lower[1] = -1.9 - arcsec16 # south latitude
clawdata.upper[1] = 1.9 - arcsec16 # north latitude
# choose mx and my so coarsest grid has 2 minute resolution:
clawdata.num_cells[0] = 60
clawdata.num_cells[1] = 114
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 3
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 2
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.0
# Restart from checkpoint file of a previous run?
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = ''
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
# The solution at initial time t0 is always written in addition.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output nout frames at equally spaced times up to tfinal:
clawdata.num_output_times = 15
clawdata.tfinal = 30*60.
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list of output times.
clawdata.output_times = [0.5, 1.0]
elif clawdata.output_style == 3:
# Output every iout timesteps with a total of ntot time steps:
clawdata.output_step_interval = 1
clawdata.total_steps = 20
clawdata.output_t0 = True
clawdata.output_format = 'binary'
clawdata.output_q_components = 'all' # need all
clawdata.output_aux_components = 'none' # eta=h+B is in q
clawdata.output_aux_onlyonce = False # output aux arrays each frame
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 1
# --------------
# Time stepping:
# --------------
# if dt_variable==1: variable time steps used based on cfl_desired,
# if dt_variable==0: fixed time steps dt = dt_initial will always be used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# If dt_variable==0 then dt=dt_initial for all steps:
clawdata.dt_initial = 0.2
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used, and max to allow without
# retaking step with a smaller dt:
clawdata.cfl_desired = 0.8
clawdata.cfl_max = 1.0
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 5000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'mc' ==> MC limiter
# 4 or 'vanleer' ==> van Leer
clawdata.limiter = ['mc', 'mc', 'mc']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 'godunov'
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 => user specified (must modify bcN.f to use this option)
# 1 => extrapolation (non-reflecting outflow)
# 2 => periodic (must specify this at both boundaries)
# 3 => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap'
clawdata.bc_upper[0] = 'extrap'
clawdata.bc_lower[1] = 'extrap'
clawdata.bc_upper[1] = 'extrap'
# --------------
# Checkpointing:
# --------------
# Specify when checkpoint files should be created that can be
# used to restart a computation.
# negative checkpoint_style means alternate between aaaaa and bbbbb files
# so that at most 2 checkpoint files exist at any time, useful when
# doing frequent checkpoints of large problems.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif abs(clawdata.checkpt_style) == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = 3600.*np.arange(1,16,1)
elif abs(clawdata.checkpt_style) == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 4
# List of refinement ratios at each level (length at least mxnest-1)
# dx = dy = 2', 10", 2", 1/3":
amrdata.refinement_ratios_x = [12,5,6]
amrdata.refinement_ratios_y = [12,5,6]
amrdata.refinement_ratios_t = [12,5,6]
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length maux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center','capacity','yleft']
# Flag using refinement routine flag2refine rather than richardson error
amrdata.flag_richardson = False # use Richardson?
amrdata.flag2refine = True
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 3
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
ta.verbosity_regrid = 1
flagregions = rundata.flagregiondata.flagregions
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_domain'
flagregion.minlevel = 1
flagregion.maxlevel = 1
flagregion.t1 = 0.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1
flagregion.spatial_region = [clawdata.lower[0] - 0.1,
clawdata.upper[0] + 0.1,
clawdata.lower[1] - 0.1,
clawdata.upper[1] + 0.1]
flagregions.append(flagregion)
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level2_dtopo'
flagregion.minlevel = 2
flagregion.maxlevel = 2
flagregion.t1 = 0.
flagregion.t2 = 2.
flagregion.spatial_region_type = 1
flagregion.spatial_region = [-2,1,-1,1]
flagregions.append(flagregion)
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level3'
flagregion.minlevel = 1
flagregion.maxlevel = 3
flagregion.t1 = 0.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1
flagregion.spatial_region = [-0.01,0.01,-0.01,0.01]
flagregions.append(flagregion)
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level4'
flagregion.minlevel = 4
flagregion.maxlevel = 4
flagregion.t1 = 5*60.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1
flagregion.spatial_region = [-0.005, 0.01, -0.011, 0.011]
flagregions.append(flagregion)
rundata.gaugedata.gauges = []
try:
geo_data = rundata.geo_data
except:
print("*** Error, this rundata has no geo_data attribute")
raise AttributeError("Missing geo_data attribute")
geo_data.gravity = 9.81
geo_data.coordinate_system = 2
geo_data.earth_radius = 6367.5e3
geo_data.coriolis_forcing = False
geo_data.sea_level = 0.0
geo_data.dry_tolerance = 1.e-3
geo_data.friction_forcing = True
geo_data.manning_coefficient =.025
geo_data.friction_depth = 1e6
refinement_data = rundata.refinement_data
refinement_data.variable_dt_refinement_ratios = True
refinement_data.wave_tolerance = 0.2
topofiles = rundata.topo_data.topofiles
topodir = 'input_files'
topofiles.append([3, topodir + '/topo_ocean.tt3'])
topofiles.append([3, topodir + '/topo_shore.tt3'])
dtopo_data = rundata.dtopo_data
dtopodir = 'input_files'
dtopo_data.dtopofiles.append([3, dtopodir + '/dtopo_test.tt3'])
dtopo_data.dt_max_dtopo = 1.0
rundata.qinit_data.qinit_type = 0
rundata.qinit_data.qinitfiles = []
rundata.qinit_data.variable_eta_init = True
force_dry = ForceDry()
force_dry.tend = 7*60.
force_dry.fname = 'input_files/force_dry_init.tt3'
rundata.qinit_data.force_dry_list.append(force_dry)
amrdata.dprint = False
amrdata.eprint = False
amrdata.edebug = False
amrdata.gprint = False
amrdata.nprint = False
amrdata.pprint = False
amrdata.rprint = False
amrdata.sprint = False
amrdata.tprint = False
amrdata.uprint = False
return rundata
if __name__ == '__main__':
    # Build the rundata object (any command-line args are forwarded to
    # setrun, e.g. the Clawpack package name) and write the *.data files.
    import sys
    rundata = setrun(*sys.argv[1:])
    rundata.write()
| true | true |
1c46a52971174cbf30b7942c93c8d2e79189a054 | 4,037 | py | Python | django_opentracing/tracer.py | dudymas/opentracing-django | 3ad67f5c28f7eb36df558dc9e5e171e960afd9cb | [
"BSD-3-Clause"
] | null | null | null | django_opentracing/tracer.py | dudymas/opentracing-django | 3ad67f5c28f7eb36df558dc9e5e171e960afd9cb | [
"BSD-3-Clause"
] | null | null | null | django_opentracing/tracer.py | dudymas/opentracing-django | 3ad67f5c28f7eb36df558dc9e5e171e960afd9cb | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
import opentracing
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
_thread_locals = local()
django_tracer = None
def get_tracer():
    # Return the globally-installed OpenTracing tracer (whatever tracer
    # implementation the application registered on the opentracing module).
    return opentracing.tracer
def get_current_span(request=None):
    """Return the span currently tracing ``request``, or None.

    When no request is passed, fall back to the request stashed on the
    thread-local by ``DjangoTracer._apply_tracing``.

    :param request: optional Django (or DRF-wrapped) request object
    :return: the active span for the request, or None when no
        DjangoTracer has been instantiated or no span is recorded
    """
    if request is None:
        request = getattr(_thread_locals, "request", None)
    # this lets django rest framework work seamlessly since they wrap the request
    if hasattr(request, '_request'):
        request = request._request
    # Fix: compare against None by identity (was ``!= None``).
    if django_tracer is not None:
        return django_tracer.get_span(request)
    return None
class DjangoTracer(object):
    '''
    Traces Django requests with an OpenTracing tracer, keeping one span
    per in-flight request.

    @param tracer the OpenTracing tracer to be used
    to trace requests using this DjangoTracer
    '''
    def __init__(self, tracer):
        global django_tracer
        django_tracer = self
        self._tracer = tracer
        # Maps each in-flight request object to its active span.
        self._current_spans = {}
        # Coerce the optional settings flag to a plain bool; a missing or
        # falsy OPENTRACING_TRACE_ALL means "trace only decorated views".
        self._trace_all = bool(getattr(settings, 'OPENTRACING_TRACE_ALL', False))

    def get_span(self, request):
        '''
        @param request
        Returns the span tracing this request, or None if the request is
        not currently being traced.
        '''
        return self._current_spans.get(request, None)

    def trace(self, *attributes):
        '''
        Function decorator that traces functions
        NOTE: Must be placed after the @app.route decorator
        @param attributes any number of request attributes
        (strings) to be set as tags on the created span
        '''
        # Local import keeps the module's import surface unchanged.
        from functools import wraps

        def decorator(view_func):
            # TODO: do we want to provide option of overriding trace_all_requests so that they
            # can trace certain attributes of the request for just this request (this would require
            # to reinstate the name-mangling with a trace identifier, and another settings key)
            # When OPENTRACING_TRACE_ALL is set the middleware already traces
            # every request, so the decorator becomes a no-op.
            if self._trace_all:
                return view_func

            # otherwise, execute decorator
            @wraps(view_func)  # fix: preserve the view's __name__/__doc__
            def wrapper(request):
                self._apply_tracing(request, view_func, list(attributes))
                r = view_func(request)
                self._finish_tracing(request)
                return r
            return wrapper
        return decorator

    def _apply_tracing(self, request, view_func, attributes):
        '''
        Helper function to avoid rewriting for middleware and decorator.
        Returns a new span from the request with logged attributes and
        correct operation name from the view_func.
        '''
        setattr(_thread_locals, 'request', request)
        # strip headers for trace info: convert META's HTTP_FOO_BAR keys
        # into plain lower-case "foo-bar" header names for the extractor.
        headers = {}
        for k, v in request.META.items():
            k = k.lower().replace('_', '-')
            if k.startswith('http-'):
                k = k[5:]
            headers[k] = v
        # start new span from trace info: continue an inbound trace when
        # the headers carry one, otherwise start a fresh root span.
        operation_name = view_func.__name__
        try:
            span_ctx = self._tracer.extract(opentracing.Format.HTTP_HEADERS, headers)
            span = self._tracer.start_span(operation_name=operation_name, child_of=span_ctx)
        except (opentracing.InvalidCarrierException,
                opentracing.SpanContextCorruptedException):
            span = self._tracer.start_span(operation_name=operation_name)
        if span is None:  # defensive: guard against tracers returning None
            span = self._tracer.start_span(operation_name=operation_name)
        # add span to current spans
        self._current_spans[request] = span
        # log any traced attributes requested by the decorator
        for attr in attributes:
            if hasattr(request, attr):
                payload = str(getattr(request, attr))
                if payload:
                    span.set_tag(attr, payload)
        return span

    def _finish_tracing(self, request):
        # Finish and forget the span for this request, and clear the
        # thread-local so get_current_span() stops returning it.
        span = self._current_spans.pop(request, None)
        if span is not None:
            span.finish()
        setattr(_thread_locals, 'request', None)
| 35.104348 | 101 | 0.628685 | from django.conf import settings
import opentracing
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
_thread_locals = local()
django_tracer = None
def get_tracer():
return opentracing.tracer
def get_current_span(request=None):
if request is None:
request = getattr(_thread_locals, "request", None)
if hasattr(request, '_request'):
request = request._request
if django_tracer != None:
return django_tracer.get_span(request)
else:
return None
class DjangoTracer(object):
def __init__(self, tracer):
global django_tracer
django_tracer = self
self._tracer = tracer
self._current_spans = {}
if not hasattr(settings, 'OPENTRACING_TRACE_ALL'):
self._trace_all = False
elif not getattr(settings, 'OPENTRACING_TRACE_ALL'):
self._trace_all = False
else:
self._trace_all = True
def get_span(self, request):
return self._current_spans.get(request, None)
def trace(self, *attributes):
def decorator(view_func):
if self._trace_all:
return view_func
def wrapper(request):
span = self._apply_tracing(request, view_func, list(attributes))
r = view_func(request)
self._finish_tracing(request)
return r
return wrapper
return decorator
def _apply_tracing(self, request, view_func, attributes):
setattr(_thread_locals, 'request', request)
headers = {}
for k,v in request.META.items():
k = k.lower().replace('_','-')
if k.startswith('http-'):
k = k[5:]
headers[k] = v
span = None
operation_name = view_func.__name__
try:
span_ctx = self._tracer.extract(opentracing.Format.HTTP_HEADERS, headers)
span = self._tracer.start_span(operation_name=operation_name, child_of=span_ctx)
except (opentracing.InvalidCarrierException, opentracing.SpanContextCorruptedException) as e:
span = self._tracer.start_span(operation_name=operation_name)
if span is None:
span = self._tracer.start_span(operation_name=operation_name)
self._current_spans[request] = span
for attr in attributes:
if hasattr(request, attr):
payload = str(getattr(request, attr))
if payload:
span.set_tag(attr, payload)
return span
def _finish_tracing(self, request):
span = self._current_spans.pop(request, None)
if span is not None:
span.finish()
setattr(_thread_locals, 'request', None)
| true | true |
1c46a5e42c962596d622b69453575d9dba1e629a | 291 | py | Python | manage.py | cron-ooo/django-influxdb-metrics | 7cecc315e12219897d941a6c02eac8ffc182b645 | [
"MIT"
] | 54 | 2016-11-25T10:00:23.000Z | 2022-03-17T09:27:49.000Z | manage.py | cron-ooo/django-influxdb-metrics | 7cecc315e12219897d941a6c02eac8ffc182b645 | [
"MIT"
] | 27 | 2016-12-01T17:35:37.000Z | 2021-03-30T16:37:49.000Z | manage.py | cron-ooo/django-influxdb-metrics | 7cecc315e12219897d941a6c02eac8ffc182b645 | [
"MIT"
] | 23 | 2016-11-22T09:26:28.000Z | 2022-03-14T11:34:33.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the test-suite settings (unless already configured),
    # then dispatch to the standard management CLI.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                          'influxdb_metrics.tests.settings')
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 24.25 | 64 | 0.71134 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'influxdb_metrics.tests.settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true | true |
1c46a757bacabb1d955e01d6573c009ce4f99f02 | 1,247 | py | Python | airflow/utils/types.py | shashijangra/airflow-1 | c3e340584bf1892c4f73aa9e7495b5823dab0c40 | [
"Apache-2.0"
] | 2 | 2021-07-30T17:25:56.000Z | 2021-08-03T13:51:09.000Z | airflow/utils/types.py | shashijangra/airflow-1 | c3e340584bf1892c4f73aa9e7495b5823dab0c40 | [
"Apache-2.0"
] | 14 | 2019-12-03T02:54:42.000Z | 2020-02-27T16:08:10.000Z | airflow/utils/types.py | shashijangra/airflow-1 | c3e340584bf1892c4f73aa9e7495b5823dab0c40 | [
"Apache-2.0"
] | 1 | 2021-07-02T04:23:18.000Z | 2021-07-02T04:23:18.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import enum
class DagRunType(enum.Enum):
    """Enumeration of the possible DagRun types."""

    BACKFILL_JOB = "backfill"
    SCHEDULED = "scheduled"
    MANUAL = "manual"

    @staticmethod
    def from_run_id(run_id: str) -> "DagRunType":
        """Resolve the DagRun type encoded as a ``<type>__`` run_id prefix.

        Falls back to MANUAL when the run_id is empty or carries no
        known prefix.
        """
        if not run_id:
            return DagRunType.MANUAL
        matches = (t for t in DagRunType if run_id.startswith(f"{t.value}__"))
        return next(matches, DagRunType.MANUAL)
| 34.638889 | 67 | 0.709703 |
import enum
class DagRunType(enum.Enum):
BACKFILL_JOB = "backfill"
SCHEDULED = "scheduled"
MANUAL = "manual"
@staticmethod
def from_run_id(run_id: str) -> "DagRunType":
for run_type in DagRunType:
if run_id and run_id.startswith(f"{run_type.value}__"):
return run_type
return DagRunType.MANUAL
| true | true |
1c46a76ba0a8d816e3975d24a0ab719d4db28b66 | 1,038 | py | Python | Download/PythonExercicios/ex028.py | r-luis/Python-CursoemVideo | f978b2f4ab8444ebb746b4c85bd6db6d7775cbb4 | [
"MIT"
] | null | null | null | Download/PythonExercicios/ex028.py | r-luis/Python-CursoemVideo | f978b2f4ab8444ebb746b4c85bd6db6d7775cbb4 | [
"MIT"
] | null | null | null | Download/PythonExercicios/ex028.py | r-luis/Python-CursoemVideo | f978b2f4ab8444ebb746b4c85bd6db6d7775cbb4 | [
"MIT"
] | null | null | null | '''Escreva um programa que faça o computador "pensar" em um número inteiro entre 0 e 5 e peça para
o usuário tentar descobrir o número escolhido pelo computador.
O programa deverá escrever na tela se o usuário venceu ou perdeu.'''
from random import randint
from time import sleep
comp = randint(0, 5) # Computer "thinks" of a number between 0 and 5
# ANSI escape codes used to colorize terminal output.
cores = {'limpa':'\033[m',
         'azul':'\033[34m',
         'vermelho':'\033[31m',
         'roxo':'\033[1;35m'}
# Banner: colored divider, centered title, colored divider.
print('{}-*{}'.format(cores['azul'], cores['limpa'])*12)
txt = '{}JOGO DA ADVINHAÇÃO 1.0v{}'.format(cores['vermelho'], cores['limpa'])
print('{:^30}'.format(txt))
print('{}*-{}'.format(cores['azul'], cores['limpa'])*12)
usu = int(input('''Digite um número entre 0 e 5 e tente descobrir
se é o mesmo número escolhido pelo computador: ''')) # The user guesses a number
# Small pause for dramatic effect before revealing the result.
print('{}PROCESSANDO...{}'.format(cores['roxo'], cores['limpa']))
sleep(2)
if comp == usu:
    print('PARABÉNS, VOCÊ VENCEU!')
else:
    print('Você perdeu, o número escolhido pelo PC foi {} e não {}.'.format(comp, usu))
| 43.25 | 98 | 0.662813 | from random import randint
from time import sleep
comp = randint(0, 5)
cores = {'limpa':'\033[m',
'azul':'\033[34m',
'vermelho':'\033[31m',
'roxo':'\033[1;35m'}
print('{}-*{}'.format(cores['azul'], cores['limpa'])*12)
txt = '{}JOGO DA ADVINHAÇÃO 1.0v{}'.format(cores['vermelho'], cores['limpa'])
print('{:^30}'.format(txt))
print('{}*-{}'.format(cores['azul'], cores['limpa'])*12)
usu = int(input('''Digite um número entre 0 e 5 e tente descobrir
se é o mesmo número escolhido pelo computador: '''))
print('{}PROCESSANDO...{}'.format(cores['roxo'], cores['limpa']))
sleep(2)
if comp == usu:
print('PARABÉNS, VOCÊ VENCEU!')
else:
print('Você perdeu, o número escolhido pelo PC foi {} e não {}.'.format(comp, usu))
| true | true |
1c46a7ad97f59fed8090c80f1a79ff7c63808989 | 1,479 | py | Python | test/tests/16_view-status/hooks.py | airtower-luna/mod_gnutls | b6ce8add210095b2b983f63a818f5d157aff9f89 | [
"Apache-2.0"
] | 2 | 2015-04-06T11:28:40.000Z | 2021-07-11T12:34:53.000Z | test/tests/16_view-status/hooks.py | airtower-luna/mod_gnutls | b6ce8add210095b2b983f63a818f5d157aff9f89 | [
"Apache-2.0"
] | 4 | 2020-09-07T19:27:20.000Z | 2021-07-29T19:29:40.000Z | test/tests/16_view-status/hooks.py | airtower-luna/mod_gnutls | b6ce8add210095b2b983f63a818f5d157aff9f89 | [
"Apache-2.0"
] | 1 | 2022-01-26T12:17:17.000Z | 2022-01-26T12:17:17.000Z | from mgstest import require_match, TestExpectationFailed
import re
def post_check(conn_log, response_log):
    """Compare the TLS session information reported by gnutls-cli and the
    mod_gnutls status listing."""
    # Group 1 captures the TLS version, group 2 the cipher list. Any
    # certificate type inside the version brackets is ignored.
    session_pattern = r'\((TLS[\d\.]+).*?\)-(.*)'
    # gnutls-cli prefixes the session line with "- Description: ".
    client_re = re.compile(r'(?<=^-\sDescription:\s)' + session_pattern + '$')
    # mod_gnutls prefixes its status line with "Current TLS session: ".
    server_re = re.compile(r'(?<=^Current TLS session:\s)' + session_pattern + '$')

    client = require_match(client_re, conn_log,
                           'Client cipher suite information is missing!')
    server = require_match(server_re, response_log,
                           'Server cipher suite information is missing!')

    print(f'Client session info: {client.group(0)}')
    print(f'Server session info: {server.group(0)}')

    # Protocol first, then ciphers — same order and messages as before.
    for group, kind in ((1, 'protocols'), (2, 'ciphers')):
        if client.group(group) != server.group(group):
            raise TestExpectationFailed(
                f'Client ({client.group(group)}) and server '
                f'({server.group(group)}) report different {kind}!')
| 41.083333 | 79 | 0.646383 | from mgstest import require_match, TestExpectationFailed
import re
def post_check(conn_log, response_log):
re_session = r'\((TLS[\d\.]+).*?\)-(.*)'
re_cli = re.compile(r'(?<=^-\sDescription:\s)' + re_session + '$')
re_status = re.compile(r'(?<=^Current TLS session:\s)' + re_session + '$')
cli_suite = require_match(re_cli, conn_log,
'Client cipher suite information is missing!')
status_suite = require_match(re_status, response_log,
'Server cipher suite information is missing!')
print(f'Client session info: {cli_suite.group(0)}')
print(f'Server session info: {status_suite.group(0)}')
if cli_suite.group(1) != status_suite.group(1):
raise TestExpectationFailed(
f'Client ({cli_suite.group(1)}) and server '
f'({status_suite.group(1)}) report different protocols!')
if cli_suite.group(2) != status_suite.group(2):
raise TestExpectationFailed(
f'Client ({cli_suite.group(2)}) and server '
f'({status_suite.group(2)}) report different ciphers!')
| true | true |
1c46a7d2b1edab1bb3265dfd5cfcfa7042f23663 | 3,389 | py | Python | test/functional/test_framework/coverage.py | bitcoinexodus/bitcoinexodus-source | 742661b3dc9abce61c05fa1561b7fd9496629866 | [
"MIT"
] | null | null | null | test/functional/test_framework/coverage.py | bitcoinexodus/bitcoinexodus-source | 742661b3dc9abce61c05fa1561b7fd9496629866 | [
"MIT"
] | null | null | null | test/functional/test_framework/coverage.py | bitcoinexodus/bitcoinexodus-source | 742661b3dc9abce61c05fa1561b7fd9496629866 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
    """
    Wraps an AuthServiceProxy so that every RPC method invoked through
    it is recorded for coverage analysis.
    """
    def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
        """
        Kwargs:
            auth_service_proxy_instance (AuthServiceProxy): the instance
                being wrapped.
            coverage_logfile (str): if specified, write each service_name
                out to a file when called.
        """
        self.auth_service_proxy_instance = auth_service_proxy_instance
        self.coverage_logfile = coverage_logfile

    def __getattr__(self, name):
        attr = getattr(self.auth_service_proxy_instance, name)
        if not isinstance(attr, type(self.auth_service_proxy_instance)):
            # The proxy handed back a plain value: return it unwrapped.
            return attr
        # Keep wrapping nested proxies so their calls are logged too.
        return AuthServiceProxyWrapper(attr, self.coverage_logfile)

    def __call__(self, *args, **kwargs):
        """
        Delegates to AuthServiceProxy, then writes the particular RPC
        method called to a file.
        """
        result = self.auth_service_proxy_instance.__call__(*args, **kwargs)
        self._log_call()
        return result

    def _log_call(self):
        method = self.auth_service_proxy_instance._service_name
        if self.coverage_logfile:
            with open(self.coverage_logfile, 'a+', encoding='utf8') as fh:
                fh.write("%s\n" % method)

    def __truediv__(self, relative_uri):
        nested = self.auth_service_proxy_instance / relative_uri
        return AuthServiceProxyWrapper(nested, self.coverage_logfile)

    def get_request(self, *args, **kwargs):
        self._log_call()
        return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
    """
    Get a filename unique to the test process ID and node.

    This file will contain a list of RPC commands covered.
    """
    basename = f"coverage.pid{os.getpid()}.node{n_node}.txt"
    return os.path.join(dirname, basename)
def write_all_rpc_commands(dirname, node):
    """
    Write out a list of all RPC functions available in `bitcoinexodus-cli`
    for coverage comparison. This will only happen once per coverage
    directory.

    Args:
        dirname (str): temporary test dir
        node (AuthServiceProxy): client

    Returns:
        bool. if the RPC interface file was written.
    """
    filename = os.path.join(dirname, REFERENCE_FILENAME)
    if os.path.isfile(filename):
        return False

    commands = set()
    for raw_line in node.help().split('\n'):
        stripped = raw_line.strip()
        # Skip blank lines and "= Section =" headers.
        if not stripped or stripped.startswith('='):
            continue
        commands.add("%s\n" % stripped.split()[0])

    with open(filename, 'w', encoding='utf8') as fh:
        fh.writelines(list(commands))
    return True
| 30.809091 | 87 | 0.661552 |
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
| true | true |
1c46a823ab8bfa1ba9d65641cad3d178ad45586f | 902 | py | Python | psx/_dump_/33/_dump_ida_/overlay_3/set_funcs.py | maoa3/scalpel | 2e7381b516cded28996d290438acc618d00b2aa7 | [
"Unlicense"
] | 15 | 2018-06-28T01:11:25.000Z | 2021-09-27T15:57:18.000Z | psx/_dump_/33/_dump_ida_/overlay_3/set_funcs.py | maoa3/scalpel | 2e7381b516cded28996d290438acc618d00b2aa7 | [
"Unlicense"
] | 7 | 2018-06-29T04:08:23.000Z | 2019-10-17T13:57:22.000Z | psx/_dump_/33/_dump_ida_/overlay_3/set_funcs.py | maoa3/scalpel | 2e7381b516cded28996d290438acc618d00b2aa7 | [
"Unlicense"
] | 7 | 2018-06-28T01:11:34.000Z | 2020-05-23T09:21:48.000Z | del_items(0x800A1608)
# IDAPython batch script: for each overlay address, clear any existing
# items and apply the recovered (name-mangled C++) function prototype.
SetType(0x800A1608, "void VID_OpenModule__Fv()")
del_items(0x800A16C8)
SetType(0x800A16C8, "void InitScreens__Fv()")
del_items(0x800A17B8)
SetType(0x800A17B8, "void MEM_SetupMem__Fv()")
del_items(0x800A17E4)
SetType(0x800A17E4, "void SetupWorkRam__Fv()")
del_items(0x800A1874)
SetType(0x800A1874, "void SYSI_Init__Fv()")
del_items(0x800A1980)
SetType(0x800A1980, "void GM_Open__Fv()")
del_items(0x800A19A4)
SetType(0x800A19A4, "void PA_Open__Fv()")
del_items(0x800A19DC)
SetType(0x800A19DC, "void PAD_Open__Fv()")
del_items(0x800A1A20)
SetType(0x800A1A20, "void OVR_Open__Fv()")
del_items(0x800A1A40)
SetType(0x800A1A40, "void SCR_Open__Fv()")
del_items(0x800A1A70)
SetType(0x800A1A70, "void DEC_Open__Fv()")
del_items(0x800A1CE4)
SetType(0x800A1CE4, "char *GetVersionString__FPc(char *VersionString2)")
del_items(0x800A1DB8)
SetType(0x800A1DB8, "char *GetWord__FPc(char *VStr)")
| 33.407407 | 72 | 0.805987 | del_items(0x800A1608)
SetType(0x800A1608, "void VID_OpenModule__Fv()")
del_items(0x800A16C8)
SetType(0x800A16C8, "void InitScreens__Fv()")
del_items(0x800A17B8)
SetType(0x800A17B8, "void MEM_SetupMem__Fv()")
del_items(0x800A17E4)
SetType(0x800A17E4, "void SetupWorkRam__Fv()")
del_items(0x800A1874)
SetType(0x800A1874, "void SYSI_Init__Fv()")
del_items(0x800A1980)
SetType(0x800A1980, "void GM_Open__Fv()")
del_items(0x800A19A4)
SetType(0x800A19A4, "void PA_Open__Fv()")
del_items(0x800A19DC)
SetType(0x800A19DC, "void PAD_Open__Fv()")
del_items(0x800A1A20)
SetType(0x800A1A20, "void OVR_Open__Fv()")
del_items(0x800A1A40)
SetType(0x800A1A40, "void SCR_Open__Fv()")
del_items(0x800A1A70)
SetType(0x800A1A70, "void DEC_Open__Fv()")
del_items(0x800A1CE4)
SetType(0x800A1CE4, "char *GetVersionString__FPc(char *VersionString2)")
del_items(0x800A1DB8)
SetType(0x800A1DB8, "char *GetWord__FPc(char *VStr)")
| true | true |
1c46a948950c865d0aa8057ce1992e94cb642566 | 816 | py | Python | ta/tests/__init__.py | levelvc/ta | 69aa29f60c691f1628a62e480cfd6bfb3a5c1793 | [
"MIT"
] | 2 | 2020-04-13T03:34:01.000Z | 2020-06-01T14:41:26.000Z | ta/tests/__init__.py | Glyphack/ta | ff46a2cb64f7446921bb3c47c882105a16f9d4f9 | [
"MIT"
] | null | null | null | ta/tests/__init__.py | Glyphack/ta | ff46a2cb64f7446921bb3c47c882105a16f9d4f9 | [
"MIT"
] | null | null | null | from ta.tests.momentum import (TestKAMAIndicator, TestRateOfChangeIndicator,
TestRSIIndicator, TestStochasticOscillator,
TestTSIIndicator, TestUltimateOscillator,
TestWilliamsRIndicator)
from ta.tests.trend import (TestADXIndicator, TestCCIIndicator,
TestMACDIndicator, TestPSARIndicator,
TestVortexIndicator)
from ta.tests.utils import TestGeneral
from ta.tests.volatility import TestAverageTrueRange, TestBollingerBands
from ta.tests.volume import (TestAccDistIndexIndicator,
TestEaseOfMovementIndicator,
TestForceIndexIndicator, TestMFIIndicator,
TestOnBalanceVolumeIndicator)
| 58.285714 | 76 | 0.629902 | from ta.tests.momentum import (TestKAMAIndicator, TestRateOfChangeIndicator,
TestRSIIndicator, TestStochasticOscillator,
TestTSIIndicator, TestUltimateOscillator,
TestWilliamsRIndicator)
from ta.tests.trend import (TestADXIndicator, TestCCIIndicator,
TestMACDIndicator, TestPSARIndicator,
TestVortexIndicator)
from ta.tests.utils import TestGeneral
from ta.tests.volatility import TestAverageTrueRange, TestBollingerBands
from ta.tests.volume import (TestAccDistIndexIndicator,
TestEaseOfMovementIndicator,
TestForceIndexIndicator, TestMFIIndicator,
TestOnBalanceVolumeIndicator)
| true | true |
1c46aa65d6d2fd5ed331e10e2a59dc999d7176c4 | 2,420 | py | Python | experiments/actions/action_utils.py | Tobias-Fischer/dreyeve | a65342d9c503ce3ec932e2229b90aaeebfd82944 | [
"MIT"
] | 83 | 2017-05-29T04:16:42.000Z | 2022-03-03T08:09:22.000Z | experiments/actions/action_utils.py | ashinmarin/dreyeve | d73979d738e706d90a8aa9d696c6e4dcb19c1134 | [
"MIT"
] | 26 | 2017-11-09T23:35:52.000Z | 2022-03-11T03:22:57.000Z | experiments/actions/action_utils.py | ashinmarin/dreyeve | d73979d738e706d90a8aa9d696c6e4dcb19c1134 | [
"MIT"
] | 36 | 2017-09-23T02:48:41.000Z | 2022-03-11T01:34:23.000Z | """
Utilities for improve code readability in `predict_actions_with_SVM.py`
"""
import itertools
import numpy as np
import matplotlib.pyplot as plt
from os.path import join, exists
class DreyeveRun:
    """
    Single run of the DR(eye)VE dataset.
    """
    def __init__(self, dataset_data_root, num_run):
        self.num_run = num_run
        # Every per-run file lives under a zero-padded run directory.
        run_dir = join(dataset_data_root, '{:02d}'.format(num_run))
        self.file_course = join(run_dir, 'speed_course_coord.txt')
        self.file_steering = join(run_dir, 'steering_directions.txt')
        self.file_actions = join(run_dir, 'actions.csv')
class DreyeveDataset:
    """
    Class that models the Dreyeve dataset
    """
    def __init__(self, dataset_root):
        self.dataset_data_root = join(dataset_root, 'DATA')
        self.dataset_pred_root = join(dataset_root, 'PREDICTIONS_2017')

        # Runs 01-37 are for training, 38-74 for testing.
        self.train_runs = [DreyeveRun(self.dataset_data_root, run_id)
                           for run_id in range(1, 38)]
        self.test_runs = [DreyeveRun(self.dataset_data_root, run_id)
                          for run_id in range(38, 75)]

        self.frames_each_run = 7500
        self.num_train_frames = len(self.train_runs) * self.frames_each_run
        self.num_test_frames = len(self.test_runs) * self.frames_each_run
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    :param cm: square confusion-matrix array (rows = true labels)
    :param classes: label names used for the axis ticks
    :param normalize: if True, normalize each row to sum to 1
    :param title: figure title
    :param cmap: matplotlib colormap for the image
    """
    if normalize:
        # Row-normalize: each true-label row sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Annotate each cell; floats with 2 decimals when normalized,
    # raw integer counts otherwise.
    fmt = '.2f' if normalize else 'd'
    # Flip the text color above half the max value for readability.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
| 33.150685 | 110 | 0.645868 |
import itertools
import numpy as np
import matplotlib.pyplot as plt
from os.path import join, exists
class DreyeveRun:
def __init__(self, dataset_data_root, num_run):
self.num_run = num_run
self.file_course = join(dataset_data_root, '{:02d}'.format(self.num_run), 'speed_course_coord.txt')
self.file_steering = join(dataset_data_root, '{:02d}'.format(self.num_run), 'steering_directions.txt')
self.file_actions = join(dataset_data_root, '{:02d}'.format(self.num_run), 'actions.csv')
class DreyeveDataset:
def __init__(self, dataset_root):
self.dataset_data_root = join(dataset_root, 'DATA')
self.dataset_pred_root = join(dataset_root, 'PREDICTIONS_2017')
self.train_runs = [DreyeveRun(self.dataset_data_root, r) for r in range(0 + 1, 38)]
self.test_runs = [DreyeveRun(self.dataset_data_root, r) for r in range(38, 74 + 1)]
self.frames_each_run = 7500
self.num_train_frames = len(self.train_runs) * self.frames_each_run
self.num_test_frames = len(self.test_runs) * self.frames_each_run
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
| true | true |
1c46aabc654962175d4c60a0f2c647f7ad0ed11f | 435 | py | Python | leetcode/34.py | windniw/just-for-fun | 54e5c2be145f3848811bfd127f6a89545e921570 | [
"Apache-2.0"
] | 1 | 2019-08-28T23:15:25.000Z | 2019-08-28T23:15:25.000Z | leetcode/34.py | windniw/just-for-fun | 54e5c2be145f3848811bfd127f6a89545e921570 | [
"Apache-2.0"
] | null | null | null | leetcode/34.py | windniw/just-for-fun | 54e5c2be145f3848811bfd127f6a89545e921570 | [
"Apache-2.0"
] | null | null | null | """
link: https://leetcode-cn.com/problems/find-first-and-last-position-of-element-in-sorted-array
problem: 返回 target 在 nums 中的区间,不存在时返回 [-1, -1]
solution: 二分
"""
class Solution:
    def searchRange(self, nums: 'List[int]', target: int) -> 'List[int]':
        """Return [first, last] indices of target in sorted nums, or [-1, -1].

        Fixes: the original referenced ``bisect`` and ``List`` without any
        import (only the LeetCode sandbox pre-imports them), so the file
        failed standalone. Annotations are quoted to avoid evaluating the
        undefined ``List`` name at class-definition time.
        """
        # Local import keeps the snippet self-contained without touching
        # the (nonexistent) module import section.
        from bisect import bisect_left, bisect_right
        lo = bisect_left(nums, target)
        hi = bisect_right(nums, target)
        # lo == hi means target is absent; otherwise the run is [lo, hi-1].
        return [-1, -1] if lo == hi else [lo, hi - 1]
| 24.166667 | 94 | 0.593103 | class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
a = bisect.bisect_left(nums, target)
b = bisect.bisect_right(nums, target)
if a == b:
return [-1, -1]
else:
return [a, b - 1]
| true | true |
1c46aad880355143649c5f0dc5f7f6f388eb64a8 | 3,783 | py | Python | pytest_ethereum/plugins.py | jacqueswww/pytest-ethereum | d45b441bd582eb4a17c37debd1dabf061a3e56eb | [
"MIT"
] | null | null | null | pytest_ethereum/plugins.py | jacqueswww/pytest-ethereum | d45b441bd582eb4a17c37debd1dabf061a3e56eb | [
"MIT"
] | null | null | null | pytest_ethereum/plugins.py | jacqueswww/pytest-ethereum | d45b441bd582eb4a17c37debd1dabf061a3e56eb | [
"MIT"
] | null | null | null | import json
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from eth_utils import to_dict, to_hex, to_tuple
import pytest
from vyper import compiler
from web3 import Web3
from ethpm import Package
from ethpm.tools import builder as b
from ethpm.typing import Manifest
from pytest_ethereum.deployer import Deployer
@pytest.fixture
def w3() -> Web3:
    # Web3 instance backed by the in-memory eth-tester chain.
    w3 = Web3(Web3.EthereumTesterProvider())
    return w3
CONTRACTS_DIR = Path("./contracts")
SOURCES_GLOB = "**/*.vy"
@pytest.fixture
def manifest() -> Manifest:
    """Build a validated EthPM manifest from every vyper source in ./contracts."""
    if not CONTRACTS_DIR.is_dir():
        raise FileNotFoundError("no contracts_dir")
    output = generate_compiler_output(CONTRACTS_DIR.glob(SOURCES_GLOB))
    return b.build(
        {},
        b.package_name("greeter"),
        b.version("1.0.0"),
        b.manifest_version("2"),
        *generate_inline_sources(output),
        *generate_contract_types(output),
        b.validate(),
    )
def twig_manifest(path: Path, name: str, version: str) -> Manifest:
    """Build a validated manifest from all vyper sources found under ``path``."""
    output = generate_compiler_output(path.glob(SOURCES_GLOB))
    return b.build(
        {},
        b.package_name(name),
        b.version(version),
        b.manifest_version("2"),
        *generate_inline_sources(output),
        *generate_contract_types(output),
        b.validate(),
    )
@to_tuple
def generate_inline_sources(compiler_output: Dict[str, Any]) -> Iterable[Manifest]:
    """Yield an inline-source manifest builder for every compiled source file."""
    for source_path in compiler_output:
        # "<dir>/<Name>.vy" -> "<Name>"
        type_name = source_path.split("/")[-1].split(".")[0]
        yield b.inline_source(type_name, compiler_output)
@to_tuple
def generate_contract_types(compiler_output: Dict[str, Any]) -> Iterable[Manifest]:
    """Yield a contract-type manifest builder for every compiled source file."""
    for source_path in compiler_output:
        # "<dir>/<Name>.vy" -> "<Name>"
        type_name = source_path.split("/")[-1].split(".")[0]
        yield b.contract_type(type_name, compiler_output)
@to_dict
def generate_compiler_output(
    all_sources: List[Path]
) -> Iterable[Tuple[str, Dict[str, Any]]]:
    """Map each source path to its vyper compiler output, keyed by type name."""
    # todo fix to accomodate multiple types in a single contract file
    for source in all_sources:
        type_name = str(source).split("/")[-1].split(".")[0]
        yield str(source), {type_name: create_raw_asset_data(source.read_text())}
def create_raw_asset_data(source: str) -> Dict[str, Any]:
    """Compile a vyper source string into manifest-shaped ABI/bytecode data."""
    abi = compiler.mk_full_signature(source)
    bytecode = {
        "object": to_hex(compiler.compile(source)),
        "linkReferences": {},
    }
    return {"abi": abi, "evm": {"bytecode": bytecode}}
@pytest.fixture
def package(manifest: Manifest, w3: Web3) -> Package:
    """A ``Package`` wrapping the manifest fixture and test ``Web3``."""
    pkg = Package(manifest, w3)
    return pkg
# todo squash deployers
@pytest.fixture
def vy_deployer(package: Package) -> Deployer:
    """A ``Deployer`` for the ``package`` fixture."""
    deployer = Deployer(package)
    return deployer
@pytest.fixture
def twig_deployer(w3: Web3) -> Callable[[Path, str, str], Deployer]:
    """Factory fixture: build a ``Deployer`` from a directory of sources."""
    def _twig_deployer(
        path: Path, name: Optional[str] = "twig", version: Optional[str] = "1.0.0"
    ) -> Deployer:
        built = twig_manifest(path, name, version)
        return Deployer(Package(built, w3))
    return _twig_deployer
@pytest.fixture
def solc_deployer(w3: Web3) -> Callable[[Path], Deployer]:
    """Factory fixture: build a ``Deployer`` from a manifest JSON file."""
    def _solc_deployer(path: Path) -> Deployer:
        loaded = json.loads(path.read_text())
        return Deployer(Package(loaded, w3))
    return _solc_deployer
| 28.877863 | 85 | 0.680941 | import json
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from eth_utils import to_dict, to_hex, to_tuple
import pytest
from vyper import compiler
from web3 import Web3
from ethpm import Package
from ethpm.tools import builder as b
from ethpm.typing import Manifest
from pytest_ethereum.deployer import Deployer
@pytest.fixture
def w3() -> Web3:
    """A ``Web3`` instance backed by the in-memory eth-tester provider."""
    w3 = Web3(Web3.EthereumTesterProvider())
    return w3
# Directory scanned (recursively, via SOURCES_GLOB) for Vyper sources.
CONTRACTS_DIR = Path("./contracts")
# Glob matching all Vyper (.vy) source files under a directory.
SOURCES_GLOB = "**/*.vy"
@pytest.fixture
def manifest() -> Manifest:
    """Build a v2 manifest named "greeter" from CONTRACTS_DIR sources."""
    if not CONTRACTS_DIR.is_dir():
        raise FileNotFoundError("no contracts_dir")
    all_sources = CONTRACTS_DIR.glob(SOURCES_GLOB)
    compiler_output = generate_compiler_output(all_sources)
    composed_contract_types = generate_contract_types(compiler_output)
    composed_inline_sources = generate_inline_sources(compiler_output)
    manifest = b.build(
        {},
        b.package_name("greeter"),
        b.version("1.0.0"),
        b.manifest_version("2"),
        *composed_inline_sources,
        *composed_contract_types,
        b.validate(),
    )
    return manifest
def twig_manifest(path: Path, name: str, version: str) -> Manifest:
    """Compile every Vyper source under *path* into a v2 manifest."""
    all_sources = path.glob(SOURCES_GLOB)
    compiler_output = generate_compiler_output(all_sources)
    composed_contract_types = generate_contract_types(compiler_output)
    composed_inline_sources = generate_inline_sources(compiler_output)
    manifest = b.build(
        {},
        b.package_name(name),
        b.version(version),
        b.manifest_version("2"),
        *composed_inline_sources,
        *composed_contract_types,
        b.validate(),
    )
    return manifest
@to_tuple
def generate_inline_sources(compiler_output: Dict[str, Any]) -> Iterable[Manifest]:
    """Yield a ``b.inline_source`` builder for each compiled source path."""
    for path in compiler_output.keys():
        contract_type = path.split("/")[-1].split(".")[0]
        yield b.inline_source(contract_type, compiler_output)
@to_tuple
def generate_contract_types(compiler_output: Dict[str, Any]) -> Iterable[Manifest]:
    """Yield a ``b.contract_type`` builder for each compiled source path."""
    for path in compiler_output.keys():
        contract_type = path.split("/")[-1].split(".")[0]
        yield b.contract_type(contract_type, compiler_output)
@to_dict
def generate_compiler_output(
    all_sources: List[Path]
) -> Iterable[Tuple[str, Dict[str, Any]]]:
    """Compile each source file and map its path to raw asset data."""
    for source in all_sources:
        contract_file = str(source).split("/")[-1]
        # Contract type is the filename's text before the first dot.
        contract_type = contract_file.split(".")[0]
        # todo fix to accomodate multiple types in a single contract file
        yield str(source), {contract_type: create_raw_asset_data(source.read_text())}
def create_raw_asset_data(source: str) -> Dict[str, Any]:
    """Compile Vyper *source* text into an ABI + bytecode asset dict."""
    return {
        "abi": compiler.mk_full_signature(source),
        "evm": {
            "bytecode": {
                "object": to_hex(compiler.compile(source)),
                "linkReferences": {},
            }
        },
    }
@pytest.fixture
def package(manifest: Manifest, w3: Web3) -> Package:
    """A ``Package`` wrapping the manifest fixture and test ``Web3``."""
    return Package(manifest, w3)
@pytest.fixture
def vy_deployer(package: Package) -> Deployer:
    """A ``Deployer`` for the ``package`` fixture."""
    return Deployer(package)
@pytest.fixture
def twig_deployer(w3: Web3) -> Callable[[Path, str, str], Deployer]:
    """Factory fixture: build a ``Deployer`` from a directory of sources."""
    def _twig_deployer(
        path: Path, name: Optional[str] = "twig", version: Optional[str] = "1.0.0"
    ) -> Deployer:
        manifest = twig_manifest(path, name, version)
        pkg = Package(manifest, w3)
        return Deployer(pkg)
    return _twig_deployer
@pytest.fixture
def solc_deployer(w3: Web3) -> Callable[[Path], Deployer]:
    """Factory fixture: build a ``Deployer`` from a manifest JSON file."""
    def _solc_deployer(path: Path) -> Deployer:
        manifest = json.loads(path.read_text())
        package = Package(manifest, w3)
        return Deployer(package)
    return _solc_deployer
| true | true |
1c46ab3936f9d783c9129bd39881ccdee4abfb5d | 25,848 | py | Python | tools/efrotools/pybuild.py | nitingupta910/ballistica | 7c8c645592ac184e80e409c14c7607f91fcc89df | [
"MIT"
] | 317 | 2020-04-04T00:33:10.000Z | 2022-03-28T01:07:09.000Z | tools/efrotools/pybuild.py | Alshahriah/ballistica | 326f6677a0118667e93ce9034849622ebef706fa | [
"MIT"
] | 315 | 2020-04-04T22:33:10.000Z | 2022-03-31T22:50:02.000Z | tools/efrotools/pybuild.py | Alshahriah/ballistica | 326f6677a0118667e93ce9034849622ebef706fa | [
"MIT"
] | 97 | 2020-04-04T01:32:17.000Z | 2022-03-16T19:02:59.000Z | # Released under the MIT License. See LICENSE for details.
#
"""Functionality related to building python for ios, android, etc."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING
from efrotools import PYVER, run, readfile, writefile, replace_one
if TYPE_CHECKING:
from typing import Any
# NOTE(review): ENABLE_OPENSSL is not referenced in this file's visible
# code; confirm whether it is still needed.
ENABLE_OPENSSL = True
# Newer Python drops the 'm' ABI suffix from lib names (see build_apple).
NEWER_PY_TEST = True
# Exact Python versions expected by the android/apple support repos.
PY_VER_EXACT_ANDROID = '3.9.7'
PY_VER_EXACT_APPLE = '3.9.6'
# Filenames we prune from Python lib dirs in source repo to cut down on size.
PRUNE_LIB_NAMES = [
    'config-*', 'idlelib', 'lib-dynload', 'lib2to3', 'multiprocessing',
    'pydoc_data', 'site-packages', 'ensurepip', 'tkinter', 'wsgiref',
    'distutils', 'turtle.py', 'turtledemo', 'test', 'sqlite3/test', 'unittest',
    'dbm', 'venv', 'ctypes/test', 'imaplib.py', '_sysconfigdata_*'
]
# Same but for DLLs dir (windows only)
PRUNE_DLL_NAMES = ['*.ico']
def build_apple(arch: str, debug: bool = False) -> None:
    """Run a build for the provided apple arch (mac, ios, or tvos).

    Clones the beeware Python-Apple-support repo into build/, patches its
    Makefile (doc-strings on, newer min OS versions, optional debug
    flags, and our custom pre-build patch hook), then runs the build.

    arch: 'mac', 'ios', or 'tvos'.
    debug: if True, build with --with-pydebug and debug library names.
    """
    import platform
    import subprocess
    from efro.error import CleanError
    # IMPORTANT; seems we currently wind up building against /usr/local gettext
    # stuff. Hopefully the maintainer fixes this, but for now I need to
    # remind myself to blow it away while building.
    # (via brew remove gettext --ignore-dependencies)
    if ('MacBook-Fro' in platform.node()
            and os.environ.get('SKIP_GETTEXT_WARNING') != '1'):
        if (subprocess.run('which gettext', shell=True,
                           check=False).returncode == 0):
            raise CleanError(
                'NEED TO TEMP-KILL GETTEXT (or set SKIP_GETTEXT_WARNING=1)')
    builddir = 'build/python_apple_' + arch + ('_debug' if debug else '')
    run('rm -rf "' + builddir + '"')
    run('mkdir -p build')
    run('git clone '
        'https://github.com/beeware/Python-Apple-support.git "' + builddir +
        '"')
    os.chdir(builddir)
    # TEMP: Check out a particular commit while the branch head is broken.
    # We can actually fix this to use the current one, but something
    # broke in the underlying build even on old commits so keeping it
    # locked for now...
    # run('git checkout bf1ed73d0d5ff46862ba69dd5eb2ffaeff6f19b6')
    run(f'git checkout {PYVER}')
    txt = readfile('Makefile')
    # Fix a bug where spaces in PATH cause errors (darn you vmware fusion!)
    txt = replace_one(
        txt, '&& PATH=$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH) .',
        '&& PATH="$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH)" .')
    # Turn doc strings on; looks like it only adds a few hundred k.
    txt = txt.replace('--without-doc-strings', '--with-doc-strings')
    # Set mac/ios version reqs
    # (see issue with utimensat and futimens).
    txt = replace_one(txt, 'MACOSX_DEPLOYMENT_TARGET=10.8',
                      'MACOSX_DEPLOYMENT_TARGET=10.15')
    # And equivalent iOS (11+).
    txt = replace_one(txt, 'CFLAGS-iOS=-mios-version-min=8.0',
                      'CFLAGS-iOS=-mios-version-min=13.0')
    # Ditto for tvOS.
    txt = replace_one(txt, 'CFLAGS-tvOS=-mtvos-version-min=9.0',
                      'CFLAGS-tvOS=-mtvos-version-min=13.0')
    if debug:
        # Add debug build flag
        # (Currently expect to find 2 instances of this).
        dline = '--with-doc-strings --enable-ipv6 --without-ensurepip'
        splitlen = len(txt.split(dline))
        if splitlen != 3:
            raise Exception('unexpected configure lines')
        txt = txt.replace(dline, '--with-pydebug ' + dline)
        # Debug has a different name.
        # (Currently expect to replace 12 instances of this).
        dline = ('python$(PYTHON_VER)'
                 if NEWER_PY_TEST else 'python$(PYTHON_VER)m')
        splitlen = len(txt.split(dline))
        if splitlen != 13:
            raise RuntimeError(f'Unexpected configure line count {splitlen}.')
        txt = txt.replace(
            dline, 'python$(PYTHON_VER)d'
            if NEWER_PY_TEST else 'python$(PYTHON_VER)dm')
    # Inject our custom modifications to fire before building.
    txt = txt.replace(
        '    # Configure target Python\n',
        '    cd $$(PYTHON_DIR-$1) && '
        f'../../../../../tools/pcommand python_apple_patch {arch}\n'
        '    # Configure target Python\n',
    )
    writefile('Makefile', txt)
    # Ok; let 'er rip.
    # (we run these in parallel so limit to 1 job a piece;
    # otherwise they inherit the -j12 or whatever from the top level)
    # (also this build seems to fail with multiple threads)
    run(
        'make -j1 ' + {
            'mac': 'Python-macOS',
            # 'mac': 'build/macOS/Python-3.9.6-macOS/Makefile',
            'ios': 'Python-iOS',
            'tvos': 'Python-tvOS'
        }[arch])
    print('python build complete! (apple/' + arch + ')')
def apple_patch(arch: str) -> None:
    """Run necessary patches on an apple archive before building.

    arch: passed through to the shared Setup patching ('mac' adds the
    mac-only _scproxy module there).
    """
    # Here's the deal: we want our custom static python libraries to
    # be as similar as possible on apple platforms and android, so let's
    # blow away all the tweaks that this setup does to Setup.local and
    # instead apply our very similar ones directly to Setup, just as we
    # do for android.
    with open('Modules/Setup.local', 'w', encoding='utf-8') as outfile:
        outfile.write('# cleared by efrotools build\n')
    _patch_setup_file('apple', arch)
def build_android(rootdir: str, arch: str, debug: bool = False) -> None:
    """Run a build for android with the given architecture.

    (can be arm, arm64, x86, or x86_64)

    rootdir: project root; used to locate the pcommand tool and NDK path.
    debug: if True, pass --with-pydebug to the build.
    """
    import subprocess
    builddir = 'build/python_android_' + arch + ('_debug' if debug else '')
    run('rm -rf "' + builddir + '"')
    run('mkdir -p build')
    run('git clone '
        'https://github.com/yan12125/python3-android.git "' + builddir + '"')
    os.chdir(builddir)
    # These builds require ANDROID_NDK to be set; make sure that's the case.
    os.environ['ANDROID_NDK'] = subprocess.check_output(
        [f'{rootdir}/tools/pcommand', 'android_sdk_utils',
         'get-ndk-path']).decode().strip()
    # Disable builds for dependencies we don't use.
    ftxt = readfile('Android/build_deps.py')
    # ftxt = replace_one(ftxt, '        NCurses,\n',
    #                    '        # NCurses,\n',)
    ftxt = replace_one(
        ftxt,
        '        '
        'BZip2, GDBM, LibFFI, LibUUID, OpenSSL, Readline, SQLite, XZ, ZLib,\n',
        '        '
        'BZip2, LibUUID, OpenSSL, SQLite, XZ, ZLib,\n',
    )
    # Older ssl seems to choke on newer ndk layouts.
    ftxt = replace_one(
        ftxt,
        "source = 'https://www.openssl.org/source/openssl-1.1.1h.tar.gz'",
        "source = 'https://www.openssl.org/source/openssl-1.1.1l.tar.gz'")
    writefile('Android/build_deps.py', ftxt)
    # Tweak some things in the base build script; grab the right version
    # of Python and also inject some code to modify bits of python
    # after it is extracted.
    ftxt = readfile('build.sh')
    ftxt = replace_one(ftxt, 'PYVER=3.9.0', f'PYVER={PY_VER_EXACT_ANDROID}')
    ftxt = replace_one(
        ftxt, '    popd\n', f'    ../../../tools/pcommand'
        f' python_android_patch Python-{PY_VER_EXACT_ANDROID}\n    popd\n')
    writefile('build.sh', ftxt)
    # Ok, let 'er rip
    # (we often run these builds in parallel so limit to 1 job a piece;
    # otherwise they each inherit the -j12 or whatever from the top level).
    exargs = ' --with-pydebug' if debug else ''
    run(f'ARCH={arch} ANDROID_API=21 ./build.sh{exargs}')
    print('python build complete! (android/' + arch + ')')
def android_patch() -> None:
    """Run necessary patches on an android archive before building."""
    # _patch_setup_file only consults arch for its mac-specific additions,
    # so a placeholder is passed here.
    _patch_setup_file('android', '?')
def _patch_setup_file(platform: str, arch: str) -> None:
    """Rewrite Modules/Setup to statically build our chosen extension modules.

    platform: 'android' or 'apple' (selects per-platform compile/link flags).
    arch: only consulted for the mac-specific _scproxy addition.
    """
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    fname = 'Modules/Setup'
    ftxt = readfile(fname)
    if platform == 'android':
        prefix = '$(srcdir)/Android/sysroot/usr'
        uuid_ex = f' -L{prefix}/lib -luuid'
        zlib_ex = f' -I{prefix}/include -L{prefix}/lib -lz'
        bz2_ex = f' -I{prefix}/include -L{prefix}/lib -lbz2'
        ssl_ex = f' -DUSE_SSL -I{prefix}/include -L{prefix}/lib -lssl -lcrypto'
        sqlite_ex = f' -I{prefix}/include -L{prefix}/lib'
        hash_ex = ' -DUSE_SSL -lssl -lcrypto'
        lzma_ex = ' -llzma'
    elif platform == 'apple':
        prefix = '$(srcdir)/Android/sysroot/usr'
        uuid_ex = ''
        zlib_ex = ' -I$(prefix)/include -lz'
        bz2_ex = (' -I$(srcdir)/../Support/BZip2/Headers'
                  ' -L$(srcdir)/../Support/BZip2 -lbzip2')
        ssl_ex = (' -I$(srcdir)/../Support/OpenSSL/Headers'
                  ' -L$(srcdir)/../Support/OpenSSL -lOpenSSL -DUSE_SSL')
        sqlite_ex = ' -I$(srcdir)/Modules/_sqlite'
        hash_ex = (' -I$(srcdir)/../Support/OpenSSL/Headers'
                   ' -L$(srcdir)/../Support/OpenSSL -lOpenSSL -DUSE_SSL')
        lzma_ex = (' -I$(srcdir)/../Support/XZ/Headers'
                   ' -L$(srcdir)/../Support/XZ/ -lxz')
    else:
        raise RuntimeError(f'Unknown platform {platform}')
    # This list should contain all possible compiled modules to start.
    # If any .so files are coming out of builds, their names should be
    # added here to stop that.
    cmodules = [
        '_asyncio', '_bisect', '_blake2', '_codecs_cn', '_codecs_hk',
        '_codecs_iso2022', '_codecs_jp', '_codecs_kr', '_codecs_tw',
        '_contextvars', '_crypt', '_csv', '_ctypes_test', '_ctypes',
        '_curses_panel', '_curses', '_datetime', '_decimal', '_elementtree',
        '_heapq', '_json', '_lsprof', '_lzma', '_md5', '_multibytecodec',
        '_multiprocessing', '_opcode', '_pickle', '_posixsubprocess', '_queue',
        '_random', '_sha1', '_sha3', '_sha256', '_sha512', '_socket',
        '_statistics', '_struct', '_testbuffer', '_testcapi',
        '_testimportmultiple', '_testinternalcapi', '_testmultiphase', '_uuid',
        '_xxsubinterpreters', '_xxtestfuzz', '_zoneinfo', 'array', 'audioop',
        'binascii', 'cmath', 'fcntl', 'grp', 'math', 'mmap', 'ossaudiodev',
        'parser', 'pyexpat', 'resource', 'select', 'syslog', 'termios',
        'unicodedata', 'xxlimited', 'zlib'
    ]
    # Selectively uncomment some existing modules for static compilation.
    enables = [
        '_asyncio', 'array', 'cmath', 'math', '_contextvars', '_struct',
        '_random', '_elementtree', '_pickle', '_datetime', '_zoneinfo',
        '_bisect', '_heapq', '_json', '_statistics', 'unicodedata', 'fcntl',
        'select', 'mmap', '_csv', '_socket', '_sha3', '_blake2', 'binascii',
        '_posixsubprocess'
    ]
    # Note that the _md5 and _sha modules are normally only built if the
    # system does not have the OpenSSL libs containing an optimized
    # version.
    if bool(False):
        enables += ['_md5']
    for enable in enables:
        ftxt = replace_one(ftxt, f'#{enable} ', f'{enable} ')
        cmodules.remove(enable)
    # Disable ones that were enabled:
    disables = ['xxsubtype']
    for disable in disables:
        ftxt = replace_one(ftxt, f'\n{disable} ', f'\n#{disable} ')
    # Additions:
    ftxt += '\n# Additions by efrotools:\n'
    if bool(True):
        ftxt += f'_uuid _uuidmodule.c{uuid_ex}\n'
        cmodules.remove('_uuid')
    ftxt += f'zlib zlibmodule.c{zlib_ex}\n'
    cmodules.remove('zlib')
    # Why isn't this getting built as a shared lib by default?
    # Do we need it for sure?
    ftxt += f'_hashlib _hashopenssl.c{hash_ex}\n'
    ftxt += f'_lzma _lzmamodule.c{lzma_ex}\n'
    cmodules.remove('_lzma')
    ftxt += f'_bz2 _bz2module.c{bz2_ex}\n'
    ftxt += f'_ssl _ssl.c{ssl_ex}\n'
    ftxt += (f'_sqlite3'
             f' _sqlite/cache.c'
             f' _sqlite/connection.c'
             f' _sqlite/cursor.c'
             f' _sqlite/microprotocols.c'
             f' _sqlite/module.c'
             f' _sqlite/prepare_protocol.c'
             f' _sqlite/row.c'
             f' _sqlite/statement.c'
             f' _sqlite/util.c'
             f'{sqlite_ex}'
             f' -DMODULE_NAME=\'\\"sqlite3\\"\''
             f' -DSQLITE_OMIT_LOAD_EXTENSION'
             f' -lsqlite3\n')
    # Mac needs this:
    if arch == 'mac':
        ftxt += ('\n'
                 '# efrotools: mac urllib needs this:\n'
                 '_scproxy _scproxy.c '
                 '-framework SystemConfiguration '
                 '-framework CoreFoundation\n')
    # Explicitly mark the remaining ones as disabled
    # (so Python won't try to build them as dynamic libs).
    remaining_disabled = ' '.join(cmodules)
    ftxt += ('\n# Disabled by efrotools build:\n'
             '*disabled*\n'
             f'{remaining_disabled}\n')
    writefile(fname, ftxt)
    # Ok, this is weird.
    # When applying the module Setup, python looks for any line containing *=*
    # and interprets the whole thing a a global define?...
    # This breaks things for our static sqlite compile above.
    # The check used to look for [A-Z]*=* which didn't break, so let' just
    # change it back to that for now.
    # UPDATE: Currently this seems to only be necessary on Android;
    # perhaps this broke between 3.9.6 and 3.9.7 or perhaps the apple
    # bundle already patches it ¯\_(ツ)_/¯
    fname = 'Modules/makesetup'
    txt = readfile(fname)
    if platform == 'android':
        txt = replace_one(txt, '		*=*)'
                          '	DEFS="$line$NL$DEFS"; continue;;',
                          '		[A-Z]*=*)	DEFS="$line$NL$DEFS";'
                          ' continue;;')
        assert txt.count('[A-Z]*=*') == 1
    writefile(fname, txt)
def winprune() -> None:
    """Prune unneeded files from windows python dists."""
    lib_dirs = ('assets/src/windows/Win32/Lib', 'assets/src/windows/x64/Lib')
    dll_dirs = ('assets/src/windows/Win32/DLLs', 'assets/src/windows/x64/DLLs')
    for libdir in lib_dirs:
        assert os.path.isdir(libdir)
        run(f'cd "{libdir}" && rm -rf ' + ' '.join(PRUNE_LIB_NAMES))
    for dlldir in dll_dirs:
        assert os.path.isdir(dlldir)
        run(f'cd "{dlldir}" && rm -rf ' + ' '.join(PRUNE_DLL_NAMES))
    print('Win-prune successful.')
def gather() -> None:
    """Gather per-platform python headers, libs, and modules together.

    This assumes all embeddable py builds have been run successfully,
    and that PROJROOT is the cwd.
    """
    # pylint: disable=too-many-locals
    do_android = True
    # First off, clear out any existing output.
    existing_dirs = [
        os.path.join('src/external', d) for d in os.listdir('src/external')
        if d.startswith('python-') and d != 'python-notes.txt'
    ]
    existing_dirs += [
        os.path.join('assets/src', d) for d in os.listdir('assets/src')
        if d.startswith('pylib-')
    ]
    if not do_android:
        existing_dirs = [d for d in existing_dirs if 'android' not in d]
    for existing_dir in existing_dirs:
        run('rm -rf "' + existing_dir + '"')
    # Android builds keep dependency libs inside the CPython source's
    # Android sysroot; this is the path suffix leading there.
    apost2 = f'src/Python-{PY_VER_EXACT_ANDROID}/Android/sysroot'
    for buildtype in ['debug', 'release']:
        debug = buildtype == 'debug'
        bsuffix = '_debug' if buildtype == 'debug' else ''
        bsuffix2 = '-debug' if buildtype == 'debug' else ''
        # Debug Python libs carry a 'd' suffix.
        libname = 'python' + PYVER + ('d' if debug else '')
        bases = {
            'mac': f'build/python_apple_mac{bsuffix}/build/macOS',
            'ios': f'build/python_apple_ios{bsuffix}/build/iOS',
            'tvos': f'build/python_apple_tvos{bsuffix}/build/tvOS',
            'android_arm': f'build/python_android_arm{bsuffix}/build',
            'android_arm64': f'build/python_android_arm64{bsuffix}/build',
            'android_x86': f'build/python_android_x86{bsuffix}/build',
            'android_x86_64': f'build/python_android_x86_64{bsuffix}/build'
        }
        bases2 = {
            'android_arm': f'build/python_android_arm{bsuffix}/{apost2}',
            'android_arm64': f'build/python_android_arm64{bsuffix}/{apost2}',
            'android_x86': f'build/python_android_x86{bsuffix}/{apost2}',
            'android_x86_64': f'build/python_android_x86_64{bsuffix}/{apost2}'
        }
        # Note: only need pylib for the first in each group.
        builds: list[dict[str, Any]] = [{
            'name':
                'macos',
            'group':
                'apple',
            'headers':
                bases['mac'] + '/Support/Python/Headers',
            'libs': [
                bases['mac'] + '/Support/Python/libPython.a',
                bases['mac'] + '/Support/OpenSSL/libOpenSSL.a',
                bases['mac'] + '/Support/XZ/libxz.a',
                bases['mac'] + '/Support/BZip2/libbzip2.a',
            ],
            'pylib':
                (bases['mac'] + f'/Python-{PY_VER_EXACT_APPLE}-macOS/lib'),
        }, {
            'name':
                'ios',
            'group':
                'apple',
            'headers':
                bases['ios'] + '/Support/Python/Headers',
            'libs': [
                bases['ios'] + '/Support/Python/libPython.a',
                bases['ios'] + '/Support/OpenSSL/libOpenSSL.a',
                bases['ios'] + '/Support/XZ/libxz.a',
                bases['ios'] + '/Support/BZip2/libbzip2.a',
            ],
        }, {
            'name':
                'tvos',
            'group':
                'apple',
            'headers':
                bases['tvos'] + '/Support/Python/Headers',
            'libs': [
                bases['tvos'] + '/Support/Python/libPython.a',
                bases['tvos'] + '/Support/OpenSSL/libOpenSSL.a',
                bases['tvos'] + '/Support/XZ/libxz.a',
                bases['tvos'] + '/Support/BZip2/libbzip2.a',
            ],
        }, {
            'name': 'android_arm',
            'group': 'android',
            'headers': bases['android_arm'] + f'/usr/include/{libname}',
            'libs': [
                bases['android_arm'] + f'/usr/lib/lib{libname}.a',
                bases2['android_arm'] + '/usr/lib/libssl.a',
                bases2['android_arm'] + '/usr/lib/libcrypto.a',
                bases2['android_arm'] + '/usr/lib/liblzma.a',
                bases2['android_arm'] + '/usr/lib/libsqlite3.a',
                bases2['android_arm'] + '/usr/lib/libbz2.a',
                bases2['android_arm'] + '/usr/lib/libuuid.a',
            ],
            'libinst': 'android_armeabi-v7a',
            'pylib': (bases['android_arm'] + '/usr/lib/python' + PYVER),
        }, {
            'name': 'android_arm64',
            'group': 'android',
            'headers': bases['android_arm64'] + f'/usr/include/{libname}',
            'libs': [
                bases['android_arm64'] + f'/usr/lib/lib{libname}.a',
                bases2['android_arm64'] + '/usr/lib/libssl.a',
                bases2['android_arm64'] + '/usr/lib/libcrypto.a',
                bases2['android_arm64'] + '/usr/lib/liblzma.a',
                bases2['android_arm64'] + '/usr/lib/libsqlite3.a',
                bases2['android_arm64'] + '/usr/lib/libbz2.a',
                bases2['android_arm64'] + '/usr/lib/libuuid.a',
            ],
            'libinst': 'android_arm64-v8a',
        }, {
            'name': 'android_x86',
            'group': 'android',
            'headers': bases['android_x86'] + f'/usr/include/{libname}',
            'libs': [
                bases['android_x86'] + f'/usr/lib/lib{libname}.a',
                bases2['android_x86'] + '/usr/lib/libssl.a',
                bases2['android_x86'] + '/usr/lib/libcrypto.a',
                bases2['android_x86'] + '/usr/lib/liblzma.a',
                bases2['android_x86'] + '/usr/lib/libsqlite3.a',
                bases2['android_x86'] + '/usr/lib/libbz2.a',
                bases2['android_x86'] + '/usr/lib/libuuid.a',
            ],
            'libinst': 'android_x86',
        }, {
            'name': 'android_x86_64',
            'group': 'android',
            'headers': bases['android_x86_64'] + f'/usr/include/{libname}',
            'libs': [
                bases['android_x86_64'] + f'/usr/lib/lib{libname}.a',
                bases2['android_x86_64'] + '/usr/lib/libssl.a',
                bases2['android_x86_64'] + '/usr/lib/libcrypto.a',
                bases2['android_x86_64'] + '/usr/lib/liblzma.a',
                bases2['android_x86_64'] + '/usr/lib/libsqlite3.a',
                bases2['android_x86_64'] + '/usr/lib/libbz2.a',
                bases2['android_x86_64'] + '/usr/lib/libuuid.a',
            ],
            'libinst': 'android_x86_64',
        }]
        for build in builds:
            grp = build['group']
            if not do_android and grp == 'android':
                continue
            builddir = f'src/external/python-{grp}{bsuffix2}'
            header_dst = os.path.join(builddir, 'include')
            lib_dst = os.path.join(builddir, 'lib')
            assets_src_dst = f'assets/src/pylib-{grp}'
            # Do some setup only once per group.
            if not os.path.exists(builddir):
                run('mkdir -p "' + builddir + '"')
                run('mkdir -p "' + lib_dst + '"')
                # Only pull modules into game assets on release pass.
                if not debug:
                    # Copy system modules into the src assets
                    # dir for this group.
                    run('mkdir -p "' + assets_src_dst + '"')
                    run('rsync --recursive --include "*.py"'
                        ' --exclude __pycache__ --include "*/" --exclude "*" "'
                        + build['pylib'] + '/" "' + assets_src_dst + '"')
                    # Prune a bunch of modules we don't need to cut
                    # down on size.
                    run('cd "' + assets_src_dst + '" && rm -rf ' +
                        ' '.join(PRUNE_LIB_NAMES))
                    # Some minor filtering to system scripts:
                    # on iOS/tvOS, addusersitepackages() leads to a crash
                    # due to _sysconfigdata_dm_ios_darwin module not existing,
                    # so let's skip that.
                    fname = f'{assets_src_dst}/site.py'
                    txt = readfile(fname)
                    txt = replace_one(
                        txt,
                        '    known_paths = addusersitepackages(known_paths)',
                        '    # efro tweak: this craps out on ios/tvos.\n'
                        '    # (and we don\'t use it anyway)\n'
                        '    # known_paths = addusersitepackages(known_paths)')
                    writefile(fname, txt)
                # Copy in a base set of headers (everything in a group should
                # be using the same headers)
                run(f'cp -r "{build["headers"]}" "{header_dst}"')
                # Clear whatever pyconfigs came across; we'll build our own
                # universal one below.
                run('rm ' + header_dst + '/pyconfig*')
                # Write a master pyconfig header that reroutes to each
                # platform's actual header.
                with open(header_dst + '/pyconfig.h', 'w',
                          encoding='utf-8') as hfile:
                    hfile.write(
                        '#if BA_OSTYPE_MACOS\n'
                        '#include "pyconfig-macos.h"\n\n'
                        '#elif BA_OSTYPE_IOS\n'
                        '#include "pyconfig-ios.h"\n\n'
                        '#elif BA_OSTYPE_TVOS\n'
                        '#include "pyconfig-tvos.h"\n\n'
                        '#elif BA_OSTYPE_ANDROID and defined(__arm__)\n'
                        '#include "pyconfig-android_arm.h"\n\n'
                        '#elif BA_OSTYPE_ANDROID and defined(__aarch64__)\n'
                        '#include "pyconfig-android_arm64.h"\n\n'
                        '#elif BA_OSTYPE_ANDROID and defined(__i386__)\n'
                        '#include "pyconfig-android_x86.h"\n\n'
                        '#elif BA_OSTYPE_ANDROID and defined(__x86_64__)\n'
                        '#include "pyconfig-android_x86_64.h"\n\n'
                        '#else\n'
                        '#error unknown platform\n\n'
                        '#endif\n')
            # Now copy each build's config headers in with unique names.
            cfgs = [
                f for f in os.listdir(build['headers'])
                if f.startswith('pyconfig')
            ]
            # Copy config headers to their filtered names.
            for cfg in cfgs:
                out = cfg.replace('pyconfig', 'pyconfig-' + build['name'])
                if cfg == 'pyconfig.h':
                    # For platform's root pyconfig.h we need to filter
                    # contents too (those headers can themselves include
                    # others; ios for instance points to a arm64 and a
                    # x86_64 variant).
                    contents = readfile(build['headers'] + '/' + cfg)
                    contents = contents.replace('pyconfig',
                                                'pyconfig-' + build['name'])
                    writefile(header_dst + '/' + out, contents)
                else:
                    # other configs we just rename
                    run('cp "' + build['headers'] + '/' + cfg + '" "' +
                        header_dst + '/' + out + '"')
            # Copy in libs. If the lib gave a specific install name,
            # use that; otherwise use name.
            targetdir = lib_dst + '/' + build.get('libinst', build['name'])
            run('rm -rf "' + targetdir + '"')
            run('mkdir -p "' + targetdir + '"')
            for lib in build['libs']:
                run('cp "' + lib + '" "' + targetdir + '"')
    print('Great success!')
| 41.757674 | 79 | 0.548205 |
from __future__ import annotations
import os
from typing import TYPE_CHECKING
from efrotools import PYVER, run, readfile, writefile, replace_one
if TYPE_CHECKING:
from typing import Any
ENABLE_OPENSSL = True
NEWER_PY_TEST = True
PY_VER_EXACT_ANDROID = '3.9.7'
PY_VER_EXACT_APPLE = '3.9.6'
PRUNE_LIB_NAMES = [
'config-*', 'idlelib', 'lib-dynload', 'lib2to3', 'multiprocessing',
'pydoc_data', 'site-packages', 'ensurepip', 'tkinter', 'wsgiref',
'distutils', 'turtle.py', 'turtledemo', 'test', 'sqlite3/test', 'unittest',
'dbm', 'venv', 'ctypes/test', 'imaplib.py', '_sysconfigdata_*'
]
PRUNE_DLL_NAMES = ['*.ico']
def build_apple(arch: str, debug: bool = False) -> None:
    """Run a build for the provided apple arch (mac, ios, or tvos).

    arch: 'mac', 'ios', or 'tvos'.
    debug: if True, build with --with-pydebug and debug library names.
    """
    import platform
    import subprocess
    from efro.error import CleanError
    # Guard against accidentally building against homebrew gettext on the
    # maintainer's machine (override with SKIP_GETTEXT_WARNING=1).
    if ('MacBook-Fro' in platform.node()
            and os.environ.get('SKIP_GETTEXT_WARNING') != '1'):
        if (subprocess.run('which gettext', shell=True,
                           check=False).returncode == 0):
            raise CleanError(
                'NEED TO TEMP-KILL GETTEXT (or set SKIP_GETTEXT_WARNING=1)')
    builddir = 'build/python_apple_' + arch + ('_debug' if debug else '')
    run('rm -rf "' + builddir + '"')
    run('mkdir -p build')
    run('git clone '
        'https://github.com/beeware/Python-Apple-support.git "' + builddir +
        '"')
    os.chdir(builddir)
    run(f'git checkout {PYVER}')
    txt = readfile('Makefile')
    # Quote PATH in the Makefile so paths containing spaces work.
    txt = replace_one(
        txt, '&& PATH=$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH) .',
        '&& PATH="$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH)" .')
    # Turn doc strings on.
    txt = txt.replace('--without-doc-strings', '--with-doc-strings')
    # Bump minimum OS versions for mac/ios/tvos.
    txt = replace_one(txt, 'MACOSX_DEPLOYMENT_TARGET=10.8',
                      'MACOSX_DEPLOYMENT_TARGET=10.15')
    txt = replace_one(txt, 'CFLAGS-iOS=-mios-version-min=8.0',
                      'CFLAGS-iOS=-mios-version-min=13.0')
    txt = replace_one(txt, 'CFLAGS-tvOS=-mtvos-version-min=9.0',
                      'CFLAGS-tvOS=-mtvos-version-min=13.0')
    if debug:
        # Add the debug build flag (expect exactly 2 occurrences).
        dline = '--with-doc-strings --enable-ipv6 --without-ensurepip'
        splitlen = len(txt.split(dline))
        if splitlen != 3:
            raise Exception('unexpected configure lines')
        txt = txt.replace(dline, '--with-pydebug ' + dline)
        # Debug libs have a different name (expect 12 occurrences).
        dline = ('python$(PYTHON_VER)'
                 if NEWER_PY_TEST else 'python$(PYTHON_VER)m')
        splitlen = len(txt.split(dline))
        if splitlen != 13:
            raise RuntimeError(f'Unexpected configure line count {splitlen}.')
        txt = txt.replace(
            dline, 'python$(PYTHON_VER)d'
            if NEWER_PY_TEST else 'python$(PYTHON_VER)dm')
    # Inject our custom patch hook to fire before building.
    txt = txt.replace(
        '    # Configure target Python\n',
        '    cd $$(PYTHON_DIR-$1) && '
        f'../../../../../tools/pcommand python_apple_patch {arch}\n'
        '    # Configure target Python\n',
    )
    writefile('Makefile', txt)
    # (we run these in parallel so limit to 1 job a piece;
    # otherwise they inherit the -j12 or whatever from the top level)
    # (also this build seems to fail with multiple threads)
    run(
        'make -j1 ' + {
            'mac': 'Python-macOS',
            # 'mac': 'build/macOS/Python-3.9.6-macOS/Makefile',
            'ios': 'Python-iOS',
            'tvos': 'Python-tvOS'
        }[arch])
    print('python build complete! (apple/' + arch + ')')
def apple_patch(arch: str) -> None:
    """Run necessary patches on an apple archive before building.

    Clears Modules/Setup.local and applies our shared Setup patching so
    apple builds match the android ones as closely as possible.
    """
    # We want our custom static python libraries to be as similar as
    # possible on apple platforms and android, so blow away the tweaks
    # this setup makes to Setup.local and instead apply our very similar
    # ones directly to Setup, just as we do for android.
    # (The write below restores a string literal that was truncated in
    # this copy of the file, leaving a syntax error.)
    with open('Modules/Setup.local', 'w', encoding='utf-8') as outfile:
        outfile.write('# cleared by efrotools build\n')
    _patch_setup_file('apple', arch)
def build_android(rootdir: str, arch: str, debug: bool = False) -> None:
    """Run a build for android with the given architecture.

    (can be arm, arm64, x86, or x86_64)

    rootdir: project root; used to locate the pcommand tool and NDK path.
    debug: if True, pass --with-pydebug to the build.
    """
    import subprocess
    builddir = 'build/python_android_' + arch + ('_debug' if debug else '')
    run('rm -rf "' + builddir + '"')
    run('mkdir -p build')
    run('git clone '
        'https://github.com/yan12125/python3-android.git "' + builddir + '"')
    os.chdir(builddir)
    # These builds require ANDROID_NDK to be set; make sure that's the case.
    os.environ['ANDROID_NDK'] = subprocess.check_output(
        [f'{rootdir}/tools/pcommand', 'android_sdk_utils',
         'get-ndk-path']).decode().strip()
    # Disable builds for dependencies we don't use.
    ftxt = readfile('Android/build_deps.py')
    # ftxt = replace_one(ftxt, '        NCurses,\n',
    #                    '        # NCurses,\n',)
    ftxt = replace_one(
        ftxt,
        '        '
        'BZip2, GDBM, LibFFI, LibUUID, OpenSSL, Readline, SQLite, XZ, ZLib,\n',
        '        '
        'BZip2, LibUUID, OpenSSL, SQLite, XZ, ZLib,\n',
    )
    # Older ssl seems to choke on newer ndk layouts.
    ftxt = replace_one(
        ftxt,
        "source = 'https://www.openssl.org/source/openssl-1.1.1h.tar.gz'",
        "source = 'https://www.openssl.org/source/openssl-1.1.1l.tar.gz'")
    writefile('Android/build_deps.py', ftxt)
    # Grab the right Python version in build.sh and inject our patch hook
    # to run after the source is extracted.
    ftxt = readfile('build.sh')
    ftxt = replace_one(ftxt, 'PYVER=3.9.0', f'PYVER={PY_VER_EXACT_ANDROID}')
    ftxt = replace_one(
        ftxt, '    popd\n', f'    ../../../tools/pcommand'
        f' python_android_patch Python-{PY_VER_EXACT_ANDROID}\n    popd\n')
    writefile('build.sh', ftxt)
    # Ok, let 'er rip
    exargs = ' --with-pydebug' if debug else ''
    run(f'ARCH={arch} ANDROID_API=21 ./build.sh{exargs}')
    print('python build complete! (android/' + arch + ')')
def android_patch() -> None:
    """Run necessary patches on an android archive before building."""
    # _patch_setup_file only consults arch for its mac-specific additions,
    # so a placeholder is passed here.
    _patch_setup_file('android', '?')
def _patch_setup_file(platform: str, arch: str) -> None:
    """Rewrite CPython's Modules/Setup to statically build the modules we use.

    Enables a curated set of extension modules (with per-platform compiler/
    linker flags), appends entries for the modules that need external libs
    (uuid/zlib/ssl/lzma/bz2/sqlite3), and marks everything else *disabled*
    so Python won't try to build them as dynamic libs.  For android it also
    patches Modules/makesetup so plain assignment lines are recognized.

    Args:
        platform: 'android' or 'apple'.
        arch: only consulted to add the mac-specific _scproxy module.

    NOTE(review): relies on module-level ``readfile``/``writefile``/
    ``replace_one`` helpers defined elsewhere in this file.
    """
    fname = 'Modules/Setup'
    ftxt = readfile(fname)
    # Per-platform extra compile/link flags for the modules appended below.
    if platform == 'android':
        prefix = '$(srcdir)/Android/sysroot/usr'
        uuid_ex = f' -L{prefix}/lib -luuid'
        zlib_ex = f' -I{prefix}/include -L{prefix}/lib -lz'
        bz2_ex = f' -I{prefix}/include -L{prefix}/lib -lbz2'
        ssl_ex = f' -DUSE_SSL -I{prefix}/include -L{prefix}/lib -lssl -lcrypto'
        sqlite_ex = f' -I{prefix}/include -L{prefix}/lib'
        hash_ex = ' -DUSE_SSL -lssl -lcrypto'
        lzma_ex = ' -llzma'
    elif platform == 'apple':
        # NOTE(review): this 'Android/sysroot' prefix looks copy-pasted from
        # the android branch; it is unused below on apple, but confirm.
        prefix = '$(srcdir)/Android/sysroot/usr'
        uuid_ex = ''
        zlib_ex = ' -I$(prefix)/include -lz'
        bz2_ex = (' -I$(srcdir)/../Support/BZip2/Headers'
                  ' -L$(srcdir)/../Support/BZip2 -lbzip2')
        ssl_ex = (' -I$(srcdir)/../Support/OpenSSL/Headers'
                  ' -L$(srcdir)/../Support/OpenSSL -lOpenSSL -DUSE_SSL')
        sqlite_ex = ' -I$(srcdir)/Modules/_sqlite'
        hash_ex = (' -I$(srcdir)/../Support/OpenSSL/Headers'
                   ' -L$(srcdir)/../Support/OpenSSL -lOpenSSL -DUSE_SSL')
        lzma_ex = (' -I$(srcdir)/../Support/XZ/Headers'
                   ' -L$(srcdir)/../Support/XZ/ -lxz')
    else:
        raise RuntimeError(f'Unknown platform {platform}')
    # Full set of commented-out extension modules in the stock Setup file;
    # entries are removed from this list as they get enabled/added, and the
    # remainder is explicitly disabled at the end.
    cmodules = [
        '_asyncio', '_bisect', '_blake2', '_codecs_cn', '_codecs_hk',
        '_codecs_iso2022', '_codecs_jp', '_codecs_kr', '_codecs_tw',
        '_contextvars', '_crypt', '_csv', '_ctypes_test', '_ctypes',
        '_curses_panel', '_curses', '_datetime', '_decimal', '_elementtree',
        '_heapq', '_json', '_lsprof', '_lzma', '_md5', '_multibytecodec',
        '_multiprocessing', '_opcode', '_pickle', '_posixsubprocess', '_queue',
        '_random', '_sha1', '_sha3', '_sha256', '_sha512', '_socket',
        '_statistics', '_struct', '_testbuffer', '_testcapi',
        '_testimportmultiple', '_testinternalcapi', '_testmultiphase', '_uuid',
        '_xxsubinterpreters', '_xxtestfuzz', '_zoneinfo', 'array', 'audioop',
        'binascii', 'cmath', 'fcntl', 'grp', 'math', 'mmap', 'ossaudiodev',
        'parser', 'pyexpat', 'resource', 'select', 'syslog', 'termios',
        'unicodedata', 'xxlimited', 'zlib'
    ]
    # Modules we enable simply by un-commenting their existing Setup lines.
    enables = [
        '_asyncio', 'array', 'cmath', 'math', '_contextvars', '_struct',
        '_random', '_elementtree', '_pickle', '_datetime', '_zoneinfo',
        '_bisect', '_heapq', '_json', '_statistics', 'unicodedata', 'fcntl',
        'select', 'mmap', '_csv', '_socket', '_sha3', '_blake2', 'binascii',
        '_posixsubprocess'
    ]
    # bool(False)/bool(True) below are constant toggles (presumably written
    # this way to quiet 'condition is always false' linter warnings).
    if bool(False):
        enables += ['_md5']
    for enable in enables:
        ftxt = replace_one(ftxt, f'#{enable} ', f'{enable} ')
        cmodules.remove(enable)
    # Modules enabled by default in Setup that we explicitly comment out.
    disables = ['xxsubtype']
    for disable in disables:
        ftxt = replace_one(ftxt, f'\n{disable} ', f'\n#{disable} ')
    # Append entries for modules needing external static libs.
    ftxt += '\n# Additions by efrotools:\n'
    if bool(True):
        ftxt += f'_uuid _uuidmodule.c{uuid_ex}\n'
        cmodules.remove('_uuid')
    ftxt += f'zlib zlibmodule.c{zlib_ex}\n'
    cmodules.remove('zlib')
    # Do we need it for sure?
    ftxt += f'_hashlib _hashopenssl.c{hash_ex}\n'
    ftxt += f'_lzma _lzmamodule.c{lzma_ex}\n'
    cmodules.remove('_lzma')
    ftxt += f'_bz2 _bz2module.c{bz2_ex}\n'
    ftxt += f'_ssl _ssl.c{ssl_ex}\n'
    ftxt += (f'_sqlite3'
             f' _sqlite/cache.c'
             f' _sqlite/connection.c'
             f' _sqlite/cursor.c'
             f' _sqlite/microprotocols.c'
             f' _sqlite/module.c'
             f' _sqlite/prepare_protocol.c'
             f' _sqlite/row.c'
             f' _sqlite/statement.c'
             f' _sqlite/util.c'
             f'{sqlite_ex}'
             f' -DMODULE_NAME=\'\\"sqlite3\\"\''
             f' -DSQLITE_OMIT_LOAD_EXTENSION'
             f' -lsqlite3\n')
    # Mac needs this:
    # NOTE(review): the first string line below was lost in extraction.
    if arch == 'mac':
        ftxt += ('\n'
                 '
                 '_scproxy _scproxy.c '
                 '-framework SystemConfiguration '
                 '-framework CoreFoundation\n')
    # Explicitly mark the remaining ones as disabled
    # (so Python won't try to build them as dynamic libs).
    remaining_disabled = ' '.join(cmodules)
    ftxt += ('\n# Disabled by efrotools build:\n'
             '*disabled*\n'
             f'{remaining_disabled}\n')
    writefile(fname, ftxt)
    # On android, also teach Modules/makesetup to treat NAME=value lines as
    # definitions (the stock pattern there is too broad/narrow for us).
    fname = 'Modules/makesetup'
    txt = readfile(fname)
    if platform == 'android':
        txt = replace_one(txt, '		*=*)'
                          '	DEFS="$line$NL$DEFS"; continue;;',
                          '		[A-Z]*=*)	DEFS="$line$NL$DEFS";'
                          ' continue;;')
        assert txt.count('[A-Z]*=*') == 1
    writefile(fname, txt)
def winprune() -> None:
    """Prune unneeded stdlib modules and DLLs from the Windows Python payloads.

    Removes PRUNE_LIB_NAMES from each Lib dir and PRUNE_DLL_NAMES from each
    DLLs dir for both the Win32 and x64 payloads.
    """
    targets = (
        ('assets/src/windows/Win32/Lib', PRUNE_LIB_NAMES),
        ('assets/src/windows/x64/Lib', PRUNE_LIB_NAMES),
        ('assets/src/windows/Win32/DLLs', PRUNE_DLL_NAMES),
        ('assets/src/windows/x64/DLLs', PRUNE_DLL_NAMES),
    )
    for dirpath, prunenames in targets:
        assert os.path.isdir(dirpath)
        run('cd "' + dirpath + '" && rm -rf ' + ' '.join(prunenames))
    print('Win-prune successful.')
def gather() -> None:
    """Gather built Python artifacts into the project's source/asset dirs.

    For both debug and release variants, copies headers and static libs from
    the per-platform build dirs into ``src/external/python-<group>`` and (for
    release builds only) the Python standard library into
    ``assets/src/pylib-<group>``.

    NOTE(review): depends on module-level helpers/constants defined elsewhere
    in this file (run, readfile, writefile, replace_one, PYVER,
    PY_VER_EXACT_APPLE, PY_VER_EXACT_ANDROID, PRUNE_LIB_NAMES, Any).
    """
    # Flip this off to skip gathering the android artifacts.
    do_android = True
    # Blow away any existing output so we always start fresh.
    existing_dirs = [
        os.path.join('src/external', d) for d in os.listdir('src/external')
        if d.startswith('python-') and d != 'python-notes.txt'
    ]
    existing_dirs += [
        os.path.join('assets/src', d) for d in os.listdir('assets/src')
        if d.startswith('pylib-')
    ]
    if not do_android:
        existing_dirs = [d for d in existing_dirs if 'android' not in d]
    for existing_dir in existing_dirs:
        run('rm -rf "' + existing_dir + '"')
    # Path suffix under each android build dir where its sysroot lives.
    apost2 = f'src/Python-{PY_VER_EXACT_ANDROID}/Android/sysroot'
    for buildtype in ['debug', 'release']:
        debug = buildtype == 'debug'
        bsuffix = '_debug' if buildtype == 'debug' else ''
        bsuffix2 = '-debug' if buildtype == 'debug' else ''
        # Debug CPython libs carry a 'd' suffix (e.g. libpython3.9d.a).
        libname = 'python' + PYVER + ('d' if debug else '')
        # Root build-output dir per platform.
        bases = {
            'mac': f'build/python_apple_mac{bsuffix}/build/macOS',
            'ios': f'build/python_apple_ios{bsuffix}/build/iOS',
            'tvos': f'build/python_apple_tvos{bsuffix}/build/tvOS',
            'android_arm': f'build/python_android_arm{bsuffix}/build',
            'android_arm64': f'build/python_android_arm64{bsuffix}/build',
            'android_x86': f'build/python_android_x86{bsuffix}/build',
            'android_x86_64': f'build/python_android_x86_64{bsuffix}/build'
        }
        # Android's static dependency libs live in its sysroot instead.
        bases2 = {
            'android_arm': f'build/python_android_arm{bsuffix}/{apost2}',
            'android_arm64': f'build/python_android_arm64{bsuffix}/{apost2}',
            'android_x86': f'build/python_android_x86{bsuffix}/{apost2}',
            'android_x86_64': f'build/python_android_x86_64{bsuffix}/{apost2}'
        }
        # One entry per target platform.  'pylib' (the stdlib source) is only
        # present on the first build of each group since the stdlib is
        # shared group-wide; 'libinst' overrides the lib install dir name.
        builds: list[dict[str, Any]] = [{
            'name':
                'macos',
            'group':
                'apple',
            'headers':
                bases['mac'] + '/Support/Python/Headers',
            'libs': [
                bases['mac'] + '/Support/Python/libPython.a',
                bases['mac'] + '/Support/OpenSSL/libOpenSSL.a',
                bases['mac'] + '/Support/XZ/libxz.a',
                bases['mac'] + '/Support/BZip2/libbzip2.a',
            ],
            'pylib':
                (bases['mac'] + f'/Python-{PY_VER_EXACT_APPLE}-macOS/lib'),
        }, {
            'name':
                'ios',
            'group':
                'apple',
            'headers':
                bases['ios'] + '/Support/Python/Headers',
            'libs': [
                bases['ios'] + '/Support/Python/libPython.a',
                bases['ios'] + '/Support/OpenSSL/libOpenSSL.a',
                bases['ios'] + '/Support/XZ/libxz.a',
                bases['ios'] + '/Support/BZip2/libbzip2.a',
            ],
        }, {
            'name':
                'tvos',
            'group':
                'apple',
            'headers':
                bases['tvos'] + '/Support/Python/Headers',
            'libs': [
                bases['tvos'] + '/Support/Python/libPython.a',
                bases['tvos'] + '/Support/OpenSSL/libOpenSSL.a',
                bases['tvos'] + '/Support/XZ/libxz.a',
                bases['tvos'] + '/Support/BZip2/libbzip2.a',
            ],
        }, {
            'name': 'android_arm',
            'group': 'android',
            'headers': bases['android_arm'] + f'/usr/include/{libname}',
            'libs': [
                bases['android_arm'] + f'/usr/lib/lib{libname}.a',
                bases2['android_arm'] + '/usr/lib/libssl.a',
                bases2['android_arm'] + '/usr/lib/libcrypto.a',
                bases2['android_arm'] + '/usr/lib/liblzma.a',
                bases2['android_arm'] + '/usr/lib/libsqlite3.a',
                bases2['android_arm'] + '/usr/lib/libbz2.a',
                bases2['android_arm'] + '/usr/lib/libuuid.a',
            ],
            'libinst': 'android_armeabi-v7a',
            'pylib': (bases['android_arm'] + '/usr/lib/python' + PYVER),
        }, {
            'name': 'android_arm64',
            'group': 'android',
            'headers': bases['android_arm64'] + f'/usr/include/{libname}',
            'libs': [
                bases['android_arm64'] + f'/usr/lib/lib{libname}.a',
                bases2['android_arm64'] + '/usr/lib/libssl.a',
                bases2['android_arm64'] + '/usr/lib/libcrypto.a',
                bases2['android_arm64'] + '/usr/lib/liblzma.a',
                bases2['android_arm64'] + '/usr/lib/libsqlite3.a',
                bases2['android_arm64'] + '/usr/lib/libbz2.a',
                bases2['android_arm64'] + '/usr/lib/libuuid.a',
            ],
            'libinst': 'android_arm64-v8a',
        }, {
            'name': 'android_x86',
            'group': 'android',
            'headers': bases['android_x86'] + f'/usr/include/{libname}',
            'libs': [
                bases['android_x86'] + f'/usr/lib/lib{libname}.a',
                bases2['android_x86'] + '/usr/lib/libssl.a',
                bases2['android_x86'] + '/usr/lib/libcrypto.a',
                bases2['android_x86'] + '/usr/lib/liblzma.a',
                bases2['android_x86'] + '/usr/lib/libsqlite3.a',
                bases2['android_x86'] + '/usr/lib/libbz2.a',
                bases2['android_x86'] + '/usr/lib/libuuid.a',
            ],
            'libinst': 'android_x86',
        }, {
            'name': 'android_x86_64',
            'group': 'android',
            'headers': bases['android_x86_64'] + f'/usr/include/{libname}',
            'libs': [
                bases['android_x86_64'] + f'/usr/lib/lib{libname}.a',
                bases2['android_x86_64'] + '/usr/lib/libssl.a',
                bases2['android_x86_64'] + '/usr/lib/libcrypto.a',
                bases2['android_x86_64'] + '/usr/lib/liblzma.a',
                bases2['android_x86_64'] + '/usr/lib/libsqlite3.a',
                bases2['android_x86_64'] + '/usr/lib/libbz2.a',
                bases2['android_x86_64'] + '/usr/lib/libuuid.a',
            ],
            'libinst': 'android_x86_64',
        }]
        for build in builds:
            grp = build['group']
            if not do_android and grp == 'android':
                continue
            builddir = f'src/external/python-{grp}{bsuffix2}'
            header_dst = os.path.join(builddir, 'include')
            lib_dst = os.path.join(builddir, 'lib')
            assets_src_dst = f'assets/src/pylib-{grp}'
            # Do the group-level setup (dirs + stdlib assets) only once per
            # group; subsequent builds of the group skip this branch.
            if not os.path.exists(builddir):
                run('mkdir -p "' + builddir + '"')
                run('mkdir -p "' + lib_dst + '"')
                if not debug:
                    run('mkdir -p "' + assets_src_dst + '"')
                    run('rsync --recursive --include "*.py"'
                        ' --exclude __pycache__ --include "*/" --exclude "*" "'
                        + build['pylib'] + '/" "' + assets_src_dst + '"')
                    # Prune modules we don't need to cut down on size.
                    run('cd "' + assets_src_dst + '" && rm -rf ' +
                        ' '.join(PRUNE_LIB_NAMES))
                    # Some minor filtering to system scripts:
                    # on iOS/tvOS, addusersitepackages() leads to a crash
                    # due to _sysconfigdata_dm_ios_darwin module not existing,
                    # so let's skip that.
                    # NOTE(review): the replacement string's last line was
                    # lost in extraction.
                    fname = f'{assets_src_dst}/site.py'
                    txt = readfile(fname)
                    txt = replace_one(
                        txt,
                        '    known_paths = addusersitepackages(known_paths)',
                        '    # efro tweak: this craps out on ios/tvos.\n'
                        '    # (and we don\'t use it anyway)\n'
                        '
                    writefile(fname, txt)
            # Copy in a base set of headers (everything in a group should
            # be using the same headers)
            run(f'cp -r "{build["headers"]}" "{header_dst}"')
            # Clear whatever pyconfigs came across; we'll build our own
            # NOTE(review): the written header content below was lost in
            # extraction (only bare quote lines remain).
            run('rm ' + header_dst + '/pyconfig*')
            with open(header_dst + '/pyconfig.h', 'w',
                      encoding='utf-8') as hfile:
                hfile.write(
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
                    '
            # Now copy each build's config headers in with unique names.
            cfgs = [
                f for f in os.listdir(build['headers'])
                if f.startswith('pyconfig')
            ]
            for cfg in cfgs:
                out = cfg.replace('pyconfig', 'pyconfig-' + build['name'])
                if cfg == 'pyconfig.h':
                    # The top-level header also needs its *contents* updated
                    # (those headers can themselves include
                    # others; ios for instance points to a arm64 and a
                    # x86_64 variant).
                    contents = readfile(build['headers'] + '/' + cfg)
                    contents = contents.replace('pyconfig',
                                                'pyconfig-' + build['name'])
                    writefile(header_dst + '/' + out, contents)
                else:
                    # other configs we just rename
                    run('cp "' + build['headers'] + '/' + cfg + '" "' +
                        header_dst + '/' + out + '"')
            # Copy in libs. If the lib gave a specific install name,
            # use that; otherwise use name.
            targetdir = lib_dst + '/' + build.get('libinst', build['name'])
            run('rm -rf "' + targetdir + '"')
            run('mkdir -p "' + targetdir + '"')
            for lib in build['libs']:
                run('cp "' + lib + '" "' + targetdir + '"')
    print('Great success!')
| true | true |
1c46ad650091fd8eb656b4ce0564489819168982 | 2,955 | py | Python | conans/test/unittests/util/local_db_test.py | Wonders11/conan | 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8 | [
"MIT"
] | 6,205 | 2015-12-01T13:40:05.000Z | 2022-03-31T07:30:25.000Z | conans/test/unittests/util/local_db_test.py | Wonders11/conan | 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8 | [
"MIT"
] | 8,747 | 2015-12-01T16:28:48.000Z | 2022-03-31T23:34:53.000Z | conans/test/unittests/util/local_db_test.py | Mattlk13/conan | 005fc53485557b0a570bb71670f2ca9c66082165 | [
"MIT"
] | 961 | 2015-12-01T16:56:43.000Z | 2022-03-31T13:50:52.000Z | import os
import unittest
import uuid
import six
import pytest
from conans.client.store.localdb import LocalDB
from conans.test.utils.test_files import temp_folder
class LocalStoreTest(unittest.TestCase):
    """Tests for the LocalDB credential store."""

    def test_localdb(self):
        """A fresh DB has no login; a stored one round-trips intact."""
        dbpath = os.path.join(temp_folder(), "dbfile")
        localdb = LocalDB.create(dbpath)

        # Before anything is stored, every login field comes back as None.
        for field in localdb.get_login("myurl1"):
            self.assertIsNone(field)

        localdb.store("pepe", "token", "access_token", "myurl1")
        user, token, access_token = localdb.get_login("myurl1")
        self.assertEqual("pepe", user)
        self.assertEqual("token", token)
        self.assertEqual("access_token", access_token)
        self.assertEqual("pepe", localdb.get_username("myurl1"))

    def test_token_encryption_ascii(self):
        """Plain-ASCII tokens round-trip through an encrypted store."""
        dbpath = os.path.join(temp_folder(), "dbfile")
        localdb = LocalDB.create(dbpath, encryption_key=str(uuid.uuid4()))
        localdb.store("pepe", "token", "access_token", "myurl1")
        user, token, access_token = localdb.get_login("myurl1")
        self.assertEqual(("pepe", "token", "access_token"),
                         (user, token, access_token))

    def test_token_encryption_none(self):
        """A missing (None) access token survives the encrypted round-trip."""
        dbpath = os.path.join(temp_folder(), "dbfile")
        localdb = LocalDB.create(dbpath, encryption_key=str(uuid.uuid4()))
        localdb.store("pepe", "token", None, "myurl1")
        user, token, access_token = localdb.get_login("myurl1")
        self.assertEqual(("pepe", "token", None),
                         (user, token, access_token))

    @pytest.mark.skipif(six.PY2, reason="Python2 sqlite3 converts to str")
    def test_token_encryption_unicode(self):
        """Non-ASCII tokens round-trip; reading without the key obfuscates them."""
        dbpath = os.path.join(temp_folder(), "dbfile")
        localdb = LocalDB.create(dbpath, encryption_key=str(uuid.uuid4()))
        # Only ASCII files in codebase, so build the unicode token from bytes.
        token_input = b'espa\xc3\xb1a\xe2\x82\xac$'.decode('utf-8')
        localdb.store("pepe", token_input, token_input, "myurl1")
        user, token, access_token = localdb.get_login("myurl1")
        self.assertEqual("pepe", user)
        self.assertEqual(token_input, token)
        self.assertEqual(token_input, access_token)
        self.assertEqual("pepe", localdb.get_username("myurl1"))

        # Opening the same file without the encryption key must yield
        # obfuscated (non-matching) token values.
        other_db = LocalDB.create(dbpath)
        user, token, access_token = other_db.get_login("myurl1")
        self.assertEqual("pepe", user)
        self.assertNotEqual(token_input, token)
        self.assertNotEqual(token_input, access_token)
| 38.376623 | 99 | 0.671743 | import os
import unittest
import uuid
import six
import pytest
from conans.client.store.localdb import LocalDB
from conans.test.utils.test_files import temp_folder
class LocalStoreTest(unittest.TestCase):
    """Tests for the LocalDB credential store."""

    def test_localdb(self):
        """A fresh DB has no login; a stored one round-trips intact."""
        dbpath = os.path.join(temp_folder(), "dbfile")
        localdb = LocalDB.create(dbpath)

        # Before anything is stored, every login field comes back as None.
        for field in localdb.get_login("myurl1"):
            self.assertIsNone(field)

        localdb.store("pepe", "token", "access_token", "myurl1")
        user, token, access_token = localdb.get_login("myurl1")
        self.assertEqual("pepe", user)
        self.assertEqual("token", token)
        self.assertEqual("access_token", access_token)
        self.assertEqual("pepe", localdb.get_username("myurl1"))

    def test_token_encryption_ascii(self):
        """Plain-ASCII tokens round-trip through an encrypted store."""
        dbpath = os.path.join(temp_folder(), "dbfile")
        localdb = LocalDB.create(dbpath, encryption_key=str(uuid.uuid4()))
        localdb.store("pepe", "token", "access_token", "myurl1")
        user, token, access_token = localdb.get_login("myurl1")
        self.assertEqual(("pepe", "token", "access_token"),
                         (user, token, access_token))

    def test_token_encryption_none(self):
        """A missing (None) access token survives the encrypted round-trip."""
        dbpath = os.path.join(temp_folder(), "dbfile")
        localdb = LocalDB.create(dbpath, encryption_key=str(uuid.uuid4()))
        localdb.store("pepe", "token", None, "myurl1")
        user, token, access_token = localdb.get_login("myurl1")
        self.assertEqual(("pepe", "token", None),
                         (user, token, access_token))

    @pytest.mark.skipif(six.PY2, reason="Python2 sqlite3 converts to str")
    def test_token_encryption_unicode(self):
        """Non-ASCII tokens round-trip; reading without the key obfuscates them."""
        dbpath = os.path.join(temp_folder(), "dbfile")
        localdb = LocalDB.create(dbpath, encryption_key=str(uuid.uuid4()))
        # Only ASCII files in codebase, so build the unicode token from bytes.
        token_input = b'espa\xc3\xb1a\xe2\x82\xac$'.decode('utf-8')
        localdb.store("pepe", token_input, token_input, "myurl1")
        user, token, access_token = localdb.get_login("myurl1")
        self.assertEqual("pepe", user)
        self.assertEqual(token_input, token)
        self.assertEqual(token_input, access_token)
        self.assertEqual("pepe", localdb.get_username("myurl1"))

        # Opening the same file without the encryption key must yield
        # obfuscated (non-matching) token values.
        other_db = LocalDB.create(dbpath)
        user, token, access_token = other_db.get_login("myurl1")
        self.assertEqual("pepe", user)
        self.assertNotEqual(token_input, token)
        self.assertNotEqual(token_input, access_token)
| true | true |
1c46ae0e3f4a04853fd12feddc7987c8067cadb2 | 934 | py | Python | django_angular_url/templatetags/django_angular_url_tags.py | rafitorres/django-angular-url | c9734f54370f4fb0d2d7bfd2248107ba93126aac | [
"MIT"
] | 1 | 2018-06-17T19:28:24.000Z | 2018-06-17T19:28:24.000Z | django_angular_url/templatetags/django_angular_url_tags.py | rafitorres/django-angular-url | c9734f54370f4fb0d2d7bfd2248107ba93126aac | [
"MIT"
] | null | null | null | django_angular_url/templatetags/django_angular_url_tags.py | rafitorres/django-angular-url | c9734f54370f4fb0d2d7bfd2248107ba93126aac | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.template import Library
from django.core.exceptions import ImproperlyConfigured
from django.utils.safestring import mark_safe
from django_angular_url.core.urlresolvers import get_urls
register = Library()
@register.simple_tag(name='load_djng_urls', takes_context=True)
def djng_urls(context, *namespaces):
    """Template tag: emit the URLs of the given namespaces as JSON.

    An empty-string namespace means "no namespace"; the special value
    'SELF' resolves to the current request's namespace (and requires a
    RequestContext).
    """
    def _resolve(namespace):
        if namespace == '':
            return None
        if namespace != 'SELF':
            return namespace
        request = context.get('request')
        if not request:
            raise ImproperlyConfigured(
                "'SELF' was used in 'load_djng_urls' for request "
                "namespace lookup, but there is no RequestContext.")
        return request.resolver_match.namespace

    resolved = [_resolve(ns) for ns in namespaces]
    return mark_safe(json.dumps(get_urls(resolved)))
| 32.206897 | 72 | 0.671306 |
from __future__ import unicode_literals
import json
from django.template import Library
from django.core.exceptions import ImproperlyConfigured
from django.utils.safestring import mark_safe
from django_angular_url.core.urlresolvers import get_urls
register = Library()
@register.simple_tag(name='load_djng_urls', takes_context=True)
def djng_urls(context, *namespaces):
    """Template tag: emit the URLs of the given namespaces as JSON.

    An empty-string namespace means "no namespace"; the special value
    'SELF' resolves to the current request's namespace (and requires a
    RequestContext).
    """
    def _resolve(namespace):
        if namespace == '':
            return None
        if namespace != 'SELF':
            return namespace
        request = context.get('request')
        if not request:
            raise ImproperlyConfigured(
                "'SELF' was used in 'load_djng_urls' for request "
                "namespace lookup, but there is no RequestContext.")
        return request.resolver_match.namespace

    resolved = [_resolve(ns) for ns in namespaces]
    return mark_safe(json.dumps(get_urls(resolved)))
| true | true |
1c46af2a12398dfe071582314575709997860fcd | 5,438 | py | Python | Dell/benchmarks/rnnt/implementations/DSS8440x8A100-PCIE-80GB/bind_launch.py | gglin001/training_results_v1.1 | 58fd4103f0f465bda6eb56a06a74b7bbccbbcf24 | [
"Apache-2.0"
] | null | null | null | Dell/benchmarks/rnnt/implementations/DSS8440x8A100-PCIE-80GB/bind_launch.py | gglin001/training_results_v1.1 | 58fd4103f0f465bda6eb56a06a74b7bbccbbcf24 | [
"Apache-2.0"
] | null | null | null | Dell/benchmarks/rnnt/implementations/DSS8440x8A100-PCIE-80GB/bind_launch.py | gglin001/training_results_v1.1 | 58fd4103f0f465bda6eb56a06a74b7bbccbbcf24 | [
"Apache-2.0"
] | null | null | null | import sys
import subprocess
import os
import socket
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
    """Parse command-line options for the distributed training launcher.

    Returns an argparse.Namespace holding the cluster topology
    (``nnodes``/``node_rank``), per-node process count, rendezvous
    address/port, CPU-binding switches, the training script path and the
    arguments to forward to it.  Fixes two typos in the user-facing help
    text ("utilty", "communciation").
    """
    parser = ArgumentParser(description="PyTorch distributed training launch "
                                        "helper utility that will spawn up "
                                        "multiple distributed processes")

    # Optional arguments for the launch helper.
    parser.add_argument("--nnodes", type=int, default=1,
                        help="The number of nodes to use for distributed "
                             "training")
    parser.add_argument("--node_rank", type=int, default=0,
                        help="The rank of the node for multi-node distributed "
                             "training")
    parser.add_argument("--nproc_per_node", type=int, default=1,
                        help="The number of processes to launch on each node, "
                             "for GPU training, this is recommended to be set "
                             "to the number of GPUs in your system so that "
                             "each process can be bound to a single GPU.")
    parser.add_argument("--master_addr", default="127.0.0.1", type=str,
                        help="Master node (rank 0)'s address, should be either "
                             "the IP address or the hostname of node 0, for "
                             "single node multi-proc training, the "
                             "--master_addr can simply be 127.0.0.1")
    parser.add_argument("--master_port", default=29500, type=int,
                        help="Master node (rank 0)'s free port that needs to "
                             "be used for communication during distributed "
                             "training")
    parser.add_argument('--no_hyperthreads', action='store_true',
                        help='Flag to disable binding to hyperthreads')
    parser.add_argument('--no_membind', action='store_true',
                        help='Flag to disable memory binding')

    # Non-optional arguments describing the machine's CPU layout (for
    # numactl binding).
    parser.add_argument("--nsockets_per_node", type=int, required=True,
                        help="Number of CPU sockets on a node")
    parser.add_argument("--ncores_per_socket", type=int, required=True,
                        help="Number of CPU cores per socket")

    # Positional: the training script, then everything else is forwarded
    # to it verbatim (argparse.REMAINDER).
    parser.add_argument("training_script", type=str,
                        help="The full path to the single GPU training "
                             "program/script to be launched in parallel, "
                             "followed by all the arguments for the "
                             "training script")
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Spawn one training process per local rank, each pinned via numactl.

    Ranks 0-3 are bound to the even-numbered logical CPUs and ranks 4-7 to
    the odd-numbered ones (hyperthread siblings), plus an optional NUMA
    memory binding per socket.  Uses plain ``range`` instead of the
    original's needless ``torch.arange`` for the core lists.

    NOTE(review): the 96 logical CPUs are hardcoded for this DSS8440 node;
    ideally this would be derived from nsockets/ncores arguments.
    """
    args = parse_args()

    # GPUs per socket determines the NUMA node each rank's memory binds to.
    NGPUS_PER_SOCKET = args.nproc_per_node // args.nsockets_per_node

    # World size in terms of number of processes.
    dist_world_size = args.nproc_per_node * args.nnodes

    # Set PyTorch distributed related environment variables.
    current_env = os.environ.copy()
    current_env["MASTER_ADDR"] = args.master_addr
    current_env["MASTER_PORT"] = str(args.master_port)
    current_env["WORLD_SIZE"] = str(dist_world_size)

    processes = []

    # Even/odd logical CPU ids of the 96-thread node.
    even_cores = list(range(0, 96, 2))
    odd_cores = list(range(1, 96, 2))

    for local_rank in range(0, args.nproc_per_node):
        # Each process's global rank.
        dist_rank = args.nproc_per_node * args.node_rank + local_rank
        current_env["RANK"] = str(dist_rank)

        # Form the numactl binding arguments.
        numactlargs = []
        if args.no_hyperthreads:
            raise ValueError("Please enable HT with DSS and continue")
        else:
            if local_rank in [0, 1, 2, 3]:
                numactlargs += [
                    "--physcpubind={}".format(",".join(map(str, even_cores)))]
            elif local_rank in [4, 5, 6, 7]:
                numactlargs += [
                    "--physcpubind={}".format(",".join(map(str, odd_cores)))]

        if not args.no_membind:
            memnode = local_rank // NGPUS_PER_SOCKET
            numactlargs += ["--membind={}".format(memnode)]

        # Spawn the worker under numactl with its local rank appended.
        cmd = ["/usr/bin/numactl"] \
            + numactlargs \
            + [sys.executable,
               "-u",
               args.training_script,
               "--local_rank={}".format(local_rank)
               ] \
            + args.training_script_args
        print(f"##binding cmd: {cmd}")
        print(f"##local_rank: {local_rank}")
        process = subprocess.Popen(cmd, env=current_env)
        processes.append(process)

    for process in processes:
        process.wait()
# Standard script entry point.
if __name__ == "__main__":
    main()
| 40.887218 | 109 | 0.578338 | import sys
import subprocess
import os
import socket
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
    """Parse command-line options for the distributed training launcher.

    Returns an argparse.Namespace holding the cluster topology
    (``nnodes``/``node_rank``), per-node process count, rendezvous
    address/port, CPU-binding switches, the training script path and the
    arguments to forward to it.  Fixes two typos in the user-facing help
    text ("utilty", "communciation").
    """
    parser = ArgumentParser(description="PyTorch distributed training launch "
                                        "helper utility that will spawn up "
                                        "multiple distributed processes")

    # Optional arguments for the launch helper.
    parser.add_argument("--nnodes", type=int, default=1,
                        help="The number of nodes to use for distributed "
                             "training")
    parser.add_argument("--node_rank", type=int, default=0,
                        help="The rank of the node for multi-node distributed "
                             "training")
    parser.add_argument("--nproc_per_node", type=int, default=1,
                        help="The number of processes to launch on each node, "
                             "for GPU training, this is recommended to be set "
                             "to the number of GPUs in your system so that "
                             "each process can be bound to a single GPU.")
    parser.add_argument("--master_addr", default="127.0.0.1", type=str,
                        help="Master node (rank 0)'s address, should be either "
                             "the IP address or the hostname of node 0, for "
                             "single node multi-proc training, the "
                             "--master_addr can simply be 127.0.0.1")
    parser.add_argument("--master_port", default=29500, type=int,
                        help="Master node (rank 0)'s free port that needs to "
                             "be used for communication during distributed "
                             "training")
    parser.add_argument('--no_hyperthreads', action='store_true',
                        help='Flag to disable binding to hyperthreads')
    parser.add_argument('--no_membind', action='store_true',
                        help='Flag to disable memory binding')

    # Non-optional arguments describing the machine's CPU layout (for
    # numactl binding).
    parser.add_argument("--nsockets_per_node", type=int, required=True,
                        help="Number of CPU sockets on a node")
    parser.add_argument("--ncores_per_socket", type=int, required=True,
                        help="Number of CPU cores per socket")

    # Positional: the training script, then everything else is forwarded
    # to it verbatim (argparse.REMAINDER).
    parser.add_argument("training_script", type=str,
                        help="The full path to the single GPU training "
                             "program/script to be launched in parallel, "
                             "followed by all the arguments for the "
                             "training script")
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Spawn one training process per local rank, each pinned via numactl.

    Ranks 0-3 are bound to the even-numbered logical CPUs and ranks 4-7 to
    the odd-numbered ones (hyperthread siblings), plus an optional NUMA
    memory binding per socket.  Uses plain ``range`` instead of the
    original's needless ``torch.arange`` for the core lists.

    NOTE(review): the 96 logical CPUs are hardcoded for this DSS8440 node;
    ideally this would be derived from nsockets/ncores arguments.
    """
    args = parse_args()

    # GPUs per socket determines the NUMA node each rank's memory binds to.
    NGPUS_PER_SOCKET = args.nproc_per_node // args.nsockets_per_node

    # World size in terms of number of processes.
    dist_world_size = args.nproc_per_node * args.nnodes

    # Set PyTorch distributed related environment variables.
    current_env = os.environ.copy()
    current_env["MASTER_ADDR"] = args.master_addr
    current_env["MASTER_PORT"] = str(args.master_port)
    current_env["WORLD_SIZE"] = str(dist_world_size)

    processes = []

    # Even/odd logical CPU ids of the 96-thread node.
    even_cores = list(range(0, 96, 2))
    odd_cores = list(range(1, 96, 2))

    for local_rank in range(0, args.nproc_per_node):
        # Each process's global rank.
        dist_rank = args.nproc_per_node * args.node_rank + local_rank
        current_env["RANK"] = str(dist_rank)

        # Form the numactl binding arguments.
        numactlargs = []
        if args.no_hyperthreads:
            raise ValueError("Please enable HT with DSS and continue")
        else:
            if local_rank in [0, 1, 2, 3]:
                numactlargs += [
                    "--physcpubind={}".format(",".join(map(str, even_cores)))]
            elif local_rank in [4, 5, 6, 7]:
                numactlargs += [
                    "--physcpubind={}".format(",".join(map(str, odd_cores)))]

        if not args.no_membind:
            memnode = local_rank // NGPUS_PER_SOCKET
            numactlargs += ["--membind={}".format(memnode)]

        # Spawn the worker under numactl with its local rank appended.
        cmd = ["/usr/bin/numactl"] \
            + numactlargs \
            + [sys.executable,
               "-u",
               args.training_script,
               "--local_rank={}".format(local_rank)
               ] \
            + args.training_script_args
        print(f"##binding cmd: {cmd}")
        print(f"##local_rank: {local_rank}")
        process = subprocess.Popen(cmd, env=current_env)
        processes.append(process)

    for process in processes:
        process.wait()
# Standard script entry point.
if __name__ == "__main__":
    main()
| true | true |
1c46b0e8e1b0e69a358fe2773d36f1292eb76c39 | 141 | py | Python | escapement/__init__.py | willingc/escapement | a02cc5f4367acf6cbc7f0734744b5093b4b02597 | [
"MIT"
] | null | null | null | escapement/__init__.py | willingc/escapement | a02cc5f4367acf6cbc7f0734744b5093b4b02597 | [
"MIT"
] | null | null | null | escapement/__init__.py | willingc/escapement | a02cc5f4367acf6cbc7f0734744b5093b4b02597 | [
"MIT"
] | null | null | null | """Top-level package for Escapement."""
__author__ = """Carol Willing"""
__email__ = "willingc@willingconsulting.com"
__version__ = "0.1.0"
| 23.5 | 44 | 0.716312 |
__author__ = """Carol Willing"""
__email__ = "willingc@willingconsulting.com"
__version__ = "0.1.0"
| true | true |
1c46b17b4df598ba18d2b2ad0e6b4ffe03ea914e | 2,378 | py | Python | gemd/demo/measurement_example.py | ventura-rivera/gemd-python | 078eed39de852f830111b77306c2f35146de8ec3 | [
"Apache-2.0"
] | null | null | null | gemd/demo/measurement_example.py | ventura-rivera/gemd-python | 078eed39de852f830111b77306c2f35146de8ec3 | [
"Apache-2.0"
] | null | null | null | gemd/demo/measurement_example.py | ventura-rivera/gemd-python | 078eed39de852f830111b77306c2f35146de8ec3 | [
"Apache-2.0"
] | null | null | null | """Demonstrate attaching measurements to a material."""
import random
import string
from gemd.entity.attribute.property import Property
from gemd.entity.object import MeasurementRun
from gemd.entity.value.nominal_real import NominalReal
from gemd.entity.value.normal_real import NormalReal
from gemd.enumeration import Origin
# recommended values taken from
# https://www.shimadzu.com/an/industry/petrochemicalchemical/n9j25k00000pyv3w.html
# Three-point-bend rig geometry and the applied load.
thickness = 4.0  # mm (specimen thickness)
length = 80.0  # mm (specimen length; not used in the stress/strain formulas)
width = 10.0  # mm (specimen width)
span = 64.0  # mm (distance between the two supports)
punch_radius = 5.0  # mm (loading-nose radius; not used in the formulas)
support_radius = 5.0  # mm (not used in the formulas)
applied_force = 100.0  # N (load applied at mid-span)
def __random_my_id():
    """Return a random 8-letter lowercase ASCII id."""
    letters = (random.choice(string.ascii_lowercase) for _ in range(8))
    return "".join(letters)
def make_demo_measurements(num_measurements, extra_tags=frozenset()):
    """Build a list of flexural-test MeasurementRuns with random ids/deflections."""
    measurements = []
    for _ in range(num_measurements):
        measurements.append(
            make_flexural_test_measurement(
                my_id=__random_my_id(),
                deflection=random.random(),
                extra_tags=extra_tags,
            )
        )
    return measurements
def make_flexural_test_measurement(my_id, deflection, extra_tags=frozenset()):
    """Build a MeasurementRun for a three-point flexural test.

    Stress, strain and modulus follow
    https://en.wikipedia.org/wiki/Three-point_flexural_test
    """
    stress = 3 * applied_force * span / (2 * thickness * thickness * width)
    strain = 6 * deflection * thickness / (span * span)
    modulus = stress / strain

    def _measured(name, value):
        # All demo properties share the MEASURED origin.
        return Property(name=name, value=value, origin=Origin.MEASURED)

    properties = [
        _measured("flexural stress",
                  NormalReal(stress, std=(0.01 * stress), units="MPa")),
        _measured("flexural strain",
                  NormalReal(strain, std=(0.01 * strain), units="")),
        _measured("flexural modulus",
                  NormalReal(modulus, std=(0.01 * modulus), units="MPa")),
        _measured("deflection",
                  NominalReal(deflection, units="mm")),
    ]
    return MeasurementRun(
        uids={"my_id": my_id},
        tags=["3_pt_bend", "mechanical", "flex"] + list(extra_tags),
        properties=properties,
    )
| 31.706667 | 82 | 0.616905 | import random
import string
from gemd.entity.attribute.property import Property
from gemd.entity.object import MeasurementRun
from gemd.entity.value.nominal_real import NominalReal
from gemd.entity.value.normal_real import NormalReal
from gemd.enumeration import Origin
# Three-point-bend rig geometry and the applied load.
thickness = 4.0  # mm (specimen thickness)
length = 80.0  # mm (specimen length; not used in the stress/strain formulas)
width = 10.0  # mm (specimen width)
span = 64.0  # mm (distance between the two supports)
punch_radius = 5.0  # mm (loading-nose radius; not used in the formulas)
support_radius = 5.0  # mm (not used in the formulas)
applied_force = 100.0  # N (load applied at mid-span)
def __random_my_id():
    """Return a random 8-letter lowercase ASCII id."""
    letters = (random.choice(string.ascii_lowercase) for _ in range(8))
    return "".join(letters)
def make_demo_measurements(num_measurements, extra_tags=frozenset()):
    """Build a list of flexural-test MeasurementRuns with random ids/deflections."""
    measurements = []
    for _ in range(num_measurements):
        measurements.append(
            make_flexural_test_measurement(
                my_id=__random_my_id(),
                deflection=random.random(),
                extra_tags=extra_tags,
            )
        )
    return measurements
def make_flexural_test_measurement(my_id, deflection, extra_tags=frozenset()):
    """
    Build a MeasurementRun for a three-point bend test.

    Stress, strain and modulus follow the standard formulas in
    https://en.wikipedia.org/wiki/Three-point_flexural_test using the
    module-level beam geometry and applied force.
    """
    # Flexural formulas for a rectangular beam on two supports.
    stress = 3 * applied_force * span / (2 * thickness * thickness * width)
    strain = 6 * deflection * thickness / (span * span)
    modulus = stress / strain
    measurement = MeasurementRun(
        uids={"my_id": my_id},
        tags=["3_pt_bend", "mechanical", "flex"] + list(extra_tags),
        properties=[
            # Each value carries a 1% relative standard deviation.
            Property(
                name="flexural stress",
                value=NormalReal(stress, std=(0.01 * stress), units="MPa"),
                origin=Origin.MEASURED
            ),
            Property(
                name="flexural strain",
                value=NormalReal(strain, std=(0.01 * strain), units=""),
                origin=Origin.MEASURED
            ),
            Property(
                name="flexural modulus",
                value=NormalReal(modulus, std=(0.01 * modulus), units="MPa"),
                origin=Origin.MEASURED
            ),
            # The raw instrument reading the derived values come from.
            Property(
                name="deflection",
                value=NominalReal(deflection, units="mm"),
                origin=Origin.MEASURED
            )
        ]
    )
    return measurement
| true | true |
1c46b284df73fbe899299978530eccccf17a8af1 | 3,066 | py | Python | vqa_image_preprocess.py | strieb/VisualQuestionAnswering | 28f6ae1f2abd839145306a1d4f34ee84271cf3c1 | [
"MIT"
] | 1 | 2020-04-23T09:15:33.000Z | 2020-04-23T09:15:33.000Z | vqa_image_preprocess.py | strieb/VisualQuestionAnswering | 28f6ae1f2abd839145306a1d4f34ee84271cf3c1 | [
"MIT"
] | null | null | null | vqa_image_preprocess.py | strieb/VisualQuestionAnswering | 28f6ae1f2abd839145306a1d4f34ee84271cf3c1 | [
"MIT"
] | null | null | null | import json
from collections import Counter
import re
from VQA.PythonHelperTools.vqaTools.vqa import VQA
import random
import numpy as np
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from matplotlib import pyplot as plt
import os
import VQAModel
from keras.applications.xception import decode_predictions, preprocess_input
# from keras.applications.inception_v3 import decode_predictions, preprocess_input
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
import math
from Environment import DATADIR
# Dataset / path configuration for the VQA v2.0 mscoco train split.
versionType = 'v2_' # this should be '' when using VQA v2.0 dataset
taskType = 'OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
dataType = 'mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
dataSubType = 'train2014'
saveDir = 'preprocessed_xcep_24'
annFile = '%s/Annotations/%s%s_%s_annotations.json' % (DATADIR, versionType, dataType, dataSubType)
quesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (DATADIR, versionType, taskType, dataType, dataSubType)
imgDir = '%s/Images/%s/' % (DATADIR, dataSubType)
i = 0  # progress counter, shared by both passes below
directory = os.fsencode(imgDir)
# 363, 555
# 427, 619
# Network input resolution: 299 (Xception default) plus a 64 px margin.
size1 = 299+64
size2 = 299+64
model = VQAModel.createModelXception((size1, size2, 3))
model.summary()
# First pass: landscape (and square) images, resized to (size2, size1).
for file in os.listdir(directory):
    filename = os.fsdecode(file)
    if filename.endswith(".jpg"):
        imgPath = os.path.join(imgDir, filename)
        # COCO filenames end in a zero-padded 12-digit image id before '.jpg'.
        id = int(filename[-16:-4])
        img = load_img(imgPath)
        width, height = img.size
        if(width >= height):
            img = img.resize((size2, size1), resample=Image.BICUBIC)
            img_array = img_to_array(img)
            img_array = preprocess_input(img_array)
            # img_array = np.tile(img,(32,1,1,1))
            img_array = np.expand_dims(img_array, axis=0)
            predictions = model.predict(img_array)
            # Flatten the spatial feature grid to 24 vectors of length 2048.
            pred = predictions[0].reshape(24,2048)
            np.save(imgDir+saveDir+"/"+str(id), pred)
            if i < 1000 and i%100 == 0:
                print(i)
            if i % 1000 == 0:
                print(i)
            i += 1
# Second pass: portrait images, with the model input dimensions swapped.
model = VQAModel.createModelXception((size2, size1, 3))
for file in os.listdir(directory):
    filename = os.fsdecode(file)
    if filename.endswith(".jpg"):
        imgPath = os.path.join(imgDir, filename)
        id = int(filename[-16:-4])
        img = load_img(imgPath)
        width, height = img.size
        if(width < height):
            img = img.resize((size1, size2), resample=Image.BICUBIC)
            img_array = img_to_array(img)
            img_array = preprocess_input(img_array)
            # img_array = np.tile(img,(32,1,1,1))
            img_array = np.expand_dims(img_array, axis=0)
            # plt.imshow((img_array[0] + 1)/2)
            # plt.show()
            predictions = model.predict(img_array)
            pred = predictions[0].reshape(24,2048)
            np.save(imgDir+saveDir+"/"+str(id), pred)
            if i % 1000 == 0:
                print(i)
i += 1 | 37.851852 | 109 | 0.643509 | import json
from collections import Counter
import re
from VQA.PythonHelperTools.vqaTools.vqa import VQA
import random
import numpy as np
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from matplotlib import pyplot as plt
import os
import VQAModel
from keras.applications.xception import decode_predictions, preprocess_input
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
import math
from Environment import DATADIR
# Dataset / path configuration for the VQA v2.0 mscoco train split.
versionType = 'v2_'
taskType = 'OpenEnded'
dataType = 'mscoco'
dataSubType = 'train2014'
saveDir = 'preprocessed_xcep_24'
annFile = '%s/Annotations/%s%s_%s_annotations.json' % (DATADIR, versionType, dataType, dataSubType)
quesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (DATADIR, versionType, taskType, dataType, dataSubType)
imgDir = '%s/Images/%s/' % (DATADIR, dataSubType)
i = 0  # progress counter, shared by both passes below
directory = os.fsencode(imgDir)
# Network input resolution: 299 (Xception default) plus a 64 px margin.
size1 = 299+64
size2 = 299+64
model = VQAModel.createModelXception((size1, size2, 3))
model.summary()
# First pass: landscape (and square) images, resized to (size2, size1).
for file in os.listdir(directory):
    filename = os.fsdecode(file)
    if filename.endswith(".jpg"):
        imgPath = os.path.join(imgDir, filename)
        # COCO filenames end in a zero-padded 12-digit image id before '.jpg'.
        id = int(filename[-16:-4])
        img = load_img(imgPath)
        width, height = img.size
        if(width >= height):
            img = img.resize((size2, size1), resample=Image.BICUBIC)
            img_array = img_to_array(img)
            img_array = preprocess_input(img_array)
            img_array = np.expand_dims(img_array, axis=0)
            predictions = model.predict(img_array)
            # Flatten the spatial feature grid to 24 vectors of length 2048.
            pred = predictions[0].reshape(24,2048)
            np.save(imgDir+saveDir+"/"+str(id), pred)
            if i < 1000 and i%100 == 0:
                print(i)
            if i % 1000 == 0:
                print(i)
            i += 1
# Second pass: portrait images, with the model input dimensions swapped.
model = VQAModel.createModelXception((size2, size1, 3))
for file in os.listdir(directory):
    filename = os.fsdecode(file)
    if filename.endswith(".jpg"):
        imgPath = os.path.join(imgDir, filename)
        id = int(filename[-16:-4])
        img = load_img(imgPath)
        width, height = img.size
        if(width < height):
            img = img.resize((size1, size2), resample=Image.BICUBIC)
            img_array = img_to_array(img)
            img_array = preprocess_input(img_array)
            img_array = np.expand_dims(img_array, axis=0)
            predictions = model.predict(img_array)
            pred = predictions[0].reshape(24,2048)
            np.save(imgDir+saveDir+"/"+str(id), pred)
            if i % 1000 == 0:
                print(i)
i += 1 | true | true |
1c46b394ee538fa30ae70b23a0b2eab1f2c3432d | 554 | py | Python | fn_isitPhishing/fn_isitPhishing/lib/isitphishing_util.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2020-08-25T03:43:07.000Z | 2020-08-25T03:43:07.000Z | fn_isitPhishing/fn_isitPhishing/lib/isitphishing_util.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2019-07-08T16:57:48.000Z | 2019-07-08T16:57:48.000Z | fn_isitPhishing/fn_isitPhishing/lib/isitphishing_util.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | null | null | null | import sys
import base64
def get_license_key(name, license):
    """Return the Base64 auth token for the Vade Secure API.

    The key material is '<CUSTOMER_NAME>:<CUSTOMER_LICENSE>' as provided
    by Vade Secure; it must be Base64-encoded before use.
    """
    credentials = u'{0}:{1}'.format(name, license)
    # Encoding differs between Python 2 and 3.
    if sys.version_info[0] != 2:
        return base64.b64encode(bytes(credentials, 'ascii')).decode('ascii')
    # Legacy Python 2 path.
    return base64.b64encode(bytes(credentials).encode("utf-8"))
import base64
def get_license_key(name, license):
    """Base64-encode the '<name>:<license>' pair into an auth token."""
    url_key = u'{0}:{1}'.format(name, license)
    if sys.version_info[0] == 2:
        # Legacy Python 2 path.
        return base64.b64encode(bytes(url_key).encode("utf-8"))
    return base64.b64encode(bytes(url_key, 'ascii')).decode('ascii')
1c46b5a4c2eb213dddaa023db5903639152bb058 | 110 | py | Python | padaquant/__init__.py | felipm13/PadaQuant | 09c13d60dee2a75488e101391ab09e9845a66cb5 | [
"MIT"
] | 1 | 2019-06-21T01:13:29.000Z | 2019-06-21T01:13:29.000Z | padaquant/__init__.py | felipm13/PadaQuant | 09c13d60dee2a75488e101391ab09e9845a66cb5 | [
"MIT"
] | null | null | null | padaquant/__init__.py | felipm13/PadaQuant | 09c13d60dee2a75488e101391ab09e9845a66cb5 | [
"MIT"
] | null | null | null | import sys
from padaquant.asset_manager import asset_manager
from padaquant.blackscholes import blackscholes
| 22 | 49 | 0.881818 | import sys
from padaquant.asset_manager import asset_manager
from padaquant.blackscholes import blackscholes
| true | true |
1c46b5bee90335b45c1737463373c781e1e0b924 | 1,811 | py | Python | python/ray/tests/test_scheduling_2.py | daobook/ray | af9f1ef4dc160e0671206556b387f8017f3c3930 | [
"Apache-2.0"
] | 33 | 2020-05-27T14:25:24.000Z | 2022-03-22T06:11:30.000Z | python/ray/tests/test_scheduling_2.py | daobook/ray | af9f1ef4dc160e0671206556b387f8017f3c3930 | [
"Apache-2.0"
] | 115 | 2021-01-19T04:40:50.000Z | 2022-03-26T07:09:00.000Z | python/ray/tests/test_scheduling_2.py | daobook/ray | af9f1ef4dc160e0671206556b387f8017f3c3930 | [
"Apache-2.0"
] | 5 | 2020-08-06T15:53:07.000Z | 2022-02-09T03:31:31.000Z | import numpy as np
import platform
import pytest
import sys
import time
import ray
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Failing on Windows. Multi node.")
def test_load_balancing_under_constrained_memory(ray_start_cluster):
    # This test ensures that tasks are being assigned to all raylets in a
    # roughly equal manner even when the tasks have dependencies.
    cluster = ray_start_cluster
    num_nodes = 3
    num_cpus = 4
    object_size = 4e7
    num_tasks = 100
    # Worker nodes: memory/object store sized to (num_cpus - 2) objects each,
    # so no single node can hold every dependency.
    for _ in range(num_nodes):
        cluster.add_node(
            num_cpus=num_cpus,
            memory=(num_cpus - 2) * object_size,
            object_store_memory=(num_cpus - 2) * object_size)
    # Dedicated "custom" node, large enough to hold all dependency objects.
    cluster.add_node(
        num_cpus=0,
        resources={"custom": 1},
        memory=(num_tasks + 1) * object_size,
        object_store_memory=(num_tasks + 1) * object_size)
    ray.init(address=cluster.address)
    @ray.remote(num_cpus=0, resources={"custom": 1})
    def create_object():
        # ~40 MB payload pinned to the "custom" node.
        return np.zeros(int(object_size), dtype=np.uint8)
    @ray.remote
    def f(i, x):
        # Report which raylet ran the task so balancing can be eyeballed.
        print(i, ray.worker.global_worker.node.unique_id)
        time.sleep(0.1)
        return ray.worker.global_worker.node.unique_id
    # NOTE(review): this first batch of deps is created and printed but never
    # consumed below — presumably a warm-up/debugging aid; confirm before
    # removing.
    deps = [create_object.remote() for _ in range(num_tasks)]
    for i, dep in enumerate(deps):
        print(i, dep)
    # TODO(swang): Actually test load balancing. Load balancing is currently
    # flaky on Travis, probably due to the scheduling policy ping-ponging
    # waiting tasks.
    deps = [create_object.remote() for _ in range(num_tasks)]
    tasks = [f.remote(i, dep) for i, dep in enumerate(deps)]
    for i, dep in enumerate(deps):
        print(i, dep)
    ray.get(tasks)
# Allow running this test module directly (outside the pytest CLI).
if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
| 30.694915 | 77 | 0.663722 | import numpy as np
import platform
import pytest
import sys
import time
import ray
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Failing on Windows. Multi node.")
def test_load_balancing_under_constrained_memory(ray_start_cluster):
    """Dependent tasks should spread roughly evenly across raylets even
    when each worker node's object store is too small to hold all deps."""
    cluster = ray_start_cluster
    num_nodes = 3
    num_cpus = 4
    object_size = 4e7
    num_tasks = 100
    # Worker nodes hold at most (num_cpus - 2) dependency objects each.
    for _ in range(num_nodes):
        cluster.add_node(
            num_cpus=num_cpus,
            memory=(num_cpus - 2) * object_size,
            object_store_memory=(num_cpus - 2) * object_size)
    # Dedicated "custom" node, large enough for every dependency object.
    cluster.add_node(
        num_cpus=0,
        resources={"custom": 1},
        memory=(num_tasks + 1) * object_size,
        object_store_memory=(num_tasks + 1) * object_size)
    ray.init(address=cluster.address)
    @ray.remote(num_cpus=0, resources={"custom": 1})
    def create_object():
        # ~40 MB payload pinned to the "custom" node.
        return np.zeros(int(object_size), dtype=np.uint8)
    @ray.remote
    def f(i, x):
        # Report which raylet ran the task.
        print(i, ray.worker.global_worker.node.unique_id)
        time.sleep(0.1)
        return ray.worker.global_worker.node.unique_id
    # NOTE(review): this first batch of deps is never consumed below —
    # presumably a warm-up/debugging aid; confirm before removing.
    deps = [create_object.remote() for _ in range(num_tasks)]
    for i, dep in enumerate(deps):
        print(i, dep)
    deps = [create_object.remote() for _ in range(num_tasks)]
    tasks = [f.remote(i, dep) for i, dep in enumerate(deps)]
    for i, dep in enumerate(deps):
        print(i, dep)
    ray.get(tasks)
# Allow running this test module directly (outside the pytest CLI).
if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
| true | true |
1c46b5cfbdc2bcd213cc2381fa6bb4cc7a0d00c3 | 323 | py | Python | tests/naip/test_stac.py | lossyrob/stactools | 68f416de38d91738a62c1b090a9c40cc2e56a9f6 | [
"Apache-2.0"
] | 1 | 2022-03-28T19:13:53.000Z | 2022-03-28T19:13:53.000Z | tests/naip/test_stac.py | lossyrob/stactools | 68f416de38d91738a62c1b090a9c40cc2e56a9f6 | [
"Apache-2.0"
] | 3 | 2021-08-12T18:06:50.000Z | 2022-03-29T14:20:33.000Z | tests/test_stac.py | stactools-packages/naip | 1f13cc86664436a10f7942ab06547f7e3d8b8928 | [
"Apache-2.0"
] | null | null | null | import unittest
from stactools.naip.stac import create_collection
class StacTest(unittest.TestCase):
    """Smoke test for the NAIP STAC collection factory."""

    def test_create_collection(self):
        """A collection built for several seasons validates cleanly."""
        naip_collection = create_collection(seasons=[2011, 2013, 2015, 2017, 2019])
        naip_collection.set_self_href('http://example.com/collection.json')
        naip_collection.validate()
| 26.916667 | 78 | 0.739938 | import unittest
from stactools.naip.stac import create_collection
class StacTest(unittest.TestCase):
    """Smoke test for the NAIP STAC collection factory."""
    def test_create_collection(self):
        # A collection built for several seasons should validate cleanly.
        collection = create_collection(seasons=[2011, 2013, 2015, 2017, 2019])
        collection.set_self_href('http://example.com/collection.json')
        collection.validate()
| true | true |
1c46b5f9d0a2b7779bfbb2eb9b3e116a5cd194b6 | 493 | py | Python | Lib/site-packages/plotly/validators/scattercarpet/_uid.py | tytanya/my-first-blog | 2b40adb0816c3546e90ad6ca1e7fb50d924c1536 | [
"bzip2-1.0.6"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/scattercarpet/_uid.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2021-03-18T22:27:08.000Z | 2022-03-11T23:40:50.000Z | plotly/validators/scattercarpet/_uid.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the scattercarpet trace's ``uid`` attribute."""

    def __init__(self, plotly_name='uid', parent_name='scattercarpet', **kwargs):
        # Defaults apply only when the caller did not override them.
        anim = kwargs.pop('anim', True)
        edit_type = kwargs.pop('edit_type', 'plot')
        role = kwargs.pop('role', 'info')
        super(UidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            anim=anim,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| 29 | 70 | 0.614604 | import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the scattercarpet trace's ``uid`` attribute."""
    def __init__(
        self, plotly_name='uid', parent_name='scattercarpet', **kwargs
    ):
        # Pop defaults so explicit caller overrides win.
        super(UidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            anim=kwargs.pop('anim', True),
            edit_type=kwargs.pop('edit_type', 'plot'),
            role=kwargs.pop('role', 'info'),
            **kwargs
        )
| true | true |
1c46b67a3322491426a8dcefbb023986ece49b17 | 26,977 | py | Python | src/olympia/activity/models.py | elyse0/addons-server | 44fa4946b4b82f7003687b590b8c82c10c418e9e | [
"BSD-3-Clause"
] | null | null | null | src/olympia/activity/models.py | elyse0/addons-server | 44fa4946b4b82f7003687b590b8c82c10c418e9e | [
"BSD-3-Clause"
] | 760 | 2021-05-17T07:59:30.000Z | 2022-03-31T11:14:15.000Z | src/olympia/activity/models.py | championshuttler/addons-server | 5d4c1bfbed2fc509ecc1f3f5065955996e057eeb | [
"BSD-3-Clause"
] | null | null | null | import json
import string
import uuid
from collections import defaultdict
from copy import copy
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext
import jinja2
import olympia.core.logger
from olympia import amo, constants
from olympia.access.models import Group
from olympia.addons.models import Addon
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import BaseQuerySet, ManagerBase, ModelBase
from olympia.bandwagon.models import Collection
from olympia.blocklist.models import Block
from olympia.files.models import File
from olympia.ratings.models import Rating
from olympia.reviewers.models import CannedResponse
from olympia.tags.models import Tag
from olympia.users.models import UserProfile
from olympia.users.templatetags.jinja_helpers import user_link
from olympia.versions.models import Version
log = olympia.core.logger.getLogger('z.amo.activity')
# Number of times an ActivityLogToken can be used before it expires.
MAX_TOKEN_USE_COUNT = 100
class ActivityLogToken(ModelBase):
    """Token allowing a (user, version) pair to reply to activity emails.

    Each token has a bounded number of uses (MAX_TOKEN_USE_COUNT)."""
    id = PositiveAutoField(primary_key=True)
    version = models.ForeignKey(Version, related_name='token', on_delete=models.CASCADE)
    user = models.ForeignKey(
        'users.UserProfile',
        related_name='activity_log_tokens',
        on_delete=models.CASCADE,
    )
    uuid = models.UUIDField(default=uuid.uuid4, unique=True)
    use_count = models.IntegerField(
        default=0, help_text='Stores the number of times the token has been used'
    )
    class Meta:
        db_table = 'log_activity_tokens'
        constraints = [
            models.UniqueConstraint(fields=('version', 'user'), name='version_id'),
        ]
    def is_expired(self):
        """True once the token has been used MAX_TOKEN_USE_COUNT times."""
        return self.use_count >= MAX_TOKEN_USE_COUNT
    def is_valid(self):
        """True if not expired and the version is still the latest one in
        its channel (soft-deleted versions are not excluded)."""
        return (
            not self.is_expired()
            and self.version
            == self.version.addon.find_latest_version(
                channel=self.version.channel, exclude=()
            )
        )
    def expire(self):
        """Force-expire the token by maxing out its use count."""
        self.update(use_count=MAX_TOKEN_USE_COUNT)
    def increment_use(self):
        """Atomically bump use_count in the DB (via an F() expression to
        avoid races), then mirror the new value on this instance."""
        self.__class__.objects.filter(pk=self.pk).update(
            use_count=models.expressions.F('use_count') + 1
        )
        self.use_count = self.use_count + 1
class ActivityLogEmails(ModelBase):
    """A log of message ids of incoming emails so we don't duplicate process
    them."""
    # Unique constraint is what actually prevents double-processing.
    messageid = models.CharField(max_length=255, unique=True)
    class Meta:
        db_table = 'log_activity_emails'
class AddonLog(ModelBase):
    """
    This table is for indexing the activity log by addon.
    """
    addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
    class Meta:
        db_table = 'log_activity_addon'
        ordering = ('-created',)
    def transfer(self, new_addon):
        """Re-point this log entry (and the serialized arguments inside its
        ActivityLog) from the current addon to ``new_addon``.

        Returns None without changing anything when the stored arguments
        cannot be deserialized."""
        try:
            # arguments is a structure:
            # ``arguments = [{'addons.addon':12}, {'addons.addon':1}, ... ]``
            arguments = json.loads(self.activity_log._arguments)
        except Exception:
            log.info(
                'unserializing data from addon_log failed: %s' % self.activity_log.id
            )
            return None
        new_arguments = []
        for item in arguments:
            # Only rewrite references to *this* addon; keep everything else.
            if item.get('addons.addon', 0) == self.addon.id:
                new_arguments.append({'addons.addon': new_addon.id})
            else:
                new_arguments.append(item)
        self.activity_log.update(_arguments=json.dumps(new_arguments))
        self.update(addon=new_addon)
class CommentLog(ModelBase):
    """
    This table is for indexing the activity log by comment.
    """
    activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
    comments = models.TextField()
    class Meta:
        db_table = 'log_activity_comment'
        ordering = ('-created',)
class VersionLog(ModelBase):
    """
    This table is for indexing the activity log by version.
    """
    activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
    version = models.ForeignKey(Version, on_delete=models.CASCADE)
    class Meta:
        db_table = 'log_activity_version'
        ordering = ('-created',)
class UserLog(ModelBase):
    """
    This table is for indexing the activity log by user.
    Note: This includes activity performed unto the user.
    """
    activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    class Meta:
        db_table = 'log_activity_user'
        ordering = ('-created',)
class GroupLog(ModelBase):
    """
    This table is for indexing the activity log by access group.
    """
    id = PositiveAutoField(primary_key=True)
    activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
    group = models.ForeignKey(Group, on_delete=models.CASCADE)
    class Meta:
        db_table = 'log_activity_group'
        ordering = ('-created',)
class BlockLog(ModelBase):
    """
    This table is for indexing the activity log by Blocklist Block.
    """
    id = PositiveAutoField(primary_key=True)
    # The Block may be deleted later; the guid column keeps the reference
    # usable after that (hence SET_NULL + a separate non-null guid).
    activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
    block = models.ForeignKey(Block, on_delete=models.SET_NULL, null=True)
    guid = models.CharField(max_length=255, null=False)
    class Meta:
        db_table = 'log_activity_block'
        ordering = ('-created',)
class IPLog(ModelBase):
    """
    This table is for indexing the activity log by IP (only for specific
    actions).
    """
    activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
    # 45 chars fits the longest textual IPv6 form (incl. IPv4-mapped).
    ip_address = models.CharField(max_length=45)
    class Meta:
        db_table = 'log_activity_ip'
        ordering = ('-created',)
class DraftComment(ModelBase):
    """A model that allows us to draft comments for reviews before we have
    an ActivityLog instance ready.
    This is being used by the commenting API by the code-manager.
    """
    id = PositiveAutoField(primary_key=True)
    version = models.ForeignKey(Version, on_delete=models.CASCADE)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    # Optional anchor: the file and line the comment refers to.
    filename = models.CharField(max_length=255, null=True, blank=True)
    lineno = models.PositiveIntegerField(null=True)
    canned_response = models.ForeignKey(
        CannedResponse, null=True, default=None, on_delete=models.SET_DEFAULT
    )
    comment = models.TextField(blank=True)
    class Meta:
        db_table = 'log_activity_comment_draft'
class ActivityLogQuerySet(BaseQuerySet):
    def default_transformer(self, logs):
        # Batch-resolve the serialized arguments for the whole queryset at
        # once instead of one row at a time (see ActivityLog.arguments).
        ActivityLog.arguments_builder(logs)
class ActivityLogManager(ManagerBase):
    """Manager exposing the common activity-log filters and reviewer
    leaderboard queries."""
    _queryset_class = ActivityLogQuerySet
    def get_queryset(self):
        # Attach the batch arguments transformer and prefetch users, since
        # nearly every consumer renders the acting user.
        qs = super().get_queryset()
        qs = qs.transform(qs.default_transformer).prefetch_related('user')
        return qs
    def for_addons(self, addons):
        # Accept a single Addon or an iterable of them.
        if isinstance(addons, Addon):
            addons = (addons,)
        return self.filter(addonlog__addon__in=addons)
    def for_versions(self, versions):
        # Accept a single Version or an iterable of them.
        if isinstance(versions, Version):
            versions = (versions,)
        return self.filter(versionlog__version__in=versions)
    def for_groups(self, groups):
        # Accept a single Group or an iterable of them.
        if isinstance(groups, Group):
            groups = (groups,)
        return self.filter(grouplog__group__in=groups)
    def for_user(self, user):
        return self.filter(userlog__user=user)
    def for_block(self, block):
        return self.filter(blocklog__block=block)
    def for_guidblock(self, guid):
        # Lookup by guid survives deletion of the Block row itself.
        return self.filter(blocklog__guid=guid)
    def for_developer(self):
        # Hide admin-only and reviewer-internal actions from developers.
        return self.exclude(
            action__in=constants.activity.LOG_ADMINS
            + constants.activity.LOG_HIDE_DEVELOPER
        )
    def admin_events(self):
        return self.filter(action__in=constants.activity.LOG_ADMINS)
    def moderation_events(self):
        return self.filter(action__in=constants.activity.LOG_RATING_MODERATION)
    def review_queue(self):
        # Review-queue actions, excluding those performed by the task user.
        qs = self._by_type()
        return qs.filter(action__in=constants.activity.LOG_REVIEW_QUEUE).exclude(
            user__id=settings.TASK_USER_ID
        )
    def review_log(self):
        # Actual review decisions, excluding those made by the task user.
        qs = self._by_type()
        return qs.filter(
            action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION
        ).exclude(user__id=settings.TASK_USER_ID)
    def total_ratings(self, theme=False):
        """Return the top users, and their # of reviews."""
        qs = self._by_type()
        action_ids = (
            [amo.LOG.THEME_REVIEW.id]
            if theme
            else constants.activity.LOG_REVIEWER_REVIEW_ACTION
        )
        return (
            qs.values('user', 'user__display_name', 'user__username')
            .filter(action__in=action_ids)
            .exclude(user__id=settings.TASK_USER_ID)
            .annotate(approval_count=models.Count('id'))
            .order_by('-approval_count')
        )
    def monthly_reviews(self, theme=False):
        """Return the top users for the month, and their # of reviews."""
        qs = self._by_type()
        now = datetime.now()
        # First day of the current month.
        created_date = datetime(now.year, now.month, 1)
        actions = (
            [constants.activity.LOG.THEME_REVIEW.id]
            if theme
            else constants.activity.LOG_REVIEWER_REVIEW_ACTION
        )
        return (
            qs.values('user', 'user__display_name', 'user__username')
            .filter(created__gte=created_date, action__in=actions)
            .exclude(user__id=settings.TASK_USER_ID)
            .annotate(approval_count=models.Count('id'))
            .order_by('-approval_count')
        )
    def user_approve_reviews(self, user):
        """All review actions performed by the given user."""
        qs = self._by_type()
        return qs.filter(
            action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION, user__id=user.id
        )
    def current_month_user_approve_reviews(self, user):
        """The given user's review actions since the start of this month."""
        now = datetime.now()
        ago = datetime(now.year, now.month, 1)
        return self.user_approve_reviews(user).filter(created__gte=ago)
    def user_position(self, values_qs, user):
        """1-based rank of ``user`` in an ordered values queryset, or None
        if the user does not appear in it."""
        try:
            return (
                next(
                    i
                    for (i, d) in enumerate(list(values_qs))
                    if d.get('user') == user.id
                )
                + 1
            )
        except StopIteration:
            return None
    def total_ratings_user_position(self, user, theme=False):
        return self.user_position(self.total_ratings(theme), user)
    def monthly_reviews_user_position(self, user, theme=False):
        return self.user_position(self.monthly_reviews(theme), user)
    def _by_type(self):
        # Restrict to logs that have an AddonLog row by joining the
        # log_activity_addon table via a raw extra() clause.
        qs = self.get_queryset()
        table = 'log_activity_addon'
        return qs.extra(
            tables=[table], where=['%s.activity_log_id=%s.id' % (table, 'log_activity')]
        )
class SafeFormatter(string.Formatter):
    """A str.format() drop-in whose interpolated values are HTML-escaped."""

    def get_field(self, *args, **kw):
        """Resolve the field as usual, then escape the value before it gets
        interpolated into the template."""
        value, used_key = super().get_field(*args, **kw)
        return jinja2.escape(value), used_key
class ActivityLog(ModelBase):
    # (action id, constant name) choices derived from the LOG_* constants.
    TYPES = sorted(
        [(value.id, key) for key, value in constants.activity.LOG_BY_ID.items()]
    )
    user = models.ForeignKey('users.UserProfile', null=True, on_delete=models.SET_NULL)
    action = models.SmallIntegerField(choices=TYPES)
    # JSON-serialized argument/detail payloads; access via the
    # arguments/details properties rather than directly.
    _arguments = models.TextField(blank=True, db_column='arguments')
    _details = models.TextField(blank=True, db_column='details')
    objects = ActivityLogManager()
    formatter = SafeFormatter()
    class Meta:
        db_table = 'log_activity'
        ordering = ('-created',)
        indexes = [
            models.Index(fields=('action',), name='log_activity_1bd4707b'),
            models.Index(fields=('created',), name='created_idx'),
        ]
def f(self, *args, **kw):
"""Calls SafeFormatter.format and returns a Markup string."""
# SafeFormatter escapes everything so this is safe.
return jinja2.Markup(self.formatter.format(*args, **kw))
    @classmethod
    def arguments_builder(cls, activities):
        """Resolve the serialized arguments of many logs in one batch,
        setting ``activity.arguments`` on each (overriding the
        ``arguments`` cached_property fallback)."""
        def handle_renames(value):
            # Cope with renames of key models (use the original model name like
            # it was in the ActivityLog as the key so that we can find it
            # later)
            return 'ratings.rating' if value == 'reviews.review' else value
        # We need to do 2 passes on each log:
        # - The first time, gather the references to every instance we need
        # - The second time, we built querysets for all instances of the same
        #   type, pick data from that queryset.
        #
        # Because it relies on in_bulk(), this method needs the pks to be of a
        # consistent type, which doesn't appear to be guaranteed in our
        # existing data. For this reason, it forces a conversion to int. If we
        # ever want to store ActivityLog items pointing to models using a non
        # integer PK field, we'll need to make this a little smarter.
        instances_to_load = defaultdict(list)
        instances = {}
        for activity in activities:
            try:
                # `arguments_data` will be a list of dicts like:
                # `[{'addons.addon':12}, {'addons.addon':1}, ... ]`
                activity.arguments_data = json.loads(activity._arguments)
            except Exception as e:
                log.info('unserializing data from activity_log failed: %s', activity.id)
                log.info(e)
                activity.arguments_data = []
            for item in activity.arguments_data:
                # Each 'item' should have one key and one value only.
                name, pk = list(item.items())[0]
                if name not in ('str', 'int', 'null') and pk:
                    # Convert pk to int to have consistent data for when we
                    # call .in_bulk() later.
                    name = handle_renames(name)
                    instances_to_load[name].append(int(pk))
        # At this point, instances_to_load is a dict of "names" that
        # each have a bunch of pks we want to load.
        for name, pks in instances_to_load.items():
            (app_label, model_name) = name.split('.')
            model = apps.get_model(app_label, model_name)
            # Load the instances, avoiding transformers other than translations
            # and coping with soft-deleted models and unlisted add-ons.
            qs = model.get_unfiltered_manager().all()
            if hasattr(qs, 'only_translations'):
                qs = qs.only_translations()
            instances[name] = qs.in_bulk(pks)
        # instances is now a dict of "model names" that each have a dict of
        # {pk: instance}. We do our second pass on the logs to build the
        # "arguments" property from that data, which is a list of the instances
        # that each particular log has, in the correct order.
        for activity in activities:
            objs = []
            # We preloaded that property earlier
            for item in activity.arguments_data:
                # As above, each 'item' should have one key and one value only.
                name, pk = list(item.items())[0]
                if name in ('str', 'int', 'null'):
                    # It's not actually a model reference, just return the
                    # value directly.
                    objs.append(pk)
                elif pk:
                    # Fetch the instance from the cache we built.
                    name = handle_renames(name)
                    obj = instances[name].get(int(pk))
                    # Most of the time, we're eventually going to call
                    # to_string() on each ActivityLog that we're processing
                    # here. For some of the models, that will result in a call
                    # to <model>.get_absolute_url(), which in turn can cause an
                    # extra SQL query because some parent model is needed to
                    # build the URL.
                    # It's difficult to predict what we'll need as ActivitLog
                    # is fairly generic, but we know Addon is going to be
                    # needed in some cases for sure (Version, Rating) so if
                    # we're dealing with objects that have an `addon_id`
                    # property, and we have already fetched the corresponding
                    # Addon instance, set the `addon` property on the object
                    # to the Addon instance we already have to avoid the extra
                    # SQL query.
                    addon_id = getattr(obj, 'addon_id', None)
                    if addon := instances.get('addons.addon', {}).get(addon_id):
                        obj.addon = addon
                    objs.append(obj)
            # Override the arguments cached_property with what we got.
            activity.arguments = objs
    @cached_property
    def arguments(self):
        # This is a fallback : in 99% of the cases we should not be using this
        # but go through the default transformer instead, which executes
        # arguments_builder on the whole list of items in the queryset,
        # allowing us to fetch the instances in arguments in an optimized
        # manner.
        # arguments_builder() assigns `self.arguments = objs`, shadowing this
        # cached_property, so the re-read below returns the built list.
        self.arguments_builder([self])
        return self.arguments
def set_arguments(self, args=None):
"""
Takes an object or a tuple of objects and serializes them and stores it
in the db as a json string.
"""
if args is None:
args = []
if not isinstance(args, (list, tuple)):
args = (args,)
serialize_me = []
for arg in args:
if isinstance(arg, str):
serialize_me.append({'str': arg})
elif isinstance(arg, int):
serialize_me.append({'int': arg})
elif isinstance(arg, tuple):
# Instead of passing an addon instance you can pass a tuple:
# (Addon, 3) for Addon with pk=3
serialize_me.append(dict(((str(arg[0]._meta), arg[1]),)))
else:
serialize_me.append(dict(((str(arg._meta), arg.pk),)))
self._arguments = json.dumps(serialize_me)
    @property
    def details(self):
        # Deserialized view of the JSON `details` column; implicitly returns
        # None when the column is empty.
        if self._details:
            return json.loads(self._details)
    @details.setter
    def details(self, data):
        self._details = json.dumps(data)
    @property
    def log(self):
        # The LOG_* constant (action metadata) matching this row's action id.
        return constants.activity.LOG_BY_ID[self.action]
    def to_string(self, type_=None):
        """Render this log entry as escaped HTML markup.

        ``type_`` selects an alternate '<type_>_format' template on the LOG
        constant when one exists. Recognized argument instances (Addon,
        Rating, Version, ...) are consumed into named placeholders; leftovers
        are passed positionally. Falls back to a generic message when the
        stored arguments don't match the template."""
        log_type = constants.activity.LOG_BY_ID[self.action]
        if type_ and hasattr(log_type, '%s_format' % type_):
            format = getattr(log_type, '%s_format' % type_)
        else:
            format = log_type.format
        # We need to copy arguments so we can remove elements from it
        # while we loop over self.arguments.
        arguments = copy(self.arguments)
        addon = None
        rating = None
        version = None
        collection = None
        tag = None
        group = None
        file_ = None
        status = None
        # Only the first instance of each recognized type is captured.
        for arg in self.arguments:
            if isinstance(arg, Addon) and not addon:
                if arg.has_listed_versions():
                    addon = self.f(
                        '<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.name
                    )
                else:
                    # Unlisted-only add-ons have no public page to link to.
                    addon = self.f('{0}', arg.name)
                arguments.remove(arg)
            if isinstance(arg, Rating) and not rating:
                rating = self.f(
                    '<a href="{0}">{1}</a>', arg.get_absolute_url(), gettext('Review')
                )
                arguments.remove(arg)
            if isinstance(arg, Version) and not version:
                text = gettext('Version {0}')
                if arg.channel == amo.RELEASE_CHANNEL_LISTED:
                    version = self.f(
                        '<a href="{1}">%s</a>' % text,
                        arg.version,
                        arg.get_absolute_url(),
                    )
                else:
                    version = self.f(text, arg.version)
                arguments.remove(arg)
            if isinstance(arg, Collection) and not collection:
                collection = self.f(
                    '<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.name
                )
                arguments.remove(arg)
            if isinstance(arg, Tag) and not tag:
                # NOTE(review): unlike the other branches, Tag is not removed
                # from `arguments` — confirm whether that is intentional.
                if arg.can_reverse():
                    tag = self.f(
                        '<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.tag_text
                    )
                else:
                    tag = self.f('{0}', arg.tag_text)
            if isinstance(arg, Group) and not group:
                group = arg.name
                arguments.remove(arg)
            if isinstance(arg, File) and not file_:
                validation = 'passed'
                if self.action in (
                    amo.LOG.UNLISTED_SIGNED.id,
                    amo.LOG.UNLISTED_SIGNED_VALIDATION_FAILED.id,
                ):
                    validation = 'ignored'
                file_ = self.f(
                    '<a href="{0}">{1}</a> (validation {2})',
                    arg.get_absolute_url(),
                    arg.filename,
                    validation,
                )
                arguments.remove(arg)
            if self.action == amo.LOG.CHANGE_STATUS.id and not isinstance(arg, Addon):
                # Unfortunately, this action has been abused in the past and
                # the non-addon argument could be a string or an int. If it's
                # an int, we want to retrieve the string and translate it.
                if isinstance(arg, int) and arg in amo.STATUS_CHOICES_ADDON:
                    status = gettext(amo.STATUS_CHOICES_ADDON[arg])
                else:
                    # It's not an int or not one of the choices, so assume it's
                    # a string or an unknown int we want to display as-is.
                    status = arg
                arguments.remove(arg)
        user = user_link(self.user)
        try:
            kw = {
                'addon': addon,
                'rating': rating,
                'version': version,
                'collection': collection,
                'tag': tag,
                'user': user,
                'group': group,
                'file': file_,
                'status': status,
            }
            return self.f(str(format), *arguments, **kw)
        except (AttributeError, KeyError, IndexError):
            # Arguments don't fit the template — log and degrade gracefully.
            log.warning('%d contains garbage data' % (self.id or 0))
            return 'Something magical happened.'
def __str__(self):
return self.to_string()
def __html__(self):
return self
@property
def author_name(self):
"""Name of the user that triggered the activity.
If it's a reviewer action that will be shown to developers, the
`reviewer_name` property is used if present, otherwise `name` is
used."""
if self.action in constants.activity.LOG_REVIEW_QUEUE_DEVELOPER:
return self.user.reviewer_name or self.user.name
return self.user.name
@classmethod
def create(cls, action, *args, **kw):
"""
e.g. ActivityLog.create(amo.LOG.CREATE_ADDON, addon),
ActivityLog.create(amo.LOG.ADD_FILE_TO_VERSION, file, version)
In case of circular import you can use `olympia.activity.log_create()`
"""
from olympia import core
user = kw.get('user', core.get_user())
if not user:
log.warning('Activity log called with no user: %s' % action.id)
return
# We make sure that we take the timestamp if provided, instead of
# creating a new one, especially useful for log entries created
# in a loop.
al = ActivityLog(
user=user, action=action.id, created=kw.get('created', timezone.now())
)
al.set_arguments(args)
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog.objects.create(
comments=al.details['comments'],
activity_log=al,
created=kw.get('created', timezone.now()),
)
for arg in args:
if isinstance(arg, tuple):
class_ = arg[0]
id_ = arg[1]
else:
class_ = arg.__class__
id_ = arg.id if isinstance(arg, ModelBase) else None
if class_ == Addon:
AddonLog.objects.create(
addon_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Version:
VersionLog.objects.create(
version_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == UserProfile:
UserLog.objects.create(
user_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Group:
GroupLog.objects.create(
group_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Block:
BlockLog.objects.create(
block_id=id_,
activity_log=al,
guid=arg.guid,
created=kw.get('created', timezone.now()),
)
if getattr(action, 'store_ip', False):
# Index specific actions by their IP address. Note that the caller
# must take care of overriding remote addr if the action is created
# from a task.
IPLog.objects.create(
ip_address=core.get_remote_addr(),
activity_log=al,
created=kw.get('created', timezone.now()),
)
# Index by every user
UserLog.objects.create(
activity_log=al, user=user, created=kw.get('created', timezone.now())
)
return al
| 35.87367 | 88 | 0.586907 | import json
import string
import uuid
from collections import defaultdict
from copy import copy
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext
import jinja2
import olympia.core.logger
from olympia import amo, constants
from olympia.access.models import Group
from olympia.addons.models import Addon
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import BaseQuerySet, ManagerBase, ModelBase
from olympia.bandwagon.models import Collection
from olympia.blocklist.models import Block
from olympia.files.models import File
from olympia.ratings.models import Rating
from olympia.reviewers.models import CannedResponse
from olympia.tags.models import Tag
from olympia.users.models import UserProfile
from olympia.users.templatetags.jinja_helpers import user_link
from olympia.versions.models import Version
log = olympia.core.logger.getLogger('z.amo.activity')
MAX_TOKEN_USE_COUNT = 100
class ActivityLogToken(ModelBase):
id = PositiveAutoField(primary_key=True)
version = models.ForeignKey(Version, related_name='token', on_delete=models.CASCADE)
user = models.ForeignKey(
'users.UserProfile',
related_name='activity_log_tokens',
on_delete=models.CASCADE,
)
uuid = models.UUIDField(default=uuid.uuid4, unique=True)
use_count = models.IntegerField(
default=0, help_text='Stores the number of times the token has been used'
)
class Meta:
db_table = 'log_activity_tokens'
constraints = [
models.UniqueConstraint(fields=('version', 'user'), name='version_id'),
]
def is_expired(self):
return self.use_count >= MAX_TOKEN_USE_COUNT
def is_valid(self):
return (
not self.is_expired()
and self.version
== self.version.addon.find_latest_version(
channel=self.version.channel, exclude=()
)
)
def expire(self):
self.update(use_count=MAX_TOKEN_USE_COUNT)
def increment_use(self):
self.__class__.objects.filter(pk=self.pk).update(
use_count=models.expressions.F('use_count') + 1
)
self.use_count = self.use_count + 1
class ActivityLogEmails(ModelBase):
messageid = models.CharField(max_length=255, unique=True)
class Meta:
db_table = 'log_activity_emails'
class AddonLog(ModelBase):
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_addon'
ordering = ('-created',)
def transfer(self, new_addon):
try:
arguments = json.loads(self.activity_log._arguments)
except Exception:
log.info(
'unserializing data from addon_log failed: %s' % self.activity_log.id
)
return None
new_arguments = []
for item in arguments:
if item.get('addons.addon', 0) == self.addon.id:
new_arguments.append({'addons.addon': new_addon.id})
else:
new_arguments.append(item)
self.activity_log.update(_arguments=json.dumps(new_arguments))
self.update(addon=new_addon)
class CommentLog(ModelBase):
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
comments = models.TextField()
class Meta:
db_table = 'log_activity_comment'
ordering = ('-created',)
class VersionLog(ModelBase):
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
version = models.ForeignKey(Version, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_version'
ordering = ('-created',)
class UserLog(ModelBase):
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_user'
ordering = ('-created',)
class GroupLog(ModelBase):
id = PositiveAutoField(primary_key=True)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
group = models.ForeignKey(Group, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_group'
ordering = ('-created',)
class BlockLog(ModelBase):
id = PositiveAutoField(primary_key=True)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
block = models.ForeignKey(Block, on_delete=models.SET_NULL, null=True)
guid = models.CharField(max_length=255, null=False)
class Meta:
db_table = 'log_activity_block'
ordering = ('-created',)
class IPLog(ModelBase):
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
ip_address = models.CharField(max_length=45)
class Meta:
db_table = 'log_activity_ip'
ordering = ('-created',)
class DraftComment(ModelBase):
id = PositiveAutoField(primary_key=True)
version = models.ForeignKey(Version, on_delete=models.CASCADE)
user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
filename = models.CharField(max_length=255, null=True, blank=True)
lineno = models.PositiveIntegerField(null=True)
canned_response = models.ForeignKey(
CannedResponse, null=True, default=None, on_delete=models.SET_DEFAULT
)
comment = models.TextField(blank=True)
class Meta:
db_table = 'log_activity_comment_draft'
class ActivityLogQuerySet(BaseQuerySet):
def default_transformer(self, logs):
ActivityLog.arguments_builder(logs)
class ActivityLogManager(ManagerBase):
_queryset_class = ActivityLogQuerySet
def get_queryset(self):
qs = super().get_queryset()
qs = qs.transform(qs.default_transformer).prefetch_related('user')
return qs
def for_addons(self, addons):
if isinstance(addons, Addon):
addons = (addons,)
return self.filter(addonlog__addon__in=addons)
def for_versions(self, versions):
if isinstance(versions, Version):
versions = (versions,)
return self.filter(versionlog__version__in=versions)
def for_groups(self, groups):
if isinstance(groups, Group):
groups = (groups,)
return self.filter(grouplog__group__in=groups)
def for_user(self, user):
return self.filter(userlog__user=user)
def for_block(self, block):
return self.filter(blocklog__block=block)
def for_guidblock(self, guid):
return self.filter(blocklog__guid=guid)
def for_developer(self):
return self.exclude(
action__in=constants.activity.LOG_ADMINS
+ constants.activity.LOG_HIDE_DEVELOPER
)
def admin_events(self):
return self.filter(action__in=constants.activity.LOG_ADMINS)
def moderation_events(self):
return self.filter(action__in=constants.activity.LOG_RATING_MODERATION)
def review_queue(self):
qs = self._by_type()
return qs.filter(action__in=constants.activity.LOG_REVIEW_QUEUE).exclude(
user__id=settings.TASK_USER_ID
)
def review_log(self):
qs = self._by_type()
return qs.filter(
action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION
).exclude(user__id=settings.TASK_USER_ID)
def total_ratings(self, theme=False):
qs = self._by_type()
action_ids = (
[amo.LOG.THEME_REVIEW.id]
if theme
else constants.activity.LOG_REVIEWER_REVIEW_ACTION
)
return (
qs.values('user', 'user__display_name', 'user__username')
.filter(action__in=action_ids)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count')
)
def monthly_reviews(self, theme=False):
qs = self._by_type()
now = datetime.now()
created_date = datetime(now.year, now.month, 1)
actions = (
[constants.activity.LOG.THEME_REVIEW.id]
if theme
else constants.activity.LOG_REVIEWER_REVIEW_ACTION
)
return (
qs.values('user', 'user__display_name', 'user__username')
.filter(created__gte=created_date, action__in=actions)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count')
)
def user_approve_reviews(self, user):
qs = self._by_type()
return qs.filter(
action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION, user__id=user.id
)
def current_month_user_approve_reviews(self, user):
now = datetime.now()
ago = datetime(now.year, now.month, 1)
return self.user_approve_reviews(user).filter(created__gte=ago)
def user_position(self, values_qs, user):
try:
return (
next(
i
for (i, d) in enumerate(list(values_qs))
if d.get('user') == user.id
)
+ 1
)
except StopIteration:
return None
def total_ratings_user_position(self, user, theme=False):
return self.user_position(self.total_ratings(theme), user)
def monthly_reviews_user_position(self, user, theme=False):
return self.user_position(self.monthly_reviews(theme), user)
def _by_type(self):
qs = self.get_queryset()
table = 'log_activity_addon'
return qs.extra(
tables=[table], where=['%s.activity_log_id=%s.id' % (table, 'log_activity')]
)
class SafeFormatter(string.Formatter):
def get_field(self, *args, **kw):
obj, used_key = super(SafeFormatter, self).get_field(*args, **kw)
return jinja2.escape(obj), used_key
class ActivityLog(ModelBase):
TYPES = sorted(
[(value.id, key) for key, value in constants.activity.LOG_BY_ID.items()]
)
user = models.ForeignKey('users.UserProfile', null=True, on_delete=models.SET_NULL)
action = models.SmallIntegerField(choices=TYPES)
_arguments = models.TextField(blank=True, db_column='arguments')
_details = models.TextField(blank=True, db_column='details')
objects = ActivityLogManager()
formatter = SafeFormatter()
class Meta:
db_table = 'log_activity'
ordering = ('-created',)
indexes = [
models.Index(fields=('action',), name='log_activity_1bd4707b'),
models.Index(fields=('created',), name='created_idx'),
]
def f(self, *args, **kw):
return jinja2.Markup(self.formatter.format(*args, **kw))
@classmethod
def arguments_builder(cls, activities):
def handle_renames(value):
return 'ratings.rating' if value == 'reviews.review' else value
# existing data. For this reason, it forces a conversion to int. If we
# ever want to store ActivityLog items pointing to models using a non
# integer PK field, we'll need to make this a little smarter.
instances_to_load = defaultdict(list)
instances = {}
for activity in activities:
try:
activity.arguments_data = json.loads(activity._arguments)
except Exception as e:
log.info('unserializing data from activity_log failed: %s', activity.id)
log.info(e)
activity.arguments_data = []
for item in activity.arguments_data:
name, pk = list(item.items())[0]
if name not in ('str', 'int', 'null') and pk:
name = handle_renames(name)
instances_to_load[name].append(int(pk))
for name, pks in instances_to_load.items():
(app_label, model_name) = name.split('.')
model = apps.get_model(app_label, model_name)
qs = model.get_unfiltered_manager().all()
if hasattr(qs, 'only_translations'):
qs = qs.only_translations()
instances[name] = qs.in_bulk(pks)
for activity in activities:
objs = []
for item in activity.arguments_data:
name, pk = list(item.items())[0]
if name in ('str', 'int', 'null'):
# value directly.
objs.append(pk)
elif pk:
# Fetch the instance from the cache we built.
name = handle_renames(name)
obj = instances[name].get(int(pk))
# Most of the time, we're eventually going to call
# here. For some of the models, that will result in a call
# to <model>.get_absolute_url(), which in turn can cause an
# extra SQL query because some parent model is needed to
# build the URL.
# It's difficult to predict what we'll need as ActivitLog
# is fairly generic, but we know Addon is going to be
# needed in some cases for sure (Version, Rating) so if
# we're dealing with objects that have an `addon_id`
addon_id = getattr(obj, 'addon_id', None)
if addon := instances.get('addons.addon', {}).get(addon_id):
obj.addon = addon
objs.append(obj)
activity.arguments = objs
@cached_property
def arguments(self):
self.arguments_builder([self])
return self.arguments
def set_arguments(self, args=None):
if args is None:
args = []
if not isinstance(args, (list, tuple)):
args = (args,)
serialize_me = []
for arg in args:
if isinstance(arg, str):
serialize_me.append({'str': arg})
elif isinstance(arg, int):
serialize_me.append({'int': arg})
elif isinstance(arg, tuple):
serialize_me.append(dict(((str(arg[0]._meta), arg[1]),)))
else:
serialize_me.append(dict(((str(arg._meta), arg.pk),)))
self._arguments = json.dumps(serialize_me)
@property
def details(self):
if self._details:
return json.loads(self._details)
@details.setter
def details(self, data):
self._details = json.dumps(data)
@property
def log(self):
return constants.activity.LOG_BY_ID[self.action]
def to_string(self, type_=None):
log_type = constants.activity.LOG_BY_ID[self.action]
if type_ and hasattr(log_type, '%s_format' % type_):
format = getattr(log_type, '%s_format' % type_)
else:
format = log_type.format
arguments = copy(self.arguments)
addon = None
rating = None
version = None
collection = None
tag = None
group = None
file_ = None
status = None
for arg in self.arguments:
if isinstance(arg, Addon) and not addon:
if arg.has_listed_versions():
addon = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.name
)
else:
addon = self.f('{0}', arg.name)
arguments.remove(arg)
if isinstance(arg, Rating) and not rating:
rating = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), gettext('Review')
)
arguments.remove(arg)
if isinstance(arg, Version) and not version:
text = gettext('Version {0}')
if arg.channel == amo.RELEASE_CHANNEL_LISTED:
version = self.f(
'<a href="{1}">%s</a>' % text,
arg.version,
arg.get_absolute_url(),
)
else:
version = self.f(text, arg.version)
arguments.remove(arg)
if isinstance(arg, Collection) and not collection:
collection = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.name
)
arguments.remove(arg)
if isinstance(arg, Tag) and not tag:
if arg.can_reverse():
tag = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.tag_text
)
else:
tag = self.f('{0}', arg.tag_text)
if isinstance(arg, Group) and not group:
group = arg.name
arguments.remove(arg)
if isinstance(arg, File) and not file_:
validation = 'passed'
if self.action in (
amo.LOG.UNLISTED_SIGNED.id,
amo.LOG.UNLISTED_SIGNED_VALIDATION_FAILED.id,
):
validation = 'ignored'
file_ = self.f(
'<a href="{0}">{1}</a> (validation {2})',
arg.get_absolute_url(),
arg.filename,
validation,
)
arguments.remove(arg)
if self.action == amo.LOG.CHANGE_STATUS.id and not isinstance(arg, Addon):
# an int, we want to retrieve the string and translate it.
if isinstance(arg, int) and arg in amo.STATUS_CHOICES_ADDON:
status = gettext(amo.STATUS_CHOICES_ADDON[arg])
else:
# It's not an int or not one of the choices, so assume it's
# a string or an unknown int we want to display as-is.
status = arg
arguments.remove(arg)
user = user_link(self.user)
try:
kw = {
'addon': addon,
'rating': rating,
'version': version,
'collection': collection,
'tag': tag,
'user': user,
'group': group,
'file': file_,
'status': status,
}
return self.f(str(format), *arguments, **kw)
except (AttributeError, KeyError, IndexError):
log.warning('%d contains garbage data' % (self.id or 0))
return 'Something magical happened.'
def __str__(self):
return self.to_string()
def __html__(self):
return self
@property
def author_name(self):
if self.action in constants.activity.LOG_REVIEW_QUEUE_DEVELOPER:
return self.user.reviewer_name or self.user.name
return self.user.name
@classmethod
def create(cls, action, *args, **kw):
from olympia import core
user = kw.get('user', core.get_user())
if not user:
log.warning('Activity log called with no user: %s' % action.id)
return
# We make sure that we take the timestamp if provided, instead of
# creating a new one, especially useful for log entries created
# in a loop.
al = ActivityLog(
user=user, action=action.id, created=kw.get('created', timezone.now())
)
al.set_arguments(args)
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog.objects.create(
comments=al.details['comments'],
activity_log=al,
created=kw.get('created', timezone.now()),
)
for arg in args:
if isinstance(arg, tuple):
class_ = arg[0]
id_ = arg[1]
else:
class_ = arg.__class__
id_ = arg.id if isinstance(arg, ModelBase) else None
if class_ == Addon:
AddonLog.objects.create(
addon_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Version:
VersionLog.objects.create(
version_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == UserProfile:
UserLog.objects.create(
user_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Group:
GroupLog.objects.create(
group_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Block:
BlockLog.objects.create(
block_id=id_,
activity_log=al,
guid=arg.guid,
created=kw.get('created', timezone.now()),
)
if getattr(action, 'store_ip', False):
# Index specific actions by their IP address. Note that the caller
# must take care of overriding remote addr if the action is created
# from a task.
IPLog.objects.create(
ip_address=core.get_remote_addr(),
activity_log=al,
created=kw.get('created', timezone.now()),
)
# Index by every user
UserLog.objects.create(
activity_log=al, user=user, created=kw.get('created', timezone.now())
)
return al
| true | true |
1c46b6c6d53ac094d8f4c8e0d1401edb439f6fc3 | 4,863 | py | Python | tests/core/test_virtual_group.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | [
"MIT"
] | 12 | 2021-06-07T16:51:32.000Z | 2022-03-10T12:48:00.000Z | tests/core/test_virtual_group.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | [
"MIT"
] | 72 | 2021-04-28T21:49:41.000Z | 2022-02-24T13:58:11.000Z | tests/core/test_virtual_group.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | [
"MIT"
] | 3 | 2021-08-11T16:33:37.000Z | 2021-12-01T20:31:12.000Z | # Copyright 2021 TileDB Inc.
# Licensed under the MIT License.
import numpy as np
import pytest
import tiledb
from tiledb.cf import GroupSchema, VirtualGroup
_row = tiledb.Dim(name="rows", domain=(1, 4), tile=4, dtype=np.uint64)
_col = tiledb.Dim(name="cols", domain=(1, 4), tile=4, dtype=np.uint64)
_attr_a = tiledb.Attr(name="a", dtype=np.uint64)
_attr_b = tiledb.Attr(name="b", dtype=np.float64)
_attr_c = tiledb.Attr(name="c", dtype=np.dtype("U"))
_array_schema_1 = tiledb.ArraySchema(
domain=tiledb.Domain(_row, _col),
attrs=[_attr_a],
)
_array_schema_2 = tiledb.ArraySchema(
domain=tiledb.Domain(_row),
sparse=True,
attrs=[_attr_b, _attr_c],
)
_array_schema_3 = tiledb.ArraySchema(
domain=tiledb.Domain(_row, _col),
attrs=[_attr_c],
)
class TestCreateVirtualGroup:
_metadata_schema = _array_schema_1
_array_schemas = [
("A1", _array_schema_1),
("A2", _array_schema_2),
]
_group_schema = GroupSchema(_array_schemas, _metadata_schema)
@pytest.fixture(scope="class")
def group_uri(self, tmpdir_factory):
"""Creates a TileDB Group from GroupSchema and returns scenario dict."""
uri = str(tmpdir_factory.mktemp("group1").join("virtual"))
ctx = None
VirtualGroup.create(uri, self._group_schema, ctx=ctx)
return {"__tiledb_group": uri, "A1": f"{uri}_A1", "A2": f"{uri}_A2"}
def test_array_schemas(self, group_uri):
assert (
tiledb.ArraySchema.load(group_uri["__tiledb_group"])
== self._metadata_schema
)
assert tiledb.ArraySchema.load(group_uri["A1"]) == _array_schema_1
assert tiledb.ArraySchema.load(group_uri["A2"]) == _array_schema_2
class TestMetadataOnlyGroup:
_metadata_schema = tiledb.ArraySchema(
domain=tiledb.Domain(
tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.uint64)
),
attrs=[tiledb.Attr(name="a", dtype=np.uint64)],
sparse=True,
)
@pytest.fixture(scope="class")
def group_uris(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("group1"))
tiledb.Array.create(uri, self._metadata_schema)
return {"__tiledb_group": uri}
def test_has_metadata(self, group_uris):
with VirtualGroup(group_uris) as group:
assert isinstance(group, VirtualGroup)
assert group.has_metadata_array
assert group.meta is not None
def test_no_such_attr_error(self, group_uris):
with VirtualGroup(group_uris) as group:
with pytest.raises(KeyError):
group.open_array(attr="a")
class TestVirtualGroupWithArrays:
_metadata_schema = tiledb.ArraySchema(
domain=tiledb.Domain(
tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.uint64)
),
attrs=[tiledb.Attr(name="a", dtype=np.uint64)],
sparse=True,
)
_A1_data = np.array(
([1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]), dtype=np.uint64
)
@pytest.fixture(scope="class")
def group_uris(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("simple_group"))
tiledb.Array.create(uri + "/metadata", self._metadata_schema)
tiledb.Array.create(uri + "/array1", _array_schema_1)
with tiledb.DenseArray(uri + "/array1", mode="w") as array:
array[:] = self._A1_data
tiledb.Array.create(uri + "/array2", _array_schema_2)
tiledb.Array.create(uri + "/array3", _array_schema_3)
return {
"__tiledb_group": f"{uri}/metadata",
"A1": f"{uri}/array1",
"A2": f"{uri}/array2",
"A3": f"{uri}/array3",
}
def test_open_array_from_group(self, group_uris):
with VirtualGroup(group_uris) as group:
with group.open_array(array="A1") as array:
assert isinstance(array, tiledb.Array)
assert array.mode == "r"
np.testing.assert_equal(array[:, :]["a"], self._A1_data)
def test_open_attr(self, group_uris):
with VirtualGroup(group_uris) as group:
with group.open_array(attr="a") as array:
assert isinstance(array, tiledb.Array)
assert array.mode == "r"
np.testing.assert_equal(array[:, :], self._A1_data)
def test_attr_ambiguous_error(self, group_uris):
with VirtualGroup(group_uris) as group:
with pytest.raises(ValueError):
group.open_array(attr="c")
def test_append_group_warning(tmpdir):
uri = str(tmpdir.mkdir("append_group_test"))
with pytest.warns(Warning):
VirtualGroup.create(
uri + "/test", GroupSchema({"A1": _array_schema_1}), append=True
)
schema = tiledb.ArraySchema.load(uri + "/test_A1")
assert schema == _array_schema_1
| 34.006993 | 88 | 0.631298 |
import numpy as np
import pytest
import tiledb
from tiledb.cf import GroupSchema, VirtualGroup
_row = tiledb.Dim(name="rows", domain=(1, 4), tile=4, dtype=np.uint64)
_col = tiledb.Dim(name="cols", domain=(1, 4), tile=4, dtype=np.uint64)
_attr_a = tiledb.Attr(name="a", dtype=np.uint64)
_attr_b = tiledb.Attr(name="b", dtype=np.float64)
_attr_c = tiledb.Attr(name="c", dtype=np.dtype("U"))
_array_schema_1 = tiledb.ArraySchema(
domain=tiledb.Domain(_row, _col),
attrs=[_attr_a],
)
_array_schema_2 = tiledb.ArraySchema(
domain=tiledb.Domain(_row),
sparse=True,
attrs=[_attr_b, _attr_c],
)
_array_schema_3 = tiledb.ArraySchema(
domain=tiledb.Domain(_row, _col),
attrs=[_attr_c],
)
class TestCreateVirtualGroup:
_metadata_schema = _array_schema_1
_array_schemas = [
("A1", _array_schema_1),
("A2", _array_schema_2),
]
_group_schema = GroupSchema(_array_schemas, _metadata_schema)
@pytest.fixture(scope="class")
def group_uri(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("group1").join("virtual"))
ctx = None
VirtualGroup.create(uri, self._group_schema, ctx=ctx)
return {"__tiledb_group": uri, "A1": f"{uri}_A1", "A2": f"{uri}_A2"}
def test_array_schemas(self, group_uri):
assert (
tiledb.ArraySchema.load(group_uri["__tiledb_group"])
== self._metadata_schema
)
assert tiledb.ArraySchema.load(group_uri["A1"]) == _array_schema_1
assert tiledb.ArraySchema.load(group_uri["A2"]) == _array_schema_2
class TestMetadataOnlyGroup:
_metadata_schema = tiledb.ArraySchema(
domain=tiledb.Domain(
tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.uint64)
),
attrs=[tiledb.Attr(name="a", dtype=np.uint64)],
sparse=True,
)
@pytest.fixture(scope="class")
def group_uris(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("group1"))
tiledb.Array.create(uri, self._metadata_schema)
return {"__tiledb_group": uri}
def test_has_metadata(self, group_uris):
with VirtualGroup(group_uris) as group:
assert isinstance(group, VirtualGroup)
assert group.has_metadata_array
assert group.meta is not None
def test_no_such_attr_error(self, group_uris):
with VirtualGroup(group_uris) as group:
with pytest.raises(KeyError):
group.open_array(attr="a")
class TestVirtualGroupWithArrays:
_metadata_schema = tiledb.ArraySchema(
domain=tiledb.Domain(
tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.uint64)
),
attrs=[tiledb.Attr(name="a", dtype=np.uint64)],
sparse=True,
)
_A1_data = np.array(
([1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]), dtype=np.uint64
)
@pytest.fixture(scope="class")
def group_uris(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("simple_group"))
tiledb.Array.create(uri + "/metadata", self._metadata_schema)
tiledb.Array.create(uri + "/array1", _array_schema_1)
with tiledb.DenseArray(uri + "/array1", mode="w") as array:
array[:] = self._A1_data
tiledb.Array.create(uri + "/array2", _array_schema_2)
tiledb.Array.create(uri + "/array3", _array_schema_3)
return {
"__tiledb_group": f"{uri}/metadata",
"A1": f"{uri}/array1",
"A2": f"{uri}/array2",
"A3": f"{uri}/array3",
}
def test_open_array_from_group(self, group_uris):
with VirtualGroup(group_uris) as group:
with group.open_array(array="A1") as array:
assert isinstance(array, tiledb.Array)
assert array.mode == "r"
np.testing.assert_equal(array[:, :]["a"], self._A1_data)
def test_open_attr(self, group_uris):
with VirtualGroup(group_uris) as group:
with group.open_array(attr="a") as array:
assert isinstance(array, tiledb.Array)
assert array.mode == "r"
np.testing.assert_equal(array[:, :], self._A1_data)
def test_attr_ambiguous_error(self, group_uris):
with VirtualGroup(group_uris) as group:
with pytest.raises(ValueError):
group.open_array(attr="c")
def test_append_group_warning(tmpdir):
uri = str(tmpdir.mkdir("append_group_test"))
with pytest.warns(Warning):
VirtualGroup.create(
uri + "/test", GroupSchema({"A1": _array_schema_1}), append=True
)
schema = tiledb.ArraySchema.load(uri + "/test_A1")
assert schema == _array_schema_1
| true | true |
1c46bab99d58eea58a638a070fe13030e84bce32 | 14,212 | py | Python | tensorflow/contrib/timeseries/examples/lstm.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/timeseries/examples/lstm.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/timeseries/examples/lstm.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
try:
  # Matplotlib is optional: it is only needed for plotting results, so the
  # import is guarded to keep the TensorFlow-dependent parts of this example
  # testable in environments without it.
  import matplotlib  # pylint: disable=g-import-not-at-top

  matplotlib.use("TkAgg")  # Select the Tk backend (before pyplot is imported).
  from matplotlib import pyplot  # pylint: disable=g-import-not-at-top
  HAS_MATPLOTLIB = True
except ImportError:
  # Plotting requires matplotlib, but the unit test running this code may
  # execute in an environment without it (i.e. matplotlib is not a build
  # dependency). We'd still like to test the TensorFlow-dependent parts of this
  # example.
  HAS_MATPLOTLIB = False

# Path to the CSV of multivariate periodic data bundled next to this module.
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, exogenous_feature_columns=None,
dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
exogenous_feature_columns: A list of `tf.feature_column`s representing
features which are inputs to the model but are not predicted by
it. These must then be present for training, evaluation, and
prediction.
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
  def initialize_graph(self, input_statistics=None):
    """Save templates for components, which can then be used repeatedly.

    This method is called every time a new graph is created. It's safe to start
    adding ops to the current default graph here, but the graph should be
    constructed from scratch each call (no state may be carried over from a
    previous graph).

    Args:
      input_statistics: A math_utils.InputStatistics object.
    """
    super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
    with tf.variable_scope("", use_resource=True):
      # Use ResourceVariables to avoid race conditions.
      self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
      # Create templates so we don't have to worry about variable reuse:
      # tf.make_template creates the variables on first call and transparently
      # reuses them on every subsequent call.
      self._lstm_cell_run = tf.make_template(
          name_="lstm_cell",
          func_=self._lstm_cell,
          create_scope_now_=True)
      # Transforms LSTM output into mean predictions (a dense projection down
      # to `num_features` values per timestep).
      self._predict_from_lstm_output = tf.make_template(
          name_="predict_from_lstm_output",
          func_=functools.partial(tf.layers.dense, units=self.num_features),
          create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The most recently seen exogenous features.
tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, exogenous, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values,
exogenous, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
"""Advance the RNN state using a previous observation or prediction."""
_, previous_observation_or_prediction, exogenous, lstm_state = state
# Update LSTM state based on the most recent exogenous and endogenous
# features.
inputs = tf.concat([previous_observation_or_prediction, exogenous],
axis=-1)
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=inputs, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction,
exogenous, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
"""Advance model state across a gap."""
# Does not do anything special if we're jumping across a gap. More advanced
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Save exogenous regressors in model state for use in _prediction_step."""
state_from_time, prediction, _, lstm_state = state
return (state_from_time, prediction,
current_exogenous_regressors, lstm_state)
def train_and_predict(
csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
export_directory=None):
"""Train and predict using a custom time series model."""
# Construct an Estimator from our LSTM model.
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
# Exogenous features are not part of the loss, but can inform
# predictions. In this example the features have no extra information, but
# are included as an API example.
tf.feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
tf.feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(
num_features=5,
num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=tf.compat.v1.train.AdamOptimizer(0.001),
config=estimator_config,
# Set state to be saved across windows.
state_manager=state_management.ChainingStateManager())
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
+ ("2d_exogenous_feature",) * 2
+ ("categorical_exogenous_feature",)),
# Data types other than for `times` need to be specified if they aren't
# float32. In this case one of our exogenous features has string dtype.
column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
predict_exogenous_features = {
"2d_exogenous_feature": numpy.concatenate(
[numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
axis=-1),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 100)[None, :, None]}
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features=predict_exogenous_features)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
# Export the model in SavedModel format. We include a bit of extra boilerplate
# for "cold starting" as if we didn't have any state from the Estimator, which
# is the case when serving from a SavedModel. If Estimator output is
# available, the result of "Estimator.evaluate" can be passed directly to
# `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
# `continue_from` argument.
with tf.Graph().as_default():
filter_feature_tensors, _ = evaluation_input_fn()
with tf.train.MonitoredSession() as session:
# Fetch the series to "warm up" our state, which will allow us to make
# predictions for its future values. This is just a dictionary of times,
# values, and exogenous features mapping to numpy arrays. The use of an
# input_fn is just a convenience for the example; they can also be
# specified manually.
filter_features = session.run(filter_feature_tensors)
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(export_directory,
input_receiver_fn)
# Warm up and predict using the SavedModel
with tf.Graph().as_default():
with tf.compat.v1.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
signatures=signatures, session=session, features=filter_features)
saved_model_output = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=state, signatures=signatures,
session=session, steps=100,
exogenous_features=predict_exogenous_features))
# The exported model gives the same results as the Estimator.predict()
# call above.
numpy.testing.assert_allclose(
predictions["mean"],
numpy.squeeze(saved_model_output["mean"], axis=0))
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.compat.v1.app.run(main=main)
| 47.373333 | 88 | 0.700113 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
try:
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot
HAS_MATPLOTLIB = True
except ImportError:
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
def __init__(self, num_units, num_features, exogenous_feature_columns=None,
dtype=tf.float32):
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns,
dtype=dtype)
self._num_units = num_units
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics=None):
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
with tf.variable_scope("", use_resource=True):
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=functools.partial(tf.layers.dense, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The most recently seen exogenous features.
tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
state_from_time, prediction, exogenous, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
new_state_tuple = (current_times, transformed_values,
exogenous, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
_, previous_observation_or_prediction, exogenous, lstm_state = state
inputs = tf.concat([previous_observation_or_prediction, exogenous],
axis=-1)
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=inputs, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction,
exogenous, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
state_from_time, prediction, _, lstm_state = state
return (state_from_time, prediction,
current_exogenous_regressors, lstm_state)
def train_and_predict(
csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
export_directory=None):
# Construct an Estimator from our LSTM model.
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
# Exogenous features are not part of the loss, but can inform
# predictions. In this example the features have no extra information, but
# are included as an API example.
tf.feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
tf.feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(
num_features=5,
num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=tf.compat.v1.train.AdamOptimizer(0.001),
config=estimator_config,
# Set state to be saved across windows.
state_manager=state_management.ChainingStateManager())
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
+ ("2d_exogenous_feature",) * 2
+ ("categorical_exogenous_feature",)),
# Data types other than for `times` need to be specified if they aren't
column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
predict_exogenous_features = {
"2d_exogenous_feature": numpy.concatenate(
[numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
axis=-1),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 100)[None, :, None]}
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features=predict_exogenous_features)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
# is the case when serving from a SavedModel. If Estimator output is
# available, the result of "Estimator.evaluate" can be passed directly to
# `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
# `continue_from` argument.
with tf.Graph().as_default():
filter_feature_tensors, _ = evaluation_input_fn()
with tf.train.MonitoredSession() as session:
# Fetch the series to "warm up" our state, which will allow us to make
# predictions for its future values. This is just a dictionary of times,
# values, and exogenous features mapping to numpy arrays. The use of an
# input_fn is just a convenience for the example; they can also be
# specified manually.
filter_features = session.run(filter_feature_tensors)
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(export_directory,
input_receiver_fn)
# Warm up and predict using the SavedModel
with tf.Graph().as_default():
with tf.compat.v1.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
signatures=signatures, session=session, features=filter_features)
saved_model_output = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=state, signatures=signatures,
session=session, steps=100,
exogenous_features=predict_exogenous_features))
# The exported model gives the same results as the Estimator.predict()
# call above.
numpy.testing.assert_allclose(
predictions["mean"],
numpy.squeeze(saved_model_output["mean"], axis=0))
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.compat.v1.app.run(main=main)
| true | true |
1c46bc0e536a5b58bd77d13f7adfafa098ff3d02 | 2,906 | py | Python | initialExp/classifiers/iscx_naive_bayes.py | bakkerjarr/NetTrafClassificationExploration | 66febafcbe4820851784ae72c50a49c28fa91df4 | [
"Apache-2.0"
] | null | null | null | initialExp/classifiers/iscx_naive_bayes.py | bakkerjarr/NetTrafClassificationExploration | 66febafcbe4820851784ae72c50a49c28fa91df4 | [
"Apache-2.0"
] | null | null | null | initialExp/classifiers/iscx_naive_bayes.py | bakkerjarr/NetTrafClassificationExploration | 66febafcbe4820851784ae72c50a49c28fa91df4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Jarrod N. Bakker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import float32 as np_float
import numpy.core.multiarray as np_array
from sklearn.naive_bayes import GaussianNB
import iscx_result_calc as rc
__author__ = "Jarrod N. Bakker"
class NaiveBayesCls:
NAME = "Naive_Bayes"
def __init__(self, data, labels, skf):
"""Initialise.
:param data: Data set for the classifier to use.
:param labels: Labels indicating if a flow is normal or attack.
:param skf: StratifiedKFold object representing what data set
elements belong in each fold.
"""
self._data = data
self._labels = labels
self._kfold = skf
self._classifier = GaussianNB()
def classify(self):
"""Classify DDoS flows using Naive Bayes.
The data passed through to the fit() method cannot be a string
type.
:return: Results of the classification.
"""
all_results = [] # Results from all fold trials
fold_num = 1
for train, test in self._kfold:
print("\tTraining Naive Bayes...")
# NOTE: I have switched the training and testing set around.
train_array = np_array.array(map(self._data.__getitem__,
test)).astype(np_float)
train_label_array = np_array.array(map(
self._labels.__getitem__, test)).astype(np_float)
self._classifier.fit(train_array, train_label_array)
print("\tTesting classifier...")
test_array = np_array.array(map(self._data.__getitem__,
train)).astype(np_float)
test_label_array = np_array.array(map(
self._labels.__getitem__, train)).astype(np_float)
test_size = len(train) # Remember the switch of sets!
pred = self._classifier.predict(test_array)
mislabeled = (test_label_array != pred).sum()
tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)
detection_rate = rc.detection_rate(tp, fn)
false_pos_rate = rc.false_positive_rate(tn, fp)
all_results.append([fold_num, tp, tn, fp, fn, detection_rate,
false_pos_rate, mislabeled, test_size])
fold_num += 1
return all_results
| 38.746667 | 74 | 0.63627 |
from numpy import float32 as np_float
import numpy.core.multiarray as np_array
from sklearn.naive_bayes import GaussianNB
import iscx_result_calc as rc
__author__ = "Jarrod N. Bakker"
class NaiveBayesCls:
NAME = "Naive_Bayes"
def __init__(self, data, labels, skf):
self._data = data
self._labels = labels
self._kfold = skf
self._classifier = GaussianNB()
def classify(self):
all_results = []
fold_num = 1
for train, test in self._kfold:
print("\tTraining Naive Bayes...")
train_array = np_array.array(map(self._data.__getitem__,
test)).astype(np_float)
train_label_array = np_array.array(map(
self._labels.__getitem__, test)).astype(np_float)
self._classifier.fit(train_array, train_label_array)
print("\tTesting classifier...")
test_array = np_array.array(map(self._data.__getitem__,
train)).astype(np_float)
test_label_array = np_array.array(map(
self._labels.__getitem__, train)).astype(np_float)
test_size = len(train)
pred = self._classifier.predict(test_array)
mislabeled = (test_label_array != pred).sum()
tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)
detection_rate = rc.detection_rate(tp, fn)
false_pos_rate = rc.false_positive_rate(tn, fp)
all_results.append([fold_num, tp, tn, fp, fn, detection_rate,
false_pos_rate, mislabeled, test_size])
fold_num += 1
return all_results
| true | true |
1c46bc96f2ea4fe428bfdde14733d08a8f455696 | 84,164 | py | Python | core/domain/state_domain.py | SamriddhiMishra/oppia | 9f239ce13c11e60e64ca7c04726a55755231d530 | [
"Apache-2.0"
] | null | null | null | core/domain/state_domain.py | SamriddhiMishra/oppia | 9f239ce13c11e60e64ca7c04726a55755231d530 | [
"Apache-2.0"
] | null | null | null | core/domain/state_domain.py | SamriddhiMishra/oppia | 9f239ce13c11e60e64ca7c04726a55755231d530 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain object for states and their constituents."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import copy
import logging
from constants import constants
from core.domain import customization_args_util
from core.domain import html_cleaner
from core.domain import interaction_registry
from core.domain import param_domain
import feconf
import python_utils
import utils
class AnswerGroup(python_utils.OBJECT):
    """Value object for an answer group.

    Answer groups represent a set of rules dictating whether a shared
    feedback should be shared with the user. These rules are ORed together.
    Answer groups may also support a classifier that involves soft matching
    of answers to a set of training data and/or example answers dictated by
    the creator.
    """

    def __init__(
            self, outcome, rule_specs, training_data,
            tagged_skill_misconception_id):
        """Initializes an AnswerGroup domain object.

        Args:
            outcome: Outcome. The outcome corresponding to the answer group.
            rule_specs: list(RuleSpec). List of rule specifications.
            training_data: list(*). List of answers belonging to training
                data of this answer group.
            tagged_skill_misconception_id: str or None. The format is
                '<skill_id>-<misconception_id>', where skill_id is the skill
                ID of the tagged misconception and misconception_id is the id
                of the tagged misconception for the answer group. It is not
                None only when a state is part of a Question object that
                tests a particular skill.
        """
        # Re-wrap each rule spec in a fresh RuleSpec so this answer group
        # does not share rule objects with the caller.
        self.rule_specs = [
            RuleSpec(spec.rule_type, spec.inputs) for spec in rule_specs]
        self.outcome = outcome
        self.training_data = training_data
        self.tagged_skill_misconception_id = tagged_skill_misconception_id

    @classmethod
    def from_dict(cls, answer_group_dict):
        """Returns an AnswerGroup domain object built from a dict.

        Args:
            answer_group_dict: dict. The dict representation of an
                AnswerGroup object.

        Returns:
            AnswerGroup. The corresponding AnswerGroup domain object.
        """
        outcome = Outcome.from_dict(answer_group_dict['outcome'])
        rule_specs = [
            RuleSpec.from_dict(rule_spec_dict)
            for rule_spec_dict in answer_group_dict['rule_specs']]
        return cls(
            outcome,
            rule_specs,
            answer_group_dict['training_data'],
            answer_group_dict['tagged_skill_misconception_id'])

    def to_dict(self):
        """Returns a dict representing this AnswerGroup domain object.

        Returns:
            dict. A dict mapping all fields of the AnswerGroup instance.
        """
        rule_spec_dicts = [spec.to_dict() for spec in self.rule_specs]
        return {
            'rule_specs': rule_spec_dicts,
            'outcome': self.outcome.to_dict(),
            'training_data': self.training_data,
            'tagged_skill_misconception_id': self.tagged_skill_misconception_id
        }

    def validate(self, interaction, exp_param_specs_dict):
        """Verifies that all rule classes are valid and that the group has
        either rules or training data.

        Args:
            interaction: InteractionInstance. The interaction object.
            exp_param_specs_dict: dict. A dict of all parameters used in the
                exploration. Keys are parameter names and values are ParamSpec
                value objects with an object type property (obj_type).

        Raises:
            ValidationError: One or more attributes of the AnswerGroup are
                invalid.
        """
        if not isinstance(self.rule_specs, list):
            raise utils.ValidationError(
                'Expected answer group rules to be a list, received %s'
                % self.rule_specs)

        misconception_id = self.tagged_skill_misconception_id
        if misconception_id is not None:
            if not isinstance(misconception_id, python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected tagged skill misconception id to be a str, '
                    'received %s' % misconception_id)
            # The id must contain exactly one separator between the skill id
            # and the misconception id.
            if misconception_id.count('-') != 1:
                raise utils.ValidationError(
                    'Expected the format of tagged skill misconception id '
                    'to be <skill_id>-<misconception_id>, received %s'
                    % misconception_id)

        if not self.rule_specs and not self.training_data:
            raise utils.ValidationError(
                'There must be at least one rule or training data for each'
                ' answer group.')

        for spec in self.rule_specs:
            if spec.rule_type not in interaction.rules_dict:
                raise utils.ValidationError(
                    'Unrecognized rule type: %s' % spec.rule_type)
            spec.validate(
                interaction.get_rule_param_list(spec.rule_type),
                exp_param_specs_dict)

        self.outcome.validate()
class Hint(python_utils.OBJECT):
    """Value object representing a hint shown to the learner."""

    def __init__(self, hint_content):
        """Constructs a Hint domain object.

        Args:
            hint_content: SubtitledHtml. The hint text and ID referring to
                the other assets for this content.
        """
        self.hint_content = hint_content

    @classmethod
    def from_dict(cls, hint_dict):
        """Returns a Hint domain object built from a dict.

        Args:
            hint_dict: dict. The dict representation of a Hint object.

        Returns:
            Hint. The corresponding Hint domain object.
        """
        hint_content = SubtitledHtml.from_dict(hint_dict['hint_content'])
        return cls(hint_content)

    def to_dict(self):
        """Returns a dict representing this Hint domain object.

        Returns:
            dict. A dict mapping the field of the Hint instance.
        """
        return {'hint_content': self.hint_content.to_dict()}

    def validate(self):
        """Validates all properties of this Hint."""
        self.hint_content.validate()
class Solution(python_utils.OBJECT):
    """Value object representing a solution.

    A solution consists of answer_is_exclusive, correct_answer and an
    explanation. When answer_is_exclusive is True, this indicates that it is
    the only correct answer; when it is False, this indicates that it is one
    possible answer. correct_answer records an answer that enables the
    learner to progress to the next card and explanation is an HTML string
    containing an explanation for the solution.
    """

    def __init__(
            self, interaction_id, answer_is_exclusive,
            correct_answer, explanation):
        """Constructs a Solution domain object.

        Args:
            interaction_id: str. The interaction id.
            answer_is_exclusive: bool. True if this is the only correct
                answer; False if it is one of several possible answers.
            correct_answer: str. The correct answer; this answer enables the
                learner to progress to the next card.
            explanation: SubtitledHtml. Contains text and text id to link
                audio translations for the solution's explanation.
        """
        self.answer_is_exclusive = answer_is_exclusive
        # Store the answer in the interaction's normalized form.
        interaction = interaction_registry.Registry.get_interaction_by_id(
            interaction_id)
        self.correct_answer = interaction.normalize_answer(correct_answer)
        self.explanation = explanation

    @classmethod
    def from_dict(cls, interaction_id, solution_dict):
        """Returns a Solution domain object built from a dict.

        Args:
            interaction_id: str. The interaction id.
            solution_dict: dict. The dict representation of a Solution
                object.

        Returns:
            Solution. The corresponding Solution domain object.
        """
        interaction = interaction_registry.Registry.get_interaction_by_id(
            interaction_id)
        normalized_answer = interaction.normalize_answer(
            solution_dict['correct_answer'])
        explanation = SubtitledHtml.from_dict(solution_dict['explanation'])
        return cls(
            interaction_id,
            solution_dict['answer_is_exclusive'],
            normalized_answer,
            explanation)

    def to_dict(self):
        """Returns a dict representing this Solution domain object.

        Returns:
            dict. A dict mapping all fields of the Solution instance.
        """
        return {
            'answer_is_exclusive': self.answer_is_exclusive,
            'correct_answer': self.correct_answer,
            'explanation': self.explanation.to_dict(),
        }

    def validate(self, interaction_id):
        """Validates all properties of this Solution.

        Args:
            interaction_id: str. The interaction id.

        Raises:
            ValidationError: One or more attributes of the Solution are not
                valid.
        """
        if not isinstance(self.answer_is_exclusive, bool):
            raise utils.ValidationError(
                'Expected answer_is_exclusive to be bool, received %s' %
                self.answer_is_exclusive)
        # Raises if the stored answer fails to normalize for this
        # interaction type.
        interaction_registry.Registry.get_interaction_by_id(
            interaction_id).normalize_answer(self.correct_answer)
        self.explanation.validate()
class InteractionInstance(python_utils.OBJECT):
    """Value object for an instance of an interaction.

    An interaction instance couples an interaction type (identified by
    `id`) with its per-state configuration: customization args, answer
    groups, a default outcome, hints and an optional solution.
    """

    # The default interaction used for a new state. None means "no
    # interaction chosen yet".
    _DEFAULT_INTERACTION_ID = None

    def to_dict(self):
        """Returns a dict representing this InteractionInstance domain object.

        Returns:
            dict. A dict mapping all fields of InteractionInstance instance.
        """
        return {
            'id': self.id,
            # When no interaction type is set there is no spec to expand
            # the customization args against, so emit an empty dict.
            'customization_args': (
                {} if self.id is None else
                customization_args_util.get_full_customization_args(
                    self.customization_args,
                    interaction_registry.Registry.get_interaction_by_id(
                        self.id).customization_arg_specs)),
            'answer_groups': [group.to_dict() for group in self.answer_groups],
            'default_outcome': (
                self.default_outcome.to_dict()
                if self.default_outcome is not None
                else None),
            'confirmed_unclassified_answers': (
                self.confirmed_unclassified_answers),
            'hints': [hint.to_dict() for hint in self.hints],
            # NOTE: any falsy solution (None or the {} used by
            # create_default_interaction) serializes as None here.
            'solution': self.solution.to_dict() if self.solution else None,
        }

    @classmethod
    def from_dict(cls, interaction_dict):
        """Return a InteractionInstance domain object from a dict.

        Args:
            interaction_dict: dict. The dict representation of
                InteractionInstance object.

        Returns:
            InteractionInstance. The corresponding InteractionInstance domain
            object.
        """
        default_outcome_dict = (
            Outcome.from_dict(interaction_dict['default_outcome'])
            if interaction_dict['default_outcome'] is not None else None)
        # A solution can only be deserialized when both the solution dict
        # and the interaction id are present (Solution.from_dict needs the
        # id to normalize the answer).
        solution_dict = (
            Solution.from_dict(
                interaction_dict['id'], interaction_dict['solution'])
            if (interaction_dict['solution'] and interaction_dict['id'])
            else None)
        return cls(
            interaction_dict['id'],
            interaction_dict['customization_args'],
            [AnswerGroup.from_dict(h)
             for h in interaction_dict['answer_groups']],
            default_outcome_dict,
            interaction_dict['confirmed_unclassified_answers'],
            [Hint.from_dict(h) for h in interaction_dict['hints']],
            solution_dict)

    def __init__(
            self, interaction_id, customization_args, answer_groups,
            default_outcome, confirmed_unclassified_answers, hints, solution):
        """Initializes a InteractionInstance domain object.

        Args:
            interaction_id: str. The interaction id.
            customization_args: dict. The customization dict. The keys are
                names of customization_args and the values are dicts with a
                single key, 'value', whose corresponding value is the value of
                the customization arg.
            answer_groups: list(AnswerGroup). List of answer groups of the
                interaction instance.
            default_outcome: Outcome. The default outcome of the interaction
                instance.
            confirmed_unclassified_answers: list(AnswerGroup). List of answers
                which have been confirmed to be associated with the default
                outcome.
            hints: list(Hint). List of hints for this interaction.
            solution: Solution. A possible solution for the question asked in
                this interaction.
        """
        self.id = interaction_id
        # Customization args for the interaction's view. Parts of these
        # args may be Jinja templates that refer to state parameters.
        # This is a dict: the keys are names of customization_args and the
        # values are dicts with a single key, 'value', whose corresponding
        # value is the value of the customization arg.
        self.customization_args = customization_args
        self.answer_groups = answer_groups
        self.default_outcome = default_outcome
        self.confirmed_unclassified_answers = confirmed_unclassified_answers
        self.hints = hints
        self.solution = solution

    @property
    def is_terminal(self):
        """Determines if this interaction type is terminal. If no ID is set for
        this interaction, it is assumed to not be terminal.

        Returns:
            bool. Whether the interaction is terminal.
        """
        # NOTE(review): when self.id is falsy this returns self.id itself
        # (None or ''), not False; callers only use it in boolean context.
        return self.id and interaction_registry.Registry.get_interaction_by_id(
            self.id).is_terminal

    def get_all_outcomes(self):
        """Returns a list of all outcomes of this interaction, taking into
        consideration every answer group and the default outcome.

        Returns:
            list(Outcome). List of all outcomes of this interaction.
        """
        outcomes = []
        for answer_group in self.answer_groups:
            outcomes.append(answer_group.outcome)
        if self.default_outcome is not None:
            outcomes.append(self.default_outcome)
        return outcomes

    def validate(self, exp_param_specs_dict):
        """Validates various properties of the InteractionInstance.

        Args:
            exp_param_specs_dict: dict. A dict of specified parameters used in
                the exploration. Keys are parameter names and values are
                ParamSpec value objects with an object type property(obj_type).
                Is used to validate AnswerGroup objects.

        Raises:
            ValidationError: One or more attributes of the InteractionInstance
                are invalid.
        """
        if not isinstance(self.id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected interaction id to be a string, received %s' %
                self.id)
        # An unknown id surfaces as a KeyError from the registry; convert
        # it into a ValidationError so all failures here share one type.
        try:
            interaction = interaction_registry.Registry.get_interaction_by_id(
                self.id)
        except KeyError:
            raise utils.ValidationError('Invalid interaction id: %s' % self.id)
        customization_args_util.validate_customization_args_and_values(
            'interaction', self.id, self.customization_args,
            interaction.customization_arg_specs)
        if not isinstance(self.answer_groups, list):
            raise utils.ValidationError(
                'Expected answer groups to be a list, received %s.'
                % self.answer_groups)
        # Terminal interactions end the exploration, so they must have no
        # default outcome and no answer groups; non-terminal ones need a
        # default outcome to route unmatched answers.
        if not self.is_terminal and self.default_outcome is None:
            raise utils.ValidationError(
                'Non-terminal interactions must have a default outcome.')
        if self.is_terminal and self.default_outcome is not None:
            raise utils.ValidationError(
                'Terminal interactions must not have a default outcome.')
        if self.is_terminal and self.answer_groups:
            raise utils.ValidationError(
                'Terminal interactions must not have any answer groups.')
        for answer_group in self.answer_groups:
            answer_group.validate(interaction, exp_param_specs_dict)
        if self.default_outcome is not None:
            self.default_outcome.validate()
        if not isinstance(self.hints, list):
            raise utils.ValidationError(
                'Expected hints to be a list, received %s'
                % self.hints)
        for hint in self.hints:
            hint.validate()
        if self.solution:
            self.solution.validate(self.id)
        # A solution without at least one hint is rejected.
        if self.solution and not self.hints:
            raise utils.ValidationError(
                'Hint(s) must be specified if solution is specified')

    @classmethod
    def create_default_interaction(cls, default_dest_state_name):
        """Create a default InteractionInstance domain object:
            - customization_args: empty dictionary;
            - answer_groups: empty list;
            - default_outcome: dest is set to 'default_dest_state_name' and
                feedback and param_changes are initialized as empty lists;
            - confirmed_unclassified_answers: empty list;

        Args:
            default_dest_state_name: str. The default destination state.

        Returns:
            InteractionInstance. The corresponding InteractionInstance domain
            object with default values.
        """
        default_outcome = Outcome(
            default_dest_state_name,
            SubtitledHtml.create_default_subtitled_html(
                feconf.DEFAULT_OUTCOME_CONTENT_ID), False, {}, None, None)
        # NOTE(review): the solution argument here is {} rather than None.
        # Both are falsy, and every use site checks `if self.solution`, so
        # behavior matches None — but confirm before relying on identity
        # checks against the solution field.
        return cls(
            cls._DEFAULT_INTERACTION_ID, {}, [], default_outcome, [], [], {})

    def get_all_html_content_strings(self):
        """Get all html content strings in the interaction.

        Returns:
            list(str): The list of all html content strings in the interaction.
        """
        html_list = []
        for answer_group in self.answer_groups:
            outcome_html = answer_group.outcome.feedback.html
            html_list = html_list + [outcome_html]
        # Note that ItemSelectionInput replicates the customization arg HTML
        # in its answer groups.
        if self.id == 'ItemSelectionInput':
            for answer_group in self.answer_groups:
                for rule_spec in answer_group.rule_specs:
                    # For ItemSelectionInput, inputs['x'] is itself a list
                    # of HTML strings, so it is concatenated directly.
                    rule_spec_html = rule_spec.inputs['x']
                    html_list = html_list + rule_spec_html
        if self.id == 'DragAndDropSortInput':
            for answer_group in self.answer_groups:
                for rule_spec in answer_group.rule_specs:
                    # For DragAndDropSortInput, inputs['x'] is a list of
                    # lists of HTML strings, hence the extra loop level.
                    rule_spec_html_list = rule_spec.inputs['x']
                    for rule_spec_html in rule_spec_html_list:
                        html_list = html_list + rule_spec_html
        if self.default_outcome:
            default_outcome_html = self.default_outcome.feedback.html
            html_list = html_list + [default_outcome_html]
        for hint in self.hints:
            hint_html = hint.hint_content.html
            html_list = html_list + [hint_html]
        if self.solution:
            solution_html = self.solution.explanation.html
            html_list = html_list + [solution_html]
        # These three interaction types carry HTML in their 'choices'
        # customization arg as well.
        if self.id in (
                'ItemSelectionInput', 'MultipleChoiceInput',
                'DragAndDropSortInput'):
            customization_args_html_list = (
                self.customization_args['choices']['value'])
            html_list = html_list + customization_args_html_list
        return html_list
class Outcome(python_utils.OBJECT):
    """Value object representing an outcome of an interaction. An outcome
    consists of a destination state, feedback to show the user, and any
    parameter changes.
    """

    def to_dict(self):
        """Serializes this Outcome into a plain dict.

        Returns:
            dict. A dict, mapping all fields of Outcome instance.
        """
        serialized_param_changes = [
            change.to_dict() for change in self.param_changes]
        return {
            'dest': self.dest,
            'feedback': self.feedback.to_dict(),
            'labelled_as_correct': self.labelled_as_correct,
            'param_changes': serialized_param_changes,
            'refresher_exploration_id': self.refresher_exploration_id,
            'missing_prerequisite_skill_id': self.missing_prerequisite_skill_id
        }

    @classmethod
    def from_dict(cls, outcome_dict):
        """Builds an Outcome domain object from its dict representation.

        Args:
            outcome_dict: dict. The dict representation of Outcome object.

        Returns:
            Outcome. The corresponding Outcome domain object.
        """
        param_changes = []
        for change_dict in outcome_dict['param_changes']:
            param_changes.append(param_domain.ParamChange(
                change_dict['name'], change_dict['generator_id'],
                change_dict['customization_args']))
        return cls(
            outcome_dict['dest'],
            SubtitledHtml.from_dict(outcome_dict['feedback']),
            outcome_dict['labelled_as_correct'],
            param_changes,
            outcome_dict['refresher_exploration_id'],
            outcome_dict['missing_prerequisite_skill_id'])

    def __init__(
            self, dest, feedback, labelled_as_correct, param_changes,
            refresher_exploration_id, missing_prerequisite_skill_id):
        """Initializes a Outcome domain object.

        Args:
            dest: str. The name of the destination state.
            feedback: SubtitledHtml. Feedback to give to the user if this rule
                is triggered.
            labelled_as_correct: bool. Whether this outcome has been labelled
                by the creator as corresponding to a "correct" answer.
            param_changes: list(ParamChange). List of exploration-level
                parameter changes to make if this rule is triggered.
            refresher_exploration_id: str or None. An optional exploration ID
                to redirect the learner to if they seem to lack understanding
                of a prerequisite concept. This should only exist if the
                destination state for this outcome is a self-loop.
            missing_prerequisite_skill_id: str or None. The id of the skill
                that this answer group tests. If this is not None, the
                exploration player would redirect to this skill when a learner
                receives this outcome.
        """
        # Name of the destination state this outcome routes the learner to.
        # TODO(sll): Check that this state actually exists.
        self.dest = dest
        # Feedback shown to the learner when this outcome triggers.
        self.feedback = feedback
        # Creator's "correct answer" flag for this outcome.
        self.labelled_as_correct = labelled_as_correct
        # A falsy param_changes argument is normalized to an empty list.
        self.param_changes = param_changes or []
        # Refresher exploration for self-loop outcomes, if any.
        self.refresher_exploration_id = refresher_exploration_id
        # Skill whose concept card is shown on this outcome, if any.
        self.missing_prerequisite_skill_id = missing_prerequisite_skill_id

    def validate(self):
        """Validates various properties of the Outcome.

        Raises:
            ValidationError: One or more attributes of the Outcome are invalid.
        """
        self.feedback.validate()
        if not isinstance(self.labelled_as_correct, bool):
            raise utils.ValidationError(
                'The "labelled_as_correct" field should be a boolean, received '
                '%s' % self.labelled_as_correct)
        if (self.missing_prerequisite_skill_id is not None
                and not isinstance(
                    self.missing_prerequisite_skill_id,
                    python_utils.BASESTRING)):
            raise utils.ValidationError(
                'Expected outcome missing_prerequisite_skill_id to be a '
                'string, received %s' % self.missing_prerequisite_skill_id)
        if not isinstance(self.param_changes, list):
            raise utils.ValidationError(
                'Expected outcome param_changes to be a list, received %s'
                % self.param_changes)
        for change in self.param_changes:
            change.validate()
        if (self.refresher_exploration_id is not None
                and not isinstance(
                    self.refresher_exploration_id, python_utils.BASESTRING)):
            raise utils.ValidationError(
                'Expected outcome refresher_exploration_id to be a string, '
                'received %s' % self.refresher_exploration_id)
class Voiceover(python_utils.OBJECT):
    """Value object representing an voiceover."""

    def __init__(self, filename, file_size_bytes, needs_update):
        """Initializes a Voiceover domain object.

        Args:
            filename: str. The corresponding voiceover file path.
            file_size_bytes: int. The file size, in bytes. Used to display
                potential bandwidth usage to the learner before they download
                the file.
            needs_update: bool. Whether voiceover is marked for needing review.
        """
        # Audio file path, e.g. "content-en-2-h7sjp8s.mp3".
        self.filename = filename
        # File size in bytes, shown to the learner as a bandwidth estimate.
        self.file_size_bytes = file_size_bytes
        # Whether this audio has been flagged as needing review.
        self.needs_update = needs_update

    def to_dict(self):
        """Returns a dict representing this Voiceover domain object.

        Returns:
            dict. A dict, mapping all fields of Voiceover instance.
        """
        return {
            'filename': self.filename,
            'file_size_bytes': self.file_size_bytes,
            'needs_update': self.needs_update,
        }

    @classmethod
    def from_dict(cls, voiceover_dict):
        """Return a Voiceover domain object from a dict.

        Args:
            voiceover_dict: dict. The dict representation of
                Voiceover object.

        Returns:
            Voiceover. The corresponding Voiceover domain object.
        """
        return cls(
            voiceover_dict['filename'],
            voiceover_dict['file_size_bytes'],
            voiceover_dict['needs_update'])

    def validate(self):
        """Validates properties of the Voiceover.

        Raises:
            ValidationError: One or more attributes of the Voiceover are
                invalid.
        """
        if not isinstance(self.filename, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected audio filename to be a string, received %s' %
                self.filename)
        # Split on the last dot. rpartition yields an empty separator when
        # there is no dot at all, and an empty stem when the filename
        # starts with the dot; both cases are invalid filenames.
        stem, separator, extension = self.filename.rpartition('.')
        if not separator or not stem:
            raise utils.ValidationError(
                'Invalid audio filename: %s' % self.filename)
        if extension not in feconf.ACCEPTED_AUDIO_EXTENSIONS:
            raise utils.ValidationError(
                'Invalid audio filename: it should have one of '
                'the following extensions: %s. Received: %s'
                % (list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys()),
                   self.filename))
        if not isinstance(self.file_size_bytes, int):
            raise utils.ValidationError(
                'Expected file size to be an int, received %s' %
                self.file_size_bytes)
        if self.file_size_bytes <= 0:
            raise utils.ValidationError(
                'Invalid file size: %s' % self.file_size_bytes)
        if not isinstance(self.needs_update, bool):
            raise utils.ValidationError(
                'Expected needs_update to be a bool, received %s' %
                self.needs_update)
class WrittenTranslation(python_utils.OBJECT):
    """Value object representing a written translation for a content."""

    def __init__(self, html, needs_update):
        """Initializes a WrittenTranslation domain object.

        Args:
            html: str. A piece of user submitted HTML. This is cleaned in such
                a way as to contain a restricted set of HTML tags.
            needs_update: bool. Whether html is marked for needing review.
        """
        # User-submitted HTML is sanitized on the way in.
        self.html = html_cleaner.clean(html)
        self.needs_update = needs_update

    @classmethod
    def from_dict(cls, written_translation_dict):
        """Return a WrittenTranslation domain object from a dict.

        Args:
            written_translation_dict: dict. The dict representation of
                WrittenTranslation object.

        Returns:
            WrittenTranslation. The corresponding WrittenTranslation domain
            object.
        """
        return cls(
            written_translation_dict['html'],
            written_translation_dict['needs_update'])

    def to_dict(self):
        """Returns a dict representing this WrittenTranslation domain object.

        Returns:
            dict. A dict, mapping all fields of WrittenTranslation instance.
        """
        return {'html': self.html, 'needs_update': self.needs_update}

    def validate(self):
        """Validates properties of the WrittenTranslation.

        Raises:
            ValidationError: One or more attributes of the WrittenTranslation
                are invalid.
        """
        if not isinstance(self.html, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Invalid content HTML: %s' % self.html)
        if not isinstance(self.needs_update, bool):
            raise utils.ValidationError(
                'Expected needs_update to be a bool, received %s' %
                self.needs_update)
class WrittenTranslations(python_utils.OBJECT):
    """Value object representing a content translations which stores
    translated contents of all state contents (like hints, feedback etc.) in
    different languages linked through their content_id.
    """

    def __init__(self, translations_mapping):
        """Initializes a WrittenTranslations domain object.

        Args:
            translations_mapping: dict. A dict mapping content ids to dicts
                that map language codes to WrittenTranslation objects.
        """
        self.translations_mapping = translations_mapping

    def to_dict(self):
        """Returns a dict representing this WrittenTranslations domain object.

        Returns:
            dict. A dict, mapping all fields of WrittenTranslations instance.
        """
        translations_mapping = {}
        for (content_id, language_code_to_written_translation) in (
                self.translations_mapping.items()):
            translations_mapping[content_id] = {}
            for (language_code, written_translation) in (
                    language_code_to_written_translation.items()):
                translations_mapping[content_id][language_code] = (
                    written_translation.to_dict())
        written_translations_dict = {
            'translations_mapping': translations_mapping
        }

        return written_translations_dict

    @classmethod
    def from_dict(cls, written_translations_dict):
        """Return a WrittenTranslations domain object from a dict.

        Args:
            written_translations_dict: dict. The dict representation of
                WrittenTranslations object.

        Returns:
            WrittenTranslations. The corresponding WrittenTranslations domain
            object.
        """
        translations_mapping = {}
        for (content_id, language_code_to_written_translation) in (
                written_translations_dict['translations_mapping'].items()):
            translations_mapping[content_id] = {}
            for (language_code, written_translation) in (
                    language_code_to_written_translation.items()):
                translations_mapping[content_id][language_code] = (
                    WrittenTranslation.from_dict(written_translation))

        return cls(translations_mapping)

    def get_content_ids_that_are_correctly_translated(self, language_code):
        """Returns a list of content ids in which a correct translation is
        available in the given language.

        Args:
            language_code: str. The abbreviated code of the language.

        Returns:
            list(str). A list of content ids in which the translations are
            available in the given language.
        """
        correctly_translated_content_ids = []
        for content_id, translations in self.translations_mapping.items():
            # A translation only counts when it exists in the requested
            # language and is not flagged as needing an update.
            if language_code in translations and not (
                    translations[language_code].needs_update):
                correctly_translated_content_ids.append(content_id)

        return correctly_translated_content_ids

    def add_translation(self, content_id, language_code, html):
        """Adds a translation for the given content id in a given language.

        The content id must already be registered via
        add_content_id_for_translation; otherwise a KeyError is raised.

        Args:
            content_id: str. The id of the content.
            language_code: str. The language code of the translated html.
            html: str. The translated html.
        """
        written_translation = WrittenTranslation(html, False)
        self.translations_mapping[content_id][language_code] = (
            written_translation)

    def validate(self, expected_content_id_list):
        """Validates properties of the WrittenTranslations.

        Args:
            expected_content_id_list: A list of content id which are expected
                to be inside they WrittenTranslations.

        Raises:
            ValidationError: One or more attributes of the WrittenTranslations
                are invalid.
        """
        if expected_content_id_list is not None:
            if not set(self.translations_mapping.keys()) == (
                    set(expected_content_id_list)):
                raise utils.ValidationError(
                    'Expected state written_translations to match the listed '
                    'content ids %s, found %s' % (
                        expected_content_id_list,
                        list(self.translations_mapping.keys()))
                    )

        # Currently, we assume written translations are used by the
        # voice-artist to voiceover the translated text so written
        # translations can be in supported audio/voiceover languages.
        # This list is invariant, so compute it once instead of inside
        # the innermost loop.
        allowed_language_codes = [language['id'] for language in (
            constants.SUPPORTED_AUDIO_LANGUAGES)]

        for (content_id, language_code_to_written_translation) in (
                self.translations_mapping.items()):
            if not isinstance(content_id, python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected content_id to be a string, received %s'
                    % content_id)
            if not isinstance(language_code_to_written_translation, dict):
                raise utils.ValidationError(
                    'Expected content_id value to be a dict, received %s'
                    % language_code_to_written_translation)
            for (language_code, written_translation) in (
                    language_code_to_written_translation.items()):
                if not isinstance(language_code, python_utils.BASESTRING):
                    raise utils.ValidationError(
                        'Expected language_code to be a string, received %s'
                        % language_code)
                if language_code not in allowed_language_codes:
                    raise utils.ValidationError(
                        'Invalid language_code: %s' % language_code)

                written_translation.validate()

    def get_content_ids_for_text_translation(self):
        """Returns a list of content_id available for text translation.

        Returns:
            list(str). A list of content id available for text translation.
        """
        return list(self.translations_mapping.keys())

    def get_translated_content(self, content_id, language_code):
        """Returns the translated content for the given content_id in the given
        language.

        Args:
            content_id: str. The ID of the content.
            language_code: str. The language code for the translated content.

        Returns:
            str. The translated content for a given content id in a language.

        Raises:
            Exception: Translation doesn't exist in the given language.
            Exception: The given content id doesn't exist.
        """
        if content_id in self.translations_mapping:
            if language_code in self.translations_mapping[content_id]:
                return self.translations_mapping[content_id][language_code].html
            else:
                raise Exception(
                    'Translation for the given content_id %s does not exist in '
                    '%s language code' % (content_id, language_code))
        else:
            raise Exception('Invalid content_id: %s' % content_id)

    def add_content_id_for_translation(self, content_id):
        """Adds a content id as a key for the translation into the
        content_translation dict.

        Args:
            content_id: str. The id representing a subtitled html.

        Raises:
            Exception: The content id isn't a string.
        """
        if not isinstance(content_id, python_utils.BASESTRING):
            raise Exception(
                'Expected content_id to be a string, received %s' % content_id)
        if content_id in self.translations_mapping:
            raise Exception(
                'The content_id %s already exist.' % content_id)
        else:
            self.translations_mapping[content_id] = {}

    def delete_content_id_for_translation(self, content_id):
        """Deletes a content id from the content_translation dict.

        Args:
            content_id: str. The id representing a subtitled html.

        Raises:
            Exception: The content id isn't a string.
        """
        if not isinstance(content_id, python_utils.BASESTRING):
            raise Exception(
                'Expected content_id to be a string, received %s' % content_id)
        if content_id not in self.translations_mapping:
            raise Exception(
                'The content_id %s does not exist.' % content_id)
        else:
            self.translations_mapping.pop(content_id, None)

    def get_translation_counts(self):
        """Return a dict representing the number of translation available in a
        languages in which there exist at least one translation in the
        WrittenTranslation object.

        Returns:
            dict(str, int). A dict with language code as a key and number of
            translation available in that language as the value.
        """
        translation_counts = collections.defaultdict(int)
        for translations in self.translations_mapping.values():
            for language, translation in translations.items():
                # Translations flagged as needing an update don't count.
                if not translation.needs_update:
                    translation_counts[language] += 1
        return translation_counts
class RecordedVoiceovers(python_utils.OBJECT):
    """Value object representing a recorded voiceovers which stores voiceover of
    all state contents (like hints, feedback etc.) in different languages linked
    through their content_id.
    """

    def __init__(self, voiceovers_mapping):
        """Initializes a RecordedVoiceovers domain object.

        Args:
            voiceovers_mapping: dict. A dict mapping content ids to dicts that
                map language codes to Voiceover objects.
        """
        self.voiceovers_mapping = voiceovers_mapping

    def to_dict(self):
        """Returns a dict representing this RecordedVoiceovers domain object.

        Returns:
            dict. A dict, mapping all fields of RecordedVoiceovers instance.
        """
        voiceovers_mapping = {}
        for (content_id, language_code_to_voiceover) in (
                self.voiceovers_mapping.items()):
            voiceovers_mapping[content_id] = {}
            for (language_code, voiceover) in (
                    language_code_to_voiceover.items()):
                voiceovers_mapping[content_id][language_code] = (
                    voiceover.to_dict())
        recorded_voiceovers_dict = {
            'voiceovers_mapping': voiceovers_mapping
        }

        return recorded_voiceovers_dict

    @classmethod
    def from_dict(cls, recorded_voiceovers_dict):
        """Return a RecordedVoiceovers domain object from a dict.

        Args:
            recorded_voiceovers_dict: dict. The dict representation of
                RecordedVoiceovers object.

        Returns:
            RecordedVoiceovers. The corresponding RecordedVoiceovers domain
            object.
        """
        voiceovers_mapping = {}
        for (content_id, language_code_to_voiceover) in (
                recorded_voiceovers_dict['voiceovers_mapping'].items()):
            voiceovers_mapping[content_id] = {}
            for (language_code, voiceover) in (
                    language_code_to_voiceover.items()):
                voiceovers_mapping[content_id][language_code] = (
                    Voiceover.from_dict(voiceover))

        return cls(voiceovers_mapping)

    def validate(self, expected_content_id_list):
        """Validates properties of the RecordedVoiceovers.

        Args:
            expected_content_id_list: A list of content id which are expected
                to be inside they RecordedVoiceovers.

        Raises:
            ValidationError: One or more attributes of the RecordedVoiceovers
                are invalid.
        """
        if expected_content_id_list is not None:
            if not set(self.voiceovers_mapping.keys()) == (
                    set(expected_content_id_list)):
                raise utils.ValidationError(
                    'Expected state recorded_voiceovers to match the listed '
                    'content ids %s, found %s' % (
                        expected_content_id_list,
                        list(self.voiceovers_mapping.keys()))
                    )

        # Voiceovers may only exist in supported audio languages. This list
        # is invariant, so compute it once instead of inside the innermost
        # loop.
        allowed_language_codes = [language['id'] for language in (
            constants.SUPPORTED_AUDIO_LANGUAGES)]

        for (content_id, language_code_to_voiceover) in (
                self.voiceovers_mapping.items()):
            if not isinstance(content_id, python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected content_id to be a string, received %s'
                    % content_id)
            if not isinstance(language_code_to_voiceover, dict):
                raise utils.ValidationError(
                    'Expected content_id value to be a dict, received %s'
                    % language_code_to_voiceover)
            for (language_code, voiceover) in (
                    language_code_to_voiceover.items()):
                if not isinstance(language_code, python_utils.BASESTRING):
                    raise utils.ValidationError(
                        'Expected language_code to be a string, received %s'
                        % language_code)
                if language_code not in allowed_language_codes:
                    raise utils.ValidationError(
                        'Invalid language_code: %s' % language_code)

                voiceover.validate()

    def get_content_ids_for_voiceovers(self):
        """Returns a list of content_id available for voiceover.

        Returns:
            list(str). A list of content id available for voiceover.
        """
        return list(self.voiceovers_mapping.keys())

    def strip_all_existing_voiceovers(self):
        """Strips all existing voiceovers from the voiceovers_mapping."""
        # Values are replaced in place; the key set is unchanged, so
        # iterating the keys view while assigning is safe.
        for content_id in self.voiceovers_mapping.keys():
            self.voiceovers_mapping[content_id] = {}

    def add_content_id_for_voiceover(self, content_id):
        """Adds a content id as a key for the voiceover into the
        voiceovers_mapping dict.

        Args:
            content_id: str. The id representing a subtitled html.

        Raises:
            Exception: The content id isn't a string.
            Exception: The content id already exist in the voiceovers_mapping
                dict.
        """
        if not isinstance(content_id, python_utils.BASESTRING):
            raise Exception(
                'Expected content_id to be a string, received %s' % content_id)
        if content_id in self.voiceovers_mapping:
            raise Exception(
                'The content_id %s already exist.' % content_id)

        self.voiceovers_mapping[content_id] = {}

    def delete_content_id_for_voiceover(self, content_id):
        """Deletes a content id from the voiceovers_mapping dict.

        Args:
            content_id: str. The id representing a subtitled html.

        Raises:
            Exception: The content id isn't a string.
            Exception: The content id does not exist in the voiceovers_mapping
                dict.
        """
        if not isinstance(content_id, python_utils.BASESTRING):
            raise Exception(
                'Expected content_id to be a string, received %s' % content_id)
        if content_id not in self.voiceovers_mapping:
            raise Exception(
                'The content_id %s does not exist.' % content_id)

        self.voiceovers_mapping.pop(content_id, None)
class RuleSpec(python_utils.OBJECT):
    """Value object representing a rule specification."""

    def to_dict(self):
        """Returns a dict representing this RuleSpec domain object.

        Returns:
            dict. A dict, mapping all fields of RuleSpec instance.
        """
        return {
            'rule_type': self.rule_type,
            'inputs': self.inputs,
        }

    @classmethod
    def from_dict(cls, rulespec_dict):
        """Return a RuleSpec domain object from a dict.

        Args:
            rulespec_dict: dict. The dict representation of RuleSpec object.

        Returns:
            RuleSpec. The corresponding RuleSpec domain object.
        """
        return cls(
            rulespec_dict['rule_type'],
            rulespec_dict['inputs']
        )

    def __init__(self, rule_type, inputs):
        """Initializes a RuleSpec domain object.

        Args:
            rule_type: str. The rule type, e.g. "CodeContains" or "Equals". A
                full list of rule types can be found in
                extensions/interactions/rule_templates.json.
            inputs: dict. The values of the parameters needed in order to fully
                specify the rule. The keys for this dict can be deduced from
                the relevant description field in
                extensions/interactions/rule_templates.json -- they are
                enclosed in {{...}} braces.
        """
        self.rule_type = rule_type
        self.inputs = inputs

    def validate(self, rule_params_list, exp_param_specs_dict):
        """Validates a RuleSpec value object. It ensures the inputs dict does
        not refer to any non-existent parameters and that it contains values
        for all the parameters the rule expects.

        Args:
            rule_params_list: A list of parameters used by the rule represented
                by this RuleSpec instance, to be used to validate the inputs of
                this RuleSpec. Each element of the list represents a single
                parameter and is a tuple with two elements:
                    0: The name (string) of the parameter.
                    1: The typed object instance for that
                        parameter (e.g. Real).
            exp_param_specs_dict: A dict of specified parameters used in this
                exploration. Keys are parameter names and values are ParamSpec
                value objects with an object type property (obj_type). RuleSpec
                inputs may have a parameter value which refers to one of these
                exploration parameters.

        Raises:
            ValidationError: One or more attributes of the RuleSpec are
                invalid.
        """
        if not isinstance(self.inputs, dict):
            raise utils.ValidationError(
                'Expected inputs to be a dict, received %s' % self.inputs)
        input_key_set = set(self.inputs.keys())
        param_names_set = set([rp[0] for rp in rule_params_list])
        leftover_input_keys = input_key_set - param_names_set
        leftover_param_names = param_names_set - input_key_set

        # Check if there are input keys which are not rule parameters.
        # Extra keys only produce a warning; missing parameters (below)
        # are a hard validation failure.
        if leftover_input_keys:
            logging.warning(
                'RuleSpec \'%s\' has inputs which are not recognized '
                'parameter names: %s' % (self.rule_type, leftover_input_keys))

        # Check if there are missing parameters.
        if leftover_param_names:
            raise utils.ValidationError(
                'RuleSpec \'%s\' is missing inputs: %s'
                % (self.rule_type, leftover_param_names))

        rule_params_dict = {rp[0]: rp[1] for rp in rule_params_list}
        for (param_name, param_value) in self.inputs.items():
            param_obj = rule_params_dict[param_name]
            # Validate the parameter type given the value.
            if isinstance(
                    param_value,
                    python_utils.BASESTRING) and '{{' in param_value:
                # Value refers to a parameter spec. Cross-validate the type of
                # the parameter spec with the rule parameter.
                # NOTE(review): index() finds the FIRST '{{' and FIRST '}}',
                # so this assumes param_value is a single well-formed
                # '{{name}}' template; a value containing '{{' with no '}}'
                # would raise ValueError here rather than ValidationError.
                start_brace_index = param_value.index('{{') + 2
                end_brace_index = param_value.index('}}')
                param_spec_name = param_value[
                    start_brace_index:end_brace_index]
                if param_spec_name not in exp_param_specs_dict:
                    raise utils.ValidationError(
                        'RuleSpec \'%s\' has an input with name \'%s\' which '
                        'refers to an unknown parameter within the '
                        'exploration: %s' % (
                            self.rule_type, param_name, param_spec_name))
                # TODO(bhenning): The obj_type of the param_spec
                # (exp_param_specs_dict[param_spec_name]) should be validated
                # to be the same as param_obj.__name__ to ensure the rule spec
                # can accept the type of the parameter.
            else:
                # Otherwise, a simple parameter value needs to be normalizable
                # by the parameter object in order to be valid.
                param_obj.normalize(param_value)
class SubtitledHtml(python_utils.OBJECT):
    """Value object representing subtitled HTML."""

    def __init__(self, content_id, html):
        """Initializes a SubtitledHtml domain object.

        Args:
            content_id: str. A unique id referring to the other assets for this
                content.
            html: str. A piece of user submitted HTML. This is cleaned in such
                a way as to contain a restricted set of HTML tags.
        """
        self.content_id = content_id
        # Incoming HTML is sanitized to the allowed tag set before storage.
        self.html = html_cleaner.clean(html)
        # Fail fast on construction rather than at first use.
        self.validate()

    @classmethod
    def create_default_subtitled_html(cls, content_id):
        """Create a default SubtitledHtml domain object."""
        return cls(content_id, '')

    @classmethod
    def from_dict(cls, subtitled_html_dict):
        """Return a SubtitledHtml domain object from a dict.

        Args:
            subtitled_html_dict: dict. The dict representation of SubtitledHtml
                object.

        Returns:
            SubtitledHtml. The corresponding SubtitledHtml domain object.
        """
        return cls(
            subtitled_html_dict['content_id'], subtitled_html_dict['html'])

    def to_dict(self):
        """Returns a dict representing this SubtitledHtml domain object.

        Returns:
            dict. A dict, mapping all fields of SubtitledHtml instance.
        """
        return {'content_id': self.content_id, 'html': self.html}

    def validate(self):
        """Validates properties of the SubtitledHtml.

        Raises:
            ValidationError: One or more attributes of the SubtitledHtml are
                invalid.
        """
        if not isinstance(self.content_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected content id to be a string, received %s' %
                self.content_id)
        if not isinstance(self.html, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Invalid content HTML: %s' % self.html)
class State(python_utils.OBJECT):
    """Domain object for a state."""
    def __init__(
            self, content, param_changes, interaction, recorded_voiceovers,
            written_translations, solicit_answer_details,
            classifier_model_id=None):
        """Initializes a State domain object.
        Args:
            content: SubtitledHtml. The contents displayed to the reader in this
                state.
            param_changes: list(ParamChange). Parameter changes associated with
                this state.
            interaction: InteractionInstance. The interaction instance
                associated with this state.
            recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for
                the state contents and translations.
            written_translations: WrittenTranslations. The written translations
                for the state contents.
            solicit_answer_details: bool. Whether the creator wants to ask
                for answer details from the learner about why they picked a
                particular answer while playing the exploration.
            classifier_model_id: str or None. The classifier model ID
                associated with this state, if applicable.
        """
        # The content displayed to the reader in this state.
        self.content = content
        # Parameter changes associated with this state.
        # A fresh ParamChange is constructed for each entry so this state
        # owns its own list of domain objects rather than the caller's.
        self.param_changes = [param_domain.ParamChange(
            param_change.name, param_change.generator.id,
            param_change.customization_args
        ) for param_change in param_changes]
        # The interaction instance associated with this state.
        # Re-wrapped in a new InteractionInstance for the same reason.
        self.interaction = InteractionInstance(
            interaction.id, interaction.customization_args,
            interaction.answer_groups, interaction.default_outcome,
            interaction.confirmed_unclassified_answers,
            interaction.hints, interaction.solution)
        self.classifier_model_id = classifier_model_id
        self.recorded_voiceovers = recorded_voiceovers
        self.written_translations = written_translations
        self.solicit_answer_details = solicit_answer_details
    def validate(self, exp_param_specs_dict, allow_null_interaction):
        """Validates various properties of the State.
        Args:
            exp_param_specs_dict: dict or None. A dict of specified parameters
                used in this exploration. Keys are parameter names and values
                are ParamSpec value objects with an object type
                property(obj_type). It is None if the state belongs to a
                question.
            allow_null_interaction: bool. Whether this state's interaction is
                allowed to be unspecified.
        Raises:
            ValidationError: One or more attributes of the State are invalid.
        """
        self.content.validate()
        if not isinstance(self.param_changes, list):
            raise utils.ValidationError(
                'Expected state param_changes to be a list, received %s'
                % self.param_changes)
        for param_change in self.param_changes:
            param_change.validate()
        if not allow_null_interaction and self.interaction.id is None:
            raise utils.ValidationError(
                'This state does not have any interaction specified.')
        elif self.interaction.id is not None:
            self.interaction.validate(exp_param_specs_dict)
        # Collect every content id used in this state (content, feedback,
        # hints, solution). Duplicates are invalid, and the final list is
        # cross-checked against the translation/voiceover mappings below.
        content_id_list = []
        content_id_list.append(self.content.content_id)
        for answer_group in self.interaction.answer_groups:
            feedback_content_id = answer_group.outcome.feedback.content_id
            if feedback_content_id in content_id_list:
                raise utils.ValidationError(
                    'Found a duplicate content id %s' % feedback_content_id)
            content_id_list.append(feedback_content_id)
        if self.interaction.default_outcome:
            default_outcome_content_id = (
                self.interaction.default_outcome.feedback.content_id)
            if default_outcome_content_id in content_id_list:
                raise utils.ValidationError(
                    'Found a duplicate content id %s'
                    % default_outcome_content_id)
            content_id_list.append(default_outcome_content_id)
        for hint in self.interaction.hints:
            hint_content_id = hint.hint_content.content_id
            if hint_content_id in content_id_list:
                raise utils.ValidationError(
                    'Found a duplicate content id %s' % hint_content_id)
            content_id_list.append(hint_content_id)
        if self.interaction.solution:
            solution_content_id = (
                self.interaction.solution.explanation.content_id)
            if solution_content_id in content_id_list:
                raise utils.ValidationError(
                    'Found a duplicate content id %s' % solution_content_id)
            content_id_list.append(solution_content_id)
        if not isinstance(self.solicit_answer_details, bool):
            raise utils.ValidationError(
                'Expected solicit_answer_details to be a boolean, '
                'received %s' % self.solicit_answer_details)
        if self.solicit_answer_details:
            if self.interaction.id in (
                    constants.INTERACTION_IDS_WITHOUT_ANSWER_DETAILS):
                raise utils.ValidationError(
                    'The %s interaction does not support soliciting '
                    'answer details from learners.' % (self.interaction.id))
        self.written_translations.validate(content_id_list)
        self.recorded_voiceovers.validate(content_id_list)
    def get_content_html(self, content_id):
        """Returns the content belongs to a given content id of the object.
        Args:
            content_id: The id of the content.
        Returns:
            str. The html content corresponding to the given content id.
        Raises:
            ValueError: The given content_id does not exist.
        """
        content_id_to_html = self._get_all_translatable_content()
        if content_id not in content_id_to_html:
            raise ValueError('Content ID %s does not exist' % content_id)
        return content_id_to_html[content_id]
    def get_training_data(self):
        """Retrieves training data from the State domain object.
        Returns:
            list(dict). A list of dicts, each with keys 'answer_group_index'
            and 'answers', for every answer group that has training data.
        """
        state_training_data_by_answer_group = []
        for (answer_group_index, answer_group) in enumerate(
                self.interaction.answer_groups):
            if answer_group.training_data:
                # Deep-copied so callers cannot mutate the stored answers.
                answers = copy.deepcopy(answer_group.training_data)
                state_training_data_by_answer_group.append({
                    'answer_group_index': answer_group_index,
                    'answers': answers
                })
        return state_training_data_by_answer_group
    def can_undergo_classification(self):
        """Checks whether the answers for this state satisfy the preconditions
        for a ML model to be trained.
        Returns:
            bool: True, if the conditions are satisfied.
        """
        training_examples_count = 0
        labels_count = 0
        # Confirmed unclassified answers also count as training examples.
        training_examples_count += len(
            self.interaction.confirmed_unclassified_answers)
        for answer_group in self.interaction.answer_groups:
            training_examples_count += len(answer_group.training_data)
            labels_count += 1
        # Thresholds come from feconf; both must be met simultaneously.
        if ((training_examples_count >= feconf.MIN_TOTAL_TRAINING_EXAMPLES) and
                (labels_count >= feconf.MIN_ASSIGNED_LABELS)):
            return True
        return False
    @classmethod
    def convert_state_dict_to_yaml(cls, state_dict, width):
        """Converts the given state dict to yaml format.
        Args:
            state_dict: dict. A dict representing a state in an exploration.
            width: int. The maximum number of characters in a line for the
                returned YAML string.
        Returns:
            str. The YAML version of the state_dict.
        Raises:
            Exception: The state_dict does not represent a valid state.
        """
        try:
            # Check if the state_dict can be converted to a State.
            state = cls.from_dict(state_dict)
        except Exception:
            logging.info(
                'Bad state dict: %s' % python_utils.UNICODE(state_dict))
            raise Exception('Could not convert state dict to YAML.')
        return python_utils.yaml_from_dict(state.to_dict(), width=width)
    def get_translation_counts(self):
        """Return a dict representing the number of translations available in a
        languages in which there exists at least one translation in the state
        object.
        Returns:
            dict(str, int). A dict with language code as a key and number of
            translations available in that language as the value.
        """
        return self.written_translations.get_translation_counts()
    def get_content_count(self):
        """Returns the number of distinct content fields available in the
        object.
        Returns:
            int. The number of distinct content fields available in the state.
        """
        return len(self.written_translations.translations_mapping)
    def _update_content_ids_in_assets(self, old_ids_list, new_ids_list):
        """Adds or deletes content ids in assets i.e, other parts of state
        object such as recorded_voiceovers and written_translations.
        Args:
            old_ids_list: list(str). A list of content ids present earlier
                within the substructure (like answer groups, hints etc.) of
                state.
            new_ids_list: list(str). A list of content ids currently present
                within the substructure (like answer groups, hints etc.) of
                state.
        Raises:
            Exception: A content id being removed is missing from the assets,
                or a content id being added already exists in them.
        """
        # Only the ids that actually changed need to be synced to the
        # voiceover/translation mappings.
        content_ids_to_delete = set(old_ids_list) - set(new_ids_list)
        content_ids_to_add = set(new_ids_list) - set(old_ids_list)
        content_ids_for_text_translations = (
            self.written_translations.get_content_ids_for_text_translation())
        content_ids_for_voiceovers = (
            self.recorded_voiceovers.get_content_ids_for_voiceovers())
        for content_id in content_ids_to_delete:
            if not content_id in content_ids_for_voiceovers:
                raise Exception(
                    'The content_id %s does not exist in recorded_voiceovers.'
                    % content_id)
            elif not content_id in content_ids_for_text_translations:
                raise Exception(
                    'The content_id %s does not exist in written_translations.'
                    % content_id)
            else:
                self.recorded_voiceovers.delete_content_id_for_voiceover(
                    content_id)
                self.written_translations.delete_content_id_for_translation(
                    content_id)
        for content_id in content_ids_to_add:
            if content_id in content_ids_for_voiceovers:
                raise Exception(
                    'The content_id %s already exists in recorded_voiceovers'
                    % content_id)
            elif content_id in content_ids_for_text_translations:
                raise Exception(
                    'The content_id %s already exists in written_translations.'
                    % content_id)
            else:
                self.recorded_voiceovers.add_content_id_for_voiceover(
                    content_id)
                self.written_translations.add_content_id_for_translation(
                    content_id)
    def add_translation(self, content_id, language_code, translation_html):
        """Adds translation to a given content id in a specific language.
        Args:
            content_id: str. The id of the content.
            language_code: str. The language code.
            translation_html: str. The translated html content.
        """
        # Sanitize user-supplied HTML before storing the translation.
        translation_html = html_cleaner.clean(translation_html)
        self.written_translations.add_translation(
            content_id, language_code, translation_html)
    def update_content(self, content):
        """Update the content of this state.
        Args:
            content: SubtitledHtml. Representation of updated content.
        """
        # TODO(sll): Must sanitize all content in RTE component attrs.
        self.content = content
    def update_param_changes(self, param_changes):
        """Update the param_changes dict attribute.
        Args:
            param_changes: list(ParamChange). List of param_change domain
                objects that represents ParamChange domain object.
        """
        self.param_changes = param_changes
    def update_interaction_id(self, interaction_id):
        """Update the interaction id attribute.
        Args:
            interaction_id: str. The new interaction id to set.
        """
        self.interaction.id = interaction_id
        # TODO(sll): This should also clear interaction.answer_groups (except
        # for the default rule). This is somewhat mitigated because the client
        # updates interaction_answer_groups directly after this, but we should
        # fix it.
    def update_interaction_customization_args(self, customization_args):
        """Update the customization_args of InteractionInstance domain object.
        Args:
            customization_args: dict. The new customization_args to set.
        """
        self.interaction.customization_args = customization_args
    def update_interaction_answer_groups(self, answer_groups_list):
        """Update the list of AnswerGroup in the InteractionInstance domain
        object.
        Args:
            answer_groups_list: list(dict). List of dicts that represent
                AnswerGroup domain object.
        """
        if not isinstance(answer_groups_list, list):
            raise Exception(
                'Expected interaction_answer_groups to be a list, received %s'
                % answer_groups_list)
        interaction_answer_groups = []
        # Remember the old feedback content ids so the voiceover/translation
        # assets can be synced after the answer groups are replaced.
        old_content_id_list = [
            answer_group.outcome.feedback.content_id for answer_group in (
                self.interaction.answer_groups)]
        # TODO(yanamal): Do additional calculations here to get the
        # parameter changes, if necessary.
        for answer_group_dict in answer_groups_list:
            rule_specs_list = answer_group_dict['rule_specs']
            if not isinstance(rule_specs_list, list):
                raise Exception(
                    'Expected answer group rule specs to be a list, '
                    'received %s' % rule_specs_list)
            answer_group = AnswerGroup(
                Outcome.from_dict(answer_group_dict['outcome']), [],
                answer_group_dict['training_data'],
                answer_group_dict['tagged_skill_misconception_id'])
            for rule_dict in rule_specs_list:
                rule_spec = RuleSpec.from_dict(rule_dict)
                # Normalize and store the rule params.
                rule_inputs = rule_spec.inputs
                if not isinstance(rule_inputs, dict):
                    raise Exception(
                        'Expected rule_inputs to be a dict, received %s'
                        % rule_inputs)
                for param_name, value in rule_inputs.items():
                    param_type = (
                        interaction_registry.Registry.get_interaction_by_id(
                            self.interaction.id
                        ).get_rule_param_type(rule_spec.rule_type, param_name))
                    # Values of the form '{{...}}' are parameter templates
                    # and are stored unnormalized.
                    if (isinstance(value, python_utils.BASESTRING) and
                            '{{' in value and '}}' in value):
                        # TODO(jacobdavis11): Create checks that all parameters
                        # referred to exist and have the correct types.
                        normalized_param = value
                    else:
                        try:
                            normalized_param = param_type.normalize(value)
                        except Exception:
                            raise Exception(
                                '%s has the wrong type. It should be a %s.' %
                                (value, param_type.__name__))
                    rule_inputs[param_name] = normalized_param
                answer_group.rule_specs.append(rule_spec)
            interaction_answer_groups.append(answer_group)
        self.interaction.answer_groups = interaction_answer_groups
        new_content_id_list = [
            answer_group.outcome.feedback.content_id for answer_group in (
                self.interaction.answer_groups)]
        self._update_content_ids_in_assets(
            old_content_id_list, new_content_id_list)
    def update_interaction_default_outcome(self, default_outcome_dict):
        """Update the default_outcome of InteractionInstance domain object.
        Args:
            default_outcome_dict: dict. Dict that represents Outcome domain
                object.
        """
        old_content_id_list = []
        new_content_id_list = []
        if self.interaction.default_outcome:
            old_content_id_list.append(
                self.interaction.default_outcome.feedback.content_id)
        if default_outcome_dict:
            if not isinstance(default_outcome_dict, dict):
                raise Exception(
                    'Expected default_outcome_dict to be a dict, received %s'
                    % default_outcome_dict)
            self.interaction.default_outcome = Outcome.from_dict(
                default_outcome_dict)
            new_content_id_list.append(
                self.interaction.default_outcome.feedback.content_id)
        else:
            self.interaction.default_outcome = None
        self._update_content_ids_in_assets(
            old_content_id_list, new_content_id_list)
    def update_interaction_confirmed_unclassified_answers(
            self, confirmed_unclassified_answers):
        """Update the confirmed_unclassified_answers of InteractionInstance
        domain object.
        Args:
            confirmed_unclassified_answers: list(AnswerGroup). The new list of
                answers which have been confirmed to be associated with the
                default outcome.
        Raises:
            Exception: 'confirmed_unclassified_answers' is not a list.
        """
        if not isinstance(confirmed_unclassified_answers, list):
            raise Exception(
                'Expected confirmed_unclassified_answers to be a list,'
                ' received %s' % confirmed_unclassified_answers)
        self.interaction.confirmed_unclassified_answers = (
            confirmed_unclassified_answers)
    def update_interaction_hints(self, hints_list):
        """Update the list of hints.
        Args:
            hints_list: list(dict). A list of dict; each dict represents a Hint
                object.
        Raises:
            Exception: 'hints_list' is not a list.
        """
        if not isinstance(hints_list, list):
            raise Exception(
                'Expected hints_list to be a list, received %s'
                % hints_list)
        old_content_id_list = [
            hint.hint_content.content_id for hint in self.interaction.hints]
        self.interaction.hints = [
            Hint.from_dict(hint_dict)
            for hint_dict in hints_list]
        new_content_id_list = [
            hint.hint_content.content_id for hint in self.interaction.hints]
        self._update_content_ids_in_assets(
            old_content_id_list, new_content_id_list)
    def update_interaction_solution(self, solution_dict):
        """Update the solution of interaction.
        Args:
            solution_dict: dict or None. The dict representation of
                Solution object.
        Raises:
            Exception: 'solution_dict' is not a dict.
        """
        old_content_id_list = []
        new_content_id_list = []
        if self.interaction.solution:
            old_content_id_list.append(
                self.interaction.solution.explanation.content_id)
        if solution_dict is not None:
            if not isinstance(solution_dict, dict):
                raise Exception(
                    'Expected solution to be a dict, received %s'
                    % solution_dict)
            self.interaction.solution = Solution.from_dict(
                self.interaction.id, solution_dict)
            new_content_id_list.append(
                self.interaction.solution.explanation.content_id)
        else:
            self.interaction.solution = None
        self._update_content_ids_in_assets(
            old_content_id_list, new_content_id_list)
    def update_recorded_voiceovers(self, recorded_voiceovers):
        """Update the recorded_voiceovers of a state.
        Args:
            recorded_voiceovers: RecordedVoiceovers. The new RecordedVoiceovers
                object for the state.
        """
        self.recorded_voiceovers = recorded_voiceovers
    def update_written_translations(self, written_translations):
        """Update the written_translations of a state.
        Args:
            written_translations: WrittenTranslations. The new
                WrittenTranslations object for the state.
        """
        self.written_translations = written_translations
    def update_solicit_answer_details(self, solicit_answer_details):
        """Update the solicit_answer_details of a state.
        Args:
            solicit_answer_details: bool. The new value of
                solicit_answer_details for the state.
        Raises:
            Exception: 'solicit_answer_details' is not a bool.
        """
        if not isinstance(solicit_answer_details, bool):
            raise Exception(
                'Expected solicit_answer_details to be a boolean, received %s'
                % solicit_answer_details)
        self.solicit_answer_details = solicit_answer_details
    def _get_all_translatable_content(self):
        """Returns all content which can be translated into different languages.
        Returns:
            dict(str, str). Returns a dict with key as content id and content
            html as the value.
        """
        content_id_to_html = {}
        content_id_to_html[self.content.content_id] = self.content.html
        # TODO(#6178): Remove empty html checks once we add a validation
        # check that ensures each content in state should be non-empty html.
        default_outcome = self.interaction.default_outcome
        if default_outcome is not None and default_outcome.feedback.html != '':
            content_id_to_html[default_outcome.feedback.content_id] = (
                default_outcome.feedback.html)
        for answer_group in self.interaction.answer_groups:
            if answer_group.outcome.feedback.html != '':
                content_id_to_html[answer_group.outcome.feedback.content_id] = (
                    answer_group.outcome.feedback.html)
        for hint in self.interaction.hints:
            if hint.hint_content.html != '':
                content_id_to_html[hint.hint_content.content_id] = (
                    hint.hint_content.html)
        solution = self.interaction.solution
        if solution is not None and solution.explanation.html != '':
            content_id_to_html[solution.explanation.content_id] = (
                solution.explanation.html)
        return content_id_to_html
    def get_content_id_mapping_needing_translations(self, language_code):
        """Returns all text html which can be translated in the given language.
        Args:
            language_code: str. The abbreviated code of the language.
        Returns:
            dict(str, str). A dict with key as content id and value as the
            content html.
        """
        content_id_to_html = self._get_all_translatable_content()
        available_translation_content_ids = (
            self.written_translations
            .get_content_ids_that_are_correctly_translated(language_code))
        # Drop everything that already has an up-to-date translation.
        for content_id in available_translation_content_ids:
            del content_id_to_html[content_id]
        # TODO(#7571): Add functionality to return the list of
        # translations which needs update.
        return content_id_to_html
    def to_dict(self):
        """Returns a dict representing this State domain object.
        Returns:
            dict. A dict mapping all fields of State instance.
        """
        return {
            'content': self.content.to_dict(),
            'param_changes': [param_change.to_dict()
                              for param_change in self.param_changes],
            'interaction': self.interaction.to_dict(),
            'classifier_model_id': self.classifier_model_id,
            'recorded_voiceovers': self.recorded_voiceovers.to_dict(),
            'written_translations': self.written_translations.to_dict(),
            'solicit_answer_details': self.solicit_answer_details
        }
    @classmethod
    def from_dict(cls, state_dict):
        """Return a State domain object from a dict.
        Args:
            state_dict: dict. The dict representation of State object.
        Returns:
            State. The corresponding State domain object.
        """
        return cls(
            SubtitledHtml.from_dict(state_dict['content']),
            [param_domain.ParamChange.from_dict(param)
             for param in state_dict['param_changes']],
            InteractionInstance.from_dict(state_dict['interaction']),
            RecordedVoiceovers.from_dict(state_dict['recorded_voiceovers']),
            WrittenTranslations.from_dict(state_dict['written_translations']),
            state_dict['solicit_answer_details'],
            state_dict['classifier_model_id'])
    @classmethod
    def create_default_state(
            cls, default_dest_state_name, is_initial_state=False):
        """Return a State domain object with default value.
        Args:
            default_dest_state_name: str. The default destination state.
            is_initial_state: bool. Whether this state represents the initial
                state of an exploration.
        Returns:
            State. The corresponding State domain object.
        """
        content_html = (
            feconf.DEFAULT_INIT_STATE_CONTENT_STR if is_initial_state else '')
        content_id = feconf.DEFAULT_NEW_STATE_CONTENT_ID
        # The feconf defaults are deep-copied so that distinct states never
        # share the same mutable default dicts.
        return cls(
            SubtitledHtml(content_id, content_html),
            [],
            InteractionInstance.create_default_interaction(
                default_dest_state_name),
            RecordedVoiceovers.from_dict(copy.deepcopy(
                feconf.DEFAULT_RECORDED_VOICEOVERS)),
            WrittenTranslations.from_dict(
                copy.deepcopy(feconf.DEFAULT_WRITTEN_TRANSLATIONS)),
            False)
    @classmethod
    def convert_html_fields_in_state(cls, state_dict, conversion_fn):
        """Applies a conversion function on all the html strings in a state
        to migrate them to a desired state.
        Args:
            state_dict: dict. The dict representation of State object.
            conversion_fn: function. The conversion function to be applied on
                the states_dict.
        Returns:
            dict. The converted state_dict. (Note: the input dict is mutated
            in place as well.)
        """
        state_dict['content']['html'] = (
            conversion_fn(state_dict['content']['html']))
        if state_dict['interaction']['default_outcome']:
            interaction_feedback_html = state_dict[
                'interaction']['default_outcome']['feedback']['html']
            state_dict['interaction']['default_outcome']['feedback'][
                'html'] = conversion_fn(interaction_feedback_html)
        for answer_group_index, answer_group in enumerate(
                state_dict['interaction']['answer_groups']):
            answer_group_html = answer_group['outcome']['feedback']['html']
            state_dict['interaction']['answer_groups'][
                answer_group_index]['outcome']['feedback']['html'] = (
                    conversion_fn(answer_group_html))
            # ItemSelectionInput rule inputs replicate the HTML of the
            # choices, so they must be converted as well.
            if state_dict['interaction']['id'] == 'ItemSelectionInput':
                for rule_spec_index, rule_spec in enumerate(
                        answer_group['rule_specs']):
                    for x_index, x in enumerate(rule_spec['inputs']['x']):
                        state_dict['interaction']['answer_groups'][
                            answer_group_index]['rule_specs'][
                                rule_spec_index]['inputs']['x'][x_index] = (
                                    conversion_fn(x))
        for hint_index, hint in enumerate(
                state_dict['interaction']['hints']):
            hint_html = hint['hint_content']['html']
            state_dict['interaction']['hints'][hint_index][
                'hint_content']['html'] = conversion_fn(hint_html)
        if state_dict['interaction']['solution']:
            solution_html = state_dict[
                'interaction']['solution']['explanation']['html']
            state_dict['interaction']['solution']['explanation']['html'] = (
                conversion_fn(solution_html))
        # These interactions keep learner-facing HTML in their
        # customization args ('choices'), which needs converting too.
        if state_dict['interaction']['id'] in (
                'ItemSelectionInput', 'MultipleChoiceInput'):
            for value_index, value in enumerate(
                    state_dict['interaction']['customization_args'][
                        'choices']['value']):
                state_dict['interaction']['customization_args'][
                    'choices']['value'][value_index] = conversion_fn(value)
        return state_dict
| 41.256863 | 80 | 0.624032 |
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import copy
import logging
from constants import constants
from core.domain import customization_args_util
from core.domain import html_cleaner
from core.domain import interaction_registry
from core.domain import param_domain
import feconf
import python_utils
import utils
class AnswerGroup(python_utils.OBJECT):
    """Value object for an answer group: a set of rule specs, an outcome
    shared by those rules, optional training data, and an optionally tagged
    skill misconception.
    """
    def to_dict(self):
        """Returns a dict representing this AnswerGroup domain object.
        Returns:
            dict. A dict, mapping all fields of AnswerGroup instance.
        """
        return {
            'rule_specs': [rule_spec.to_dict()
                           for rule_spec in self.rule_specs],
            'outcome': self.outcome.to_dict(),
            'training_data': self.training_data,
            'tagged_skill_misconception_id': self.tagged_skill_misconception_id
        }
    @classmethod
    def from_dict(cls, answer_group_dict):
        """Return an AnswerGroup domain object from a dict.
        Args:
            answer_group_dict: dict. The dict representation of AnswerGroup
                object.
        Returns:
            AnswerGroup. The corresponding AnswerGroup domain object.
        """
        return cls(
            Outcome.from_dict(answer_group_dict['outcome']),
            [RuleSpec.from_dict(rs) for rs in answer_group_dict['rule_specs']],
            answer_group_dict['training_data'],
            answer_group_dict['tagged_skill_misconception_id']
        )
    def __init__(
            self, outcome, rule_specs, training_data,
            tagged_skill_misconception_id):
        """Initializes an AnswerGroup domain object.
        Args:
            outcome: Outcome. The outcome corresponding to the answer group.
            rule_specs: list(RuleSpec). The rule specifications for the group.
            training_data: list. The answers belonging to the training data
                of this answer group.
            tagged_skill_misconception_id: str or None. Expected format is
                '<skill_id>-<misconception_id>' (enforced in validate()), or
                None if no misconception is tagged.
        """
        # Fresh RuleSpec objects are built so the group owns its own list
        # rather than sharing the caller's objects.
        self.rule_specs = [RuleSpec(
            rule_spec.rule_type, rule_spec.inputs
        ) for rule_spec in rule_specs]
        self.outcome = outcome
        self.training_data = training_data
        self.tagged_skill_misconception_id = tagged_skill_misconception_id
    def validate(self, interaction, exp_param_specs_dict):
        """Validates the rule specs, tagged misconception id and outcome of
        the answer group.
        Args:
            interaction: BaseInteraction. The interaction, used to check the
                rule types and obtain each rule's parameter list.
            exp_param_specs_dict: dict. A dict of the exploration's parameter
                specs, keyed by parameter name.
        Raises:
            ValidationError: One or more attributes of the AnswerGroup are
                invalid.
        """
        if not isinstance(self.rule_specs, list):
            raise utils.ValidationError(
                'Expected answer group rules to be a list, received %s'
                % self.rule_specs)
        if self.tagged_skill_misconception_id is not None:
            if not isinstance(
                    self.tagged_skill_misconception_id,
                    python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected tagged skill misconception id to be a str, '
                    'received %s' % self.tagged_skill_misconception_id)
            # Exactly one '-' separates the skill id and misconception id.
            if self.tagged_skill_misconception_id.count('-') != 1:
                raise utils.ValidationError(
                    'Expected the format of tagged skill misconception id '
                    'to be <skill_id>-<misconception_id>, received %s'
                    % self.tagged_skill_misconception_id)
        if len(self.rule_specs) == 0 and len(self.training_data) == 0:
            raise utils.ValidationError(
                'There must be at least one rule or training data for each'
                ' answer group.')
        for rule_spec in self.rule_specs:
            if rule_spec.rule_type not in interaction.rules_dict:
                raise utils.ValidationError(
                    'Unrecognized rule type: %s' % rule_spec.rule_type)
            rule_spec.validate(
                interaction.get_rule_param_list(rule_spec.rule_type),
                exp_param_specs_dict)
        self.outcome.validate()
class Hint(python_utils.OBJECT):
    """Value object representing a hint shown to the learner."""
    def __init__(self, hint_content):
        """Constructs a Hint domain object.
        Args:
            hint_content: SubtitledHtml. The hint text and its content id.
        """
        self.hint_content = hint_content
    def to_dict(self):
        """Returns a dict representing this Hint domain object."""
        return {'hint_content': self.hint_content.to_dict()}
    @classmethod
    def from_dict(cls, hint_dict):
        """Builds a Hint domain object from its dict form.
        Args:
            hint_dict: dict. The dict representation of Hint object.
        Returns:
            Hint. The corresponding Hint domain object.
        """
        content = SubtitledHtml.from_dict(hint_dict['hint_content'])
        return cls(content)
    def validate(self):
        """Validates the hint by validating its embedded content."""
        self.hint_content.validate()
class Solution(python_utils.OBJECT):
    """Value object for the solution of an interaction: a correct answer,
    whether that answer is the only correct one, and an HTML explanation.
    """
    def __init__(
            self, interaction_id, answer_is_exclusive,
            correct_answer, explanation):
        """Constructs a Solution domain object.
        Args:
            interaction_id: str. The interaction id, used to look up the
                answer-normalization logic in the interaction registry.
            answer_is_exclusive: bool. Whether this is the only correct
                answer (as opposed to merely one correct answer).
            correct_answer: The correct answer for the interaction; it is
                normalized before being stored.
            explanation: SubtitledHtml. The explanation of the answer.
        """
        self.answer_is_exclusive = answer_is_exclusive
        interaction = interaction_registry.Registry.get_interaction_by_id(
            interaction_id)
        self.correct_answer = interaction.normalize_answer(correct_answer)
        self.explanation = explanation
    def to_dict(self):
        """Returns a dict representing this Solution domain object."""
        return {
            'answer_is_exclusive': self.answer_is_exclusive,
            'correct_answer': self.correct_answer,
            'explanation': self.explanation.to_dict(),
        }
    @classmethod
    def from_dict(cls, interaction_id, solution_dict):
        """Builds a Solution domain object from its dict form.
        Args:
            interaction_id: str. The interaction id.
            solution_dict: dict. The dict representation of Solution object.
        Returns:
            Solution. The corresponding Solution domain object.
        """
        interaction = interaction_registry.Registry.get_interaction_by_id(
            interaction_id)
        # NOTE(review): the answer is normalized here and again in
        # __init__; kept for parity with the original behavior —
        # normalization is presumably idempotent.
        normalized_answer = interaction.normalize_answer(
            solution_dict['correct_answer'])
        explanation = SubtitledHtml.from_dict(solution_dict['explanation'])
        return cls(
            interaction_id,
            solution_dict['answer_is_exclusive'],
            normalized_answer,
            explanation)
    def validate(self, interaction_id):
        """Validates the Solution domain object.
        Args:
            interaction_id: str. The interaction id against which the stored
                answer is re-normalized as a type check.
        Raises:
            ValidationError: answer_is_exclusive is not a bool.
        """
        if not isinstance(self.answer_is_exclusive, bool):
            raise utils.ValidationError(
                'Expected answer_is_exclusive to be bool, received %s' %
                self.answer_is_exclusive)
        interaction_registry.Registry.get_interaction_by_id(
            interaction_id).normalize_answer(self.correct_answer)
        self.explanation.validate()
class InteractionInstance(python_utils.OBJECT):
    """Value object for an instance of an interaction: the interaction id
    plus its customization args, answer groups, default outcome, confirmed
    unclassified answers, hints and solution.
    """
    # The id of the interaction used for a newly-created state (no
    # interaction chosen yet).
    _DEFAULT_INTERACTION_ID = None
    def to_dict(self):
        """Returns a dict representing this InteractionInstance domain object.
        Returns:
            dict. A dict mapping all fields of InteractionInstance instance.
        """
        return {
            'id': self.id,
            # The stored customization args are padded out with the full
            # spec defaults; an id-less interaction has no args at all.
            'customization_args': (
                {} if self.id is None else
                customization_args_util.get_full_customization_args(
                    self.customization_args,
                    interaction_registry.Registry.get_interaction_by_id(
                        self.id).customization_arg_specs)),
            'answer_groups': [group.to_dict() for group in self.answer_groups],
            'default_outcome': (
                self.default_outcome.to_dict()
                if self.default_outcome is not None
                else None),
            'confirmed_unclassified_answers': (
                self.confirmed_unclassified_answers),
            'hints': [hint.to_dict() for hint in self.hints],
            'solution': self.solution.to_dict() if self.solution else None,
        }
    @classmethod
    def from_dict(cls, interaction_dict):
        """Return an InteractionInstance domain object from a dict.
        Args:
            interaction_dict: dict. The dict representation of
                InteractionInstance object.
        Returns:
            InteractionInstance. The corresponding InteractionInstance domain
            object.
        """
        default_outcome_dict = (
            Outcome.from_dict(interaction_dict['default_outcome'])
            if interaction_dict['default_outcome'] is not None else None)
        # A solution can only be reconstructed when an interaction id is
        # present, since deserialization is interaction-specific.
        solution_dict = (
            Solution.from_dict(
                interaction_dict['id'], interaction_dict['solution'])
            if (interaction_dict['solution'] and interaction_dict['id'])
            else None)
        return cls(
            interaction_dict['id'],
            interaction_dict['customization_args'],
            [AnswerGroup.from_dict(h)
             for h in interaction_dict['answer_groups']],
            default_outcome_dict,
            interaction_dict['confirmed_unclassified_answers'],
            [Hint.from_dict(h) for h in interaction_dict['hints']],
            solution_dict)
    def __init__(
            self, interaction_id, customization_args, answer_groups,
            default_outcome, confirmed_unclassified_answers, hints, solution):
        """Initializes an InteractionInstance domain object.
        Args:
            interaction_id: str or None. The interaction id.
            customization_args: dict. The customization args for the
                interaction.
            answer_groups: list(AnswerGroup). The answer groups for the
                interaction.
            default_outcome: Outcome or None. The default outcome for the
                interaction.
            confirmed_unclassified_answers: list. Answers confirmed to be
                associated with the default outcome.
            hints: list(Hint). The hints for the interaction.
            solution: Solution or None. The solution for the interaction.
        """
        self.id = interaction_id
        # args may be Jinja templates that refer to state parameters.
        # This is a dict: the keys are names of customization_args and the
        # values are dicts with a single key, 'value', whose corresponding
        # value is the value of the customization arg.
        self.customization_args = customization_args
        self.answer_groups = answer_groups
        self.default_outcome = default_outcome
        self.confirmed_unclassified_answers = confirmed_unclassified_answers
        self.hints = hints
        self.solution = solution
    @property
    def is_terminal(self):
        """Whether this interaction type is terminal. If no id is set, the
        interaction is treated as non-terminal.
        Returns:
            bool. Whether the interaction is terminal.
        """
        return self.id and interaction_registry.Registry.get_interaction_by_id(
            self.id).is_terminal
    def get_all_outcomes(self):
        """Returns a list of all outcomes of this interaction: one per answer
        group plus the default outcome, if present.
        Returns:
            list(Outcome). List of all outcomes of this interaction.
        """
        outcomes = []
        for answer_group in self.answer_groups:
            outcomes.append(answer_group.outcome)
        if self.default_outcome is not None:
            outcomes.append(self.default_outcome)
        return outcomes
    def validate(self, exp_param_specs_dict):
        """Validates various properties of the InteractionInstance.
        Args:
            exp_param_specs_dict: dict. A dict of the exploration's parameter
                specs, keyed by parameter name; passed through to the answer
                group validation.
        Raises:
            ValidationError: One or more attributes of the
                InteractionInstance are invalid.
        """
        if not isinstance(self.id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected interaction id to be a string, received %s' %
                self.id)
        try:
            interaction = interaction_registry.Registry.get_interaction_by_id(
                self.id)
        except KeyError:
            raise utils.ValidationError('Invalid interaction id: %s' % self.id)
        customization_args_util.validate_customization_args_and_values(
            'interaction', self.id, self.customization_args,
            interaction.customization_arg_specs)
        if not isinstance(self.answer_groups, list):
            raise utils.ValidationError(
                'Expected answer groups to be a list, received %s.'
                % self.answer_groups)
        # Terminal interactions end the exploration, so they may have
        # neither a default outcome nor any answer groups.
        if not self.is_terminal and self.default_outcome is None:
            raise utils.ValidationError(
                'Non-terminal interactions must have a default outcome.')
        if self.is_terminal and self.default_outcome is not None:
            raise utils.ValidationError(
                'Terminal interactions must not have a default outcome.')
        if self.is_terminal and self.answer_groups:
            raise utils.ValidationError(
                'Terminal interactions must not have any answer groups.')
        for answer_group in self.answer_groups:
            answer_group.validate(interaction, exp_param_specs_dict)
        if self.default_outcome is not None:
            self.default_outcome.validate()
        if not isinstance(self.hints, list):
            raise utils.ValidationError(
                'Expected hints to be a list, received %s'
                % self.hints)
        for hint in self.hints:
            hint.validate()
        if self.solution:
            self.solution.validate(self.id)
        if self.solution and not self.hints:
            raise utils.ValidationError(
                'Hint(s) must be specified if solution is specified')
    @classmethod
    def create_default_interaction(cls, default_dest_state_name):
        """Creates a default InteractionInstance: no interaction id yet, and
        a default outcome routing to the given destination state.
        Args:
            default_dest_state_name: str. The destination of the default
                outcome.
        Returns:
            InteractionInstance. The corresponding InteractionInstance domain
            object.
        """
        default_outcome = Outcome(
            default_dest_state_name,
            SubtitledHtml.create_default_subtitled_html(
                feconf.DEFAULT_OUTCOME_CONTENT_ID), False, {}, None, None)
        return cls(
            cls._DEFAULT_INTERACTION_ID, {}, [], default_outcome, [], [], {})
    def get_all_html_content_strings(self):
        """Gets all HTML content strings stored in this interaction: answer
        group feedback, rule inputs that embed HTML, the default outcome,
        hints, the solution explanation, and HTML-bearing customization args.
        Returns:
            list(str). The list of all HTML content strings.
        """
        html_list = []
        for answer_group in self.answer_groups:
            outcome_html = answer_group.outcome.feedback.html
            html_list = html_list + [outcome_html]
        # Note that ItemSelectionInput replicates the customization arg HTML
        # in its answer groups.
        if self.id == 'ItemSelectionInput':
            for answer_group in self.answer_groups:
                for rule_spec in answer_group.rule_specs:
                    rule_spec_html = rule_spec.inputs['x']
                    html_list = html_list + rule_spec_html
        if self.id == 'DragAndDropSortInput':
            for answer_group in self.answer_groups:
                for rule_spec in answer_group.rule_specs:
                    rule_spec_html_list = rule_spec.inputs['x']
                    for rule_spec_html in rule_spec_html_list:
                        html_list = html_list + rule_spec_html
        if self.default_outcome:
            default_outcome_html = self.default_outcome.feedback.html
            html_list = html_list + [default_outcome_html]
        for hint in self.hints:
            hint_html = hint.hint_content.html
            html_list = html_list + [hint_html]
        if self.solution:
            solution_html = self.solution.explanation.html
            html_list = html_list + [solution_html]
        # These interaction types store learner-facing HTML in their
        # 'choices' customization arg.
        if self.id in (
                'ItemSelectionInput', 'MultipleChoiceInput',
                'DragAndDropSortInput'):
            customization_args_html_list = (
                self.customization_args['choices']['value'])
            html_list = html_list + customization_args_html_list
        return html_list
class Outcome(python_utils.OBJECT):
    """Value object for the result of matching an answer group (or falling
    through to the default): destination state, learner feedback, and any
    parameter changes to apply.
    """

    def __init__(
            self, dest, feedback, labelled_as_correct, param_changes,
            refresher_exploration_id, missing_prerequisite_skill_id):
        """Initializes the outcome.

        Args:
            dest: str. Name of the destination state.
                TODO(sll): Check that this state actually exists.
            feedback: SubtitledHtml. Feedback shown to the reader when this
                outcome is triggered.
            labelled_as_correct: bool. Whether the creator labelled this
                outcome as corresponding to a "correct" answer.
            param_changes: list(ParamChange) or None. Exploration-level
                parameter changes made when this outcome is triggered.
            refresher_exploration_id: str or None. Optional exploration to
                redirect the learner to if they lack a prerequisite
                concept; should only exist when the destination is a
                self-loop.
            missing_prerequisite_skill_id: str or None. Optional skill
                whose concept card is shown with this outcome.
        """
        self.dest = dest
        self.feedback = feedback
        self.labelled_as_correct = labelled_as_correct
        self.param_changes = param_changes or []
        self.refresher_exploration_id = refresher_exploration_id
        self.missing_prerequisite_skill_id = missing_prerequisite_skill_id

    def to_dict(self):
        """Returns a dict representation of this Outcome."""
        return {
            'dest': self.dest,
            'feedback': self.feedback.to_dict(),
            'labelled_as_correct': self.labelled_as_correct,
            'param_changes': [
                change.to_dict() for change in self.param_changes],
            'refresher_exploration_id': self.refresher_exploration_id,
            'missing_prerequisite_skill_id': self.missing_prerequisite_skill_id
        }

    @classmethod
    def from_dict(cls, outcome_dict):
        """Builds an Outcome from its dict representation."""
        param_changes = [
            param_domain.ParamChange(
                change['name'], change['generator_id'],
                change['customization_args'])
            for change in outcome_dict['param_changes']]
        return cls(
            outcome_dict['dest'],
            SubtitledHtml.from_dict(outcome_dict['feedback']),
            outcome_dict['labelled_as_correct'],
            param_changes,
            outcome_dict['refresher_exploration_id'],
            outcome_dict['missing_prerequisite_skill_id'])

    def validate(self):
        """Validates this Outcome.

        Raises:
            ValidationError. One of the fields has an invalid type.
        """
        self.feedback.validate()
        if not isinstance(self.labelled_as_correct, bool):
            raise utils.ValidationError(
                'The "labelled_as_correct" field should be a boolean, received '
                '%s' % self.labelled_as_correct)
        if self.missing_prerequisite_skill_id is not None and not isinstance(
                self.missing_prerequisite_skill_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected outcome missing_prerequisite_skill_id to be a '
                'string, received %s' % self.missing_prerequisite_skill_id)
        if not isinstance(self.param_changes, list):
            raise utils.ValidationError(
                'Expected outcome param_changes to be a list, received %s'
                % self.param_changes)
        for param_change in self.param_changes:
            param_change.validate()
        if self.refresher_exploration_id is not None and not isinstance(
                self.refresher_exploration_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected outcome refresher_exploration_id to be a string, '
                'received %s' % self.refresher_exploration_id)
class Voiceover(python_utils.OBJECT):
    """Value object describing a single recorded voiceover audio file."""

    def __init__(self, filename, file_size_bytes, needs_update):
        """Initializes the voiceover.

        Args:
            filename: str. The audio file path, e.g.
                "content-en-2-h7sjp8s.mp3".
            file_size_bytes: int. File size in bytes; used to show
                potential bandwidth usage to the learner before download.
            needs_update: bool. Whether the audio is marked as needing
                review.
        """
        self.filename = filename
        self.file_size_bytes = file_size_bytes
        self.needs_update = needs_update

    def to_dict(self):
        """Returns a dict representation of this Voiceover."""
        return {
            'filename': self.filename,
            'file_size_bytes': self.file_size_bytes,
            'needs_update': self.needs_update,
        }

    @classmethod
    def from_dict(cls, voiceover_dict):
        """Builds a Voiceover from its dict representation."""
        return cls(
            voiceover_dict['filename'],
            voiceover_dict['file_size_bytes'],
            voiceover_dict['needs_update'])

    def validate(self):
        """Validates this Voiceover.

        Raises:
            ValidationError. The filename, file size or needs_update flag
                is invalid.
        """
        if not isinstance(self.filename, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected audio filename to be a string, received %s' %
                self.filename)
        dot_index = self.filename.rfind('.')
        # rfind returns -1 when missing; 0 would mean an empty base name.
        if dot_index < 1:
            raise utils.ValidationError(
                'Invalid audio filename: %s' % self.filename)
        extension = self.filename[dot_index + 1:]
        if extension not in feconf.ACCEPTED_AUDIO_EXTENSIONS:
            raise utils.ValidationError(
                'Invalid audio filename: it should have one of '
                'the following extensions: %s. Received: %s'
                % (list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys()),
                   self.filename))
        if not isinstance(self.file_size_bytes, int):
            raise utils.ValidationError(
                'Expected file size to be an int, received %s' %
                self.file_size_bytes)
        if self.file_size_bytes <= 0:
            raise utils.ValidationError(
                'Invalid file size: %s' % self.file_size_bytes)
        if not isinstance(self.needs_update, bool):
            raise utils.ValidationError(
                'Expected needs_update to be a bool, received %s' %
                self.needs_update)
class WrittenTranslation(python_utils.OBJECT):
    """A single HTML translation of one piece of state content."""

    def __init__(self, html, needs_update):
        """Initializes the translation.

        Args:
            html: str. The translated HTML; sanitized before storage.
            needs_update: bool. Whether the translation is flagged as
                stale.
        """
        self.html = html_cleaner.clean(html)
        self.needs_update = needs_update

    def to_dict(self):
        """Returns a dict representation of this WrittenTranslation."""
        return {
            'html': self.html,
            'needs_update': self.needs_update,
        }

    @classmethod
    def from_dict(cls, written_translation_dict):
        """Builds a WrittenTranslation from its dict representation."""
        return cls(
            written_translation_dict['html'],
            written_translation_dict['needs_update'])

    def validate(self):
        """Validates this WrittenTranslation.

        Raises:
            ValidationError. The html or needs_update field has an invalid
                type.
        """
        if not isinstance(self.html, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Invalid content HTML: %s' % self.html)
        if not isinstance(self.needs_update, bool):
            raise utils.ValidationError(
                'Expected needs_update to be a bool, received %s' %
                self.needs_update)
class WrittenTranslations(python_utils.OBJECT):
    """All written translations for a state, keyed first by content id and
    then by language code.
    """

    def __init__(self, translations_mapping):
        # dict(str -> dict(str -> WrittenTranslation)).
        self.translations_mapping = translations_mapping

    def to_dict(self):
        """Returns a dict representation of this WrittenTranslations."""
        return {
            'translations_mapping': {
                content_id: {
                    language_code: translation.to_dict()
                    for language_code, translation in language_map.items()
                }
                for content_id, language_map in
                self.translations_mapping.items()
            }
        }

    @classmethod
    def from_dict(cls, written_translations_dict):
        """Builds a WrittenTranslations from its dict representation."""
        translations_mapping = {
            content_id: {
                language_code: WrittenTranslation.from_dict(translation_dict)
                for language_code, translation_dict in language_map.items()
            }
            for content_id, language_map in
            written_translations_dict['translations_mapping'].items()
        }
        return cls(translations_mapping)

    def get_content_ids_that_are_correctly_translated(self, language_code):
        """Returns the content ids whose translation in the given language
        exists and is not flagged as needing an update.
        """
        return [
            content_id
            for content_id, translations in self.translations_mapping.items()
            if language_code in translations and not (
                translations[language_code].needs_update)
        ]

    def add_translation(self, content_id, language_code, html):
        """Stores a fresh translation (needs_update=False) for the given
        content id and language. The content id must already be registered
        in the mapping.
        """
        self.translations_mapping[content_id][language_code] = (
            WrittenTranslation(html, False))

    def validate(self, expected_content_id_list):
        """Validates the mapping, optionally requiring its content ids to
        exactly match ``expected_content_id_list``.

        Raises:
            ValidationError. A key or value has an invalid type, or the
                content ids do not match the expected list.
        """
        if expected_content_id_list is not None:
            if set(self.translations_mapping.keys()) != set(
                    expected_content_id_list):
                raise utils.ValidationError(
                    'Expected state written_translations to match the listed '
                    'content ids %s, found %s' % (
                        expected_content_id_list,
                        list(self.translations_mapping.keys()))
                    )
        for content_id, language_map in self.translations_mapping.items():
            if not isinstance(content_id, python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected content_id to be a string, received %s'
                    % content_id)
            if not isinstance(language_map, dict):
                raise utils.ValidationError(
                    'Expected content_id value to be a dict, received %s'
                    % language_map)
            for language_code, translation in language_map.items():
                if not isinstance(language_code, python_utils.BASESTRING):
                    raise utils.ValidationError(
                        'Expected language_code to be a string, received %s'
                        % language_code)
                # Currently, we assume written translations are used by the
                # voice-artist to voiceover the translated text, so they are
                # restricted to supported audio/voiceover languages.
                allowed_language_codes = [
                    language['id']
                    for language in constants.SUPPORTED_AUDIO_LANGUAGES]
                if language_code not in allowed_language_codes:
                    raise utils.ValidationError(
                        'Invalid language_code: %s' % language_code)
                translation.validate()

    def get_content_ids_for_text_translation(self):
        """Returns all content ids registered for translation."""
        return list(self.translations_mapping.keys())

    def get_translated_content(self, content_id, language_code):
        """Returns the translated HTML for a content id in a language.

        Raises:
            Exception. The content id is unknown, or has no translation in
                the given language.
        """
        if content_id not in self.translations_mapping:
            raise Exception('Invalid content_id: %s' % content_id)
        if language_code not in self.translations_mapping[content_id]:
            raise Exception(
                'Translation for the given content_id %s does not exist in '
                '%s language code' % (content_id, language_code))
        return self.translations_mapping[content_id][language_code].html

    def add_content_id_for_translation(self, content_id):
        """Registers a new content id with an empty language map.

        Raises:
            Exception. The id is not a string or already exists.
        """
        if not isinstance(content_id, python_utils.BASESTRING):
            raise Exception(
                'Expected content_id to be a string, received %s' % content_id)
        if content_id in self.translations_mapping:
            raise Exception(
                'The content_id %s already exist.' % content_id)
        self.translations_mapping[content_id] = {}

    def delete_content_id_for_translation(self, content_id):
        """Removes a content id and all its translations.

        Raises:
            Exception. The id is not a string or does not exist.
        """
        if not isinstance(content_id, python_utils.BASESTRING):
            raise Exception(
                'Expected content_id to be a string, received %s' % content_id)
        if content_id not in self.translations_mapping:
            raise Exception(
                'The content_id %s does not exist.' % content_id)
        self.translations_mapping.pop(content_id, None)

    def get_translation_counts(self):
        """Returns, per language code, the number of up-to-date
        translations across all content ids.
        """
        counts = collections.defaultdict(int)
        for language_map in self.translations_mapping.values():
            for language_code, translation in language_map.items():
                if not translation.needs_update:
                    counts[language_code] += 1
        return counts
class RecordedVoiceovers(python_utils.OBJECT):
    """All recorded voiceovers for a state, keyed first by content id and
    then by language code.
    """

    def __init__(self, voiceovers_mapping):
        # dict(str -> dict(str -> Voiceover)).
        self.voiceovers_mapping = voiceovers_mapping

    def to_dict(self):
        """Returns a dict representation of this RecordedVoiceovers."""
        return {
            'voiceovers_mapping': {
                content_id: {
                    language_code: voiceover.to_dict()
                    for language_code, voiceover in language_map.items()
                }
                for content_id, language_map in
                self.voiceovers_mapping.items()
            }
        }

    @classmethod
    def from_dict(cls, recorded_voiceovers_dict):
        """Builds a RecordedVoiceovers from its dict representation."""
        voiceovers_mapping = {
            content_id: {
                language_code: Voiceover.from_dict(voiceover_dict)
                for language_code, voiceover_dict in language_map.items()
            }
            for content_id, language_map in
            recorded_voiceovers_dict['voiceovers_mapping'].items()
        }
        return cls(voiceovers_mapping)

    def validate(self, expected_content_id_list):
        """Validates the mapping, optionally requiring its content ids to
        exactly match ``expected_content_id_list``.

        Raises:
            ValidationError. A key or value has an invalid type, or the
                content ids do not match the expected list.
        """
        if expected_content_id_list is not None:
            if set(self.voiceovers_mapping.keys()) != set(
                    expected_content_id_list):
                raise utils.ValidationError(
                    'Expected state recorded_voiceovers to match the listed '
                    'content ids %s, found %s' % (
                        expected_content_id_list,
                        list(self.voiceovers_mapping.keys()))
                    )
        for content_id, language_map in self.voiceovers_mapping.items():
            if not isinstance(content_id, python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected content_id to be a string, received %s'
                    % content_id)
            if not isinstance(language_map, dict):
                raise utils.ValidationError(
                    'Expected content_id value to be a dict, received %s'
                    % language_map)
            for language_code, voiceover in language_map.items():
                if not isinstance(language_code, python_utils.BASESTRING):
                    raise utils.ValidationError(
                        'Expected language_code to be a string, received %s'
                        % language_code)
                allowed_language_codes = [
                    language['id']
                    for language in constants.SUPPORTED_AUDIO_LANGUAGES]
                if language_code not in allowed_language_codes:
                    raise utils.ValidationError(
                        'Invalid language_code: %s' % language_code)
                voiceover.validate()

    def get_content_ids_for_voiceovers(self):
        """Returns all content ids registered for voiceovers."""
        return list(self.voiceovers_mapping.keys())

    def strip_all_existing_voiceovers(self):
        """Discards every recorded voiceover, keeping the content ids."""
        for content_id in self.voiceovers_mapping:
            self.voiceovers_mapping[content_id] = {}

    def add_content_id_for_voiceover(self, content_id):
        """Registers a new content id with an empty language map.

        Raises:
            Exception. The id is not a string or already exists.
        """
        if not isinstance(content_id, python_utils.BASESTRING):
            raise Exception(
                'Expected content_id to be a string, received %s' % content_id)
        if content_id in self.voiceovers_mapping:
            raise Exception(
                'The content_id %s already exist.' % content_id)
        self.voiceovers_mapping[content_id] = {}

    def delete_content_id_for_voiceover(self, content_id):
        """Removes a content id and all its voiceovers.

        Raises:
            Exception. The id is not a string or does not exist.
        """
        if not isinstance(content_id, python_utils.BASESTRING):
            raise Exception(
                'Expected content_id to be a string, received %s' % content_id)
        if content_id not in self.voiceovers_mapping:
            raise Exception(
                'The content_id %s does not exist.' % content_id)
        self.voiceovers_mapping.pop(content_id, None)
class RuleSpec(python_utils.OBJECT):
    """Value object representing one rule specification: a rule type plus
    the dict of inputs (parameter name -> value) that instantiate it.
    """
    def to_dict(self):
        """Returns a dict representation of this RuleSpec."""
        return {
            'rule_type': self.rule_type,
            'inputs': self.inputs,
        }
    @classmethod
    def from_dict(cls, rulespec_dict):
        """Builds a RuleSpec from its dict representation."""
        return cls(
            rulespec_dict['rule_type'],
            rulespec_dict['inputs']
        )
    def __init__(self, rule_type, inputs):
        """Initializes the rule spec.

        Args:
            rule_type: str. The rule type, e.g. "CodeContains" or "Equals".
            inputs: dict. Maps parameter names to the values the rule is
                evaluated with. Values may also be '{{...}}' strings that
                reference exploration parameter specs.
        """
        self.rule_type = rule_type
        self.inputs = inputs
    def validate(self, rule_params_list, exp_param_specs_dict):
        """Validates this RuleSpec against the declared rule parameters.

        Args:
            rule_params_list: list of (name, param_obj) pairs describing
                the parameters the rule type accepts; param_obj is an
                object-class used to normalize values.
            exp_param_specs_dict: dict mapping exploration parameter spec
                names to their specs; used to resolve '{{...}}' references.

        Raises:
            ValidationError. Inputs are not a dict, a declared parameter is
                missing, a '{{...}}' reference names an unknown parameter,
                or a value fails normalization.
        """
        if not isinstance(self.inputs, dict):
            raise utils.ValidationError(
                'Expected inputs to be a dict, received %s' % self.inputs)
        input_key_set = set(self.inputs.keys())
        param_names_set = set([rp[0] for rp in rule_params_list])
        leftover_input_keys = input_key_set - param_names_set
        leftover_param_names = param_names_set - input_key_set
        # Check if there are input keys which are not rule parameters.
        # Extra keys only warn (they are ignored), but missing parameters
        # are a hard error below.
        if leftover_input_keys:
            logging.warning(
                'RuleSpec \'%s\' has inputs which are not recognized '
                'parameter names: %s' % (self.rule_type, leftover_input_keys))
        # Check if there are missing parameters.
        if leftover_param_names:
            raise utils.ValidationError(
                'RuleSpec \'%s\' is missing inputs: %s'
                % (self.rule_type, leftover_param_names))
        rule_params_dict = {rp[0]: rp[1] for rp in rule_params_list}
        for (param_name, param_value) in self.inputs.items():
            param_obj = rule_params_dict[param_name]
            # Validate the parameter type given the value.
            if isinstance(
                    param_value,
                    python_utils.BASESTRING) and '{{' in param_value:
                # Value refers to a parameter spec. Cross-validate the type of
                # the parameter spec with the rule parameter.
                # The name is whatever sits between the first '{{' and the
                # first '}}' in the string.
                start_brace_index = param_value.index('{{') + 2
                end_brace_index = param_value.index('}}')
                param_spec_name = param_value[
                    start_brace_index:end_brace_index]
                if param_spec_name not in exp_param_specs_dict:
                    raise utils.ValidationError(
                        'RuleSpec \'%s\' has an input with name \'%s\' which '
                        'refers to an unknown parameter within the '
                        'exploration: %s' % (
                            self.rule_type, param_name, param_spec_name))
                # TODO(bhenning): The obj_type of the param_spec
                # (exp_param_specs_dict[param_spec_name]) should be validated
                # to be the same as param_obj.__name__ to ensure the rule spec
                # can accept the type of the parameter.
            else:
                # Otherwise, a simple parameter value needs to be normalizable
                # by the parameter object in order to be valid.
                param_obj.normalize(param_value)
class SubtitledHtml(python_utils.OBJECT):
    """A piece of HTML content together with the content id under which its
    translations and voiceovers are keyed.
    """

    def __init__(self, content_id, html):
        """Initializes and immediately validates the object; the HTML is
        sanitized before being stored.
        """
        self.content_id = content_id
        self.html = html_cleaner.clean(html)
        self.validate()

    def to_dict(self):
        """Returns a dict representation of this SubtitledHtml."""
        return {
            'content_id': self.content_id,
            'html': self.html
        }

    @classmethod
    def from_dict(cls, subtitled_html_dict):
        """Builds a SubtitledHtml from its dict representation."""
        return cls(
            subtitled_html_dict['content_id'], subtitled_html_dict['html'])

    @classmethod
    def create_default_subtitled_html(cls, content_id):
        """Returns a SubtitledHtml with the given content id and empty
        HTML.
        """
        return cls(content_id, '')

    def validate(self):
        """Validates this SubtitledHtml.

        Raises:
            ValidationError. The content id or html is not a string.
        """
        if not isinstance(self.content_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected content id to be a string, received %s' %
                self.content_id)
        if not isinstance(self.html, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Invalid content HTML: %s' % self.html)
class State(python_utils.OBJECT):
    """Domain object for a state of an exploration: its content, its
    interaction, parameter changes, and the per-content-id voiceovers and
    written translations. The update_* methods keep recorded_voiceovers and
    written_translations in sync with the content ids in use via
    _update_content_ids_in_assets.
    """
    def __init__(
            self, content, param_changes, interaction, recorded_voiceovers,
            written_translations, solicit_answer_details,
            classifier_model_id=None):
        """Initializes the State. param_changes and interaction are
        defensively re-wrapped in fresh domain objects rather than stored
        as the caller's instances.

        Args:
            content: SubtitledHtml. The content shown to the reader.
            param_changes: list(ParamChange). Parameter changes for this
                state.
            interaction: InteractionInstance. The state's interaction.
            recorded_voiceovers: RecordedVoiceovers.
            written_translations: WrittenTranslations.
            solicit_answer_details: bool. Whether to ask learners for
                answer details.
            classifier_model_id: str or None. Optional id of an associated
                classifier model.
        """
        # The content displayed to the reader in this state.
        self.content = content
        # Parameter changes associated with this state.
        self.param_changes = [param_domain.ParamChange(
            param_change.name, param_change.generator.id,
            param_change.customization_args
        ) for param_change in param_changes]
        # The interaction instance associated with this state.
        self.interaction = InteractionInstance(
            interaction.id, interaction.customization_args,
            interaction.answer_groups, interaction.default_outcome,
            interaction.confirmed_unclassified_answers,
            interaction.hints, interaction.solution)
        self.classifier_model_id = classifier_model_id
        self.recorded_voiceovers = recorded_voiceovers
        self.written_translations = written_translations
        self.solicit_answer_details = solicit_answer_details
    def validate(self, exp_param_specs_dict, allow_null_interaction):
        """Validates the state, including uniqueness of all content ids and
        consistency of voiceovers/translations with those ids.

        Args:
            exp_param_specs_dict: dict. Exploration parameter specs, passed
                through to interaction validation.
            allow_null_interaction: bool. Whether a missing interaction id
                is acceptable.

        Raises:
            ValidationError. Any sub-object is invalid, a content id is
                duplicated, or solicit_answer_details is used with an
                unsupported interaction.
        """
        self.content.validate()
        if not isinstance(self.param_changes, list):
            raise utils.ValidationError(
                'Expected state param_changes to be a list, received %s'
                % self.param_changes)
        for param_change in self.param_changes:
            param_change.validate()
        if not allow_null_interaction and self.interaction.id is None:
            raise utils.ValidationError(
                'This state does not have any interaction specified.')
        elif self.interaction.id is not None:
            self.interaction.validate(exp_param_specs_dict)
        # Collect every content id in use, rejecting duplicates as we go.
        content_id_list = []
        content_id_list.append(self.content.content_id)
        for answer_group in self.interaction.answer_groups:
            feedback_content_id = answer_group.outcome.feedback.content_id
            if feedback_content_id in content_id_list:
                raise utils.ValidationError(
                    'Found a duplicate content id %s' % feedback_content_id)
            content_id_list.append(feedback_content_id)
        if self.interaction.default_outcome:
            default_outcome_content_id = (
                self.interaction.default_outcome.feedback.content_id)
            if default_outcome_content_id in content_id_list:
                raise utils.ValidationError(
                    'Found a duplicate content id %s'
                    % default_outcome_content_id)
            content_id_list.append(default_outcome_content_id)
        for hint in self.interaction.hints:
            hint_content_id = hint.hint_content.content_id
            if hint_content_id in content_id_list:
                raise utils.ValidationError(
                    'Found a duplicate content id %s' % hint_content_id)
            content_id_list.append(hint_content_id)
        if self.interaction.solution:
            solution_content_id = (
                self.interaction.solution.explanation.content_id)
            if solution_content_id in content_id_list:
                raise utils.ValidationError(
                    'Found a duplicate content id %s' % solution_content_id)
            content_id_list.append(solution_content_id)
        if not isinstance(self.solicit_answer_details, bool):
            raise utils.ValidationError(
                'Expected solicit_answer_details to be a boolean, '
                'received %s' % self.solicit_answer_details)
        if self.solicit_answer_details:
            if self.interaction.id in (
                    constants.INTERACTION_IDS_WITHOUT_ANSWER_DETAILS):
                raise utils.ValidationError(
                    'The %s interaction does not support soliciting '
                    'answer details from learners.' % (self.interaction.id))
        # Both asset mappings must cover exactly the ids collected above.
        self.written_translations.validate(content_id_list)
        self.recorded_voiceovers.validate(content_id_list)
    def get_content_html(self, content_id):
        """Returns the HTML for the given content id.

        Raises:
            ValueError. The content id has no translatable content.
        """
        content_id_to_html = self._get_all_translatable_content()
        if content_id not in content_id_to_html:
            raise ValueError('Content ID %s does not exist' % content_id)
        return content_id_to_html[content_id]
    def get_training_data(self):
        """Returns, for each answer group with training data, a dict with
        the group's index and a deep copy of its training answers.
        """
        state_training_data_by_answer_group = []
        for (answer_group_index, answer_group) in enumerate(
                self.interaction.answer_groups):
            if answer_group.training_data:
                answers = copy.deepcopy(answer_group.training_data)
                state_training_data_by_answer_group.append({
                    'answer_group_index': answer_group_index,
                    'answers': answers
                })
        return state_training_data_by_answer_group
    def can_undergo_classification(self):
        """Returns whether this state has enough training examples and
        labels (per feconf minimums) to train a classifier.
        """
        training_examples_count = 0
        labels_count = 0
        training_examples_count += len(
            self.interaction.confirmed_unclassified_answers)
        for answer_group in self.interaction.answer_groups:
            training_examples_count += len(answer_group.training_data)
            labels_count += 1
        if ((training_examples_count >= feconf.MIN_TOTAL_TRAINING_EXAMPLES) and
                (labels_count >= feconf.MIN_ASSIGNED_LABELS)):
            return True
        return False
    @classmethod
    def convert_state_dict_to_yaml(cls, state_dict, width):
        """Converts a state dict to YAML, first round-tripping it through
        the State domain object to ensure it is well-formed.

        Raises:
            Exception. The dict could not be converted to a State.
        """
        try:
            # Check if the state_dict can be converted to a State.
            state = cls.from_dict(state_dict)
        except Exception:
            logging.info(
                'Bad state dict: %s' % python_utils.UNICODE(state_dict))
            raise Exception('Could not convert state dict to YAML.')
        return python_utils.yaml_from_dict(state.to_dict(), width=width)
    def get_translation_counts(self):
        """Returns per-language counts of up-to-date written translations."""
        return self.written_translations.get_translation_counts()
    def get_content_count(self):
        """Returns the number of content ids registered for translation."""
        return len(self.written_translations.translations_mapping)
    def _update_content_ids_in_assets(self, old_ids_list, new_ids_list):
        """Reconciles recorded_voiceovers and written_translations with a
        content-id change: ids only in old_ids_list are deleted from both
        mappings, ids only in new_ids_list are added to both.

        Raises:
            Exception. A deleted id is missing from, or an added id already
                present in, either mapping.
        """
        content_ids_to_delete = set(old_ids_list) - set(new_ids_list)
        content_ids_to_add = set(new_ids_list) - set(old_ids_list)
        content_ids_for_text_translations = (
            self.written_translations.get_content_ids_for_text_translation())
        content_ids_for_voiceovers = (
            self.recorded_voiceovers.get_content_ids_for_voiceovers())
        for content_id in content_ids_to_delete:
            if not content_id in content_ids_for_voiceovers:
                raise Exception(
                    'The content_id %s does not exist in recorded_voiceovers.'
                    % content_id)
            elif not content_id in content_ids_for_text_translations:
                raise Exception(
                    'The content_id %s does not exist in written_translations.'
                    % content_id)
            else:
                self.recorded_voiceovers.delete_content_id_for_voiceover(
                    content_id)
                self.written_translations.delete_content_id_for_translation(
                    content_id)
        for content_id in content_ids_to_add:
            if content_id in content_ids_for_voiceovers:
                raise Exception(
                    'The content_id %s already exists in recorded_voiceovers'
                    % content_id)
            elif content_id in content_ids_for_text_translations:
                raise Exception(
                    'The content_id %s already exists in written_translations.'
                    % content_id)
            else:
                self.recorded_voiceovers.add_content_id_for_voiceover(
                    content_id)
                self.written_translations.add_content_id_for_translation(
                    content_id)
    def add_translation(self, content_id, language_code, translation_html):
        """Adds a written translation for the given content id and
        language; the HTML is sanitized first.
        """
        translation_html = html_cleaner.clean(translation_html)
        self.written_translations.add_translation(
            content_id, language_code, translation_html)
    def update_content(self, content):
        """Replaces the state's content (a SubtitledHtml)."""
        # TODO(sll): Must sanitize all content in RTE component attrs.
        self.content = content
    def update_param_changes(self, param_changes):
        """Replaces the state's parameter changes."""
        self.param_changes = param_changes
    def update_interaction_id(self, interaction_id):
        """Sets the interaction id."""
        self.interaction.id = interaction_id
        # TODO(sll): This should also clear interaction.answer_groups (except
        # for the default rule). This is somewhat mitigated because the client
        # updates interaction_answer_groups directly after this, but we should
        # fix it.
    def update_interaction_customization_args(self, customization_args):
        """Replaces the interaction's customization args dict."""
        self.interaction.customization_args = customization_args
    def update_interaction_answer_groups(self, answer_groups_list):
        """Rebuilds the interaction's answer groups from a list of dicts,
        normalizing each rule input against its declared parameter type,
        and syncs voiceover/translation content ids with the new feedback
        ids.

        Args:
            answer_groups_list: list(dict). Answer group dicts, each with
                'rule_specs', 'outcome', 'training_data' and
                'tagged_skill_misconception_id' keys.

        Raises:
            Exception. The input is not a list, rule specs are malformed,
                or a rule input value fails normalization.
        """
        if not isinstance(answer_groups_list, list):
            raise Exception(
                'Expected interaction_answer_groups to be a list, received %s'
                % answer_groups_list)
        interaction_answer_groups = []
        old_content_id_list = [
            answer_group.outcome.feedback.content_id for answer_group in (
                self.interaction.answer_groups)]
        # TODO(yanamal): Do additional calculations here to get the
        # parameter changes, if necessary.
        for answer_group_dict in answer_groups_list:
            rule_specs_list = answer_group_dict['rule_specs']
            if not isinstance(rule_specs_list, list):
                raise Exception(
                    'Expected answer group rule specs to be a list, '
                    'received %s' % rule_specs_list)
            answer_group = AnswerGroup(
                Outcome.from_dict(answer_group_dict['outcome']), [],
                answer_group_dict['training_data'],
                answer_group_dict['tagged_skill_misconception_id'])
            for rule_dict in rule_specs_list:
                rule_spec = RuleSpec.from_dict(rule_dict)
                # Normalize and store the rule params.
                rule_inputs = rule_spec.inputs
                if not isinstance(rule_inputs, dict):
                    raise Exception(
                        'Expected rule_inputs to be a dict, received %s'
                        % rule_inputs)
                for param_name, value in rule_inputs.items():
                    param_type = (
                        interaction_registry.Registry.get_interaction_by_id(
                            self.interaction.id
                        ).get_rule_param_type(rule_spec.rule_type, param_name))
                    if (isinstance(value, python_utils.BASESTRING) and
                            '{{' in value and '}}' in value):
                        # TODO(jacobdavis11): Create checks that all parameters
                        # referred to exist and have the correct types.
                        normalized_param = value
                    else:
                        try:
                            normalized_param = param_type.normalize(value)
                        except Exception:
                            raise Exception(
                                '%s has the wrong type. It should be a %s.' %
                                (value, param_type.__name__))
                    rule_inputs[param_name] = normalized_param
                answer_group.rule_specs.append(rule_spec)
            interaction_answer_groups.append(answer_group)
        self.interaction.answer_groups = interaction_answer_groups
        new_content_id_list = [
            answer_group.outcome.feedback.content_id for answer_group in (
                self.interaction.answer_groups)]
        self._update_content_ids_in_assets(
            old_content_id_list, new_content_id_list)
    def update_interaction_default_outcome(self, default_outcome_dict):
        """Replaces (or clears, if None/falsy) the interaction's default
        outcome from a dict, syncing asset content ids.

        Raises:
            Exception. The argument is truthy but not a dict.
        """
        old_content_id_list = []
        new_content_id_list = []
        if self.interaction.default_outcome:
            old_content_id_list.append(
                self.interaction.default_outcome.feedback.content_id)
        if default_outcome_dict:
            if not isinstance(default_outcome_dict, dict):
                raise Exception(
                    'Expected default_outcome_dict to be a dict, received %s'
                    % default_outcome_dict)
            self.interaction.default_outcome = Outcome.from_dict(
                default_outcome_dict)
            new_content_id_list.append(
                self.interaction.default_outcome.feedback.content_id)
        else:
            self.interaction.default_outcome = None
        self._update_content_ids_in_assets(
            old_content_id_list, new_content_id_list)
    def update_interaction_confirmed_unclassified_answers(
            self, confirmed_unclassified_answers):
        """Replaces the interaction's confirmed-unclassified answers list.

        Raises:
            Exception. The argument is not a list.
        """
        if not isinstance(confirmed_unclassified_answers, list):
            raise Exception(
                'Expected confirmed_unclassified_answers to be a list,'
                ' received %s' % confirmed_unclassified_answers)
        self.interaction.confirmed_unclassified_answers = (
            confirmed_unclassified_answers)
    def update_interaction_hints(self, hints_list):
        """Replaces the interaction's hints from a list of hint dicts,
        syncing asset content ids.

        Raises:
            Exception. The argument is not a list.
        """
        if not isinstance(hints_list, list):
            raise Exception(
                'Expected hints_list to be a list, received %s'
                % hints_list)
        old_content_id_list = [
            hint.hint_content.content_id for hint in self.interaction.hints]
        self.interaction.hints = [
            Hint.from_dict(hint_dict)
            for hint_dict in hints_list]
        new_content_id_list = [
            hint.hint_content.content_id for hint in self.interaction.hints]
        self._update_content_ids_in_assets(
            old_content_id_list, new_content_id_list)
    def update_interaction_solution(self, solution_dict):
        """Replaces (or clears, if None) the interaction's solution from a
        dict, syncing asset content ids.

        Raises:
            Exception. The argument is neither None nor a dict.
        """
        old_content_id_list = []
        new_content_id_list = []
        if self.interaction.solution:
            old_content_id_list.append(
                self.interaction.solution.explanation.content_id)
        if solution_dict is not None:
            if not isinstance(solution_dict, dict):
                raise Exception(
                    'Expected solution to be a dict, received %s'
                    % solution_dict)
            self.interaction.solution = Solution.from_dict(
                self.interaction.id, solution_dict)
            new_content_id_list.append(
                self.interaction.solution.explanation.content_id)
        else:
            self.interaction.solution = None
        self._update_content_ids_in_assets(
            old_content_id_list, new_content_id_list)
    def update_recorded_voiceovers(self, recorded_voiceovers):
        """Replaces the state's RecordedVoiceovers."""
        self.recorded_voiceovers = recorded_voiceovers
    def update_written_translations(self, written_translations):
        """Replaces the state's WrittenTranslations."""
        self.written_translations = written_translations
    def update_solicit_answer_details(self, solicit_answer_details):
        """Sets the solicit_answer_details flag.

        Raises:
            Exception. The argument is not a boolean.
        """
        if not isinstance(solicit_answer_details, bool):
            raise Exception(
                'Expected solicit_answer_details to be a boolean, received %s'
                % solicit_answer_details)
        self.solicit_answer_details = solicit_answer_details
    def _get_all_translatable_content(self):
        """Returns a dict mapping each content id with non-empty HTML to
        that HTML (content, default outcome, answer-group feedback, hints
        and solution explanation).
        """
        content_id_to_html = {}
        content_id_to_html[self.content.content_id] = self.content.html
        # TODO(#6178): Remove empty html checks once we add a validation
        # check that ensures each content in state should be non-empty html.
        default_outcome = self.interaction.default_outcome
        if default_outcome is not None and default_outcome.feedback.html != '':
            content_id_to_html[default_outcome.feedback.content_id] = (
                default_outcome.feedback.html)
        for answer_group in self.interaction.answer_groups:
            if answer_group.outcome.feedback.html != '':
                content_id_to_html[answer_group.outcome.feedback.content_id] = (
                    answer_group.outcome.feedback.html)
        for hint in self.interaction.hints:
            if hint.hint_content.html != '':
                content_id_to_html[hint.hint_content.content_id] = (
                    hint.hint_content.html)
        solution = self.interaction.solution
        if solution is not None and solution.explanation.html != '':
            content_id_to_html[solution.explanation.content_id] = (
                solution.explanation.html)
        return content_id_to_html
    def get_content_id_mapping_needing_translations(self, language_code):
        """Returns a dict of content id to HTML for content that still
        lacks an up-to-date translation in the given language.
        """
        content_id_to_html = self._get_all_translatable_content()
        available_translation_content_ids = (
            self.written_translations
            .get_content_ids_that_are_correctly_translated(language_code))
        for content_id in available_translation_content_ids:
            del content_id_to_html[content_id]
        # TODO(#7571): Add functionality to return the list of
        # translations which needs update.
        return content_id_to_html
    def to_dict(self):
        """Returns a dict representation of this State."""
        return {
            'content': self.content.to_dict(),
            'param_changes': [param_change.to_dict()
                              for param_change in self.param_changes],
            'interaction': self.interaction.to_dict(),
            'classifier_model_id': self.classifier_model_id,
            'recorded_voiceovers': self.recorded_voiceovers.to_dict(),
            'written_translations': self.written_translations.to_dict(),
            'solicit_answer_details': self.solicit_answer_details
        }
    @classmethod
    def from_dict(cls, state_dict):
        """Builds a State from its dict representation."""
        return cls(
            SubtitledHtml.from_dict(state_dict['content']),
            [param_domain.ParamChange.from_dict(param)
             for param in state_dict['param_changes']],
            InteractionInstance.from_dict(state_dict['interaction']),
            RecordedVoiceovers.from_dict(state_dict['recorded_voiceovers']),
            WrittenTranslations.from_dict(state_dict['written_translations']),
            state_dict['solicit_answer_details'],
            state_dict['classifier_model_id'])
    @classmethod
    def create_default_state(
            cls, default_dest_state_name, is_initial_state=False):
        """Creates a default State whose default outcome routes to the
        given destination; initial states get the default initial content.
        """
        content_html = (
            feconf.DEFAULT_INIT_STATE_CONTENT_STR if is_initial_state else '')
        content_id = feconf.DEFAULT_NEW_STATE_CONTENT_ID
        return cls(
            SubtitledHtml(content_id, content_html),
            [],
            InteractionInstance.create_default_interaction(
                default_dest_state_name),
            RecordedVoiceovers.from_dict(copy.deepcopy(
                feconf.DEFAULT_RECORDED_VOICEOVERS)),
            WrittenTranslations.from_dict(
                copy.deepcopy(feconf.DEFAULT_WRITTEN_TRANSLATIONS)),
            False)
    @classmethod
    def convert_html_fields_in_state(cls, state_dict, conversion_fn):
        """Applies conversion_fn to every HTML field in a state dict
        (content, outcomes, relevant rule inputs, hints, solution and
        choice customization args), mutating and returning state_dict.
        """
        state_dict['content']['html'] = (
            conversion_fn(state_dict['content']['html']))
        if state_dict['interaction']['default_outcome']:
            interaction_feedback_html = state_dict[
                'interaction']['default_outcome']['feedback']['html']
            state_dict['interaction']['default_outcome']['feedback'][
                'html'] = conversion_fn(interaction_feedback_html)
        for answer_group_index, answer_group in enumerate(
                state_dict['interaction']['answer_groups']):
            answer_group_html = answer_group['outcome']['feedback']['html']
            state_dict['interaction']['answer_groups'][
                answer_group_index]['outcome']['feedback']['html'] = (
                    conversion_fn(answer_group_html))
            if state_dict['interaction']['id'] == 'ItemSelectionInput':
                for rule_spec_index, rule_spec in enumerate(
                        answer_group['rule_specs']):
                    for x_index, x in enumerate(rule_spec['inputs']['x']):
                        state_dict['interaction']['answer_groups'][
                            answer_group_index]['rule_specs'][
                                rule_spec_index]['inputs']['x'][x_index] = (
                                    conversion_fn(x))
        for hint_index, hint in enumerate(
                state_dict['interaction']['hints']):
            hint_html = hint['hint_content']['html']
            state_dict['interaction']['hints'][hint_index][
                'hint_content']['html'] = conversion_fn(hint_html)
        if state_dict['interaction']['solution']:
            solution_html = state_dict[
                'interaction']['solution']['explanation']['html']
            state_dict['interaction']['solution']['explanation']['html'] = (
                conversion_fn(solution_html))
        if state_dict['interaction']['id'] in (
                'ItemSelectionInput', 'MultipleChoiceInput'):
            for value_index, value in enumerate(
                    state_dict['interaction']['customization_args'][
                        'choices']['value']):
                state_dict['interaction']['customization_args'][
                    'choices']['value'][value_index] = conversion_fn(value)
        return state_dict
| true | true |
1c46bcd3d9c7631a1c1fc9bbcad0750ae3adc519 | 159 | py | Python | src/dash_init.py | JavaScriipt/iHashTag | 3b6e95fde0e4b7f35e074c0b0733f2b98bc7763a | [
"CC0-1.0"
] | null | null | null | src/dash_init.py | JavaScriipt/iHashTag | 3b6e95fde0e4b7f35e074c0b0733f2b98bc7763a | [
"CC0-1.0"
] | null | null | null | src/dash_init.py | JavaScriipt/iHashTag | 3b6e95fde0e4b7f35e074c0b0733f2b98bc7763a | [
"CC0-1.0"
] | null | null | null | import os
# Initialize the results file with its CSV header, truncating any output from
# a previous run. The context manager guarantees the handle is closed (and
# the header flushed) even if the write fails.
# NOTE(review): "Muy Negativos" appears twice in the header; one occurrence
# is presumably meant to be a different sentiment bucket - confirm upstream
# before changing, since consumers parse this exact header.
with open("resultados.txt", "w") as results_file:
    results_file.write("Timestamp, Muy Positivos, Muy Negativos, Neutros, Negativos, Muy Negativos, Average\n")
| 26.5 | 99 | 0.735849 | import os
# Reset the results file and write the CSV column header for later appends.
header_line = "Timestamp, Muy Positivos, Muy Negativos, Neutros, Negativos, Muy Negativos, Average\n"
results_file = open("resultados.txt", "w")
results_file.write(header_line)
results_file.close()
| true | true |
1c46bcdb1d10c9fe63a5f971609c2b06295d9890 | 1,926 | py | Python | setup.py | wj-Mcat/python-wechaty-puppet-official-account | 92e762b0345c1faab2563d6da302efa4de273425 | [
"Apache-2.0"
] | null | null | null | setup.py | wj-Mcat/python-wechaty-puppet-official-account | 92e762b0345c1faab2563d6da302efa4de273425 | [
"Apache-2.0"
] | null | null | null | setup.py | wj-Mcat/python-wechaty-puppet-official-account | 92e762b0345c1faab2563d6da302efa4de273425 | [
"Apache-2.0"
] | null | null | null | """
setup
"""
import os
import semver
import setuptools
def versioning(version: str) -> str:
    """
    Validate and normalize an ``X.Y.Z`` semantic version string.

    The input is parsed with ``semver`` (which rejects malformed versions)
    and re-assembled as ``X.Y.Z``. Note: despite the original docstring's
    claim of producing ``X.Y.devZ``, no ``dev`` segment is added - the
    output equals the validated input.

    :param version: semantic version string, e.g. ``'1.2.3'``
    :return: the normalized ``'X.Y.Z'`` string
    """
    sem_ver = semver.parse(version)
    # Re-assemble from the parsed parts so malformed input cannot slip
    # through unvalidated.
    return '%d.%d.%s' % (
        sem_ver['major'],
        sem_ver['minor'],
        str(sem_ver['patch']),
    )
def get_version() -> str:
    """
    Read the package version from the VERSION file that sits next to this
    script and normalize it through versioning().
    """
    version_path = os.path.join(os.path.dirname(__file__), 'VERSION')
    with open(version_path) as version_fh:
        # The file contains a bare X.Y.Z string.
        raw_version = version_fh.read().strip()
    return versioning(raw_version)
def get_long_description() -> str:
    """Return the contents of README.md, used as the PyPI long description."""
    readme_fh = open('README.md', 'r')
    try:
        return readme_fh.read()
    finally:
        readme_fh.close()
def get_install_requires() -> list:
    """Return the dependency list parsed from requirements.txt.

    The original annotation claimed ``-> str``, but ``splitlines()``
    returns a list of requirement strings; the annotation is corrected.
    """
    with open('requirements.txt', 'r') as requirements_fh:
        return requirements_fh.read().splitlines()
# Package metadata and build configuration. Sources live under src/
# (src-layout), hence the find_packages('src') + package_dir mapping.
# The commented-out 'wip' packaging variant was dead code and has been
# removed.
setuptools.setup(
    name='wechaty-puppet-official-account',
    version=get_version(),
    author='wj-Mcat',
    author_email='wjmcater@gmail.com',
    description='Wechaty Puppet for WeChat Official Account',
    long_description=get_long_description(),
    long_description_content_type='text/markdown',
    license='Apache-2.0',
    url='https://github.com/wechaty/python-wechaty-puppet-official-account',
    packages=setuptools.find_packages('src'),
    package_dir={'': 'src'},
    install_requires=get_install_requires(),
    classifiers=[
        'Programming Language :: Python :: 3.7',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
    ],
)
| 23.204819 | 76 | 0.609034 | import os
import semver
import setuptools
def versioning(version: str) -> str:
    """Validate an X.Y.Z version string via semver and return it normalized."""
    parsed = semver.parse(version)
    version_parts = (parsed['major'], parsed['minor'], str(parsed['patch']))
    return '%d.%d.%s' % version_parts
def get_version() -> str:
    """Read X.Y.Z from the adjacent VERSION file and normalize it."""
    version_file = os.path.join(os.path.dirname(__file__), 'VERSION')
    with open(version_file) as handle:
        raw = handle.read().strip()
    return versioning(raw)
def get_long_description() -> str:
    """Return the README.md contents for the package long description."""
    with open('README.md', 'r') as readme_fh:
        contents = readme_fh.read()
    return contents
def get_install_requires() -> list:
    """Parse requirements.txt into a list of requirement strings.

    Corrected return annotation: splitlines() yields a list, not a str.
    """
    with open('requirements.txt', 'r') as requirements_fh:
        return requirements_fh.read().splitlines()
# Package metadata and build configuration. The project uses a src-layout:
# packages are discovered under src/ and mapped to the distribution root.
setuptools.setup(
    name='wechaty-puppet-official-account',
    version=get_version(),
    author='wj-Mcat',
    author_email='wjmcater@gmail.com',
    description='Wechaty Puppet for WeChat Official Account',
    # README.md rendered on PyPI as markdown.
    long_description=get_long_description(),
    long_description_content_type='text/markdown',
    license='Apache-2.0',
    url='https://github.com/wechaty/python-wechaty-puppet-official-account',
    packages=setuptools.find_packages('src'),
    package_dir={'': 'src'},
    # Runtime dependencies come straight from requirements.txt.
    install_requires=get_install_requires(),
    classifiers=[
        'Programming Language :: Python :: 3.7',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
    ],
)
| true | true |
1c46bf0877dd01db082a6f46e72eeec5ee132dde | 5,847 | py | Python | scripts/inspect_un_data_sets.py | arwhyte/SI664-scripts | 99daaac123ebdbfb0fbca59251f711efb9a7d39f | [
"MIT"
] | null | null | null | scripts/inspect_un_data_sets.py | arwhyte/SI664-scripts | 99daaac123ebdbfb0fbca59251f711efb9a7d39f | [
"MIT"
] | null | null | null | scripts/inspect_un_data_sets.py | arwhyte/SI664-scripts | 99daaac123ebdbfb0fbca59251f711efb9a7d39f | [
"MIT"
] | 1 | 2018-12-08T16:43:45.000Z | 2018-12-08T16:43:45.000Z | import logging
import os
import pandas as pd
import sys as sys
def main(argv=None):
    """
    Utilize Pandas library to read in both UNSD M49 country and area .csv file
    (tab delimited) as well as the UNESCO heritage site .csv file (tab delimited).
    Extract regions, sub-regions, intermediate regions, country and areas, and
    other column data. Filter out duplicate values and NaN values and sort the
    series in alphabetical order. Write out each series to a .csv file for inspection.

    :param argv: optional argument list; kept for CLI compatibility (unused).
    """
    if argv is None:
        argv = sys.argv

    # Setting logging format and default level.
    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)

    def export_column(data_frame, column_name, target_path, message):
        # Extract the filtered column, persist it tab-delimited, log the path.
        series = extract_filtered_series(data_frame, column_name)
        write_series_to_csv(series, target_path, '\t', False)
        logging.info(message.format(os.path.abspath(target_path)))

    source_read_msg = 'Source file read {0}'

    # Read in United Nations Statistical Division (UNSD) M49 Standard data
    # set (tab separated).
    unsd_csv = './input/csv/un_area_country_codes-m49.csv'
    unsd_data_frame = read_csv(unsd_csv, '\t')
    logging.info(source_read_msg.format(os.path.abspath(unsd_csv)))

    # (column to extract, output path, log message) for each UNSD export.
    unsd_exports = (
        ('region_name',
         './output/unesco/unsd_region.csv',
         'UNSD M49 regions written to file {0}'),
        ('sub_region_name',
         './output/unesco/unsd_sub_region.csv',
         'UNSD M49 sub-regions written to file {0}'),
        ('intermediate_region_name',
         './output/unesco/unsd_intermed_region.csv',
         'UNSD M49 intermediate regions written to file {0}'),
        ('country_area_name',
         './output/unesco/unsd_country_area.csv',
         'UNSD M49 countries and areas written to file {0}'),
        ('country_area_development_status',
         './output/unesco/unsd_dev_status.csv',
         'UNSD M49 development status written to file {0}'),
    )
    for column_name, target_path, message in unsd_exports:
        export_column(unsd_data_frame, column_name, target_path, message)

    # Read UNESCO heritage sites data (tab separated).
    unesco_csv = './input/csv/unesco_heritage_sites.csv'
    unesco_data_frame = read_csv(unesco_csv, '\t')
    logging.info(source_read_msg.format(os.path.abspath(unesco_csv)))

    unesco_exports = (
        ('country_area',
         './output/unesco/unesco_heritage_site_country_area.csv',
         'UNESCO heritage site countries/areas written to file {0}'),
        ('category',
         './output/unesco/unesco_heritage_site_category.csv',
         'UNESCO heritage site categories written to file {0}'),
        ('region',
         './output/unesco/unesco_heritage_site_region.csv',
         'UNESCO heritage site regions written to file {0}'),
        ('transboundary',
         './output/unesco/unesco_heritage_site_transboundary.csv',
         'UNESCO heritage site transboundary values written to file {0}'),
    )
    for column_name, target_path, message in unesco_exports:
        export_column(unesco_data_frame, column_name, target_path, message)
def extract_filtered_series(data_frame, column_name):
    """
    Returns a filtered Pandas Series (one-dimensional ndarray) from a targeted
    column. Duplicate values and NaN or blank values are dropped from the
    result set, which is returned sorted (ascending).

    :param data_frame: Pandas DataFrame
    :param column_name: column name string
    :return: Pandas Series (one-dimensional ndarray)
    """
    # BUGFIX: Series.sort_values() takes no 'by' keyword (that parameter is
    # DataFrame-only); passing it raised a TypeError in the original code.
    return data_frame[column_name].drop_duplicates().dropna().sort_values()
def read_csv(path, delimiter=','):
    """
    Read a *.csv file into a Pandas DataFrame.

    :param path: file path
    :param delimiter: field delimiter
    :return: Pandas DataFrame
    """
    frame = pd.read_csv(
        path,
        sep=delimiter,
        encoding='utf-8',
        engine='python',
    )
    return frame
def write_series_to_csv(series, path, delimiter=',', row_name=True):
    """
    Write a Pandas Series to a *.csv file.

    :param series: Pandas one-dimensional ndarray
    :param path: file path
    :param delimiter: field delimiter
    :param row_name: include the row index boolean
    """
    csv_options = {'sep': delimiter, 'index': row_name}
    series.to_csv(path, **csv_options)
if __name__ == '__main__':
sys.exit(main()) | 43.962406 | 94 | 0.783308 | import logging
import os
import pandas as pd
import sys as sys
def main(argv=None):
    """Read the UNSD M49 and UNESCO heritage site tab-delimited .csv files,
    extract selected columns (de-duplicated, NaN-dropped, sorted), and write
    each resulting series to its own .csv file for inspection.

    :param argv: optional argument list; kept for CLI compatibility (unused).
    """
    if argv is None:
        argv = sys.argv

    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)

    def export_column(data_frame, column_name, target_path, message):
        # Extract the filtered column, persist it tab-delimited, log the path.
        series = extract_filtered_series(data_frame, column_name)
        write_series_to_csv(series, target_path, '\t', False)
        logging.info(message.format(os.path.abspath(target_path)))

    source_read_msg = 'Source file read {0}'

    # UNSD M49 Standard data set (tab separated).
    unsd_csv = './input/csv/un_area_country_codes-m49.csv'
    unsd_data_frame = read_csv(unsd_csv, '\t')
    logging.info(source_read_msg.format(os.path.abspath(unsd_csv)))

    # (column to extract, output path, log message) for each UNSD export.
    unsd_exports = (
        ('region_name',
         './output/unesco/unsd_region.csv',
         'UNSD M49 regions written to file {0}'),
        ('sub_region_name',
         './output/unesco/unsd_sub_region.csv',
         'UNSD M49 sub-regions written to file {0}'),
        ('intermediate_region_name',
         './output/unesco/unsd_intermed_region.csv',
         'UNSD M49 intermediate regions written to file {0}'),
        ('country_area_name',
         './output/unesco/unsd_country_area.csv',
         'UNSD M49 countries and areas written to file {0}'),
        ('country_area_development_status',
         './output/unesco/unsd_dev_status.csv',
         'UNSD M49 development status written to file {0}'),
    )
    for column_name, target_path, message in unsd_exports:
        export_column(unsd_data_frame, column_name, target_path, message)

    # UNESCO heritage sites data (tab separated).
    unesco_csv = './input/csv/unesco_heritage_sites.csv'
    unesco_data_frame = read_csv(unesco_csv, '\t')
    logging.info(source_read_msg.format(os.path.abspath(unesco_csv)))

    unesco_exports = (
        ('country_area',
         './output/unesco/unesco_heritage_site_country_area.csv',
         'UNESCO heritage site countries/areas written to file {0}'),
        ('category',
         './output/unesco/unesco_heritage_site_category.csv',
         'UNESCO heritage site categories written to file {0}'),
        ('region',
         './output/unesco/unesco_heritage_site_region.csv',
         'UNESCO heritage site regions written to file {0}'),
        ('transboundary',
         './output/unesco/unesco_heritage_site_transboundary.csv',
         'UNESCO heritage site transboundary values written to file {0}'),
    )
    for column_name, target_path, message in unesco_exports:
        export_column(unesco_data_frame, column_name, target_path, message)
def extract_filtered_series(data_frame, column_name):
    """Return the target column as a de-duplicated, NaN-free, ascending-sorted
    Pandas Series.

    BUGFIX: the original passed ``by=column_name`` to ``Series.sort_values``;
    ``by`` is a DataFrame-only parameter and raised a TypeError here.
    """
    return data_frame[column_name].drop_duplicates().dropna().sort_values()
def read_csv(path, delimiter=','):
    """Load *path* into a Pandas DataFrame using the pure-python parser."""
    read_options = {'sep': delimiter, 'encoding': 'utf-8', 'engine': 'python'}
    return pd.read_csv(path, **read_options)
def write_series_to_csv(series, path, delimiter=',', row_name=True):
    """Persist a Pandas Series to *path* as delimited text.

    :param row_name: whether to include the row index in the output.
    """
    include_index = row_name
    series.to_csv(path, sep=delimiter, index=include_index)
if __name__ == '__main__':
sys.exit(main()) | true | true |
1c46bf9669398d790db830f2381d8c2ac1675ffc | 4,642 | py | Python | tests/unit/workflows/java_gradle/test_gradle.py | verdimrc/aws-lambda-builders | 67f42dd936fd4f0c517c38acb8b6a170156549ec | [
"Apache-2.0"
] | 1 | 2020-07-21T20:16:12.000Z | 2020-07-21T20:16:12.000Z | tests/unit/workflows/java_gradle/test_gradle.py | verdimrc/aws-lambda-builders | 67f42dd936fd4f0c517c38acb8b6a170156549ec | [
"Apache-2.0"
] | 1 | 2020-06-26T12:36:39.000Z | 2020-06-26T12:36:39.000Z | tests/unit/workflows/java_gradle/test_gradle.py | verdimrc/aws-lambda-builders | 67f42dd936fd4f0c517c38acb8b6a170156549ec | [
"Apache-2.0"
] | 1 | 2020-04-02T19:12:39.000Z | 2020-04-02T19:12:39.000Z | import subprocess
from unittest import TestCase
from mock import patch
from aws_lambda_builders.binary_path import BinaryPath
from aws_lambda_builders.workflows.java_gradle.gradle import (
SubprocessGradle,
GradleExecutionError,
BuildFileNotFoundError,
)
class FakePopen:
    """Test double mimicking the parts of subprocess.Popen the code touches."""

    def __init__(self, out=b"out", err=b"err", retcode=0):
        # Canned stdout/stderr payloads and exit status for the fake process.
        self.out = out
        self.err = err
        self.returncode = retcode

    def wait(self):
        """No-op: the fake process is considered already finished."""
        return None

    def communicate(self):
        """Return the canned (stdout, stderr) tuple."""
        return (self.out, self.err)
class TestSubprocessGradle(TestCase):
    """Unit tests for the SubprocessGradle wrapper around the gradle CLI.

    NOTE: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout.
    """

    @patch("aws_lambda_builders.workflows.java_gradle.utils.OSUtils")
    def setUp(self, MockOSUtils):
        # Shared fixture: mocked OS layer, one fake process, and canned paths.
        self.os_utils = MockOSUtils.return_value
        self.os_utils.exists.side_effect = lambda d: True
        self.popen = FakePopen()
        self.os_utils.popen.side_effect = [self.popen]
        self.gradle_path = "/path/to/gradle"
        self.gradle_binary = BinaryPath(None, None, "gradle", binary_path=self.gradle_path)
        self.source_dir = "/foo/bar/baz"
        self.manifest_path = "/foo/bar/baz/build.gradle"
        self.init_script = "/path/to/init"

    def test_no_os_utils_build_init_throws(self):
        """Constructing without OSUtils must raise ValueError."""
        with self.assertRaises(ValueError) as err_assert:
            SubprocessGradle(gradle_binary=self.gradle_binary)
        self.assertEqual(err_assert.exception.args[0], "Must provide OSUtils")

    def test_no_gradle_exec_init_throws(self):
        """Constructing without a Gradle BinaryPath must raise ValueError."""
        with self.assertRaises(ValueError) as err_assert:
            SubprocessGradle(None)
        self.assertEqual(err_assert.exception.args[0], "Must provide Gradle BinaryPath")

    def test_no_build_file_throws(self):
        """A missing build.gradle must raise BuildFileNotFoundError."""
        self.os_utils.exists.side_effect = lambda d: False
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        with self.assertRaises(BuildFileNotFoundError) as raised:
            gradle.build(self.source_dir, self.manifest_path)
        self.assertEqual(
            raised.exception.args[0], "Gradle Failed: Gradle build file not found: %s" % self.manifest_path
        )

    def test_build_no_init_script(self):
        """Without an init script, only build/--build-file args are passed."""
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        gradle.build(self.source_dir, self.manifest_path)
        self.os_utils.popen.assert_called_with(
            [self.gradle_path, "build", "--build-file", self.manifest_path],
            cwd=self.source_dir,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )

    def test_gradlew_path_is_dummy_uses_gradle_binary(self):
        """The resolved gradle binary path is what gets executed."""
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        gradle.build(self.source_dir, self.manifest_path)
        self.os_utils.popen.assert_called_with(
            [self.gradle_path, "build", "--build-file", self.manifest_path],
            cwd=self.source_dir,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )

    def test_build_with_init_script(self):
        """An init script path is forwarded via --init-script."""
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        gradle.build(self.source_dir, self.manifest_path, init_script_path=self.init_script)
        self.os_utils.popen.assert_called_with(
            [self.gradle_path, "build", "--build-file", self.manifest_path, "--init-script", self.init_script],
            cwd=self.source_dir,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )

    def test_raises_exception_if_retcode_not_0(self):
        """A non-zero gradle exit code surfaces as GradleExecutionError."""
        # Replace the default fake process with a failing one.
        self.popen = FakePopen(retcode=1, err=b"Some Error Message")
        self.os_utils.popen.side_effect = [self.popen]
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        with self.assertRaises(GradleExecutionError) as err:
            gradle.build(self.source_dir, self.manifest_path)
        self.assertEqual(err.exception.args[0], "Gradle Failed: Some Error Message")

    def test_includes_build_properties_in_command(self):
        """Build properties are forwarded as -Dkey=value arguments."""
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        gradle.build(self.source_dir, self.manifest_path, init_script_path=self.init_script, properties={"foo": "bar"})
        self.os_utils.popen.assert_called_with(
            [
                self.gradle_path,
                "build",
                "--build-file",
                self.manifest_path,
                "-Dfoo=bar",
                "--init-script",
                self.init_script,
            ],
            cwd=self.source_dir,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
| 40.719298 | 119 | 0.673632 | import subprocess
from unittest import TestCase
from mock import patch
from aws_lambda_builders.binary_path import BinaryPath
from aws_lambda_builders.workflows.java_gradle.gradle import (
SubprocessGradle,
GradleExecutionError,
BuildFileNotFoundError,
)
class FakePopen:
    """Stand-in for subprocess.Popen with pre-baked output and return code."""

    def __init__(self, out=b"out", err=b"err", retcode=0):
        self.out, self.err, self.returncode = out, err, retcode

    def communicate(self):
        # Emulate Popen.communicate(): hand back the canned streams.
        output_pair = (self.out, self.err)
        return output_pair

    def wait(self):
        # Nothing to wait for; the fake process already "exited".
        pass
class TestSubprocessGradle(TestCase):
    """Unit tests for the SubprocessGradle wrapper around the gradle CLI.

    NOTE: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout.
    """

    @patch("aws_lambda_builders.workflows.java_gradle.utils.OSUtils")
    def setUp(self, MockOSUtils):
        # Shared fixture: mocked OS layer, one fake process, and canned paths.
        self.os_utils = MockOSUtils.return_value
        self.os_utils.exists.side_effect = lambda d: True
        self.popen = FakePopen()
        self.os_utils.popen.side_effect = [self.popen]
        self.gradle_path = "/path/to/gradle"
        self.gradle_binary = BinaryPath(None, None, "gradle", binary_path=self.gradle_path)
        self.source_dir = "/foo/bar/baz"
        self.manifest_path = "/foo/bar/baz/build.gradle"
        self.init_script = "/path/to/init"

    def test_no_os_utils_build_init_throws(self):
        """Constructing without OSUtils must raise ValueError."""
        with self.assertRaises(ValueError) as err_assert:
            SubprocessGradle(gradle_binary=self.gradle_binary)
        self.assertEqual(err_assert.exception.args[0], "Must provide OSUtils")

    def test_no_gradle_exec_init_throws(self):
        """Constructing without a Gradle BinaryPath must raise ValueError."""
        with self.assertRaises(ValueError) as err_assert:
            SubprocessGradle(None)
        self.assertEqual(err_assert.exception.args[0], "Must provide Gradle BinaryPath")

    def test_no_build_file_throws(self):
        """A missing build.gradle must raise BuildFileNotFoundError."""
        self.os_utils.exists.side_effect = lambda d: False
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        with self.assertRaises(BuildFileNotFoundError) as raised:
            gradle.build(self.source_dir, self.manifest_path)
        self.assertEqual(
            raised.exception.args[0], "Gradle Failed: Gradle build file not found: %s" % self.manifest_path
        )

    def test_build_no_init_script(self):
        """Without an init script, only build/--build-file args are passed."""
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        gradle.build(self.source_dir, self.manifest_path)
        self.os_utils.popen.assert_called_with(
            [self.gradle_path, "build", "--build-file", self.manifest_path],
            cwd=self.source_dir,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )

    def test_gradlew_path_is_dummy_uses_gradle_binary(self):
        """The resolved gradle binary path is what gets executed."""
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        gradle.build(self.source_dir, self.manifest_path)
        self.os_utils.popen.assert_called_with(
            [self.gradle_path, "build", "--build-file", self.manifest_path],
            cwd=self.source_dir,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )

    def test_build_with_init_script(self):
        """An init script path is forwarded via --init-script."""
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        gradle.build(self.source_dir, self.manifest_path, init_script_path=self.init_script)
        self.os_utils.popen.assert_called_with(
            [self.gradle_path, "build", "--build-file", self.manifest_path, "--init-script", self.init_script],
            cwd=self.source_dir,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )

    def test_raises_exception_if_retcode_not_0(self):
        """A non-zero gradle exit code surfaces as GradleExecutionError."""
        # Replace the default fake process with a failing one.
        self.popen = FakePopen(retcode=1, err=b"Some Error Message")
        self.os_utils.popen.side_effect = [self.popen]
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        with self.assertRaises(GradleExecutionError) as err:
            gradle.build(self.source_dir, self.manifest_path)
        self.assertEqual(err.exception.args[0], "Gradle Failed: Some Error Message")

    def test_includes_build_properties_in_command(self):
        """Build properties are forwarded as -Dkey=value arguments."""
        gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
        gradle.build(self.source_dir, self.manifest_path, init_script_path=self.init_script, properties={"foo": "bar"})
        self.os_utils.popen.assert_called_with(
            [
                self.gradle_path,
                "build",
                "--build-file",
                self.manifest_path,
                "-Dfoo=bar",
                "--init-script",
                self.init_script,
            ],
            cwd=self.source_dir,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
| true | true |
1c46c13896c2f68690261b134a22b45479e29be0 | 4,599 | py | Python | test/connector/exchange/crypto_com/test_crypto_com_order_book_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 3,027 | 2019-04-04T18:52:17.000Z | 2022-03-30T09:38:34.000Z | test/connector/exchange/crypto_com/test_crypto_com_order_book_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 4,080 | 2019-04-04T19:51:11.000Z | 2022-03-31T23:45:21.000Z | test/connector/exchange/crypto_com/test_crypto_com_order_book_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 1,342 | 2019-04-04T20:50:53.000Z | 2022-03-31T15:22:36.000Z | #!/usr/bin/env python
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../../../")))
import math
import time
import asyncio
import logging
import unittest
from typing import Dict, Optional, List
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import OrderBookEvent, OrderBookTradeEvent, TradeType
from hummingbot.connector.exchange.crypto_com.crypto_com_order_book_tracker import CryptoComOrderBookTracker
from hummingbot.connector.exchange.crypto_com.crypto_com_api_order_book_data_source import CryptoComAPIOrderBookDataSource
from hummingbot.core.data_type.order_book import OrderBook
class CryptoComOrderBookTrackerUnitTest(unittest.TestCase):
    """Integration tests for the crypto.com order book tracker.

    NOTE(review): these tests talk to the live exchange, so they need
    network access and real-time market activity to pass.
    """
    # Shared tracker instance, created once for the class in setUpClass.
    order_book_tracker: Optional[CryptoComOrderBookTracker] = None
    # Order book events the logger subscribes to in setUp.
    events: List[OrderBookEvent] = [
        OrderBookEvent.TradeEvent
    ]
    # Markets whose order books are tracked during the tests.
    trading_pairs: List[str] = [
        "BTC-USDT",
        "ETH-USDT",
    ]
    @classmethod
    def setUpClass(cls):
        # Start the tracker once and block until order books are populated.
        cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
        cls.order_book_tracker: CryptoComOrderBookTracker = CryptoComOrderBookTracker(cls.trading_pairs)
        cls.order_book_tracker.start()
        cls.ev_loop.run_until_complete(cls.wait_til_tracker_ready())
    @classmethod
    async def wait_til_tracker_ready(cls):
        # Poll once per second until at least one order book exists.
        while True:
            if len(cls.order_book_tracker.order_books) > 0:
                print("Initialized real-time order books.")
                return
            await asyncio.sleep(1)
    async def run_parallel_async(self, *tasks, timeout=None):
        """Gather *tasks* concurrently, raising once *timeout* polling
        iterations (~1 second each) have elapsed without completion."""
        future: asyncio.Future = asyncio.ensure_future(asyncio.gather(*tasks))
        timer = 0
        while not future.done():
            if timeout and timer > timeout:
                raise Exception("Timeout running parallel async tasks in tests")
            timer += 1
            now = time.time()
            _next_iteration = now // 1.0 + 1  # noqa: F841
            await asyncio.sleep(1.0)
        return future.result()
    def run_parallel(self, *tasks):
        # Synchronous convenience wrapper around run_parallel_async.
        return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
    def setUp(self):
        # Attach a fresh event logger to every tracked order book.
        self.event_logger = EventLogger()
        for event_tag in self.events:
            for trading_pair, order_book in self.order_book_tracker.order_books.items():
                order_book.add_listener(event_tag, self.event_logger)
    def test_order_book_trade_event_emission(self):
        """
        Tests if the order book tracker is able to retrieve order book trade message from exchange and emit order book
        trade events after correctly parsing the trade messages
        """
        self.run_parallel(self.event_logger.wait_for(OrderBookTradeEvent))
        for ob_trade_event in self.event_logger.event_log:
            self.assertTrue(type(ob_trade_event) == OrderBookTradeEvent)
            self.assertTrue(ob_trade_event.trading_pair in self.trading_pairs)
            self.assertTrue(type(ob_trade_event.timestamp) in [float, int])
            self.assertTrue(type(ob_trade_event.amount) == float)
            self.assertTrue(type(ob_trade_event.price) == float)
            self.assertTrue(type(ob_trade_event.type) == TradeType)
            # Timestamp is in epoch seconds (10 decimal digits).
            self.assertTrue(math.ceil(math.log10(ob_trade_event.timestamp)) == 10)
            self.assertTrue(ob_trade_event.amount > 0)
            self.assertTrue(ob_trade_event.price > 0)
    def test_tracker_integrity(self):
        # Wait 10 seconds so some order book diffs get processed.
        # (The original comment said 5 seconds; the sleep is 10.0.)
        self.ev_loop.run_until_complete(asyncio.sleep(10.0))
        order_books: Dict[str, OrderBook] = self.order_book_tracker.order_books
        eth_usdt: OrderBook = order_books["ETH-USDT"]
        self.assertIsNot(eth_usdt.last_diff_uid, 0)
        # Filling 10 units must cost at least the top-of-book price when
        # buying, and at most the top-of-book price when selling.
        self.assertGreaterEqual(eth_usdt.get_price_for_volume(True, 10).result_price,
                                eth_usdt.get_price(True))
        self.assertLessEqual(eth_usdt.get_price_for_volume(False, 10).result_price,
                             eth_usdt.get_price(False))
    def test_api_get_last_traded_prices(self):
        # Sanity-check the last-traded-price lookup for two pairs.
        prices = self.ev_loop.run_until_complete(
            CryptoComAPIOrderBookDataSource.get_last_traded_prices(["BTC-USDT", "LTC-BTC"]))
        for key, value in prices.items():
            print(f"{key} last_trade_price: {value}")
        self.assertGreater(prices["BTC-USDT"], 1000)
        self.assertLess(prices["LTC-BTC"], 1)
def main():
    """Configure logging and run the unit tests in this module."""
    logging.basicConfig(level=logging.INFO)
    unittest.main()
# Allow running this test module directly as a script.
if __name__ == "__main__":
    main()
| 42.583333 | 122 | 0.688845 |
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../../../")))
import math
import time
import asyncio
import logging
import unittest
from typing import Dict, Optional, List
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import OrderBookEvent, OrderBookTradeEvent, TradeType
from hummingbot.connector.exchange.crypto_com.crypto_com_order_book_tracker import CryptoComOrderBookTracker
from hummingbot.connector.exchange.crypto_com.crypto_com_api_order_book_data_source import CryptoComAPIOrderBookDataSource
from hummingbot.core.data_type.order_book import OrderBook
class CryptoComOrderBookTrackerUnitTest(unittest.TestCase):
    """Integration tests for the crypto.com order book tracker.

    NOTE(review): these tests talk to the live exchange, so they need
    network access and real-time market activity to pass.
    """
    # Shared tracker instance, created once for the class in setUpClass.
    order_book_tracker: Optional[CryptoComOrderBookTracker] = None
    # Order book events the logger subscribes to in setUp.
    events: List[OrderBookEvent] = [
        OrderBookEvent.TradeEvent
    ]
    # Markets whose order books are tracked during the tests.
    trading_pairs: List[str] = [
        "BTC-USDT",
        "ETH-USDT",
    ]
    @classmethod
    def setUpClass(cls):
        # Start the tracker once and block until order books are populated.
        cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
        cls.order_book_tracker: CryptoComOrderBookTracker = CryptoComOrderBookTracker(cls.trading_pairs)
        cls.order_book_tracker.start()
        cls.ev_loop.run_until_complete(cls.wait_til_tracker_ready())
    @classmethod
    async def wait_til_tracker_ready(cls):
        # Poll once per second until at least one order book exists.
        while True:
            if len(cls.order_book_tracker.order_books) > 0:
                print("Initialized real-time order books.")
                return
            await asyncio.sleep(1)
    async def run_parallel_async(self, *tasks, timeout=None):
        """Gather *tasks* concurrently, raising once *timeout* polling
        iterations (~1 second each) have elapsed without completion."""
        future: asyncio.Future = asyncio.ensure_future(asyncio.gather(*tasks))
        timer = 0
        while not future.done():
            if timeout and timer > timeout:
                raise Exception("Timeout running parallel async tasks in tests")
            timer += 1
            now = time.time()
            # Computed but unused; retained from the original loop bookkeeping.
            _next_iteration = now // 1.0 + 1
            await asyncio.sleep(1.0)
        return future.result()
    def run_parallel(self, *tasks):
        # Synchronous convenience wrapper around run_parallel_async.
        return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
    def setUp(self):
        # Attach a fresh event logger to every tracked order book.
        self.event_logger = EventLogger()
        for event_tag in self.events:
            for trading_pair, order_book in self.order_book_tracker.order_books.items():
                order_book.add_listener(event_tag, self.event_logger)
    def test_order_book_trade_event_emission(self):
        """Checks that trade messages from the exchange are parsed into
        well-formed OrderBookTradeEvent objects."""
        self.run_parallel(self.event_logger.wait_for(OrderBookTradeEvent))
        for ob_trade_event in self.event_logger.event_log:
            self.assertTrue(type(ob_trade_event) == OrderBookTradeEvent)
            self.assertTrue(ob_trade_event.trading_pair in self.trading_pairs)
            self.assertTrue(type(ob_trade_event.timestamp) in [float, int])
            self.assertTrue(type(ob_trade_event.amount) == float)
            self.assertTrue(type(ob_trade_event.price) == float)
            self.assertTrue(type(ob_trade_event.type) == TradeType)
            # Timestamp is in epoch seconds (10 decimal digits).
            self.assertTrue(math.ceil(math.log10(ob_trade_event.timestamp)) == 10)
            self.assertTrue(ob_trade_event.amount > 0)
            self.assertTrue(ob_trade_event.price > 0)
    def test_tracker_integrity(self):
        # Wait 10 seconds so some order book diffs get processed.
        self.ev_loop.run_until_complete(asyncio.sleep(10.0))
        order_books: Dict[str, OrderBook] = self.order_book_tracker.order_books
        eth_usdt: OrderBook = order_books["ETH-USDT"]
        self.assertIsNot(eth_usdt.last_diff_uid, 0)
        # Filling 10 units must cost at least the top-of-book price when
        # buying, and at most the top-of-book price when selling.
        self.assertGreaterEqual(eth_usdt.get_price_for_volume(True, 10).result_price,
                                eth_usdt.get_price(True))
        self.assertLessEqual(eth_usdt.get_price_for_volume(False, 10).result_price,
                             eth_usdt.get_price(False))
    def test_api_get_last_traded_prices(self):
        # Sanity-check the last-traded-price lookup for two pairs.
        prices = self.ev_loop.run_until_complete(
            CryptoComAPIOrderBookDataSource.get_last_traded_prices(["BTC-USDT", "LTC-BTC"]))
        for key, value in prices.items():
            print(f"{key} last_trade_price: {value}")
        self.assertGreater(prices["BTC-USDT"], 1000)
        self.assertLess(prices["LTC-BTC"], 1)
def main():
logging.basicConfig(level=logging.INFO)
unittest.main()
if __name__ == "__main__":
main()
| true | true |
1c46c1a3d3a1d1d7895e4b0c6561df3c3c4494fb | 4,771 | py | Python | library/wait_for_pid.py | dusennn/clickhouse-ansible | e1fb665c2afc095c9a46087bf948b633e7bcd6f6 | [
"Apache-2.0"
] | 2 | 2021-09-27T10:16:17.000Z | 2021-09-27T10:18:20.000Z | library/wait_for_pid.py | dusennn/clickhouse-ansible | e1fb665c2afc095c9a46087bf948b633e7bcd6f6 | [
"Apache-2.0"
] | null | null | null | library/wait_for_pid.py | dusennn/clickhouse-ansible | e1fb665c2afc095c9a46087bf948b633e7bcd6f6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import binascii
import datetime
import math
import re
import select
import socket
import sys
import time
import os
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec = dict(
pid=dict(default=None, type='int'),
pid_file=dict(default=None, type='path'),
timeout=dict(default=300, type='int'),
delay=dict(default=0, type='int'),
thread_name_regex=dict(default=None, type='str'),
thread_num=dict(default=1, type='int'),
state=dict(default='present', choices=['present', 'absent']),
sleep=dict(default=1, type='int')
),
)
params = module.params
pid = params['pid']
pid_file = params['pid_file']
timeout = params['timeout']
delay = params['delay']
thread_name_regex = params['thread_name_regex']
thread_num = params['thread_num']
state = params['state']
sleep = params['sleep']
if thread_name_regex is not None:
compiled_search_re = re.compile(thread_name_regex, re.MULTILINE)
else:
compiled_search_re = None
if pid and pid_file:
module.fail_json(msg="pid and pid_file parameter can not both be passed to wait_for_pid")
start = datetime.datetime.now()
if delay:
time.sleep(delay)
if not pid and not pid_file:
time.sleep(timeout)
elif state == 'absent':
### first wait for the stop condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
try:
if pid_file:
f = open(pid_file)
pid = f.read().strip()
f.close()
f = open("/proc/%s/comm' %s pid")
f.close()
except IOError:
break
except:
break
# Conditions not yet met, wait and try again
time.sleep(params['sleep'])
else:
elapsed = datetime.datetime.now() - start
if pid_file:
module.fail_json(msg="Timeout when waiting for PID:%s to stop." % (pid_file), elapsed=elapsed.seconds)
elif pid:
module.fail_json(msg="Timeout when waiting for PID:%s to be absent." % (pid), elapsed=elapsed.seconds)
elif state == 'present':
### wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
try:
if pid_file:
f = open(pid_file)
pid = f.read().strip()
f.close()
f = open('/proc/%s/comm' % pid)
f.close()
except (OSError, IOError):
e = get_exception()
# If anything except file not present, throw an error
if e.errno != 2:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
# process exists. Are there additional things to check?
if not compiled_search_re:
# nope, succeed!
break
try:
matches = 0
for thread in os.listdir('/proc/%s/task' % pid):
f = open('/proc/%s/task/%s/comm' % (pid, thread))
try:
if re.search(compiled_search_re, f.read()):
matches += 1
finally:
f.close()
if matches >= thread_num:
# found, success!
break
except (OSError, IOError):
pass
# Conditions not yet met, wait and try again
time.sleep(params['sleep'])
else: # while-else
# Timeout expired
elapsed = datetime.datetime.now() - start
if pid_file:
module.fail_json(msg="Timeout when waiting for PID:%s to stop." % (pid_file), elapsed=elapsed.seconds)
elif pid:
module.fail_json(msg="Timeout when waiting for PID:%s to be absent." % (pid), elapsed=elapsed.seconds)
elapsed = datetime.datetime.now() - start
module.exit_json(state=state, pid=pid, thread_name_regex=thread_name_regex, pid_file=pid_file, elapsed=elapsed.seconds)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| 35.080882 | 123 | 0.530916 |
import binascii
import datetime
import math
import re
import select
import socket
import sys
import time
import os
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec = dict(
pid=dict(default=None, type='int'),
pid_file=dict(default=None, type='path'),
timeout=dict(default=300, type='int'),
delay=dict(default=0, type='int'),
thread_name_regex=dict(default=None, type='str'),
thread_num=dict(default=1, type='int'),
state=dict(default='present', choices=['present', 'absent']),
sleep=dict(default=1, type='int')
),
)
params = module.params
pid = params['pid']
pid_file = params['pid_file']
timeout = params['timeout']
delay = params['delay']
thread_name_regex = params['thread_name_regex']
thread_num = params['thread_num']
state = params['state']
sleep = params['sleep']
if thread_name_regex is not None:
compiled_search_re = re.compile(thread_name_regex, re.MULTILINE)
else:
compiled_search_re = None
if pid and pid_file:
module.fail_json(msg="pid and pid_file parameter can not both be passed to wait_for_pid")
start = datetime.datetime.now()
if delay:
time.sleep(delay)
if not pid and not pid_file:
time.sleep(timeout)
elif state == 'absent':
le datetime.datetime.now() < end:
try:
if pid_file:
f = open(pid_file)
pid = f.read().strip()
f.close()
f = open("/proc/%s/comm' %s pid")
f.close()
except IOError:
break
except:
break
# Conditions not yet met, wait and try again
time.sleep(params['sleep'])
else:
elapsed = datetime.datetime.now() - start
if pid_file:
module.fail_json(msg="Timeout when waiting for PID:%s to stop." % (pid_file), elapsed=elapsed.seconds)
elif pid:
module.fail_json(msg="Timeout when waiting for PID:%s to be absent." % (pid), elapsed=elapsed.seconds)
elif state == 'present':
### wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
try:
if pid_file:
f = open(pid_file)
pid = f.read().strip()
f.close()
f = open('/proc/%s/comm' % pid)
f.close()
except (OSError, IOError):
e = get_exception()
# If anything except file not present, throw an error
if e.errno != 2:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
if not compiled_search_re:
break
try:
matches = 0
for thread in os.listdir('/proc/%s/task' % pid):
f = open('/proc/%s/task/%s/comm' % (pid, thread))
try:
if re.search(compiled_search_re, f.read()):
matches += 1
finally:
f.close()
if matches >= thread_num:
break
except (OSError, IOError):
pass
time.sleep(params['sleep'])
else:
elapsed = datetime.datetime.now() - start
if pid_file:
module.fail_json(msg="Timeout when waiting for PID:%s to stop." % (pid_file), elapsed=elapsed.seconds)
elif pid:
module.fail_json(msg="Timeout when waiting for PID:%s to be absent." % (pid), elapsed=elapsed.seconds)
elapsed = datetime.datetime.now() - start
module.exit_json(state=state, pid=pid, thread_name_regex=thread_name_regex, pid_file=pid_file, elapsed=elapsed.seconds)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| true | true |
1c46c1ef0648d2c7242778c928e30f3a9eb9d157 | 2,205 | py | Python | test/TEMPFILEPREFIX.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | 1 | 2019-09-18T06:37:02.000Z | 2019-09-18T06:37:02.000Z | test/TEMPFILEPREFIX.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | test/TEMPFILEPREFIX.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/TEMPFILEPREFIX.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that setting the $TEMPFILEPREFIX variable will append to the
beginning of the TEMPFILE invocation of a long command line.
"""
import os
import stat
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re)
test.write('echo.py', """\
#!/usr/bin/env python
import sys
print sys.argv
""")
echo_py = test.workpath('echo.py')
st = os.stat(echo_py)
os.chmod(echo_py, st[stat.ST_MODE]|0111)
test.write('SConstruct', """
import os
env = Environment(
BUILDCOM = '${TEMPFILE("xxx.py $TARGET $SOURCES")}',
MAXLINELENGTH = 16,
TEMPFILEPREFIX = '-via',
)
env.AppendENVPath('PATH', os.curdir)
env.Command('foo.out', 'foo.in', '$BUILDCOM')
""")
test.write('foo.in', "foo.in\n")
test.run(arguments = '-n -Q .',
stdout = """\
Using tempfile \\S+ for command line:
xxx.py foo.out foo.in
xxx.py -via\\S+
""")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 28.636364 | 96 | 0.730612 |
__revision__ = "test/TEMPFILEPREFIX.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that setting the $TEMPFILEPREFIX variable will append to the
beginning of the TEMPFILE invocation of a long command line.
"""
import os
import stat
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re)
test.write('echo.py', """\
#!/usr/bin/env python
import sys
print sys.argv
""")
echo_py = test.workpath('echo.py')
st = os.stat(echo_py)
os.chmod(echo_py, st[stat.ST_MODE]|0111)
test.write('SConstruct', """
import os
env = Environment(
BUILDCOM = '${TEMPFILE("xxx.py $TARGET $SOURCES")}',
MAXLINELENGTH = 16,
TEMPFILEPREFIX = '-via',
)
env.AppendENVPath('PATH', os.curdir)
env.Command('foo.out', 'foo.in', '$BUILDCOM')
""")
test.write('foo.in', "foo.in\n")
test.run(arguments = '-n -Q .',
stdout = """\
Using tempfile \\S+ for command line:
xxx.py foo.out foo.in
xxx.py -via\\S+
""")
test.pass_test()
| false | true |
1c46c33547965d1902ac5b6fd51ac5393e78bf60 | 3,694 | py | Python | nengo/utils/tests/test_ensemble.py | HugoChateauLaurent/nengo | 749893186ee09aa6c621a40da3ffd3878114db9c | [
"BSD-2-Clause"
] | null | null | null | nengo/utils/tests/test_ensemble.py | HugoChateauLaurent/nengo | 749893186ee09aa6c621a40da3ffd3878114db9c | [
"BSD-2-Clause"
] | null | null | null | nengo/utils/tests/test_ensemble.py | HugoChateauLaurent/nengo | 749893186ee09aa6c621a40da3ffd3878114db9c | [
"BSD-2-Clause"
] | null | null | null | from __future__ import absolute_import
import numpy as np
import mpl_toolkits.mplot3d
import pytest
import nengo
from nengo.dists import Uniform
from nengo.utils.ensemble import response_curves, tuning_curves
def plot_tuning_curves(plt, eval_points, activities):
if eval_points.ndim <= 2:
plt.plot(eval_points, activities)
elif eval_points.ndim == 3:
assert mpl_toolkits.mplot3d
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(eval_points.T[0], eval_points.T[1], activities.T[0])
else:
raise NotImplementedError()
def test_tuning_curves_1d(Simulator, plt, seed):
"""For 1D ensembles, should be able to do plt.plot(*tuning_curves(...))."""
model = nengo.Network(seed=seed)
with model:
ens_1d = nengo.Ensemble(10, dimensions=1, neuron_type=nengo.LIF())
with Simulator(model) as sim:
plt.plot(*tuning_curves(ens_1d, sim))
@pytest.mark.parametrize('dimensions', [1, 2])
def test_tuning_curves(Simulator, nl_nodirect, plt, seed, dimensions):
radius = 10
max_rate = 400
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=dimensions, neuron_type=nl_nodirect(),
max_rates=Uniform(200, max_rate), radius=radius)
with Simulator(model) as sim:
eval_points, activities = tuning_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
# Check that eval_points cover up to the radius.
assert np.abs(radius - np.max(np.abs(eval_points))) <= (
2 * radius / dimensions)
assert np.all(activities >= 0)
d = np.sqrt(np.sum(np.asarray(eval_points) ** 2, axis=-1))
assert np.all(activities[d <= radius] <= max_rate)
@pytest.mark.parametrize('dimensions', [1, 2])
def test_tuning_curves_direct_mode(Simulator, plt, seed, dimensions):
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(10, dimensions, neuron_type=nengo.Direct())
with Simulator(model) as sim:
eval_points, activities = tuning_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
# eval_points is passed through in direct mode neurons
assert np.allclose(eval_points, activities)
def test_response_curves(Simulator, nl_nodirect, plt, seed):
max_rate = 400
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=10, neuron_type=nl_nodirect(), radius=1.5,
max_rates=Uniform(200, max_rate))
with Simulator(model) as sim:
eval_points, activities = response_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert eval_points.ndim == 1 and eval_points.size > 0
assert np.all(eval_points >= -1.0) and np.all(eval_points <= 1.0)
assert np.all(activities >= 0.0)
assert np.all(activities <= max_rate)
# Activities along preferred direction must increase monotonically.
assert np.all(np.diff(activities, axis=0) >= 0.0)
@pytest.mark.parametrize('dimensions', [1, 2])
def test_response_curves_direct_mode(Simulator, plt, seed, dimensions):
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=dimensions, neuron_type=nengo.Direct(), radius=1.5)
with Simulator(model) as sim:
eval_points, activities = response_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert eval_points.ndim == 1 and eval_points.size > 0
assert np.all(eval_points >= -1.0) and np.all(eval_points <= 1.0)
# eval_points is passed through in direct mode neurons
assert np.allclose(eval_points, activities)
| 33.581818 | 79 | 0.692204 | from __future__ import absolute_import
import numpy as np
import mpl_toolkits.mplot3d
import pytest
import nengo
from nengo.dists import Uniform
from nengo.utils.ensemble import response_curves, tuning_curves
def plot_tuning_curves(plt, eval_points, activities):
if eval_points.ndim <= 2:
plt.plot(eval_points, activities)
elif eval_points.ndim == 3:
assert mpl_toolkits.mplot3d
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(eval_points.T[0], eval_points.T[1], activities.T[0])
else:
raise NotImplementedError()
def test_tuning_curves_1d(Simulator, plt, seed):
model = nengo.Network(seed=seed)
with model:
ens_1d = nengo.Ensemble(10, dimensions=1, neuron_type=nengo.LIF())
with Simulator(model) as sim:
plt.plot(*tuning_curves(ens_1d, sim))
@pytest.mark.parametrize('dimensions', [1, 2])
def test_tuning_curves(Simulator, nl_nodirect, plt, seed, dimensions):
radius = 10
max_rate = 400
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=dimensions, neuron_type=nl_nodirect(),
max_rates=Uniform(200, max_rate), radius=radius)
with Simulator(model) as sim:
eval_points, activities = tuning_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert np.abs(radius - np.max(np.abs(eval_points))) <= (
2 * radius / dimensions)
assert np.all(activities >= 0)
d = np.sqrt(np.sum(np.asarray(eval_points) ** 2, axis=-1))
assert np.all(activities[d <= radius] <= max_rate)
@pytest.mark.parametrize('dimensions', [1, 2])
def test_tuning_curves_direct_mode(Simulator, plt, seed, dimensions):
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(10, dimensions, neuron_type=nengo.Direct())
with Simulator(model) as sim:
eval_points, activities = tuning_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert np.allclose(eval_points, activities)
def test_response_curves(Simulator, nl_nodirect, plt, seed):
max_rate = 400
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=10, neuron_type=nl_nodirect(), radius=1.5,
max_rates=Uniform(200, max_rate))
with Simulator(model) as sim:
eval_points, activities = response_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert eval_points.ndim == 1 and eval_points.size > 0
assert np.all(eval_points >= -1.0) and np.all(eval_points <= 1.0)
assert np.all(activities >= 0.0)
assert np.all(activities <= max_rate)
assert np.all(np.diff(activities, axis=0) >= 0.0)
@pytest.mark.parametrize('dimensions', [1, 2])
def test_response_curves_direct_mode(Simulator, plt, seed, dimensions):
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=dimensions, neuron_type=nengo.Direct(), radius=1.5)
with Simulator(model) as sim:
eval_points, activities = response_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert eval_points.ndim == 1 and eval_points.size > 0
assert np.all(eval_points >= -1.0) and np.all(eval_points <= 1.0)
assert np.allclose(eval_points, activities)
| true | true |
1c46c3bd574a713b7791ae587b09e515b813b794 | 584 | py | Python | tock/employees/migrations/0024_auto_20171229_1156.py | mikiec84/tock | 15318a45b2b144360e4d7e15db655467a45c2ab9 | [
"CC0-1.0"
] | 134 | 2015-02-02T18:42:03.000Z | 2022-01-20T04:27:06.000Z | tock/employees/migrations/0024_auto_20171229_1156.py | mikiec84/tock | 15318a45b2b144360e4d7e15db655467a45c2ab9 | [
"CC0-1.0"
] | 1,220 | 2015-03-19T01:57:30.000Z | 2022-03-23T21:52:15.000Z | tock/employees/migrations/0024_auto_20171229_1156.py | mikiec84/tock | 15318a45b2b144360e4d7e15db655467a45c2ab9 | [
"CC0-1.0"
] | 49 | 2015-03-09T15:44:33.000Z | 2022-01-19T02:02:37.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-29 16:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('employees', '0023_userdata_organization'),
]
operations = [
migrations.AlterField(
model_name='userdata',
name='organization',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
),
]
| 26.545455 | 137 | 0.667808 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('employees', '0023_userdata_organization'),
]
operations = [
migrations.AlterField(
model_name='userdata',
name='organization',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
),
]
| true | true |
1c46c3de0b7231d58a348ef921880f2a3b454ce7 | 64,803 | py | Python | Base Converter/main.py | mrif449/simple-python-projects | 1d57b861f2d54568ebab955722f782a351a57f21 | [
"MIT"
] | null | null | null | Base Converter/main.py | mrif449/simple-python-projects | 1d57b861f2d54568ebab955722f782a351a57f21 | [
"MIT"
] | null | null | null | Base Converter/main.py | mrif449/simple-python-projects | 1d57b861f2d54568ebab955722f782a351a57f21 | [
"MIT"
] | null | null | null | print("Welcome to Base Converter Calculator!!!")
print("You can select your calculation mode by entering the serial number, or write 'close' stop calculating.")
print()
print("Note: You can also close the whole program by pressing Enter after closing calculation menu or manually.")
#Options:
print("Basic Bases:")
print("Decimal = 10")
print("Binary = 2")
print("Octal = 8")
print("Hexa-Decimal = 16")
print("...............................")
print("Let's Start...")
print("Press Enter to Start...")
inp = input("or Anything to Stop...")
while True:
if inp == "":
#Selecting Calculation Mode:
#command = (input("Select your calculation mode (1-14): "))
i_base = int(input("Enter the input Base: "))
o_base = int(input("Enter the output Base: "))
#Decimal to Binary
if i_base == 10 and o_base == 2:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Octal
elif i_base == 10 and o_base == 8:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Hexa-Decimal
elif i_base == 10 and o_base == 16:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Binary to Decimal
elif i_base == 2 and o_base == 10:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Binary to Octal
elif i_base == 2 and o_base == 8:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Binary to Hexa-Decimal
elif i_base == 2 and o_base == 16:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Octal to Decimal
elif i_base == 8 and o_base == 10:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Octal to Binary
elif i_base == 8 and o_base == 2:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Octal to Hexa-Decimal
elif i_base == 8 and o_base == 16:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Hexa-Decimal to Decimal
elif i_base == 16 and o_base == 10:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Hexa-Decimal to Binary
elif i_base == 16 and o_base == 2:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Hexa-Decimal to Octal
elif i_base == 16 and o_base == 8:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Other Base:
elif i_base == 10:
if o_base == 3:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%3)
temp = temp // 3
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 4:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%4)
temp = temp // 4
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 5:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%5)
temp = temp // 5
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 6:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%6)
temp = temp // 6
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 7:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%7)
temp = temp // 7
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 9:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%9)
temp = temp // 9
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 11:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%11
if temp2 == 10:
string += "A"
else:
string += str(temp%11)
temp = temp // 11
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 12:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%12
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
else:
string += str(temp%12)
temp = temp // 12
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 13:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%13
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
else:
string += str(temp%13)
temp = temp // 13
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 14:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%14
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
else:
string += str(temp%14)
temp = temp // 14
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 15:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%15
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
else:
string += str(temp%15)
temp = temp // 15
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 17:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%17
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
else:
string += str(temp%17)
temp = temp // 17
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 18:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%18
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
else:
string += str(temp%18)
temp = temp // 18
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 19:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%19
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
else:
string += str(temp%19)
temp = temp // 19
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 20:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%20
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
else:
string += str(temp%20)
temp = temp // 20
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 21:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%21
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
else:
string += str(temp%21)
temp = temp // 21
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 22:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%22
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
else:
string += str(temp%22)
temp = temp // 22
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 23:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%23
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
else:
string += str(temp%23)
temp = temp // 23
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 24:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%24
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
else:
string += str(temp%24)
temp = temp // 24
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 25:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%25
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
else:
string += str(temp%25)
temp = temp // 25
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 26:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%26
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
else:
string += str(temp%26)
temp = temp // 26
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 27:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%27
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
else:
string += str(temp%27)
temp = temp // 27
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 28:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%28
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
else:
string += str(temp%28)
temp = temp // 28
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 29:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%29
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
else:
string += str(temp%29)
temp = temp // 29
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 30:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%30
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
else:
string += str(temp%30)
temp = temp // 30
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 31:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%31
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
else:
string += str(temp%31)
temp = temp // 31
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 32:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%32
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
else:
string += str(temp%32)
temp = temp // 32
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 33:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%33
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
else:
string += str(temp%33)
temp = temp // 33
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 34:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%34
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
else:
string += str(temp%34)
temp = temp // 34
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 35:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%35
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
elif temp2 == 34:
string += "Y"
else:
string += str(temp%35)
temp = temp // 35
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 36:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%36
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
elif temp2 == 34:
string += "Y"
elif temp2 == 35:
string += "Z"
else:
string += str(temp%36)
temp = temp // 36
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
else:
break
inp = input("Press Enter to close...") | 38.141848 | 114 | 0.290496 | print("Welcome to Base Converter Calculator!!!")
print("You can select your calculation mode by entering the serial number, or write 'close' stop calculating.")
print()
print("Note: You can also close the whole program by pressing Enter after closing calculation menu or manually.")
print("Basic Bases:")
print("Decimal = 10")
print("Binary = 2")
print("Octal = 8")
print("Hexa-Decimal = 16")
print("...............................")
print("Let's Start...")
print("Press Enter to Start...")
inp = input("or Anything to Stop...")
while True:
if inp == "":
#Selecting Calculation Mode:
#command = (input("Select your calculation mode (1-14): "))
i_base = int(input("Enter the input Base: "))
o_base = int(input("Enter the output Base: "))
#Decimal to Binary
if i_base == 10 and o_base == 2:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Octal
elif i_base == 10 and o_base == 8:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Hexa-Decimal
elif i_base == 10 and o_base == 16:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Binary to Decimal
elif i_base == 2 and o_base == 10:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Binary to Octal
elif i_base == 2 and o_base == 8:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Binary to Hexa-Decimal
elif i_base == 2 and o_base == 16:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Octal to Decimal
elif i_base == 8 and o_base == 10:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Octal to Binary
elif i_base == 8 and o_base == 2:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Octal to Hexa-Decimal
elif i_base == 8 and o_base == 16:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Hexa-Decimal to Decimal
elif i_base == 16 and o_base == 10:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Hexa-Decimal to Binary
elif i_base == 16 and o_base == 2:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Hexa-Decimal to Octal
elif i_base == 16 and o_base == 8:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Other Base:
elif i_base == 10:
if o_base == 3:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%3)
temp = temp // 3
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 4:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%4)
temp = temp // 4
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 5:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%5)
temp = temp // 5
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 6:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%6)
temp = temp // 6
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 7:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%7)
temp = temp // 7
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 9:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%9)
temp = temp // 9
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 11:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%11
if temp2 == 10:
string += "A"
else:
string += str(temp%11)
temp = temp // 11
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 12:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%12
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
else:
string += str(temp%12)
temp = temp // 12
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 13:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%13
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
else:
string += str(temp%13)
temp = temp // 13
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 14:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%14
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
else:
string += str(temp%14)
temp = temp // 14
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 15:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%15
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
else:
string += str(temp%15)
temp = temp // 15
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 17:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%17
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
else:
string += str(temp%17)
temp = temp // 17
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 18:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%18
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
else:
string += str(temp%18)
temp = temp // 18
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 19:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%19
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
else:
string += str(temp%19)
temp = temp // 19
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 20:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%20
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
else:
string += str(temp%20)
temp = temp // 20
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 21:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%21
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
else:
string += str(temp%21)
temp = temp // 21
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 22:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%22
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
else:
string += str(temp%22)
temp = temp // 22
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 23:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%23
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
else:
string += str(temp%23)
temp = temp // 23
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 24:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%24
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
else:
string += str(temp%24)
temp = temp // 24
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 25:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%25
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
else:
string += str(temp%25)
temp = temp // 25
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 26:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%26
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
else:
string += str(temp%26)
temp = temp // 26
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 27:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%27
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
else:
string += str(temp%27)
temp = temp // 27
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 28:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%28
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
else:
string += str(temp%28)
temp = temp // 28
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 29:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%29
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
else:
string += str(temp%29)
temp = temp // 29
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 30:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%30
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
else:
string += str(temp%30)
temp = temp // 30
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 31:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%31
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
else:
string += str(temp%31)
temp = temp // 31
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 32:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%32
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
else:
string += str(temp%32)
temp = temp // 32
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 33:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%33
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
else:
string += str(temp%33)
temp = temp // 33
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 34:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%34
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
else:
string += str(temp%34)
temp = temp // 34
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 35:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%35
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
elif temp2 == 34:
string += "Y"
else:
string += str(temp%35)
temp = temp // 35
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 36:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%36
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
elif temp2 == 34:
string += "Y"
elif temp2 == 35:
string += "Z"
else:
string += str(temp%36)
temp = temp // 36
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
else:
break
# Keep the console window open until the user dismisses the program.
inp = input("Press Enter to close...")
1c46c40e2bfd9e44bd757c0752d89f57ed80ef32 | 9,575 | py | Python | env/lib/python3.8/site-packages/plotly/graph_objs/scattergl/marker/colorbar/_tickformatstop.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/lib/python3.8/site-packages/plotly/graph_objs/scattergl/marker/colorbar/_tickformatstop.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/lib/python3.8/site-packages/plotly/graph_objs/scattergl/marker/colorbar/_tickformatstop.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
    """
    One entry of a scattergl marker colorbar's `tickformatstops` array:
    it maps a dtick zoom-level range onto the tick format string to use
    at that zoom level.

    NOTE(review): this is auto-generated plotly.py graph-object
    boilerplate; values are stored and validated through the base
    class's mapping interface (``self[...]``), so edits should normally
    go into the code generator, not this file.
    """

    # class properties
    # --------------------
    # Dotted path of the parent node and of this node in the figure
    # schema, plus the closed set of property names this node accepts.
    _parent_path_str = "scattergl.marker.colorbar"
    _path_str = "scattergl.marker.colorbar.tickformatstop"
    _valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}

    # dtickrange
    # ----------
    @property
    def dtickrange(self):
        """
        range [*min*, *max*], where "min", "max" - dtick values which
        describe some zoom level, it is possible to omit "min" or "max"
        value by passing "null"

        The 'dtickrange' property is an info array that may be specified as:

        * a list or tuple of 2 elements where:
    (0) The 'dtickrange[0]' property accepts values of any type
    (1) The 'dtickrange[1]' property accepts values of any type

        Returns
        -------
        list
        """
        return self["dtickrange"]

    @dtickrange.setter
    def dtickrange(self, val):
        self["dtickrange"] = val

    # enabled
    # -------
    @property
    def enabled(self):
        """
        Determines whether or not this stop is used. If `false`, this
        stop is ignored even within its `dtickrange`.

        The 'enabled' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["enabled"]

    @enabled.setter
    def enabled(self, val):
        self["enabled"] = val

    # name
    # ----
    @property
    def name(self):
        """
        When used in a template, named items are created in the output
        figure in addition to any items the figure already has in this
        array. You can modify these items in the output figure by
        making your own item with `templateitemname` matching this
        `name` alongside your modifications (including `visible: false`
        or `enabled: false` to hide it). Has no effect outside of a
        template.

        The 'name' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["name"]

    @name.setter
    def name(self, val):
        self["name"] = val

    # templateitemname
    # ----------------
    @property
    def templateitemname(self):
        """
        Used to refer to a named item in this array in the template.
        Named items from the template will be created even without a
        matching item in the input figure, but you can modify one by
        making an item with `templateitemname` matching its `name`,
        alongside your modifications (including `visible: false` or
        `enabled: false` to hide it). If there is no template or no
        matching item, this item will be hidden unless you explicitly
        show it with `visible: true`.

        The 'templateitemname' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["templateitemname"]

    @templateitemname.setter
    def templateitemname(self, val):
        self["templateitemname"] = val

    # value
    # -----
    @property
    def value(self):
        """
        string - dtickformat for described zoom level, the same as
        "tickformat"

        The 'value' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["value"]

    @value.setter
    def value(self, val):
        self["value"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        dtickrange
            range [*min*, *max*], where "min", "max" - dtick values
            which describe some zoom level, it is possible to omit
            "min" or "max" value by passing "null"
        enabled
            Determines whether or not this stop is used. If
            `false`, this stop is ignored even within its
            `dtickrange`.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        value
            string - dtickformat for described zoom level, the same
            as "tickformat"
        """

    def __init__(
        self,
        arg=None,
        dtickrange=None,
        enabled=None,
        name=None,
        templateitemname=None,
        value=None,
        **kwargs
    ):
        """
        Construct a new Tickformatstop object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scattergl.mark
            er.colorbar.Tickformatstop`
        dtickrange
            range [*min*, *max*], where "min", "max" - dtick values
            which describe some zoom level, it is possible to omit
            "min" or "max" value by passing "null"
        enabled
            Determines whether or not this stop is used. If
            `false`, this stop is ignored even within its
            `dtickrange`.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        value
            string - dtickformat for described zoom level, the same
            as "tickformat"

        Returns
        -------
        Tickformatstop
        """
        super(Tickformatstop, self).__init__("tickformatstops")

        # Internal construction path: the parent node builds children
        # directly and supplies itself via kwargs; skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow-copy so popping consumed keys below does not
            # mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergl.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.marker.colorbar.Tickformatstop`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("dtickrange", None)
        _v = dtickrange if dtickrange is not None else _v
        if _v is not None:
            self["dtickrange"] = _v
        _v = arg.pop("enabled", None)
        _v = enabled if enabled is not None else _v
        if _v is not None:
            self["enabled"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("templateitemname", None)
        _v = templateitemname if templateitemname is not None else _v
        if _v is not None:
            self["templateitemname"] = _v
        _v = arg.pop("value", None)
        _v = value if value is not None else _v
        if _v is not None:
            self["value"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 33.714789 | 85 | 0.571488 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
    """
    One entry of a colorbar's `tickformatstops` array: while the
    colorbar axis dtick lies inside `dtickrange`, ticks use `value`
    as their format string instead of the default "tickformat".
    """

    # Location of this node in the figure hierarchy; consumed by the
    # plotly base machinery for validation and error reporting.
    _parent_path_str = "scattergl.marker.colorbar"
    _path_str = "scattergl.marker.colorbar.tickformatstop"
    _valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}

    # Each property below simply delegates to the validated item access
    # (`self[...]`) provided by the plotly base class.

    @property
    def dtickrange(self):
        """
        range [*min*, *max*], where "min", "max" - dtick values which
        describe some zoom level; either bound may be omitted by
        passing "null".
        """
        return self["dtickrange"]

    @dtickrange.setter
    def dtickrange(self, val):
        self["dtickrange"] = val

    @property
    def enabled(self):
        """
        Determines whether or not this stop is used. If `false`, this
        stop is ignored even within its `dtickrange`.
        """
        return self["enabled"]

    @enabled.setter
    def enabled(self, val):
        self["enabled"] = val

    @property
    def name(self):
        """
        When used in a template, named items are created in the output
        figure in addition to any items the figure already has in this
        array. Items in the output figure can then be overridden by a
        template item whose `templateitemname` matches this `name`.
        Has no effect outside of a template.
        """
        return self["name"]

    @name.setter
    def name(self, val):
        self["name"] = val

    @property
    def templateitemname(self):
        """
        Used to refer to a named item in this array in the template.
        Named items from the template will be created even without a
        matching item in the input figure; if there is no template or
        no matching item, this item is hidden unless explicitly shown
        with `visible: true`.
        """
        return self["templateitemname"]

    @templateitemname.setter
    def templateitemname(self, val):
        self["templateitemname"] = val

    @property
    def value(self):
        """
        string - dtickformat for described zoom level, the same as
        "tickformat".
        """
        return self["value"]

    @value.setter
    def value(self, val):
        self["value"] = val

    @property
    def _prop_descriptions(self):
        return """\
        dtickrange
            range [*min*, *max*], where "min", "max" - dtick values
            which describe some zoom level, it is possible to omit
            "min" or "max" value by passing "null"
        enabled
            Determines whether or not this stop is used. If
            `false`, this stop is ignored even within its
            `dtickrange`.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        value
            string - dtickformat for described zoom level, the same
            as "tickformat"
        """

    def __init__(
        self,
        arg=None,
        dtickrange=None,
        enabled=None,
        name=None,
        templateitemname=None,
        value=None,
        **kwargs
    ):
        """
        Construct a new Tickformatstop object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor, or
            an existing Tickformatstop instance to copy from.
        dtickrange
            [*min*, *max*] dtick range describing a zoom level;
            either bound may be "null".
        enabled
            Whether this stop is used; if `false`, it is ignored
            even within its `dtickrange`.
        name
            Template item name (see the `name` property).
        templateitemname
            Reference to a named template item (see the
            `templateitemname` property).
        value
            dtickformat string applied at the described zoom level.

        Returns
        -------
        Tickformatstop

        Raises
        ------
        ValueError
            If `arg` is neither None, a dict, nor a Tickformatstop.
        """
        super(Tickformatstop, self).__init__("tickformatstops")

        # Fast path used internally when re-parenting an existing node:
        # attach the parent and skip all property processing.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a plain dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergl.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.marker.colorbar.Tickformatstop`"""
            )

        # Validation flags consumed by the base class during assignment.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate properties. An explicit keyword argument wins over the
        # value carried in `arg`; None means "leave unset".
        for prop, explicit in (
            ("dtickrange", dtickrange),
            ("enabled", enabled),
            ("name", name),
            ("templateitemname", templateitemname),
            ("value", value),
        ):
            candidate = arg.pop(prop, None)
            if explicit is not None:
                candidate = explicit
            if candidate is not None:
                self[prop] = candidate

        # Whatever remains in `arg`, plus any extra kwargs, is routed
        # through the unknown-kwarg handler (which honors skip_invalid).
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable strict validation for subsequent assignments.
        self._skip_invalid = False
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.