id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1730185 | <reponame>LukeSkywalker92/heuslertools
from heuslertools.tools.measurement import Measurement
import xrayutilities as xu
import warnings
import numpy as np
class RSMMeasurement(Measurement):
    """Object representing a reciprocal-space-map (RSM) measurement.

    Parameters
    ----------
    file : str
        Path of the xrdml file.
    material : xrayutilities material, optional
        Crystal the map was measured on; required for q-space/hkl conversion.
    geometry : str, optional
        Scattering geometry passed to ``xu.HXRD`` (default ``'hi_lo'``).
    beam_direction, surface_normale, reflex : array_like, optional
        Miller indices of the primary-beam direction, the surface normal and
        the measured reflex.

    Attributes
    ----------
    en : str
        Energy of xrays.
    wavelength : type
        wavelength of xrays.
    resol : float
        resolution in qz.
        NOTE(review): en/wavelength/resol are not set in this class --
        presumably provided by ``Measurement``; verify.
    """

    def __init__(self, file, material=None, geometry='hi_lo', beam_direction=None, surface_normale=None, reflex=None):
        self.material = material
        self.geometry = geometry
        self.beam_direction = beam_direction
        self.surface_normale = surface_normale
        self.reflex = reflex
        # BUG FIX: the original guard also tested `geometry`, whose default is
        # the non-None string 'hi_lo', so the experiment was *always* created
        # and crashed with an AttributeError when `material` was omitted.
        if any(elem is not None for elem in [material, beam_direction, surface_normale, reflex]):
            self._create_experiment()
        super().__init__(file, "")

    def _load_data(self):
        """Read the xrdml scan and return its motor/detector arrays."""
        data = {}
        data['scanmot'], data['Omega'], data['2Theta'], data['Chi'], data['Phi'], data['psd'] = xu.io.getxrdml_scan(self.file, 'om', 'tt', 'c', 'p')
        return data

    def _generate_names(self):
        """Register a generic short name/unit for every loaded column."""
        for name in self.data:
            self.names[name] = {"short_name": name, "unit": "a.u."}

    def _create_experiment(self):
        """Build the ``xu.HXRD`` experiment from the configured directions."""
        self.hxrd = xu.HXRD(self.material.Q(self.beam_direction),
                            self.material.Q(self.surface_normale),
                            geometry=self.geometry)

    def _get_nominal_angle(self, axis):
        """Return the nominal goniometer angle of `axis` for the configured reflex."""
        angles = {}
        [angles['Omega'],
         angles['Chi'],
         angles['Phi'],
         angles['2Theta']] = self.hxrd.Q2Ang(self.material.Q(self.reflex))
        return angles[axis]

    def _get_substrate_peak(self):
        """Estimate the substrate peak position (Omega, 2Theta) as the mean
        position of all points less than one decade below the maximum count."""
        threshold = 10**int(np.log10(self.data['psd'].max()))
        max_values = np.where(self.data['psd'] > threshold)
        return np.mean(self.data['Omega'][max_values]), np.mean(self.data['2Theta'][max_values])

    def get_angle_data(self, size=300, dynamic_range=10):
        """Grid the map in angular (Omega, 2Theta) space.

        Returns the gridder, the log-scaled intensity map and colorbar ticks.
        """
        # BUG FIX: `size` was accepted but ignored (grid hard-coded to 300x300).
        anggridder = xu.FuzzyGridder2D(size, size)
        anggridder(self.data['Omega'], self.data['2Theta'], self.data['psd'])
        angINT = xu.maplog(anggridder.data.transpose(), dynamic_range, 0)
        ticks = list(range(round(dynamic_range) + 1))
        return anggridder, angINT, ticks

    def get_q_data(self, size=300, dynamic_range=10, om_sub=None, tt_sub=None):
        """Grid the map in q-space (qy, qz).

        The offsets between measured and nominal substrate-peak angles correct
        the conversion; `om_sub`/`tt_sub` override the auto-detected peak.
        """
        sub_peak = self._get_substrate_peak()
        if om_sub is None:  # FIX: identity comparison instead of `== None`
            om_sub = sub_peak[0]
        if tt_sub is None:
            tt_sub = sub_peak[1]
        qx, qy, qz = self.hxrd.Ang2Q(self.data['Omega'],
                                     self.data['2Theta'],
                                     delta=[om_sub - self._get_nominal_angle('Omega'),
                                            tt_sub - self._get_nominal_angle('2Theta')])
        qgridder = xu.FuzzyGridder2D(size, size)
        qgridder(qy, qz, self.data['psd'])
        qINT = xu.maplog(qgridder.data.transpose(), dynamic_range, 0)
        ticks = list(range(round(dynamic_range) + 1))
        return qgridder, qINT, ticks

    def get_hkl_data(self, size=300, dynamic_range=10, om_sub=None, tt_sub=None):
        """Grid the map in reciprocal-lattice (h, l) coordinates.

        Same substrate-peak correction scheme as :meth:`get_q_data`.
        """
        sub_peak = self._get_substrate_peak()
        if om_sub is None:
            om_sub = sub_peak[0]
        if tt_sub is None:
            tt_sub = sub_peak[1]
        h, k, l = self.hxrd.Ang2HKL(self.data['Omega'],
                                    self.data['2Theta'],
                                    delta=[om_sub - self._get_nominal_angle('Omega'),
                                           tt_sub - self._get_nominal_angle('2Theta')],
                                    mat=self.material)
        hklgridder = xu.FuzzyGridder2D(size, size)
        hklgridder(h, l, self.data['psd'])
        hklINT = xu.maplog(hklgridder.data.transpose(), dynamic_range, 0)
        ticks = list(range(round(dynamic_range) + 1))
        return hklgridder, hklINT, ticks
1618352 | import logging
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from helium.common.views.views import HeliumAPIView
from helium.planner.models import Course, CourseSchedule
from helium.planner.schemas import CourseScheduleDetailSchema
from helium.planner.serializers.eventserializer import EventSerializer
from helium.planner.services import coursescheduleservice
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Helium Edu"
__version__ = "1.4.38"
logger = logging.getLogger(__name__)
class CourseScheduleAsEventsResourceView(HeliumAPIView):
    """
    get:
    Return all course schedules as a list of event instances.
    """
    permission_classes = (IsAuthenticated,)
    schema = CourseScheduleDetailSchema()

    def get(self, request, *args, **kwargs):
        # Resolve the course from the URL kwarg, then fetch the schedules the
        # authenticated user owns for that course.
        course = Course.objects.get(pk=self.kwargs['course'])
        schedules = CourseSchedule.objects.for_user(self.request.user.pk).for_course(course.pk)
        # Expand the schedules into concrete event instances and serialize.
        events = coursescheduleservice.course_schedules_to_events(course, schedules)
        return Response(EventSerializer(events, many=True).data)
| StarcoderdataPython |
3326964 | import cv2
import numpy as np
import os
import tensorflow as tf
import sys
import skimage
import json
import datetime
import time
import time
import argparse
def reframe_box_masks_to_image_masks(box_masks, boxes, image_height,
                                     image_width):
    """Transforms the box masks back to full image masks.

    Embeds masks in bounding boxes of larger masks whose shapes correspond to
    image shape.

    Args:
      box_masks: A tf.float32 tensor of size [num_masks, mask_height, mask_width].
      boxes: A tf.float32 tensor of size [num_masks, 4] containing the box
        corners. Row i contains [ymin, xmin, ymax, xmax] of the box
        corresponding to mask i. Note that the box corners are in
        normalized coordinates.
      image_height: Image height. The output mask will have the same height as
        the image height.
      image_width: Image width. The output mask will have the same width as the
        image width.

    Returns:
      A tf.float32 tensor of size [num_masks, image_height, image_width].
    """
    # TODO(rathodv): Make this a public function.
    def reframe_box_masks_to_image_masks_default():
        """The default function when there are more than 0 box masks."""
        def transform_boxes_relative_to_boxes(boxes, reference_boxes):
            # Express each box in the coordinate frame of its reference box.
            boxes = tf.reshape(boxes, [-1, 2, 2])
            min_corner = tf.expand_dims(reference_boxes[:, 0:2], 1)
            max_corner = tf.expand_dims(reference_boxes[:, 2:4], 1)
            transformed_boxes = (boxes - min_corner) / (max_corner - min_corner)
            return tf.reshape(transformed_boxes, [-1, 4])

        box_masks_expanded = tf.expand_dims(box_masks, axis=3)
        num_boxes = tf.shape(box_masks_expanded)[0]
        # Unit boxes [0, 0, 1, 1] expressed relative to each detection box give
        # the inverse crop needed to paste each mask back onto the full image.
        unit_boxes = tf.concat(
            [tf.zeros([num_boxes, 2]), tf.ones([num_boxes, 2])], axis=1)
        reverse_boxes = transform_boxes_relative_to_boxes(unit_boxes, boxes)
        return tf.image.crop_and_resize(
            image=box_masks_expanded,
            boxes=reverse_boxes,
            box_ind=tf.range(num_boxes),
            crop_size=[image_height, image_width],
            extrapolation_value=0.0)

    # Guard the zero-detection case, where crop_and_resize has no boxes.
    image_masks = tf.cond(
        tf.shape(box_masks)[0] > 0,
        reframe_box_masks_to_image_masks_default,
        lambda: tf.zeros([0, image_height, image_width, 1], dtype=tf.float32))
    return tf.squeeze(image_masks, axis=3)
def main(args):
    """Run a frozen TF1 detection graph over a video/camera stream.

    Draws a green box for every detection scoring above 0.8 and shows the
    annotated frames in an OpenCV window until 'q' is pressed.
    """
    # PATH_TO_FROZEN_GRAPH = '/home/apptech/Downloads/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb'
    PATH_TO_FROZEN_GRAPH=args.PATH_TO_FROZEN_GRAPH
    # Class-id -> label-name mapping loaded from JSON, keyed by string ids.
    # f = open("/home/apptech/apptech_tf_models/label.json")
    f = open(args.js_file)
    labels=json.loads(f.read())
    # List of the strings that is used to add correct label for each box.
    # labels={1:'heavy_machine'}
    # Load the frozen inference graph into a fresh TF1 Graph.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            if 'detection_masks' in tensor_dict:
                # NOTE(review): `image` is first assigned inside the capture
                # loop below, so this branch raises NameError for any model
                # that exports 'detection_masks'.  Needs restructuring
                # (reframe the masks per frame) -- flagged, not changed here.
                # The following processing is only for single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # cam=cv2.VideoCapture(0)
            cam=cv2.VideoCapture(args.cam_dir)
            # height, width = int(cam.get(3)),int(cam.get(4))
            # fps = cam.get(cv2.CAP_PROP_FPS)
            # fourcc = cv2.VideoWriter_fourcc(*'XVID')
            # out = cv2.VideoWriter(video_save_path, fourcc, fps, 920, 1080))
            fps_time = 0
            cnt=0
            while True:
                success,image = cam.read()
                if not success:
                    break
                # image=cv2.resize(image,(1920,1080),interpolation=cv2.INTER_AREA)
                image_h = image.shape[0]
                image_w = image.shape[1]
                # The model expects RGB; OpenCV captures BGR.
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                output_dict = sess.run(tensor_dict,feed_dict={image_tensor: np.expand_dims(image, 0)})
                # all outputs are float32 numpy arrays, so convert types as appropriate
                output_dict['num_detections'] = int(output_dict['num_detections'][0])
                output_dict['detection_classes'] = output_dict[
                    'detection_classes'][0].astype(np.uint8)
                output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
                output_dict['detection_scores'] = output_dict['detection_scores'][0]
                if 'detection_masks' in output_dict:
                    output_dict['detection_masks'] = output_dict['detection_masks'][0]
                image=cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                # Draw a box for every detection above the 0.8 confidence cut.
                for i, score in enumerate(output_dict['detection_scores']):
                    if score >0.8:
                        classes=labels[str(output_dict['detection_classes'][i])]
                        print(classes)
                        ymin, xmin, ymax, xmax = tuple(output_dict['detection_boxes'][i].tolist())
                        # print(ymin, xmin, ymax, xmax)
                        # Boxes are normalized [0, 1]; scale to pixel coords.
                        ymin = int(ymin * image_h)
                        xmin = int(xmin * image_w)
                        ymax = int(ymax * image_h)
                        xmax = int(xmax * image_w)
                        cv2.rectangle(image,(xmin,ymin),(xmax,ymax),(0,255,0),2)
                        # cv2.putText(image, classes, (xmin,ymax+10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                        cnt+=1
                # out.write(frame)
                cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                fps_time = time.time()
                show_image = cv2.resize(image, (1920,1080))
                cv2.imshow('win', show_image)
                pressed_key = cv2.waitKey(2)
                if pressed_key == ord('q'):
                    break
if __name__ == '__main__':
    # Command-line entry point.
    # FIX: the description said "Process some integers." -- a copy-paste from
    # the argparse documentation; describe what the script actually does.
    parser = argparse.ArgumentParser(description='Run a frozen TF detection graph on a video or camera stream.')
    parser.add_argument('PATH_TO_FROZEN_GRAPH', type=str, help='PATH_TO_FROZEN_GRAPH')
    # FIX: a `default` on a plain positional is ignored (the argument stays
    # required); nargs='?' makes camera 0 the actual fallback.
    parser.add_argument('cam_dir', nargs='?', default=0, help='path to videos/cams (default: camera 0)')
    parser.add_argument('js_file', type=str, help='path to label.json')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
def sortiraj(karta):
    """Sort key for a card: strip the trailing suit symbol and rank the face.

    Numeric faces map to value-10 (7 -> -3 ... 10 -> 0); court cards rank
    B < D < K and the ace highest.  Unknown faces yield None, mirroring the
    original implicit fall-through.
    """
    order = {"7": -3, "8": -2, "9": -1, "10": 0, "B": 2, "D": 3, "K": 4, "A": 11}
    return order.get(karta[:-1])
def vrijednost(karta, adut):
    """Point value of a card given the trump suit *adut*.

    In the trump suit the 9 and B (jack) are promoted to 14 and 20 points;
    otherwise standard values apply.  Worthless cards score a tiny fraction
    (face/100) so they can still be ordered.  Unknown faces yield None.
    """
    trump_values = {"7": 0.07, "8": 0.08, "9": 14, "10": 10,
                    "B": 20, "D": 3, "K": 4, "A": 11}
    plain_values = {"7": 0.07, "8": 0.08, "9": 0.09, "10": 10,
                    "B": 2, "D": 3, "K": 4, "A": 11}
    table = trump_values if karta[-1] == adut else plain_values
    return table.get(karta[:-1])
class Igrac():
    """A player: name, AI flag, a hand of cards and score bookkeeping.

    Played cards are replaced in the hand by the placeholder string
    "prazno" so the hand keeps a fixed length of 8 slots.
    """

    def __init__(self, ime, ai, karte):
        self.ime = ime        # player name
        self.ai = ai          # whether this player is computer-controlled
        self.karte = karte    # current hand (cards + "prazno" placeholders)
        self.prazne = 0       # number of already-played ("prazno") slots
        self.bodovi = 0       # points in the current round
        self.ukupno = 0       # accumulated total score

    def sortiraj_karte(self):
        """Re-sort the hand by suit and face value, placeholders last."""
        # FIX: replaced a side-effect list comprehension with a plain loop.
        for _ in range(self.prazne):
            self.karte.remove("prazno")
        pik = [karta for karta in self.karte if '♠' in karta]
        herc = [karta for karta in self.karte if '♥' in karta]
        kara = [karta for karta in self.karte if '♦' in karta]
        tref = [karta for karta in self.karte if '♣' in karta]
        for boja in [pik, herc, kara, tref]:
            boja.sort(key=sortiraj)
        # Re-append the placeholders so the hand length stays constant.
        self.karte = pik + herc + kara + tref + self.prazne * ["prazno"]

    def baci_kartu(self, karta):
        """Play `karta`: swap it for a placeholder, re-sort, and return it."""
        # FIX: removed leftover debug print() calls.
        self.karte[self.karte.index(karta)] = "prazno"
        self.prazne += 1
        self.sortiraj_karte()
        return karta

    def boje(self):
        """Return the distinct suits still in hand, in hand order."""
        boje = [karta[-1] for karta in self.karte if karta != "prazno"]
        return list(dict.fromkeys(boje))

    def vrati_karte(self):
        """Return only the unplayed cards (placeholders sit at the tail)."""
        return self.karte[:8-self.prazne]
1626408 | <filename>clmm/cosmology/cluster_toolkit.py
# Functions to model halo profiles
import numpy as np
import warnings
from astropy import units
from astropy.cosmology import LambdaCDM, FlatLambdaCDM
from .. constants import Constants as const
from .parent_class import CLMMCosmology
__all__ = []
class AstroPyCosmology(CLMMCosmology):
    """CLMM cosmology backend wrapping an astropy ``LambdaCDM`` object,
    used with the cluster_toolkit ('ct') modeling backend."""

    def __init__(self, **kwargs):
        super(AstroPyCosmology, self).__init__(**kwargs)
        # this tag will be used to check if the cosmology object is accepted by the modeling
        self.backend = 'ct'
        assert isinstance(self.be_cosmo, LambdaCDM)

    def _init_from_cosmo(self, be_cosmo):
        # Adopt an existing astropy LambdaCDM instance as the backend object.
        assert isinstance(be_cosmo, LambdaCDM)
        self.be_cosmo = be_cosmo

    def _init_from_params(self, H0, Omega_b0, Omega_dm0, Omega_k0):
        # Build a LambdaCDM from H0 plus baryon/dark-matter/curvature
        # densities; dark energy closes the budget: Ode0 = 1 - Om0 - Ok0.
        Om0 = Omega_b0+Omega_dm0
        Ob0 = Omega_b0
        Ode0 = 1.0-Om0-Omega_k0
        self.be_cosmo = LambdaCDM(H0=H0, Om0=Om0, Ob0=Ob0, Ode0=Ode0)

    def _set_param(self, key, value):
        # astropy cosmology objects are immutable.
        raise NotImplementedError("Astropy do not support changing parameters")

    def _get_param(self, key):
        """Translate CLMM parameter names to astropy cosmology attributes."""
        if key == "Omega_m0":
            return self.be_cosmo.Om0
        elif key == "Omega_b0":
            return self.be_cosmo.Ob0
        elif key == "Omega_dm0":
            return self.be_cosmo.Odm0
        elif key == "Omega_k0":
            return self.be_cosmo.Ok0
        elif key == 'h':
            # Dimensionless Hubble parameter h = H0 / (100 km/s/Mpc).
            return self.be_cosmo.H0.to_value()/100.0
        elif key == 'H0':
            return self.be_cosmo.H0.to_value()
        else:
            raise ValueError(f"Unsupported parameter {key}")

    def get_Omega_m(self, z):
        """Matter density parameter Omega_m at redshift *z*."""
        return self.be_cosmo.Om(z)

    def get_E2Omega_m(self, z):
        """Omega_m(z) * E(z)^2 with E(z) = H(z)/H0."""
        return self.be_cosmo.Om(z)*(self.be_cosmo.H(z)/self.be_cosmo.H0)**2

    def eval_da_z1z2(self, z1, z2):
        """Angular diameter distance between redshifts *z1* and *z2* in Mpc."""
        return self.be_cosmo.angular_diameter_distance_z1z2(z1, z2).to_value(units.Mpc)

    def eval_sigma_crit(self, z_len, z_src):
        """Critical surface density for a lens at *z_len* and source(s) at *z_src*.

        Sources at or below the lens redshift yield Sigma_crit = inf
        (beta_s is clipped to 0 below, so the division produces inf).
        """
        if np.any(np.array(z_src)<=z_len):
            warnings.warn(f'Some source redshifts are lower than the cluster redshift. Returning Sigma_crit = np.inf for those galaxies.')
        # Constants expressed in pc-based units: c [pc/s], G [pc^3/(Msun s^2)].
        clight_pc_s = const.CLIGHT_KMS.value*1000./const.PC_TO_METER.value
        gnewt_pc3_msun_s2 = const.GNEWT.value*const.SOLAR_MASS.value/const.PC_TO_METER.value**3
        d_l = self.eval_da_z1z2(0, z_len)
        d_s = self.eval_da_z1z2(0, z_src)
        d_ls = self.eval_da_z1z2(z_len, z_src)
        beta_s = np.maximum(0., d_ls/d_s)
        # NOTE(review): the trailing 1.0e6 unit-conversion factor is taken on
        # trust from the original -- verify against the intended output units.
        return clight_pc_s**2/(4.0*np.pi*gnewt_pc3_msun_s2)*1/d_l*np.divide(1., beta_s)*1.0e6
| StarcoderdataPython |
18804 | <reponame>klarman-cell-observatory/cirrocumulus-app-engine
import os
import sys
sys.path.append('lib')
from flask import Flask, send_from_directory
import cirrocumulus
from cirrocumulus.cloud_firestore_native import CloudFireStoreNative
from cirrocumulus.api import blueprint
from cirrocumulus.envir import CIRRO_AUTH_CLIENT_ID, CIRRO_AUTH, CIRRO_DATABASE, CIRRO_DATASET_PROVIDERS
from cirrocumulus.google_auth import GoogleAuth
from cirrocumulus.no_auth import NoAuth
from cirrocumulus.util import add_dataset_providers
# Static assets of the bundled single-page client, shipped inside the package.
client_path = os.path.join(cirrocumulus.__path__[0], 'client')

# If `entrypoint` is not defined in app.yaml, App Engine will look for an app
# called `app` in `main.py`.
app = Flask(__name__, static_folder=client_path, static_url_path='')
app.register_blueprint(blueprint, url_prefix='/api')


@app.route('/')
def root():
    """Serve the single-page client application."""
    return send_from_directory(client_path, "index.html")


# Use Google OAuth when a client id is configured; otherwise run without auth.
if os.environ.get(CIRRO_AUTH_CLIENT_ID) is not None:
    app.config[CIRRO_AUTH] = GoogleAuth(os.environ.get(CIRRO_AUTH_CLIENT_ID))
else:
    app.config[CIRRO_AUTH] = NoAuth()

# Persist app state in Cloud Firestore and register the dataset backends.
app.config[CIRRO_DATABASE] = CloudFireStoreNative()
os.environ[CIRRO_DATASET_PROVIDERS] = ','.join(['cirrocumulus.zarr_dataset.ZarrDataset',
                                                'cirrocumulus.parquet_dataset.ParquetDataset'])
add_dataset_providers()

if __name__ == '__main__':
    # Local development server only; App Engine supplies its own entrypoint.
    app.run(host='127.0.0.1', port=5000, debug=True)
| StarcoderdataPython |
133383 | from ScopeFoundry.data_browser import DataBrowser, HyperSpectralBaseView
import numpy as np
class HyperSpecNPZView(HyperSpectralBaseView):
    """DataBrowser view for "*_spec_scan.npz" hyperspectral scan files."""

    name = 'hyperspec_npz'

    def is_file_supported(self, fname):
        # Accept only spectrum-scan npz files.
        return "_spec_scan.npz" in fname

    def load_data(self, fname):
        """Load the npz arrays and hand them to the base-view plotting."""
        self.dat = np.load(fname)
        self.spec_map = self.dat['spec_map']
        self.integrated_count_map = self.dat['integrated_count_map']
        # Attributes consumed by HyperSpectralBaseView:
        self.hyperspec_data = self.spec_map
        self.display_image = self.integrated_count_map
        self.spec_x_array = self.dat['wls']

    def scan_specific_setup(self):
        """Label the spectrum plot axes."""
        self.spec_plot.setLabel('left', 'Intensity', units='counts')
        self.spec_plot.setLabel('bottom', 'Wavelength', units='nm')
def spectral_median(spec, wls, count_min=200):
    """Return the spectral-median wavelength of spectrum *spec*.

    The median is the wavelength at which the cumulative intensity first
    reaches half of the total.  If the total count does not exceed
    *count_min*, the spectrum is considered too weak and NaN is returned.

    Parameters
    ----------
    spec : array_like
        Intensity per wavelength bin.
    wls : array_like
        Wavelength axis, same length as *spec*.
    count_min : int, optional
        Minimum total count required for a valid result.
    """
    int_spec = np.cumsum(spec)
    total_sum = int_spec[-1]
    if total_sum > count_min:
        # Index where the cumulative sum reaches half of the total.
        pos = int_spec.searchsorted(0.5 * total_sum)
        wl = wls[pos]
    else:
        # FIX: the np.NaN alias was removed in NumPy 2.0; use np.nan.
        wl = np.nan
    return wl
class HyperSpecSpecMedianNPZView(HyperSpectralBaseView):
    """Variant of the npz scan view whose display image is the per-pixel
    spectral-median wavelength rather than the integrated counts."""

    name = 'hyperspec_spec_median_npz'

    def is_file_supported(self, fname):
        # Accept only spectrum-scan npz files.
        return "_spec_scan.npz" in fname

    def load_data(self, fname):
        """Load the npz arrays and compute the spectral-median map."""
        self.dat = np.load(fname)
        self.spec_map = self.dat['spec_map']
        self.wls = self.dat['wls']
        self.integrated_count_map = self.dat['integrated_count_map']
        # Median wavelength of each pixel's spectrum (axis 2 is wavelength);
        # count_min=0 so every pixel gets a value.
        self.spec_median_map = np.apply_along_axis(spectral_median, 2,
                                                   self.spec_map[:,:,:],
                                                   self.wls, 0)
        # Attributes consumed by HyperSpectralBaseView:
        self.hyperspec_data = self.spec_map
        self.display_image = self.spec_median_map
        self.spec_x_array = self.wls

    def scan_specific_setup(self):
        """Label the spectrum plot axes."""
        self.spec_plot.setLabel('left', 'Intensity', units='counts')
        self.spec_plot.setLabel('bottom', 'Wavelength', units='nm')
if __name__ == '__main__':
    import sys
    # Launch the DataBrowser with the npz view registered.
    # NOTE(review): HyperSpecSpecMedianNPZView is defined above but never
    # loaded -- confirm whether it should be registered here too.
    app = DataBrowser(sys.argv)
    app.load_view(HyperSpecNPZView(app))
    sys.exit(app.exec_())
4842911 | <reponame>CITS5206/Precision-Farming<gh_stars>1-10
import csv
import datetime
import re
from typing import final
# Today's date (ISO yyyy-mm-dd), appended to every input/output file name.
datetoday=str(datetime.date.today())
# Input text files produced by the data reader, and the CSV output directory.
sensortextpath='./Archive/Code/Data_Reader/Textfile/Dualemdata'+datetoday+'.txt'
gpstextpath='./Archive/Code/Data_Reader/Textfile/GPSdata'+datetoday+'.txt'
csvpath= './Archive/Code/Data_Reader/CSVfile/'
class creatCSVfile:
    """Convert the DUALEM sensor and GPS text dumps into dated CSV files.

    Reads the two text files written by the reader program, reassembles the
    sensor records (one logical record spans four text lines) and writes
    three CSVs into `csvpath`: a sensor file, a GPS file and a merged
    metadata file, each with today's date in the file name.
    """

    # Column headers for the sensor CSV, also reused by the metadata CSV.
    SENSOR_HEADER = ('Timestamp [HhMmSs]', 'HCP conductivity of 0.5m array [mS/m]', 'HCP inphase of 0.5m array [ppt]', 'PRP conductivity of 0.5m array [mS/m]', 'PRP inphase of 0.5m array [ppt]', 'HCP conductivity of 1m array [mS/m]', 'HCP inphase of 1m array [ppt]', 'PRP conductivity of 1m array [mS/m]', 'PRP inphase of 1m array [ppt]', 'Voltage [V]', 'Temperature [deg]', 'Pitch [deg]', 'Roll [deg]', 'Acceleration X [gal]', 'Acceleration Y [gal]', 'Acceleration Z [gal]', 'Magnetic field X [nT]', 'Magnetic field Y [nT]', 'Magnetic field Z [nT]', 'Temperature [deg]')

    def readtxtfile(self):
        """Parse both text files and write the sensor, GPS and metadata CSVs."""
        # --- sensor text: strip list formatting and split into fields -------
        with open(sensortextpath, 'r') as sensor_file:
            sensor_list = [line.strip().replace('[', '').replace(']', '').replace("'", '').split(',')
                           for line in sensor_file]
        # One sensor record spans 4 consecutive rows; the first starts 'H'.
        data_list = []
        for i in range(len(sensor_list)):
            if sensor_list[i][0] == 'H':
                data_list.append(sensor_list[i][1:] + sensor_list[i + 1][2:]
                                 + sensor_list[i + 2][1:] + sensor_list[i + 3][1:])
        # FIX: csv output files must be opened with newline='' per the csv
        # module documentation (prevents blank rows on Windows).
        with open(csvpath + 'DUALEMdata' + datetoday + '.csv', 'w', newline='') as out_file:
            writer = csv.writer(out_file)
            writer.writerow(self.SENSOR_HEADER)
            writer.writerows(data_list)
        # --- GPS text: every second line holds "lat lon timestamp,..." ------
        with open(gpstextpath, 'r') as gps_file:
            gps_data = gps_file.readlines()
        gps_list = []
        for i in range(len(gps_data)):
            if i % 2 != 0:
                temp = gps_data[i].strip().split(',')
                gps_list.append(temp[0].split(' '))
        with open(csvpath + 'GPSdata' + datetoday + '.csv', 'w', newline='') as out_file:
            writer = csv.writer(out_file)
            writer.writerow(('Latitute', 'Lognigtute', 'TimeStamp'))
            writer.writerows(gps_list)
        # --- merged metadata: pair GPS rows with sensor records -------------
        # FIX: was hard-coded to exactly 10 rows; merge as many rows as both
        # sources actually provide (zip truncates to the shorter list).
        final_list = [gps_row[:2] + sensor_row
                      for gps_row, sensor_row in zip(gps_list, data_list)]
        with open(csvpath + 'MetaData' + datetoday + '.csv', 'w', newline='') as outfile:
            writer = csv.writer(outfile)
            writer.writerow(('Latitute', 'Lognigtute') + self.SENSOR_HEADER)
            writer.writerows(final_list)
# Script entry: generate today's CSV files immediately when this module runs.
temp = creatCSVfile()
temp.readtxtfile()
| StarcoderdataPython |
1730349 | from matplotlib import pyplot as plt
import numpy as np
# ---- Feedforward benchmark: raw timings -----------------------------------
# `results` rows are assumed to be thread counts [1, 2, 4, 8, 16] plus a
# final GPU row; columns are matrix sizes (10x10, 100x100, 1000x1000).
# TODO(review): confirm layout against the benchmark that wrote the .npy.
results = np.load("feedforwardtimings.npy")

# Raw Timings Plot Feedforward
plt.figure()
plt.suptitle("Feedforward", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("Timing With Ten by Ten Sized Matrices")
plt.plot(results[:-1,0])
# The GPU result is drawn as a lone point after the CPU thread counts.
plt.scatter([5],results[-1:,0])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,2)
plt.title("Timing With a Hundred by Hundred Sized Matrices")
plt.plot(results[:-1,1])
plt.scatter([5],results[-1:,1])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,3)
plt.title("Timing With a Thousand by a Thousand Sized Matrices")
plt.plot(results[:-1,2])
plt.scatter([5],results[-1:,2])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,4)
plt.title("All Timings In Log Scale")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.yscale("log")
plt.tight_layout()
plt.savefig("feedforward.pgf")
plt.show()
# ---- Feedforward benchmark: speed-ups relative to a single thread ---------
# Relative Timings Feedforward
# 32*256 models the GPU as 32 blocks x 256 threads for the per-thread plots.
thread_counts = np.array([1,2,4,8,16,32*256])
# Speed-up = t(1 thread) / t(n threads); the first row becomes 1.0.
results = results[0,None]/results
plt.figure()
plt.suptitle("Feedforward", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("All Speed Ups")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Speed up ratio on one calculation (log scale)")
plt.yscale("log")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,2)
plt.title("CPU Only Speed Ups")
plt.plot(results[:-1]-1)
plt.xticks(range(-1,6),["","1", "2", "4", "8", "16", ""])
plt.xlabel("Number of threads")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,3)
plt.title("Speed Up Per Thread")
plt.plot((results[1:-1]-1)/thread_counts[1:-1,None])
plt.scatter([4],(results[-1:,0]-1)/thread_counts[-1],color="blue")
plt.scatter([4],(results[-1:,1]-1)/thread_counts[-1], color="green")
plt.scatter([4],(results[-1:,2]-1)/thread_counts[-1], color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=1)
plt.subplot(2,2,4)

def amdahlPortion(speedup,threads):
    """Invert Amdahl's law: parallel fraction implied by `speedup` on `threads` workers."""
    return threads*(speedup-1)/((threads-1)*speedup)

plt.title("Amdahl's Law Calculated Parallelizable Portion")
plt.plot(amdahlPortion(results[1:-1],thread_counts[1:-1,None]))
plt.scatter([4],amdahlPortion(results[-1:,0],thread_counts[-1]),color="blue")
plt.scatter([4],amdahlPortion(results[-1:,1],thread_counts[-1]), color="green")
plt.scatter([4],amdahlPortion(results[-1:,2],thread_counts[-1]), color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Ratio of parallelizable code to total code")
plt.legend(["10x10","100x100","1000x1000"],loc=10)
plt.tight_layout()
plt.savefig("feedforward2.pgf")
plt.show()
# ---- Backpropagation benchmark: raw timings (same layout as feedforward) --
#Backprop time
results = np.load("backproptimings.npy")
#Raw Timings Plot Backpropagation
plt.figure()
plt.suptitle("Backpropagation", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("Timing With Ten by Ten Sized Matrices")
plt.plot(results[:-1,0])
plt.scatter([5],results[-1:,0])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,2)
plt.title("Timing With a Hundred by Hundred Sized Matrices")
plt.plot(results[:-1,1])
plt.scatter([5],results[-1:,1])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,3)
plt.title("Timing With a Thousand by a Thousand Sized Matrices")
plt.plot(results[:-1,2])
plt.scatter([5],results[-1:,2])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,4)
plt.title("All Timings In Log Scale")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.yscale("log")
plt.tight_layout()
plt.savefig("backprop.pgf")
plt.show()
# ---- Backpropagation benchmark: speed-ups relative to a single thread -----
#Relative Timings Backpropagation
results = results[0,None]/results
plt.figure()
# BUG FIX: this figure plots the backpropagation data but was titled
# "Feedforward" (copy-paste from the feedforward section above).
plt.suptitle("Backpropagation", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("All Speed Ups")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Speed up ratio on one calculation (log scale)")
plt.yscale("log")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,2)
plt.title("CPU Only Speed Ups")
plt.plot(results[:-1]-1)
plt.xticks(range(-1,6),["","1", "2", "4", "8", "16", ""])
plt.xlabel("Number of threads")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,3)
plt.title("Speed Up Per Thread")
plt.plot((results[1:-1]-1)/thread_counts[1:-1,None])
plt.scatter([4],(results[-1:,0]-1)/thread_counts[-1],color="blue")
plt.scatter([4],(results[-1:,1]-1)/thread_counts[-1], color="green")
plt.scatter([4],(results[-1:,2]-1)/thread_counts[-1], color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=1)
plt.subplot(2,2,4)
plt.title("Amdahl's Law Calculated Parallelizable Portion")
plt.plot(amdahlPortion(results[1:-1],thread_counts[1:-1,None]))
plt.scatter([4],amdahlPortion(results[-1:,0],thread_counts[-1]),color="blue")
plt.scatter([4],amdahlPortion(results[-1:,1],thread_counts[-1]), color="green")
plt.scatter([4],amdahlPortion(results[-1:,2],thread_counts[-1]), color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Ratio of parallelizable code to total code")
plt.legend(["10x10","100x100","1000x1000"],loc=10)
plt.tight_layout()
# BUG FIX: was "feedforward2.pgf", which silently overwrote the feedforward
# figure saved earlier in this script.
plt.savefig("backprop2.pgf")
plt.show()
| StarcoderdataPython |
1753387 | import unittest
from .timeUtil import *
from .timeBase import *
from .systemProcessingBase import *
class TestTimeUtil(unittest.TestCase):
    """Smoke tests for the timer utilities.

    Identifier glossary (module uses Chinese names): 时间统计 = "time
    statistics", 计时器 = "timer", 延时(x) = sleep x seconds,
    取秒/取毫秒 = elapsed seconds/milliseconds, 开始 = (re)start,
    取耗时 = record a named lap, 取总耗时 = total elapsed time.
    """

    def test_1(self):
        # with 计时器() as t:
        #     延时(1)
        #     print(t.取耗时())
        # Elapsed time accumulates across reads; 开始() restarts the clock.
        t = 时间统计()
        延时(1.22)
        print(t.取秒())
        print(t.取毫秒())
        延时(1.22)
        print(t.取秒())
        print(t.取毫秒())
        t.开始()
        延时(1.22)
        print(t.取秒())
        print(t.取毫秒())
        # Same sequence against the 计时器 ("timer") implementation.
        t = 计时器()
        延时(1.22)
        print(t.取秒())
        print(t.取毫秒())
        延时(1.22)
        print(t.取秒())
        print(t.取毫秒())
        t.开始()
        延时(1.22)
        print(t.取秒())
        print(t.取毫秒())

    def test_4(self):
        # Lap timing for a "shopping" workflow: record named checkpoints
        # ("leave home", "arrive at shop", "purchase"), then read the total.
        with 时间统计("买东西业务") as t:
            延时(1)
            t.取耗时("出门")
            延时(1)
            t.取耗时("到店")
            延时(1)
            t.取耗时("购买")
            print("总耗时", t.取总耗时())

    def test_5(self):
        # Bare context-manager usage ("test elapsed time").
        with 时间统计("测试耗时"):
            延时(1)
| StarcoderdataPython |
1633024 | <reponame>rafaelhn2021/proyecto
# Generated by Django 3.0 on 2021-03-29 21:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django ``makemigrations``, Django 3.0).

    Makes ``InfoPersonalFija.puesto`` optional: a nullable, blankable
    CharField with max length 255.  Auto-generated migrations should not
    be hand-edited beyond comments.
    """

    dependencies = [
        ('declaracion', '0007_secciones_simp'),
    ]

    operations = [
        migrations.AlterField(
            model_name='infopersonalfija',
            name='puesto',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| StarcoderdataPython |
4816764 | <reponame>IOMRC/intake-aodn<gh_stars>1-10
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2020 - 2021, CSIRO
#
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
def make_clim(da, time_res='month', **kwargs):
    """Compute a climatology of *da* grouped by ``time.<time_res>``.

    When a ``time_slice=(start, stop)`` keyword is supplied, the statistics
    are restricted to that time window.  Returns the mean climatology and
    its upper/lower 95% confidence bounds (normal approximation).
    """
    import numpy as np

    # Optionally restrict the record to the requested window.
    if 'time_slice' in kwargs:
        selected = da.sel(time=slice(kwargs['time_slice'][0], kwargs['time_slice'][1]))
    else:
        selected = da
    grouped = selected.groupby('time.' + time_res)
    clim = grouped.mean(dim='time')
    spread = grouped.std(dim='time')
    counts = grouped.count(dim='time')
    # Half-width of the 95% confidence interval of the mean.
    half_width = 1.96 * spread / np.sqrt(counts)
    return clim, clim + half_width, clim - half_width
def time_average(da, dt, var, **kwargs):
    """Resample variable *var* of dataset *da* to period means.

    *dt* is a pandas-style frequency ('M', 'Y') or a 3-letter season code
    ('DJF', 'MAM', 'JJA', 'SON'), in which case yearly seasonal means are
    returned.  Requires the keyword ``ignore_inc`` (bool): True keeps
    incomplete periods at the record ends, False masks them with NaN.
    NOTE(review): ``kwargs['ignore_inc']`` is read unconditionally, so the
    keyword is effectively mandatory -- confirm the intended API.
    """
    import pandas as pd  #
    # dt specifies the type of resampling and can take values of 'M', 'Y' or 'DJF' for season
    if len(dt) == 3:
        # Seasonal path: quarterly resampling anchored on the season's last month.
        quarters = {'DJF': 'Q-Feb', 'MAM': 'Q-May', 'JJA': 'Q-Aug', 'SON': 'Q-Nov'}
        mth_num = {'DJF': 2, 'MAM': 5, 'JJA': 8, 'SON': 11}
        m = mth_num[dt]
        dt_q = quarters[dt]
        t_unit = 'Y'
        # Method 1: ignores incomplete seasons
        if kwargs['ignore_inc']:
            avg_da = da[var].resample(time=dt_q).mean(skipna=True)
            # Keep only the quarters that end on this season's anchor month.
            avg_da = avg_da.sel(time=avg_da['time.month'] == m).groupby('time.year').mean()
        else:
            # Method2: replaces incomplete seasons with Na
            avg_da = da[var].resample(time='1M').mean()
            # A 3-month centred rolling mean requiring all 3 months masks
            # seasons that are not fully covered.
            avg_da = da.where(avg_da.time.dt.season == dt).rolling(min_periods=3, center=True, time=3).mean()
            avg_da = avg_da.groupby('time.year').mean('time')[var]
    else:
        # Monthly/yearly path: plain resample, then truncate the time labels
        # to the period unit (e.g. yearly means stamped with the year).
        t_unit = dt
        dt = '1' + dt
        avg_da = da[var].resample(time=dt).mean(skipna=True)
        avg_da['time'] = avg_da.time.astype('datetime64[' + t_unit + ']')
        if not kwargs['ignore_inc']:
            # Mask the first/last period if the record does not fully cover it.
            # NOTE(review): ext_time is computed but never used.
            ext_time = avg_da['time'][[0, len(avg_da['time']) - 1]]
            if (da.time[len(da.time) - 1].dt.day.values < 15) or ((da.time[len(da.time) - 1].dt.month.values < 12) and (t_unit == 'Y')):
                avg_da = avg_da.where(avg_da.time != avg_da.time[len(avg_da.time) - 1])
            if avg_da['time'][0] < da['time'][0] - pd.to_timedelta(15, 'D'):
                avg_da = avg_da.where(avg_da.time != avg_da.time[0])
            # last_day = da.time[(da.time + pd.to_timedelta(1,'D')).dt.month != (da.time).dt.month].astype('datetime64[M]')
            # avg_da = avg_da.where((avg_da.time.isin(last_day) & avg_da.time.isin(da.time)))
    return avg_da
def lin_trend(da, coord, deg=1):
    """Fit a linear trend to ``da`` along ``coord`` with a 95% confidence band.

    Parameters
    ----------
    da : xarray.DataArray
        Data to fit.
    coord : str
        Name of the dimension to fit along (typically 'time' or 'year').
    deg : int, optional
        NOTE(review): currently ignored — the fit is always linear (deg=1)
        and the confidence-interval formula below is only valid for a
        straight-line fit, so honouring deg > 1 would need a different CI.

    Returns
    -------
    (f, fit, hci, lci)
        Polyfit coefficients, the fitted line (variable 'linear_fit'),
        and the upper/lower 95% confidence bounds of the fit.
    """
    import xarray as xr
    import numpy as np
    from scipy import stats
    f = da.polyfit(dim=coord, deg=1)
    fit = xr.polyval(da[coord], f)
    fit = fit.rename({'polyfit_coefficients': 'linear_fit'})
    n = len(da[coord])
    # integer pseudo-axis 1..n used in the CI half-width formula
    x2 = xr.DataArray(range(1, n + 1), dims=coord, coords={coord: da[coord]})
    # standard error of the residuals, broadcast along `coord`
    serr = np.sqrt(((da - fit['linear_fit'])**2).sum(dim=coord) / (n - 1)).expand_dims(dim={coord: n})
    # two-sided 95% Student-t critical value
    t = stats.t.ppf(1 - 0.025, n)
    B = np.sqrt(1/n + (x2 - np.mean(x2))**2 / np.sum((x2 - np.mean(x2))**2))
    ci = B * serr * t
    hci = ci + fit
    lci = fit - ci
    return f, fit, hci, lci
| StarcoderdataPython |
1612678 | <reponame>ar90n/kkt<filename>tests/test_commands_download.py
import re
import pytest
from tempfile import TemporaryDirectory
from kkt.commands.download import download
@pytest.mark.parametrize(
    "given, expected",
    [
        # completed kernel -> output reports the saved file path
        (
            {"status": "complete", "failureMessage": None, "user": "user"},
            {"output": r"^save to:/.*/abc.wheel$"},
        ),
        # still-running kernel -> "not completed" message
        (
            {"status": "running", "failureMessage": None, "user": "user"},
            {"output": r"^Kernel has not been completed yet.$"},
        ),
    ],
)
def test_commands_install(
    chshared_datadir, given, expected, cli_runner, kaggle_api, monkeypatch
):
    """Invoke the `download` CLI command against a stubbed Kaggle API and
    match every output line against the expected regex.

    Fixtures (project-provided): `chshared_datadir` — presumably prepares
    and chdirs into the test data dir (confirm against conftest),
    `cli_runner` — click CLI test runner, `kaggle_api` — stub API factory,
    `monkeypatch` — pytest builtin.

    NOTE(review): the function name says "install" but it exercises the
    `download` command — likely a copy/paste leftover.
    """
    api = kaggle_api(**given)
    # route the command under test to the stubbed API object
    monkeypatch.setattr("kkt.commands.kkt_command.get_kaggle_api", lambda: api)
    with TemporaryDirectory() as tmp_dir:
        ret = cli_runner.invoke(download, ["--quiet", str(tmp_dir)])
        for line in ret.output.strip().split("\n"):
            assert re.match(expected["output"], line)
| StarcoderdataPython |
3281966 | # -*- coding: utf-8
"""Module for custom components.
Components in this module:
- :func:`tespy.components.customs.orc_evaporator`
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location tespy/components/customs.py
SPDX-License-Identifier: MIT
"""
import numpy as np
from tespy.components.components import component
from tespy.tools.data_containers import dc_cp
from tespy.tools.data_containers import dc_simple
from tespy.tools.fluid_properties import dh_mix_dpQ
from tespy.tools.fluid_properties import h_mix_pQ
from tespy.tools.fluid_properties import h_mix_pT
from tespy.tools.fluid_properties import s_mix_ph
from tespy.tools.fluid_properties import v_mix_ph
# %%
class orc_evaporator(component):
    r"""Evaporator of the geothermal Organic Rankine Cycle (ORC).
    Generally, the hot side of the geo-fluid from the geothermal wells deliver
    two phases: steam and brine. In order to fully use the energy of the
    geo-fluid, there are 2 inlets at the hot side.
    The ORC evaporator represents counter current evaporators. Both, two hot
    and one cold side of the evaporator, are simulated.
    Equations
    **mandatory equations**
    - :func:`tespy.components.components.component.fluid_func`
    - :func:`tespy.components.customs.orc_evaporator.mass_flow_func`
    - :func:`tespy.components.customs.orc_evaporator.energy_func`
    .. math::
        0 = p_{1,in} \cdot pr1 - p_{1,out}\\
        0 = p_{2,in} \cdot pr2 - p_{2,out}\\
        0 = p_{3,in} \cdot pr3 - p_{3,out}
    - hot side steam :func:`tespy.components.components.component.zeta_func`
    - hot side brine :func:`tespy.components.components.component.zeta_func`
    - working fluid :func:`tespy.components.components.component.zeta_func`
    **mandatory equations at outlet of the steam
    from geothermal heat source side**
    .. math::
        0 = h_{1,out} - h\left(p, x=0 \right)\\
        x: \text{vapour mass fraction}
    **mandatory equations at outlet of the working fluid
    of being evaporated side**
    .. math::
        0 = h_{3,out} - h\left(p, x=1 \right)\\
        x: \text{vapour mass fraction}
    Inlets/Outlets
    - in1, in2, in3 (index 1: steam from geothermal heat source,
      index 2: brine from geothermal heat source,
      index 3: working fluid of being evaporated)
    - out1, out2, out3 (index 1: steam from geothermal heat source,
      index 2: brine from geothermal heat source,
      index 3: working fluid of being evaporated)
    Image
    .. image:: _images/orc_evaporator.svg
       :scale: 100 %
       :alt: alternative text
       :align: center
    Parameters
    ----------
    label : str
        The label of the component.
    design : list
        List containing design parameters (stated as String).
    offdesign : list
        List containing offdesign parameters (stated as String).
    design_path: str
        Path to the components design case.
    local_offdesign : boolean
        Treat this component in offdesign mode in a design calculation.
    local_design : boolean
        Treat this component in design mode in an offdesign calculation.
    char_warnings: boolean
        Ignore warnings on default characteristics usage for this component.
    printout: boolean
        Include this component in the network's results printout.
    Q : float/tespy.tools.data_containers.dc_cp
        Heat transfer, :math:`Q/\text{W}`.
    pr1 : float/tespy.tools.data_containers.dc_cp
        Outlet to inlet pressure ratio at hot side 1 (steam),
        :math:`pr/1`.
    pr2 : float/tespy.tools.data_containers.dc_cp
        Outlet to inlet pressure ratio at hot side 2 (brine),
        :math:`pr/1`.
    pr3 : float/tespy.tools.data_containers.dc_cp
        Outlet to inlet pressure ratio at cold side (working fluid),
        :math:`pr/1`.
    zeta1 : float/tespy.tools.data_containers.dc_cp
        Geometry independent friction coefficient at hot side 1 (steam),
        :math:`\frac{\zeta}{D^4}/\frac{1}{\text{m}^4}`.
    zeta2 : float/tespy.tools.data_containers.dc_cp
        Geometry independent friction coefficient at hot side 2 (brine),
        :math:`\frac{\zeta}{D^4}/\frac{1}{\text{m}^4}`.
    zeta3 : float/tespy.tools.data_containers.dc_cp
        Geometry independent friction coefficient at cold side (working fluid),
        :math:`\frac{\zeta}{D^4}/\frac{1}{\text{m}^4}`.
    subcooling : bool
        Enable/disable subcooling at outlet of the hot side 1,
        default value: disabled (False).
    overheating : bool
        Enable/disable overheating at outlet of the cold side,
        default value: disabled (False).
    Note
    ----
    The ORC evaporator has an additional equation for enthalpy at the outlet of
    the geothermal steam: The fluid leaves the component in saturated liquid
    state. If code:`subcooling` is activated (:code:`True`), it is possible to
    specify the enthalpy at the outgoing connection manually.
    Additionally, an equation for enthalpy at the outlet of the working fluid
    is imposed: It leaves the component in saturated gas state. If
    :code:`overheating` is enabled (:code:`True`), it is possible to specify
    the enthalpy at the outgoing connection manually.
    Example
    -------
    A two-phase geo-fluid is used as the heat source for evaporating the
    working fluid. We calculate the mass flow of the working fluid with known
    steam and brine mass flow.
    >>> from tespy.connections import connection
    >>> from tespy.networks import network
    >>> from tespy.components import source, sink
    >>> from tespy.components.customs import orc_evaporator
    >>> fluids = ['water', 'Isopentane']
    >>> nw = network(fluids=fluids, iterinfo=False)
    >>> nw.set_attr(p_unit='bar', T_unit='C', h_unit='kJ / kg')
    >>> evaporator = orc_evaporator('geothermal orc evaporator')
    >>> evaporator.component()
    'orc_evaporator'
    >>> source_wf = source('working fluid source')
    >>> sink_wf = sink('working fluid sink')
    >>> source_s = source('steam source')
    >>> source_b = source('brine source')
    >>> sink_s = sink('steam sink')
    >>> sink_b = sink('brine sink')
    >>> eva_wf_in = connection(source_wf, 'out1', evaporator, 'in3')
    >>> eva_wf_out = connection(evaporator, 'out3', sink_wf, 'in1')
    >>> eva_steam_in = connection(source_s, 'out1', evaporator, 'in1')
    >>> eva_sink_s = connection(evaporator, 'out1', sink_s, 'in1')
    >>> eva_brine_in = connection(source_b, 'out1', evaporator, 'in2')
    >>> eva_sink_b = connection(evaporator, 'out2', sink_b, 'in1')
    >>> nw.add_conns(eva_wf_in, eva_wf_out)
    >>> nw.add_conns(eva_steam_in, eva_sink_s)
    >>> nw.add_conns(eva_brine_in, eva_sink_b)
    The orc working fluids leaves the evaporator in saturated steam state, the
    geothermal steam leaves the component in saturated liquid state. We imply
    the state of geothermal steam and brine with the corresponding mass flow as
    well as the working fluid's state at the evaporator inlet. The pressure
    ratio is specified for each of the three streams.
    >>> evaporator.set_attr(pr1=0.95, pr2=0.98, pr3=0.99)
    >>> eva_wf_in.set_attr(T=111, p=11,
    ... fluid={'water': 0, 'Isopentane': 1})
    >>> eva_steam_in.set_attr(T=147, p=4.3, m=20,
    ... fluid={'water': 1, 'Isopentane': 0})
    >>> eva_brine_in.set_attr(T=147, p=10.2, m=190,
    ... fluid={'water': 1, 'Isopentane': 0})
    >>> eva_sink_b.set_attr(T=117)
    >>> nw.solve(mode='design')
    Check the state of the steam and working fluid outlet:
    >>> eva_wf_out.x.val
    1.0
    >>> eva_sink_s.x.val
    0.0
    """
    @staticmethod
    def component():
        # identifier string used by the TESPy framework
        return 'orc_evaporator'
    @staticmethod
    def attr():
        # parameter containers: dc_cp for component properties with bounds,
        # dc_simple for plain values/flags
        return {
            'Q': dc_cp(max_val=0),
            'pr1': dc_cp(max_val=1), 'pr2': dc_cp(max_val=1),
            'pr3': dc_cp(max_val=1),
            'zeta1': dc_cp(min_val=0), 'zeta2': dc_cp(min_val=0),
            'zeta3': dc_cp(min_val=0),
            'subcooling': dc_simple(val=False),
            'overheating': dc_simple(val=False),
            'SQ1': dc_simple(), 'SQ2': dc_simple(), 'SQ3': dc_simple(),
            'Sirr': dc_simple()
        }
    @staticmethod
    def inlets():
        return ['in1', 'in2', 'in3']
    @staticmethod
    def outlets():
        return ['out1', 'out2', 'out3']
    def comp_init(self, nw):
        """Set up equation count, residual vector and jacobian for network nw."""
        component.comp_init(self, nw)
        # number of mandatory equations for
        # fluid balance: num_fl * 3
        # mass flow: 3
        # energy balance: 1
        self.num_eq = self.num_nw_fluids * 3 + 3 + 1
        # enthalpy hot side 1 outlet (if not subcooling): 1
        if self.subcooling.val is False:
            self.num_eq += 1
        # enthalpy cold side outlet (if not overheating): 1
        if self.overheating.val is False:
            self.num_eq += 1
        # one extra equation per user-specified parameter
        for var in [self.Q, self.pr1, self.pr2, self.pr3,
                    self.zeta1, self.zeta2, self.zeta3, ]:
            if var.is_set is True:
                self.num_eq += 1
        # jacobian layout: (equation, connection, network variable);
        # connections 0-2 are the inlets, 3-5 the outlets
        self.jacobian = np.zeros((
            self.num_eq,
            self.num_i + self.num_o + self.num_vars,
            self.num_nw_vars))
        self.residual = np.zeros(self.num_eq)
        # fluid and mass-flow derivatives are constant, precompute them once
        pos = self.num_nw_fluids * 3
        self.jacobian[0:pos] = self.fluid_deriv()
        self.jacobian[pos:pos + 3] = self.mass_flow_deriv()
    def equations(self):
        r"""Calculate residual vector with results of equations."""
        k = 0
        ######################################################################
        # equations for fluid balance
        self.residual[k:k + self.num_nw_fluids * 3] = self.fluid_func()
        k += self.num_nw_fluids * 3
        ######################################################################
        # equations for mass flow balance
        self.residual[k:k + 3] = self.mass_flow_func()
        k += 3
        ######################################################################
        # equations for energy balance
        self.residual[k] = self.energy_func()
        k += 1
        ######################################################################
        # equations for specified heat transfer
        if self.Q.is_set:
            self.residual[k] = (
                self.inl[2].m.val_SI * (
                    self.outl[2].h.val_SI - self.inl[2].h.val_SI) - self.Q.val)
            k += 1
        ######################################################################
        # equations for specified pressure ratio at hot side 1
        if self.pr1.is_set:
            self.residual[k] = (
                self.pr1.val * self.inl[0].p.val_SI -
                self.outl[0].p.val_SI)
            k += 1
        ######################################################################
        # equations for specified pressure ratio at hot side 2
        if self.pr2.is_set:
            self.residual[k] = (
                self.pr2.val * self.inl[1].p.val_SI -
                self.outl[1].p.val_SI)
            k += 1
        ######################################################################
        # equations for specified pressure ratio at cold side
        if self.pr3.is_set:
            self.residual[k] = (
                self.pr3.val * self.inl[2].p.val_SI -
                self.outl[2].p.val_SI)
            k += 1
        ######################################################################
        # equations for specified zeta at hot side 1
        if self.zeta1.is_set:
            self.residual[k] = self.zeta_func(
                zeta='zeta1', inconn=0, outconn=0)
            k += 1
        ######################################################################
        # equations for specified zeta at hot side 2
        if self.zeta2.is_set:
            self.residual[k] = self.zeta_func(
                zeta='zeta2', inconn=1, outconn=1)
            k += 1
        ######################################################################
        # equations for specified zeta at cold side
        if self.zeta3.is_set:
            self.residual[k] = self.zeta_func(
                zeta='zeta3', inconn=2, outconn=2)
            k += 1
        ######################################################################
        # equation for saturated liquid at hot side 1 outlet
        if self.subcooling.val is False:
            o1 = self.outl[0].to_flow()
            self.residual[k] = o1[2] - h_mix_pQ(o1, 0)
            k += 1
        ######################################################################
        # equation for saturated gas at cold side outlet
        if self.overheating.val is False:
            o3 = self.outl[2].to_flow()
            self.residual[k] = o3[2] - h_mix_pQ(o3, 1)
            k += 1
    def derivatives(self, increment_filter):
        r"""Calculate matrix of partial derivatives for given equations.

        Equation order must match :py:meth:`equations` exactly, as both
        share the running index ``k``. Variable index convention in the
        jacobian's last axis: 0 = mass flow, 1 = pressure, 2 = enthalpy.
        """
        ######################################################################
        # derivatives fluid and mass balance are static
        k = self.num_nw_fluids * 3 + 3
        ######################################################################
        # derivatives for energy balance equation
        # mat_deriv += self.energy_deriv()
        for i in range(3):
            self.jacobian[k, i, 0] = (
                self.outl[i].h.val_SI - self.inl[i].h.val_SI)
            self.jacobian[k, i, 2] = -self.inl[i].m.val_SI
            self.jacobian[k, i + 3, 2] = self.inl[i].m.val_SI
        k += 1
        ######################################################################
        # derivatives for specified heat transfer
        if self.Q.is_set:
            self.jacobian[k, 2, 0] = (
                self.outl[2].h.val_SI - self.inl[2].h.val_SI)
            self.jacobian[k, 2, 2] = -self.inl[2].m.val_SI
            self.jacobian[k, 5, 2] = self.inl[2].m.val_SI
            k += 1
        ######################################################################
        # derivatives for specified pressure ratio at hot side 1
        if self.pr1.is_set:
            self.jacobian[k, 0, 1] = self.pr1.val
            self.jacobian[k, 3, 1] = -1
            k += 1
        ######################################################################
        # derivatives for specified pressure ratio at hot side 2
        if self.pr2.is_set:
            self.jacobian[k, 1, 1] = self.pr2.val
            self.jacobian[k, 4, 1] = -1
            k += 1
        ######################################################################
        # derivatives for specified pressure ratio at cold side
        if self.pr3.is_set:
            self.jacobian[k, 2, 1] = self.pr3.val
            self.jacobian[k, 5, 1] = -1
            k += 1
        ######################################################################
        # derivatives for specified zeta at hot side 1
        # (numeric derivatives; increment_filter skips near-constant entries)
        if self.zeta1.is_set:
            f = self.zeta_func
            if not increment_filter[0, 0]:
                self.jacobian[k, 0, 0] = self.numeric_deriv(
                    f, 'm', 0, zeta='zeta1', inconn=0, outconn=0)
            if not increment_filter[0, 1]:
                self.jacobian[k, 0, 1] = self.numeric_deriv(
                    f, 'p', 0, zeta='zeta1', inconn=0, outconn=0)
            if not increment_filter[0, 2]:
                self.jacobian[k, 0, 2] = self.numeric_deriv(
                    f, 'h', 0, zeta='zeta1', inconn=0, outconn=0)
            if not increment_filter[3, 1]:
                self.jacobian[k, 3, 1] = self.numeric_deriv(
                    f, 'p', 3, zeta='zeta1', inconn=0, outconn=0)
            if not increment_filter[3, 2]:
                self.jacobian[k, 3, 2] = self.numeric_deriv(
                    f, 'h', 3, zeta='zeta1', inconn=0, outconn=0)
            k += 1
        ######################################################################
        # derivatives for specified zeta at hot side 2
        if self.zeta2.is_set:
            f = self.zeta_func
            if not increment_filter[1, 0]:
                self.jacobian[k, 1, 0] = self.numeric_deriv(
                    f, 'm', 1, zeta='zeta2', inconn=1, outconn=1)
            if not increment_filter[1, 1]:
                self.jacobian[k, 1, 1] = self.numeric_deriv(
                    f, 'p', 1, zeta='zeta2', inconn=1, outconn=1)
            if not increment_filter[1, 2]:
                self.jacobian[k, 1, 2] = self.numeric_deriv(
                    f, 'h', 1, zeta='zeta2', inconn=1, outconn=1)
            if not increment_filter[4, 1]:
                self.jacobian[k, 4, 1] = self.numeric_deriv(
                    f, 'p', 4, zeta='zeta2', inconn=1, outconn=1)
            if not increment_filter[4, 2]:
                self.jacobian[k, 4, 2] = self.numeric_deriv(
                    f, 'h', 4, zeta='zeta2', inconn=1, outconn=1)
            k += 1
        ######################################################################
        # derivatives for specified zeta at cold side
        if self.zeta3.is_set:
            f = self.zeta_func
            if not increment_filter[2, 0]:
                self.jacobian[k, 2, 0] = self.numeric_deriv(
                    f, 'm', 2, zeta='zeta3', inconn=2, outconn=2)
            if not increment_filter[2, 1]:
                self.jacobian[k, 2, 1] = self.numeric_deriv(
                    f, 'p', 2, zeta='zeta3', inconn=2, outconn=2)
            if not increment_filter[2, 2]:
                self.jacobian[k, 2, 2] = self.numeric_deriv(
                    f, 'h', 2, zeta='zeta3', inconn=2, outconn=2)
            if not increment_filter[5, 1]:
                self.jacobian[k, 5, 1] = self.numeric_deriv(
                    f, 'p', 5, zeta='zeta3', inconn=2, outconn=2)
            if not increment_filter[5, 2]:
                self.jacobian[k, 5, 2] = self.numeric_deriv(
                    f, 'h', 5, zeta='zeta3', inconn=2, outconn=2)
            k += 1
        ######################################################################
        # derivatives for saturated liquid at hot side 1 outlet equation
        if self.subcooling.val is False:
            o1 = self.outl[0].to_flow()
            self.jacobian[k, 3, 1] = -dh_mix_dpQ(o1, 0)
            self.jacobian[k, 3, 2] = 1
            k += 1
        ######################################################################
        # derivatives for saturated gas at cold side outlet 3 equation
        if self.overheating.val is False:
            o3 = self.outl[2].to_flow()
            self.jacobian[k, 5, 1] = -dh_mix_dpQ(o3, 1)
            self.jacobian[k, 5, 2] = 1
            k += 1
    def mass_flow_func(self):
        r"""
        Calculate the residual value of mass flow balance equations.
        Returns
        -------
        residual : list
            Vector with residual value for component's mass flow balance.
            .. math::
                0 = \dot{m}_{in,i} - \dot{m}_{out,i} \;
                \forall i \in inlets/outlets
        """
        residual = []
        for i in range(self.num_i):
            residual += [self.inl[i].m.val_SI - self.outl[i].m.val_SI]
        return residual
    def mass_flow_deriv(self):
        r"""
        Calculate the partial derivatives for all mass flow balance equations.
        Returns
        -------
        deriv : list
            Matrix with partial derivatives for the mass flow balance
            equations.
        """
        deriv = np.zeros((self.num_i, 2 * self.num_i, self.num_nw_vars))
        # +1 w.r.t. inlet mass flow, -1 w.r.t. the matching outlet mass flow
        for i in range(self.num_i):
            deriv[i, i, 0] = 1
        for j in range(self.num_i):
            deriv[j, j + self.num_i, 0] = -1
        return deriv
    def energy_func(self):
        r"""
        Equation for heat exchanger energy balance.
        Returns
        -------
        res : float
            Residual value of equation.
            .. math::
                \begin{split}
                res = &
                \dot{m}_{1,in} \cdot \left(h_{1,out} - h_{1,in} \right) \\
                &+ \dot{m}_{2,in} \cdot \left(h_{2,out} - h_{2,in} \right) \\
                &+ \dot{m}_{3,in} \cdot \left(h_{3,out} - h_{3,in} \right)
                \end{split}
        """
        return (
            self.inl[0].m.val_SI * (
                self.outl[0].h.val_SI - self.inl[0].h.val_SI) +
            self.inl[1].m.val_SI * (
                self.outl[1].h.val_SI - self.inl[1].h.val_SI) +
            self.inl[2].m.val_SI * (
                self.outl[2].h.val_SI - self.inl[2].h.val_SI))
    def bus_func(self, bus):
        r"""
        Calculate the value of the bus function.
        Parameters
        ----------
        bus : tespy.connections.bus
            TESPy bus object.
        Returns
        -------
        val : float
            Value of energy transfer :math:`\dot{E}`. This value is passed to
            :py:meth:`tespy.components.components.component.calc_bus_value`
            for value manipulation according to the specified characteristic
            line of the bus.
            .. math::
                \dot{E} = \dot{m}_{3,in} \cdot \left(
                h_{3,out} - h_{3,in} \right)
        """
        i = self.inl[2].to_flow()
        o = self.outl[2].to_flow()
        # flow vector layout: [0] = mass flow, [2] = enthalpy
        val = i[0] * (o[2] - i[2])
        return val
    def bus_deriv(self, bus):
        r"""
        Calculate the matrix of partial derivatives of the bus function.
        Parameters
        ----------
        bus : tespy.connections.bus
            TESPy bus object.
        Returns
        -------
        mat_deriv : ndarray
            Matrix of partial derivatives.
        """
        deriv = np.zeros((1, 6, self.num_nw_vars))
        f = self.calc_bus_value
        # only the working-fluid connections (inlet 2, outlet 5) contribute
        deriv[0, 2, 0] = self.numeric_deriv(f, 'm', 2, bus=bus)
        deriv[0, 2, 2] = self.numeric_deriv(f, 'h', 2, bus=bus)
        deriv[0, 5, 2] = self.numeric_deriv(f, 'h', 5, bus=bus)
        return deriv
    def initialise_source(self, c, key):
        r"""
        Return a starting value for pressure and enthalpy at outlet.
        Parameters
        ----------
        c : tespy.connections.connection
            Connection to perform initialisation on.
        key : str
            Fluid property to retrieve.
        Returns
        -------
        val : float
            Starting value for pressure/enthalpy in SI units.
            .. math::
                val = \begin{cases}
                10 \cdot 10^5 & \text{key = 'p'}\\
                h\left(p, 473.15 \text{K} \right) &
                \text{key = 'h' at outlet 1}\\
                h\left(p, 473.15 \text{K} \right) &
                \text{key = 'h' at outlet 2}\\
                h\left(p, 523.15 \text{K} \right) &
                \text{key = 'h' at outlet 3}
                \end{cases}
        """
        if key == 'p':
            return 10e5
        elif key == 'h':
            flow = c.to_flow()
            if c.source_id == 'out1':
                T = 200 + 273.15
                return h_mix_pT(flow, T)
            elif c.source_id == 'out2':
                T = 200 + 273.15
                return h_mix_pT(flow, T)
            else:
                T = 250 + 273.15
                return h_mix_pT(flow, T)
    def initialise_target(self, c, key):
        r"""
        Return a starting value for pressure and enthalpy at inlet.
        Parameters
        ----------
        c : tespy.connections.connection
            Connection to perform initialisation on.
        key : str
            Fluid property to retrieve.
        Returns
        -------
        val : float
            Starting value for pressure/enthalpy in SI units.
            .. math::
                val = \begin{cases}
                10 \cdot 10^5 & \text{key = 'p'}\\
                h\left(p, 573.15 \text{K} \right) &
                \text{key = 'h' at inlet 1}\\
                h\left(p, 573.15 \text{K} \right) &
                \text{key = 'h' at inlet 2}\\
                h\left(p, 493.15 \text{K} \right) &
                \text{key = 'h' at inlet 3}
                \end{cases}
        """
        if key == 'p':
            return 10e5
        elif key == 'h':
            flow = c.to_flow()
            if c.target_id == 'in1':
                T = 300 + 273.15
                return h_mix_pT(flow, T)
            elif c.target_id == 'in2':
                T = 300 + 273.15
                return h_mix_pT(flow, T)
            else:
                T = 220 + 273.15
                return h_mix_pT(flow, T)
    def calc_parameters(self):
        r"""Postprocessing parameter calculation."""
        # connection information; flow vectors from to_flow():
        # [0] = mass flow, [1] = pressure, [2] = enthalpy
        i1 = self.inl[0].to_flow()
        i2 = self.inl[1].to_flow()
        i3 = self.inl[2].to_flow()
        o1 = self.outl[0].to_flow()
        o2 = self.outl[1].to_flow()
        o3 = self.outl[2].to_flow()
        # specific volume
        v_i1 = v_mix_ph(i1, T0=self.inl[0].T.val_SI)
        v_i2 = v_mix_ph(i2, T0=self.inl[1].T.val_SI)
        v_i3 = v_mix_ph(i3, T0=self.inl[2].T.val_SI)
        v_o1 = v_mix_ph(o1, T0=self.outl[0].T.val_SI)
        v_o2 = v_mix_ph(o2, T0=self.outl[1].T.val_SI)
        v_o3 = v_mix_ph(o3, T0=self.outl[2].T.val_SI)
        # specific entropy
        s_i1 = s_mix_ph(i1, T0=self.inl[0].T.val_SI)
        s_i2 = s_mix_ph(i2, T0=self.inl[1].T.val_SI)
        s_i3 = s_mix_ph(i3, T0=self.inl[2].T.val_SI)
        s_o1 = s_mix_ph(o1, T0=self.outl[0].T.val_SI)
        s_o2 = s_mix_ph(o2, T0=self.outl[1].T.val_SI)
        s_o3 = s_mix_ph(o3, T0=self.outl[2].T.val_SI)
        # component parameters
        self.Q.val = -i3[0] * (o3[2] - i3[2])
        self.pr1.val = o1[1] / i1[1]
        self.pr2.val = o2[1] / i2[1]
        self.pr3.val = o3[1] / i3[1]
        self.zeta1.val = ((i1[1] - o1[1]) * np.pi ** 2 /
                          (8 * i1[0] ** 2 * (v_i1 + v_o1) / 2))
        self.zeta2.val = ((i2[1] - o2[1]) * np.pi ** 2 /
                          (8 * i2[0] ** 2 * (v_i2 + v_o2) / 2))
        self.zeta3.val = ((i3[1] - o3[1]) * np.pi ** 2 /
                          (8 * i3[0] ** 2 * (v_i3 + v_o3) / 2))
        # entropy production per stream and total irreversibility
        self.SQ1.val = self.inl[0].m.val_SI * (s_o1 - s_i1)
        self.SQ2.val = self.inl[1].m.val_SI * (s_o2 - s_i2)
        self.SQ3.val = self.inl[2].m.val_SI * (s_o3 - s_i3)
        self.Sirr.val = self.SQ1.val + self.SQ2.val + self.SQ3.val
        self.check_parameter_bounds()
| StarcoderdataPython |
1776097 | <reponame>Tobdu399/p3wordformatter<filename>formatword_pkg/__init__.py
def format_word(word):
    """Print and return *word* with its first letter upper-cased and the
    rest lower-cased (e.g. "hELLO wOrLd" -> "Hello world").

    Fixes two defects of the original implementation: it crashed with an
    IndexError on an empty string, and — despite the comment promising to
    "Return the formatted word" — it returned the result of print(), i.e.
    None. Callers that ignored the return value are unaffected.
    """
    # str.capitalize() performs exactly this transformation (first char
    # upper, remainder lower) and handles the empty string gracefully.
    formatted_word = word.capitalize()
    print(formatted_word)
    return formatted_word
3304982 | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by <NAME> (<EMAIL>)
# -----------------------------------------------------
"""API of efficientdet detector"""
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from abc import ABC, abstractmethod
import platform
import torch
import numpy as np
from efficientdet.utils import unique, prep_image, prep_frame, bbox_iou
from efficientdet.effdet import EfficientDet, get_efficientdet_config, DetBenchEval, load_checkpoint
from detector.apis import BaseDetector
try:
from apex import amp
has_amp = True
except ImportError:
has_amp = False
#only windows visual studio 2013 ~2017 support compile c/cuda extensions
#If you force to compile extension on Windows and ensure appropriate visual studio
#is intalled, you can try to use these ext_modules.
if platform.system() != 'Windows':
from detector.nms import nms_wrapper
class EffDetDetector(BaseDetector):
    def __init__(self, cfg, opt=None):
        """Create an EfficientDet detector wrapper.

        Args:
            cfg: detector configuration mapping; keys read here are
                INP_DIM, NMS_THRES, CONFIDENCE and NUM_CLASSES.
            opt: options object; ``opt.detector`` names the efficientdet
                variant (also selects the weight file), and — judging from
                the other methods — ``opt.gpus``/``opt.device`` control
                device placement.
        """
        super(EffDetDetector, self).__init__()
        self.detector_cfg = cfg
        self.detector_opt = opt
        # model architecture config and weight path are both derived from
        # the detector name passed on the command line
        self.model_cfg = get_efficientdet_config(opt.detector)
        self.model_weights = 'detector/efficientdet/weights/'+opt.detector+'.pth'
        #Input image dimension, uses model default if empty
        self.inp_dim = cfg.get('INP_DIM', None) if cfg.get('INP_DIM', None) is not None else self.model_cfg.image_size
        self.nms_thres = cfg.get('NMS_THRES', 0.6)
        self.confidence = cfg.get('CONFIDENCE', 0.05)
        self.num_classes = cfg.get('NUM_CLASSES', 80)
        # the network itself is built lazily by load_model() on first use
        self.model = None
    def load_model(self):
        """Build the EfficientDet network, load its checkpoint and wrap it
        in DetBenchEval; place it on the configured device(s) and enable
        AMP mixed precision when the apex package is installed."""
        args = self.detector_opt
        net = EfficientDet(self.model_cfg)
        load_checkpoint(net, self.model_weights)
        self.model = DetBenchEval(net, self.model_cfg)
        if args:
            if len(args.gpus) > 1:
                # multi-GPU: optional AMP, then replicate via DataParallel
                if has_amp:
                    print('Using AMP mixed precision.')
                    self.model = amp.initialize(self.model, opt_level='O1')
                else:
                    print('AMP not installed, running network in FP32.')
                self.model = torch.nn.DataParallel(self.model, device_ids=args.gpus).to(args.device)
            else:
                # single device (CPU or one GPU); note AMP is not applied
                # in this branch
                self.model.to(args.device)
        else:
            # no options object supplied: default to CUDA, AMP if available
            if has_amp:
                print('Using AMP mixed precision.')
                self.model = amp.initialize(self.model, opt_level='O1')
            else:
                print('AMP not installed, running network in FP32.')
            self.model.cuda()
        # switch the underlying network (not the DetBenchEval/DataParallel
        # wrapper) to inference mode
        net.eval()
def image_preprocess(self, img_source):
"""
Pre-process the img before fed to the object detection network
Input: image name(str) or raw image data(ndarray or torch.Tensor,channel GBR)
Output: pre-processed image data(torch.FloatTensor,(1,3,h,w))
"""
if isinstance(img_source, str):
img, orig_img, im_dim_list = prep_image(img_source, self.inp_dim)
elif isinstance(img_source, torch.Tensor) or isinstance(img_source, np.ndarray):
img, orig_img, im_dim_list = prep_frame(img_source, self.inp_dim)
else:
raise IOError('Unknown image source type: {}'.format(type(img_source)))
return img
    def images_detection(self, imgs, orig_dim_list):
        """
        Feed the img data into object detection network and
        collect bbox w.r.t original image size
        Input: imgs(torch.FloatTensor,(b,3,h,w)): pre-processed mini-batch image input
               orig_dim_list(torch.FloatTensor, (b,(w,h,w,h))): original mini-batch image size
        Output: dets(torch.cuda.FloatTensor,(n,(batch_idx,x1,y1,x2,y2,c,s,idx of cls))): human detection results
        """
        args = self.detector_opt
        # lazy initialisation: the network is built on the first call
        if not self.model:
            self.load_model()
        with torch.no_grad():
            imgs = imgs.to(args.device) if args else imgs.cuda()
            # per-image scale factor that maps network-input coordinates
            # back to the original image size (inverse of the resize ratio)
            scaling_factors = torch.FloatTensor([1./min(self.inp_dim / orig_dim[0], self.inp_dim / orig_dim[1]) for orig_dim in orig_dim_list]).view(-1, 1)
            scaling_factors = scaling_factors.to(args.device) if args else scaling_factors.cuda()
            prediction = self.model(imgs, scaling_factors)
            #do nms to the detection results, only human category is left
            dets = self.dynamic_get_results(prediction, self.confidence,
                                            self.num_classes, nms=True,
                                            nms_conf=self.nms_thres)
            # int 0 is the "nothing detected" sentinel used by get_results
            if isinstance(dets, int) or dets.shape[0] == 0:
                return 0
            dets = dets.cpu()
            # clip every box to its own image's bounds; column 0 holds the
            # batch index used to pick the matching original dimensions
            orig_dim_list = torch.index_select(orig_dim_list, 0, dets[:, 0].long())
            for i in range(dets.shape[0]):
                dets[i, [1, 3]] = torch.clamp(dets[i, [1, 3]], 0.0, orig_dim_list[i, 0])
                dets[i, [2, 4]] = torch.clamp(dets[i, [2, 4]], 0.0, orig_dim_list[i, 1])
            return dets
def dynamic_get_results(self, prediction, confidence, num_classes, nms=True, nms_conf=0.4):
prediction_bak = prediction.clone()
dets = self.get_results(prediction.clone(), confidence, num_classes, nms, nms_conf)
if isinstance(dets, int):
return dets
if dets.shape[0] > 100:
nms_conf -= 0.05
dets = self.get_results(prediction_bak.clone(), confidence, num_classes, nms, nms_conf)
return dets
def get_results(self, prediction, confidence, num_classes, nms=True, nms_conf=0.4):
args = self.detector_opt
#prediction: (batchsize, num of objects, (xc,yc,w,h,box confidence, 80 class scores))
conf_mask = (prediction[:, :, 4] > confidence).float().float().unsqueeze(2)
prediction = prediction * conf_mask
try:
ind_nz = torch.nonzero(prediction[:,:,4]).transpose(0,1).contiguous()
except:
return 0
#the 3rd channel of prediction: (xmin,ymin,w,h)->(x1,y1,x2,y2)
box_a = prediction.new(prediction.shape)
box_a[:,:,0] = prediction[:,:,0]
box_a[:,:,1] = prediction[:,:,1]
box_a[:,:,2] = prediction[:,:,0] + prediction[:,:,2]
box_a[:,:,3] = prediction[:,:,1] + prediction[:,:,3]
prediction[:,:,:4] = box_a[:,:,:4]
batch_size = prediction.size(0)
output = prediction.new(1, prediction.size(2) + 1)
write = False
num = 0
for ind in range(batch_size):
#select the image from the batch
image_pred = prediction[ind]
#Get the class having maximum score, and the index of that class
#Get rid of num_classes softmax scores
#Add the class index and the class score of class having maximum score
max_conf, max_conf_score = torch.max(image_pred[:,5:5+ num_classes], 1)
max_conf = max_conf.float().unsqueeze(1)
max_conf_score = max_conf_score.float().unsqueeze(1)
seq = (image_pred[:,:5], max_conf, max_conf_score)
#image_pred:(n,(x1,y1,x2,y2,c,s,idx of cls))
image_pred = torch.cat(seq, 1)
#Get rid of the zero entries
non_zero_ind = (torch.nonzero(image_pred[:,4]))
image_pred_ = image_pred[non_zero_ind.squeeze(),:].view(-1,7)
#Get the various classes detected in the image
try:
img_classes = unique(image_pred_[:,-1])
except:
continue
#WE will do NMS classwise
#print(img_classes)
for cls in img_classes:
if cls != 0:
continue
#get the detections with one particular class
cls_mask = image_pred_*(image_pred_[:,-1] == cls).float().unsqueeze(1)
class_mask_ind = torch.nonzero(cls_mask[:,-2]).squeeze()
image_pred_class = image_pred_[class_mask_ind].view(-1,7)
#sort the detections such that the entry with the maximum objectness
#confidence is at the top
conf_sort_index = torch.sort(image_pred_class[:,4], descending = True )[1]
image_pred_class = image_pred_class[conf_sort_index]
idx = image_pred_class.size(0)
#if nms has to be done
if nms:
if platform.system() != 'Windows':
#We use faster rcnn implementation of nms (soft nms is optional)
nms_op = getattr(nms_wrapper, 'nms')
#nms_op input:(n,(x1,y1,x2,y2,c))
#nms_op output: input[inds,:], inds
_, inds = nms_op(image_pred_class[:,:5], nms_conf)
image_pred_class = image_pred_class[inds]
else:
# Perform non-maximum suppression
max_detections = []
while image_pred_class.size(0):
# Get detection with highest confidence and save as max detection
max_detections.append(image_pred_class[0].unsqueeze(0))
# Stop if we're at the last detection
if len(image_pred_class) == 1:
break
# Get the IOUs for all boxes with lower confidence
ious = bbox_iou(max_detections[-1], image_pred_class[1:], args)
# Remove detections with IoU >= NMS threshold
image_pred_class = image_pred_class[1:][ious < nms_conf]
image_pred_class = torch.cat(max_detections).data
#Concatenate the batch_id of the image to the detection
#this helps us identify which image does the detection correspond to
#We use a linear straucture to hold ALL the detections from the batch
#the batch_dim is flattened
#batch is identified by extra batch column
batch_ind = image_pred_class.new(image_pred_class.size(0), 1).fill_(ind)
seq = batch_ind, image_pred_class
if not write:
output = torch.cat(seq,1)
write = True
else:
out = torch.cat(seq,1)
output = torch.cat((output,out))
num += 1
if not num:
return 0
#output:(n,(batch_ind,x1,y1,x2,y2,c,s,idx of cls))
return output
    def detect_one_img(self, img_name):
        """
        Detect bounding boxes in a single image.
        Input: 'str', full path of the image file.
        Output: list of dicts, '[{"category_id":1,"score":float,"bbox":[x,y,w,h],"image_id":int},...]'
        The output mimics the COCO results format. NOTE: despite earlier docs
        claiming image_id is the full path string, the code below parses the
        numeric part of the file's basename into an int (COCO-style %012d ids);
        non-numeric filenames will raise ValueError.
        Returns None when no detection survives confidence filtering / NMS.
        """
        args = self.detector_opt
        _CUDA = True
        if args:
            if args.gpus[0] < 0:
                # Negative GPU id signals CPU-only mode.
                _CUDA = False
        if not self.model:
            # Lazy-load the detector weights on first use.
            self.load_model()
        if isinstance(self.model, torch.nn.DataParallel):
            # Unwrap DataParallel so the raw module is called directly.
            self.model = self.model.module
        dets_results = []
        #pre-process(scale, normalize, ...) the image
        img, orig_img, img_dim_list = prep_image(img_name, self.inp_dim)
        with torch.no_grad():
            # Duplicate (w, h) into (w, h, w, h) so it can clamp x1,y1,x2,y2 later.
            img_dim_list = torch.FloatTensor([img_dim_list]).repeat(1, 2)
            img = img.to(args.device) if args else img.cuda()
            # Inverse of the letterbox resize factor, one row per image.
            scaling_factor = torch.FloatTensor([1/min(self.inp_dim / orig_dim[0], self.inp_dim / orig_dim[1]) for orig_dim in img_dim_list]).view(-1, 1)
            scaling_factor = scaling_factor.to(args.device) if args else scaling_factor.cuda()
            prediction = self.model(img, scaling_factor)
            #do nms to the detection results, only human category is left
            dets = self.dynamic_get_results(prediction, self.confidence,
                                            self.num_classes, nms=True,
                                            nms_conf=self.nms_thres)
            # dynamic_get_results returns int 0 when nothing was detected.
            if isinstance(dets, int) or dets.shape[0] == 0:
                return None
            dets = dets.cpu()
            # Select the (w, h) row matching each detection's batch index.
            img_dim_list = torch.index_select(img_dim_list, 0, dets[:, 0].long())
            for i in range(dets.shape[0]):
                # Clamp x coords to image width, y coords to image height.
                dets[i, [1, 3]] = torch.clamp(dets[i, [1, 3]], 0.0, img_dim_list[i, 0])
                dets[i, [2, 4]] = torch.clamp(dets[i, [2, 4]], 0.0, img_dim_list[i, 1])
                #write results
                det_dict = {}
                x = float(dets[i, 1])
                y = float(dets[i, 2])
                # Convert corner format (x1,y1,x2,y2) to COCO (x,y,w,h).
                w = float(dets[i, 3] - dets[i, 1])
                h = float(dets[i, 4] - dets[i, 2])
                det_dict["category_id"] = 1
                det_dict["score"] = float(dets[i, 5])
                det_dict["bbox"] = [x, y, w, h]
                det_dict["image_id"] = int(os.path.basename(img_name).split('.')[0])
                dets_results.append(det_dict)
            return dets_results
    def check_detector(self, img_name):
        """
        Run the raw detector on one image and collect COCO-style results,
        WITHOUT the confidence/NMS post-processing used by detect_one_img.
        Intended for COCO evaluation (see the __main__ block below).
        Input: 'str', full path of the image file.
        Output: list of dicts '[{"image_id":int,"bbox":[...],"score":float,"category_id":int},...]'
        image_id is parsed from the numeric basename of the file (COCO %012d
        naming); non-numeric filenames will raise ValueError.
        """
        args = self.detector_opt
        _CUDA = True
        if args:
            if args.gpus[0] < 0:
                # Negative GPU id signals CPU-only mode.
                _CUDA = False
        if not self.model:
            self.load_model()
        if isinstance(self.model, torch.nn.DataParallel):
            self.model = self.model.module
        dets_results = []
        #pre-process(scale, normalize, ...) the image
        img, orig_img, img_dim_list = prep_image(img_name, self.inp_dim)
        with torch.no_grad():
            img_dim_list = torch.FloatTensor([img_dim_list]).repeat(1, 2)
            img = img.to(args.device) if args else img.cuda()
            # Inverse of the letterbox resize factor, one row per image.
            scaling_factor = torch.FloatTensor([1/min(self.inp_dim / orig_dim[0], self.inp_dim / orig_dim[1]) for orig_dim in img_dim_list]).view(-1, 1)
            scaling_factor = scaling_factor.to(args.device) if args else scaling_factor.cuda()
            output = self.model(img, scaling_factor)
            output = output.cpu()
            for index, sample in enumerate(output):
                image_id = int(os.path.basename(img_name).split('.')[0])
                for det in sample:
                    # det layout assumed: [x, y, w, h, score, class] — per the
                    # indexing below; confirm against the model's output format.
                    score = float(det[4])
                    if score < .001:  # stop when below this threshold, scores in descending order
                        break
                    coco_det = dict(
                        image_id=image_id,
                        bbox=det[0:4].tolist(),
                        score=score,
                        category_id=int(det[5]))
                    dets_results.append(coco_det)
            return dets_results
if __name__ == "__main__":
    # COCO val2017 evaluation driver for the detector.
    #run with python detector/effdet_api.py /DATA1/Benchmark/coco/ efficientdet_d0
    # argv[1] = COCO dataset root, argv[2] = detector name.
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval
    from easydict import EasyDict as edict
    from apis import get_detector
    from tqdm import tqdm
    import json
    opt = edict()
    _coco = COCO(sys.argv[1]+'/annotations/instances_val2017.json')
    opt.detector = sys.argv[2]
    # Use GPU 0 when available, otherwise fall back to CPU (-1).
    opt.gpus = [0] if torch.cuda.device_count() >= 1 else [-1]
    opt.device = torch.device("cuda:" + str(opt.gpus[0]) if opt.gpus[0] >= 0 else "cpu")
    image_ids = sorted(_coco.getImgIds())
    det_model = get_detector(opt)
    dets = []
    for entry in tqdm(_coco.loadImgs(image_ids)):
        abs_path = os.path.join(
            sys.argv[1], 'val2017', entry['file_name'])
        det = det_model.check_detector(abs_path)
        if det:
            dets += det
    # Dump raw detections, then reload them through the COCO API for scoring.
    json.dump(dets, open('results.json', 'w'))
    coco_results = _coco.loadRes('results.json')
    coco_eval = COCOeval(_coco, coco_results, 'bbox')
    coco_eval.params.imgIds = image_ids  # score only ids we've used
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
170657 | <reponame>Xingyu-Lin/VCD<filename>VCD/utils/data_utils.py<gh_stars>10-100
import numpy as np
import torch
from torch_geometric.data import Data
import torch_geometric
class PrivilData(Data):
    """
    Encapsulation of multi-graphs for multi-step training.
    ind: 0-(hor-1), type: vsbl or full
    Each graph contains:
        edge_index_{type}_{ind},
        x_{type}_{ind},
        edge_attr_{type}_{ind},
        gt_rwd_{type}_{ind}
        gt_accel_{type}_{ind}
        mesh_mapping_{type}_{ind}
    """
    def __init__(self, has_part=False, has_full=False, **kwargs):
        super(PrivilData, self).__init__(**kwargs)
        # Flags recording which graph variants (partial / full) are present.
        self.has_part = has_part
        self.has_full = has_full
    def __inc__(self, key, value, *args, **kwargs):
        # PyTorch Geometric calls __inc__ to decide the per-graph offset added
        # to index-valued attributes when graphs are collated into a batch.
        if 'edge_index' in key:
            # Edge indices are offset by the node count of the matching x tensor.
            x = key.replace('edge_index', 'x')
            return self[x].size(0)
        elif 'mesh_mapping' in key:
            # add index of mesh matching by
            # NOTE(review): 'partial_pc_mapped_idx' never appears in a key that
            # matched 'mesh_mapping', so replace() is a no-op and the offset is
            # the size of the mapping tensor itself — confirm this is intended.
            x = key.replace('partial_pc_mapped_idx', 'x')
            return self[x].size(0)
        else:
            return super().__inc__(key, value)
class AggDict(dict):
    """Dict subclass that accumulates scalar values under their keys.

    Accepted values are Python ints/floats, single-element numpy arrays or
    numpy scalars, and (when ``is_detach`` is True) torch tensors, which are
    detached and converted to numpy before being summed. Missing keys read
    as 0, so totals can be accumulated without initialization.
    """
    def __init__(self, is_detach=True):
        """
        Aggregate numpy arrays or pytorch tensors.

        :param is_detach: Whether to store detached numpy arrays instead of
            torch tensors (avoids keeping autograd graphs alive).
        """
        # Bug fix: the original called ``super(AggDict).__init__()`` on an
        # *unbound* super object, so ``dict.__init__`` was never invoked.
        super().__init__()
        self.is_detach = is_detach
    def __getitem__(self, item):
        # Missing keys default to 0 so aggregation can start from nothing.
        return self.get(item, 0)
    def add_item(self, key, value):
        """Add ``value`` to the running total stored under ``key``."""
        if self.is_detach and torch.is_tensor(value):
            value = value.detach().cpu().numpy()
        if not isinstance(value, torch.Tensor):
            # Only scalar-like values may be aggregated.
            if isinstance(value, np.ndarray) or isinstance(value, np.number):
                assert value.size == 1
            else:
                assert isinstance(value, int) or isinstance(value, float)
        if key not in self:
            self[key] = value
        else:
            self[key] += value
    def update_by_add(self, src_dict):
        """Accumulate every (key, value) pair of ``src_dict`` into this dict."""
        for key, value in src_dict.items():
            self.add_item(key, value)
    def get_mean(self, prefix, count=1):
        """Return a plain dict of values divided by ``count``, with every key
        prefixed by ``prefix``."""
        avg_dict = {}
        for k, v in self.items():
            avg_dict[prefix + k] = v / count
        return avg_dict
def updateDictByAdd(dict1, dict2):
    """Accumulate the tensor leaf values of ``dict2`` into ``dict1`` in place.

    Both arguments are two-level dicts; every leaf of ``dict2`` must be a
    torch tensor holding a single element. Returns the mutated ``dict1``.
    """
    for outer_key, inner_dict in dict2.items():
        for inner_key, tensor_value in inner_dict.items():
            dict1[outer_key][inner_key] += tensor_value.cpu().item()
    return dict1
def get_index_before_padding(graph_sizes):
    """Return flat indices of the valid (non-padding) entries of a padded batch.

    Graphs of different sizes are padded to the size of the largest one; this
    builds, for each graph, the indices ``[0, gs)`` shifted by its slot offset
    ``i * max_size`` in the padded layout, concatenated over all graphs.

    :param graph_sizes: 1-D tensor of per-graph node counts.
    :return: 1-D int64 CUDA tensor of valid indices into the padded batch.
    """
    ins_len = graph_sizes.max()
    accum = torch.zeros(1).cuda()
    out = []
    for gs in graph_sizes:
        # torch.arange(0, gs) replaces the deprecated torch.range(0, gs - 1);
        # both produce the integers 0 .. gs-1.
        new_ind = torch.arange(0, gs).cuda() + accum
        out.append(new_ind)
        accum += ins_len
    final_ind = torch.cat(out, dim=0)
    return final_ind.long()
class MyDataParallel(torch_geometric.nn.DataParallel):
    # Thin wrapper that forwards attribute lookups to the wrapped module, so
    # callers can read ``parallel_model.some_attr`` as on the raw model.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def __getattr__(self, name):
        # __getattr__ only fires for names not found through normal lookup.
        if name == 'module':
            # Fetch the wrapped module straight from _modules to avoid
            # infinite recursion (self.module would re-enter __getattr__).
            return self._modules['module']
        else:
            # Everything else is delegated to the wrapped module.
            return getattr(self.module, name)
def retrieve_data(data, key):
    """Pull out the entries of ``data`` tagged with ``key``.

    Keys in ``data`` look like ``'<field>_<key>'`` (e.g. ``'x_vsbl'``); the
    returned dict maps the bare field names to their values. Non-dict input
    yields None. Valid tags: vsbl: [vsbl], full: [full], dual: [vsbl, full].
    """
    if not isinstance(data, dict):
        return None
    tag = '_{}'.format(key)
    selected = {}
    for name, value in data.items():
        if tag in name:
            selected[name.replace(tag, '')] = value
    return selected
| StarcoderdataPython |
1612165 | <gh_stars>10-100
import json
from abc import ABC, abstractmethod
from typing import List
from pywatts.core.filemanager import FileManager
from pywatts.core.summary_object import SummaryObject, SummaryCategory, SummaryObjectList, SummaryObjectTable
from tabulate import tabulate
class SummaryFormatter(ABC):
    """
    Abstract base class: a SummaryFormatter is responsible for formatting
    SummaryObjects according to a concrete output file format.
    """
    def create_summary(self, summaries: List[SummaryObject], fm: FileManager):
        """
        Create and store the summaries according to the output format.
        This base implementation is a documented no-op (returns None);
        concrete subclasses (SummaryMarkdown, SummaryJSON) override it.
        :param summaries: The summaries that should be stored.
        :type summaries: List[SummaryObject]
        :param fm: The pyWATTS filemanager.
        :type fm: FileManager
        """
    @abstractmethod
    def _create_summary(self, summary: SummaryObject):
        # Format a single list-style summary object.
        pass
    @abstractmethod
    def _create_table_summary(self, summary: SummaryObject):
        # Format a single table-style summary object.
        pass
class SummaryMarkdown(SummaryFormatter):
    """
    SummaryFormatter that saves the SummaryObjects as a markdown file.
    """
    def create_summary(self, summaries: List[SummaryObject], fm: FileManager):
        """
        Create the markdown summary, write it to ``summary.md`` via the
        filemanager, and return the rendered string.
        :param summaries: The summaries that should be stored.
        :type summaries: List[SummaryObject]
        :param fm: The pyWATTS filemanager.
        :type fm: FileManager
        """
        summary_string = "# Summary: \n"
        for category in [SummaryCategory.Summary, SummaryCategory.FitTime, SummaryCategory.TransformTime]:
            summary_string += f"## {category.name}\n"
            # Render only summaries of this category that carry any content.
            for summary in filter(lambda s: s.category == category, summaries):
                if summary.additional_information != "" or len(summary.k_v) > 0:
                    if isinstance(summary, SummaryObjectList):
                        summary_string += self._create_summary(summary)
                    elif isinstance(summary, SummaryObjectTable):
                        summary_string += self._create_table_summary(summary)
        with open(fm.get_path("summary.md"), "w") as file:
            file.write(summary_string)
        return summary_string
    def _create_summary(self, summary: SummaryObject):
        # One "###" section with a bullet per key/value pair.
        return f"### {summary.name}\n" + f"{summary.additional_information}\n" + "".join(
            [f"* {key} : {value}\n" for key, value in summary.k_v.items()])
    def _create_table_summary(self, summary: SummaryObject):
        # One "####" subsection per table, rendered in GitHub table syntax.
        return f"### {summary.name}\n" + f"{summary.additional_information}\n" + "".join(
            [
                f"#### {key}\n {tabulate(value, headers=range(len(value)), showindex=range(len(value)), tablefmt='github')}\n"
                for key, value in summary.k_v.items()])
class SummaryJSON(SummaryFormatter):
    """
    SummaryFormatter that saves the SummaryObjects as a JSON file.
    """
    def create_summary(self, summaries: List[SummaryObject], fm: FileManager):
        """
        Create the summary dict, write it to ``summary.json`` via the
        filemanager, and return the dict.
        :param summaries: The summaries that should be stored.
        :type summaries: List[SummaryObject]
        :param fm: The pyWATTS filemanager.
        :type fm: FileManager
        """
        summary_dict = {}
        for category in [SummaryCategory.Summary, SummaryCategory.FitTime, SummaryCategory.TransformTime]:
            category_dict = {}
            # Collect only summaries of this category that carry any content.
            for summary in filter(lambda s: s.category == category, summaries):
                if summary.additional_information != "" or len(summary.k_v) > 0:
                    if isinstance(summary, SummaryObjectList):
                        category_dict.update(self._create_summary(summary))
                    elif isinstance(summary, SummaryObjectTable):
                        category_dict.update(self._create_table_summary(summary))
            summary_dict.update({category.name: category_dict})
        with open(fm.get_path("summary.json"), "w") as file:
            json.dump(summary_dict, file)
        return summary_dict
    def _create_summary(self, summary: SummaryObject):
        # Map the summary name to its metadata and key/value results.
        result_dict = {
            key: value for key, value in summary.k_v.items()
        }
        return {
            summary.name: {
                "additional_information": summary.additional_information,
                "results": result_dict
            }
        }
    def _create_table_summary(self, summary: SummaryObject):
        # Table values are converted via .tolist() so they are JSON-serializable;
        # assumes the values are numpy arrays — TODO confirm.
        result_dict = {
            key: value.tolist() for key, value in summary.k_v.items()
        }
        return {
            summary.name: {
                "additional_information": summary.additional_information,
                "results": result_dict
            }
        }
| StarcoderdataPython |
3209834 | # (C) Copyright 2019-2021 Hewlett Packard Enterprise Development LP.
# Apache License 2.0
from copy import deepcopy
import logging
import json
from urllib.parse import quote_plus
from pyaoscx.exceptions.generic_op_error import GenericOperationError
from pyaoscx.exceptions.response_error import ResponseError
from pyaoscx.exceptions.verification_error import VerificationError
from pyaoscx.utils import util as utils
from pyaoscx.session import Session
from pyaoscx.pyaoscx_factory import PyaoscxFactory, Singleton
from pyaoscx.pyaoscx_module import PyaoscxModule
class Device(PyaoscxFactory, metaclass=Singleton):
    '''
    Represents a Device and all of its attributes. Keeping all the important
    information inside one class.
    Singleton metaclass: only one Device object exists per process; repeated
    construction returns the same instance.
    '''
    base_uri = "system"
    def __init__(self, session):
        # NOTE: __init__ performs a REST call (get_firmware_version), so
        # constructing a Device requires a live, authenticated session.
        self.session = session
        self.firmware_version = None
        # Used to set attributes
        self.config_attrs = []
        self.materialized = False
        self.__original_attributes = {}
        # Set firmware version
        self.get_firmware_version()
    @PyaoscxModule.connected
    def get(self):
        '''
        Perform a GET call to retrieve device attributes.
        After data from response, Device class attributes
        are generated using create_attrs().
        '''
        logging.info("Retrieving the switch attributes and capabilities")
        non_configurable_attrs = [
            "admin_password_set",
            "aruba_central",
            "boot_time",
            "capabilities",
            "capacities",
            "mgmt_intf_status",
            "platform_name",
            "software_images",
            "software_info",
            "software_version",
            "qos_defaults",
        ]
        configurable_attrs = [
            "domain_name",
            "hostname",
            "other_config",
            "qos_config",
            "qos_default",
            "q_profile_default",
        ]
        # Concatenate both config and non-config attrs without duplicates
        # (set() loses ordering, which is fine for a query string).
        all_attributes = list(set(non_configurable_attrs + configurable_attrs))
        attributes_list = ','.join(all_attributes)
        uri = "system?attributes={}&depth={}".format(
            attributes_list,
            self.session.api.default_depth
        )
        try:
            response = self.session.request("GET", uri)
        except Exception as e:
            raise ResponseError("GET", e)
        if not utils._response_ok(response, "GET"):
            raise GenericOperationError(response.text, response.status_code)
        # Load into json format
        data = json.loads(response.text)
        # Create class attributes using util.create_attrs
        utils.create_attrs(self, data)
        utils.set_config_attrs(
            self,
            data,
            "config_attrs",
            non_configurable_attrs
        )
        # Save original attributes so `modified` can detect later changes.
        self.__original_attributes = deepcopy(
            utils.get_attrs(self, self.config_attrs)
        )
        # Set device as materialized
        self.materialized = True
    @property
    def modified(self):
        """
        Return True if any configurable attribute differs from the snapshot
        taken by get(); False otherwise.
        """
        device_data = utils.get_attrs(self, self.config_attrs)
        return device_data != self.__original_attributes
    @PyaoscxModule.connected
    def get_subsystems(self):
        '''
        Perform GET call to retrieve subsystem attributes and attach them
        to this object as a ``subsystems`` attribute.
        '''
        # Log
        logging.info(
            "Retrieving the switch subsystem attributes and capabilities")
        # Attribute list
        attributes = [
            'product_info',
            'power_supplies',
            'interfaces',
            'fans',
            'resource_utilization'
        ]
        # Format attribute list by joining every element with a comma
        attributes_list = ','.join(attributes)
        # Build URI
        uri = "{}system/subsystems?attributes={}&depth={}".format(
            self.session.base_url, attributes_list,
            self.session.api.default_subsystem_facts_depth)
        try:
            # Try to perform a GET call and retrieve the data
            response = self.session.s.get(
                uri, verify=False, proxies=self.session.proxy)
        except Exception as e:
            raise ResponseError('GET', e)
        if not utils._response_ok(response, "GET"):
            raise GenericOperationError(response.text, response.status_code)
        # Load into json format
        data = json.loads(response.text)
        data_subsystems = {'subsystems' : data}
        # Create class attributes using util.create_attrs
        utils.create_attrs(self, data_subsystems)
    @PyaoscxModule.connected
    def get_firmware_version(self):
        '''
        Perform a GET call to retrieve the device firmware version and cache
        it on ``self.firmware_version``.
        :return: firmware_version: The firmware version string.
        '''
        uri = "{}firmware".format(self.session.base_url)
        try:
            response = self.session.s.get(
                uri, verify=False, proxies=self.session.proxy)
        except Exception as e:
            raise ResponseError('GET', e)
        if not utils._response_ok(response, "GET"):
            raise GenericOperationError(response.text, response.status_code)
        data = json.loads(response.text)
        self.firmware_version = data["current_version"]
        # Return Version
        return self.firmware_version
    @PyaoscxModule.materialized
    def apply(self):
        """
        Main method to update an existing Device object.
        :return modified: Boolean, True if object was created or modified
            False otherwise.
        """
        return self.update()
    @PyaoscxModule.materialized
    def update(self):
        """
        Perform a PUT call to apply changes to a Device object.
        Only issues the request when `modified` reports a change.
        :return modified: Boolean, True if object was created or modified
            False otherwise.
        """
        if not self.modified:
            return False
        device_data = utils.get_attrs(self, self.config_attrs)
        put_data = json.dumps(device_data)
        try:
            response = self.session.request("PUT", "system", data=put_data)
        except Exception as e:
            raise ResponseError("PUT", e)
        if not utils._response_ok(response, "PUT"):
            raise GenericOperationError(
                response.text,
                response.status_code
            )
        # Set new original attributes so `modified` resets to False.
        self.__original_attributes = deepcopy(device_data)
        return True
    ####################################################################
    # IMPERATIVE FUNCTIONS
    ####################################################################
    def update_banner(self, banner_info, banner_type='banner'):
        '''
        Perform a PUT request to modify a Device's Banner.
        :param banner_info: String to be configured as the banner.
        :param banner_type: Type of banner being configured on the switch.
            Either banner or banner_exec
        :return modified: Returns True if Banner was modified.
            False otherwise
        '''
        modified = False
        logging.info("Setting Banner")
        depth = self.session.api.default_depth
        # Second GET request to obtain just the variables that are writable
        selector = self.session.api.default_selector
        payload = {
            "depth": depth,
            "selector": selector
        }
        # NOTE(review): the extra 'depth' kwarg below is unused by the format
        # string and silently ignored by str.format.
        uri = "{base_url}{class_uri}".format(
            base_url=self.session.base_url,
            class_uri=Device.base_uri,
            depth=self.session.api.default_depth
        )
        try:
            response = self.session.s.get(
                uri, verify=False,
                proxies=self.session.proxy,
                params=payload)
        except Exception as e:
            raise ResponseError('GET', e)
        if not utils._response_ok(response, "GET"):
            raise GenericOperationError(response.text, response.status_code)
        # Configurable data
        config_data = json.loads(response.text)
        # If Banner type does not exist
        if banner_type not in config_data['other_config']:
            # Create Banner type
            config_data['other_config'][banner_type] = ""
        # Verify data is different
        if config_data['other_config'][banner_type] == banner_info:
            modified = False
        else:
            # Modify Banner
            config_data['other_config'][banner_type] = banner_info
            # UPDATE Banner
            put_uri = "{base_url}{class_uri}".format(
                base_url=self.session.base_url,
                class_uri=Device.base_uri
            )
            # Set data to be used inside PUT
            put_data = json.dumps(config_data, sort_keys=True, indent=4)
            try:
                response = self.session.s.put(
                    put_uri, verify=False, data=put_data, proxies=self.session.proxy)
            except Exception as e:
                raise ResponseError('PUT', e)
            if not utils._response_ok(response, "PUT"):
                raise GenericOperationError(
                    response.text, response.status_code, "UPDATE SYSTEM BANNER")
            # Object was modified, returns True
            modified = True
        return modified
    def delete_banner(self, banner_type='banner'):
        '''
        Perform a DELETE request to delete a device's Banner.
        :param banner_type: Type of banner being removed on the switch.
            Either banner or banner_exec
        :return modified: Returns True if Banner was modified.
            False otherwise
        '''
        logging.info("Removing Banner")
        depth = self.session.api.default_depth
        # Second GET request to obtain just the variables that are writable
        selector = self.session.api.default_selector
        payload = {
            "depth": depth,
            "selector": selector
        }
        # NOTE(review): the extra 'depth' kwarg below is unused by the format
        # string and silently ignored by str.format.
        uri = "{base_url}{class_uri}".format(
            base_url=self.session.base_url,
            class_uri=Device.base_uri,
            depth=self.session.api.default_depth
        )
        try:
            response = self.session.s.get(
                uri, verify=False,
                proxies=self.session.proxy,
                params=payload)
        except Exception as e:
            raise ResponseError('GET', e)
        if not utils._response_ok(response, "GET"):
            raise GenericOperationError(response.text, response.status_code)
        # Configurable data
        config_data = json.loads(response.text)
        # If Banner type does not exist
        if banner_type not in config_data['other_config']:
            modified = False
        else:
            # Delete Banner
            config_data['other_config'].pop(banner_type)
            # UPDATE Banner
            uri = "{base_url}{class_uri}".format(
                base_url=self.session.base_url,
                class_uri=Device.base_uri
            )
            put_data = json.dumps(config_data, sort_keys=True, indent=4)
            try:
                response = self.session.s.put(
                    uri, verify=False, data=put_data, proxies=self.session.proxy)
            except Exception as e:
                raise ResponseError('PUT', e)
            if not utils._response_ok(response, "PUT"):
                raise GenericOperationError(
                    response.text, response.status_code, "DELETE Banner")
            # Object was modified, returns True
            modified = True
        return modified
    def boot_firmware(self, partition_name='primary'):
        '''
        Perform a POST request to Boot the AOS-CX switch with the image present
        in the specified partition.
        :param partition_name: String name of the partition for device to boot to.
        :return bool: True if success
        '''
        # Lower case for partition name
        partition_name = partition_name.lower()
        if partition_name not in ['primary', 'secondary']:
            raise VerificationError('Boot Firmware', 'Bad partition name')
        success = False
        uri = '{base_url}boot?image={part}'.format(
            base_url=self.session.base_url,
            part=partition_name)
        try:
            # NOTE(review): the POST response is not checked; any HTTP error
            # status is silently treated as success — confirm intended.
            self.session.s.post(
                uri, verify=False,
                proxies=self.session.proxy)
        except Exception as e:
            raise ResponseError('POST', e)
        success = True
        # Return result
        return success
    def upload_firmware_http(self, remote_firmware_file_path,
                             vrf,
                             partition_name='primary'):
        '''
        Perform a PUT request to upload a firmware image given
        a http_request.
        :param remote_firmware_file_path: "HTTP server address and path for
            uploading firmware image, must be reachable through provided vrf
            ex: http://192.168.1.2:8000/TL_10_04_0030A.swi"
        :param vrf: VRF to be used to contact HTTP server, required if
            remote_firmware_file_path is provided
        :param partition_name: Name of the partition for the
            image to be uploaded to.
        :return bool: True if success
        '''
        http_path = remote_firmware_file_path
        unsupported_versions = [
            "10.00",
            "10.01",
            "10.02",
            "10.03",
        ]
        # Verify Version: remote upload requires firmware 10.04 or newer.
        for version in unsupported_versions:
            if version in self.firmware_version:
                raise VerificationError(
                    'Upload Firmware through HTTPs',
                    "Minimum supported firmware version is 10.04 for" +
                    " remote firmware upload, your version is {firmware}"
                    .format(firmware=self.firmware_version))
        # Verify VRF
        if vrf is None:
            raise VerificationError(
                'VRF',
                "VRF needs to be provided in order" +
                " to upload firmware from HTTP server")
        # URL-encode the remote path so it can travel as a query parameter.
        http_path_encoded = quote_plus(http_path)
        # Build URI
        uri = '{base_url}firmware?image={part}&from={path}&vrf={vrf}'\
            .format(
                base_url=self.session.base_url,
                part=partition_name,
                path=http_path_encoded,
                vrf=vrf)
        # PUT for a HTTP Request
        try:
            response = self.session.s.put(
                uri, verify=False,
                proxies=self.session.proxy)
        except Exception as e:
            raise ResponseError('PUT', e)
        if not utils._response_ok(response, "PUT"):
            raise GenericOperationError(
                response.text, response.status_code)
        # True if successful
        return True
    def upload_firmware_local(self, partition_name='primary',
                              firmware_file_path=None):
        '''
        Perform a POST request to upload a firmware image from a local file.
        :param partition_name: Name of the partition for the
            image to be uploaded to.
        :param firmware_file_path: File name and path for local file uploading
            firmware image
        :return success: True if success
        '''
        uri = '{base_url}firmware?image={part}'.format(
            base_url=self.session.base_url,
            part=partition_name)
        # Upload file
        success = utils.file_upload(self.session, firmware_file_path, uri)
        # If no errors detected
        return success
    def upload_firmware(self, partition_name=None,
                        firmware_file_path=None,
                        remote_firmware_file_path=None,
                        vrf=None):
        '''
        Upload a firmware image from a local file OR from a remote location.
        Dispatches to upload_firmware_http when a remote path is given,
        otherwise to upload_firmware_local.
        :param partition_name: Name of the partition for the
            image to be uploaded to. Defaults to 'primary'.
        :param firmware_file_path: File name and path for local file uploading
            firmware image.
            IMPORTANT: For this to be used, the remote_firmware_file_path
            parameter must be left as NONE
        :param remote_firmware_file_path: HTTP server address and path for
            uploading firmware image, must be reachable through provided vrf
            ex: http://192.168.1.2:8000/TL_10_04_0030A.swi
        :param vrf: VRF to be used to contact HTTP server, required if
            remote_firmware_file_path is provided
        :return bool: True if success
        '''
        result = None
        if partition_name is None:
            partition_name = 'primary'
        # Use HTTP Server
        if remote_firmware_file_path is not None:
            result = self.upload_firmware_http(
                remote_firmware_file_path,
                vrf,
                partition_name)
        # Locally
        else:
            result = self.upload_firmware_local(
                partition_name,
                firmware_file_path)
        # If no errors detected
        return result
    def vsx_capable(self):
        """
        Return whether this device supports the VSX functionality.
        :return: True if device supports VSX
        """
        return hasattr(self, "capabilities") and "vsx" in self.capabilities
    def is_capable(self, capability):
        """
        Check if the current Device has the given capability.
        Materializes the device (calls get()) first if needed.
        :param capability: String name of a Device capability.
        :return: True if Device is capable; False otherwise.
        """
        if not self.materialized:
            self.get()
        return capability in self.capabilities
| StarcoderdataPython |
3390327 | """Add python related paths to the user's PATH environment variable."""
import ctypes
import sys
from ctypes.wintypes import HWND, UINT, WPARAM, LPARAM as LRESULT, LPVOID
from os.path import abspath
import winreg
HKCU = winreg.HKEY_CURRENT_USER
ENV = "Environment"
PATH = "PATH"
HWND_BROADCAST = 0xFFFF
WM_SETTINGCHANGE = 0x1A
def notify_windows():
    """Notifies all windows about a settings change.

    Broadcasts WM_SETTINGCHANGE with the lparam string "Environment" so
    running applications re-read the (just-modified) environment variables.
    See: <https://docs.microsoft.com/en-us/windows/desktop/winmsg/wm-settingchange>
    """
    SendMessage = ctypes.windll.user32.SendMessageW
    # lparam is declared LPVOID and receives the Python unicode string below;
    # the 'LRESULT' name is an alias of LPARAM (see the import at file top).
    SendMessage.argtypes = HWND, UINT, WPARAM, LPVOID
    SendMessage.restype = LRESULT
    SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, 0, u'Environment')
def fix_user_env_path():
    """Interactively add important python related paths to the user's PATH
    environment variable.

    Reads HKCU\\Environment\\PATH, computes which of sys.prefix and
    sys.prefix\\Scripts are missing, asks for confirmation on stdin, then
    appends them and broadcasts the change to running applications.
    """
    # The two folders that should be on PATH for this interpreter.
    to_check = {
        abspath(sys.prefix),
        abspath(sys.prefix + "\\Scripts"),
    }
    with winreg.CreateKey(HKCU, ENV) as key:
        try:
            current = winreg.QueryValueEx(key, PATH)[0]
        except FileNotFoundError:
            # No PATH value exists yet for this user.
            current = ""
        # Normalize existing entries for comparison (abspath, stripped).
        existing = {abspath(p.strip()) for p in current.split(";") if
                    p.strip()}
        to_add = sorted(to_check - existing)
        if not to_add:
            print("Nothing to add.")
            return
        # Confirmation loop: empty/y/yes proceeds, n/no aborts, else re-ask.
        while True:
            print("* The following folders should be added to the PATH"
                  " environment variable:")
            for p in to_add:
                print(p)
            answer = input("* Fix the PATH environment variables on your"
                           " account? [Yn]: ")
            if answer.strip().lower() in ['', 'y', 'yes']:
                break
            if answer.strip().lower() in ['n', 'no']:
                print("Nothing added.")
                return
        # Append the new folders; REG_EXPAND_SZ keeps %VAR% entries expandable.
        new = current.rstrip().rstrip(';') + ";" + ";".join(to_add) + ';'
        winreg.SetValueEx(key, PATH, 0, winreg.REG_EXPAND_SZ, new)
        notify_windows()
        print("Updated.")
        print("PATH will be updated on new sessions.")
print("PATH will be updated on new sessions.")
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # The required --interactive flag acts purely as a confirmation gate:
    # the parsed args are not used afterwards, the script always runs the
    # (itself interactive) fix once parsing succeeds.
    parser.add_argument('--interactive', '-i', action='store_true',
                        help='Interactive mode.', required=True)
    args = parser.parse_args()
    fix_user_env_path()
| StarcoderdataPython |
3394069 | <filename>sathub/forms.py
# -*- coding: utf-8 -*-
#
# sathub/forms.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms import PasswordField
from wtforms import validators
from wtforms.fields import SelectField
LOCALES = ['pt_BR', 'pt']
class LoginForm(FlaskForm):
    # Login form with username (2-20 chars) and password (6-20 chars).
    # NOTE(review): the password label literal '<PASSWORD>' looks like a
    # dataset redaction artifact; the original likely read 'Senha' — it is a
    # runtime string, so it is left untouched here. Verify against upstream.
    username = StringField(u'Nome de Usuário',
                           validators=[
                               validators.DataRequired(),
                               validators.length(min=2, max=20), ])
    password = PasswordField('<PASSWORD>',
                             validators=[
                                 validators.DataRequired(),
                                 validators.length(min=6, max=20), ])
    class Meta:
        # Render validation messages in Brazilian Portuguese.
        locales = LOCALES
class EmptyForm(FlaskForm):
    # Fieldless form; used when only CSRF protection / localized messages
    # are needed.
    class Meta:
        locales = LOCALES
class ConfigurarInterfaceDeRedeFrom(FlaskForm):
    # Network-interface configuration form (labels in Portuguese).
    # NOTE(review): unlike the sibling forms, no ``class Meta: locales`` is
    # declared here — confirm whether localized validation is intended.
    # NOTE(review): class name probably meant "...Form"; renaming would break
    # callers, so it is left as-is.
    tipoInter = SelectField(
        choices=(
            ('ETHE', 'ETHE'),
            # ('WIFI', 'WIFI')
        ),
        label='Tipo de interface',
        render_kw={'class': 'form-control', 'style': 'font-size:150%'}
    )
    # Wi-Fi fields kept disabled until WIFI support is enabled above:
    # SSID = StringField('Nome da rede (SSID)')
    # seg = SelectField(
    #     choices=(
    #         ('NONE', 'NONE'),
    #         ('WEP', 'WEP'),
    #         ('WAP', 'WAP'),
    #         ('WPA-PERSONAL', 'WPA-PERSONAL'),
    #         ('WPA-ENTERPRISE', 'WPA-ENTERPRISE'),
    #     ),
    #     label='Seguranaça'
    # )
    # codigo = PasswordField(
    #     'Frase ou chave de acesso à rede sem fio.',
    #     validators=[
    #         validators.DataRequired(),
    #         validators.length(min=6, max=64), ]
    # )
    tipoLan = SelectField(
        choices=(
            ('IPFIX', 'IP FIXO'),
            ('DHCP', 'DHCP (Automático)')
        ),
        label='Tipo de conexão',
        render_kw={'class': 'form-control', 'style': 'font-size:150%'}
    )
    # The address fields below are plain text inputs pre-filled with a
    # zeroed dotted-quad; max length 15 fits "255.255.255.255".
    lanIP = StringField(
        'Endereço IP',
        validators=[
            validators.DataRequired(),
            validators.length(max=15)
        ],
        render_kw={'class': 'form-control','value':'000.000.000.000', 'style': 'font-size:150%'}
    )
    lanMask = StringField('Máscara', validators=[
        validators.DataRequired(),
        validators.length(max=15)
    ],
        render_kw={'class': 'form-control','value':'000.000.000.000', 'style': 'font-size:150%'}
    )
    lanGW = StringField('Gateway', validators=[
        validators.DataRequired(),
        validators.length(max=15)
    ],
        render_kw={'class': 'form-control','value':'000.000.000.000','style': 'font-size:150%'})
    lanDNS1 = StringField('DNS1', validators=[
        validators.DataRequired(),
        validators.length(max=15)
    ], render_kw={'class': 'form-control','value':'000.000.000.000','style': 'font-size:150%'})
    lanDNS2 = StringField('DNS2', validators=[
        validators.DataRequired(),
        validators.length(max=15)
    ], render_kw={'class': 'form-control','value':'000.000.000.000','style': 'font-size:150%'})
| StarcoderdataPython |
#!/usr/bin/env python3
"""CGI login handler.

Authenticates either via the submitted form fields (setting a session
cookie on success) or via a previously set cookie, then renders the
secret page or the login-failed page.
"""
import cgi, cgitb, os

from templates import secret_page, after_login_incorrect
import secret

form = cgi.FieldStorage()
username_field = form.getvalue("username")
password_field = form.getvalue("password")

# Form login: the Set-Cookie header must be emitted before headers end.
if username_field == secret.username and password_field == secret.password:
    print("Set-Cookie: dbecerra = YEP\r\n")
    logged_in = True
else:
    logged_in = False

print("Content-type: text/html")
print()

# Cookie login fallback. Bug fix: HTTP_COOKIE is absent on a first visit,
# and os.environ.get(...) then returns None, which crashed on .split().
cookies = os.environ.get('HTTP_COOKIE', '').split(";")
for c in cookies:
    try:
        # maxsplit=1 keeps values that themselves contain '=' intact.
        key, value = c.split("=", 1)
        key, value = key.strip(), value.strip()
        if key == "dbecerra" and value == "YEP":
            logged_in = True
    except ValueError:
        # Malformed cookie fragment (no '='); ignore it.
        pass

if logged_in:
    print(secret_page(secret.username, secret.password))
else:
    print(after_login_incorrect())
4839123 | """Builds automatic documentation of the installed webviz config plugins.
The documentation is designed to be used by the YAML configuration file end
user. Sphinx has not been used due to
1) Sphinx is geared towards Python end users, and templateing of apidoc output
is not yet supported (https://github.com/sphinx-doc/sphinx/issues/3545).
2) It is a small problem to be solved, and most of the Sphinx machinery
is not needed.
Overall workflow is:
* Finds all installed plugins.
* Automatically reads docstring and __init__ function signature (both
argument names and which arguments have default values).
* Output the extracted plugin information in html using jinja2.
"""
import shutil
import inspect
import pathlib
from importlib import import_module
from collections import defaultdict
import jinja2
from markdown import markdown
import webviz_config.plugins
from webviz_config._config_parser import SPECIAL_ARGS
SCRIPT_DIR = pathlib.Path(__file__).resolve().parent
BUILD_DIR = SCRIPT_DIR / "_build"
TEMPLATE_FILE = SCRIPT_DIR / "templates" / "index.html.jinja2"
EXAMPLE = SCRIPT_DIR / ".." / "examples" / "basic_example.yaml"
def escape_all(input_string):
    """Escape every HTML and non-ASCII character in the given string.

    The result is wrapped in jinja2.Markup so templates render it verbatim
    instead of escaping it a second time.
    """
    html_safe = jinja2.escape(input_string)
    ascii_safe = html_safe.encode("ascii", "xmlcharrefreplace").decode()
    return jinja2.Markup(ascii_safe)
def convert_docstring(doc):
    """Render a (possibly missing) docstring as markdown-generated HTML."""
    if doc is None:
        return ""
    return markdown(doc, extensions=["fenced_code"])
def get_plugin_documentation():
    """Get all installed plugins, and document them by grabbing docstring
    and input arguments / function signature.

    Returns a dict keyed by package name; each value holds the package
    docstring (as HTML) and the list of its plugin descriptions.
    """
    plugins = inspect.getmembers(webviz_config.plugins, inspect.isclass)
    plugin_doc = []
    for plugin in plugins:
        # plugin is a (name, class) tuple as returned by inspect.getmembers
        reference = plugin[1]
        plugin_info = {}
        plugin_info["name"] = plugin[0]
        plugin_info["doc"] = convert_docstring(reference.__doc__)
        argspec = inspect.getfullargspec(reference.__init__)
        # Drop framework-injected arguments (SPECIAL_ARGS) from the docs
        plugin_info["args"] = [
            arg for arg in argspec.args if arg not in SPECIAL_ARGS
        ]
        # Arguments without defaults fall back to the placeholder text
        plugin_info["values"] = defaultdict(lambda: "some value")
        if argspec.defaults is not None:
            # Defaults belong to the *last* len(defaults) arguments, hence
            # the reversed() zip to pair them up correctly.
            for arg, default in dict(
                zip(reversed(argspec.args), reversed(argspec.defaults))
            ).items():
                plugin_info["values"][
                    arg
                ] = f"{default} # Optional (default value shown here)."
        module = inspect.getmodule(reference)
        plugin_info["module"] = module.__name__
        package = inspect.getmodule(module).__package__
        plugin_info["package"] = package
        plugin_info["package_doc"] = convert_docstring(
            import_module(package).__doc__
        )
        # Plugins named "Example..." are excluded from the generated docs
        if not plugin_info["name"].startswith("Example"):
            plugin_doc.append(plugin_info)
    # Sort the plugins by package:
    package_ordered = defaultdict(lambda: {"plugins": []})
    for plugin in sorted(plugin_doc, key=lambda x: (x["module"], x["name"])):
        package = plugin["package"]
        package_ordered[package]["plugins"].append(plugin)
        package_ordered[package]["doc"] = plugin["package_doc"]
    return package_ordered
def get_basic_example():
    """Return the basic example configuration file, escaped for safe
    embedding in the generated HTML page.
    """
    # EXAMPLE is a pathlib.Path; read it with an explicit encoding instead
    # of relying on the platform default (which breaks on non-UTF-8 locales).
    return escape_all(EXAMPLE.read_text(encoding="utf-8"))
if __name__ == "__main__":
    template_data = {
        "packages": get_plugin_documentation(),
        "basic_example": get_basic_example(),
    }
    # Explicit encodings: template and output are UTF-8 regardless of the
    # platform's default locale encoding.
    with open(TEMPLATE_FILE, encoding="utf-8") as fh:
        template = jinja2.Template(fh.read())
    # Rebuild the output directory from scratch on every run
    if BUILD_DIR.exists():
        shutil.rmtree(BUILD_DIR)
    shutil.copytree(SCRIPT_DIR / "assets", BUILD_DIR / "assets")
    with open(BUILD_DIR / "index.html", "w", encoding="utf-8") as fh:
        fh.write(template.render(template_data))
    print(f"Output available in {BUILD_DIR}")
| StarcoderdataPython |
3376632 | # Python imports
import requests
# Local imports
import exceptions
from timeline import Timeline
from contacts import Contacts
class User(object):
    """
    Represent an user for an application
    Access Google Glass timeline using : user.timeline
    Each user is defined by unique token : user.token
    """

    def __init__(self, app=None, token=None, refresh_token=None, tokens=None):
        # A "tokens" dict (as returned by the OAuth endpoint) takes
        # precedence over individually supplied token values.
        if tokens:
            token = tokens["access_token"]
            refresh_token = tokens["refresh_token"]
        self.app = app
        self.token = token
        self.refresh_token = refresh_token
        self.session = self.app.oauth.get_session(token=self.token)
        self.session.headers.update({'Content-Type': 'application/json'})
        self.timeline = Timeline(self)
        self.contacts = Contacts(self)

    def refresh_access_token(self):
        """
        Refresh the user access token and return the tokens dict.

        NOTE(review): this method was previously named ``refresh_token``,
        which collided with - and was shadowed by - the instance attribute
        of the same name set in ``__init__``, making the method uncallable.
        It has been renamed; the ``refresh_token`` attribute keeps its
        original meaning (the refresh-token string).
        """
        if not self.refresh_token:
            raise Exception("No refresh token for this user")
        tokens = self.app.oauth.get_raw_access_token(data={
            'refresh_token': self.refresh_token,
            'grant_type': 'refresh_token'
        }).json()
        self.token = tokens["access_token"]
        return self.tokens

    def request(self, *args, **kwargs):
        """
        Return a request with the user session.

        Raises exceptions.RefreshTokenException on HTTP 401 so callers can
        refresh the access token; other HTTP errors are re-raised unchanged.
        """
        r = self.session.request(*args, **kwargs)
        try:
            r.raise_for_status()
        # "except ... as e" is valid on both Python 2.6+ and Python 3;
        # the original "except X, e" form is Python-2-only syntax.
        except requests.HTTPError as e:
            if e.response.status_code == 401:
                raise exceptions.RefreshTokenException()
            else:
                # bare raise preserves the original traceback
                raise
        return r

    @property
    def tokens(self):
        """
        Return tokens in a dict
        """
        return {
            "access_token": self.token,
            "refresh_token": self.refresh_token
        }

    def profile(self):
        """
        Return profile informations about this user
        """
        r = self.request("GET", "oauth2/v1/userinfo", params={'alt': 'json'})
        profile = r.json()
        if (profile is None
                or "given_name" not in profile
                or "email" not in profile
                or "name" not in profile):
            raise exceptions.UserException("Invalid user profile")
        return profile

    def location(self, lid="latest"):
        """
        Return the last known location or a specific location
        :param lid: location id ("latest" for the last known location)
        """
        r = self.request("GET", "mirror/v1/locations/%s" % (lid))
        location = r.json()
        if (location is None
                or "latitude" not in location
                or "longitude" not in location):
            raise exceptions.UserException("Invalid user location")
        return location
| StarcoderdataPython |
134053 | #
# Dispatcher.py
#
# (c) 2020 by <NAME>
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Most internal requests are routed through here.
#
from __future__ import annotations
import sys, traceback, re
from copy import deepcopy
import isodate
from flask import Request
from typing import Any, Tuple, Dict, Callable
from Logging import Logging
from Configuration import Configuration
from Constants import Constants as C
from Types import ResourceTypes as T
from Types import FilterOperation
from Types import FilterUsage
from Types import ResponseType
from Types import Permission
from Types import Operation
from Types import DesiredIdentifierResultType
from Types import ResultContentType as RCN
from Types import ResponseCode as RC
from Types import Result
from Types import RequestArguments
from Types import RequestHeaders
from Types import RequestStatus
from Types import CSERequest
from Types import JSON, Parameters, Conditions
import CSE, Utils
from resources.Resource import Resource
import resources.Factory as Factory
class Dispatcher(object):
def __init__(self) -> None:
    # CSE-ID prefix ("<csi>/") and its length, precomputed for fast
    # stripping of SP-relative resource IDs in retrieveResource().
    self.csiSlash = f'{CSE.cseCsi}/'
    self.csiSlashLen = len(self.csiSlash)
    # Configuration flag: whether discovered resources shall be sorted.
    self.sortDiscoveryResources = Configuration.get('cse.sortDiscoveredResources')
    Logging.log('Dispatcher initialized')
def shutdown(self) -> bool:
    """ Shut down the dispatcher. Always returns True. """
    Logging.log('Dispatcher shut down')
    return True
# The "xxxRequest" methods handle http requests while the "xxxResource"
# methods handle actions on the resources. Security/permission checking
# is done for requests, not on resource actions.
#########################################################################
#
# Retrieve resources
#
def processRetrieveRequest(self, request:CSERequest, originator:str, id:str=None) -> Result:
    """ Handle a RETRIEVE request. Depending on the request's filter usage
        (fu) this is either a plain retrieval or a discovery. The shape of
        the returned Result depends on the requested result content (rcn).
    """
    fopsrn, id = self._checkHybridID(request, id)  # overwrite id if another is given

    # handle fanout point requests
    if (fanoutPointResource := Utils.fanoutPointResource(fopsrn)) is not None and fanoutPointResource.ty == T.GRP_FOPT:
        Logging.logDebug(f'Redirecting request to fanout point: {fanoutPointResource.__srn__}')
        return fanoutPointResource.handleRetrieveRequest(request, fopsrn, request.headers.originator)

    # fu == 1 means discovery; discovery uses the DISCOVERY permission
    permission = Permission.DISCOVERY if request.args.fu == 1 else Permission.RETRIEVE

    # check rcn & operation
    if permission == Permission.DISCOVERY and request.args.rcn not in [ RCN.discoveryResultReferences, RCN.childResourceReferences ]:  # Only allow those two
        return Result(rsc=RC.badRequest, dbg=f'invalid rcn: {request.args.rcn:d} for fu: {request.args.fu:d}')
    if permission == Permission.RETRIEVE and request.args.rcn not in [ RCN.attributes, RCN.attributesAndChildResources, RCN.childResources, RCN.attributesAndChildResourceReferences, RCN.originalResource, RCN.childResourceReferences]:  # TODO
        return Result(rsc=RC.badRequest, dbg=f'invalid rcn: {request.args.rcn:d} for fu: {request.args.fu:d}')

    Logging.logDebug(f'Discover/Retrieve resources (fu: {request.args.fu:d}, drt: {request.args.drt}, handling: {request.args.handling}, conditions: {request.args.conditions}, resultContent: {request.args.rcn:d}, attributes: {str(request.args.attributes)})')

    # Retrieve the target resource, because it is needed for some rcn (and the default)
    if request.args.rcn in [RCN.attributes, RCN.attributesAndChildResources, RCN.childResources, RCN.attributesAndChildResourceReferences, RCN.originalResource]:
        if (res := self.retrieveResource(id)).resource is None:
            return res
        if not CSE.security.hasAccess(originator, res.resource, permission):
            return Result(rsc=RC.originatorHasNoPrivilege, dbg=f'originator has no permission ({permission:d})')

        # if rcn == attributes then we can return here, whatever the result is
        if request.args.rcn == RCN.attributes:
            return res

        resource = res.resource  # root resource for the retrieval/discovery

        # if rcn == original-resource we retrieve the linked resource
        if request.args.rcn == RCN.originalResource:
            if resource is None:  # continue only when there actually is a resource
                return res
            if (lnk := resource.lnk) is None:  # no link attribute?
                return Result(rsc=RC.badRequest, dbg='missing lnk attribute in target resource')
            return self.retrieveResource(lnk, originator, raw=True)

    # do discovery
    # TODO simplify arguments
    if (res := self.discoverResources(id, originator, request.args.handling, request.args.fo, request.args.conditions, request.args.attributes, permission=permission)).lst is None:  # not found?
        return res.errorResult()

    # check and filter by ACP. After this allowedResources only contains the resources that are allowed
    allowedResources = []
    for r in res.lst:
        if CSE.security.hasAccess(originator, r, permission):
            allowedResources.append(r)

    #
    # Handle more sophisticated RCN
    #
    # NOTE: branches below that use `resource` are only reachable for rcn
    # values handled in the retrieval block above, where `resource` is bound.
    if request.args.rcn == RCN.attributesAndChildResources:
        self._resourceTreeDict(allowedResources, resource)  # the function call add attributes to the target resource
        return Result(resource=resource)

    elif request.args.rcn == RCN.attributesAndChildResourceReferences:
        self._resourceTreeReferences(allowedResources, resource, request.args.drt)  # the function call add attributes to the target resource
        return Result(resource=resource)

    elif request.args.rcn == RCN.childResourceReferences:
        #childResourcesRef: dict = { resource.tpe: {} } # Root resource as a dict with no attribute
        childResourcesRef = self._resourceTreeReferences(allowedResources, None, request.args.drt)
        return Result(resource=childResourcesRef)

    elif request.args.rcn == RCN.childResources:
        childResources:JSON = { resource.tpe : {} }  # Root resource as a dict with no attribute
        self._resourceTreeDict(allowedResources, childResources[resource.tpe])  # Adding just child resources
        return Result(resource=childResources)

    elif request.args.rcn == RCN.discoveryResultReferences:  # URIList
        return Result(resource=self._resourcesToURIList(allowedResources, request.args.drt))

    else:
        return Result(rsc=RC.badRequest, dbg='wrong rcn for RETRIEVE')
def retrieveResource(self, id:str=None, originator:str=None, raw:bool=False) -> Result:
    """ Retrieve a resource by its (possibly SP-relative or structured) ID.
        Local targets are answered from local storage; SP-relative IDs that
        do not target this CSE are forwarded to the remote CSE.
    """
    # If the ID is in SP-relative format then first check whether this is for the
    # local CSE.
    # If yes, then adjust the ID and try to retrieve it.
    # If no, then try to retrieve the resource from a connected (!) remote CSE.
    if id is not None:
        if id.startswith(self.csiSlash) and len(id) > self.csiSlashLen:  # TODO for all operations?
            id = id[self.csiSlashLen:]
        else:
            if Utils.isSPRelative(id):
                return CSE.remote.retrieveRemoteResource(id, originator, raw)
    # Structured IDs are looked up by srn, unstructured ones by ri
    return self.retrieveLocalResource(srn=id) if Utils.isStructured(id) else self.retrieveLocalResource(ri=id)
def retrieveLocalResource(self, ri:str=None, srn:str=None) -> Result:
    """ Retrieve a resource from local storage, addressed either by its
        resource ID (ri) or by its structured resource name (srn).
    """
    Logging.logDebug(f'Retrieve resource: {ri if srn is None else srn}')
    if ri is not None:
        res = CSE.storage.retrieveResource(ri=ri)  # retrieve via normal ID
    elif srn is not None:
        res = CSE.storage.retrieveResource(srn=srn)  # retrieve via srn. Try to retrieve by srn (cases of ACPs created for AE and CSR by default)
    else:
        return Result(rsc=RC.notFound, dbg='resource not found')
    if (resource := res.resource) is not None:
        # Check for virtual resource
        if resource.ty != T.GRP_FOPT and Utils.isVirtualResource(resource): # fopt is handled elsewhere
            return resource.handleRetrieveRequest()  # type: ignore[no-any-return]
        return Result(resource=resource)
    if res.dbg is not None:
        Logging.logDebug(f'{res.dbg}: {ri}')
    return Result(rsc=res.rsc, dbg=res.dbg)
#########################################################################
#
# Discover Resources
#
def discoverResources(self, id:str, originator:str, handling:Conditions, fo:int=1, conditions:Conditions=None, attributes:Parameters=None, rootResource:Resource=None, permission:Permission=Permission.DISCOVERY) -> Result:
    """ Discover resources below *id* (or *rootResource*), applying filter
        criteria (conditions/attributes with filter operation *fo*) and
        result handling (ofst/lim/lvl/arp). The returned Result's *lst*
        holds the discovered resources in tree-walk order.
    """
    Logging.logDebug('Discovering resources')
    if rootResource is None:
        if (res := self.retrieveResource(id)).resource is None:
            return Result(rsc=RC.notFound, dbg=res.dbg)
        rootResource = res.resource
    # get all direct children
    dcrs = self.directChildResources(id)
    # Slice the page (offset and limit)
    offset = handling['ofst'] if 'ofst' in handling else 1  # default: 1 (first resource
    limit = handling['lim'] if 'lim' in handling else sys.maxsize  # default: system max size or "maxint"
    dcrs = dcrs[offset-1:offset-1+limit]  # now dcrs only contains the desired child resources for ofst and lim
    # Get level
    level = handling['lvl'] if 'lvl' in handling else sys.maxsize  # default: system max size or "maxint"
    # a bit of optimization. This length stays the same.
    # allLen = total number of filter criteria; used for AND matching in
    # _matchResource(). Multi-valued criteria (ty/cty/lbl) count once per
    # value, hence the -1 compensations below.
    allLen = len(attributes) if attributes is not None else 0
    if conditions is not None:
        allLen += ( len(conditions) +
          (len(conditions.get('ty'))-1 if 'ty' in conditions else 0) +  # -1 : compensate for len(conditions) in line 1
          (len(conditions.get('cty'))-1 if 'cty' in conditions else 0) +  # -1 : compensate for len(conditions) in line 1
          (len(conditions.get('lbl'))-1 if 'lbl' in conditions else 0)  # -1 : compensate for len(conditions) in line 1
        )
    # allLen = ((len(conditions) if conditions is not None else 0) +
    #   (len(attributes) if attributes is not None else 0) +
    #   (len(conditions.get('ty'))-1 if conditions is not None and 'ty' in conditions else 0) +  # -1 : compensate for len(conditions) in line 1
    #   (len(conditions.get('cty'))-1 if conditions is not None else 0) +  # -1 : compensate for len(conditions) in line 1
    #   (len(conditions.get('lbl'))-1 if conditions is not None else 0)  # -1 : compensate for len(conditions) in line 1
    # )
    # Discover the resources
    discoveredResources = self._discoverResources(rootResource, originator, level, fo, allLen, dcrs=dcrs, conditions=conditions, attributes=attributes, permission=permission)
    # NOTE: this list contains all results in the order they could be found while
    # walking the resource tree.
    # DON'T CHANGE THE ORDER. DON'T SORT.
    # Because otherwise the tree cannot be correctly re-constructed otherwise
    # Apply ARP if provided
    if 'arp' in handling:
        arp = handling['arp']
        result = []
        for resource in discoveredResources:
            srn = f'{resource[Resource._srn]}/{arp}'
            if (res := self.retrieveResource(srn)).resource is not None:
                if CSE.security.hasAccess(originator, res.resource, permission):
                    result.append(res.resource)
        discoveredResources = result  # re-assign the new resources to discoveredResources
    return Result(lst=discoveredResources)
def _discoverResources(self, rootResource:Resource, originator:str, level:int, fo:int, allLen:int, dcrs:list[Resource]=None, conditions:Conditions=None, attributes:Parameters=None, permission:Permission=Permission.DISCOVERY) -> list[Resource]:
    """ Recursively walk the resource tree below *rootResource* (down to
        *level* levels) and return all non-virtual resources that match the
        filter criteria and that *originator* may access with *permission*.
    """
    if rootResource is None or level == 0:  # no resource or level == 0
        return []
    # get all direct children, if not provided
    if dcrs is None:
        if len(dcrs := self.directChildResources(rootResource.ri)) == 0:
            return []
    # Filter and add those left to the result
    discoveredResources = []
    for r in dcrs:
        # Exclude virtual resources
        if Utils.isVirtualResource(r):
            continue
        # check permissions and filter. Only then add a resource
        # First match then access. bc if no match then we don't need to check permissions (with all the overhead)
        if self._matchResource(r, conditions, attributes, fo, allLen) and CSE.security.hasAccess(originator, r, permission):
            discoveredResources.append(r)
        # Iterate recursively over all (not only the filtered) direct child resources
        # BUGFIX: propagate the requested permission down the recursion.
        # Previously the default (DISCOVERY) was silently used for deeper
        # levels, so a RETRIEVE-permission check only applied to the first
        # level of child resources.
        discoveredResources.extend(self._discoverResources(r, originator, level-1, fo, allLen, conditions=conditions, attributes=attributes, permission=permission))
    return discoveredResources
def _matchResource(self, r:Resource, conditions:Conditions, attributes:Parameters, fo:int, allLen:int) -> bool:
    """ Match a filter to a resource.

        *allLen* is the total number of filter criteria. With fo == AND
        every criterion must match (found == allLen); with fo == OR at
        least one must match (found > 0).
    """
    # TODO: Implement a couple of optimizations. Can we determine earlier that a match will fail?
    ty = r.ty
    # get the parent resource
    #
    # TODO when determines how the parentAttribute is actually encoded
    #
    # pr = None
    # if (pi := r.get('pi')) is not None:
    # 	pr = storage.retrieveResource(ri=pi)
    # The matching works like this: go through all the conditions, compare them, and
    # increment 'found' when matching. For fo=AND found must equal all conditions.
    # For fo=OR found must be > 0.
    found = 0
    # check conditions
    if conditions is not None:
        # Types
        # Multiple occurences of ty is always OR'ed. Therefore we add the count of
        # ty's to found (to indicate that the whole set matches)
        if (tys := conditions.get('ty')) is not None:
            found += len(tys) if str(ty) in tys else 0
        # creation time: before (crb) / after (cra)
        if (ct := r.ct) is not None:
            found += 1 if (c_crb := conditions.get('crb')) is not None and (ct < c_crb) else 0
            found += 1 if (c_cra := conditions.get('cra')) is not None and (ct > c_cra) else 0
        # last-modified time: modified since (ms) / unmodified since (us)
        if (lt := r.lt) is not None:
            found += 1 if (c_ms := conditions.get('ms')) is not None and (lt > c_ms) else 0
            found += 1 if (c_us := conditions.get('us')) is not None and (lt < c_us) else 0
        # state tag: smaller (sts) / bigger (stb)
        if (st := r.st) is not None:
            found += 1 if (c_sts := conditions.get('sts')) is not None and (str(st) > c_sts) else 0
            found += 1 if (c_stb := conditions.get('stb')) is not None and (str(st) < c_stb) else 0
        # expiration time: before (exb) / after (exa)
        if (et := r.et) is not None:
            found += 1 if (c_exb := conditions.get('exb')) is not None and (et < c_exb) else 0
            found += 1 if (c_exa := conditions.get('exa')) is not None and (et > c_exa) else 0
        # Check labels similar to types
        rlbl = r.lbl
        if rlbl is not None and (lbls := conditions.get('lbl')) is not None:
            for l in lbls:
                if l in rlbl:
                    found += len(lbls)
                    break
        # special handling of label-list
        # if (lbl := r.lbl) is not None and (c_lbl := conditions.get('lbl')) is not None:
        # 	lbla = c_lbl.split()
        # 	fnd = 0
        # 	for l in lbla:
        # 		fnd += 1 if l in lbl else 0
        # 	found += 1 if (fo == 1 and fnd == len(lbl)) or (fo == 2 and fnd > 0) else 0	# fo==or -> find any label
        # 	# TODO labelsQuery
        if ty in [ T.CIN, T.FCNT ]:  # special handling for CIN, FCNT
            # content size: at least (sza) / smaller than (szb)
            if (cs := r.cs) is not None:
                found += 1 if (sza := conditions.get('sza')) is not None and (int(cs) >= int(sza)) else 0
                found += 1 if (szb := conditions.get('szb')) is not None and (int(cs) < int(szb)) else 0
        # ContentFormats
        # Multiple occurences of cnf is always OR'ed. Therefore we add the count of
        # cnf's to found (to indicate that the whole set matches)
        # Similar to types.
        if ty in [ T.CIN ]:  # special handling for CIN
            if (cnfs := conditions.get('cty')) is not None:
                found += len(cnfs) if r.cnf in cnfs else 0
    # TODO childLabels
    # TODO parentLabels
    # TODO childResourceType
    # TODO parentResourceType
    # Attributes:
    if attributes is not None:
        for name in attributes:
            val = attributes[name]
            if '*' in val:
                # Wildcard match: translate '*' to the regex '.*'
                val = val.replace('*', '.*')
                found += 1 if (rval := r[name]) is not None and re.match(val, str(rval)) else 0
            else:
                found += 1 if (rval := r[name]) is not None and str(val) == str(rval) else 0
    # TODO childAttribute
    # TODO parentAttribute
    # Test whether the OR or AND criteria is fullfilled
    if not ((fo == FilterOperation.OR and found > 0) or  # OR and found something
            (fo == FilterOperation.AND and allLen == found)  # AND and found everything
           ):
        return False
    return True
#########################################################################
#
# Add resources
#
def processCreateRequest(self, request:CSERequest, originator:str, id:str=None) -> Result:
    """ Handle a CREATE request: locate the parent resource, check
        permissions, instantiate and persist the new resource, and answer
        according to the requested result content (rcn).
    """
    fopsrn, id = self._checkHybridID(request, id)  # overwrite id if another is given
    # # overwrite id if another is given
    # if id is not None:
    # 	id = id
    # 	srn = None
    # else:
    # 	id = request.id
    # 	srn = request.srn
    # fopsrn, id = Utils.srnFromHybrid(srn, id) # Hybrid
    # handle fanout point requests
    if (fanoutPointResource := Utils.fanoutPointResource(fopsrn)) is not None and fanoutPointResource.ty == T.GRP_FOPT:
        Logging.logDebug(f'Redirecting request to fanout point: {fanoutPointResource.__srn__}')
        return fanoutPointResource.handleCreateRequest(request, fopsrn, request.headers.originator)
    if (ty := request.headers.resourceType) is None:  # Check for type parameter in request
        return Result(rsc=RC.badRequest, dbg='type parameter missing in CREATE request')
    # Some Resources are not allowed to be created in a request, return immediately
    if ty in [ T.CSEBase, T.REQ ]:
        return Result(rsc=RC.operationNotAllowed, dbg='operation not allowed')
    # Get parent resource and check permissions
    if (res := CSE.dispatcher.retrieveResource(id)).resource is None:
        Logging.log('Parent resource not found')
        return Result(rsc=RC.notFound, dbg='parent resource not found')
    parentResource = res.resource
    if CSE.security.hasAccess(originator, parentResource, Permission.CREATE, ty=ty, isCreateRequest=True, parentResource=parentResource) == False:
        if ty == T.AE:
            return Result(rsc=RC.securityAssociationRequired, dbg='security association required')
        else:
            return Result(rsc=RC.originatorHasNoPrivilege, dbg='originator has no privileges')
    # Check for virtual resource
    if Utils.isVirtualResource(parentResource):
        return parentResource.handleCreateRequest(request, id, originator)  # type: ignore[no-any-return]
    # Add new resource
    if (nres := Factory.resourceFromDict(deepcopy(request.dict), pi=parentResource.ri, ty=ty)).resource is None:  # something wrong, perhaps wrong type
        return Result(rsc=RC.badRequest, dbg=nres.dbg)
    nresource = nres.resource
    # Check whether the parent allows the adding
    if not (res := parentResource.childWillBeAdded(nresource, originator)).status:
        return res.errorResult()
    # check whether the resource already exists, either via ri or srn
    # hasResource() may actually perform the test in one call, but we want to give a distinguished debug message
    if CSE.storage.hasResource(ri=nresource.ri):
        Logging.logWarn(dbg := f'Resource with ri: {nresource.__srn__} already exists')
        return Result(rsc=RC.conflict, dbg=dbg)
    if CSE.storage.hasResource(srn=nresource.__srn__):
        Logging.logWarn(dbg := f'Resource with structured id: {nresource.__srn__} already exists')
        return Result(rsc=RC.conflict, dbg=dbg)
    # Check resource creation
    if (rres := CSE.registration.checkResourceCreation(nresource, originator, parentResource)).rsc != RC.OK:
        return rres.errorResult()
    originator = rres.originator  # originator might have changed during this check
    # Create the resource. If this fails we deregister everything
    if (res := CSE.dispatcher.createResource(nresource, parentResource, originator)).resource is None:
        CSE.registration.checkResourceDeletion(nresource) # deregister resource. Ignore result, we take this from the creation
        return res
    #
    # Handle RCN's
    #
    tpe = res.resource.tpe
    if request.args.rcn is None or request.args.rcn == RCN.attributes:  # Just the resource & attributes
        return res
    elif request.args.rcn == RCN.modifiedAttributes:
        dictOrg = request.dict[tpe]
        dictNew = res.resource.asDict()[tpe]
        # Return only the attributes added/changed by the CSE itself
        return Result(resource={ tpe : Utils.resourceDiff(dictOrg, dictNew) }, rsc=res.rsc, dbg=res.dbg)
    elif request.args.rcn == RCN.hierarchicalAddress:
        return Result(resource={ 'm2m:uri' : Utils.structuredPath(res.resource) }, rsc=res.rsc, dbg=res.dbg)
    elif request.args.rcn == RCN.hierarchicalAddressAttributes:
        return Result(resource={ 'm2m:rce' : { Utils.noDomain(tpe) : res.resource.asDict()[tpe], 'uri' : Utils.structuredPath(res.resource) }}, rsc=res.rsc, dbg=res.dbg)
    elif request.args.rcn == RCN.nothing:
        return Result(rsc=res.rsc, dbg=res.dbg)
    else:
        return Result(rsc=RC.badRequest, dbg='wrong rcn for CREATE')
    # TODO C.rcnDiscoveryResultReferences
def createResource(self, resource:Resource, parentResource:Resource=None, originator:str=None) -> Result:
    """ Persist and activate a new resource under *parentResource*, notify
        the parent, and emit a createResource event. Returns the created
        resource with RC.created on success.
    """
    Logging.logDebug(f'Adding resource ri: {resource.ri}, type: {resource.ty:d}')
    if parentResource is not None:
        Logging.logDebug(f'Parent ri: {parentResource.ri}')
        if not parentResource.canHaveChild(resource):
            if resource.ty == T.SUB:
                err = 'Parent resource is not subscribable'
                Logging.logWarn(err)
                return Result(rsc=RC.targetNotSubscribable, dbg=err)
            else:
                err = f'Invalid child resource type: {T(resource.ty).value}'
                Logging.logWarn(err)
                return Result(rsc=RC.invalidChildResourceType, dbg=err)
    # if not already set: determine and add the srn
    if resource.__srn__ is None:
        resource[resource._srn] = Utils.structuredPath(resource)
    # add the resource to storage
    if (res := resource.dbCreate(overwrite=False)).rsc != RC.created:
        return res
    # Activate the resource
    # This is done *after* writing it to the DB, because in activate the resource might create or access other
    # resources that will try to read the resource from the DB.
    if not (res := resource.activate(parentResource, originator)).status:  # activate the new resource
        resource.dbDelete()
        return res.errorResult()
    # Could be that we changed the resource in the activate, therefore write it again
    if (res := resource.dbUpdate()).resource is None:
        resource.dbDelete()
        return res
    if parentResource is not None:
        parentResource = parentResource.dbReload().resource  # Read the resource again in case it was updated in the DB
        parentResource.childAdded(resource, originator)  # notify the parent resource
    # send a create event
    CSE.event.createResource(resource)  # type: ignore
    return Result(resource=resource, rsc=RC.created)  # everything is fine. resource created.
#########################################################################
#
# Update resources
#
def processUpdateRequest(self, request:CSERequest, originator:str, id:str=None) -> Result:
    """ Handle an UPDATE request: retrieve the target, check permissions
        (including the special acpi-update rules), apply the update, and
        answer according to the requested result content (rcn).
    """
    fopsrn, id = self._checkHybridID(request, id)  # overwrite id if another is given
    # # overwrite id if another is given
    # if id is not None:
    # 	id = id
    # 	srn = None
    # else:
    # 	id = request.id
    # 	srn = request.srn
    # fopsrn, id = Utils.srnFromHybrid(srn, id) # Hybrid
    # handle fanout point requests
    if (fanoutPointResource := Utils.fanoutPointResource(fopsrn)) is not None and fanoutPointResource.ty == T.GRP_FOPT:
        Logging.logDebug(f'Redirecting request to fanout point: {fanoutPointResource.__srn__}')
        return fanoutPointResource.handleUpdateRequest(request, fopsrn, request.headers.originator)
    # Get resource to update
    if (res := self.retrieveResource(id)).resource is None:
        Logging.log('Resource not found')
        return Result(rsc=RC.notFound, dbg=res.dbg)
    resource = res.resource
    if resource.readOnly:
        return Result(rsc=RC.operationNotAllowed, dbg='resource is read-only')
    #
    # Permission check
    # If this is an 'acpi' update?
    if not (res := CSE.security.hasAcpiUpdatePermission(request, resource, originator)).status:
        return res
    if res.data is None:  # data == None indicates that this is NOT an ACPI update. In this case we need a normal permission check
        if CSE.security.hasAccess(originator, resource, Permission.UPDATE) == False:
            return Result(rsc=RC.originatorHasNoPrivilege, dbg='originator has no privileges')
    # Check for virtual resource
    if Utils.isVirtualResource(resource):
        return resource.handleUpdateRequest(request, id, originator)  # type: ignore[no-any-return]
    dictOrg = deepcopy(resource.dict)  # Save for later (rcn == modifiedAttributes diff)
    if (res := self.updateResource(resource, deepcopy(request.dict), originator=originator)).resource is None:
        return res.errorResult()
    resource = res.resource  # re-assign resource (might have been changed during update)
    # Check resource update with registration
    if (rres := CSE.registration.checkResourceUpdate(resource, deepcopy(request.dict))).rsc != RC.OK:
        return rres.errorResult()
    #
    # Handle RCN's
    #
    tpe = resource.tpe
    if request.args.rcn is None or request.args.rcn == RCN.attributes:
        return res
    elif request.args.rcn == RCN.modifiedAttributes:
        dictNew = deepcopy(resource.dict)
        # return only the diff. This includes those attributes that are updated with the same value. Luckily,
        # all key/values that are touched in the update request are in the resource's __modified__ variable.
        return Result(resource={ tpe : Utils.resourceDiff(dictOrg, dictNew, modifiers=resource[Resource._modified]) }, rsc=res.rsc)
    elif request.args.rcn == RCN.nothing:
        return Result(rsc=res.rsc)
    # TODO C.rcnDiscoveryResultReferences
    else:
        return Result(rsc=RC.badRequest, dbg='wrong rcn for UPDATE')
def updateResource(self, resource:Resource, dct:JSON=None, doUpdateCheck:bool=True, originator:str=None) -> Result:
    """ Update *resource* with the attributes in *dct* (validated via
        resource.update() unless *doUpdateCheck* is False), persist it,
        and emit an updateResource event.
    """
    Logging.logDebug(f'Updating resource ri: {resource.ri}, type: {resource.ty:d}')
    if doUpdateCheck:
        if not (res := resource.update(dct, originator)).status:
            return res.errorResult()
    else:
        Logging.logDebug('No check, skipping resource update')
    # send an update event
    CSE.event.updateResource(resource)  # type: ignore
    return resource.dbUpdate()
#########################################################################
#
# Remove resources
#
def processDeleteRequest(self, request:CSERequest, originator:str, id:str=None) -> Result:
    """ Handle a DELETE request: retrieve the target, check permissions,
        build the rcn-dependent answer *before* deletion (the resource and
        its children are gone afterwards), then remove the resource.
    """
    fopsrn, id = self._checkHybridID(request, id)  # overwrite id if another is given
    # if id is not None:
    # 	id = id
    # 	srn = None
    # else:
    # 	id = request.id
    # 	srn = request.srn
    # fopsrn, id = Utils.srnFromHybrid(srn, id) # Hybrid
    # handle fanout point requests
    if (fanoutPointResource := Utils.fanoutPointResource(fopsrn)) is not None and fanoutPointResource.ty == T.GRP_FOPT:
        Logging.logDebug(f'Redirecting request to fanout point: {fanoutPointResource.__srn__}')
        return fanoutPointResource.handleDeleteRequest(request, fopsrn, request.headers.originator)
    # get resource to be removed and check permissions
    if (res := self.retrieveResource(id)).resource is None:
        Logging.logDebug(res.dbg)
        return Result(rsc=RC.notFound, dbg=res.dbg)
    resource = res.resource
    if CSE.security.hasAccess(originator, resource, Permission.DELETE) == False:
        return Result(rsc=RC.originatorHasNoPrivilege, dbg='originator has no privileges')
    # Check for virtual resource
    if Utils.isVirtualResource(resource):
        return resource.handleDeleteRequest(request, id, originator)  # type: ignore[no-any-return]
    #
    # Handle RCN's first. Afterward the resource & children are no more
    #
    tpe = resource.tpe
    result: Any = None
    if request.args.rcn is None or request.args.rcn == RCN.nothing:
        result = None
    elif request.args.rcn == RCN.attributes:
        result = resource
    # resource and child resources, full attributes
    elif request.args.rcn == RCN.attributesAndChildResources:
        children = self.discoverChildren(id, resource, originator, request.args.handling, Permission.DELETE)
        self._childResourceTree(children, resource)  # the function call add attributes to the result resource. Don't use the return value directly
        result = resource
    # direct child resources, NOT the root resource
    elif request.args.rcn == RCN.childResources:
        children = self.discoverChildren(id, resource, originator, request.args.handling, Permission.DELETE)
        childResources:JSON = { resource.tpe : {} }  # Root resource as a dict with no attributes
        self._resourceTreeDict(children, childResources[resource.tpe])
        result = childResources
    elif request.args.rcn == RCN.attributesAndChildResourceReferences:
        children = self.discoverChildren(id, resource, originator, request.args.handling, Permission.DELETE)
        self._resourceTreeReferences(children, resource, request.args.drt)  # the function call add attributes to the result resource
        result = resource
    elif request.args.rcn == RCN.childResourceReferences: # child resource references
        children = self.discoverChildren(id, resource, originator, request.args.handling, Permission.DELETE)
        childResourcesRef:JSON = { resource.tpe: {} }  # Root resource with no attribute
        self._resourceTreeReferences(children, childResourcesRef[resource.tpe], request.args.drt)
        result = childResourcesRef
    # TODO RCN.discoveryResultReferences
    else:
        return Result(rsc=RC.badRequest, dbg='wrong rcn for DELETE')
    # remove resource
    res = self.deleteResource(resource, originator, withDeregistration=True)
    return Result(resource=result, rsc=res.rsc, dbg=res.dbg)
def deleteResource(self, resource:Resource, originator:str=None, withDeregistration:bool=False, parentResource:Resource=None) -> Result:
    """ Delete a resource from the CSE.

        The resource is deactivated first, optionally checked by the registration
        manager, removed from the database, and finally the parent resource is
        notified about the removed child.

        :param resource: the resource instance to remove
        :param originator: originator of the delete operation
        :param withDeregistration: when True, additionally run the registration
            manager's deletion checks before removing the resource
        :param parentResource: optional pre-fetched parent resource; retrieved
            from *resource* when not provided
        :return: Result carrying the deleted resource and the DB operation status
    """
    Logging.logDebug(f'Removing resource ri: {resource.ri}, type: {resource.ty:d}')
    resource.deactivate(originator) # deactivate it first
    # Check resource deletion
    if withDeregistration:
        if not (res := CSE.registration.checkResourceDeletion(resource)).status:
            return Result(rsc=RC.badRequest, dbg=res.dbg)
    # Retrieve the parent resource now, because we need it later
    if parentResource is None:
        parentResource = resource.retrieveParentResource()
    # delete the resource from the DB. Save the result to return later
    res = resource.dbDelete()
    # send a delete event
    CSE.event.deleteResource(resource) # type: ignore
    # Now notify the parent resource
    if parentResource is not None:
        parentResource.childRemoved(resource, originator)
    return Result(resource=resource, rsc=res.rsc, dbg=res.dbg)
#########################################################################
#
# Utility methods
#
def directChildResources(self, pi:str, ty:T=None) -> list[Resource]:
    """ Return all direct child resources of the resource with resource id *pi*,
        optionally filtered by resource type *ty*. Delegates to the storage layer. """
    return CSE.storage.directChildResources(pi, ty)
def countDirectChildResources(self, pi:str, ty:T=None) -> int:
    """ Return the number of direct child resources of the resource with resource
        id *pi*, optionally filtered by resource type *ty*. Delegates to storage. """
    return CSE.storage.countDirectChildResources(pi, ty)
def discoverChildren(self, id:str, resource:Resource, originator:str, handling:JSON, permission:Permission) -> list[Resource]:
    """ Discover the child resources of *resource* and return only those the
        *originator* is allowed to access with the requested *permission*.
        Returns None when the discovery itself yields no result list. """
    discovered = self.discoverResources(id, originator, handling, rootResource=resource, permission=permission).lst
    if discovered is None:
        return None
    # filter by ACP: keep only resources the originator may access
    return [each for each in discovered if CSE.security.hasAccess(originator, each, permission)]
def countResources(self, ty:T|Tuple[T, ...]=None) -> int:
    """ Return the total number of resources in the CSE, optionally restricted
        to a single resource type or a tuple of types. """
    # no filter: let the storage layer count everything
    if ty is None:
        return CSE.storage.countResources()
    # several types: add up the counts per type
    if isinstance(ty, tuple):
        return sum(len(CSE.storage.retrieveResourcesByType(each)) for each in ty)
    # a single type
    return len(CSE.storage.retrieveResourcesByType(ty))
def retrieveResourcesByType(self, ty:T) -> list[Resource]:
    """ Retrieve all resources of type *ty* from storage and return them as
        instantiated resource objects. """
    stored = CSE.storage.retrieveResourcesByType(ty)
    return [Factory.resourceFromDict(each).resource for each in (stored or [])]
#########################################################################
#
# Internal methods for collecting resources and child resources into structures
#
# Create a m2m:uril structure from a list of resources
def _resourcesToURIList(self, resources:list[Resource], drt:int) -> JSON:
    """ Build an m2m:uril structure from a list of resources, using either
        structured or SP-relative unstructured identifiers depending on *drt*. """
    csePrefix = f'{CSE.cseCsi}/'  # SP relative; csi already starts with a "/"
    uris = [Utils.structuredPath(res) if drt == DesiredIdentifierResultType.structured else csePrefix + res.ri
            for res in resources]
    return { 'm2m:uril' : uris }
# Recursively walk the results and build a sub-resource tree for each resource type
def _resourceTreeDict(self, resources:list[Resource], targetResource:Resource|JSON) -> list[Resource]:
    """ Recursively build a sub-resource tree under *targetResource*.

        Direct children of *targetResource* are grouped by type and TPE and added
        to it as lists under the respective type tag. Handled resources are
        removed (greedily) from *resources*; the remaining, unhandled resources
        are returned so callers can continue processing them.
    """
    # 'ri' of the target, used to select only its direct children
    rri = targetResource['ri'] if 'ri' in targetResource else None
    while True: # go multiple times per level through the resources until the list is empty
        result = []
        handledTy = None
        handledTPE = None
        idx = 0
        while idx < len(resources):
            r = resources[idx]
            if rri is not None and r.pi != rri: # only direct children
                idx += 1
                continue
            if r.ty in C.virtualResources: # Skip latest, oldest etc virtual resources
                idx += 1
                continue
            if handledTy is None:
                handledTy = r.ty # this round we check this type
                handledTPE = r.tpe # ... and this TPE (important to distinguish specializations in mgmtObj and fcnt )
            if r.ty == handledTy and r.tpe == handledTPE: # handle only resources of the currently handled type and TPE!
                result.append(r) # append the found resource
                resources.remove(r) # remove resource from the original list (greedy), but don't increment the idx
                resources = self._resourceTreeDict(resources, r) # check recursively whether this resource has children
            else:
                idx += 1 # next resource
        # add all found resources under the same type tag to the rootResource
        if len(result) > 0:
            # sort resources by type and then by lowercase rn
            if self.sortDiscoveryResources:
                result.sort(key=lambda x:(x.ty, x.rn.lower()))
            targetResource[result[0].tpe] = [r.asDict(embedded=False) for r in result]
            # TODO not all child resources are lists [...] Handle just to-1 relations
        else:
            break # end of list, leave while loop
    return resources # Return the remaining list
def _resourceTreeReferences(self, resources:list[Resource], targetResource:Resource|JSON, drt:int) -> Resource|JSON:
    """ Retrieve child resource references of a resource and add them to
        a new target resource as "children".

        When *targetResource* is None a fresh dict is created and the references
        are stored under the top-level 'm2m:rrl' qualifier; otherwise they are
        added to the given resource/dict under the 'ch' attribute.
    """
    tp = 'ch'
    if targetResource is None:
        targetResource = { }
        tp = 'm2m:rrl' # top level in dict, so add qualifier.
    t = []
    # sort resources by type and then by lowercase rn
    if self.sortDiscoveryResources:
        resources.sort(key=lambda x:(x.ty, x.rn.lower()))
    for r in resources:
        if r.ty in [ T.CNT_OL, T.CNT_LA, T.FCNT_OL, T.FCNT_LA ]: # Skip latest, oldest virtual resources
            continue
        # one reference entry: name, type, and structured or unstructured identifier
        ref = { 'nm' : r['rn'], 'typ' : r['ty'], 'val' : Utils.structuredPath(r) if drt == DesiredIdentifierResultType.structured else r.ri}
        if r.ty == T.FCNT:
            ref['spty'] = r.cnd # TODO Is this correct? Actually specializationID in TS-0004 6.3.5.29, but this seems to be wrong
        t.append(ref)
    targetResource[tp] = t
    return targetResource
# Retrieve full child resources of a resource and add them to a new target resource
def _childResourceTree(self, resources:list[Resource], targetResource:Resource|JSON) -> None:
    """ Build the full child resource tree for *resources* and copy the
        resulting per-type entries into *targetResource*. """
    if not resources:
        return
    tree:JSON = {}
    self._resourceTreeDict(resources, tree)  # tree is filled as a side effect
    # copy child resources over to the result resource
    for key, value in tree.items():
        targetResource[key] = value
#########################################################################
#
# Internal methods for ID handling
#
def _checkHybridID(self, request:CSERequest, id:str) -> Tuple[str, str]:
    """ Return a corrected ID and SRN in case this is a hybrid ID.
        srn might be None.
        Returns: (srn, id)
    """
    if id is None:
        # fall back to the identifiers carried in the request
        return Utils.srnFromHybrid(request.srn, request.id) # Hybrid
    return Utils.srnFromHybrid(None, id) # Hybrid
| StarcoderdataPython |
def has_cycle(head):
    """Return True if the linked list starting at *head* contains a cycle.

    Uses Floyd's tortoise-and-hare algorithm: the fast pointer advances two
    nodes per step while the slow pointer advances one; the two can only meet
    inside a cycle.

    Args:
        head: First node of the list (each node exposes a ``next`` attribute),
            or None for an empty list.
    Returns:
        bool: True when a cycle is reachable from *head*, otherwise False.
    """
    slow = head
    fast = head
    # Guard on the FAST pointer — it is the one that reaches the end first.
    # (The original guarded only the slow pointer and then dereferenced
    # fast.next.next unchecked, raising AttributeError on any acyclic list
    # of three or more nodes.)
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            return True
    return False
1652816 | <gh_stars>10-100
from keras.layers import Conv2D, SeparableConv2D, MaxPooling2D, Flatten, Dense
from keras.layers import Dropout, Input, BatchNormalization, Activation, add, GlobalAveragePooling2D
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.utils import plot_model
from keras import callbacks
from keras import models
from keras.applications import Xception
from utils_datagen import TrainValTensorBoard
from utils_basic import chk_n_mkdir
from models.base_model import BaseModel
class XCEPTION_APP(BaseModel):
    """Transfer-learning classifier: a frozen, ImageNet-pre-trained Xception
    convolutional base with a small trainable dense head on top."""

    def __init__(self, output_directory, input_shape, n_classes, verbose=False):
        # All artefacts (weights, graph plot, TensorBoard logs) live below this directory.
        self.output_directory = output_directory + '/xception_kapp'
        chk_n_mkdir(self.output_directory)
        self.model = self.build_model(input_shape, n_classes)
        if verbose:
            self.model.summary()
        self.verbose = verbose
        # Keep the untrained weights so training can be restarted from scratch.
        self.model.save_weights(self.output_directory + '/model_init.hdf5')

    def build_model(self, input_shape, n_classes):
        """Build and compile the model; also prepares checkpoint and TensorBoard
        callbacks in self.callbacks. Returns the compiled Keras model."""
        # Load the Xception convolutional base, pre-trained on ImageNet, without its classifier top
        xception_conv = Xception(weights='imagenet', include_top=False, input_shape=input_shape)
        # Freeze every layer of the pre-trained base; only the new head below is trained
        for layer in xception_conv.layers:
            layer.trainable = False
        # Create the model
        model = models.Sequential()
        # Add the Xception convolutional base model
        model.add(xception_conv)
        # Add the new classification head
        model.add(Flatten())
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(n_classes, activation='softmax', name='predictions'))
        # define the model with input layer and output layer
        model.summary()
        plot_model(model, to_file=self.output_directory + '/model_graph.png', show_shapes=True, show_layer_names=True)
        model.compile(loss=categorical_crossentropy, optimizer=Adam(lr=0.01), metrics=['acc'])
        # checkpoint: keep the weights with the best (lowest) training loss
        file_path = self.output_directory + '/best_model.hdf5'
        model_checkpoint = callbacks.ModelCheckpoint(filepath=file_path, monitor='loss', save_best_only=True)
        # Tensorboard log
        log_dir = self.output_directory + '/tf_logs'
        chk_n_mkdir(log_dir)
        tb_cb = TrainValTensorBoard(log_dir=log_dir)
        self.callbacks = [model_checkpoint, tb_cb]
        return model
| StarcoderdataPython |
118075 | <filename>simulation/horaire.py
"""
Composants reliés à l'horaire d'activité.
-----------------------------------------
"""
from .. import ecs
from . import stochastique
class Horaire(ecs.Component):
    """Basic schedule repeating at a fixed interval."""
    def __init__(self, mom, cible, mtags, periode):
        """Configure the schedule. The key state variable is ``actif`` and lives in a
        target component; the target is responsible for its initialisation. This
        schedule may be updated once per second or per minute — it makes no
        difference because the tags are expressed in minutes.

        :param mom: handle to the simulation clock
        :type mom: :class:`sim.base.Moment`
        :param cible: a target component exposing the state variable ``actif``
        :param mtags: list of (d, h, m) state-change triplets; d=0 is the current day
        :param periode: a (d, h, m) triplet: the cycle repeats every d days, h hours and m minutes
        """
        self.mom=mom # Moment instance (simulation clock)
        self.target=cible # the scheduled target (must expose the state variable `actif`)
        # cycle period converted to seconds
        self.periode=60*(periode[0]*1440+periode[1]*60+periode[2])
        # the state-change tags (in seconds) for self.target.actif over 24h
        self.tags=[60*(x[0]*1440+x[1]*60+x[2])-self.mom.t0 for x in mtags]
        self.nextTagIdx=0 # assumes we start at mom.t0 and that self.tags[0]>mom.t0
        if not self.tags:
            print("Attention! Horaire: Pas de tags")
    def update(self):
        """Update the state variable ``actif`` in the target according to the current time and the tags."""
        if self.tags and (self.mom.t%self.periode)==self.tags[self.nextTagIdx]:
            self.target.actif=not self.target.actif
            self.nextTagIdx+=1
            if self.nextTagIdx>=len(self.tags): self.nextTagIdx=0
class HoraireSto(ecs.Component):
    """Stochastic schedule repeating at a fixed interval, with random breakdowns."""
    def __init__(self, mom, cible, mtags, periode, mtbf, mttr, mttrMin=-1, mttrAlpha=-1):
        """Configure the schedule. The key state variable is ``actif`` and lives in a
        target component; the target is responsible for its initialisation. This
        schedule may be updated per second or per minute — it makes no difference
        because the tags are in minutes. mtbf and mttr must be given in minutes.

        By default the duration of a breakdown is fixed to mttr. However, if
        acceptable values are given for mttrMin and mttrAlpha, a triangular
        distribution is used instead; in that case mttrMin<mttr and
        0.0<=mttrAlpha<=1.0 must hold. The mode is fixed to
        mttrMode=mttrMin+mttrAlpha*(mttr-mttrMin): with mttrAlpha=0 the
        distribution stretches maximally toward large values, with mttrAlpha=1
        it is centred symmetrically on the mean.

        :param mom: handle to the simulation clock
        :type mom: :class:`sim.base.Moment`
        :param cible: a target component exposing the state variable ``actif``
        :param mtags: list of (d, h, m) state-change triplets; d=0 is the current day
        :param periode: a (d, h, m) triplet: the cycle repeats every d days, h hours and m minutes
        :param mtbf: mean time before failure in wall-clock minutes
        :param mttr: mean time to repair in wall-clock minutes
        :param mttrMin: minimum mttr for the triangular distribution
        :param mttrAlpha: factor in [0,1] controlling the skew of the triangular distribution (1=centred)
        """
        self.mom=mom # Moment instance (simulation clock)
        self.target=cible # the scheduled target (must expose the state variable `actif`)
        self.periode=60*(periode[0]*1440+periode[1]*60+periode[2])
        # the state-change tags (in seconds) for self.target.actif over 24h
        self.tags=[60*(x[0]*1440+x[1]*60+x[2])-self.mom.t0 for x in mtags]
        self.nextTagIdx=0 # assumes we start at mom.t0 and that self.tags[0]>mom.t0
        # stochastic part: breakdown trigger and repair-time distribution
        self.triggerFreq=stochastique.TriggerFrequence(1.0/mtbf)
        self.mttr=stochastique.ConstantValue(mttr)
        if 0<mttrMin and mttrMin<mttr and 0<=mttrAlpha and mttrAlpha<=1:
            mttrMode=(int)(mttrMin+mttrAlpha*(mttr-mttrMin))
            mttrMax=3*mttr-mttrMin-mttrMode # chosen so the triangular mean equals mttr
            if mttr<mttrMax:
                print("HoraireSto avec loi triangulaire (min,mode,moy,max)=",mttrMin,mttrMode,mttr,mttrMax)
                self.mttr=stochastique.TriangularDistributionSample(mttrMin,mttrMax,mttrMode)
        self.actif_horaire=self.target.actif # schedule state initialised from the target
        self.duree_arret=0 # remaining breakdown duration, in minutes
        self.new_trigger=False
    def update(self):
        """Update the state variable ``actif`` in the target according to the current time, the tags and random breakdowns."""
        self.new_trigger=False
        if self.nextTagIdx>=len(self.tags):
            pass # no tags, so no scheduled state change
            #print("Erreur HoraireJour: Pas de tags")
        elif (self.mom.t%self.periode)==self.tags[self.nextTagIdx]:
            self.actif_horaire=not self.actif_horaire # toggle the schedule state
            self.nextTagIdx+=1
            if self.nextTagIdx>=len(self.tags): self.nextTagIdx=0
        if self.mom.tickM: # once per simulated minute
            self.duree_arret-=1
            if self.triggerFreq.get():
                self.new_trigger=True
                #print(" *** trigger!",self.mom.getJHMS())
                self.duree_arret=self.mttr.get()
            #if self.duree_arret==1: print(" === fin trigger!",self.mom.getJHMS())
        # a breakdown in progress overrides the schedule
        if self.duree_arret>0: self.target.actif=False
        else: self.target.actif=self.actif_horaire
class HoraireStoTAP(ecs.Component):
    """Special stochastic schedule for a TAP, with repetition."""
    def __init__(self, mom, cible, mtags, periode, arretplan, freq=None, arretnonplan=None):
        """Configure the schedule. The key state variable is ``actif`` and lives in a
        target component. It is forced to True at initialisation. The tags mark the
        start of the planned stops.

        :param mom: handle to the simulation clock
        :type mom: :class:`sim.base.Moment`
        :param cible: a target component exposing the state variable ``actif``
        :param mtags: list of (d, h, m) state-change triplets; d=0 is the current day
        :param periode: a (d, h, m) triplet: the cycle repeats every d days, h hours and m minutes
        :param arretplan: object with a ``get`` method giving the duration (in seconds) of the planned stops in mtags
        :param freq: object that triggers unplanned stops
        :param arretnonplan: object with a ``get`` method giving the duration (in seconds) of the unplanned stops triggered via freq
        """
        self.mom=mom # Moment instance (simulation clock)
        self.target=cible # the scheduled target (must expose the state variable `actif`)
        self.periode=60*(periode[0]*1440+periode[1]*60+periode[2])
        # the state-change tags (in seconds) for self.target.actif over 24h
        self.tags=[60*(x[0]*1440+x[1]*60+x[2])-self.mom.t0 for x in mtags]
        self.nextTagIdx=0 # assumes we start at mom.t0 and that self.tags[0]>mom.t0
        self.arretplan=arretplan # object with a get method for the duration of a planned stop
        self.freq=freq # frequency object with a get method triggering an unplanned stop
        self.arretnonplan=arretnonplan # object with a get method for the duration of an unplanned stop
        self.target.actif=True # initialised to True
        self.duree=-1 # remaining duration of the current stop
        self.trigger=False # pending unplanned-stop trigger
    def update(self):
        """Update the state variable ``actif`` in the target according to the current time and the tags."""
        if self.freq is not None and not self.trigger:
            self.trigger=self.freq.get() # test for an unplanned-stop trigger, unless one is already pending
        self.duree-=1 # count down the current stop
        if self.nextTagIdx>=len(self.tags):
            print("Erreur HoraireStoTAP: Pas de tags")
        elif (self.mom.t%self.periode)==self.tags[self.nextTagIdx]: # start of a planned stop
            self.duree=self.arretplan.get() # stochastic duration of the planned stop
            #print("Arret planifie (sec):",self.duree)
            self.nextTagIdx+=1
            if self.nextTagIdx>=len(self.tags): self.nextTagIdx=0
        if self.duree<=0 and self.trigger: # not currently stopped, but an unplanned stop was triggered
            self.duree=self.arretnonplan.get() # duration of the unplanned stop
            #print(" Arret non-planifie (sec):",self.duree)
            self.trigger=False # reset the trigger
        # special case for a full warehouse
        # NOTE(review): self.entrepot is an external handle assumed to be set by the model — confirm
        if self.duree<=0 and not self.entrepot.place4crue():
            self.duree=24*3600 # 24h pause because the warehouse is full (was 48h before the Oct 28 update)
            #print(self.mom.getJHMS()," Pause TAP pour 48h car entrepot plein, duree de (sec)",self.duree)
        # update `actif`: inactive while a stop is in progress
        if self.duree>0: self.target.actif=False
        else: self.target.actif=True
class HoraireJour(ecs.Component):
    """Basic schedule for one day (24h), repeating. WARNING: obsolete and unsupported."""
    def __init__(self, mom, cible, mtags):
        """Configure the schedule. The key state variable is ``actif`` and lives in a
        target component; the target is responsible for its initialisation.

        :param mom: handle to the simulation clock
        :type mom: :class:`sim.base.Moment`
        :param cible: a target component exposing the state variable ``actif``
        :param mtags: list of state-change minutes, integers in 0-1439
        """
        self.mom=mom # Moment instance (simulation clock)
        self.target=cible # the scheduled target (must expose the state variable `actif`)
        # the state-change tags (in seconds) for self.target.actif over 24h
        self.tags=[x*60 for x in mtags]
        self.nextTagIdx=0 # assumes we start at mom.t0 and that self.tags[0]>mom.t0
    def update(self):
        """Update the state variable ``actif`` in the target according to the current minute and the tags."""
        if self.nextTagIdx>=len(self.tags):
            print("Erreur HoraireJour: Pas de tags")
        elif self.mom.tnow==self.tags[self.nextTagIdx]:
            self.target.actif=not self.target.actif
            self.nextTagIdx+=1
            if self.nextTagIdx>=len(self.tags): self.nextTagIdx=0
| StarcoderdataPython |
1685469 | <gh_stars>1-10
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
from eggroll.core.pair_store import *
from eggroll.core.pair_store.adapter import PairAdapter
from eggroll.core.pair_store.format import ArrayByteBuffer, PairBinReader, PairBinWriter
from eggroll.core.constants import StoreTypes
class TestPairStore(unittest.TestCase):
    """Exercises every PairAdapter backend (LMDB, rocksdb/leveldb, file, mmap,
    cache) plus the pair binary read/write format and a simple two-store join."""
    dir = "./"  # base directory under which the temporary stores are created
    # total = 1000 * 1000
    total = 100000  # number of key/value pairs written per case

    def _run_case(self, db: PairAdapter):
        """Write ``total`` pairs into *db*, then iterate the store three times,
        checking the item count each pass (also a rough read-performance probe)."""
        start = time.time()
        value = 's' * 1000
        with db.new_batch() as wb:
            for i in range(self.total):
                wb.put(str(i).encode(), value.encode())
        print("put:", time.time() - start)
        for i in range(3):
            start = time.time()
            with db.iteritems() as rb:
                cnt = 0
                for k, v in rb:
                    if cnt % 100000 == 0:
                        print("item:",cnt, k, v)
                    cnt += 1
                print(cnt)
                assert cnt == self.total
            print("time:", time.time() - start)

    def _run_join(self, db1: PairAdapter, db2: PairAdapter):
        """Fill two stores with identical keys, then iterate *db1* and look each
        key up in *db2* — a naive hash-join access pattern."""
        start = time.time()
        value = 's' * 1000
        with db1.new_batch() as wb:
            for i in range(self.total):
                wb.put(str(i).encode(), value.encode())
        with db2.new_batch() as wb:
            for i in range(self.total):
                wb.put(str(i).encode(), value.encode())
        print("put:", time.time() - start)
        with db1.iteritems() as rb1:
            cnt = 0
            for k, v in rb1:
                if cnt % 100000 == 0:
                    print("item:",cnt, k, v)
                cnt += 1
                db2.get(k)
            print(cnt)
            assert cnt == self.total
        print("time:", time.time() - start)

    def test_lmdb(self):
        """Round-trip the standard case through the LMDB adapter."""
        with create_pair_adapter({"store_type": StoreTypes.ROLLPAIR_LMDB, "path": self.dir + "LMDB"}) as db:
            self._run_case(db)
            db.destroy()

    def test_lmdb_seek(self):
        """Check iterator seek(): position on a missing key and resume iteration."""
        with create_pair_adapter({"store_type": StoreTypes.ROLLPAIR_LMDB, "path": "./data/LMDB/test_pair_store"}) as db:
            with db.new_batch() as wb:
                for i in range(7):
                    if i == 5:
                        continue  # deliberately leave a gap at k5
                    wb.put(('k' + str(i)).encode(), ('v' + str(i)).encode())
            with db.iteritems() as it:
                for k, v in it:
                    print(k, v)
                print("++++++++++++++++++++++++++++++++++++++++++++++++")
                it.first()
                print(it.seek(b'k5'))
                for k, v in it:
                    print(k, v)

    def test_rocksdb(self):
        """Round-trip the standard case through the LevelDB/RocksDB adapter."""
        with create_pair_adapter({"store_type": StoreTypes.ROLLPAIR_LEVELDB, "path": self.dir + "rocksdb"}) as db:
            self._run_case(db)
            db.destroy()

    def test_file(self):
        """Round-trip the standard case through the file adapter."""
        with create_pair_adapter({"store_type": StoreTypes.ROLLPAIR_FILE, "path": self.dir + "file"}) as db:
            self._run_case(db)
            db.destroy()

    def test_mmap(self):
        """Round-trip the standard case through the mmap adapter."""
        with create_pair_adapter({"store_type": StoreTypes.ROLLPAIR_MMAP, "path": self.dir + "mmap"}) as db:
            self._run_case(db)
            db.destroy()

    def test_cache(self):
        """Round-trip the standard case through the in-memory cache adapter."""
        with create_pair_adapter({"store_type": StoreTypes.ROLLPAIR_CACHE, "path": self.dir + "cache"}) as db:
            self._run_case(db)
            db.destroy()

    def test_byte_buffer(self):
        """Write then read back an int32 and raw bytes through ArrayByteBuffer."""
        bs = bytearray(1024)
        buf = ArrayByteBuffer(bs)
        buf.write_int32(12)
        buf.write_bytes(b"34")
        buf.set_offset(0)
        assert buf.read_int32() == 12
        assert buf.read_bytes(2) == b"34"

    def test_pair_bin(self):
        """Fill a deliberately small buffer until PairBinWriter overflows, restart
        the writer, and read the surviving pairs back."""
        bs = bytearray(32)
        buf = ArrayByteBuffer(bs)
        writer = PairBinWriter(buf)
        for i in range(10):
            try:
                writer.write(str(i).encode(), str(i).encode())
            except IndexError as e:
                # buffer full: dump what was written, then start over in the same buffer
                print(buf.read_bytes(buf.get_offset(), 0))
                buf.set_offset(0)
                writer = PairBinWriter(buf)
                writer.write(str(i).encode(), str(i).encode())
        buf.set_offset(0)
        reader = PairBinReader(buf)
        print("last")
        print(list(reader.read_all()))

    def test_join(self):
        """Run the naive join access pattern across two LMDB stores."""
        with create_pair_adapter({"store_type": StoreTypes.ROLLPAIR_LMDB, "path": self.dir + "lmdb"}) as db1, \
                create_pair_adapter({"store_type": StoreTypes.ROLLPAIR_LMDB, "path": self.dir + "lmdb2"}) as db2:
            self._run_join(db1, db2)
            db1.destroy()
            db2.destroy()
1608277 | from hallo.events import EventMessage, EventMode
from hallo.server import Server
from hallo.test.server_mock import ServerMock
def test_voice_not_irc(hallo_getter):
    """The voice function must reply with an error on non-IRC server types."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = "NOT_IRC"
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    chan1.add_user(user1)
    chan1.add_user(
        serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    )
    try:
        test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "voice"))
        data = serv1.get_send_data(1, chan1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "only available for irc" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_0_privmsg(hallo_getter):
    """Voice with no channel argument, sent as a private message, must error."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    chan1.add_user(user1)
    chan1.add_user(
        serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    )
    try:
        test_hallo.function_dispatcher.dispatch(EventMessage(serv1, None, user1, "voice"))
        data = serv1.get_send_data(1, user1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "in a private message" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_0_no_power(hallo_getter):
    """Voice must error when hallo itself lacks op in the channel."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan1.in_channel = True
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    chan1.add_user(user1)
    chan1.add_user(
        serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    )
    try:
        test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "voice"))
        data = serv1.get_send_data(1, chan1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "don't have power" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_0_is_voice(hallo_getter):
    """Voice must error when the requesting user already has voice."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan1.in_channel = True
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    chan1.add_user(user1)
    chan1.add_user(user_hallo)
    chan1_user1 = chan1.get_membership_by_user(user1)
    chan1_user1.is_op = False
    chan1_user1.is_voice = True
    chan1_hallo = chan1.get_membership_by_user(user_hallo)
    chan1_hallo.is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "voice"))
        data = serv1.get_send_data(1, chan1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "already has voice" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_0_is_op(hallo_getter):
    """Voice must error when the requesting user is already an op (implies voice)."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan1.in_channel = True
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    chan1.add_user(user1)
    chan1.add_user(user_hallo)
    chan1_user1 = chan1.get_membership_by_user(user1)
    chan1_user1.is_op = True
    chan1_hallo = chan1.get_membership_by_user(user_hallo)
    chan1_hallo.is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "voice"))
        data = serv1.get_send_data(1, chan1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "already has voice" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_0(hallo_getter):
    """Happy path: voice in-channel with no argument sends +v for the caller."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan1.in_channel = True
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    chan1.add_user(user1)
    chan1.add_user(user_hallo)
    chan1_user1 = chan1.get_membership_by_user(user1)
    chan1_user1.is_op = False
    chan1_hallo = chan1.get_membership_by_user(user_hallo)
    chan1_hallo.is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "voice"))
        data = serv1.get_send_data(2)
        assert "error" not in data[1].text.lower()
        assert data[0].channel == chan1
        assert data[1].channel == chan1
        assert data[0].__class__ == EventMode
        assert data[1].__class__ == EventMessage
        assert data[0].mode_changes == "+v {}".format(user1.address)
        assert "status given" in data[1].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_1priv_not_known(hallo_getter):
    """Private-message voice naming an unknown channel must error."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    chan1.add_user(user1)
    chan1.add_user(user_hallo)
    chan1_user1 = chan1.get_membership_by_user(user1)
    chan1_user1.is_op = False
    chan1_hallo = chan1.get_membership_by_user(user_hallo)
    chan1_hallo.is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(serv1, None, user1, "voice other_channel")
        )
        data = serv1.get_send_data(1, user1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "other_channel is not known" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_1priv_not_in_channel(hallo_getter):
    """Private-message voice for a channel hallo is not in must error."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    serv1.get_channel_by_address("other_channel", "other_channel")
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    chan1.add_user(user1)
    chan1.add_user(user_hallo)
    chan1_user1 = chan1.get_membership_by_user(user1)
    chan1_user1.is_op = False
    chan1_hallo = chan1.get_membership_by_user(user_hallo)
    chan1_hallo.is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(serv1, None, user1, "voice other_channel")
        )
        data = serv1.get_send_data(1, user1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "not in that channel" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_1priv_user_not_there(hallo_getter):
    """Private-message voice must error when the caller is not in the named channel."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan1.in_channel = True
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    chan1.add_user(user_hallo)
    chan1_hallo = chan1.get_membership_by_user(user_hallo)
    chan1_hallo.is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(serv1, None, user1, "voice test_chan1")
        )
        data = serv1.get_send_data(1, user1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "test_user1 is not in test_chan1" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_1priv_no_power(hallo_getter):
    """Private-message voice must error when hallo lacks op in the named channel."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan1.in_channel = True
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    chan1.add_user(user1)
    chan1.add_user(user_hallo)
    chan1_user1 = chan1.get_membership_by_user(user1)
    chan1_user1.is_op = False
    chan1_hallo = chan1.get_membership_by_user(user_hallo)
    chan1_hallo.is_op = False
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(serv1, None, user1, "voice test_chan1")
        )
        data = serv1.get_send_data(1, user1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "don't have power" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_1priv_is_voice(hallo_getter):
    """Private-message voice must error when the caller already has voice there."""
    test_hallo = hallo_getter({"channel_control"})
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    serv1.type = Server.TYPE_IRC
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan1.in_channel = True
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
    chan1.add_user(user1)
    chan1.add_user(user_hallo)
    chan1_user1 = chan1.get_membership_by_user(user1)
    chan1_user1.is_op = False
    chan1_user1.is_voice = True
    chan1_hallo = chan1.get_membership_by_user(user_hallo)
    chan1_hallo.is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(serv1, None, user1, "voice test_chan1")
        )
        data = serv1.get_send_data(1, user1, EventMessage)
        assert "error" in data[0].text.lower()
        assert "already has voice" in data[0].text.lower()
    finally:
        test_hallo.remove_server(serv1)
def test_voice_1priv_is_op(hallo_getter):
    """Voicing a channel by privmsg errors when the caller is already an op."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel = server.get_channel_by_address("test_chan1", "test_chan1")
    channel.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    # Both the sender and hallo are ops; an op already has voice rights.
    for member in (sender, bot):
        channel.add_user(member)
        channel.get_membership_by_user(member).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, None, sender, "voice test_chan1")
        )
        replies = server.get_send_data(1, sender, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "already has voice" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_1priv(hallo_getter):
    """Voicing a channel by privmsg sets +v on the caller and confirms."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel = server.get_channel_by_address("test_chan1", "test_chan1")
    channel.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel.add_user(sender)
    channel.add_user(bot)
    # Hallo has op, so it is able to grant voice to the sender.
    channel.get_membership_by_user(sender).is_op = False
    channel.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, None, sender, "voice test_chan1")
        )
        mode_event, notice = server.get_send_data(2)
        assert "error" not in notice.text.lower()
        assert mode_event.channel == channel
        assert notice.user == sender
        assert mode_event.__class__ == EventMode
        assert notice.__class__ == EventMessage
        assert mode_event.mode_changes == "+v {}".format(sender.address)
        assert "status given" in notice.text.lower()
    finally:
        test_hallo.remove_server(server)
def test_voice_1_chan_user_not_there(hallo_getter):
    """`voice <chan>` errors when the caller is not in the named channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    other = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    # The sender is only in channel1; channel2 holds a different user.
    channel1.add_user(sender)
    channel1.add_user(bot)
    channel2.add_user(other)
    channel2.add_user(bot)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.get_membership_by_user(bot).is_op = True
    channel2.get_membership_by_user(other).is_op = False
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "test_user1 is not in test_chan2" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_1_chan_no_power(hallo_getter):
    """`voice <chan>` errors when hallo has no op in the named channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    server.get_user_by_address("test_user2", "test_user2")  # known to server, unused
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.add_user(bot)
    channel2.add_user(sender)
    channel2.add_user(bot)
    channel1.get_membership_by_user(sender).is_op = False
    channel2.get_membership_by_user(sender).is_op = False
    # Hallo has op in channel1 but not in channel2, the voice target.
    channel1.get_membership_by_user(bot).is_op = True
    channel2.get_membership_by_user(bot).is_op = False
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "don't have power" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_1_chan_is_voice(hallo_getter):
    """`voice <chan>` errors when the caller already has voice there."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    server.get_user_by_address("test_user2", "test_user2")  # known to server, unused
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.add_user(bot)
    channel2.add_user(sender)
    channel2.add_user(bot)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.get_membership_by_user(bot).is_op = True
    # The sender is already voiced in channel2, the voice target.
    sender_membership = channel2.get_membership_by_user(sender)
    sender_membership.is_op = False
    sender_membership.is_voice = True
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "already has voice" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_1_chan_is_op(hallo_getter):
    """`voice <chan>` errors when the caller is already an op there."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    server.get_user_by_address("test_user2", "test_user2")  # known to server, unused
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.add_user(bot)
    channel2.add_user(sender)
    channel2.add_user(bot)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.get_membership_by_user(bot).is_op = True
    # The sender already holds op in channel2, which implies voice.
    channel2.get_membership_by_user(sender).is_op = True
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "already has voice" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_1_chan(hallo_getter):
    """`voice <chan>` voices the caller in that channel and confirms."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    server.get_user_by_address("test_user2", "test_user2")  # known to server, unused
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.add_user(bot)
    channel2.add_user(sender)
    channel2.add_user(bot)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.get_membership_by_user(bot).is_op = True
    # Hallo has op in channel2, so it may voice the sender there.
    channel2.get_membership_by_user(sender).is_op = False
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2")
        )
        mode_event, notice = server.get_send_data(2)
        assert "error" not in notice.text.lower()
        assert mode_event.channel == channel2
        assert notice.channel == channel1
        assert mode_event.__class__ == EventMode
        assert notice.__class__ == EventMessage
        assert mode_event.mode_changes == "+v {}".format(sender.address)
        assert "status given" in notice.text.lower()
    finally:
        test_hallo.remove_server(server)
def test_voice_1_user_not_here(hallo_getter):
    """`voice <user>` errors when the target is not in the current channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel = server.get_channel_by_address("test_chan1", "test_chan1")
    channel.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    server.get_user_by_address("test_user2", "test_user2")  # known, not in channel
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel.add_user(sender)
    channel.get_membership_by_user(sender).is_op = False
    channel.add_user(bot)
    channel.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel, sender, "voice test_user2")
        )
        replies = server.get_send_data(1, channel, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "test_user2 is not in test_chan1" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_1_user_no_power(hallo_getter):
    """`voice <user>` errors when hallo has no op in the channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel = server.get_channel_by_address("test_chan1", "test_chan1")
    channel.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    # Nobody in the channel holds op, so hallo cannot set the mode.
    for member in (sender, bot, target):
        channel.add_user(member)
        channel.get_membership_by_user(member).is_op = False
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel, sender, "voice test_user2")
        )
        replies = server.get_send_data(1, channel, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "don't have power" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_1_user_is_voice(hallo_getter):
    """`voice <user>` errors when the target already has voice."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel = server.get_channel_by_address("test_chan1", "test_chan1")
    channel.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel.add_user(sender)
    channel.get_membership_by_user(sender).is_op = False
    channel.add_user(bot)
    channel.get_membership_by_user(bot).is_op = True
    # The target is already voiced, so the request should be rejected.
    channel.add_user(target)
    target_membership = channel.get_membership_by_user(target)
    target_membership.is_op = False
    target_membership.is_voice = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel, sender, "voice test_user2")
        )
        replies = server.get_send_data(1, channel, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "already has voice" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_1_user_is_op(hallo_getter):
    """`voice <user>` errors when the target is already an op."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel = server.get_channel_by_address("test_chan1", "test_chan1")
    channel.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel.add_user(sender)
    channel.get_membership_by_user(sender).is_op = False
    channel.add_user(bot)
    channel.get_membership_by_user(bot).is_op = True
    # The target already holds op, which implies voice.
    channel.add_user(target)
    channel.get_membership_by_user(target).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel, sender, "voice test_user2")
        )
        replies = server.get_send_data(1, channel, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "already has voice" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_1_user(hallo_getter):
    """`voice <user>` voices the named user in the current channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel = server.get_channel_by_address("test_chan1", "test_chan1")
    channel.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel.add_user(sender)
    channel.get_membership_by_user(sender).is_op = False
    # Hallo has op, so it is able to grant voice to the target.
    channel.add_user(bot)
    channel.get_membership_by_user(bot).is_op = True
    channel.add_user(target)
    channel.get_membership_by_user(target).is_op = False
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel, sender, "voice test_user2")
        )
        mode_event, notice = server.get_send_data(2)
        assert "error" not in notice.text.lower()
        assert mode_event.channel == channel
        assert notice.channel == channel
        assert mode_event.__class__ == EventMode
        assert notice.__class__ == EventMessage
        assert mode_event.mode_changes == "+v {}".format(target.address)
        assert "status given" in notice.text.lower()
    finally:
        test_hallo.remove_server(server)
def test_voice_2_chan_user_not_known(hallo_getter):
    """`voice <chan> <user>` errors when the user is unknown to the server."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = False
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        # test_user3 was never registered with the server.
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2 test_user3")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "test_user3 is not known" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_chan_user_not_there(hallo_getter):
    """`voice <chan> <user>` errors when the user is not in that channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    # test_user3 is known to the server but joins no channel.
    server.get_user_by_address("test_user3", "test_user3")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = False
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2 test_user3")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "test_user3 is not in test_chan2" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_chan_no_power(hallo_getter):
    """`voice <chan> <user>` errors when hallo has no op in that channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = False
    # Hallo lacks op in channel2, where the mode would be set.
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = False
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2 test_user2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "don't have power" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_chan_is_voice(hallo_getter):
    """`voice <chan> <user>` errors when the user is already voiced there."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    # The target already has voice in channel2.
    channel2.add_user(target)
    target_membership = channel2.get_membership_by_user(target)
    target_membership.is_op = False
    target_membership.is_voice = True
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2 test_user2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "already has voice" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_chan_is_op(hallo_getter):
    """`voice <chan> <user>` errors when the user is already an op there."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    # The target already holds op in channel2, which implies voice.
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = True
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2 test_user2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "already has voice" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_chan(hallo_getter):
    """`voice <chan> <user>` voices the named user in the named channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = False
    # Hallo has op in channel2, so it can voice the target there.
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_chan2 test_user2")
        )
        mode_event, notice = server.get_send_data(2)
        assert "error" not in notice.text.lower()
        assert mode_event.channel == channel2
        assert notice.channel == channel1
        assert mode_event.__class__ == EventMode
        assert notice.__class__ == EventMessage
        assert mode_event.mode_changes == "+v {}".format(target.address)
        assert "status given" in notice.text.lower()
    finally:
        test_hallo.remove_server(server)
def test_voice_2_user_not_in_channel(hallo_getter):
    """`voice <user> <chan>` errors when hallo is not in the channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    # Hallo is not present in channel2.
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = False
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = False
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_user2 test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "i'm not in that channel" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_user_user_not_known(hallo_getter):
    """`voice <user> <chan>` errors when the user is unknown to the server."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = False
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        # test_user3 was never registered with the server.
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_user3 test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "test_user3 is not known" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_user_user_not_there(hallo_getter):
    """`voice <user> <chan>` errors when the user is not in that channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    # test_user3 is known to the server but joins no channel.
    server.get_user_by_address("test_user3", "test_user3")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = False
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_user3 test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "test_user3 is not in test_chan2" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_user_no_power(hallo_getter):
    """`voice <user> <chan>` errors when hallo has no op in that channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = False
    # Hallo lacks op in channel2, where the mode would be set.
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = False
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_user2 test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "don't have power" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_user_is_voice(hallo_getter):
    """`voice <user> <chan>` errors when the user is already voiced there."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    # The target already has voice in channel2.
    channel2.add_user(target)
    target_membership = channel2.get_membership_by_user(target)
    target_membership.is_op = False
    target_membership.is_voice = True
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_user2 test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "already has voice" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_user_is_op(hallo_getter):
    """`voice <user> <chan>` errors when the user is already an op there."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    # The target already holds op in channel2, which implies voice.
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = True
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_user2 test_chan2")
        )
        replies = server.get_send_data(1, channel1, EventMessage)
        text = replies[0].text.lower()
        assert "error" in text
        assert "already has voice" in text
    finally:
        test_hallo.remove_server(server)
def test_voice_2_user(hallo_getter):
    """`voice <user> <chan>` voices the named user in the named channel."""
    test_hallo = hallo_getter({"channel_control"})
    server = ServerMock(test_hallo)
    server.name = "test_serv1"
    server.type = Server.TYPE_IRC
    test_hallo.add_server(server)
    channel1 = server.get_channel_by_address("test_chan1", "test_chan1")
    channel1.in_channel = True
    channel2 = server.get_channel_by_address("test_chan2", "test_chan2")
    channel2.in_channel = True
    sender = server.get_user_by_address("test_user1", "test_user1")
    target = server.get_user_by_address("test_user2", "test_user2")
    nick = server.get_nick()
    bot = server.get_user_by_address(nick.lower(), nick)
    channel1.add_user(sender)
    channel1.get_membership_by_user(sender).is_op = False
    channel1.add_user(bot)
    channel1.get_membership_by_user(bot).is_op = True
    channel2.add_user(target)
    channel2.get_membership_by_user(target).is_op = False
    # Hallo has op in channel2, so it can voice the target there.
    channel2.add_user(bot)
    channel2.get_membership_by_user(bot).is_op = True
    try:
        test_hallo.function_dispatcher.dispatch(
            EventMessage(server, channel1, sender, "voice test_user2 test_chan2")
        )
        mode_event, notice = server.get_send_data(2)
        assert "error" not in notice.text.lower()
        assert mode_event.channel == channel2
        assert notice.channel == channel1
        assert mode_event.__class__ == EventMode
        assert notice.__class__ == EventMessage
        assert mode_event.mode_changes == "+v {}".format(target.address)
        assert "status given" in notice.text.lower()
    finally:
        test_hallo.remove_server(server)
| StarcoderdataPython |
55037 | from .filesystem import find_files
from .piano_roll import (roll_encode, roll_decode, get_roll_index,
roll_subsample)
from .metrics import calc_stats, calc_metrics, metrics_empty_dict
from .loggers import write_metrics, write_images, write_audio
from .renderers import (plot_eval, plot_estim, plot_certainty,
roll2audio)
from .settings import Trainer, save_run_config, flush_n_close
| StarcoderdataPython |
3319178 | """wordCount URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
# URL routes for the word-count app; each path maps to a view in views.py.
urlpatterns = [
    # Root URL, rendered by views.home.
    path('', views.home),
    # Home page; addressable in templates as {% url 'home' %}.
    path('homepage/', views.homepage, name= 'home'),
    # Static page with information about the app.
    path('about/', views.about, name= 'about'),
    # Word-count result page; addressable as {% url 'count' %}.
    path('count/', views.count, name='count'),
    # Alternate URL for the same count view. Templates reference the route by
    # name (action="{% url 'count' %}"), so either URL resolves correctly.
    # NOTE(review): two routes share name='count'; reverse('count') resolves to
    # whichever is registered last — confirm that is intended.
    path('countWords/', views.count, name='count'),
    # Cube demo view.
    path('cube/', views.cube),
]
| StarcoderdataPython |
3347060 | #!/usr/bin/python
"""
Filter the results of munki's MANAGED_INSTALL_REPORT.plist
to these items: 'EndTime', 'StartTime', 'ManifestName', 'ManagedInstallVersion'
'Errors', 'Warnings', 'RunType'
"""
import plistlib
import sys
import os
import CoreFoundation
# Verbose output is off unless the script is invoked with the "debug" argument.
DEBUG = False
# Path to the default munki install dir
default_install_dir = '/Library/Managed Installs'
# Checks munki preferences to see where the install directory is set to.
# Returns None when the "ManagedInstallDir" preference is unset.
managed_install_dir = CoreFoundation.CFPreferencesCopyAppValue(
    "ManagedInstallDir", "ManagedInstalls")
# Resolve the report path from munki's configuration, falling back to the
# default install directory when no preference is set.
if managed_install_dir:
    MANAGED_INSTALL_REPORT = os.path.join(
        managed_install_dir, 'ManagedInstallReport.plist')
else:
    MANAGED_INSTALL_REPORT = os.path.join(
        default_install_dir, 'ManagedInstallReport.plist')
# "debug" as the first CLI argument enables pretty-printed output.
# NOTE: this file is Python 2 (print statement below).
if len(sys.argv) > 1:
    if sys.argv[1] == 'debug':
        print '**** DEBUGGING ENABLED ****'
        DEBUG = True
        import pprint
        PP = pprint.PrettyPrinter(indent=4)
def dict_from_plist(path):
    """Returns a dict based on plist found in path.

    Wraps any parse/read failure in a generic Exception with a descriptive
    message. NOTE(review): Python 2 ``except X, name`` syntax, and
    ``plistlib.readPlist`` is the Py2-era API — this module is Python 2 only.
    """
    try:
        return plistlib.readPlist(path)
    except Exception, message:
        raise Exception("Error creating plist from output: %s" % message)
def main():
    """Read munki's ManagedInstallReport.plist, keep a whitelist of keys,
    and write the filtered report to a ``cache`` dir next to this script."""
    # Create cache dir if it does not exist
    cachedir = '%s/cache' % os.path.dirname(os.path.realpath(__file__))
    if not os.path.exists(cachedir):
        os.makedirs(cachedir)
    # Check if MANAGED_INSTALL_REPORT exists; fall back to an empty report
    # so the script still writes a (mostly empty) cache file.
    if not os.path.exists(MANAGED_INSTALL_REPORT):
        print '%s is missing.' % MANAGED_INSTALL_REPORT
        install_report = {}
    else:
        install_report = dict_from_plist(MANAGED_INSTALL_REPORT)
    # Collect Errors, Warnings (as JSON?)
    # EndTime, StartTime, ManifestName, (Conditions->catalogs?)
    # ManagedInstallVersion
    # Some statistics
    # pylint: disable=E1103
    report_list = {}
    items = ['EndTime', 'StartTime', 'ManifestName', 'ManagedInstallVersion',
             'Errors', 'Warnings', 'RunType']
    # Copy only keys that are present AND truthy (empty lists/strings are dropped).
    for item in items:
        if install_report.get(item):
            report_list[item] = install_report[item]
    # pylint: enable=E1103
    # PP only exists when the "debug" CLI argument was given (see module top).
    if DEBUG:
        PP.pprint(report_list)
    # Write report to cache
    plistlib.writePlist(report_list, "%s/munkireport.plist" % cachedir)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
71342 | """Platform for retrieving meteorological data from Environment Canada."""
import datetime
import re
from env_canada import ECData
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_CONDITION_CLEAR_NIGHT,
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_FOG,
ATTR_CONDITION_HAIL,
ATTR_CONDITION_LIGHTNING_RAINY,
ATTR_CONDITION_PARTLYCLOUDY,
ATTR_CONDITION_POURING,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SNOWY_RAINY,
ATTR_CONDITION_SUNNY,
ATTR_CONDITION_WINDY,
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt
# Configuration key selecting daily vs hourly forecasts.
CONF_FORECAST = "forecast"
# Attribution string surfaced on the weather entity.
CONF_ATTRIBUTION = "Data provided by Environment Canada"
# Configuration key for an explicit Environment Canada station ID.
CONF_STATION = "station"
def validate_station(station):
    """Validate an Environment Canada station ID and return it unchanged.

    ``None`` passes through (the station is optional); any other value must
    match the ``XX/s0000###`` pattern or a voluptuous ``Invalid`` is raised.
    """
    if station is None:
        return None
    if re.fullmatch(r"[A-Z]{2}/s0000\d{3}", station) is None:
        raise vol.error.Invalid('Station ID must be of the form "XX/s0000###"')
    return station
# Extend the base weather platform schema: the site is selected either by an
# explicit station ID or by a latitude/longitude pair (vol.Inclusive requires
# both coordinates or neither).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_STATION): validate_station,
        vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude,
        vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude,
        vol.Optional(CONF_FORECAST, default="daily"): vol.In(["daily", "hourly"]),
    }
)
# Icon codes from http://dd.weatheroffice.ec.gc.ca/citypage_weather/
# docs/current_conditions_icon_code_descriptions_e.csv
# NOTE(review): codes 26 and 27 also appear in the snowy entries above "hail";
# icon_code_to_condition returns the first match in insertion order, so the
# hail entries are effectively unreachable — confirm this is intended.
ICON_CONDITION_MAP = {
    ATTR_CONDITION_SUNNY: [0, 1],
    ATTR_CONDITION_CLEAR_NIGHT: [30, 31],
    ATTR_CONDITION_PARTLYCLOUDY: [2, 3, 4, 5, 22, 32, 33, 34, 35],
    ATTR_CONDITION_CLOUDY: [10],
    ATTR_CONDITION_RAINY: [6, 9, 11, 12, 28, 36],
    ATTR_CONDITION_LIGHTNING_RAINY: [19, 39, 46, 47],
    ATTR_CONDITION_POURING: [13],
    ATTR_CONDITION_SNOWY_RAINY: [7, 14, 15, 27, 37],
    ATTR_CONDITION_SNOWY: [8, 16, 17, 18, 25, 26, 38, 40],
    ATTR_CONDITION_WINDY: [43],
    ATTR_CONDITION_FOG: [20, 21, 23, 24, 44],
    ATTR_CONDITION_HAIL: [26, 27],
}
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Environment Canada weather."""
    # Prefer an explicitly configured station; otherwise locate the nearest
    # site from configured (or home) coordinates.
    station = config.get(CONF_STATION)
    if station:
        ec_data = ECData(station_id=config[CONF_STATION])
    else:
        coordinates = (
            config.get(CONF_LATITUDE, hass.config.latitude),
            config.get(CONF_LONGITUDE, hass.config.longitude),
        )
        ec_data = ECData(coordinates=coordinates)
    add_devices([ECWeather(ec_data, config)])
class ECWeather(WeatherEntity):
    """Weather entity backed by Environment Canada data."""

    def __init__(self, ec_data, config):
        """Initialize Environment Canada weather."""
        self.ec_data = ec_data
        self.platform_name = config.get(CONF_NAME)
        self.forecast_type = config[CONF_FORECAST]

    def _current_value(self, key):
        """Return the current-conditions value for *key* as a float, or None.

        Falsy raw values (missing key, empty string, zero) yield None, matching
        the truthiness check the per-property accessors used before.
        """
        raw = self.ec_data.conditions.get(key, {}).get("value")
        if raw:
            return float(raw)
        return None

    @property
    def attribution(self):
        """Return the attribution."""
        return CONF_ATTRIBUTION

    @property
    def name(self):
        """Return the name of the weather entity."""
        return self.platform_name or self.ec_data.metadata.get("location")

    @property
    def temperature(self):
        """Return the temperature, falling back to the first hourly forecast."""
        current = self._current_value("temperature")
        if current is not None:
            return current
        hourly = self.ec_data.hourly_forecasts[0].get("temperature")
        if hourly:
            return float(hourly)
        return None

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def humidity(self):
        """Return the humidity."""
        return self._current_value("humidity")

    @property
    def wind_speed(self):
        """Return the wind speed."""
        return self._current_value("wind_speed")

    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return self._current_value("wind_bearing")

    @property
    def pressure(self):
        """Return the pressure."""
        value = self._current_value("pressure")
        if value is not None:
            # Scaled by 10, as in the upstream conversion (presumably kPa -> hPa).
            return 10 * value
        return None

    @property
    def visibility(self):
        """Return the visibility."""
        return self._current_value("visibility")

    @property
    def condition(self):
        """Return the weather condition."""
        icon_code = self.ec_data.conditions.get("icon_code", {}).get("value")
        if not icon_code:
            icon_code = self.ec_data.hourly_forecasts[0].get("icon_code")
        if icon_code:
            return icon_code_to_condition(int(icon_code))
        return ""

    @property
    def forecast(self):
        """Return the forecast array."""
        return get_forecast(self.ec_data, self.forecast_type)

    def update(self):
        """Get the latest data from Environment Canada."""
        self.ec_data.update()
def get_forecast(ec_data, forecast_type):
    """Build the forecast array.

    For "daily", ``ec_data.daily_forecasts`` is consumed as alternating
    half-day entries (high/low); the leading entry may be a "low"-only evening
    forecast, which is handled by the ``temperature_class`` branch below.
    For "hourly", the first 24 hourly forecasts are returned.
    """
    forecast_array = []
    if forecast_type == "daily":
        half_days = ec_data.daily_forecasts
        # Today's entry: condition/precipitation come from the first half-day.
        today = {
            ATTR_FORECAST_TIME: dt.now().isoformat(),
            ATTR_FORECAST_CONDITION: icon_code_to_condition(
                int(half_days[0]["icon_code"])
            ),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
                half_days[0]["precip_probability"]
            ),
        }
        if half_days[0]["temperature_class"] == "high":
            # Daytime: first two half-days are today's high and tonight's low.
            today.update(
                {
                    ATTR_FORECAST_TEMP: int(half_days[0]["temperature"]),
                    ATTR_FORECAST_TEMP_LOW: int(half_days[1]["temperature"]),
                }
            )
            half_days = half_days[2:]
        else:
            # Evening: only tonight's low remains; no high for today.
            today.update(
                {
                    ATTR_FORECAST_TEMP: None,
                    ATTR_FORECAST_TEMP_LOW: int(half_days[0]["temperature"]),
                }
            )
            half_days = half_days[1:]
        forecast_array.append(today)
        # Five further days: even indices are highs, odd indices are lows.
        for day, high, low in zip(range(1, 6), range(0, 9, 2), range(1, 10, 2)):
            forecast_array.append(
                {
                    ATTR_FORECAST_TIME: (
                        dt.now() + datetime.timedelta(days=day)
                    ).isoformat(),
                    ATTR_FORECAST_TEMP: int(half_days[high]["temperature"]),
                    ATTR_FORECAST_TEMP_LOW: int(half_days[low]["temperature"]),
                    ATTR_FORECAST_CONDITION: icon_code_to_condition(
                        int(half_days[high]["icon_code"])
                    ),
                    ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
                        half_days[high]["precip_probability"]
                    ),
                }
            )
    elif forecast_type == "hourly":
        hours = ec_data.hourly_forecasts
        for hour in range(0, 24):
            forecast_array.append(
                {
                    # "period" is a local timestamp string of the form YYYYmmddHHMM.
                    ATTR_FORECAST_TIME: dt.as_local(
                        datetime.datetime.strptime(hours[hour]["period"], "%Y%m%d%H%M")
                    ).isoformat(),
                    ATTR_FORECAST_TEMP: int(hours[hour]["temperature"]),
                    ATTR_FORECAST_CONDITION: icon_code_to_condition(
                        int(hours[hour]["icon_code"])
                    ),
                    ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
                        hours[hour]["precip_probability"]
                    ),
                }
            )
    return forecast_array
def icon_code_to_condition(icon_code):
    """Return the condition corresponding to an icon code, or None if unknown.

    The first matching entry in ICON_CONDITION_MAP's insertion order wins.
    """
    return next(
        (
            condition
            for condition, codes in ICON_CONDITION_MAP.items()
            if icon_code in codes
        ),
        None,
    )
| StarcoderdataPython |
3232739 | <reponame>python-discord/code-jam-management
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from api.database import Infraction as DbInfraction, Jam, User
from api.dependencies import get_db_session
from api.models import Infraction, InfractionResponse
# Router for all /infractions endpoints.
router = APIRouter(prefix="/infractions", tags=["infractions"])
@router.get("/", response_model=list[InfractionResponse])
async def get_infractions(session: AsyncSession = Depends(get_db_session)) -> list[DbInfraction]:
    """Get every infraction stored in the database."""
    infractions = await session.execute(select(DbInfraction))
    # Result.unique() collapses duplicate rows — presumably needed because
    # DbInfraction joined-eager-loads relationships; confirm in api.database.
    infractions.unique()
    return infractions.scalars().all()
@router.get(
    "/{infraction_id}",
    response_model=InfractionResponse,
    responses={
        404: {
            "description": "Infraction could not be found."
        }
    }
)
async def get_infraction(infraction_id: int, session: AsyncSession = Depends(get_db_session)) -> DbInfraction:
    """Fetch a single infraction by ID, raising a 404 when it does not exist."""
    query = select(DbInfraction).where(DbInfraction.id == infraction_id)
    result = await session.execute(query)
    result.unique()
    infraction = result.scalars().one_or_none()
    if not infraction:
        raise HTTPException(404, "Infraction with specified ID could not be found.")
    return infraction
@router.post(
    "/",
    response_model=InfractionResponse,
    responses={
        404: {
            # Bugfix: the key must be lowercase "description" for FastAPI to
            # include it in the generated OpenAPI schema.
            "description": "Jam ID or User ID could not be found."
        }
    }
)
async def create_infraction(infraction: Infraction, session: AsyncSession = Depends(get_db_session)) -> DbInfraction:
    """Add an infraction for a user to the database.

    Validates that the referenced jam and user both exist before inserting,
    raising a 404 for whichever reference is missing.
    """
    jam_id = (await session.execute(select(Jam.id).where(Jam.id == infraction.jam_id))).scalars().one_or_none()
    if jam_id is None:
        raise HTTPException(404, "Jam with specified ID could not be found.")
    user_id = (await session.execute(select(User.id).where(User.id == infraction.user_id))).scalars().one_or_none()
    if user_id is None:
        raise HTTPException(404, "User with specified ID could not be found.")
    # Build the database row without shadowing the incoming `infraction`
    # payload model, keeping the request data readable throughout.
    db_infraction = DbInfraction(
        user_id=user_id,
        jam_id=jam_id,
        infraction_type=infraction.infraction_type,
        reason=infraction.reason
    )
    session.add(db_infraction)
    await session.flush()
    # Re-select so the response serialization sees the fully loaded row.
    infraction_result = await session.execute(select(DbInfraction).where(DbInfraction.id == db_infraction.id))
    infraction_result.unique()
    return infraction_result.scalars().one()
| StarcoderdataPython |
17230 | <reponame>KuoHaoZeng/ai2thor-1
import ai2thor.controller
import numpy as np
from PIL import Image, ImageDraw
def get_rotation_matrix(agent_rot):
    """Build the camera rotation matrix R = R_x @ R_y from the agent's
    rotation angles in degrees.

    Ref: https://en.wikipedia.org/wiki/Rotation_matrix
    """
    yaw = np.radians(agent_rot["y"])
    pitch = np.radians(agent_rot["x"])
    cos_y, sin_y = np.cos(yaw), np.sin(yaw)
    cos_x, sin_x = np.cos(pitch), np.sin(pitch)
    rot_y = np.array([[cos_y, 0, sin_y],
                      [0, 1, 0],
                      [-sin_y, 0, cos_y]])
    rot_x = np.array([[1, 0, 0],
                      [0, cos_x, -sin_x],
                      [0, sin_x, cos_x]])
    return rot_x @ rot_y
def project_to_agent_coordinate(pos, agent_pos, r):
    """Transform a world-coordinate position into the agent's camera frame.

    AI2-THOR uses a left-handed coordinate system, so the z component is
    flipped before applying the right-handed rotation matrix and flipped back
    afterwards.
    """
    flip_z = np.array([1, 1, -1])
    # Offset relative to the camera, converted to right-handed coordinates.
    offset = (pos - agent_pos) * flip_z
    # Rotate, then convert back to the left-handed convention.
    return (r @ offset) * flip_z
def project_to_2d(pos, half_fov, w, h):
    """Perspective-project a camera-frame 3D point onto the image plane.

    Returns ``[x, y]`` integer pixel coordinates in a ``w`` x ``h`` image;
    the y axis is flipped so that y grows downwards.
    """
    # Normalized device coordinates, in [-1, 1] for points inside the FOV.
    depth_scale = pos[2] * np.tan(np.radians(half_fov))
    ndc_x = pos[0] / depth_scale
    ndc_y = pos[1] / depth_scale
    # Map [-1, 1] to pixel space; flip y for image coordinates.
    x = int(w * ((ndc_x + 1.0) / 2.0))
    y = int(h * (1 - ((ndc_y + 1.0) / 2.0)))
    return [x, y]
def draw_3d_bbox(event):
    """Draw every visible object's 3D bounding box onto the event's RGB frame.

    Builds the camera rotation matrix and position from the agent pose, skips
    objects behind the camera or absent from the instance segmentation, then
    projects each box's 8 corners to 2D and draws its 12 edges. The annotated
    image is stored on ``event.bbox_3d_frame`` and the event is returned.
    """
    #######
    # Draw the 3D bbox in 2D RGB image by first construct the rotation matrix and get agent position by the agent pose,
    # then filter out the objects which are not visible to the agent.
    # Finally, project the 3D bbox to 2D space and draw it on the 2D RGB image and return the event dict with image.
    #######
    # get the 2D image width and height
    w, h = event.metadata["screenWidth"], event.metadata["screenHeight"]
    # get the FOV
    half_fov = event.metadata["fov"] / 2
    # get the camera rotation matrix; pitch comes from the camera horizon,
    # not the body rotation (this mutates the metadata dict in place).
    agent_rot = event.metadata["agent"]["rotation"]
    agent_rot["x"] = event.metadata["agent"]["cameraHorizon"]
    rotation_matrix = get_rotation_matrix(agent_rot)
    # get the camera 3D position
    agent_pos = np.array([event.metadata["cameraPosition"]["x"],
                          event.metadata["cameraPosition"]["y"],
                          event.metadata["cameraPosition"]["z"]])
    # get the 2D RGB image and allocate a drawer
    img = Image.fromarray(event.frame, "RGB")
    draw = ImageDraw.Draw(img)
    # iterate over all objects in the scene
    # first classify if the object is in the view by rotated z position and instance segmentation
    # then draw the 3D bbox in the 2D RGB image
    for obj in event.metadata["objects"]:
        # get object 3D position and rotate it to the agent coordinate
        pos = np.array([obj["position"]["x"], obj["position"]["y"], obj["position"]["z"]])
        new_pos = project_to_agent_coordinate(pos, agent_pos, rotation_matrix)
        # classify is the object is in front of the agent (positive camera-frame z)
        if new_pos[2] > 0:
            # classify if the object is seen by the agent (not occluded by other objects)
            if obj["objectId"] in event.instance_masks.keys():
                # don't draw the floor and ceiling objects — except lamps, whose
                # IDs (e.g. "FloorLamp") also contain "Floor"
                if "Floor" in obj["objectId"] or "Ceiling" in obj["objectId"]:
                    if "Lamp" not in obj["objectId"]:
                        continue
                # get the object color from the instance segmentation
                color = event.object_id_to_color[obj["objectId"]]
                # get the 3D bbox center and size
                vertices, valid = [], []
                if not isinstance(obj["objectOrientedBoundingBox"], type(None)):
                    # get the 3D bbox 8 vertices
                    corner_points = obj["objectOrientedBoundingBox"]["cornerPoints"]
                    # project vertices to 2D image coordinate; corners behind the
                    # camera are still projected but flagged invalid so their
                    # edges get filtered out below
                    for point in corner_points:
                        new_point = project_to_agent_coordinate(point, agent_pos, rotation_matrix)
                        if new_point[2] > 0:
                            valid.append(True)
                        else:
                            valid.append(False)
                        new_point_2d = project_to_2d(new_point, half_fov, w, h)
                        vertices.append(new_point_2d)
                    # get the 3D bbox 12 lines (vertex ordering differs from the
                    # axis-aligned case below)
                    lines = [[vertices[0], vertices[1]],
                             [vertices[2], vertices[3]],
                             [vertices[0], vertices[3]],
                             [vertices[1], vertices[2]],
                             [vertices[4], vertices[5]],
                             [vertices[6], vertices[7]],
                             [vertices[4], vertices[7]],
                             [vertices[5], vertices[6]],
                             [vertices[2], vertices[6]],
                             [vertices[3], vertices[7]],
                             [vertices[1], vertices[5]],
                             [vertices[0], vertices[4]]]
                    # a line is drawable only when both endpoints are in front
                    # of the camera (boolean product)
                    valid_lines = [valid[0] * valid[1],
                                   valid[2] * valid[3],
                                   valid[0] * valid[3],
                                   valid[1] * valid[2],
                                   valid[4] * valid[5],
                                   valid[6] * valid[7],
                                   valid[4] * valid[7],
                                   valid[5] * valid[6],
                                   valid[2] * valid[6],
                                   valid[3] * valid[7],
                                   valid[1] * valid[5],
                                   valid[0] * valid[4]]
                else:
                    if "cornerPoints" in obj["axisAlignedBoundingBox"].keys():
                        # get the 3D bbox 8 vertices
                        corner_points = obj["axisAlignedBoundingBox"]["cornerPoints"]
                    else:
                        # get the 3D bbox 8 vertices from bbox center and size
                        center = np.array([obj["axisAlignedBoundingBox"]["center"]["x"],
                                           obj["axisAlignedBoundingBox"]["center"]["y"],
                                           obj["axisAlignedBoundingBox"]["center"]["z"]])
                        size = np.array([obj["axisAlignedBoundingBox"]["size"]["x"],
                                         obj["axisAlignedBoundingBox"]["size"]["y"],
                                         obj["axisAlignedBoundingBox"]["size"]["z"]])
                        corner_points = []
                        # enumerate the 8 corners as center +/- half-size per axis
                        for i in range(2):
                            pos_x = np.array(center)
                            pos_x[0] = pos_x[0] - (size[0] / 2) + (i * size[0])
                            for j in range(2):
                                pos_y = np.array(pos_x)
                                pos_y[1] = pos_y[1] - (size[1] / 2) + (j * size[1])
                                for k in range(2):
                                    pos_z = np.array(pos_y)
                                    pos_z[2] = pos_z[2] - (size[2] / 2) + (k * size[2])
                                    corner_points.append(pos_z)
                    # project vertices to 2D image coordinate
                    for point in corner_points:
                        new_point = project_to_agent_coordinate(point, agent_pos, rotation_matrix)
                        if new_point[2] > 0:
                            valid.append(True)
                        else:
                            valid.append(False)
                        new_point_2d = project_to_2d(new_point, half_fov, w, h)
                        vertices.append(new_point_2d)
                    # get the 3D bbox 12 lines
                    lines = [[vertices[0], vertices[1]],
                             [vertices[2], vertices[3]],
                             [vertices[0], vertices[2]],
                             [vertices[1], vertices[3]],
                             [vertices[4], vertices[5]],
                             [vertices[6], vertices[7]],
                             [vertices[4], vertices[6]],
                             [vertices[5], vertices[7]],
                             [vertices[2], vertices[6]],
                             [vertices[3], vertices[7]],
                             [vertices[1], vertices[5]],
                             [vertices[0], vertices[4]]]
                    valid_lines = [valid[0] * valid[1],
                                   valid[2] * valid[3],
                                   valid[0] * valid[2],
                                   valid[1] * valid[3],
                                   valid[4] * valid[5],
                                   valid[6] * valid[7],
                                   valid[4] * valid[6],
                                   valid[5] * valid[7],
                                   valid[2] * valid[6],
                                   valid[3] * valid[7],
                                   valid[1] * valid[5],
                                   valid[0] * valid[4]]
                # flatten each line to (x1, y1, x2, y2) for PIL's draw.line
                lines = np.array(lines)
                lines = np.reshape(lines, (-1, 4))
                valid_lines = np.array(valid_lines)
                valid_lines = np.reshape(valid_lines, (-1, 1))
                # draw the 3D bbox 12 lines in the 2D RGB image
                for iii, line in enumerate(lines):
                    if valid_lines[iii]:
                        draw.line((line[0], line[1], line[2], line[3]), fill=color, width=2)
    # store the result back to the event
    bbox_frame = np.array(img)
    event.bbox_3d_frame = bbox_frame
    return event
if __name__ == "__main__":
    # Demo: step an agent around a kitchen scene and save three annotated frames.
    # give the height and width of the 2D image and scene id
    w, h = 900, 900
    scene = "FloorPlan2{:02d}_physics".format(1)
    # allocate controller and initialize the scene and agent
    # (an empty local_path makes the controller use the default build)
    # local_path = "src/ai2thor/unity/builds/thor-local-OSXIntel64.app/Contents/MacOS/AI2-Thor"
    local_path = ""
    controller = ai2thor.controller.Controller(local_path=local_path)
    _ = controller.start(width=w, height=h)
    _ = controller.reset(scene)
    # Enable the segmentation/object renders that draw_3d_bbox relies on.
    event = controller.step(dict(action='Initialize',
                                 gridSize=0.25,
                                 renderClassImage=True,
                                 renderObjectImage=True,
                                 renderDepthImage=True,
                                 fieldOfView=90))
    # do something then draw the 3D bbox in 2D image
    event = controller.step(dict(action="MoveAhead"))
    event = controller.step(dict(action="MoveAhead"))
    event = controller.step(dict(action="Rotate", rotation=dict(x=0, y=30, z=0)))
    event = draw_3d_bbox(event)
    img = Image.fromarray(event.bbox_3d_frame, "RGB")
    img.save("./output1.png")
    event = controller.step(dict(action="LookDown"))
    event = draw_3d_bbox(event)
    img = Image.fromarray(event.bbox_3d_frame, "RGB")
    img.save("./output2.png")
    event = controller.step(dict(action="LookDown"))
    event = draw_3d_bbox(event)
    img = Image.fromarray(event.bbox_3d_frame, "RGB")
    img.save("./output3.png")
| StarcoderdataPython |
28967 | <reponame>devilry/devilry-django
from django import forms
from django.contrib import messages
from django.db import models
from django.db import transaction
from django.http import HttpResponseRedirect, Http404
from django.utils import timezone
from django.utils.translation import gettext_lazy, pgettext_lazy
from django.views.generic import View
import django_rq
from devilry.apps.core import models as core_models
from devilry.devilry_comment import models as comment_models
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_examiner.views.assignment.bulkoperations import bulk_operations_grouplist
from devilry.devilry_group import models as group_models
from devilry.devilry_email.feedback_email import feedback_email
class AssignPointsForm(bulk_operations_grouplist.SelectedAssignmentGroupForm):
    """
    Subclasses the group-select form and adds an ``IntegerField`` for points.
    """
    #: The score that will be given to every selected assignment group.
    points = forms.IntegerField(
        min_value=0,
        help_text='Add a score that will be given to all selected assignment groups.',
        required=True,
        # Bugfix: ``pgettext_lazy`` takes a (context, message) pair; it was
        # being called with only one argument, which raises TypeError when the
        # label is evaluated. Use ``gettext_lazy`` (already imported above),
        # matching the module's other untranslated-context strings.
        label=gettext_lazy('Points'))

    def get_grading_points(self):
        """Return the points value the examiner entered for the selected groups."""
        return self.cleaned_data['points']
class PointsTargetRenderer(bulk_operations_grouplist.AssignmentGroupTargetRenderer):
    """Target renderer that appends the points input to the base field layout."""

    def get_field_layout(self):
        field_layout = super().get_field_layout()
        field_layout.append('points')
        return field_layout
class AssignPassedFailedForm(bulk_operations_grouplist.SelectedAssignmentGroupForm):
    """
    Subclasses the group-select form and adds a ``BooleanField`` used to mark
    the selected groups as passed or failed.
    """
    #: Checked means a passing grade; unchecked means failed.
    passed = forms.BooleanField(
        label=pgettext_lazy('grading', 'Passed?'),
        help_text=pgettext_lazy('grading', 'Check to provide a passing grade.'),
        initial=True,
        required=False)

    def get_grading_points(self):
        """Return the assignment's max points for a pass, zero for a fail."""
        return self.assignment.max_points if self.cleaned_data['passed'] else 0
class PassedFailedTargetRenderer(bulk_operations_grouplist.AssignmentGroupTargetRenderer):
    """Target renderer that appends the passed/failed checkbox to the base layout."""

    def get_field_layout(self):
        field_layout = super().get_field_layout()
        field_layout.append('passed')
        return field_layout
class AbstractBulkFeedbackListView(bulk_operations_grouplist.AbstractAssignmentGroupMultiSelectListFilterView):
    """
    Base class that handles all the logic of bulk creating feedbacks.
    Extend this class with a subclass that uses a form suited for the
    :attr:``~.devilry.apps.core.models.Assignment.grading_system_plugin_id``.
    Example:
        Bulk feedback class points based Assignment::
            class BulkFeedbackPassedFailedView(AbstractBulkFeedbackListView):
                def get_filterlist_url(self, filters_string):
                    return self.request.cradmin_app.reverse_appurl(
                        'bulk-feedback-passedfailed-filter', kwargs={'filters_string': filters_string})
                def get_target_renderer_class(self):
                    return PassedFailedTargetRenderer
                def get_form_class(self):
                    return AssignPassedFailedForm
    """
    #: List item renderer for the examiner multiselect UI.
    value_renderer_class = devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue
    template_name = 'devilry_examiner/assignment/bulk_create_feedback.django.html'
    def get_pagetitle(self):
        """Page title shown above the group list."""
        return gettext_lazy('Bulk create feedback')
    def get_filterlist_url(self, filters_string):
        """Subclasses must build the filter URL for their own app route."""
        raise NotImplementedError()
    def get_unfiltered_queryset_for_role(self, role):
        """Restrict to groups this examiner can access that still await feedback.

        The ``exclude`` keeps only groups whose last feedbackset is NOT the
        last published one, i.e. groups with an unpublished (gradeable) set.
        """
        queryset = super(AbstractBulkFeedbackListView, self).get_unfiltered_queryset_for_role(role)
        return queryset\
            .filter_examiner_has_access(user=self.request.user) \
            .exclude(cached_data__last_published_feedbackset=models.F('cached_data__last_feedbackset'))
    def __create_grading_groupcomment(self, feedback_set_id, published_time, text):
        """
        Create an entry of :class:`~.devilry.devilry_group.models.GroupComment` as part of grading
        for the :class:`~.devilry.devilry_group.models.FeedbackSet` that received feedback.
        Args:
            feedback_set_id: comment for this feedback.
            published_time: Time the comment was published.
            text: Text provided by examiner.
        """
        group_models.GroupComment.objects.create(
            feedback_set_id=feedback_set_id,
            part_of_grading=True,
            visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
            user=self.request.user,
            user_role=comment_models.Comment.USER_ROLE_EXAMINER,
            text=text,
            comment_type=comment_models.Comment.COMMENT_TYPE_GROUPCOMMENT,
            published_datetime=published_time
        )
    def form_valid(self, form):
        """
        Creates entries of :class:`~.devilry.devilry_group.models.GroupComment`s for all the
        :class:`~.devilry.devilry_group.models.FeedbackSet`s that is given a bulk feedback.
        Note:
            Using ``transaction.atomic()`` for single transaction when creating ``GroupComment``s and
            updating the ``FeedbackSet``s.
            If anything goes wrong, the transaction is rolled back and nothing is saved to the database.
        Args:
            form: cleaned form.
        """
        feedback_set_ids = self.get_feedbackset_ids_from_posted_ids(form=form)
        points = form.get_grading_points()
        text = form.cleaned_data['feedback_comment_text']
        # Cache anonymous display names before transaction. Needed for django messages.
        displaynames = self.get_group_displaynames(form=form)
        now_without_microseconds = timezone.now().replace(microsecond=0)
        with transaction.atomic():
            for feedback_set_id in feedback_set_ids:
                self.__create_grading_groupcomment(
                    feedback_set_id=feedback_set_id,
                    published_time=now_without_microseconds,
                    text=text)
            # The publish timestamp is offset by 1 microsecond from the comment
            # timestamp — presumably so the grading event sorts strictly after
            # its grading comment; confirm before changing.
            group_models.FeedbackSet.objects\
                .filter(id__in=feedback_set_ids)\
                .update(
                    grading_published_by=self.request.user,
                    grading_published_datetime=now_without_microseconds + timezone.timedelta(microseconds=1),
                    grading_points=points)
            feedback_email.bulk_send_feedback_created_email(
                assignment_id=self.assignment.id,
                feedbackset_id_list=feedback_set_ids,
                domain_url_start=self.request.build_absolute_uri('/'))
        self.add_success_message(displaynames)
        return super(AbstractBulkFeedbackListView, self).form_valid(form=form)
    def add_success_message(self, anonymous_display_names):
        """Flash a success message listing all groups that received feedback."""
        message = gettext_lazy('Bulk added feedback for %(group_names)s') % {
            'group_names': ', '.join(anonymous_display_names)}
        messages.success(self.request, message=message)
class BulkFeedbackPointsView(AbstractBulkFeedbackListView):
    """
    Bulk-feedback view for assignments graded with a points-based system.
    """
    def get_form_class(self):
        """Use the form with an integer points field."""
        return AssignPointsForm

    def get_target_renderer_class(self):
        """Render the selected groups with the points input."""
        return PointsTargetRenderer

    def get_filterlist_url(self, filters_string):
        """Build the filter URL for this view's own app route."""
        url_kwargs = {'filters_string': filters_string}
        return self.request.cradmin_app.reverse_appurl('bulk-feedback-points-filter', kwargs=url_kwargs)
class BulkFeedbackPassedFailedView(AbstractBulkFeedbackListView):
    """
    Bulk-feedback view for assignments graded as passed/failed.
    """
    def get_form_class(self):
        """Use the form with a passed/failed checkbox."""
        return AssignPassedFailedForm

    def get_target_renderer_class(self):
        """Render the selected groups with the passed/failed checkbox."""
        return PassedFailedTargetRenderer

    def get_filterlist_url(self, filters_string):
        """Build the filter URL for this view's own app route."""
        url_kwargs = {'filters_string': filters_string}
        return self.request.cradmin_app.reverse_appurl('bulk-feedback-passedfailed-filter', kwargs=url_kwargs)
class BulkFeedbackRedirectView(View):
    """
    Redirects to the bulk-feedback view matching the assignment's grading system.
    """
    def dispatch(self, request, *args, **kwargs):
        """Redirect to the points or passed/failed view; 404 for unknown plugins.

        Bugfixes: ``Http404`` is an exception and must be raised, not returned
        (returning it hands Django an exception instance instead of a response);
        the plugin id was also redundantly re-read between the two checks.
        """
        grading_plugin_id = self.request.cradmin_role.grading_system_plugin_id
        if grading_plugin_id == core_models.Assignment.GRADING_SYSTEM_PLUGIN_ID_POINTS:
            return HttpResponseRedirect(request.cradmin_app.reverse_appurl('bulk-feedback-points'))
        if grading_plugin_id == core_models.Assignment.GRADING_SYSTEM_PLUGIN_ID_PASSEDFAILED:
            return HttpResponseRedirect(request.cradmin_app.reverse_appurl('bulk-feedback-passedfailed'))
        raise Http404()
| StarcoderdataPython |
1670411 | """Properties Module
This module defines types for Property objects.
For more about properties in Tiled maps see the below link:
https://doc.mapeditor.org/en/stable/manual/custom-properties/
The types defined in this module get added to other objects
such as Layers, Maps, Objects, etc
"""
from pathlib import Path
from typing import Dict, Union
from .common_types import Color
# A single custom-property value; Tiled supports float, file (Path), string,
# bool and color property types.
Property = Union[float, Path, str, bool, Color]

# Mapping from a property's name to its value, as attached to maps, layers,
# objects, etc.
Properties = Dict[str, Property]
| StarcoderdataPython |
1759216 | <reponame>himichael/LeetCode
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def isSubtree(self, s, t):
        """Return True when tree ``t`` occurs as a subtree of tree ``s``.

        :type s: TreeNode
        :type t: TreeNode
        :rtype: bool
        """
        # Two empty trees match; an empty tree matches nothing else, and a
        # non-empty pattern cannot occur in an empty tree.
        if s is None:
            return t is None
        if t is None:
            return False
        # Either the trees match here, or t occurs somewhere in a child.
        return (
            self.isSameTree(s, t)
            or self.isSubtree(s.left, t)
            or self.isSubtree(s.right, t)
        )

    def isSameTree(self, a, b):
        """Return True when trees ``a`` and ``b`` are structurally identical."""
        if a is None or b is None:
            return a is b
        return (
            a.val == b.val
            and self.isSameTree(a.left, b.left)
            and self.isSameTree(a.right, b.right)
        )
| StarcoderdataPython |
1729380 | from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandStart
from data.texts import text
from filters import IsPrivate
from loader import dp
@dp.message_handler(CommandStart(), IsPrivate())
async def bot_start(message: types.Message):
    """Reply to /start in private chats with a greeting personalized by full name."""
    await message.answer(text.start_message.format(message.from_user.full_name))
| StarcoderdataPython |
1769630 | # -*- mode: python; coding: utf-8 -*
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 3-clause BSD License
import os
import numpy as np
import pytest
from astropy.coordinates import Angle
import astropy.units as units
from astropy.time import Time
from pyradiosky import SkyModel
import pyradiosky.utils as skyutils
def test_tee_ra_loop():
    """TEE RA -> CIRS RA -> TEE RA should round-trip to the original angle."""
    obs_time = Time(2457458.1739, scale="utc", format="jd")
    original_tee_ra = Angle(np.pi / 4.0, unit="rad")
    cirs_ra = skyutils._tee_to_cirs_ra(original_tee_ra, obs_time)
    roundtrip_tee_ra = skyutils._cirs_to_tee_ra(cirs_ra, obs_time)
    assert roundtrip_tee_ra == original_tee_ra
def test_stokes_tofrom_coherency():
    """Round-trip Stokes parameters through the coherency matrix, covering the
    deprecation warnings for plain-float input and the shape-validation errors."""
    stokesI = 4.5
    stokesQ = -0.3
    stokesU = 1.2
    stokesV = -0.15
    stokes = np.array([stokesI, stokesQ, stokesU, stokesV])
    # Expected 2x2 coherency for the Stokes vector above.
    expected_coherency = (
        0.5 * np.array([[4.2, 1.2 + 0.15j], [1.2 - 0.15j, 4.8]]) * units.Jy
    )
    # Plain ndarray input (no units) must trigger the deprecation warning.
    with pytest.warns(
        DeprecationWarning,
        match="In version 0.2.0, stokes_arr will be required to be an astropy "
        "Quantity. Currently, floats are assumed to be in Jy.",
    ):
        coherency = skyutils.stokes_to_coherency(stokes)
    assert np.allclose(expected_coherency, coherency)
    with pytest.warns(
        DeprecationWarning,
        match="In version 0.2.0, coherency_matrix will be required to be an astropy "
        "Quantity. Currently, floats are assumed to be in Jy.",
    ):
        back_to_stokes = skyutils.coherency_to_stokes(coherency.value)
    assert np.allclose(stokes * units.Jy, back_to_stokes)
    # again, with multiple sources and a frequency axis.
    stokes = (
        np.array(
            [[stokesI, stokesQ, stokesU, stokesV], [stokesI, stokesQ, stokesU, stokesV]]
        ).T
        * units.Jy
    )
    stokes = stokes[:, np.newaxis, :]
    coherency = skyutils.stokes_to_coherency(stokes)
    back_to_stokes = skyutils.coherency_to_stokes(coherency)
    assert units.quantity.allclose(stokes, back_to_stokes)
    # Wrong leading dimension must raise with a descriptive message.
    with pytest.raises(ValueError) as cm:
        skyutils.stokes_to_coherency(stokes[0:2, :])
    assert str(cm.value).startswith(
        "First dimension of stokes_vector must be length 4."
    )
    with pytest.raises(ValueError) as cm:
        skyutils.coherency_to_stokes(expected_coherency[0, :])
    assert str(cm.value).startswith(
        "First two dimensions of coherency_matrix must be length 2."
    )
@pytest.mark.parametrize("stype", ["subband", "spectral_index", "flat"])
def test_download_gleam(tmp_path, stype):
    """Download a tiny GLEAM catalog slice and check overwrite semantics.

    NOTE(review): hits the network via astroquery; skipped when astroquery
    is not installed.
    """
    pytest.importorskip("astroquery")
    fname = "gleam_cat.vot"
    filename = os.path.join(tmp_path, fname)
    skyutils.download_gleam(path=tmp_path, filename=fname, row_limit=10)
    sky = SkyModel()
    sky.read_gleam_catalog(filename, spectral_type=stype)
    assert sky.Ncomponents == 10
    # check there's not an error if the file exists and overwrite is False
    # and that the file is not replaced
    skyutils.download_gleam(path=tmp_path, filename=fname, row_limit=5)
    sky.read_gleam_catalog(filename, spectral_type=stype)
    assert sky.Ncomponents == 10
    # check that the file is replaced if overwrite is True
    skyutils.download_gleam(path=tmp_path, filename=fname, row_limit=5, overwrite=True)
    sky2 = SkyModel()
    sky2.read_gleam_catalog(filename, spectral_type=stype)
    assert sky2.Ncomponents == 5
def test_astroquery_missing_error(tmp_path):
    """When astroquery is absent, download_gleam must raise ImportError."""
    fname = "gleam_cat.vot"
    try:
        import astroquery  # noqa
    except ImportError:
        # Only exercised in environments without astroquery installed.
        expected = "The astroquery module required to use the download_gleam function."
        with pytest.raises(ImportError, match=expected):
            skyutils.download_gleam(path=tmp_path, filename=fname, row_limit=10)
def test_jy_to_ksr():
    """Compare jy_to_ksr against an independent, unit-free calculation."""
    Nfreqs = 200
    freqs = np.linspace(100, 200, Nfreqs) * units.MHz

    def jy2ksr_nonastropy(freq_arr):
        # Hand-rolled Jy -> K sr conversion using CGS constants.
        c_cmps = 29979245800.0  # cm/s
        k_boltz = 1.380658e-16  # erg/K
        lam = c_cmps / freq_arr.to_value("Hz")  # cm
        return 1e-23 * lam ** 2 / (2 * k_boltz)

    conv0 = skyutils.jy_to_ksr(freqs)
    conv1 = jy2ksr_nonastropy(freqs) * units.K * units.sr / units.Jy
    assert np.allclose(conv0, conv1)
| StarcoderdataPython |
1767843 | <filename>tests/test_readme_util.py
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"""\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = """\
---
languages:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
README_CORRECT_FOUR_LEVEL = """\
---
languages:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_EMPTY_YAML = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_EMPTY_YAML = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_NO_YAML = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = """\
---
languages:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = """\
---
languages:
- zh
- en
---
# Dataset Card for My Dataset
"""
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = """\
---
languages:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = """\
---
languages:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = """\
---
languages:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
languages:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = """\
---
languages:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = """\
---
languages:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    """A structurally valid README string parses into the expected dict."""
    readme = ReadMe.from_string(readme_md, example_yaml_structure)
    assert readme.to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    """Parsable but structurally invalid READMEs fail in validate(), not parsing.

    The expected messages embed the pseudo-path "root" that from_string uses.
    """
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    """Structural parsing problems raise immediately inside from_string."""
    message = re.escape(expected_error.format(path="root"))
    with pytest.raises(ValueError, match=message):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    """suppress_parsing_errors=True must swallow parsing problems (no raise)."""
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    """Reading from a file mirrors from_string, except the root name is the path."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        # The root node is named after the file path instead of "root".
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    """Validation errors from a README file embed the file path in the message."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        # Unlike from_string, the message is formatted with the real path.
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    """Parsing errors from a README file embed the file path in the message."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        readme_path = Path(tmp_dir) / "README.md"
        with open(readme_path, "w+") as readme_file:
            readme_file.write(readme_md)
        message = re.escape(expected_error.format(path=readme_path))
        with pytest.raises(ValueError, match=message):
            ReadMe.from_readme(readme_path, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    """suppress_parsing_errors=True must swallow parsing problems from files."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| StarcoderdataPython |
87021 | # Header
# Use a header card to display a page #header.
# ---
from h2o_wave import site, ui
# Avatar image used by the menu examples.
image = 'https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&h=750&w=1260'
# Menu commands shared by all three header variants below.
commands = [
    ui.command(name='profile', label='Profile', icon='Contact'),
    ui.command(name='preferences', label='Preferences', icon='Settings'),
    ui.command(name='logout', label='Logout', icon='SignOut'),
]
page = site['/demo']
# Variant 1: transparent background, logo image, buttons and a search box.
page['header1'] = ui.header_card(
    box='1 1 9 1',
    title='Transparent header',
    subtitle='And now for something completely different!',
    image='https://www.h2o.ai/wp-content/themes/h2o2018/templates/dist/images/h2o_logo.svg',
    items=[
        ui.button(name='btn1', label='Button 1'),
        ui.button(name='btn2', label='Button 2'),
        ui.button(name='btn3', label='Button 3'),
    ],
    secondary_items=[ui.textbox(name='search', icon='Search', width='300px', placeholder='Search...')],
    color='transparent'
)
# Variant 2: card-colored background with an avatar menu, links and nav groups.
page['header2'] = ui.header_card(
    box='1 2 9 1',
    title='Card color header',
    subtitle='And now for something completely different!',
    items=[ui.menu(image=image, items=commands)],
    secondary_items=[
        ui.button(name='btn1', label='Link 1', link=True),
        ui.button(name='btn2', label='Link 2', link=True),
        ui.button(name='btn3', label='Link 3', link=True),
    ],
    nav=[
        ui.nav_group('Menu', items=[
            ui.nav_item(name='#menu/spam', label='Spam'),
            ui.nav_item(name='#menu/ham', label='Ham'),
            ui.nav_item(name='#menu/eggs', label='Eggs'),
        ]),
        ui.nav_group('Help', items=[
            ui.nav_item(name='#about', label='About'),
            ui.nav_item(name='#support', label='Support'),
        ])
    ],
    color='card',
)
# Variant 3: default (primary) color with an icon and link-style tabs.
page['header3'] = ui.header_card(
    box='1 3 9 1',
    title='Primary color header',
    subtitle='And now for something completely different!',
    icon='Cycling',
    icon_color='$violet',
    items=[ui.menu(icon='Add', items=commands)],
    secondary_items=[
        ui.tabs(name='menu', value='email', link=True, items=[
            ui.tab(name='email', label='Mail', icon='Mail'),
            ui.tab(name='events', label='Events', icon='Calendar'),
            ui.tab(name='spam', label='Spam', icon='Heart'),
        ]),
    ]
)
page.save()
| StarcoderdataPython |
1763351 | <reponame>combinators/templating
@(clsName: Python, text: Python, body: Python, bodyTight: Python)
class @{clsName}(object):
def __init__(self):
@body.indentExceptFirst.indentExceptFirst
def test(self):
@bodyTight.indent.indent
if __name__ == "__main__":
x = new @{clsName}()
print(@text)
print(x.blah) | StarcoderdataPython |
3261781 | <gh_stars>1-10
from browser import document
import brySVG.dragcanvas as SVG
# Brython demo: `<=` is Brython's operator for appending a child to a DOM node.
canvas = SVG.CanvasObject("95vw", "100%", "cyan")
document["demo1"] <= canvas
# Six prototype tiles that are cloned across the grid below.
tiles = [SVG.ClosedBezierObject([((-100,50), (50,100), (200,50)), ((-100,50), (50,0), (200,50))]),
         SVG.GroupObject([SVG.PolygonObject([(50,25), (0,50), (50,75), (100,50)]),
                          SVG.SmoothBezierObject([(100,0), (4,40), (4,60), (100,100)])]),
         SVG.EllipseObject([(25,0), (75,100)], angle=30),
         SVG.GroupObject([SVG.CircleObject((50,50), 50),
                          SVG.BezierObject([(None, (0,100), (50,25)), ((50,25), (100,100), None)])]),
         SVG.RectangleObject([(40,0), (50,90)], angle=20),
         SVG.GroupObject([SVG.SmoothClosedBezierObject([(50,5), (5,80), (95,80)]),
                          SVG.PolylineObject([(0,0), (30,50), (70,50), (100,0)])]),
        ]
# Lay out a 10x6 grid; each cell gets a rotated clone of one prototype.
for i in range(10):
    for j in range(6):
        tile = tiles[(i+j)%6].cloneNode(True)
        #tile = tiles[(i+j)%6].cloneObject() #is slower but allows MouseMode.DRAG, TRANSFORM or EDIT to be used
        canvas <= tile
        canvas.rotateElement(tile, 45*(i*6+j))
        canvas.translateElement(tile, (i*100, j*100))
canvas.fitContents()
# Text rendering demos: fixed-size multiline text and width-wrapped text.
multilinetext = "This is a\nmultiline\nTextObject\nwith anchor\nat top left\nand fontsize 16"
canvas <= SVG.TextObject(multilinetext, (1025,10), 1, fontsize=16, ignorescaling=True, canvas=canvas, textcolour="blue")
longtext = "This is a WrappingTextObject with a width of 200 SVG units, with the anchor at bottom left."
canvas <= SVG.WrappingTextObject(canvas, longtext, (1025,600), 200, 7, 16, ignorescaling=True, textcolour="purple")
canvas.fitContents()
canvas.mouseMode = SVG.MouseMode.PAN
| StarcoderdataPython |
3217719 | <filename>tests/schema/github/conftest.py
import pytest
from acondbs import create_app
from acondbs.db.ops import define_tables
from acondbs.db.sa import sa
from acondbs.models import (
GitHubOrg,
GitHubUser,
GitHubOrgMembership,
GitHubToken,
AccountAdmin,
)
##__________________________________________________________________||
@pytest.fixture
def app_empty():
    """A Flask app bound to a fresh in-memory SQLite DB with empty tables."""
    database_uri = "sqlite:///:memory:"
    # Dummy GitHub OAuth settings; tests never talk to GitHub itself.
    y = create_app(
        SQLALCHEMY_DATABASE_URI=database_uri,
        GITHUB_AUTH_AUTHORIZE_URL="https://github.com/login/oauth/authorize",
        GITHUB_AUTH_TOKEN_URL="https://github.com/login/oauth/access_token",
        GITHUB_AUTH_CLIENT_ID="client_id_0123456789",
        GITHUB_AUTH_CLIENT_SECRET="<KEY>",
        GITHUB_AUTH_REDIRECT_URI="http://localhost:8080/signin",
    )
    with y.app_context():
        define_tables()
    yield y
@pytest.fixture
def app(app_empty):
    """The empty app populated with the org/user/token graph drawn below."""
    y = app_empty
    #
    # +------+
    # |      |        +-------+        +--------+
    # | org1 | ----- |       | --+-- | token1 |
    # |      |    |    | user1 |   |    +--------+
    # +------+    |    |       |   |
    #             | +-- |       |   |    +--------+
    #             |    +-------+   +-- | token2 |
    # +------+    |                     +--------+
    # |      | --+     +-------+
    # | org2 |    |     |       |        +--------+
    # |      | ----- | user2 | ----- | token3 |
    # +------+         |       |        +--------+
    #                   +-------+
    #
    # +------+         +-------+
    # |      |         |       |        +--------+
    # | org3 |         | user3 | ----- | token4 |
    # |      |         |       |        +--------+
    # +------+         +-------+
    #
    #
    org1 = GitHubOrg(login="org1", git_hub_id="012:Organization1")
    org2 = GitHubOrg(login="org2", git_hub_id="012:Organization2")
    org3 = GitHubOrg(login="org3", git_hub_id="012:Organization3")
    user1 = GitHubUser(
        user_id=1,
        login="user1",
        git_hub_id="04:User1",
        name="<NAME>",
        avatar_url="avatar.com/user1",
    )
    user2 = GitHubUser(
        user_id=2,
        login="user2",
        git_hub_id="04:User2",
        name="<NAME>",
        avatar_url="avatar.com/user2",
    )
    user3 = GitHubUser(
        user_id=3,
        login="user3",
        git_hub_id="04:User3",
        name="<NAME>",
        avatar_url="avatar.com/user3",
    )
    # Tokens and memberships are not added to the session directly;
    # presumably they are persisted through the user/org relationships
    # when those objects are added below — TODO confirm cascade config.
    GitHubToken(token_id=1, token="token1", scope="read:org", user=user1)
    GitHubToken(token_id=2, token="token2", scope="", user=user1)
    GitHubToken(token_id=3, token="token3", scope="", user=user2)
    GitHubToken(token_id=4, token="token4", scope="", user=user3)
    GitHubOrgMembership(org=org1, member=user1)
    GitHubOrgMembership(org=org2, member=user1)
    GitHubOrgMembership(org=org2, member=user2)
    admin1 = AccountAdmin(git_hub_login="user1")
    admin2 = AccountAdmin(git_hub_login="user3")
    with y.app_context():
        sa.session.add(org1)
        sa.session.add(org2)
        sa.session.add(org3)
        sa.session.add(user1)
        sa.session.add(user2)
        sa.session.add(user3)
        sa.session.add(admin1)
        sa.session.add(admin2)
        sa.session.commit()
    yield y
##__________________________________________________________________||
| StarcoderdataPython |
3212226 | # Copyright (c) 2018 PrimeVR
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
from lib.web_data import WebData
def bitcore_claimer_line(n):
    """Build the bitcoin_fork_claimer command line for a BitCore (BTX) nugget.

    The transaction details are placeholders the user must fill in by hand
    (see BITCORE_INSTRUCTIONS).
    """
    addr = n['src_addr']
    txid_placeholder = "<%s-airdrop-txid>" % addr
    txindex_placeholder = "<%s-airdrop-txindex>" % addr
    satoshis_placeholder = "<%s-airdrop-satoshis>" % addr
    key_placeholder = "%s-private-key" % n['src_addr']
    dest_placeholder = "bitcore-destination-address"
    force_flag = "--force " if n.bfc_force else ""
    return ("python2.7 bitcoin_fork_claimer/claimer.py BTX %s %s %s %s"
            " %s--txindex %s --satoshis %s" %
            (txid_placeholder, key_placeholder, addr, dest_placeholder,
             force_flag, txindex_placeholder, satoshis_placeholder))
BITCORE_INSTRUCTIONS = """
BitCore has a separate blockchain that aidropped value on BTC addresses as new
transactions. To use the bitcoin_fork_claimer tool privately, the details of
the transactions must be manually found and provided here.
One must use a BitCore node or block explorer to find:
1) The transaction hash (a.k.a transaction ID) which credits the address
2) The transaction index of the specific output
3) The amount of BitCore satoshis credited
This has been automated to access the BitCore block explorer via the
direct-query-claim-prep.py script included in forkdrop_suite. This will gather
the balances and provide a more specific report tailored to claiming Bitcoin
Private.
WARNING: These quereis are less private than blockchain.info queries and may be
less reliable.
"""
UNSPENT_URL = "https://chainz.cryptoid.info/btx/api.dws?q=unspent&active=%s&key=<KEY>"
class BitcoreQuery(object):
    """Query the chainz.cryptoid.info explorer for unspent BTX outputs.

    For every address in *settings*, fetches the unspent outputs and records
    each one as a claimable "nugget" in *vdb*.
    """

    def __init__(self, vdb, settings):
        self.vdb = vdb
        self.coin = self.vdb.get_coin_info('bitcore')
        # Classify each address by prefix: "3..." -> P2SH-P2WPKH, "bc1..." -> bech32.
        self.addrs = [{'addr': a,
                       'p2sh_p2wpkh': a[:1] == "3",
                       'bech32': a[:3] == "bc1"}
                      for a in settings.addresses]
        self.tails = not settings.not_tails
        self.cache = settings.cache_requests
        self.wd = WebData(tails=self.tails, cache=self.cache)
        self._add_nuggets()

    def _add_nuggets(self):
        """Fetch unspent outputs for every address and register them in vdb."""
        for a in self.addrs:
            addr = a['addr']
            url = UNSPENT_URL % addr
            unspent_info = self.wd.fetch_web_url_json_info(url)
            for u in unspent_info['unspent_outputs']:
                # 'tx_ouput_n' (sic) — presumably the explorer's actual JSON
                # key spelling; verify against the live API before "fixing".
                self.vdb['nuggets'].append_direct_query(a, self.coin,
                                                        u['tx_hash'],
                                                        u['tx_ouput_n'],
                                                        int(u['value']))
# Registry entry tying the BitCore coin id to its user-facing instructions,
# claimer command builder and direct-query implementation.
BITCORE = {
    'id': 'bitcore',
    'instructions': BITCORE_INSTRUCTIONS,
    'claimer_line': bitcore_claimer_line,
    'direct_query': BitcoreQuery,
}
| StarcoderdataPython |
3246210 | #!/usr/bin/env python
"""
Finds and prints the contents of chests (including minecart chests)
"""
import locale, os, sys
# local module
try:
import nbt
except ImportError:
# nbt not in search path. Let's see if it can be found in the parent folder
extrasearchpath = os.path.realpath(os.path.join(__file__,os.pardir,os.pardir))
if not os.path.exists(os.path.join(extrasearchpath,'nbt')):
raise
sys.path.append(extrasearchpath)
from nbt.world import WorldFolder
class Position(object):
    """A simple 3-D coordinate holder (x, y, z)."""

    def __init__(self, x, y, z):
        # Store the three coordinates as plain attributes.
        self.x, self.y, self.z = x, y, z
class Chest(object):
    """A chest found in the world: its kind, location and contents."""

    def __init__(self, type, pos, items):
        self.type = type
        # `pos` arrives as an (x, y, z) triple and is wrapped in a Position.
        self.pos = Position(*pos)
        self.items = items
def items_from_nbt(nbtlist):
    """Aggregate an NBT item list into a {block_id: total_count} dict.

    Each entry of *nbtlist* is a compound tag carrying 'id' and 'Count'
    tags; stacks sharing the same id are summed together.
    """
    items = {}  # block_id -> count
    for item in nbtlist:
        item_id = item['id'].value
        count = item['Count'].value
        # dict.get avoids the separate membership test of the original.
        items[item_id] = items.get(item_id, 0) + count
    return items
def chests_per_chunk(chunk):
    """Return a list of Chest objects for every chest found in *chunk*.

    Looks at both chest minecarts (entity id "Minecart" with type 1) and
    placed chest tile entities.  (The original docstring, copied from a
    block-counting script, was wrong.)
    """
    entities = []
    for entity in chunk['Entities']:
        if entity["id"].value == "Minecart" and entity["type"].value == 1:
            x, y, z = entity["Pos"]
            # BUGFIX: original read `x.value,y,value,z.value` — the comma in
            # `y,value` made this a NameError on the undefined name `value`.
            x, y, z = x.value, y.value, z.value
            items = items_from_nbt(entity["Items"])
            entities.append(Chest("Minecart with chest", (x, y, z), items))
    for entity in chunk['TileEntities']:
        if entity["id"].value == "Chest":
            x, y, z = entity["x"].value, entity["y"].value, entity["z"].value
            items = items_from_nbt(entity["Items"])
            entities.append(Chest("Chest", (x, y, z), items))
    return entities
def print_results(chests):
    """Print each chest's type, position and an item listing to stdout."""
    locale.setlocale(locale.LC_ALL, '')

    def _fmt(value):
        # locale.format() was deprecated since 3.7 and removed in Python
        # 3.12; format_string() is the supported replacement.
        return locale.format_string("%0.1f", value, grouping=True)

    for chest in chests:
        itemcount = sum(chest.items.values())
        print("%s at %s,%s,%s with %d items:" %
              (chest.type, _fmt(chest.pos.x), _fmt(chest.pos.y),
               _fmt(chest.pos.z), itemcount))
        for blockid, count in chest.items.items():
            print(" %3dx Item %d" % (count, blockid))
def main(world_folder):
    """Scan every chunk in *world_folder* and print all chests found.

    Returns a sysexits-style status: 0 on success, 75 (EX_TEMPFAIL) when
    interrupted by the user.
    """
    world = WorldFolder(world_folder)
    try:
        for chunk in world.iter_nbt():
            # Chest data lives in the chunk's "Level" compound.
            print_results(chests_per_chunk(chunk["Level"]))
    except KeyboardInterrupt:
        return 75 # EX_TEMPFAIL
    return 0 # NOERR
if __name__ == '__main__':
    # Exit codes follow the sysexits.h convention (EX_USAGE, EX_IOERR, ...).
    if (len(sys.argv) == 1):
        print("No world folder specified!")
        sys.exit(64) # EX_USAGE
    world_folder = sys.argv[1]
    # clean path name, eliminate trailing slashes:
    world_folder = os.path.normpath(world_folder)
    if (not os.path.exists(world_folder)):
        print("No such folder as "+world_folder)
        sys.exit(72) # EX_IOERR
    sys.exit(main(world_folder))
| StarcoderdataPython |
124409 | import random
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework import viewsets
from rest_framework import decorators as drf_decorators
from rest_framework.request import Request
from rest_framework.response import Response
from extension import defines as extension_defines
from extension import exceptions as extension_exceptions
from extension import serializers as extension_serializers
class ExtensionView(viewsets.ViewSet):
    """ViewSet exposing configuration endpoints for the browser extension."""

    @drf_decorators.action(
        detail=False,
        methods=["GET"],
    )
    @swagger_auto_schema(
        operation_summary="Get Collectable Fields",
        operation_description="Retrieve strings the extensions will use to capture data.",
        responses={
            status.HTTP_200_OK: extension_serializers.GetExtensionFieldsSerializer,
        },
    )
    def get_collectable_fields(self, request: Request, *args, **kwargs) -> Response:
        """Return the (shuffled) field labels the extension should capture.

        Raises NoExtensionVersionError when the version header is absent and
        ExtensionDeprecatedError for unsupported extension versions.
        """
        extension_version = request.headers.get(extension_defines.EXTENSION_VERSION_HEADER_NAME)
        if extension_version is None:
            raise extension_exceptions.NoExtensionVersionError
        if extension_version not in extension_defines.SUPPORTED_EXTENSION_VERSIONS:
            raise extension_exceptions.ExtensionDeprecatedError
        # Hebrew column labels; shuffled so each request gets a different
        # 3-label subset in `vctb`.
        vctb = ["סמס", "קורס", "שם הקורס", "אופן", "קובע", "שעות", "משקל", "הערות"]
        random.shuffle(vctb)
        response_serializer = extension_serializers.GetExtensionFieldsSerializer(
            instance={
                "vctb": vctb[:3],
                "vctl": "קורס",
                "vctln": "שם",
            },
        )
        return Response(response_serializer.data, status=status.HTTP_200_OK)
| StarcoderdataPython |
1779521 | """
executable.py - base classes for all executable code
NOTE: this script is not to be used standalone, execept for testing
purposes!
"""
# HISTORY ####################################################################
#
# 0.1.0 MR Mar11 Initial version (moderately tested)
##############################################################################
__description__ = "an Executable class implementation"
__version__ = "0.1.0"
__author__ = "<NAME>."
import os
from subprocess import Popen, PIPE, STDOUT
class Executable(object):
    """
    Executable - abstract class implementing the executable script

    Wraps a script path, holds a private copy of the environment, and can
    run the script through a given interpreter, returning the exit code
    and the combined stdout/stderr output.
    """

    def __init__(self, command):
        """Remember *command*, the path of the script to be executed."""
        # Validate explicitly: `assert` is stripped when running with -O.
        if command is None or command == "":
            raise ValueError("command must be a non-empty string")
        self.__cmd = command  # a script to be executed
        # BUGFIX: the original aliased os.environ directly, so setEnv()
        # silently mutated the environment of the whole process.
        self._env = dict(os.environ)  # dict for environment vars

    def __str__(self):
        return "{}".format(self.command)

    @property
    def command(self):
        """Returns the script name"""
        return self.__cmd

    def setEnv(self, key, val):
        """Set an environment variable for future executions (local copy only)."""
        self._env[key] = val

    def getEnv(self, key):
        """Return the value of an environment variable (KeyError if unset)."""
        return self._env[key]

    @property
    def environ(self):
        """The private environment mapping used when running the script."""
        return self._env

    def _checkScript(self):
        """Checks for existence of the script"""
        return os.path.exists(self.__cmd)

    def _ExeError(self, exe, error):
        """Return an 'executable missing' error message."""
        return "Executable '{}' not found on system.\n{}".format(exe, error)

    def _ScriptError(self):
        """Return a 'script missing' error message."""
        return "Script '{}' does not exist".format(self.command)

    def execute(self, executable, args="", shell=False):
        """Run the script via *executable* and return ``(rc, output)``.

        *executable* may be a string or a list (interpreter plus options);
        *args* is a whitespace-separated string of extra arguments.
        stderr is merged into stdout; rc is 1 when the script is missing
        or the interpreter cannot be started.
        """
        if executable is None or args is None:
            raise ValueError("executable and args must not be None")
        rc = 1
        output = ""
        # fail early if the script itself does not exist
        if not self._checkScript():
            return (rc, self._ScriptError())
        # build a proper argument list for Popen
        if isinstance(executable, list):
            cmdlist = executable[:]  # copy: do not mutate the caller's list
            cmdlist.append(self.command)
        else:
            cmdlist = [executable, self.command]
        # extend the list if command-line arguments are present
        if args != "":
            cmdlist.extend(args.split())
        try:
            proc = Popen(cmdlist, env=self.environ, shell=shell,
                         stdin=PIPE, stdout=PIPE, stderr=STDOUT)
            # stderr is redirected into stdout, so the second element is None
            (output, _) = proc.communicate()
            rc = proc.returncode
        except OSError as oserror:
            output = self._ExeError(executable, oserror)
        return (rc, output)
###################################################################
if __name__ == '__main__':
    # Standalone invocation just prints this module's docstring.
    print(__doc__)
| StarcoderdataPython |
133180 | from __future__ import division
from zibalzeep.xsd.const import xsd_ns
from zibalzeep.xsd.elements.base import Base
class Schema(Base):
    """XSD element wrapper for an inline ``<xsd:schema>`` node."""

    name = "schema"
    attr_name = "schema"
    qname = xsd_ns("schema")

    def clone(self, qname, min_occurs=1, max_occurs=1):
        # Occurrence constraints do not apply to a schema; just return a
        # fresh instance.
        return self.__class__()

    def parse_kwargs(self, kwargs, name, available_kwargs):
        # Pass the raw value straight through under the element's name.
        if name in available_kwargs:
            value = kwargs[name]
            available_kwargs.remove(name)
            return {name: value}
        return {}

    def parse(self, xmlelement, schema, context=None):
        """Parse the inline schema element into a full Schema document.

        The import is local to avoid a circular import with
        ``zibalzeep.xsd.schema``.  The parsed schema is registered on the
        parse *context*.
        """
        from zibalzeep.xsd.schema import Schema as _Schema

        schema = _Schema(xmlelement, schema._transport)
        context.schemas.append(schema)
        return schema

    def parse_xmlelements(self, xmlelements, schema, name=None, context=None):
        # Consume the leading <xsd:schema> element, if present, and parse it;
        # implicitly returns None otherwise.
        if xmlelements[0].tag == self.qname:
            xmlelement = xmlelements.popleft()
            result = self.parse(xmlelement, schema, context=context)
            return result

    def resolve(self):
        # Nothing to resolve; the schema element is already concrete.
        return self
_elements = [Schema]
| StarcoderdataPython |
4814201 |
class BotHelper:
    """Helper namespace for bot connection utilities."""

    @staticmethod
    def ConnectToBot(Message):
        """Log the incoming message while connecting to the bot.

        BUGFIX: the original was an instance method missing ``self`` (so it
        could not be called on an instance), printed the literal string
        "Message" instead of the parameter of the same name, and carried a
        dead ``pass``.
        """
        print(Message)
1653114 | from discord import Embed, FFmpegPCMAudio
from discord.ext import commands
from discord.utils import get
from youtube_dl import YoutubeDL
from asyncio import run_coroutine_threadsafe
import re
import requests
from bs4 import BeautifulSoup
class Music(commands.Cog, name='Music'):
YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist':'True'}
FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}
def __init__(self, bot):
    self.bot = bot
    # Per-guild queue of search() result dicts; index 0 is the song
    # currently playing.
    self.song_queue = {}
    # Per-guild "now playing" message, edited as the queue changes.
    self.message = {}
@staticmethod
def parse_duration(duration):
    """Format *duration* (integer seconds) as an ``H:MM:SS`` string."""
    # NOTE: dropped the original's unused `result = []` local.
    m, s = divmod(duration, 60)
    h, m = divmod(m, 60)
    return f'{h:d}:{m:02d}:{s:02d}'
@staticmethod
def search(author, arg):
    """Resolve *arg* (a URL or search words) to a playable YouTube entry.

    Returns a dict with a rich 'embed' for display, the raw audio
    'source' URL for FFmpeg, and the video 'title'.
    """
    with YoutubeDL(Music.YDL_OPTIONS) as ydl:
        # If *arg* is not a fetchable URL, fall back to a YouTube search
        # and take the first hit.
        try: requests.get(arg)
        except: info = ydl.extract_info(f"ytsearch:{arg}", download=False)['entries'][0]
        else: info = ydl.extract_info(arg, download=False)
        embed = (Embed(description=f"[{info['title']}]({info['webpage_url']})", color=0x3de4ba)
                 .add_field(name='Duration', value=Music.parse_duration(info['duration']))
                 .add_field(name='Requested by', value=author)
                 .add_field(name='Uploader', value=f"[{info['uploader']}]({info['channel_url']})")
                 .set_thumbnail(url=info['thumbnail']))
        return {'embed': embed, 'source': info['formats'][0]['url'], 'title': info['title']}
async def edit_message(self, ctx):
embed = self.song_queue[ctx.guild][0]['embed']
content = "\n".join([f"({self.song_queue[ctx.guild].index(i)}) {i['title']}" for i in self.song_queue[ctx.guild][1:]]) if len(self.song_queue[ctx.guild]) > 1 else "No song queued"
embed.set_field_at(index=3, name="File d'attente :", value=content, inline=False)
await self.message[ctx.guild].edit(embed=embed)
def play_next(self, ctx):
voice = get(self.bot.voice_clients, guild=ctx.guild)
if len(self.song_queue[ctx.guild]) > 1:
del self.song_queue[ctx.guild][0]
run_coroutine_threadsafe(self.edit_message(ctx), self.bot.loop)
voice.play(FFmpegPCMAudio(self.song_queue[ctx.guild][0]['source'], **Music.FFMPEG_OPTIONS), after=lambda e: self.play_next(ctx))
voice.is_playing()
else:
run_coroutine_threadsafe(voice.disconnect(), self.bot.loop)
run_coroutine_threadsafe(self.message[ctx.guild].delete(), self.bot.loop)
@commands.command(aliases=['p'], brief='$play [url/words]')
async def play(self, ctx, *, video: str):
channel = ctx.author.voice.channel
voice = get(self.bot.voice_clients, guild=ctx.guild)
song = Music.search(ctx.author.mention, video)
if voice and voice.is_connected():
await voice.move_to(channel)
else:
voice = await channel.connect()
if not voice.is_playing():
self.song_queue[ctx.guild] = [song]
self.message[ctx.guild] = await ctx.send(embed=song['embed'])
await ctx.message.delete()
voice.play(FFmpegPCMAudio(song['source'], **Music.FFMPEG_OPTIONS), after=lambda e: self.play_next(ctx))
voice.is_playing()
else:
self.song_queue[ctx.guild].append(song)
await self.edit_message(ctx)
@commands.command(brief='$pause')
async def pause(self, ctx):
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice.is_connected():
await ctx.message.delete()
if voice.is_playing():
await ctx.send(embed= Embed(description= 'Music paused!', color= 0x3de4ba, delete_after=10.0))
voice.pause()
else:
await ctx.send(embed= Embed(description= 'Music resumed!', color= 0x3de4ba, delete_after=10.0))
voice.resume()
@commands.command(aliases=['pass'], brief='$skip')
async def skip(self, ctx):
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice.is_playing():
await ctx.message.delete()
await ctx.send(embed= Embed(description= 'Music skipped!', color= 0x3de4ba, delete_after=10.0))
voice.stop()
@commands.command(brief='$remove [x]')
async def remove(self, ctx, *, num: int):
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice.is_playing():
del self.song_queue[ctx.guild][num]
await ctx.message.delete()
await self.edit_message(ctx)
@commands.command(brief='$lyrics [artist] [song]')
async def lyrics(self, ctx, song: str):
message = ctx.message.content
song = message.replace("$lyrics", "")
url = self.generateUrl(song)
html = self.getHtml(url)
lyrics = self.extractLyrics(html)
for msg in self.extractLyrics(html):
await ctx.send(embed= Embed(description= msg, color= 0x3de4ba))
def generateUrl(self,artist):
host = "https://genius.com/"
urlWords = [w.lower() for w in artist.split()] + ["lyrics"]
urlWords[0] = urlWords[0].title()
return host+"-".join(urlWords)
def getHtml(self,url):
req = requests.get(url=url)
return BeautifulSoup(req.content, 'html.parser')
def extractLyrics(self,html):
htmlString = str(html.find_all("div", "lyrics")[0])
htmlString, _ = re.subn("</a>","", htmlString)
htmlString, _= re.subn("<a[^>]*>","", htmlString)
htmlString, _= re.subn("<p>","", htmlString)
htmlString, _= re.subn("<!--sse-->","", htmlString)
htmlString, _= re.subn("<br/>","", htmlString)
htmlString, _= re.subn("<div class=\"lyrics\">","", htmlString)
htmlString, _= re.subn("</p>","", htmlString)
htmlString, _= re.subn("<!--/sse-->","", htmlString)
htmlString, _= re.subn("</div>","", htmlString)
htmlString, _= re.subn("<i>","*", htmlString)
htmlString, _= re.subn("</i>","*", htmlString)
htmlString, _= re.subn("<b>","**", htmlString)
htmlString, _= re.subn("</b>","**", htmlString)
htmlString, _= re.subn("&","&", htmlString)
htmlString, _= re.subn("<","<", htmlString)
htmlString, _= re.subn(">",">", htmlString)
n = 2000
return [htmlString[i:i+n] for i in range(0, len(htmlString), n)]
def setup(bot):
    """discord.py extension entry point: register the Music cog on *bot*."""
    music_cog = Music(bot)
    bot.add_cog(music_cog)
| StarcoderdataPython |
3209588 | #!/usr/bin/env python3
import argparse
import json
import sys
from subprocess import Popen, PIPE
from odf.draw import Image, Frame
from odf.opendocument import OpenDocumentSpreadsheet
from odf.style import Style, TableColumnProperties, TableRowProperties, TextProperties
from odf.table import Table, TableRow, TableCell, TableColumn
from odf.text import P, A
from pandocodswriter.limages import load_images
from pandocodswriter.lstyle import load_style, add_fmt, st_dict
# usage - python odswriter.py yourInputFile.yourExetention yourOutputFile.ods -s *YOUR POSITIVE NUMBER*
# check README.md for more information.
# DO NOT mix up places of intput and output.
# Style names.
# header0 - just for correct index.
# If in input file more, than two levels of headers, next level header will generate automatically
# with name = "header" + str(level).
# Names of the cell styles loaded from the reference .ods file.
header = ['header0', 'header1', 'header2']
table_header = 'tablehead'
table_content = 'tablebody'
simple_text = 'text'
# Read the command-line arguments.
parser = argparse.ArgumentParser(description='Pandoc ODS writer. This is Pandoc filter, but there is no opportunity '
                                             'write .ods files easier way. So, use "out.ods" '
                                             'option to write .ods files with this filter')
parser.add_argument('input', help='Input file. Use Pandoc`s input formats.', action='store')
parser.add_argument('output', help='Output file. Use .ods filename extension.', action='store')
parser.add_argument('-s', '--separator', nargs=1, help='Header level to separate sheets, 0 by default(no separation).',
                    action='store')
parser.add_argument('-r', '--reference', nargs=1, help='Reference to file with styles', action='store')
args = parser.parse_args()
# It is important for auto-height in text-rows:
# if you want to change width by default (10 cm), change it in 'write_sheet()',
# count how much PT in your length (in CM) and change this constant:
PTINTENCM = 284
# count how much IN in your length (in CM) and change this constant:
ININTENCM = 3.9
# These module-level globals are shared by the two mutually recursive parse
# functions below; passing them around explicitly would be very awkward.
ods = OpenDocumentSpreadsheet()
table = Table() # creating the first sheet
content = P()
string_to_write = ''
header_level = 0
bullet = 0 # indicating bullet lists
ordered = 0 # indicating bullet list and used as order at item lines
image_counter = 0
saved_hr = None # list of hardreferences to loaded images
saved_styles = {} # We will save styles in order to not downloading it again each time we use it.
separator = 0 # level of separating header
# Dictionary of formatting indicators.
fmt = {'Emph': 0,
       'Strong': 0,
       'Strikeout': 0}
def write_sheet():
    """Attach the finished sheet to the document.

    Registers the 10cm "Wide" column style, adds that column to the current
    sheet (column A holds all running text) and appends the sheet to the
    spreadsheet body.
    """
    col_style = Style(name="Wide", family="table-column")
    col_style.addElement(TableColumnProperties(columnwidth="10cm"))
    ods.automaticstyles.addElement(col_style)
    table.addElement(TableColumn(stylename='Wide'))
    ods.spreadsheet.addElement(table)
def count_height(row, cell):
    """Count the row height that shows all text in the cell.

    This function uses the width of the text column and the font size.
    Args:
        row - current row.
        cell - current cell.
    """
    style_name = cell.getAttribute('stylename')
    try:
        style = saved_styles[style_name]
        text_prop = style.getElementsByType(TextProperties)
        try:
            text_prop = text_prop[0]
            font_size = str(text_prop.getAttribute('fontsize'))
            font_size = font_size.replace('pt', '')
            font_size = int(font_size)
        except IndexError:
            # Style carries no TextProperties element: assume 10pt.
            font_size = 10
    except KeyError:
        # Style was never loaded/saved: assume 10pt.
        font_size = 10
    # Characters that fit into one 10cm line at this font size.
    symbols_in_string = PTINTENCM // font_size + 1
    length = 0
    for p in cell.getElementsByType(P):
        length += len(p.__str__())
    # Lines needed times font size, plus 4pt of padding.
    height = font_size*(length // symbols_in_string + 1) + 4
    height = str(height) + 'pt'
    new_name = 'heightsuit' + height
    height_suit = Style(name=new_name, family='table-row')
    height_suit.addElement(TableRowProperties(rowheight=height))
    ods.automaticstyles.addElement(height_suit)
    row.setAttribute(attr='stylename', value=new_name)
def count_size(wh_list, row):
    """Compute the rendered size of an image row and style the row's height.

    Args:
        wh_list - key/value attribute pairs; may contain ['width', 'Nin'] and
            ['height', 'Nin'] entries (inches).
        row - the image row whose height style is set.
    Returns:
        (width, height) as strings with an 'in' suffix, scaled down
        proportionally so the width never exceeds ININTENCM.
    """
    width, height = -1, -1
    for attr in wh_list:
        if attr[0] == 'width':
            width = float(attr[1].replace('in', ''))
        elif attr[0] == 'height':
            height = float(attr[1].replace('in', ''))
    # If either dimension is missing, fall back to a square default.
    if height == -1 or width == -1:
        width = ININTENCM
        height = ININTENCM
    if width > ININTENCM:
        new_width = ININTENCM
        new_height = height * new_width / width
    else:
        new_width = width
        new_height = height
    row_height = str(new_height) + 'in'
    style_name = 'image' + str(image_counter)
    row_style = Style(name=style_name, family='table-row')
    row_style.addElement(TableRowProperties(rowheight=row_height))
    ods.automaticstyles.addElement(row_style)
    row.setAttribute(attr='stylename', value=style_name)
    return str(new_width) + 'in', row_height
def add_style(cell, name):
    """Attach style *name* to *cell*, loading it from the reference file on first use.

    Styles are loaded once via lstyle.load_style (from the file given by
    --reference, or 'styles.ods' next to this script) and memoized in
    saved_styles so later cells reuse them.

    Args:
        cell - cell that needs to be styled.
        name - style name that will be set.
    """
    global saved_styles
    global ods
    if args.reference:
        styles_source = args.reference[0]
    else:
        styles_source = str(sys.argv[0]).replace('odswriter.py', '') + 'styles.ods'
    if name not in saved_styles:
        loaded = load_style(name, styles_source)
        if loaded is not None:
            saved_styles[name] = loaded
            ods.styles.addElement(loaded)
    cell.setAttribute(attr='stylename', value=name)
def write_text():
    """Write ordinary (non-table, non-image) elements to the output file.

    This function is called every time a whole paragraph or block of elements
    has been collected in 'string_to_write'.  Each block or paragraph lands in
    its own cell in the first column of the output file.  After writing, the
    current row is shifted down and 'string_to_write' is cleared so the next
    elements can be collected.
    """
    global string_to_write
    global header_level
    global ordered
    global bullet
    global table
    global separator
    global content
    row = TableRow()
    cell = TableCell()
    # NOTE(review): 'header_level != 0 and header_level > 0' is redundant -
    # the second comparison implies the first.
    if header_level != 0 and header_level > 0:
        if header_level > (len(header) - 1): # if there are headers with lvl bigger than 2
            for i in range(len(header), header_level+1): # creating names for headers with lvl bigger than 2
                header.append('header' + str(i))
        add_style(cell, header[header_level])
        if header_level == separator: # if separator was set, we will create new sheet in document
            if table.hasChildNodes():
                write_sheet()
            table = Table(name=string_to_write) # creating new sheet with separating header as name
    else:
        add_style(cell, simple_text)
    # List decorations are plain text prefixes, not real ODS list items.
    if bullet:
        string_to_write = '- ' + string_to_write
    if ordered > 0:
        string_to_write = str(ordered) + ') ' + string_to_write
        ordered = ordered + 1
    content.addText(string_to_write)
    cell.addElement(content)
    content = P()
    count_height(row, cell)
    row.addElement(cell)
    table.addElement(row)
    string_to_write = ''
def write_image(image):
    """Write to output file image elements.

    Since, element with title 'Image' has special structure of 'c'(Content) field, that looks like:
    [[0], [1], [2]]
    where:
    [0] - list of attributes: identifier, classes, key-value pairs:
        ['id', [], [ ... , ['weight', '...in'], ['height', '...in'], ... ] - we get sizes there.
    [1] - caption.
    [2] - ['src', 'title'] - source and title of image.
    we should parse it especially.
    Args:
        image - element with title 'Image'.
    """
    global image_counter
    global saved_hr
    # image_counter == -1 is a sentinel meaning "no images available - skip all".
    if image_counter == -1:
        return
    if image_counter == 0:
        # Lazily load all image hrefs from the input on the first image seen.
        saved_hr = load_images(args.input, ods)
        if len(saved_hr) == 0:
            image_counter = -1
            return
    # Flush any pending text so the image gets its own row.
    if string_to_write:
        write_text()
    row = TableRow()
    cell = TableCell()
    w, h = count_size(image['c'][0][2], row)
    frame = Frame(width=w, height=h)
    img = Image(href=saved_hr[image_counter])
    table.addElement(row)
    row.addElement(cell)
    cell.addElement(frame)
    frame.addElement(img)
    image_counter = image_counter + 1
def write_bullet(bull_list, without_write):
    """Render a 'BulletList' element.

    Raises the global bullet flag so write_text() prefixes each item with
    '- ', parses the list content, then lowers the flag.
    """
    global bullet
    bullet = 1
    list_parse(bull_list['c'], without_write)
    bullet = 0
def write_ord(ord_list, without_write):
    """Render an 'OrderedList' element.

    Sets the global ordered counter to 1 so write_text() prefixes each item
    with its number (and increments the counter), then resets it.
    """
    global ordered
    ordered = 1
    list_parse(ord_list['c'], without_write)
    ordered = 0
def write_code(code):
    """Append the literal text of a 'Code'/'CodeBlock' element to the buffer.

    Pandoc encodes code elements as ``[[attrs], 'code string']`` in the 'c'
    field, so the raw code string lives at index 1; the attributes at index 0
    (identifier, classes, key-value pairs) are ignored.

    Args:
        code - element with title 'Code' or 'CodeBlock'.
    """
    global string_to_write
    string_to_write += code['c'][1]
def write_link(link):
    """Write special blocks with attributes.

    Since, element with title 'Link' has special structure of 'c'(Content) field, that looks like:
    [[atr], [1], ['target', 'title']]
    where:
    [atr] - list of attributes: identifier, classes, key-value pairs.
    [1] - list with objects (list of dictionaries) - visible text of hyperlink.
    ['target', 'title'] - list with two strings, 'target' - URL, 'title' - title.
    we should parse it especially.
    Args:
        link - element with title 'Link'.
    """
    global string_to_write
    global content
    # Flush the text collected so far into the paragraph, then reuse the
    # buffer to collect the link's visible text.
    content.addText(string_to_write)
    string_to_write = ''
    list_parse(link['c'][1], without_write=True)
    a = A(href=link['c'][2][0], text=string_to_write)
    string_to_write = ''
    content.addElement(a)
def write_math(math):
    """Append a 'Math' element's TeX source to the text buffer.

    'c' is ``[{math type}, 'tex string']``; only the raw TeX at index 1 is
    used, the math-type dictionary at index 0 is ignored.
    TODO: actually render the TeX math instead of dumping its source.

    Args:
        math - element with title 'Math'.
    """
    global string_to_write
    string_to_write += math['c'][1]
def write_raw(raw):
    """Append a raw ('RawBlock'/'RawInline') element's text to the buffer.

    'c' is ``[format, 'raw text']``; the format tag at index 0 is ignored and
    the raw text at index 1 is appended verbatim.

    Args:
        raw - element with title 'RawBlock' or 'RawInline'.
    """
    global string_to_write
    string_to_write += raw['c'][1]
def write_special_block(block, without_write):
    """Write special blocks with attributes.

    Since, element with title 'Div' or 'Span' or 'Header' has special structure of 'c'(Content) field, that looks like:
    [[0], [1]]*
    where:
    [0] - list of attributes: identifier, classes, key-value pairs.
    [1] - list with objects (list of dictionaries) - content.
    * with 'Header' title - [level, [0], [1]] - level - int, [0], [1] - the same as above.
    we should parse it especially.
    This function writes block itself.
    Args:
        block - element with title 'Div' or 'Span' or 'Header'.
        without_write - indicate calling write_text() functions. By default calls it.
    """
    global string_to_write
    global header_level
    # 'con' is the index of the content list within 'c' (Headers carry an
    # extra leading level integer, shifting content to index 2).
    con = 1
    if block['t'] == 'Header':
        header_level = block['c'][0]
        con = 2
    # Flush any previously collected text before starting this block.
    if (not without_write) and string_to_write:
        write_text()
    list_parse(block['c'][con], without_write=True)
    if not without_write:
        write_text()
    header_level = 0
def write_table(tab):
    """Write to output file table elements.

    This function is called every time, we meet 'Table' dictionary's title.
    Firstly, if we have some information in 'string_to_write' we record it, because we'll use this
    variable to collect information from table's cells.
    Table in pandoc's json has following structure:
    dict: { 't': 'Table'
            'c': [ [0] [1] [2] [3] [4] ]
          }
    Where:
    [0] - caption.
    [1] - is list of aligns by columns, looks like: [ { t: 'AlignDefault' }, ... ].
    [2] - widths of columns.
    [3] - is list of table's headers (top cell of every column), can be empty.
    [4] - list of rows, and row is list of cells.
    Since every cell's structure is the same as text's one, we just parse them as list and write one by one.
    Args:
        tab - dictionary with 't': 'Table".
    """
    global table
    global string_to_write
    global fmt
    for k in fmt.keys(): # setting to zero all outside-table formatting, we use formatting ONLY inside table-cell
        fmt[k] = 0
    if string_to_write: # writing all text from buffer, table has it's own rules for rows, cols and their shift-rules
        write_text()
    row = TableRow() # adding empty line before table
    table.addElement(row)
    row = TableRow()
    headers = tab['c'][3]
    if headers:
        cell = TableCell() # adding empty first cell in row (first column in document - text column).
        row.addElement(cell)
        for col in headers:
            cell = TableCell()
            list_parse(col, without_write=True)
            add_style(cell, table_header)
            cell_content = P(text=string_to_write)
            for key in fmt.keys():
                if fmt[key] == 1:
                    # NOTE(review): the styled replacement cell keeps only the
                    # formatting style, dropping table_header - confirm intended.
                    new_style = add_fmt(style=st_dict[cell.getAttribute(attr='stylename')], key=key)
                    ods.styles.addElement(new_style)
                    fmt[key] = 0
                    cell = TableCell(stylename=new_style.getAttribute(attr='name'))
            cell.addElement(cell_content)
            string_to_write = ''
            row.addElement(cell)
        table.addElement(row)
    t_content = tab['c'][4]
    for line in t_content:
        row = TableRow()
        cell = TableCell()
        row.addElement(cell)
        for col in line:
            cell = TableCell()
            list_parse(col, without_write=True)
            add_style(cell, table_content)
            cell_content = P(text=string_to_write)
            for key in fmt.keys():
                if fmt[key] == 1:
                    new_style = add_fmt(style=st_dict[cell.getAttribute(attr='stylename')], key=key)
                    ods.styles.addElement(new_style)
                    fmt[key] = 0
                    cell = TableCell(stylename=new_style.getAttribute(attr='name'))
            cell.addElement(cell_content)
            string_to_write = ''
            row.addElement(cell)
        table.addElement(row)
    row = TableRow() # adding empty line after table
    table.addElement(row)
# These two functions - 'dict_parse()' and 'list_parse()' - extract readable information
# from the json object.
# Pandoc's json object has, in its 'blocks' field, a list of dictionaries or lists,
# which represent further objects; the dictionaries have the following structure:
# { 't': '*Name of the object type*',
#   'c': '*Content of the object*' }
# e.g.:
# { 't': 'Str',
#   'c': 'Hello' }
# (sometimes there is no 'c'-field, e.g. for the 'Space' object).
# So the 'c'-field - the content - can be a list of dictionaries, a string, or a list of lists,
# which must then be parsed again, recursively.
# That's why there are two mutually recursive functions here - one for parsing lists and one for parsing dictionaries.
def dict_parse(dictionary, without_write=False):
    """Parse dictionaries.

    Dictionary represent some json-object. Kind of json object depends on 't' (title) field of it.
    We will parse it differently depending on different titles. Sometimes this function write block,
    sometimes it leaves writing special functions.
    Args:
        dictionary - object with 't' and sometimes 'c' fields.
        without_write - indicate calling write_text() functions. By default calls it.
    """
    global string_to_write
    global fmt
    try:
        # Remember inline formatting markers (Emph/Strong/Strikeout) for table cells.
        if dictionary['t'] in fmt.keys():
            fmt[dictionary['t']] = 1
        if dictionary['t'] == 'Table':
            write_table(dictionary)
        elif dictionary['t'] == 'CodeBlock' or dictionary['t'] == 'Code':
            write_code(dictionary)
        elif dictionary['t'] == 'Div' or dictionary['t'] == 'Span' or dictionary['t'] == 'Header':
            write_special_block(dictionary, without_write)
        elif dictionary['t'] == 'Math':
            write_math(dictionary)
        elif dictionary['t'] == 'Link':
            write_link(dictionary)
        elif dictionary['t'] == 'BulletList':
            write_bullet(dictionary, without_write)
        elif dictionary['t'] == 'OrderedList':
            write_ord(dictionary, without_write)
        elif dictionary['t'] == 'Image':
            write_image(dictionary)
        elif 'c' in dictionary:
            # Generic element: a string is appended, a list is recursed into.
            if type(dictionary['c']) == str:
                string_to_write = string_to_write + dictionary['c']
            if type(dictionary['c']) == list:
                list_parse(dictionary['c'], without_write)
        else:
            # Content-less elements map to whitespace.
            if dictionary['t'] == 'Space':
                string_to_write = string_to_write + ' '
            elif dictionary['t'] == 'SoftBreak':
                string_to_write = string_to_write + '\n'
            elif dictionary['t'] == 'LineBreak':
                string_to_write = string_to_write + '\n\n'
                if not without_write:
                    write_text()
            else:
                string_to_write = string_to_write  # NOTE(review): no-op placeholder branch
        if dictionary['t'] == 'Para':
            string_to_write = string_to_write + '\n'
            if not without_write:
                write_text()
    except KeyError:
        # NOTE(review): this also swallows KeyErrors raised inside the write_*
        # helpers, not only a missing 't' field - consider narrowing.
        print('Incompatible Pandoc version')
def list_parse(content_list, without_write=False):
    """Recursively dispatch every element of a pandoc content list.

    Dictionaries go to dict_parse(), nested lists are parsed recursively;
    any other element type is ignored.

    Args:
        content_list - list with different parts of content from input-document.
        without_write - indicate calling write_text() functions. By default calls it.
    """
    for entry in content_list:
        if type(entry) == dict:
            dict_parse(entry, without_write)
        elif type(entry) == list:
            list_parse(entry, without_write)
def main(doc):
    """Main function.

    Get JSON object from pandoc, parse it, save result.
    Args:
        doc - json object as python dictionary or list.
            In case of dictionary it has representation like:
            { 'pandoc-version': ...
              'meta': ...
              'blocks': .......}
            in blocks we have all file-content, we will parse doc['blocks'].
            In case of list it has representation like:
            [[info_list], [content_list]], so we will parse doc[1].
    Raises:
        PermissionError: when we can't write output file.
    """
    global table
    output = args.output
    # Dict vs list distinguishes newer vs older pandoc JSON layouts.
    if type(doc) == dict:
        list_parse(doc['blocks'])
    elif type(doc) == list:
        list_parse(doc[1])
    else:
        print('Incompatible Pandoc version')
    # Attach the last (possibly only) sheet before saving.
    write_sheet()
    try:
        ods.save(output)
    except PermissionError as err:
        print("No access to ", output)
        print(err.strerror)
if __name__ == '__main__':
    source = args.input
    # Use subprocess to call pandoc and convert input file into json-object.
    # NOTE(review): shell=True with a user-supplied filename allows shell
    # injection - consider Popen(['pandoc', source, '-t', 'json']).
    command = 'pandoc ' + source + ' -t json'
    proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    res = proc.communicate()
    if res[1]:
        print(str(res[1])) # sending stderr output to user
    else:
        if args.separator is None:
            d = json.loads(res[0])
            main(d)
        if args.separator is not None:
            try:
                # The separator is the header level at which new sheets start.
                s = int(args.separator[0])
                separator = s
                d = json.loads(res[0])
                main(d)
            except IndexError:
                print('You entered invalid separator')
            except ValueError:
                print('You entered invalid separator')
| StarcoderdataPython |
3379259 | <reponame>herrywen-nanj/51reboot<filename>lesson01/liushifan/zuoye1.py<gh_stars>0
# Print the classic 9x9 multiplication (times) table, one row per line.
for row in range(1, 10):
    cells = [f"{row} * {col} = {row * col:2d} " for col in range(1, row + 1)]
    print(" ".join(cells), end=" ")
    print(" ")
| StarcoderdataPython |
3245024 | <filename>environment.py
import argparse
import gym
from gym import spaces
import numpy as np
import random
from collections import deque
import os
import ray
from ray import tune
from ray.tune import grid_search
from ray.rllib.env import EnvContext
from ray.rllib.policy import Policy
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.utils.test_utils import check_learning_achieved
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
from typing import Dict, List, Optional
from ray.rllib.utils.typing import TensorType, Tuple, Union
parser = argparse.ArgumentParser()
parser.add_argument("--run", type=str, default="PPO")  # RLlib algorithm name to train with
parser.add_argument("--as-test", action="store_true")  # verify stop-reward was reached after training
parser.add_argument("--stop-iters", type=int, default=50)  # max training iterations
parser.add_argument("--stop-timesteps", type=int, default=100000)  # max env timesteps
parser.add_argument("--stop-reward", type=float, default=0.1)  # mean episode reward target
class ConstraintSudoku(gym.Env):
    """Gym environment: solve a generalized (d^2 x d^2) Sudoku with constraint propagation.

    The agent repeatedly proposes (row, col, val) assignments; the environment
    propagates constraints, backtracks on dead ends, and rewards per the
    self.rewards table.
    """
    # Adapted from http://norvig.com/sudoku.html
    def __init__(self, config: EnvContext):
        # d is the box size; the full board is D x D with D = d**2 (d=3 -> classic 9x9).
        d = config.get('d', 3)
        assert d % 2 == 1, "d must be odd"
        self.d = d
        self.D = d**2
        self.N = self.D * 2 - 1 # Number of solved cells at the start of a puzzle
        # A sudoku game can be represented as a DxD grid (D is typically 9)
        # and each cell in the grid can take one of D possible values.
        # At any given point, a cell may have one possible value (if its solved)
        # up to D possible values (if there are no constraints on the cell).
        # We express this as a binary DxDxD array where arr[i][j] is a
        # D-dimensional binary vector representing remaining possible values
        # for the cell in the i'th row and the j'th column
        self.observation_space = spaces.MultiBinary([self.D]*3)
        self.action_space = spaces.Dict({
            'row': spaces.Discrete(self.D),
            'col': spaces.Discrete(self.D),
            'val': spaces.Discrete(self.D),
        })
        self.max_num_steps = config.get('max_num_steps', 1e6)
        # Reward shaping: unproductive steps cost 1; terminal states are neutral.
        self.rewards = {
            'impossible_assign': -1,
            'solved_assign': -1,
            'invalid_assign': -1,
            'backtrack': -1,
            'valid_assign': 0,
            'win': 0,
            'lose': 0,
        }
    def reset(self):
        """Start a new episode: clear the backtracking stacks and deal a fresh puzzle."""
        self.grid_stack = deque()
        self.assign_stack = deque()
        self.num_steps = 0
        self.grid = self._random_puzzle()
        return self.grid
    def step(self, action: Dict):
        """Run one timestep of the environment's dynamics. When end of
        episode is reached, you are responsible for calling `reset()`
        to reset this environment's state.
        Accepts an action and returns a tuple (observation, reward, done, info).
        Args:
            action (object): an action provided by the agent
        Returns:
            observation (object): agent's observation of the current environment
            reward (float) : amount of reward returned after previous action
            done (bool): whether the episode has ended, in which case further step() calls will return undefined results
            info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
        """
        row = action['row']
        col = action['col']
        val = action['val']
        self._check_bounds(row, col, val)
        self.num_steps += 1
        # Episode time limit reached -> lose.
        if self.num_steps == self.max_num_steps:
            meta = {'reward': 'lose', 'stack_len': len(self.grid_stack)}
            return self.grid, self.rewards['lose'], True, meta
        # handle invalid actions
        valid_actions = self.grid[row][col].nonzero()[0]
        if len(valid_actions) == 0:
            # Dead-end cell: undo the previous assignment.
            self._backtrack()
            meta = {'reward': 'backtrack', 'stack_len': len(self.grid_stack)}
            return self.grid, self.rewards['backtrack'], False, meta
        if val not in valid_actions:
            meta = {'reward': 'impossible_assign', 'stack_len': len(self.grid_stack)}
            return self.grid, self.rewards['impossible_assign'], False, meta
        if len(valid_actions) == 1:
            # Cell is already solved - assigning it again is wasted work.
            meta = {'reward': 'solved_assign', 'stack_len': len(self.grid_stack)}
            return self.grid, self.rewards['solved_assign'], False, meta
        # Try the assignment on a copy so a contradiction leaves self.grid intact.
        grid = self._assign(self.grid.copy(), row, col, val)
        # handle invalid assign
        if grid is False:
            self.grid[row][col][val] = 0
            meta = {'reward': 'invalid_assign', 'stack_len': len(self.grid_stack)}
            return self.grid, self.rewards['invalid_assign'], False, meta
        self.grid = grid
        # handle solved
        if (self.grid.sum(axis=-1) == 1).all():
            meta = {'reward': 'win', 'stack_len': len(self.grid_stack)}
            return self.grid, self.rewards['win'], True, meta
        # handle valid but not solved
        else:
            self.grid_stack.append(grid.copy()) # save grid state for backtracking (assign may turn out to be invalid)
            self.assign_stack.append((row, col, val)) # for setting assign to invalid after backtracking
            meta = {'reward': 'valid_assign', 'stack_len': len(self.grid_stack)}
            return self.grid, self.rewards['valid_assign'], False, meta
    def render(self, mode='ansi'):
        """Return an ASCII drawing of the board; unsolved cells render as '.'."""
        # TODO: clean up
        s = ""
        width = 1 + max(len(str(v)) for v in range(self.D))
        width = max(width,3)
        line = '+' + '+'.join(['-' * (width * self.d)] * self.d) + '+'
        s += line + '\n'
        for i in range(self.D):
            for j in range(self.D):
                if j == 0:
                    s += '|'
                vals = self.grid[i][j].nonzero()[0]
                if len(vals) == 1:
                    s += str(vals[0]).center(width)
                else:
                    s += '.'.center(width)
                if (j + 1) % self.d == 0:
                    s += '|'
            s += '\n'
            if (i + 1) % self.d == 0:
                s += line + '\n'
        return s
    def seed(self, seed=1337):
        """Seed Python's random module; returns [seed] per the gym convention."""
        random.seed(seed)
        return [seed]
    def _backtrack(self):
        """No valid assigns remain, therefore the previous assign was invalid.
        Because the previous assign may have rendered other cell/values invalid,
        we pop and revert to the previous grid state to make these cell/values
        valid again. The previous assign is then set to invalid.
        """
        self.grid = self.grid_stack.pop()
        i, j, k = self.assign_stack.pop()
        self.grid[i][j][k] = 0
    def _random_puzzle(self):
        """Deal a random starting grid.

        Assigns random values to randomly ordered cells until N cells are set
        and at least D-1 distinct values appear; restarts via recursion on any
        contradiction.  NOTE(review): unlucky draws recurse fully - confirm the
        recursion depth is acceptable for large d.
        """
        grid = np.ones((self.D, self.D, self.D), dtype=int)
        coords = [(i,j) for i in range(self.D) for j in range(self.D)]
        random.shuffle(coords)
        assigned_cells = 0
        assigned_vals = set()
        for i,j in coords:
            val = random.choice(grid[i][j].nonzero()[0])
            if self._assign(grid, i, j, val) is False:
                return self._random_puzzle()
            assigned_cells += 1
            assigned_vals.add(val)
            if assigned_cells >= self.N and len(assigned_vals) >= self.D - 1:
                return grid
        return self._random_puzzle()
    def _assign(self, grid: np.ndarray, row: int, col: int, val: int):
        """Fix cell (row, col) to *val* by eliminating all its other candidates.

        Returns the (mutated) grid on success, or False on contradiction.
        """
        other_vals = grid[row][col].nonzero()[0]
        other_vals = other_vals[other_vals != val]
        for ov in other_vals:
            if self._eliminate(grid, row, col, ov) is False:
                return False
        return grid
    def _eliminate(self, grid: np.ndarray, row: int, col: int, val: int):
        """Remove *val* from cell (row, col)'s candidates and propagate constraints.

        Returns the grid on success, or False on contradiction.
        """
        vals_binary = grid[row][col] # binary encoding of possible vals
        # check if val already eliminated during constraint propagation
        if vals_binary[val] == 0:
            return grid
        # eliminate val and get remaining possible vals
        vals_binary[val] = 0
        remaining_vals = vals_binary.nonzero()[0]
        # if eliminate last val, contradiction during constraint propagation
        if len(remaining_vals) == 0:
            return False
        # get peers and remove current cell coordinates
        peers = self._get_peers(row, col)
        for peer_coords in peers:
            peer_coords.remove((row, col))
        # strategy 1: reduction to one possible value for a cell
        if len(remaining_vals) == 1:
            for peer_coords in peers:
                for i, j in peer_coords:
                    if self._eliminate(grid, i, j, remaining_vals[0]) is False:
                        return False
        # strategy 2: reduction to one possible cell for a value
        for peer_coords in peers:
            peer_cells = [idx for idx, (i, j) in enumerate(peer_coords) if grid[i][j][val] == 1]
            if len(peer_cells) == 0:
                return False
            if len(peer_cells) == 1:
                i,j = peer_coords[peer_cells[0]]
                if self._assign(grid, i, j, val) is False:
                    return False
        return grid
    def _get_peers(self, row: int, col: int):
        """Return the column, row and d x d box units containing (row, col), cell included."""
        col_unit = [(i, col) for i in range(self.D)]
        row_unit = [(row, j) for j in range(self.D)]
        sq_row = row - row % self.d
        sq_col = col - col % self.d
        sq_unit = [(sq_row + i, sq_col + j) for i in range(self.d) for j in range(self.d)]
        return [col_unit, row_unit, sq_unit]
    def _check_bounds(self, row: int, col: int, val: int):
        """Assert that row, col and val all lie in [0, D-1]."""
        assert 0 <= row <= self.D-1, f"row must be in range [0,{self.D-1}]"
        assert 0 <= col <= self.D-1, f"col must be in range [0,{self.D-1}]"
        assert 0 <= val <= self.D-1, f"cell value must be in range [0,{self.D-1}]"
class HeuristicPolicy(Policy):
    """Hand-coded baseline: always fill the unsolved cell with the fewest remaining candidates."""
    def __init__(self, observation_space, action_space, config):
        Policy.__init__(self, observation_space, action_space, config)
        # example parameter
        self.w = 1.0
    def compute_actions(
            self,
            obs_batch: Union[List[TensorType], TensorType],
            state_batches: Optional[List[TensorType]] = None,
            prev_action_batch: Union[List[TensorType], TensorType] = None,
            prev_reward_batch: Union[List[TensorType], TensorType] = None,
            info_batch: Optional[Dict[str, list]] = None,
            episodes: Optional[List["MultiAgentEpisode"]] = None,
            explore: Optional[bool] = None,
            timestep: Optional[int] = None,
            **kwargs) -> \
            Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
        """Pick, per observation, the unsolved cell with the fewest candidates
        and assign its first remaining value (minimum-remaining-values style).
        """
        # return action batch, RNN states, extra values to include in batch
        actions = []
        for grid in obs_batch:
            # (row, col, candidate count) for every cell with more than one candidate.
            num_valid = [(i, j, n) for i, r in enumerate(grid.sum(axis=-1))
                         for j, n in enumerate(r) if n > 1]
            row, col, _ = min(num_valid, key=lambda x: x[-1])
            # First (lowest) remaining candidate of that cell.
            val = grid[row][col].nonzero()[0][0]
            actions += [{'row': row, 'col': col, 'val': val}]
        return actions, [], {}
    def learn_on_batch(self, samples):
        # implement your learning code here
        return {} # return stats
    def get_weights(self):
        # Expose the dummy parameter so RLlib checkpointing works.
        return {"w": self.w}
    def set_weights(self, weights):
        self.w = weights["w"]
class TorchCustomModel(TorchModelV2, nn.Module):
    """Example of a PyTorch custom model that just delegates to a fc-net."""

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        # All real work is delegated to a stock fully-connected network.
        self.torch_sub_model = TorchFC(obs_space, action_space, num_outputs,
                                       model_config, name)

    def forward(self, input_dict, state, seq_lens):
        """Cast observations to float and run the wrapped fc-net."""
        input_dict["obs"] = input_dict["obs"].float()
        logits, _ = self.torch_sub_model(input_dict, state, seq_lens)
        return logits, []

    def value_function(self):
        """Flatten the sub-model's value estimate to shape (batch,)."""
        return torch.reshape(self.torch_sub_model.value_function(), [-1])
if __name__ == '__main__':
    args = parser.parse_args()
    ray.init()
    # Can also register the env creator function explicitly with:
    # register_env("corridor", lambda config: SimpleCorridor(config))
    ModelCatalog.register_custom_model(
        "my_model", TorchCustomModel)
    # RLlib trainer configuration for the Sudoku environment.
    config = {
        "env": ConstraintSudoku,  # or "corridor" if registered above
        "env_config": {},
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
        "model": {
            "custom_model": "my_model",
        },
        "lr": grid_search([1e-2, 1e-4, 1e-6]),  # try different lrs
        "num_workers": 1,  # parallelism
        "framework": "torch"
    }
    # Meeting any one of these conditions terminates the tune run.
    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }
    results = tune.run(args.run, config=config, stop=stop)
    if args.as_test:
        # Fail loudly if the target reward was not reached.
        check_learning_achieved(results, args.stop_reward)
    ray.shutdown()
| StarcoderdataPython |
import sys
import time
import argparse
import logging
import threading
import subprocess
from . import rrlogger, __version__
from .constants import *
from .lib import AttemptResults
def run():
    """CLI entry point: run a command with retries, timeouts and hooks.

    Exits the interpreter with the last attempt's exit code (1 when the
    last attempt timed out).
    """
    parser = _get_parser()
    # ---
    # version: only inspect argv before a literal '--' separator so flags
    # belonging to the wrapped command are not misread
    f = sys.argv.index('--') if '--' in sys.argv else len(sys.argv)
    if '-v' in sys.argv[1:f] or '--version' in sys.argv[1:f]:
        print('retry version {v}'.format(v=__version__))
        sys.exit(0)
    # ---
    # parse arguments
    parsed = parser.parse_args()
    # ---
    # configure logger
    if parsed.verbose:
        rrlogger.setLevel(logging.DEBUG)
    # sanitize arguments
    parsed.tries = max(1, parsed.tries)
    parsed.min = max(MIN_TIMEOUT, parsed.min)
    # NOTE(review): `max(MAX_TIMEOUT, ...)` only enforces a *floor* on the
    # user-supplied maximum; if the intent was to cap it at MAX_TIMEOUT this
    # should be `min(...)` -- confirm before changing.
    parsed.max = max(MAX_TIMEOUT, parsed.max) if parsed.max is not None else parsed.min * parsed.tries
    command = ' '.join(parsed.command) if isinstance(parsed.command, list) else parsed.command
    # NOTE(review): the --dry-run flag is parsed but never honoured here.
    # run command
    last_res = 1
    res = AttemptResults(parsed.tries)
    for attempt in range(parsed.tries):
        # compute timeout for this attempt (grows linearly, capped at max)
        timeout = _get_timeout(parsed, attempt)
        rrlogger.info('Attempt #{a} [timeout: {t}]'.format(a=attempt, t=timeout))
        rrlogger.debug('Running "{c}"...'.format(c=command))
        # non-interactive runs get a pipe so the child cannot grab the tty
        stdin = None if parsed.interactive else subprocess.PIPE
        # spin process
        process = subprocess.Popen(command, shell=True, stdin=stdin)
        # the monitor thread records this attempt's outcome into `res`
        monitor = threading.Thread(name='process_monitor', target=_process_monitor, args=(process, timeout, res, attempt))
        monitor.start()
        monitor.join()
        assert res[attempt] != 'ND'
        last_res = res[attempt]
        rrlogger.debug('The attempt #{a} finished with code: {c}'.format(a=attempt, c=res[attempt]))
        # check what happened
        if res[attempt] == 0:
            rrlogger.info('Attempt #{a} succeeded!'.format(a=attempt))
            # the command succeeded
            break
        # the command failed or timed-out
        if parsed.on_retry:
            rrlogger.info('Executing on-retry command')
            try:
                subprocess.check_call(parsed.on_retry, shell=True)
            except Exception:
                # best-effort hook: a failing on-retry command is ignored
                # (narrowed from a bare `except:` so Ctrl-C still works)
                pass
        if res[attempt] is None:
            # process timed out, try again (or die)
            rrlogger.info('Attempt #{a} timed out!'.format(a=attempt))
        else:
            # process failed, try again (if requested)
            rrlogger.info('Attempt #{a} failed with exit code: {c}'.format(a=attempt, c=res[attempt]))
            if parsed.no_retry_on_error:
                break
    succeeded = any(r == 0 for r in res)
    if not succeeded:
        rrlogger.info('All attempts exhausted, no success reported!')
        # run on-failure command (if given)
        if parsed.on_fail:
            rrlogger.info('Executing on-fail command')
            try:
                subprocess.check_call(parsed.on_fail, shell=True)
            except Exception:
                # best-effort hook: failures here must not mask the result
                pass
    # ---
    rrlogger.info('Done!')
    sys.exit(last_res if last_res is not None else 1)
def _get_timeout(parsed, attempt):
return min(parsed.min * (attempt + 1), parsed.max)
def _process_monitor(process, timeout, results, attempt):
    """Watch *process*; store its exit code in results[attempt], or None if it was killed on timeout."""
    deadline = time.time() + timeout
    # Poll until the process exits on its own or the timeout elapses.
    while process.poll() is None and time.time() < deadline:
        time.sleep(1.0 / PROCESS_MONITOR_HEARTBEAT_HZ)
    if process.poll() is not None:
        # Finished by itself: record the real return code.
        results[attempt] = process.returncode
        return
    # Timed out: ask it to terminate, escalating to SIGKILL after 10s.
    rrlogger.debug('The process is taking too long to finish. It will be terminated.')
    grace_end = time.time() + 10
    escalated_to_sigkill = False
    while process.poll() is None:
        if time.time() > grace_end:
            if not escalated_to_sigkill:
                escalated_to_sigkill = True
                rrlogger.info('Escalating to SIGKILL')
            process.kill()
        else:
            process.terminate()
        # sleep between signals
        time.sleep(1)
    # indicate that the process was killed
    results[attempt] = None
def _get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'-m', '--min',
type=int,
required=True,
help="Minimum timeout (in seconds)"
)
parser.add_argument(
'-M', '--max',
type=int,
default=None,
help="Maximum timeout (in seconds)"
)
parser.add_argument(
'-n', '--tries',
type=int,
default=3,
help="Maximum number of retries"
)
parser.add_argument(
'--no-retry-on-error',
dest='no_retry_on_error',
action='store_true',
default=False,
help="Do not retry when the command fails (as opposed to time out)"
)
parser.add_argument(
'-', '--on-fail',
default=None,
help="Command to run after last failure"
)
parser.add_argument(
'-c', '--on-retry',
default=None,
help="Command to run after every failed attempt"
)
parser.add_argument(
'-i', '--interactive',
action='store_true',
default=False,
help="Whether to run the commands in interactive mode"
)
parser.add_argument(
'-D', '--dry-run',
action='store_true',
default=False,
help="Performs a dry-run. It shows which commands would run"
)
parser.add_argument(
'--verbose', '--debug',
dest='verbose',
action='store_true',
default=False,
help="Run in verbose mode"
)
parser.add_argument(
'command',
nargs='+'
)
return parser
| StarcoderdataPython |
161973 | <gh_stars>0
#!/usr/bin/python3
import os
import sys
import argparse
from datetime import datetime, timedelta
from icalendar import Calendar
import recurring_ical_events
from urllib.request import urlopen
import validators
# Default number of days before/after today in which events are collected.
WINDOW = 365
def org_date(dateTime):
    """Format a date or datetime as an org-mode timestamp string."""
    if not isinstance(dateTime, datetime):
        # Plain dates (all-day events) carry no time-of-day.
        return dateTime.strftime("<%Y-%m-%d %a>")
    return dateTime.astimezone().strftime("<%Y-%m-%d %a %H:%M>")
def create_header(title="Calendar", author="", email=""):
    """Build the org-file preamble (title, author, email, description, category)."""
    lines = [
        "#+TITLE: {}".format(title),
        "#+AUTHOR: {}".format(author),
        "#+EMAIL: {}".format(email),
        "#+DESCRIPTION: converted using the Pical2org python script",
        "#+CATEGORY: calendar",
    ]
    # Two blank lines separate the header from the first event entry.
    return "\n".join(lines) + "\n\n\n"
class orgEvent:
    """One calendar event, rendered as an org-mode entry via str()."""

    def __init__(self, event):
        """Extract summary, start/end and description from an ical-style event."""
        # Store the summary of the event ("(No title)" when absent).
        summary = event.get("summary")
        self.summary = summary.replace("\\,", ",") if summary is not None else "(No title)"
        # Store the start and end time of the event.
        self.dtstart = event.get("dtstart").dt
        self.isDateTime = isinstance(self.dtstart, datetime)
        end = event.get("dtend")
        self.dtend = end.dt if end is not None else None
        # All-day events have date (not datetime) stamps and an end exactly
        # one day ahead; dropping dtend keeps the org output to one stamp.
        if (self.dtend is not None and not self.isDateTime
                and self.dtend == self.dtstart + timedelta(days=1)):
            self.dtend = None
        # Store the description, unescaping ical's "\n" and "\," sequences.
        description = event.get("description")
        if description is not None:
            description = "\n".join(description.split("\\n"))
            self.description = description.replace("\\,", ",")
        else:
            self.description = ""

    def __str__(self):
        entry = "* {}\n".format(self.summary)
        if self.dtstart and self.dtend:
            # Event has both a start time and an end time.
            entry += "{}--{}\n".format(
                org_date(self.dtstart),
                org_date(self.dtend),
            )
        elif self.dtstart and not self.dtend:
            # Event only has a start time.
            entry += "{}\n".format(org_date(self.dtstart))
        entry += "{}\n".format(self.description)
        return entry
class Convertor:
    """Reads an ical source and renders its events as an org-mode document."""

    def __init__(self, args):
        icalFile = self.read_file(args.INPUT_FILE)
        self.calendar = Calendar.from_ical(icalFile.read())
        # Collect events inside a +/- WINDOW day span around today.
        self.startDate = datetime.now() - timedelta(days=WINDOW)
        self.endDate = datetime.now() + timedelta(days=WINDOW)

    def __call__(self):
        """Return the full org-mode document (header + one entry per event)."""
        results = create_header()
        events = recurring_ical_events.of(self.calendar).between(
            self.startDate, self.endDate
        )
        for component in events:
            results += str(orgEvent(component))
        return results

    def read_file(self, path):
        """Open the file from the local system or a url and return it

        Take a string representing either a url of a file or a name of
        a local file and return the open file.

        Parameters
        ----------
        path : str
            A url of a remote file or a path to a local file
        """
        # Check to see if path is a remote url
        if validators.url(path) is True:
            return urlopen(path)
        # Otherwise, assume it is a local path
        try:
            return open(path, "r", encoding="utf-8")
        except OSError:
            # BUG FIX: the original printed the unbound name `f` here, which
            # raised a NameError instead of the intended error message.
            print("Could not open/read file: ", path)
            sys.exit()
def create_parser():
    """Creates the default ArgumentParser object for the script

    Creates a parser using the argparse library to collect arguments
    from the command line. These arguments are then stored as an
    ArgumentParser object and returned.

    Returns
    -------
    ArgumentParser
        An ArgumentParser object
    """
    # Create the parser
    parser = argparse.ArgumentParser(
        description=(
            "Converts an ical (.ics) file into a text file formatted for use in Emacs"
            " org-mode."  # typo fix: was "org-mde"
        ),
        add_help=True,
        fromfile_prefix_chars="@",
    )
    # Tell the parser which version of the script this is
    parser.version = "1.0"
    # Add an argument to accept an input file
    parser.add_argument(
        "INPUT_FILE",
        help=(
            "A ical (.ics) file to be read. This can either be a path to a local or it"
            " may be a url to a remote file."
        ),
    )
    # Add an option to output results to a file instead of stdout
    parser.add_argument(
        "-o",
        "--output",
        help="Name of file to store the output results.",
        action="store",
        type=str,
        nargs=1,
        metavar="OUTPUT_FILE",
    )
    # Add a window determining how many days to convert events
    parser.add_argument(
        "-w",
        "--window",
        help="Length of time before and after today in which events will be collected",
        action="store",
        type=int,
        nargs="?",
        default=365,
    )
    # Add an option to force clobbering of the output file
    parser.add_argument(
        "-f",
        "--force_clobber",
        help=(
            # typo fixes: "of and output file i the file" -> "of an output
            # file if the file"
            "Force clobbering of an output file if the file already exists. If this"
            " option is provided, the output file will overwrite the existing file with"
            " the same name."
        ),
        action="store_true",
        default=False,
    )
    return parser
def main():
    """Parse CLI arguments and write the converted calendar to a file or stdout."""
    # BUG FIX: without the `global` declaration, `-w/--window` assigned a
    # local name and never affected the module-level WINDOW that Convertor
    # actually reads.
    global WINDOW
    parser = create_parser()
    args = parser.parse_args()
    WINDOW = args.window
    # Check to see if results should be saved to a file
    if args.output:
        # Check if a file with the same name as output file exists
        if os.path.exists(args.output[0]) and not args.force_clobber:
            print(
                # BUG FIX: added the missing space (was "...or runscript...")
                "File {outfile} exists.\nPlease specify a new name or run "
                "script again with -f to force clobbering of existing "
                "file".format(outfile=args.output[0])
            )
        else:
            convertor = Convertor(args)
            with open(args.output[0], "w") as outFile:
                outFile.write(convertor())
    # If no output file is given print data to stdout
    else:
        convertor = Convertor(args)
        print(convertor())


# If this file is called directly, then execute main()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
149890 | <filename>run.py
"""
**main api run module for memberships and affiliate api **
"""
__developer__ = "mobius-crypt"
__email__ = "<EMAIL>"
__twitter__ = "@blueitserver"
__github_repo__ = "https://github.com/freelancing-solutions/memberships-and-affiliate-api"
__github_profile__ = "https://github.com/freelancing-solutions/"
__licence__ = "MIT"
from threading import Thread
import json
import os
from flask import Response, redirect
from cache.cache_manager import app_cache
from config import config_instance
from config.use_context import use_context
from main import create_app
from utils.utils import is_development, today, return_ttl
from tasks import start_task
# TODO create separate run files for client api, admin api, and public_api
# The WSGI application instance, built from the environment-specific config.
app = create_app(config_class=config_instance)
# Enable debug output only on development builds that also request it.
debug = is_development() and config_instance.DEBUG
# Press the green button in the gutter to run the script.
# TODO Add logs handler which can send all errors to memberships and Affiliate Management Slack Channel
@app.before_request
def create_thread() -> None:
    """
    **create_thread**
    this creates a thread specifically to deal with tasks which will be run after request has been processed
    :return: None
    """
    try:
        # Only (re)create the worker thread when none is attached to the app.
        if not isinstance(app.tasks_thread, Thread):
            app.tasks_thread = Thread(target=start_task)
            return
    except AttributeError as e:
        # app.tasks_thread has not been set yet -- fall through and return.
        pass
    finally:
        # NOTE(review): returning from `finally` swallows any in-flight
        # exception; presumably intentional here -- confirm.
        return
@app.after_request
@use_context
def start_thread(response: Response) -> Response:
    """Start the background tasks thread (created before the request) after
    the request has been processed, then pass the response through."""
    try:
        thread = app.tasks_thread
        if isinstance(thread, Thread) and not thread.is_alive():
            thread.start()
    except RuntimeError as e:
        # The thread was already started once; replace it with a fresh one.
        app.tasks_thread = Thread(target=start_task)
        app.tasks_thread.start()
    return response
@app.route('/', methods=['GET', 'POST'])
def main():
    """Root endpoint: send callers to the swagger API documentation UI."""
    return redirect('/api-ui'), 302
@app.route('/redoc', methods=['GET', 'POST'])
@app_cache.cache.memoize(timeout=return_ttl('short'))
def redoc():
    """Legacy docs endpoint: also redirects to the swagger UI."""
    return redirect('/api-ui'), 302
@app.route('/warm-up', methods=['GET', 'POST'])
def warmup():
    """Warm-up endpoint polled by the hosting platform; always succeeds."""
    payload = dict(status=True, message='Warmup Success')
    return json.dumps(payload), 200
if __name__ == '__main__':
    if is_development():
        # NOTE: this is a development server
        # Bind to localhost only and enable the auto-reloader.
        app.run(debug=debug, use_reloader=True, host='127.0.0.1', port=int(os.environ.get('PORT', 8081)))
    else:
        # Production-style run: listen on all interfaces, no reloader.
        app.run(debug=debug, use_reloader=False, host='0.0.0.0', port=int(os.environ.get('PORT', 8081)))
"""
_ _
| | (_)_ __ ___ __ _ _ __
| | | | '_ \ / _ \/ _` | '__|
| |___| | | | | __/ (_| | |
|_____|_|_| |_|\___|\__,_|_|
____ _
| _ \ _ __ ___ __ _ _ __ __ _ _ __ ___ _ __ ___ (_)_ __ __ _
| |_) | '__/ _ \ / _` | '__/ _` | '_ ` _ \| '_ ` _ \| | '_ \ / _` |
| __/| | | (_) | (_| | | | (_| | | | | | | | | | | | | | | | (_| |
|_| |_| \___/ \__, |_| \__,_|_| |_| |_|_| |_| |_|_|_| |_|\__, |
|___/ |___/
@author: <NAME>
@coding: utf-8
@environment: Manjaro 18.1.5 Juhraya
@date: 19th Jan., 2020
"""
'''
max: z = 4x1 + 3x2
st: 2x1 + 3x2<=10
x1 + x2 <=8
x2 <= 7
x1,x2 > 0
'''
from scipy.optimize import linprog

# BUG FIX: linprog *minimizes* c @ x, so passing c=[4, 3] solved
# "min 4x1 + 3x2" (optimum x=(0,0), z=0) instead of the stated maximum.
# Negate the objective to maximize z = 4*x1 + 3*x2; the maximum is -res.fun.
c = [-4, -3]
A = [[2, 3], [1, 1]]   # 2*x1 + 3*x2 <= 10,  x1 + x2 <= 8
b = [10, 8]
x1_bound = [0, None]   # x1 >= 0
x2_bound = [0, 7]      # 0 <= x2 <= 7
res = linprog(c, A, b, bounds=(x1_bound, x2_bound))
print(res)
print('max z =', -res.fun)
| StarcoderdataPython |
187557 | <filename>tools/python/PythonPlugin/package_python_files.py<gh_stars>0
import os
import py_compile
import shutil
import tempfile
import zipfile
# Where the target Python's standard library lives (env vars set by Xcode).
stdlib_path = os.environ['MW_PYTHON_3_STDLIB_DIR'].replace('"', '')
# Destination archive inside the built product, e.g. .../Resources/python310.zip
zipfile_path = os.path.join(os.environ['BUILT_PRODUCTS_DIR'],
                            os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH'],
                            'python%s%s.zip' % (os.environ['MW_PYTHON_3_VERSION_MAJOR'],
                                                os.environ['MW_PYTHON_3_VERSION_MINOR']))
# Root-certificate bundle copied next to the zip.
cacert_file = 'cacert.pem'
cacert_path = os.path.join(os.environ['BUILT_PRODUCTS_DIR'],
                           os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH'],
                           cacert_file)
def add_file(fp, path):
    """Byte-compile *path* and store the resulting .pyc in the zip archive *fp*."""
    with tempfile.NamedTemporaryFile() as tmp:
        # dfile controls the path recorded inside the .pyc for tracebacks.
        recorded_path = os.path.join(os.path.basename(zipfile_path), path)
        py_compile.compile(path,
                           cfile=tmp.name,
                           dfile=recorded_path,
                           doraise=True,
                           optimize=0)
        fp.write(tmp.name, path[:-3] + '.pyc')
def add_files(fp):
    """Walk the current directory, adding every .py file inside real packages to *fp*."""
    for dirpath, dirnames, filenames in os.walk('.'):
        # Prune test directories and directories that are not packages
        # (mutating dirnames in place stops os.walk from descending).
        dirnames[:] = [
            d for d in dirnames
            if d not in ('idle_test', 'test', 'tests')
            and os.path.isfile(os.path.join(dirpath, d, '__init__.py'))
        ]
        for fname in filenames:
            if fname.endswith('.py'):
                # [2:] strips the leading "./" from the joined path.
                add_file(fp, os.path.join(dirpath, fname)[2:])
# If the ZIP file already exists, don't overwrite it, as this will break code
# signing on iOS
if not os.path.isfile(zipfile_path):
    with zipfile.ZipFile(zipfile_path, 'w', zipfile.ZIP_DEFLATED) as fp:
        # Archive the standard library itself...
        os.chdir(stdlib_path)
        add_files(fp)
        # ...then any bundled site-packages...
        os.chdir(os.path.join(stdlib_path, 'site-packages'))
        add_files(fp)
        # ...and finally the plugin's own config module.
        os.chdir(os.path.join(os.environ['SRCROOT'], 'PythonPlugin'))
        add_file(fp, 'mworks_python_config.py')

# Ditto for root certificates file
if not os.path.isfile(cacert_path):
    shutil.copyfile(os.path.join(stdlib_path, cacert_file), cacert_path)
| StarcoderdataPython |
188741 | <gh_stars>1-10
from django.urls import path
from django.urls.resolvers import URLPattern
from . import views
# Routes for this app: a single detail endpoint keyed by primary key.
urlpatterns = [
    path('<str:pk>/', views.getRoutes, name="routes"),
]
import json
import csv
from result import Result
import requests
import time
import re
import io
from extract_entities import entities
# Evaluate the FALCON entity-linking API over the QALD-7 benchmark and
# append per-question precision/recall/F1 rows to a CSV file.
writer = csv.writer(open("falcon_results_qald7.csv", 'a', newline=''))
url = 'https://labs.tib.eu/falcon/api?mode=long'
headers = {'Content-type': 'application/json'}
with open('qald-7.json', encoding='UTF-8') as data_file:
    data = json.loads(data_file.read())
nb=0
for distro in data['questions']:
    # Gold-standard entities extracted from the question's SPARQL query.
    entities_dataset=entities(distro['query']['sparql'])
    print(entities_dataset)
    entity_mentions=0
    correctly_linked=0
    n=1
    system_result=0
    result=[]
    tmp=time.time()
    # Pick the English wording of the question.
    for d in distro['question']:
        if d["language"]=='en':
            question_en=d["string"]
    query = {'text': str(question_en)}
    data_json = json.dumps(query)
    response = requests.post(url, data=data_json, headers=headers)
    detected_entity=0
    if response:
        execution_time=time.time()-tmp
        response_json=response.json()
        if 'entities' in response_json:
            if response_json['entities']:
                # system_result=len(response_json['results'])
                system_result=len(response_json['entities'])
                # Count how many gold entities FALCON linked correctly.
                for em in entities_dataset:
                    entity_mentions=entity_mentions+1
                    for i in response_json['entities']:
                        if i[0]==em:
                            correctly_linked=correctly_linked+1
                            result.append(i[1])
                    n=n+1
                #print(correctly_linked, system_result, entity_mentions)
                res= Result(correctly_linked, system_result, entity_mentions)
                fmeasure=0
                if system_result!=0:
                    entity_precision=res.precision()
                else:
                    entity_precision=0
                if entity_mentions!=0:
                    entity_recall=res.recall()
                else:
                    entity_recall=0
                if entity_recall!=0 and entity_precision!=0:
                    fmeasure= (2*entity_precision*entity_recall)/(entity_precision + entity_recall)
                # NOTE(review): this prints the same summary line once per
                # linked entity -- possibly meant to print each item.
                for i in result:
                    print("id question: ", distro['id'], "result n: ", system_result, detected_entity, result)
                print("Precision:", entity_precision," Recall:", entity_recall )
                print("____________________________________")
                myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, fmeasure, "0", "0", execution_time] ]
                myFile = open('falcon_results_qald7.csv', 'a', encoding='utf-8')
                with myFile:
                    writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
                    writer.writerows(myData)
            else:
                #No string match
                # FALCON returned an empty entity list for this question.
                nsm=0
                system_result=0
                entity_precision=0
                entity_recall=0
                nsm=nsm+1
                myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, "0", "0",nsm, execution_time] ]
                print("____________________________________No string match")
                myFile = open('falcon_results_qald7.csv', 'a', encoding='utf-8')
                with myFile:
                    writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
                    writer.writerows(myData)
        else:
            #No detected named entity:
            # Response has no 'entities' key at all.
            if entities_dataset:
                # The gold standard expected entities but none were detected.
                nbem=0
                system_result=0
                entity_precision=0
                entity_recall=0
                correctly_linked=0
                detected_entity=0
                if 'entity mapping' in distro:
                    for em in distro["entity mapping"]:
                        nbem=nbem+1
                myData=[[distro['id'],question_en,nbem,detected_entity,system_result,correctly_linked, entity_precision,entity_recall,"0", "1", "0", execution_time] ]
                print("____________________________________No detected named entity")
            else:
                # No gold entities and no detected entities: scored as perfect.
                nbem=0
                system_result=1
                entity_precision=1
                entity_recall=1
                correctly_linked=1
                detected_entity=0
                fmeasure=1
                if 'entity mapping' in distro:
                    for em in distro["entity mapping"]:
                        nbem=nbem+1
                myData=[[distro['id'],question_en,nbem,detected_entity,system_result,correctly_linked, entity_precision,entity_recall,fmeasure, "3", "3", execution_time] ]
                print("____________________________________No mention + No results")
            myFile = open('falcon_results_qald7.csv', 'a', encoding='utf-8')
            with myFile:
                writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
                writer.writerows(myData)
    else:
        #Unknown error from the web service
        # Non-success HTTP response: record a zeroed row.
        execution_time=time.time()-tmp
        system_result=0
        entity_precision=0
        entity_recall=0
        fmeasure= 0
        entity_mentions=0
        detected_entity=0
        correctly_linked=0
        print("____________________________________Unknown error from the web service")
        myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, fmeasure, "2", "2", execution_time] ]
        myFile = open('falcon_results_qald7.csv', 'a', encoding='utf-8')
        with myFile:
            writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
            writer.writerows(myData)
print("FALCON process completed")
184286 | <gh_stars>10-100
from utilities import db
def TransformResourceData(vars):
fields = {
'resource_id': 'num',
'resource_type_id': 'num',
'resource_name': 'string',
'resource_uri': 'string',
'resource_parent_id': 'num',
'resource_child_number': 'num',
}
t = vars['target']
resource_inserter = db.StaggeredInsert(t['host'], t['user'], t['password'], t['port'], t['db'], 'resources', fields)
resource_id_map = {}
resources = [
{'type': 'indices', 'parent_type': None, 'items': vars['queries'].GetIndices(vars)},
{'type': 'content_sections', 'parent_type': None, 'items': vars['queries'].GetContentSections(vars)},
{'type': 'tutorials', 'parent_type': 'content_sections', 'items': vars['queries'].GetTutorials(vars)},
{'type': 'tests', 'parent_type': 'content_sections', 'items': vars['queries'].GetTests(vars)},
{'type': 'books', 'parent_type': None, 'items': vars['queries'].GetBooks(vars)},
{'type': 'wikis', 'parent_type': None, 'items': vars['queries'].GetWikis(vars)},
{'type': 'forums', 'parent_type': None, 'items': vars['queries'].GetForums(vars)},
]
resource_moocdb_id = 1
for resource_subset in resources:
if resource_subset['type'] not in resource_id_map.keys(): resource_id_map[resource_subset['type']] = {}
for item in resource_subset['items']:
item['resource_id'] = resource_moocdb_id
resource_id_map[resource_subset['type']][item['original_id']] = resource_moocdb_id
resource_moocdb_id += 1
rpt = resource_subset['parent_type']
rpoid = item['resource_parent_original_id']
if rpt != None and rpoid != None and rpoid in resource_id_map[rpt].keys():
rpmid = resource_id_map[rpt][rpoid]
item['resource_parent_id'] = rpmid
else:
item['resource_parent_id'] = None
resource_inserter.addRow({k: item[k] for k in fields})
resource_inserter.insertPendingRows()
vars["logger"].Log(vars, "Counts: Inserted {} resources to target".format(resource_inserter.num_inserted_rows))
return resource_id_map | StarcoderdataPython |
172554 | #045_Pedra_papel_e_tesoura.py
from time import sleep
from random import randint
print("Pedra, Papel ou Tesoura?")
print('''[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA''')
lista = ["PEDRA", "PAPEL", "TESOURA"]
c = randint(0, 2)
j = int(input("Sua escolha: "))
sleep(1)
print("JO")
sleep(1)
print("KEN")
sleep(1)
print("PO!!!")
sleep(1)
print(f"Jogador: {lista[j]}")
print(f"Computador: {lista[c]}")
sleep(1)
if (c == 0):
if (j == 0):
print("Empate")
elif (j == 1):
print("Vitória")
elif (j == 2):
print("Derrota")
elif (c == 1):
if (j == 1):
print("Empate")
elif (j == 2):
print("Vitória")
elif (j == 0):
print("Derrota")
elif (c == 2):
if (j == 2):
print("Empate")
elif (j == 0):
print("Vitória")
elif (j == 1):
print("Derrota")
else:
print("Jogada inválida")
| StarcoderdataPython |
3258581 | <filename>video-streaming/video_streaming/core/constants/errors.py
__all__ = [
'ErrorMessages',
'ErrorCodes'
]
class ErrorMessages:
INPUT_VIDEO_404_OR_403 = "Input video is not found on S3 or permission denieded. make sure bucket name and file name is exist."
OUTPUT_BUCKET_404_OR_403 = "Output bucket is not found or permission denieded."
OUTPUT_KEY_IS_ALREADY_EXIST = "Output key on S3 is already exist."
REPRESENTATION_NEEDS_BOTH_SIZE_AND_BITRATE = "Representation needs both size and bitrate"
S3_INPUT_KEY_IS_REQUIRED = "s3_input_key is required."
S3_INPUT_BUCKET_IS_REQUIRED = "s3_input_bucket is required."
S3_OUTPUT_BUCKET_IS_REQUIRED = "s3_output_bucket is required."
S3_OUTPUT_KEY_IS_REQUIRED = "s3_output_key is required."
OBJECT_DETAILS_IS_REQUIRED = "object_details is required."
OBJECT_DETAILS_IS_INVALID = "object_details must be a dict with 'ContentLength' key."
REQUEST_ID_IS_REQUIRED = "request_id is required."
OUTPUT_NUMBER_IS_REQUIRED = "request_id is required."
INPUT_NUMBER_IS_REQUIRED = "input_number is required."
WEBHOOK_URL_IS_REQUIRED = "webhook_url is required and can not be empty."
INPUT_PATH_IS_REQUIRED = "input_path is required."
OUTPUT_PATH_IS_REQUIRED = "output_path is required."
DIRECTORY_IS_REQUIRED = "directory is required."
OUTPUT_PATH_OR_S3_OUTPUT_KEY_IS_REQUIRED = "output_path or s3_output_key is required."
INPUT_SIZE_CAN_NOT_BE_ZERO = "input file size can not be zero."
INPUT_FILE_IS_NOT_FOUND = "input file is not found."
WEBHOOK_URL_MUST_NOT_BE_REDIRECTED = "webhook url must not be redirected."
WEBHOOK_HTTP_FAILED = "webhook task failed" \
", HTTP response status: '{status}'" \
", reason: '{reason}'" \
", request_id: '{request_id}'"
HTTP_STATUS_CODE_NOT_SUPPORT = "response HTTP status code is not support" \
", HTTP response status: '{status}'" \
", reason: '{reason}'" \
", request_id: '{request_id}'"
TASK_WAS_FORCIBLY_STOPPED = "task was forcibly stopped."
CAN_NOT_UPLOAD_DIRECTORY = "can not upload directory"
INPUT_VIDEO_CODEC_TYPE_IN_NOT_VIDEO = "input video codec type is not video"
CAN_NOT_UPLOAD_EMPTY_FILE = "cab not upload empty file"
CAN_NOT_UPLOAD_FILE = "can not upload the file"
INPUT_TYPE_IS_REQUIRED = "input type is required."
JOB_DETAILS_NOT_FOUND = "job details not found."
WAITING_FOR_AGGREGATE_INPUTS = "waiting for aggregate inputs."
# gRPC
INTERNAL_ERROR = "internal server error"
S3_KEY_CAN_NOT_BE_EMPTY = "s3 key can not be empty."
S3_BUCKET_NAME_IS_NOT_VALID = "s3 bucket name is not valid."
DUPLICATE_OUTPUT_LOCATIONS = "there are duplicate output locations."
ONE_OUTPUT_IS_REQUIRED = "one output is required."
JOB_NOT_FOUND_BY_TRACKING_ID = "job not found by tracking_id"
JOB_IS_FAILED = "job is failed"
JOB_IS_REVOKED = "job is revoked"
JOB_IS_FINISHED = "job is finished"
NO_WATERMARK_TO_USE = "no watermark to use"
class ErrorCodes:
# gRPC base exception error codes
INTERNAL_ERROR = 1000
S3_KEY_CAN_NOT_BE_EMPTY = 1001
S3_BUCKET_NAME_IS_NOT_VALID = 1002
DUPLICATE_OUTPUT_LOCATIONS = 1003
ONE_OUTPUT_IS_REQUIRED = 1004 # TODO
JOB_NOT_FOUND_BY_TRACKING_ID = 1005 # revoke_job_outputs
JOB_IS_FAILED = 1006
JOB_IS_REVOKED = 1007
JOB_IS_FINISHED = 1008
NO_WATERMARK_TO_USE = 1009
| StarcoderdataPython |
1664124 | """Main entry point for the script."""
import logging
import sys
from . import cli, script
def init_logger(verbose):
"""Initialize logger based on `verbose`."""
level = logging.DEBUG if verbose >= 1 else logging.INFO
logging.basicConfig(
level=level, format='%(message)s'
)
def main():
"""Execute the script commands."""
try:
user_config = script.get_config()
args = cli.parse_args(user_config)
init_logger(args.verbose)
if args.command == 'start':
script.start(args)
elif args.command == 'done':
script.done(args)
except script.ScriptError as exc:
logging.error(exc)
sys.exit(1)
except Exception as exc:
logging.error('Unexpected script error: %s', exc)
sys.exit(2)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1612238 | <gh_stars>0
import sys
import os.path
sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') ))
from aql_tests import skip, AqlTestCase, runLocalTests
from aql.utils import fileChecksum, Tempdir, \
disableDefaultHandlers, enableDefaultHandlers, addUserHandler, removeUserHandler
from aql.values import SimpleValue, FileChecksumValue
from aql.options import builtinOptions
from aql.nodes import Node, BatchNode, Builder, FileBuilder, BuildManager
from aql.nodes.aql_build_manager import ErrorNodeDependencyCyclic, ErrorNodeSignatureDifferent
#//===========================================================================//
class CopyValueBuilder (Builder):
def __init__(self, options ):
self.signature = b''
def build( self, node ):
target_values = []
for source_value in node.getSourceValues():
copy_value = SimpleValue( source_value.get(), name = source_value.name + '_copy' )
target_values.append( copy_value )
node.addTargets( target_values )
def getTraceTargets( self, node, brief ):
return tuple( value.name for value in node.getTargetValues() )
def getTraceSources( self, node, brief ):
return tuple( value.name for value in node.getSourceValues() )
#//===========================================================================//
class ChecksumBuilder (FileBuilder):
NAME_ATTRS = ('replace_ext',)
SIGNATURE_ATTRS = ('offset', 'length')
def __init__(self, options, offset, length, replace_ext = False ):
self.offset = offset
self.length = length
self.replace_ext = replace_ext
#//-------------------------------------------------------//
def _buildSrc( self, src, alg ):
chcksum = fileChecksum( src, self.offset, self.length, alg )
if self.replace_ext:
chcksum_filename = os.path.splitext(src)[0]
else:
chcksum_filename = src
chcksum_filename += '.%s.chksum' % alg
chcksum_filename = self.getFileBuildPath( chcksum_filename )
with open( chcksum_filename, 'wb' ) as f:
f.write( chcksum.digest() )
return self.makeFileValue( chcksum_filename, tags = alg )
#//-------------------------------------------------------//
def build( self, node ):
target_values = []
for src in node.getSources():
target_values.append( self._buildSrc( src, 'md5' ) )
target_values.append( self._buildSrc( src, 'sha512' ) )
node.addTargets( target_values )
#//-------------------------------------------------------//
def buildBatch( self, node ):
for src_value in node.getSourceValues():
targets = [ self._buildSrc( src_value.get(), 'md5' ),
self._buildSrc( src_value.get(), 'sha512' ) ]
node.addSourceTargets( src_value, targets )
#//===========================================================================//
class ChecksumSingleBuilder (ChecksumBuilder):
split = ChecksumBuilder.splitSingle
#//===========================================================================//
def _addNodesToBM( builder, src_files, Node = Node ):
bm = BuildManager()
try:
checksums_node = Node( builder, src_files )
checksums_node2 = Node( builder, checksums_node )
bm.add( checksums_node ); bm.selfTest()
bm.add( checksums_node2 ); bm.selfTest()
except Exception:
bm.close()
raise
return bm
#//===========================================================================//
def _build( bm ):
try:
bm.selfTest()
success = bm.build( jobs = 1, keep_going = False )
bm.selfTest()
if not success:
bm.printFails()
raise Exception("Nodes failed")
finally:
bm.close()
bm.selfTest()
#//===========================================================================//
def _buildChecksums( builder, src_files, Node = Node ):
bm = _addNodesToBM( builder, src_files, Node )
_build( bm )
#//===========================================================================//
class TestBuildManager( AqlTestCase ):
    """Tests for BuildManager: dependency wiring, building, status queries,
    clearing, batch builds, rebuilds, tagged targets and node-signature
    conflicts.

    The event-handler methods below count node events raised by the build
    manager; each test resets the counters it asserts on.
    """

    # -- event handlers: installed in setUp(), count build-manager events --

    def eventNodeBuilding( self, settings, node ):
        self.building_nodes += 1

    def eventNodeBuildingFinished( self, settings, node, builder_output, progress ):
        self.finished_nodes += 1

    def eventNodeActual( self, settings, node, progress ):
        self.actual_nodes += 1

    def eventNodeOutdated( self, settings, node, progress ):
        self.outdated_nodes += 1

    def eventNodeRemoved( self, settings, node, progress ):
        self.removed_nodes += 1

    def setUp( self ):
        """Reset all event counters and register the counting handlers."""
        super(TestBuildManager, self).setUp()
        # disableDefaultHandlers()
        self.building_nodes = 0
        addUserHandler( self.eventNodeBuilding )
        self.finished_nodes = 0
        addUserHandler( self.eventNodeBuildingFinished )
        self.actual_nodes = 0
        addUserHandler( self.eventNodeActual )
        self.outdated_nodes = 0
        addUserHandler( self.eventNodeOutdated )
        self.removed_nodes = 0
        addUserHandler( self.eventNodeRemoved )

    def tearDown( self ):
        """Unregister the counting handlers and restore default handlers."""
        removeUserHandler( [ self.eventNodeBuilding,
                             self.eventNodeBuildingFinished,
                             self.eventNodeOutdated,
                             self.eventNodeActual,
                             self.eventNodeRemoved,
                           ] )
        enableDefaultHandlers()
        super(TestBuildManager, self).tearDown()

    def test_bm_deps(self):
        # Dependency-graph wiring and cyclic-dependency detection.
        bm = BuildManager()
        value1 = SimpleValue( "http://aql.org/download1", name = "target_url1" )
        value2 = SimpleValue( "http://aql.org/download2", name = "target_url2" )
        value3 = SimpleValue( "http://aql.org/download3", name = "target_url3" )
        options = builtinOptions()
        builder = CopyValueBuilder( options )
        node0 = Node( builder, value1 )
        node1 = Node( builder, node0 )
        node2 = Node( builder, node1 )
        node3 = Node( builder, value2 )
        node4 = Node( builder, value3 )
        node5 = Node( builder, node4 )
        node6 = Node( builder, node5 )
        node6.depends( [node0, node1] )
        bm.add( node0 ); bm.selfTest(); self.assertEqual( len(bm), 1 )
        bm.add( node1 ); bm.selfTest(); self.assertEqual( len(bm), 2 )
        bm.add( node2 ); bm.selfTest(); self.assertEqual( len(bm), 3 )
        bm.add( node3 ); bm.selfTest(); self.assertEqual( len(bm), 4 )
        bm.add( node4 ); bm.selfTest(); self.assertEqual( len(bm), 5 )
        bm.add( node5 ); bm.selfTest(); self.assertEqual( len(bm), 6 )
        bm.add( node6 ); bm.selfTest(); self.assertEqual( len(bm), 7 )
        node0.depends( node3 ); bm.depends( node0, node3 ); bm.selfTest()
        node1.depends( node3 ); bm.depends( node1, node3 ); bm.selfTest()
        node2.depends( node3 ); bm.depends( node2, node3 ); bm.selfTest()
        node3.depends( node4 ); bm.depends( node3, node4 ); bm.selfTest()
        node0.depends( node5 ); bm.depends( node0, node5 ); bm.selfTest()
        node5.depends( node3 ); bm.depends( node5, node3 ); bm.selfTest()
        # node4 -> node3 would close a cycle: node3 already depends on node4.
        with self.assertRaises(ErrorNodeDependencyCyclic):
            node4.depends( node3 ); bm.depends( node4, node3 ); bm.selfTest()

    def test_bm_build(self):
        # Building twice with the same builder must rebuild nothing the
        # second time; changing builder parameters must trigger a rebuild.
        with Tempdir() as tmp_dir:
            options = builtinOptions()
            options.build_dir = tmp_dir
            src_files = self.generateSourceFiles( tmp_dir, 5, 201 )
            builder = ChecksumBuilder( options, 0, 256 )
            self.building_nodes = self.finished_nodes = 0
            _buildChecksums( builder, src_files )
            self.assertEqual( self.building_nodes, 2 )
            self.assertEqual( self.building_nodes, self.finished_nodes )

            # Second, identical build: everything is up to date.
            self.building_nodes = self.finished_nodes = 0
            _buildChecksums( builder, src_files )
            self.assertEqual( self.building_nodes, 0 )
            self.assertEqual( self.building_nodes, self.finished_nodes )

            # New builder parameters: nodes are outdated again.
            builder = ChecksumBuilder( options, 32, 1024 )
            self.building_nodes = self.finished_nodes = 0
            _buildChecksums( builder, src_files )
            self.assertEqual( self.building_nodes, 2 )
            self.assertEqual( self.building_nodes, self.finished_nodes )

            self.building_nodes = self.finished_nodes = 0
            _buildChecksums( builder, src_files )
            self.assertEqual( self.building_nodes, 0 )
            # BUGFIX: this previously compared building_nodes with itself,
            # which is always true; compare with finished_nodes like the
            # three parallel assertions above.
            self.assertEqual( self.building_nodes, self.finished_nodes )

    def test_bm_nodes(self):
        # Building/status/clear over a small node graph, plus builds
        # restricted to a subset of requested nodes.
        def _makeNodes( builder ):
            node1 = Node( builder, value1 )
            copy_node1 = Node( builder, node1 )
            copy2_node1 = Node( builder, copy_node1 )
            node2 = Node( builder, value2 )
            node3 = Node( builder, value3 )
            copy_node3 = Node( builder, node3 )
            copy2_node3 = Node( builder, copy_node3 )
            copy2_node3.depends( [node1, copy_node1] )
            return node1, node2, node3, copy_node1, copy_node3, copy2_node1, copy2_node3

        with Tempdir() as tmp_dir:
            options = builtinOptions()
            options.build_dir = tmp_dir
            bm = BuildManager()
            value1 = SimpleValue( "http://aql.org/download1", name = "target_url1" )
            value2 = SimpleValue( "http://aql.org/download2", name = "target_url2" )
            value3 = SimpleValue( "http://aql.org/download3", name = "target_url3" )
            builder = CopyValueBuilder( options )

            # Full build: all 7 nodes finish.
            bm.add( _makeNodes( builder ) )
            self.finished_nodes = 0
            bm.build( jobs = 1, keep_going = False )
            bm.close()
            self.assertEqual( self.finished_nodes, 7 )

            # Status right after a build: all 7 are actual.
            bm.add( _makeNodes( builder ) )
            self.actual_nodes = 0
            bm.status()
            bm.close()
            self.assertEqual( self.actual_nodes, 7 )

            # Clearing removes all 7 nodes' targets.
            bm.add( _makeNodes( builder ) )
            self.removed_nodes = 0
            bm.clear()
            bm.close()
            self.assertEqual( self.removed_nodes, 7 )

            # After clear, the three leaf nodes are outdated again.
            bm.add( _makeNodes( builder ) )
            self.actual_nodes = 0
            self.outdated_nodes = 0
            bm.status()
            bm.close()
            self.assertEqual( self.actual_nodes, 0 )
            self.assertEqual( self.outdated_nodes, 3 )

            # Restricting the build to copy_node3 builds it and its source.
            nodes = _makeNodes( builder )
            copy_node3 = nodes[4]
            bm.add( nodes )
            self.finished_nodes = 0
            bm.build( jobs = 1, keep_going = False, nodes = copy_node3 )
            bm.close()
            self.assertEqual( self.finished_nodes, 2 )

            # node2 is the only remaining outdated node in this subset.
            nodes = _makeNodes( builder )
            node2 = nodes[1]; copy_node3 = nodes[4]
            bm.add( nodes )
            self.finished_nodes = 0
            bm.build( jobs = 1, keep_going = False, nodes = [node2, copy_node3] )
            bm.close()
            self.assertEqual( self.finished_nodes, 1 )

    def test_bm_check(self):
        # After a successful build the status query reports all nodes actual.
        with Tempdir() as tmp_dir:
            options = builtinOptions()
            options.build_dir = tmp_dir
            src_files = self.generateSourceFiles( tmp_dir, 3, 201 )
            builder = ChecksumBuilder( options, 0, 256, replace_ext = True )
            self.building_nodes = self.finished_nodes = 0
            _buildChecksums( builder, src_files )
            self.assertEqual( self.building_nodes, 2 )
            self.assertEqual( self.building_nodes, self.finished_nodes )
            bm = _addNodesToBM( builder, src_files )
            try:
                self.actual_nodes = self.outdated_nodes = 0
                bm.status(); bm.selfTest()
                self.assertEqual( self.outdated_nodes, 0)
                self.assertEqual( self.actual_nodes, 2 )
            finally:
                bm.close()

    def test_bm_batch(self):
        # Same as test_bm_check, but with batch nodes.
        with Tempdir() as tmp_dir:
            options = builtinOptions()
            options.build_dir = tmp_dir
            src_files = self.generateSourceFiles( tmp_dir, 3, 201 )
            builder = ChecksumBuilder( options, 0, 256, replace_ext = True )
            self.building_nodes = self.finished_nodes = 0
            _buildChecksums( builder, src_files, Node = BatchNode )
            self.assertEqual( self.building_nodes, 2 )
            self.assertEqual( self.building_nodes, self.finished_nodes )
            bm = _addNodesToBM( builder, src_files, Node = BatchNode )
            try:
                self.actual_nodes = self.outdated_nodes = 0
                bm.status(); bm.selfTest()
                self.assertEqual( self.outdated_nodes, 0)
                self.assertEqual( self.actual_nodes, 2 )
            finally:
                bm.close()

    def test_bm_rebuild(self):
        # A three-level single-split chain builds num_src_files * 7 nodes;
        # afterwards the first level is fully up to date.
        with Tempdir() as tmp_dir:
            options = builtinOptions()
            options.build_dir = tmp_dir
            num_src_files = 3
            src_files = self.generateSourceFiles( tmp_dir, num_src_files, 201 )
            bm = BuildManager()
            self.building_nodes = self.finished_nodes = 0
            self.actual_nodes = self.outdated_nodes = 0
            builder = ChecksumSingleBuilder( options, 0, 256 )
            src_values = []
            for s in src_files:
                src_values.append( FileChecksumValue( s ) )
            node = Node( builder, src_values )
            node = Node( builder, node )
            node = Node( builder, node )
            bm.add( node )
            _build( bm )
            self.assertEqual( self.building_nodes, num_src_files * 7 )

            self.actual_nodes = self.outdated_nodes = 0
            bm = BuildManager()
            builder = ChecksumSingleBuilder( options, 0, 256 )
            node = Node( builder, src_values )
            bm.add( node ); bm.selfTest()
            bm.status(); bm.selfTest()
            self.assertEqual( self.outdated_nodes, 0 )
            self.assertEqual( self.actual_nodes, num_src_files )

    def test_bm_tags(self):
        # Building from a tagged subset of a node's targets; touching one
        # source only rebuilds the affected pair of nodes.
        with Tempdir() as tmp_dir:
            options = builtinOptions()
            options.build_dir = tmp_dir
            num_src_files = 3
            src_files = self.generateSourceFiles( tmp_dir, num_src_files, 201 )
            builder = ChecksumSingleBuilder( options, 0, 256 )
            bm = BuildManager()
            self.finished_nodes = 0
            node = Node( builder, src_files )
            node_md5 = Node( builder, node.at('md5') )
            bm.add( node_md5 )
            _build( bm )
            self.assertEqual( self.finished_nodes, num_src_files * 2 )

            self.touchCppFile( src_files[0] )
            bm = BuildManager()
            self.finished_nodes = 0
            node = Node( builder, src_files )
            node_md5 = Node( builder, node.at('md5') )
            bm.add( node_md5 )
            _build( bm )
            self.assertEqual( self.finished_nodes, 2 )

    def test_bm_tags_batch(self):
        # Tagged targets of a batch node feeding a single-split builder.
        with Tempdir() as tmp_dir:
            options = builtinOptions()
            options.build_dir = tmp_dir
            num_src_files = 3
            src_files = self.generateSourceFiles( tmp_dir, num_src_files, 201 )
            builder = ChecksumBuilder( options, 0, 256 )
            single_builder = ChecksumSingleBuilder( options, 0, 256 )
            bm = BuildManager()
            self.finished_nodes = 0
            node = BatchNode( builder, src_files )
            node_md5 = Node( single_builder, node.at('md5') )
            bm.add( node_md5 )
            _build( bm )
            self.assertEqual( self.finished_nodes, num_src_files + 1 )

            self.touchCppFile( src_files[0] )
            bm = BuildManager()
            self.finished_nodes = 0
            node = BatchNode( builder, src_files )
            node_md5 = Node( single_builder, node.at('md5') )
            bm.add( node_md5 )
            _build( bm )
            self.assertEqual( self.finished_nodes, 2 )

    def test_bm_conflicts(self):
        # Two builders producing the same targets with different signatures
        # must be rejected.
        with Tempdir() as tmp_dir:
            options = builtinOptions()
            options.build_dir = tmp_dir
            num_src_files = 3
            src_files = self.generateSourceFiles( tmp_dir, num_src_files, 201 )
            bm = BuildManager()
            self.finished_nodes = 0
            builder1 = ChecksumSingleBuilder( options, 0, 256 )
            builder2 = ChecksumSingleBuilder( options, 0, 1024 )
            node1 = Node( builder1, src_files )
            node2 = Node( builder2, src_files )
            # node1 = Node( builder1, node1 )
            # node2 = Node( builder2, node2 )
            bm.add( node1 )
            bm.add( node2 )
            self.assertRaises( ErrorNodeSignatureDifferent, _build, bm )

    def test_bm_no_conflicts(self):
        # Identical builder parameters: duplicate nodes are no conflict and
        # the shared work is done only once (3 * 3 finished nodes).
        with Tempdir() as tmp_dir:
            options = builtinOptions()
            options.build_dir = tmp_dir
            num_src_files = 3
            src_files = self.generateSourceFiles( tmp_dir, num_src_files, 201 )
            bm = BuildManager()
            self.finished_nodes = 0
            builder1 = ChecksumSingleBuilder( options, 0, 256 )
            builder2 = ChecksumSingleBuilder( options, 0, 256 )
            node1 = Node( builder1, src_files )
            node2 = Node( builder2, src_files )
            node1 = Node( builder1, node1 )
            node2 = Node( builder2, node2 )
            bm.add( node1 )
            bm.add( node2 )
            _build( bm )
            self.assertEqual( self.finished_nodes, 3 * 3 )

    @skip
    def test_bm_node_names(self):
        # Debug helper: prints node string representations after a build.
        with Tempdir() as tmp_dir:
            src_files = self.generateSourceFiles( tmp_dir, 3, 201 )
            options = builtinOptions()
            options.build_dir = tmp_dir
            builder = ChecksumBuilder( options, 0, 256, replace_ext = False )
            bm = BuildManager()
            try:
                src_values = []
                for s in src_files:
                    src_values.append( FileChecksumValue( s ) )
                node0 = Node( builder, None )
                node1 = Node( builder, src_values )
                node2 = Node( builder, node1 )
                node3 = Node( builder, node2 )
                node4 = Node( builder, node3 )
                bm.add( node0 )
                bm.add( node1 )
                bm.add( node2 )
                bm.add( node3 )
                bm.add( node4 )
                bm.build(1, False)
                print("node2: %s" % str(node4) )
                print("node2: %s" % str(node3) )
                print("node2: %s" % str(node2) )
                print("node1: %s" % str(node1) )
                print("node0: %s" % str(node0) )
            finally:
                bm.close()
#//===========================================================================//
def _generateNodeTree( bm, builder, node, depth ):
    """Append a linear chain of *depth* dependent nodes to *bm*,
    each new node taking the previous one as its source."""
    current = node
    remaining = depth
    while remaining:
        current = Node(builder, current)
        bm.add(current)
        remaining -= 1
#//===========================================================================//
@skip
class TestBuildManagerSpeed( AqlTestCase ):
    # Stress test: wires a 5000-node dependency chain into a BuildManager.
    # Skipped by default because it is slow.
    def test_bm_deps_speed(self):
        bm = BuildManager()
        value = SimpleValue( "http://aql.org/download", name = "target_url1" )
        # NOTE(review): every other test constructs CopyValueBuilder( options );
        # here it is called with no arguments — confirm the builder accepts
        # zero arguments before un-skipping this test.
        builder = CopyValueBuilder()
        node = Node( builder, value )
        bm.add( node )
        _generateNodeTree( bm, builder, node, 5000 )
#//===========================================================================//
# Allow running this module directly as a test script.
if __name__ == "__main__":
    runLocalTests()
| StarcoderdataPython |
3323189 | #Embedded file name: ACEStream\Core\Statistics\__init__.pyo
pass
| StarcoderdataPython |
3209895 | <filename>src/bll/mediacatalog/audiofilterfactory.py<gh_stars>1-10
from bll.mediacatalog.audiosyncfilter import AudioSyncFilter
from indexing.filters.pathfilterfactory import PathFilterFactory
class AudioFilterFactory(PathFilterFactory):
    """Factory that assembles the path-filter stack used for audio files."""

    def __init__(self, audio_dal, sync_only=False):
        """Store the audio data-access layer and the sync flag.

        Parameters
        ----------
        audio_dal : AudioDataHandler
            Reference to the audio DAL; must not be None.
        sync_only : bool
            When True, only a synchronization filter is created.
        """
        if audio_dal is None:
            raise Exception('audio_dal cannot be None.')
        self._audio_dal = audio_dal
        self._sync_only = sync_only

    def create_filters(self):
        """Return the configured filter list.

        A single AudioSyncFilter in sync-only mode; otherwise an empty
        list (no filtering).
        """
        if self._sync_only:
            return [AudioSyncFilter(self._audio_dal)]
        return []
| StarcoderdataPython |
"""
Pseudocode: exercises 23, 24, 25
Post-solution REVIEW: too much detail, to be honest —
especially for someone who already understands the fundamentals.
Ex 23: Exercise 23 – Your first loops
Generate a list that contains at least 20 random integers. Write one loop that sums up all entries of the list. Write another loop that sums up only the entries of the list with even indices. Write a third loop that generates a new list that contains the values of the list in reverse order. Print out all three results
# Generate number list
import the random package
randomlist = empty list
for index in range of 0-25
number1, between 0 and 300, is generated
number1 is appended to randomlist
Loop is exited once whole list is read
# Add numbers in list to sum, by changing its value on iterations
set sum = 0
for index in randomlist
sum += index
print(sum)
Take user input (integer)
if input in list, just inform user of that
for each element:
If input bigger than list element, inform them of that then parse next element
else if input smaller than list element, inform them of that then parse next element
Exercise 24 – 3 while loops
Write a program that uses while loops to finish the tasks below:
- Searching for a specific number (e.g. 5) in an integer list of unknown length
- Multiplying all elements of an integer list of unknown length
- Printing out the contents of a string list of unknown length elementwise
# Generate list of unknown length with random elements
define empty list p
for index in range between 1 and random integer between min1 and max1
append random number between min2 and max2 to p
print p
# 1st part - search for number
get user input (number to scan for)
start at 0th position
set NumFound to False
while reading position is lower than length of list:
if reading position = user input
print confirmation for user
NumFound becomes True
reading position += 1
if NumFound remains False, inform user
# 2nd part - multiply list entries together
call list using code from earlier (recommended not to have more than 7 elements and random numbers only between 1-10)
set first parsing index (FPI) to 0
while FPI is less than list length:
set second parsing index (SPI) to 0
while SPI is less than list length
print(f'current entry {SPI} times last entry {FPI} = {SPI*FPI}')
SPI +=1 # move reading frame for reading position
FPI +=1 # move reading frame for reading position -1
# 3rd part - print contents of unknown list by element
call list using code from earlier
(STOPPED TO FOLLOW CHRISTOPH'S SOLUTION IN LESSON. It's much nicer to read,
of course, so you can take this as an example of too much detail)
"""
# after that, moved onto definition of function (see pdf)
"""Create a program that defines functions for the four mathematical basic
operations (+, -, * and /). Call the functions at least three times
with different parameters."""
| StarcoderdataPython |
103391 | <reponame>GamesBond008/NSE-India-Scrapper<gh_stars>1-10
from ._MarketData import MarketData
class Indices(MarketData):
def __init__(self,timeout: int=5):
super().__init__(timeout)
self._BaseURL="https://www.nseindia.com/api/allIndices"
def IndicesMarketWatch(self):
return self._GrabData(self._BaseURL) | StarcoderdataPython |
191227 | <filename>spiegel-news.py
# Done By <NAME> 2019/14/10
import datetime
from _csv import writer
import requests
import re
from bs4 import BeautifulSoup
import threading
def Crawl():
    """Scrape Spiegel International's landing page and write one CSV row
    of metadata (title, keywords, abstract, UTC timestamp) per article.

    Overwrites news.csv on every run.
    """
    url = 'https://www.spiegel.de/international/'
    urlcrawled = 'https://www.spiegel.de'
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    weblinks = soup.find_all('a', {'href': True})
    # Collect article links from the section's landing page.
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open('news.csv', 'w', newline='') as csv_file:
        csv_writer = writer(csv_file)
        headers = ['Title', 'SubTitle', 'Abstract', 'InsertedDate']
        csv_writer.writerow(headers)
        # Raw string: the pattern contains \w and \d escapes.
        urls = re.findall(r'/international+[/a-z/]+(?:[-\w.]|(?:%[\da-fA-F]{2}))+.html',
                          str(weblinks))
        for u in urls:
            strurl = "{0}{1}".format(urlcrawled, u)
            # Scrape metadata from each article page.
            pagez = requests.get(strurl)
            soupz = BeautifulSoup(pagez.content, 'html.parser')
            # Reset per-article fields so a page missing a meta tag cannot
            # leak values from the previous article — or raise NameError on
            # the very first article (bug in the original).
            title = subtitle = description = ''
            for tag in soupz.find_all("meta"):
                if tag.get("property", None) == "og:title":
                    title = tag.get("content", None)
                elif tag.get("property", None) == "og:description":
                    description = tag.get("content", None)
                elif tag.get("name", None) == "news_keywords":
                    subtitle = tag.get("content", None)
            csv_writer.writerow([title, subtitle, description,
                                 datetime.datetime.utcnow()])
# Run the crawl now and reschedule it every 60 seconds.
# The original passed Crawl() — the *result* of an immediate call (None) —
# as the Timer's callable, so the timer fired once and crashed instead of
# repeating as the comment intended.
def _schedule():
    Crawl()
    threading.Timer(60.0, _schedule).start()

_schedule()
| StarcoderdataPython |
60768 | <gh_stars>0
from shapely.geometry import Polygon
from geopyspark.geopyspark_utils import ensure_pyspark
ensure_pyspark()
from geopyspark import get_spark_context, create_python_rdd
from geopyspark.geotrellis import Extent
from geopyspark.vector_pipe import Feature, Properties
from geopyspark.vector_pipe.features_collection import FeaturesCollection
from pyspark.sql import SparkSession
__all__ = ['from_orc', 'from_dataframe']
def from_orc(source, target_extent=None):
    """Read OSM data from an ORC file (local or on S3) into a
    :class:`~geopyspark.vector_pipe.features_collection.FeaturesCollection`.

    Args:
        source (str): Path or URI of the ORC file. Can be a local file or
            a file on S3.

            Note:
                Reading from S3 outside of EMR requires credentials: for
                ``s3a://`` set ``spark.hadoop.fs.s3a.impl``,
                ``spark.hadoop.fs.s3a.access.key`` and
                ``spark.hadoop.fs.s3a.secret.key``; for ``s3n://`` set
                ``spark.hadoop.fs.s3n.access.key`` and
                ``spark.hadoop.fs.s3n.secret.key``. Alternatively export
                ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY_ID`` as
                environment variables. On EMR no extra credentials are
                needed.

        target_extent (:class:`~geopyspark.geotrellis.Extent` or
            ``shapely.geometry.Polygon``, optional): Area of interest.
            Only features inside this extent are returned. ``None`` (the
            default) returns all features.

    Returns:
        :class:`~geopyspark.vector_pipe.features_collection.FeaturesCollection`
    """
    if target_extent:
        # Normalize a Polygon to an Extent, then to the dict form the
        # JVM side expects.
        if isinstance(target_extent, Polygon):
            target_extent = Extent.from_polygon(target_extent)
        target_extent = target_extent._asdict()

    pysc = get_spark_context()
    session = SparkSession.builder \
        .config(conf=pysc.getConf()) \
        .enableHiveSupport() \
        .getOrCreate()
    java_features = pysc._jvm.geopyspark.vectorpipe.io.OSMReader.fromORC(
        session._jsparkSession, source, target_extent)
    return FeaturesCollection(java_features)
def from_dataframe(dataframe, target_extent=None):
    """Read OSM data from a Spark ``DataFrame`` into a
    :class:`~geopyspark.vector_pipe.features_collection.FeaturesCollection`.

    Args:
        dataframe (DataFrame): Spark ``DataFrame`` containing the OSM data.
        target_extent (:class:`~geopyspark.geotrellis.Extent` or
            ``shapely.geometry.Polygon``, optional): Area of interest.
            Only features inside this extent are returned. ``None`` (the
            default) returns all features.

    Returns:
        :class:`~geopyspark.vector_pipe.features_collection.FeaturesCollection`
    """
    if target_extent:
        # Normalize a Polygon to an Extent, then to the dict form the
        # JVM side expects.
        if isinstance(target_extent, Polygon):
            target_extent = Extent.from_polygon(target_extent)
        target_extent = target_extent._asdict()

    pysc = get_spark_context()
    java_features = pysc._jvm.geopyspark.vectorpipe.io.OSMReader.fromDataFrame(
        dataframe._jdf, target_extent)
    return FeaturesCollection(java_features)
| StarcoderdataPython |
import sys
sys.path.insert(0, '..')

from mayavi import mlab
import numpy as np

from demo import load_image
import lulu
import lulu.connected_region_handler as crh

# Demo: decompose an image with the LULU operator and render the pulses
# whose area lies in a narrow band (280..300) as stacked 3-D bar charts.
img = load_image('chelsea_small.jpg')

print("Decomposing a %s image." % str(img.shape))
regions = lulu.decompose(img)

value_maxes = []
height = 0
for area in sorted(regions.keys()):
    pulses = regions[area]

    # Skip empty pulse sets and areas outside the band of interest.
    if len(pulses) == 0 or area < 280 or area > 300:
        continue

    values = [crh.get_value(p) for p in pulses]
    height_diff = max(values) - min(values)
    value_maxes.append(height_diff)
    centre = height + height_diff / 2.0

    # Rasterize this area level's pulses into an image-shaped array.
    pulse_values = np.zeros_like(img)
    for p in pulses:
        crh.set_array(pulse_values, p, crh.get_value(p))

    y, x = np.where(pulse_values)
    s = pulse_values[y, x]

    # Each area level is drawn above the previous one.
    mlab.barchart(x, y, [height + centre] * len(s), s,
                  opacity=1.0, scale_factor=1.5)

    height += height_diff + 0.5

scene = mlab.get_engine().scenes[0]
scene.scene.parallel_projection = True
mlab.show()
| StarcoderdataPython |
1678097 | <gh_stars>1-10
"""
Like described in the :mod:`jedi.evaluate.parsing_representation` module,
there's a need for an ast like module to represent the states of parsed
modules.
But now there are also structures in Python that need a little bit more than
that. An ``Instance`` for example is only a ``Class`` before it is
instantiated. This class represents these cases.
So, why is there also a ``Class`` class here? Well, there are decorators and
they change classes in Python 3.
"""
import copy
from jedi._compatibility import use_metaclass, unicode
from jedi.parser import representation as pr
from jedi.parser.tokenize import Token
from jedi import debug
from jedi import common
from jedi.evaluate.cache import memoize_default, CachedMetaClass
from jedi.evaluate import compiled
from jedi.evaluate import recursion
from jedi.evaluate import iterable
from jedi.evaluate import docstrings
from jedi.evaluate import helpers
from jedi.evaluate import param
class Executable(pr.IsScope):
    """Base for objects that can be executed — an instance is one, since
    instantiation calls ``__init__``.

    :param var_args: The call's argument array, a ``pr.Array`` or a list.
    """
    def __init__(self, evaluator, base, var_args=()):
        self._evaluator = evaluator
        self.base = base
        self.var_args = var_args

    def get_parent_until(self, *args, **kwargs):
        # Scope traversal is delegated to the wrapped object.
        return self.base.get_parent_until(*args, **kwargs)

    @common.safe_property
    def parent(self):
        # The executable lives in the same scope as its base.
        return self.base.parent
class Instance(use_metaclass(CachedMetaClass, Executable)):
    """
    This class is used to evaluate instances.

    Wraps a class (``base``) together with the constructor arguments
    (``var_args``) that produced the instance.
    """
    def __init__(self, evaluator, base, var_args=()):
        super(Instance, self).__init__(evaluator, base, var_args)
        if str(base.name) in ['list', 'set'] \
                and compiled.builtin == base.get_parent_until():
            # compare the module path with the builtin name.
            self.var_args = iterable.check_array_instances(evaluator, self)
        else:
            # need to execute the __init__ function, because the dynamic param
            # searching needs it.
            with common.ignored(KeyError):
                self.execute_subscope_by_name('__init__', self.var_args)
        # Generated instances are classes that are just generated by self
        # (No var_args) used.
        self.is_generated = False

    @memoize_default()
    def _get_method_execution(self, func):
        # Wrap the function as an instance element (bound method) and
        # return its execution with the constructor's arguments.
        func = InstanceElement(self._evaluator, self, func, True)
        return FunctionExecution(self._evaluator, func, self.var_args)

    def _get_func_self_name(self, func):
        """
        Returns the name of the first param in a class method (which is
        normally self.
        """
        try:
            return str(func.params[0].get_name())
        except IndexError:
            # Function has no parameters at all.
            return None

    @memoize_default([])
    def get_self_attributes(self):
        # Collect names assigned via ``self.<name>`` in this class's
        # methods (and, recursively, in super-class instances).
        def add_self_dot_name(name):
            """
            Need to copy and rewrite the name, because names are now
            ``instance_usage.variable`` instead of ``self.variable``.
            """
            n = copy.copy(name)
            n.names = n.names[1:]
            n._get_code = unicode(n.names[-1])
            names.append(InstanceElement(self._evaluator, self, n))

        names = []
        # This loop adds the names of the self object, copies them and removes
        # the self.
        for sub in self.base.subscopes:
            if isinstance(sub, pr.Class):
                continue
            # Get the self name, if there's one.
            self_name = self._get_func_self_name(sub)
            if not self_name:
                continue
            if sub.name.get_code() == '__init__':
                # ``__init__`` is special because the params need are injected
                # this way. Therefore an execution is necessary.
                if not sub.decorators:
                    # __init__ decorators should generally just be ignored,
                    # because to follow them and their self variables is too
                    # complicated.
                    sub = self._get_method_execution(sub)
            for n in sub.get_defined_names():
                # Only names with the selfname are being added.
                # It is also important, that they have a len() of 2,
                # because otherwise, they are just something else
                if unicode(n.names[0]) == self_name and len(n.names) == 2:
                    add_self_dot_name(n)

        if not isinstance(self.base, compiled.CompiledObject):
            # Inherit self attributes from (instances of) the super classes.
            for s in self.base.get_super_classes():
                for inst in self._evaluator.execute(s):
                    names += inst.get_self_attributes()
        return names

    def get_subscope_by_name(self, name):
        # Look the method/class up on the base class and bind it to this
        # instance; raises KeyError (from the base) if it doesn't exist.
        sub = self.base.get_subscope_by_name(name)
        return InstanceElement(self._evaluator, self, sub, True)

    def execute_subscope_by_name(self, name, args=()):
        # Execute the named method on this instance with *args*.
        method = self.get_subscope_by_name(name)
        return self._evaluator.execute(method, args)

    def get_descriptor_return(self, obj):
        """ Throws a KeyError if there's no method. """
        # Arguments in __get__ descriptors are obj, class.
        # `method` is the new parent of the array, don't know if that's good.
        args = [obj, obj.base] if isinstance(obj, Instance) else [None, obj]
        return self.execute_subscope_by_name('__get__', args)

    @memoize_default([])
    def get_defined_names(self):
        """
        Get the instance vars of a class. This includes the vars of all
        classes
        """
        names = self.get_self_attributes()

        for var in self.base.instance_names():
            names.append(InstanceElement(self._evaluator, self, var, True))
        return names

    def scope_generator(self):
        """
        An Instance has two scopes: The scope with self names and the class
        scope. Instance variables have priority over the class scope.
        """
        yield self, self.get_self_attributes()

        names = []
        for var in self.base.instance_names():
            names.append(InstanceElement(self._evaluator, self, var, True))
        yield self, names

    def is_callable(self):
        # An instance is callable iff its class defines __call__.
        try:
            self.get_subscope_by_name('__call__')
            return True
        except KeyError:
            return False

    # NOTE(review): mutable default argument ``indexes=[]``; harmless here
    # because the parameter is only rebound, never mutated — but worth
    # replacing with a tuple default.
    def get_index_types(self, indexes=[]):
        if any([isinstance(i, iterable.Slice) for i in indexes]):
            # Slice support in Jedi is very marginal, at the moment, so just
            # ignore them in case of __getitem__.
            # TODO support slices in a more general way.
            indexes = []

        try:
            return self.execute_subscope_by_name('__getitem__', indexes)
        except KeyError:
            debug.warning('No __getitem__, cannot access the array.')
            return []

    def __getattr__(self, name):
        # Only a whitelisted set of attributes is forwarded to the base
        # class; anything else is an error by the caller.
        if name not in ['start_pos', 'end_pos', 'name', 'get_imports',
                        'doc', 'raw_doc', 'asserts']:
            raise AttributeError("Instance %s: Don't touch this (%s)!"
                                 % (self, name))
        return getattr(self.base, name)

    def __repr__(self):
        return "<e%s of %s (var_args: %s)>" % \
            (type(self).__name__, self.base, len(self.var_args or []))
class InstanceElement(use_metaclass(CachedMetaClass, pr.Base)):
    """
    InstanceElement is a wrapper for any object, that is used as an instance
    variable (e.g. self.variable or class methods).
    """
    def __init__(self, evaluator, instance, var, is_class_var=False):
        self._evaluator = evaluator
        # Parser functions/classes are wrapped in their evaluator
        # counterparts before being bound to the instance.
        if isinstance(var, pr.Function):
            var = Function(evaluator, var)
        elif isinstance(var, pr.Class):
            var = Class(evaluator, var)
        self.instance = instance
        self.var = var
        self.is_class_var = is_class_var

    @common.safe_property
    @memoize_default()
    def parent(self):
        # Rebind the wrapped object's parent chain to the instance: if the
        # parent is the instance's class, the parent becomes the instance
        # itself; otherwise (unless it's a module or compiled object) it is
        # wrapped as an InstanceElement too.
        par = self.var.parent
        if isinstance(par, Class) and par == self.instance.base \
                or isinstance(par, pr.Class) \
                and par == self.instance.base.base:
            par = self.instance
        elif not isinstance(par, (pr.Module, compiled.CompiledObject)):
            par = InstanceElement(self.instance._evaluator, self.instance, par, self.is_class_var)
        return par

    def get_parent_until(self, *args, **kwargs):
        # Use the generic scope walk (which goes through our rebound
        # ``parent`` property) rather than the wrapped object's own.
        return pr.Simple.get_parent_until(self, *args, **kwargs)

    def get_decorated_func(self):
        """ Needed because the InstanceElement should not be stripped """
        func = self.var.get_decorated_func()
        func = InstanceElement(self._evaluator, self.instance, func)
        return func

    def expression_list(self):
        # Copy and modify the array.
        return [InstanceElement(self.instance._evaluator, self.instance, command, self.is_class_var)
                if not isinstance(command, (pr.Operator, Token)) else command
                for command in self.var.expression_list()]

    def __iter__(self):
        # Iterate the wrapped object, binding every element to the instance.
        for el in self.var.__iter__():
            yield InstanceElement(self.instance._evaluator, self.instance, el, self.is_class_var)

    def __getattr__(self, name):
        # Everything not defined here transparently proxies the wrapped var.
        return getattr(self.var, name)

    def isinstance(self, *cls):
        # Type checks look through the wrapper at the wrapped object.
        return isinstance(self.var, cls)

    def is_callable(self):
        return self.var.is_callable()

    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self.var)
class Class(use_metaclass(CachedMetaClass, pr.IsScope)):
    """
    This class is not only important to extend `pr.Class`, it is also a
    important for descriptors (if the descriptor methods are evaluated or not).
    """
    def __init__(self, evaluator, base):
        self._evaluator = evaluator
        self.base = base

    @memoize_default(default=())
    def get_super_classes(self):
        # Evaluate the super-class statements; falls back to ``object``
        # for any non-builtin class with no explicit supers.
        supers = []
        # TODO care for mro stuff (multiple super classes).
        for s in self.base.supers:
            # Super classes are statements.
            for cls in self._evaluator.eval_statement(s):
                if not isinstance(cls, (Class, compiled.CompiledObject)):
                    debug.warning('Received non class as a super class.')
                    continue  # Just ignore other stuff (user input error).
                supers.append(cls)
        if not supers and self.base.parent != compiled.builtin:
            # add `object` to classes
            supers += self._evaluator.find_types(compiled.builtin, 'object')
        return supers

    @memoize_default(default=())
    def instance_names(self):
        # Names defined on this class plus inherited names that are not
        # shadowed by it.
        def in_iterable(name, iterable):
            """ checks if the name is in the variable 'iterable'. """
            for i in iterable:
                # Only the last name is important, because these names have a
                # maximal length of 2, with the first one being `self`.
                if unicode(i.names[-1]) == unicode(name.names[-1]):
                    return True
            return False

        result = self.base.get_defined_names()
        super_result = []
        # TODO mro!
        for cls in self.get_super_classes():
            # Get the inherited names.
            if isinstance(cls, compiled.CompiledObject):
                super_result += cls.get_defined_names()
            else:
                for i in cls.instance_names():
                    if not in_iterable(i, result):
                        super_result.append(i)
        result += super_result
        return result

    @memoize_default(default=())
    def get_defined_names(self):
        # Class attributes plus what every class object gets from ``type``.
        result = self.instance_names()
        type_cls = self._evaluator.find_types(compiled.builtin, 'type')[0]
        return result + list(type_cls.get_defined_names())

    def get_subscope_by_name(self, name):
        # Search subscopes last-to-first so later redefinitions win.
        for sub in reversed(self.subscopes):
            if sub.name.get_code() == name:
                return sub
        raise KeyError("Couldn't find subscope.")

    def is_callable(self):
        # Calling a class is always possible (it's instantiation).
        return True

    @common.safe_property
    def name(self):
        return self.base.name

    def __getattr__(self, name):
        # Only a whitelisted set of attributes is forwarded to the parser
        # class; anything else is an error by the caller.
        if name not in ['start_pos', 'end_pos', 'parent', 'asserts', 'raw_doc',
                        'doc', 'get_imports', 'get_parent_until', 'get_code',
                        'subscopes']:
            raise AttributeError("Don't touch this: %s of %s !" % (name, self))
        return getattr(self.base, name)

    def __repr__(self):
        return "<e%s of %s>" % (type(self).__name__, self.base)
class Function(use_metaclass(CachedMetaClass, pr.IsScope)):
    """
    Needed because of decorators. Decorators are evaluated here.
    """
    def __init__(self, evaluator, func, is_decorated=False):
        """ This should not be called directly """
        self._evaluator = evaluator
        self.base_func = func
        # is_decorated=True means the decorators were already processed (or
        # must not be processed), so _decorated_func will not re-apply them.
        self.is_decorated = is_decorated
    @memoize_default()
    def _decorated_func(self):
        """
        Returns the function, that is to be executed in the end.
        This is also the places where the decorators are processed.
        Returns None when a decorator (or its wrapper) cannot be resolved.
        """
        f = self.base_func
        # Only enter it, if has not already been processed.
        if not self.is_decorated:
            # Decorators are applied bottom-up, hence reversed().
            for dec in reversed(self.base_func.decorators):
                debug.dbg('decorator: %s %s', dec, f)
                dec_results = self._evaluator.eval_statement(dec)
                if not len(dec_results):
                    debug.warning('decorator not found: %s on %s', dec, self.base_func)
                    return None
                decorator = dec_results.pop()
                if dec_results:
                    debug.warning('multiple decorators found %s %s',
                                  self.base_func, dec_results)
                # Create param array.
                old_func = Function(self._evaluator, f, is_decorated=True)
                wrappers = self._evaluator.execute(decorator, (old_func,))
                if not len(wrappers):
                    debug.warning('no wrappers found %s', self.base_func)
                    return None
                if len(wrappers) > 1:
                    # TODO resolve issue with multiple wrappers -> multiple types
                    debug.warning('multiple wrappers found %s %s',
                                  self.base_func, wrappers)
                f = wrappers[0]
                debug.dbg('decorator end %s', f)
        # Re-wrap bare parser functions so callers always get this class.
        if isinstance(f, pr.Function):
            f = Function(self._evaluator, f, True)
        return f
    def get_decorated_func(self):
        """
        This function exists for the sole purpose of returning itself if the
        decorator doesn't turn out to "work".
        We just ignore the decorator here, because sometimes decorators are
        just really complicated and Jedi cannot understand them.
        """
        return self._decorated_func() \
            or Function(self._evaluator, self.base_func, True)
    def get_magic_function_names(self):
        return compiled.magic_function_class.get_defined_names()
    def get_magic_function_scope(self):
        return compiled.magic_function_class
    def is_callable(self):
        return True
    def __getattr__(self, name):
        # Delegate everything else to the wrapped parser function.
        return getattr(self.base_func, name)
    def __repr__(self):
        decorated_func = self._decorated_func()
        dec = ''
        if decorated_func is not None and decorated_func != self:
            dec = " is " + repr(decorated_func)
        return "<e%s of %s%s>" % (type(self).__name__, self.base_func, dec)
class FunctionExecution(Executable):
    """
    This class is used to evaluate functions and their returns.
    This is the most complicated class, because it contains the logic to
    transfer parameters. It is even more complicated, because there may be
    multiple calls to functions and recursion has to be avoided. But this is
    responsibility of the decorators.
    """
    @memoize_default(default=())
    @recursion.execution_recursion_decorator
    def get_return_types(self, evaluate_generator=False):
        # Evaluate all return statements of the executed function (plus any
        # types documented in its docstring).  Generators are returned as a
        # lazy Generator wrapper unless evaluate_generator is set.
        func = self.base
        # Feed the listeners, with the params.
        for listener in func.listeners:
            listener.execute(self._get_params())
        if func.is_generator and not evaluate_generator:
            return [iterable.Generator(self._evaluator, func, self.var_args)]
        else:
            stmts = docstrings.find_return_types(self._evaluator, func)
            for r in self.returns:
                if r is not None:
                    stmts += self._evaluator.eval_statement(r)
            return stmts
    @memoize_default(default=())
    def _get_params(self):
        """
        This returns the params for an TODO and is injected as a
        'hack' into the pr.Function class.
        This needs to be here, because Instance can have __init__ functions,
        which act the same way as normal functions.
        """
        return param.get_params(self._evaluator, self.base, self.var_args)
    def get_defined_names(self):
        """
        Call the default method with the own instance (self implements all
        the necessary functions). Add also the params.
        """
        return self._get_params() + pr.Scope.get_defined_names(self)
    def _copy_properties(self, prop):
        """
        Literally copies a property of a Function. Copying is very expensive,
        because it is something like `copy.deepcopy`. However, these copied
        objects can be used for the executions, as if they were in the
        execution.
        """
        # Copy all these lists into this local function.
        attr = getattr(self.base, prop)
        objects = []
        for element in attr:
            if element is None:
                copied = element
            else:
                copied = helpers.fast_parent_copy(element)
                # Re-root the copy so its parent chain points into this
                # execution instead of the original function.
                copied.parent = self._scope_copy(copied.parent)
                if isinstance(copied, pr.Function):
                    copied = Function(self._evaluator, copied)
            objects.append(copied)
        return objects
    def __getattr__(self, name):
        # Only a small whitelist is proxied to the executed function.
        if name not in ['start_pos', 'end_pos', 'imports', '_sub_module']:
            raise AttributeError('Tried to access %s: %s. Why?' % (name, self))
        return getattr(self.base, name)
    @memoize_default()
    def _scope_copy(self, scope):
        """ Copies a scope (e.g. if) in an execution """
        # TODO method uses different scopes than the subscopes property.
        # just check the start_pos, sometimes it's difficult with closures
        # to compare the scopes directly.
        if scope.start_pos == self.start_pos:
            return self
        else:
            copied = helpers.fast_parent_copy(scope)
            copied.parent = self._scope_copy(copied.parent)
            return copied
    @common.safe_property
    @memoize_default([])
    def returns(self):
        return self._copy_properties('returns')
    @common.safe_property
    @memoize_default([])
    def asserts(self):
        return self._copy_properties('asserts')
    @common.safe_property
    @memoize_default([])
    def statements(self):
        return self._copy_properties('statements')
    @common.safe_property
    @memoize_default([])
    def subscopes(self):
        return self._copy_properties('subscopes')
    def get_statement_for_position(self, pos):
        return pr.Scope.get_statement_for_position(self, pos)
    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self.base)
| StarcoderdataPython |
1623295 | # Generated by Django 3.2 on 2021-05-05 21:14
import django.db.models.deletion
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (3.2): introduces the
    # SourceLocationMatchHistory audit table, which records every change of a
    # source location's matched location (who changed it, via which API key,
    # old match, new match).  Do not hand-edit the operations.
    dependencies = [
        ("api", "0009_apikey_user"),
        ("core", "0116_location_extra_fields"),
    ]
    operations = [
        migrations.CreateModel(
            name="SourceLocationMatchHistory",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(default=django.utils.timezone.now)),
                (
                    "api_key",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="source_location_match_history",
                        to="api.apikey",
                    ),
                ),
                (
                    "new_match_location",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="source_location_match_history",
                        to="core.location",
                    ),
                ),
                (
                    "old_match_location",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="+",
                        to="core.location",
                    ),
                ),
                (
                    "reporter",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="source_location_match_history",
                        to="core.reporter",
                    ),
                ),
                (
                    "source_location",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="source_location_match_history",
                        to="core.sourcelocation",
                    ),
                ),
            ],
            options={
                "db_table": "source_location_match_history",
            },
        ),
    ]
| StarcoderdataPython |
1659221 | <filename>SuperNewsCrawlSpider/SuperNewsCrawlSpider/tools/get_new_time.py
# encoding: utf-8
import time
import datetime
class GetTime(object):
    """Small helper returning formatted local wall-clock timestamps."""

    def __init__(self):
        pass

    def get_time(self):
        """Return the current local time as 'YYYY-MM-DD HH:MM:SS'."""
        # time.localtime() already defaults to "now"; passing time.time()
        # explicitly (as the original did) was redundant.
        return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

    def get_first_hour(self):
        """Return the local time one hour ago, formatted 'YYYY-MM-DD HH'."""
        now = datetime.datetime.now()
        return (now + datetime.timedelta(hours=-1)).strftime("%Y-%m-%d %H")

    def get_yesterday_date(self):
        """Return yesterday's local date, formatted 'YYYY-MM-DD'."""
        now = datetime.datetime.now()
        return (now + datetime.timedelta(days=-1)).strftime("%Y-%m-%d")
if __name__ == '__main__':
    # No CLI behaviour; the module is import-only.
    pass
3294969 | <reponame>kundajelab/tronn
# generate all results in this file
import os
import glob
import gzip
import networkx as nx
import pandas as pd
def get_bed_from_nx_graph(graph, bed_file, interval_key="active", merge=True):
    """Write the example intervals stored on a networkx graph to a BED file.

    Each entry of graph.graph["examples"] is a ';'-separated list of
    key=value pairs; the value under `interval_key` ("chrom:start-stop")
    supplies one BED row.  With merge=True the file is then sorted, merged
    via bedtools and gzip-compressed in place (shell tools required).
    """
    examples = list(graph.graph["examples"])
    with open(bed_file, "w") as out:
        for metadata in examples:
            fields = dict(
                item.split("=")[0:2] for item in metadata.split(";"))
            interval = fields[interval_key]
            chrom = interval.split(":")[0]
            start = interval.split(":")[1].split("-")[0]
            stop = interval.split("-")[1]
            out.write("{}\t{}\t{}\n".format(chrom, start, stop))
    if merge:
        tmp_bed_file = "{}.tmp.bed".format(bed_file.split(".bed")[0])
        os.system("mv {} {}".format(bed_file, tmp_bed_file))
        os.system(
            "cat {} | sort -k1,1 -k2,2n | bedtools merge -i stdin | "
            "gzip -c > {}".format(tmp_bed_file, bed_file))
        os.system("rm {}".format(tmp_bed_file))
    return None
def setup_ldsc_annotations(bed_file, bim_prefix, hapmap_prefix, out_dir):
    """Build per-chromosome LDSC annotations and LD scores for a BED file.

    For each autosome (1-22) this shells out to ldsc's make_annot.py to
    create a thin annot file from `bed_file`, then to ldsc.py --l2 to
    compute partitioned LD scores, writing everything under `out_dir`
    with the BED basename as prefix.  Requires ~/git/ldsc plus the plink
    (`bim_prefix`) and hapmap (`hapmap_prefix`) reference files.
    """
    chroms = range(1, 23)
    os.system("mkdir -p {}".format(out_dir))
    prefix = os.path.basename(bed_file).split(".bed")[0]
    for chrom in chroms:
        # make annot file
        make_annot = (
            "python ~/git/ldsc/make_annot.py "
            "--bed-file {0} "
            "--bimfile {2}.{3}.bim "
            "--annot-file {4}/{1}.{3}.annot.gz").format(
                bed_file, prefix, bim_prefix, chrom, out_dir)
        # print() (function form) keeps this valid on both Python 2 and 3;
        # the original `print cmd` statement was Python-2-only syntax.
        print(make_annot)
        os.system(make_annot)
        # compute LD scores with annot file
        compute_ld = (
            "python ~/git/ldsc/ldsc.py "
            "--l2 "
            "--bfile {1}.{2} "
            "--ld-wind-cm 1 "
            "--annot {4}/{0}.{2}.annot.gz "
            "--thin-annot --out {4}/{0}.{2} "
            "--print-snps {3}.{2}.snp").format(
                prefix, bim_prefix, chrom, hapmap_prefix, out_dir)
        print(compute_ld)
        os.system(compute_ld)
    return
def setup_sumstats_file(sumstats_file, merge_alleles_file, out_file, other_params=""):
    """Run ldsc's munge_sumstats.py to produce a cleaned .sumstats file.

    `out_file` must contain ".sumstats": munge_sumstats receives the part
    before it as --out and appends the suffix itself.  `other_params` is
    appended verbatim (e.g. column overrides such as "--N-col ...").
    """
    munge_cmd = (
        "python ~/git/ldsc/munge_sumstats.py "
        "--sumstats {} "
        "--merge-alleles {} "
        "--out {} "
        "{} ").format(
            sumstats_file,
            merge_alleles_file,
            out_file.split(".sumstats")[0],
            other_params)
    # print() replaces the Python-2-only `print cmd` statement.
    print(munge_cmd)
    os.system(munge_cmd)
    return None
def get_sig_snps(sumstats_file, variants_file, out_file):
    """Extract genome-wide significant SNPs into a gzipped BED file.

    Reads the munge_sumstats log sitting next to `sumstats_file` to learn
    how many genome-wide significant SNPs were reported, keeps the top-|Z|
    rows of the sumstats table, looks their coordinates up in the (gzipped,
    whitespace-delimited) UKBB `variants_file` (col 1 = chrom, col 2 = pos,
    col 5 = rsid, 0-based), and writes chrom/start/stop/"rsid;phenotype"
    rows to `out_file`.  Returns the number of significant SNPs (0 if the
    log reports none, in which case no file is written).
    """
    # prefix used to tag each output row with its phenotype
    phenotype = os.path.basename(sumstats_file).split(".sumstats")[0]
    # read sumstats log file to get num sig snps; default to 0 so a log
    # without the marker line no longer leaves num_sig unbound (NameError
    # in the original)
    num_sig = 0
    sumstats_log = "{}.log".format(sumstats_file.split(".sumstats")[0])
    with open(sumstats_log, "r") as fp:
        for line in fp:
            if "Genome-wide significant SNPs" in line:
                num_sig = int(line.strip().split()[0])
    if num_sig != 0:
        # access sumstats file and sort, only keep top k (to match num sig)
        sumstats = pd.read_csv(sumstats_file, sep="\t")
        sumstats = sumstats.dropna()
        sumstats = sumstats[sumstats["Z"] != 0]
        sumstats["Z_abs"] = sumstats["Z"].abs()
        sumstats = sumstats.sort_values("Z_abs", ascending=False)
        sumstats = sumstats.iloc[:num_sig, :]
        sumstats = sumstats.set_index("SNP")
        # now access variants file to associate snps properly; "rt" (text
        # mode) is required on Python 3 -- with the original "r" (bytes),
        # `fields[5] in sumstats.index.values` could never match
        seen_rsids = []
        sumstats["chr"] = 1
        sumstats["start"] = 1
        line_num = 0
        with gzip.open(variants_file, "rt") as fp:
            for line in fp:
                fields = line.strip().split()
                if fields[5] in sumstats.index.values:
                    sumstats.loc[fields[5], "chr"] = "chr{}".format(fields[1])
                    sumstats.loc[fields[5], "start"] = int(fields[2])
                    seen_rsids.append(fields[5])
                    # stop early once every significant SNP was located
                    if len(seen_rsids) == sumstats.shape[0]:
                        break
                # reassurance we're progressing
                line_num += 1
                if line_num % 1000000 == 0:
                    print(line_num)
        sumstats = sumstats.reset_index()
        sumstats["stop"] = sumstats["start"] + 1
        # build the BED file (rows stay ordered by descending |Z|)
        bed_data = sumstats[["chr", "start", "stop", "SNP"]]
        bed_data["SNP"] = bed_data["SNP"].astype(str) + ";{}".format(phenotype)
        bed_data.to_csv(out_file, sep="\t", header=False, index=False, compression="gzip")
    return num_sig
def main():
    """run all analyses for GGR GWAS variants

    Heavy side effects: downloads LDSC reference data into the working
    directory, builds LDSC annotations for GGR ATAC-seq BED files, then
    downloads/munges UKBB and GWAS-catalog summary statistics and extracts
    genome-wide significant SNPs.  Everything after quit() below is dead
    code kept for reference.
    """
    # get baseline ldsc model
    if not os.path.isdir("1000G_EUR_Phase3_baseline"):
        get_baseline_model = "wget https://data.broadinstitute.org/alkesgroup/LDSCORE/1000G_Phase3_baseline_ldscores.tgz"
        setup_baseline_model = "tar -xvzf 1000G_Phase3_baseline_ldscores.tgz"
        os.system(get_baseline_model)
        os.system(setup_baseline_model)
    baseline_model_prefix = "1000G_EUR_Phase3_baseline/baseline."
    # get model weights
    if not os.path.isdir("weights_hm3_no_hla"):
        get_weights = "wget https://data.broadinstitute.org/alkesgroup/LDSCORE/weights_hm3_no_hla.tgz"
        setup_weights = "tar -xvzf weights_hm3_no_hla.tgz"
        os.system(get_weights)
        os.system(setup_weights)
    weights_prefix = "weights_hm3_no_hla/weights."
    # get plink files (for setting up annotations from BED)
    if not os.path.isdir("1000G_EUR_Phase3_plink"):
        get_plink = "wget https://data.broadinstitute.org/alkesgroup/LDSCORE/1000G_Phase3_plinkfiles.tgz"
        setup_plink = "tar -xzvf 1000G_Phase3_plinkfiles.tgz"
        os.system(get_plink)
        os.system(setup_plink)
    bim_prefix = "1000G_EUR_Phase3_plink/1000G.EUR.QC"
    # get hapmap
    if not os.path.isdir("hapmap3_snps"):
        get_hapmap = "wget https://data.broadinstitute.org/alkesgroup/LDSCORE/hapmap3_snps.tgz"
        setup_hapmap = "tar -xzvf hapmap3_snps.tgz"
        os.system(get_hapmap)
        os.system(setup_hapmap)
    hapmap_prefix = "hapmap3_snps/hm"
    # get snp list
    hapmap_snps_file = "w_hm3.snplist"
    if not os.path.isfile(hapmap_snps_file):
        get_snps = "wget https://data.broadinstitute.org/alkesgroup/LDSCORE/w_hm3.snplist.bz2"
        setup_snps = "bunzip2 w_hm3.snplist.bz2"
        os.system(get_snps)
        os.system(setup_snps)
    # ldsc annot dir
    ldsc_annot_dir = "./annot.custom"
    os.system("mkdir -p {}".format(ldsc_annot_dir))
    # ldsc file table
    ldsc_table_file = "./annot.table.TMP"
    # get an unrelated cell type - Liver
    HEPG2_DIR = "/mnt/data/integrative/dnase/ENCSR000ENP.HepG2_Hepatocellular_Carcinoma_Cell_Line.UW_Stam.DNase-seq/out_50m/peak/idr/pseudo_reps/rep1"
    hepg2_bed_file = "{}/ENCSR000ENP.HepG2_Hepatocellular_Carcinoma_Cell_Line.UW_Stam.DNase-seq_rep1-pr.IDR0.1.filt.narrowPeak.gz".format(
        HEPG2_DIR)
    #prefix = "HepG2"
    prefix = os.path.basename(hepg2_bed_file)
    ldscore_file = "{}/{}.22.l2.ldscore.gz".format(
        ldsc_annot_dir, prefix)
    if not os.path.isfile(ldscore_file):
        setup_ldsc_annotations(
            hepg2_bed_file, bim_prefix, hapmap_prefix, ldsc_annot_dir)
    with open(ldsc_table_file, "w") as fp:
        fp.write("HepG2\t{}/{}.\n".format(
            ldsc_annot_dir, prefix))
    # get ATAC all
    GGR_DIR = "/mnt/lab_data/kundaje/users/dskim89/ggr/integrative/v1.0.0a"
    ggr_master_bed_file = "{}/data/ggr.atac.idr.master.bed.gz".format(GGR_DIR)
    prefix = os.path.basename(ggr_master_bed_file).split(".bed")[0]
    ldscore_file = "{}/{}.22.l2.ldscore.gz".format(
        ldsc_annot_dir, prefix)
    if not os.path.isfile(ldscore_file):
        setup_ldsc_annotations(
            ggr_master_bed_file, bim_prefix, hapmap_prefix, ldsc_annot_dir)
    with open(ldsc_table_file, "a") as fp:
        fp.write("GGR_ALL\t{}/{}.\n".format(
            ldsc_annot_dir, prefix))
    # get ATAC timepoints
    timepoint_dir = "{}/results/atac/peaks.timepoints".format(GGR_DIR)
    timepoint_bed_files = sorted(glob.glob("{}/*narrowPeak.gz".format(timepoint_dir)))
    for timepoint_bed_file in timepoint_bed_files:
        prefix = os.path.basename(timepoint_bed_file).split(".bed")[0]
        ldscore_file = "{}/{}.22.l2.ldscore.gz".format(
            ldsc_annot_dir, prefix)
        if not os.path.isfile(ldscore_file):
            setup_ldsc_annotations(
                timepoint_bed_file, bim_prefix, hapmap_prefix, ldsc_annot_dir)
        with open(ldsc_table_file, "a") as fp:
            fp.write("{1}\t{0}/{1}.\n".format(
                ldsc_annot_dir, prefix))
    # get ATAC traj files
    traj_dir = "{}/results/atac/timeseries/dp_gp/reproducible/hard/reordered/bed".format(GGR_DIR)
    traj_bed_files = sorted(glob.glob("{}/*bed.gz".format(traj_dir)))
    for traj_bed_file in traj_bed_files:
        prefix = os.path.basename(traj_bed_file).split(".bed")[0]
        ldscore_file = "{}/{}.22.l2.ldscore.gz".format(
            ldsc_annot_dir, prefix)
        if not os.path.isfile(ldscore_file):
            setup_ldsc_annotations(
                traj_bed_file, bim_prefix, hapmap_prefix, ldsc_annot_dir)
        with open(ldsc_table_file, "a") as fp:
            fp.write("{1}\t{0}/{1}.\n".format(
                ldsc_annot_dir, prefix))
    # grammar dir
    grammar_dir = "./rules"
    if not os.path.isdir(grammar_dir):
        os.system("mkdir -p {}".format(grammar_dir))
    if False:
        # validated rules
        rule_summary_file = "/mnt/lab_data/kundaje/users/dskim89/ggr/validation/mpra.2019-10-22.results/results/combinatorial_rules/summary.txt.gz"
        rules_summary = pd.read_csv(rule_summary_file, sep="\t")
        # get BED files from validated rules and make annotations
        rule_dir = "/mnt/lab_data3/dskim89/ggr/nn/2019-03-12.freeze/dmim.shuffle/grammars.annotated.manual_filt.merged.final"
        for rule_idx in range(rules_summary.shape[0]):
            print rule_idx
            # get rule examples
            rule_name = rules_summary.iloc[rule_idx]["grammar"]
            rule_file = "{}/{}.gml".format(
                rule_dir, rule_name)
            rule = nx.read_gml(rule_file)
            rule.graph["examples"] = rule.graph["examples"].split(",")
            # make bed file
            bed_file = "{}/{}.bed.gz".format(
                grammar_dir,
                os.path.basename(rule_file).split(".gml")[0])
            if not os.path.isfile(bed_file):
                get_bed_from_nx_graph(rule, bed_file)
            # then make annotations
            prefix = os.path.basename(bed_file).split(".bed")[0]
            ldscore_file = "{}/{}.22.l2.ldscore.gz".format(
                ldsc_annot_dir, prefix)
            if not os.path.isfile(ldscore_file):
                setup_ldsc_annotations(
                    bed_file, bim_prefix, hapmap_prefix, ldsc_annot_dir)
            with open(ldsc_table_file, "a") as fp:
                fp.write("{1}\t{0}/{1}.\n".format(
                    ldsc_annot_dir, prefix))
    # pull relevant GWAS summary stats (plus UKBB), configure, and run
    sumstats_dir = "./sumstats"
    os.system("mkdir -p {}".format(sumstats_dir))
    sumstats_orig_dir = "{}/orig".format(sumstats_dir)
    os.system("mkdir -p {}".format(sumstats_orig_dir))
    # also set up results dir
    results_dir = "./results.TMP"
    os.system("mkdir -p {}".format(results_dir))
    # ukbb standardized, can do all in one go
    ukbb_manifest_file = "./ukbb/ukbb.gwas_imputed_3.release_20180731.tsv"
    ukbb_manifest = pd.read_csv(ukbb_manifest_file, sep="\t")
    # get the variant annotation file
    ukbb_annot_file = "./ukbb/variants.tsv.bgz"
    get_variant_annotation_file = "wget https://www.dropbox.com/s/puxks683vb0omeg/variants.tsv.bgz?dl=0 -O {}".format(
        ukbb_annot_file)
    if not os.path.isfile(ukbb_annot_file):
        os.system(get_variant_annotation_file)
    # GGR relevant codes
    ukbb_codes = [
        #"20001_1003", # skin cancer
        #"20001_1060", # skin cancer
        "20001_1061", # BCC
        "20001_1062", # SCC
        #"20002_1371", # sarcoidosis (self report)
        #"22133", # sarcoidosis (doctor dx)
        #"D86", # sarcoidosis ICD
        #"20002_1381", # lupus
        ##"20002_1382", # sjogrens
        #"20002_1384", # scleroderma
        "20002_1452", # eczema/dermatitis
        "20002_1453", # psoriasis (self report)
        "L12_PSORI_NAS", # psoriasis
        "L12_PSORIASIS", # psoriasis
        "L40", # psoriasis ICD
        "20002_1454", # blistering
        "20002_1455", # skin ulcers
        #"20002_1625", # cellulitis
        "20002_1660", # rosacea
        "L12_ROSACEA", # rosacea
        "L71", # rosacea
        #"20002_1661", # vitiligo
        "B07", # viral warts
        #"C_SKIN",
        "C_OTHER_SKIN", # neoplasm of skin
        #"C3_SKIN",
        "C3_OTHER_SKIN", # neoplasm of skin
        "C44", # cancer ICD
        "D04", # carcinoma in situ of skin
        "D23", # benign neoplasms
        "L12_ACTINKERA", # actinic keratosis
        "L12_ATROPHICSKIN", # atrophic skin
        "L90", # atrophic skin ICD
        "L30", # other dermatitis
        "L12_EPIDERMALTHICKOTH", # epidermal thickening
        "L12_EPIDERMTHICKNAS", # epidermal thickening
        "L85", # epidermal thickening
        #"L12_GRANULOMASKINNAS", # granulomatous
        #"L12_GRANULOMATOUSSKIN", # granulomatous
        #"L92", # granulomatous
        "L91", # hypertrophic disorders
        "L12_HYPERTROPHICNAS",
        "L12_HYPERTROPHICSKIN",
        "L12_HYPETROPHICSCAR",
        "L82", # seborrhoeic keratosis
        "XII_SKIN_SUBCUTAN", # skin
        "L12_SCARCONDITIONS", # scarring
        "L12_SKINSUBCUTISNAS", # other
        "20002_1548", # acne
        #"20002_1549", # lichen planus
        #"20002_1550", # lichen sclerosis
        #"L12_NONIONRADISKIN", # skin changes from nonionizing radiation
        #"L57", # skin changes from nonionizing radiation ICD
        "L12_OTHERDISSKINANDSUBCUTIS", # other
        "L98", # other
    ]
    # reduced set of interest
    # NOTE(review): ukbb_codes_REDUCED is defined but never used below.
    ukbb_codes_REDUCED = [
        "20002_1452", # eczema/dermatitis
        "20002_1453", # psoriasis (self report)
        "20002_1660", # rosacea
        "L12_ROSACEA", # rosacea
        "L12_ACTINKERA", # actinic keratosis
        "L82", # seborrhoeic keratosis
        "20002_1548", # acne
    ]
    # for each, download and process
    num_sig_total = 0
    for ukbb_code in ukbb_codes:
        id_metadata = ukbb_manifest[ukbb_manifest["Phenotype Code"] == ukbb_code]
        if id_metadata.shape[0] > 1:
            id_metadata = id_metadata[id_metadata["Sex"] == "both_sexes"]
        # download file
        filename = "{}/{}".format(sumstats_orig_dir, id_metadata["File"].iloc[0])
        if not os.path.isfile(filename):
            download_cmd = id_metadata["wget command"].iloc[0].split("-O")[0]
            download_cmd = "{} -O {}".format(download_cmd, filename)
            print download_cmd
            os.system(download_cmd)
        # paste with annot file
        w_annot_file = "{}.annot.tsv.gz".format(filename.split(".tsv")[0])
        if not os.path.isfile(w_annot_file):
            paste_cmd = (
                "paste <(zcat {}) <(zcat {}) | "
                "cut -f 1-6,10,13,17,31,34,37 | "
                "gzip -c > {}").format(
                    ukbb_annot_file, filename, w_annot_file)
            print paste_cmd
            # process substitution <(...) needs bash, not sh; hence the
            # GREPDB indirection through /bin/bash -c.
            os.system('GREPDB="{}"; /bin/bash -c "$GREPDB"'.format(paste_cmd))
        # set up sumstats file
        description = str(id_metadata["Phenotype Description"].iloc[0]).split(":")
        if len(description) > 1:
            description = description[1]
        else:
            description = description[0]
        short_description = description.strip().replace(" ", "_").replace("/", "_").replace(",", "_").lower()
        final_sumstats_file = "{}/ukbb.{}.{}.ldsc.sumstats.gz".format(
            sumstats_dir,
            ukbb_code,
            short_description)
        if not os.path.isfile(final_sumstats_file):
            setup_sumstats_file(
                w_annot_file,
                hapmap_snps_file,
                final_sumstats_file,
                other_params="--N-col n_complete_samples --a1 ref --a2 alt --frq AF")
        # TODO get sig snps in a BED file to compare to rules
        sig_snps_file = "{}.sig.bed.gz".format(final_sumstats_file.split(".sumstats")[0])
        if not os.path.isfile(sig_snps_file):
            num_sig = get_sig_snps(final_sumstats_file, ukbb_annot_file, sig_snps_file)
            num_sig_total += num_sig
            if num_sig != 0:
                print sig_snps_file
                print num_sig
        # run tests
        out_prefix = "{}/{}".format(results_dir, os.path.basename(final_sumstats_file).split(".ldsc")[0])
        run_ldsc = (
            "python ~/git/ldsc/ldsc.py "
            "--h2-cts {} "
            "--ref-ld-chr {} "
            "--out {} "
            "--ref-ld-chr-cts {} "
            "--w-ld-chr {}").format(
                final_sumstats_file,
                baseline_model_prefix,
                out_prefix,
                ldsc_table_file,
                weights_prefix)
        #print run_ldsc
        #os.system(run_ldsc)
    # dermatitis - genome-wide genotyping array, illumina
    gwas_dermatitis_sumstats = "{}/gwas.GCST003184.dermatitis.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(gwas_dermatitis_sumstats):
        file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/PaternosterL_26482879_GCST003184/EAGLE_AD_no23andme_results_29072015.txt"
        save_file = "{}/gwas.GCST003184.dermatitis.sumstats.gz".format(sumstats_orig_dir)
        get_file = "wget -O - {} | gzip -c > {}".format(file_url, save_file)
        os.system(get_file)
        setup_sumstats_file(
            save_file,
            hapmap_snps_file,
            gwas_dermatitis_sumstats,
            other_params="--N-col AllEthnicities_N")
    # get sig snps in a BED file to compare to rules
    sig_snps_file = "{}.sig.bed.gz".format(gwas_dermatitis_sumstats.split(".sumstats")[0])
    if not os.path.isfile(sig_snps_file):
        num_sig = get_sig_snps(gwas_dermatitis_sumstats, ukbb_annot_file, sig_snps_file)
        num_sig_total += num_sig
        if num_sig != 0:
            print sig_snps_file
            print num_sig
    # acne - confirmed genome-wide genotyping array, Affy
    gwas_acne_sumstats = "{}/gwas.GCST006640.acne.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(gwas_acne_sumstats):
        file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/HirataT_29855537_GCST006640/Dataset_S7.txt"
        save_file = "{}/gwas.GCST006640.acne.sumstats.gz".format(sumstats_orig_dir)
        get_file = "wget -O - {} | gzip -c > {}".format(file_url, save_file)
        os.system(get_file)
        setup_sumstats_file(
            save_file,
            hapmap_snps_file,
            gwas_acne_sumstats,
            other_params="--N-cas 1115 --N-con 4619 --ignore regional.analysis")
    # get sig snps in a BED file to compare to rules
    sig_snps_file = "{}.sig.bed.gz".format(gwas_acne_sumstats.split(".sumstats")[0])
    if not os.path.isfile(sig_snps_file):
        num_sig = get_sig_snps(gwas_acne_sumstats, ukbb_annot_file, sig_snps_file)
        num_sig_total += num_sig
        if num_sig != 0:
            print sig_snps_file
            print num_sig
    # get UKBB bmistats (from LDSC repo)
    ukbb_bmi_sumstats = "{}/ukbb.alkesgroup.BMI.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(ukbb_bmi_sumstats):
        file_url = "https://data.broadinstitute.org/alkesgroup/UKBB/body_BMIz.sumstats.gz"
        #save_file = "{}/ukbb.ldsc_pheno.BMI.sumstats.gz".format(sumstats_orig_dir)
        get_ukbb = "wget {} -O {}".format(
            file_url,
            ukbb_bmi_sumstats)
        os.system(get_ukbb)
        #setup_sumstats_file(save_file, hapmap_snps_file, ukbb_derm_sumstats)
    # ==================================
    # GLOBAL ANALYSIS WITH SIG SNPS
    # ==================================
    # first, look at sig SNPs within ATAC regions
    # take all the sig BED files and merge
    # then for each grammar bed file, run overlaps and collect results
    quit()
    # ========================
    # NOT USED
    # ========================
    # NOTE(review): everything below quit() is unreachable dead code.  It
    # contains stale calls to an undefined `setup_sumstats` (the function is
    # named setup_sumstats_file), several `get_file` commands that are built
    # but never passed to os.system, and two format strings with two
    # placeholders but a single argument (hyperhidrosis/hirsutism blocks),
    # which would raise IndexError if ever executed.
    # get UKBB derm stats (from LDSC repo)
    ukbb_derm_sumstats = "{}/ukbb.none.derm.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(ukbb_derm_sumstats):
        file_url = "https://data.broadinstitute.org/alkesgroup/UKBB/disease_DERMATOLOGY.sumstats.gz"
        save_file = "{}/ukbb.ldsc_pheno.dermatology.sumstats.gz".format(sumstats_orig_dir)
        get_ukbb = "wget {} -O {}".format(
            file_url,
            save_file)
        os.system(get_ukbb)
        setup_sumstats_file(save_file, hapmap_snps_file, ukbb_derm_sumstats)
    # alopecia - genome-wide genotyping array
    gwas_alopecia_sumstats = "{}/gwas.GCST006661.alopecia.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(gwas_alopecia_sumstats):
        file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/HagenaarsSP_28196072_GCST006661/Hagenaars2017_UKB_MPB_summary_results.zip"
        unzip_dir = "{}/gwas.GCST006661.alopecia".format(sumstats_orig_dir)
        save_file = "{}/gwas.GCST006661.alopecia.zip".format(unzip_dir)
        os.system("mkdir -p {}".format(unzip_dir))
        get_file = "wget {} -O {}".format(file_url, save_file)
        os.system("unzip {} -d {}".format(save_file, unzip_dir))
        setup_sumstats_file(
            "",
            hapmap_snps_file,
            gwas_alopecia_sumstats,
            other_params="--N 52874 --snp Markername")
    # lupus - genome-wide genotyping array, illumina
    gwas_lupus_sumstats = "{}/gwas.GCST005831.lupus.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(gwas_lupus_sumstats):
        file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/JuliaA_29848360_GCST005831/GWAS_SLE_summaryStats/Meta_results.txt"
        save_file = "{}/gwas.GCST005831.lupus.sumstats.gz".format(sumstats_orig_dir)
        get_file = "wget -O - {} | gzip -c > {}".format(file_url, save_file)
        setup_sumstats(
            save_file,
            hapmap_snps_file,
            gwas_lupus_sumstats,
            other_params="--N-cas 4943 --N-con 8483 --a1 A1lele1 --a2 Allele2")
    # lupus - targeted array, ignore
    gwas_lupus_sumstats = "{}.gwas.GCST007400.lupus.ldsc.sumstats.gz"
    # lupus - genome-wide genotyping array, illumina
    gwas_lupus_sumstats = "{}/gwas.GCST003156.lupus.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(gwas_lupus_sumstats):
        file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/BenthamJ_26502338_GCST003156/bentham_2015_26502338_sle_efo0002690_1_gwas.sumstats.tsv.gz"
        save_file = "{}/gwas.GCST003156.lupus.sumstats.gz".format(sumstats_orig_dir)
        get_file = "wget {} -O {}".format(file_url, save_file)
        setup_sumstats(
            save_file,
            hapmap_snps_file,
            gwas_lupus_sumstats,
            other_params="--N 14267 --N-cas 5201 --N-con 9066 --ignore OR,OR_lower,OR_upper")
    # psoriasis - targeted array, ignore
    gwas_psoriasis_sumstats = "{}/gwas.GCST005527.psoriasis.ldsc.sumstats.gz".format(sumstats_dir)
    # baldness - NOTE problem with pval column
    gwas_baldness_sumstats = "{}/gwas.GCST007020.baldness.ldsc.sumstats.gz".format(sumstats_dir)
    if False:
        if not os.path.isfile(gwas_baldness_sumstats):
            file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/YapCX_30573740_GCST007020/mpb_bolt_lmm_aut_x.tab.zip"
            unzip_dir = "{}/gwas.GCST007020.baldness".format(sumstats_orig_dir)
            os.system("mkdir -p {}".format(unzip_dir))
            save_file = "{}/gwas.GCST007020.baldness.zip".format(unzip_dir)
            get_file = "wget {} -O {}".format(file_url, save_file)
            os.system("unzip {} -d {}".format(save_file, unzip_dir))
            save_file = "{}/mpb_bolt_lmm_aut_x.tab".format(unzip_dir)
            setup_sumstats(save_file, gwas_baldness_sumstats, other_params="--N 205327 --p P_BOLT_LMM_INF --a1 ALLELE1 --a2 ALLELE0")
    # solar lentigines - genome-wide genotyping array, Affy
    gwas_lentigines_sumstats = "{}/gwas.GCST006096.lentigines.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(gwas_lentigines_sumstats):
        file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/EndoC_29895819_GCST006096/DatasetS1.txt"
        save_file = "{}/gwas.GCST006096.lentigines.sumstats.gz".format(sumstats_orig_dir)
        get_file = "wget {} -O - | gzip -c > {}".format(file_url, save_file)
        os.system(get_file)
        setup_sumstats(
            save_file,
            hapmap_snps_file,
            gwas_lentigines_sumstats,
            other_params="--N 11253 --N-cas 3815 --N-con 7438")
    # hyperhidrosis - genome-wide genotyping array, Affy
    gwas_hyperhidrosis_sumstats = "{}/gwas.GCST006090.hyperhidrosis.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(gwas_hyperhidrosis_sumstats):
        file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/EndoC_29895819_GCST006090/DatasetS7.txt"
        save_file = "{}/gwas.GCST006090.hyperhidrosis.sumstats.gz".format(sumstats_orig_dir)
        get_file = "wget {} -O - | gzip -c > {}".format(sumstats_orig_dir)
        setup_sumstats(
            save_file,
            hapmap_snps_file,
            gwas_hyperhidrosis_sumstats,
            other_params="--N 4538 --N-cas 1245 --N-con 3293")
    # hirsutism (1) GCST006095
    gwas_hirsutism_sumstats = "{}/gwas.GCST006095.hirsutism.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(gwas_hirsutism_sumstats):
        file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/EndoC_29895819_GCST006095/DatasetS6.txt"
        save_file = "{}/gwas.GCST006095.hirsutism.sumstats.gz".format(sumstats_orig_dir)
        get_file = "wget {} -O - | gzip -c > {}".format(sumstats_orig_dir)
        setup_sumstats(
            save_file,
            hapmap_snps_file,
            gwas_hyperhidrosis_sumstats,
            other_params="--N 11244 --N-cas 3830 --N-con 7414")
    if False:
        # sarcoidosis (1) GCST005540
        gwas_sarcoidosis_sumstats = ""
        # lofgren - NOTE some error in the harmonized file?
        gwas_lofgrens_sumstats = "{}/gwas.GCST005540.lofgrens.ldsc.sumstats.gz".format(sumstats_orig_dir)
        if not os.path.isfile(gwas_lofgrens_sumstats):
            file_url = "ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/RiveraNV_26651848_GCST005540/harmonised/26651848-GCST005540-EFO_0009466.h.tsv.gz"
            save_file = "{}/gwas.GCST005540.lofgrens.sumstats.gz".format(sumstats_orig_dir)
            get_file = "wget {} -O {}".format(file_url, save_file)
            setup_sumstats(save_file, gwas_lofgrens_sumstats, other_params="")
    # vitiligo (4) GCST007112, GCST007111, GCST004785, GCST001509
    gwas_vitiligo_sumstats = "{}/gwas.GCST007112.vitiligo.ldsc.sumstats.gz"
    gwas_vitiligo_sumstats = "{}/gwas.GCST007111.vitiligo.ldsc.sumstats.gz"
    gwas_vitiligo_sumstats = "{}/gwas.GCST004785.vitiligo.ldsc.sumstats.gz".format(sumstats_dir)
    gwas_vitiligo_sumstats = "{}/gwas.GCST001509.vitiligo.ldsc.sumstats.gz".format(sumstats_dir)
    # get UKBB derm stats (from LDSC repo)
    ukbb_derm_sumstats = "{}/ukbb.none.derm.ldsc.sumstats.gz".format(sumstats_dir)
    if not os.path.isfile(ukbb_derm_sumstats):
        file_url = "https://data.broadinstitute.org/alkesgroup/UKBB/disease_DERMATOLOGY.sumstats.gz"
        save_file = "{}/ukbb.ldsc_pheno.dermatology.sumstats.gz".format(sumstats_orig_dir)
        get_ukbb = "wget {} -O {}".format(
            file_url,
            save_file)
        os.system(get_ukbb)
        setup_sumstats_file(save_file, hapmap_snps_file, ukbb_derm_sumstats)
    return
main()
| StarcoderdataPython |
171636 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from ._pw_input import PwInputFile
from ._cp_input import CpInputFile
# Public API of this subpackage: re-export the two input-file parsers.
__all__ = ('PwInputFile', 'CpInputFile')
| StarcoderdataPython |
# Exercise tuple display and the tuple() constructor on several inputs.
for value in (
    (),
    (1,),
    (1, 2, 3),
    tuple(),
    tuple((1,)),
    tuple((1, 2, 3)),
    tuple([1, 2, 3]),
):
    print(value)
| StarcoderdataPython |
3359513 | <gh_stars>10-100
#import ROOT,sys,time,os,signal
from larcv import larcv
import sys,time,os,signal
import numpy as np
class larcv_data (object):
    """Asynchronous (image, label) batch reader built on larcv's
    ThreadFillerFactory.

    Each instance owns one named "filler"; instances register themselves in
    a class-level map so a signal handler can shut all readers down.
    NOTE: this module is Python 2 code (print statements, iteritems).
    """
    # name -> instance registry shared process-wide
    _instance_m={}
    @classmethod
    def exist(cls,name):
        # True if a filler with this name is already registered.
        name = str(name)
        return name in cls._instance_m
    def __init__(self):
        self._proc = None              # larcv ThreadFiller, created lazily
        self._name = ''                # filler name (registry key)
        self._verbose = False          # print size/counter diagnostics in next()
        self._read_start_time = None
        self._read_end_time = None
        self._cfg_file = None          # filler configuration file path
        self._batch = -1               # batch size while a read is pending, -1 when idle
        self._data = None              # reusable numpy buffer for image data
        self._data_size = None
        # cumulative timing diagnostics (seconds) and read counter
        self.time_data_read = 0
        self.time_data_conv = 0
        self.time_data_copy = 0
        self.time_label_conv = 0
        self.time_label_copy = 0
        self.read_counter = 0
    def reset(self):
        """Drain any in-flight read, then destroy the underlying filler."""
        if self.is_reading():
            self.next()
        if larcv.ThreadFillerFactory.exist_filler(self._name):
            larcv.ThreadFillerFactory.destroy_filler(self._name)
    def __del__(self):
        self.reset()
    def configure(self,cfg):
        """Configure from a dict with 'filler_name', 'filler_cfg' and an
        optional 'verbosity' flag; registers this instance globally.

        Raises ValueError on an empty name or a missing config file;
        silently returns if the name is already in use.
        """
        if self._name:
            self.reset()
        if not cfg['filler_name']:
            sys.stderr.write('filler_name is empty!\n')
            raise ValueError
        if self.__class__.exist(cfg['filler_name']):
            sys.stderr.write('filler_name %s already running!' % cfg['filler_name'])
            return
        self._name = cfg['filler_name']
        self._cfg_file = cfg['filler_cfg']
        if not self._cfg_file or not os.path.isfile(self._cfg_file):
            sys.stderr.write('filler_cfg file does not exist: %s\n' % self._cfg_file)
            raise ValueError
        if 'verbosity' in cfg:
            self._verbose = bool(cfg['verbosity'])
        self.__class__._instance_m[self._name] = self
    def read_next(self,batch):
        """Kick off an asynchronous read of *batch* entries (non-blocking)."""
        batch = int(batch)
        if not batch >0:
            sys.stderr.write('Batch size must be positive integer!\n')
            raise ValueError
        if not self._name:
            sys.stderr.write('Filler name unspecified!\n')
            raise ValueError
        if not self._proc:
            # Lazily create and configure the filler on first use.
            self._proc = larcv.ThreadFillerFactory.get_filler(self._name)
            self._proc.configure(self._cfg_file)
        self._read_start_time = time.time()
        self._batch = batch
        self._proc.batch_process(batch)
    def is_reading(self):
        # A read started by read_next() is still pending.
        return (self._batch > 0)
    def next(self):
        """Block until the pending read finishes; return (data, labels).

        Both arrays come back float32 with shape (batch, entry_size).
        The image buffer is reused across calls, so the returned data
        array is overwritten by the following read.
        """
        if self._batch <= 0:
            sys.stderr.write('Thread not running...\n')
            raise Exception
        sleep_ctr=0
        while self._proc.thread_running():
            # Poll the filler thread; report every ~5 s of waiting.
            time.sleep(0.005)
            sleep_ctr+=1
            if sleep_ctr%1000 ==0:
                print 'queueing...'
                print 'Data dim:',
        #sleep_ctr=0
        #while sleep_ctr<40:
        #    time.sleep(0.125)
        #    sleep_ctr += 1
        #    print self._proc.thread_running(),
        #print
        self._read_end_time = time.time()
        if self.read_counter:
            # Skip the very first read when accumulating timing stats.
            self.time_data_read += (self._read_end_time - self._read_start_time)
        if self._verbose:
            print
            print 'Data size:',self._proc.data().size()
            print 'Label size:',self._proc.labels().size()
            print 'Batch size:',self._proc.processed_entries().size()
            print 'Total process counter:',self._proc.process_ctr()
        ctime = time.time()
        #np_data = larcv.as_ndarray(self._proc.data()).reshape(self._batch,self._proc.data().size()/self._batch)#.astype(np.float32)
        if self._data is None:
            # First read: allocate the persistent buffer.
            self._data = np.array(self._proc.data())
            self._data_size = self._data.size
        else:
            # Later reads: flatten and copy in place to avoid reallocation.
            self._data = self._data.reshape(self._data.size)
            larcv.copy_array(self._data,self._proc.data())
        if self.read_counter: self.time_data_copy += time.time() - ctime
        ctime = time.time()
        # Python 2 integer division: size()/batch is the per-entry length.
        self._data = self._data.reshape(self._batch,self._proc.data().size()/self._batch).astype(np.float32)
        if self.read_counter: self.time_data_conv += time.time() - ctime
        ctime = time.time()
        np_label = np.array(self._proc.labels())
        if self.read_counter: self.time_label_copy += time.time() - ctime
        ctime = time.time()
        np_label = np_label.reshape(self._batch,self._proc.labels().size()/self._batch).astype(np.float32)
        if self.read_counter: self.time_label_conv += time.time() - ctime
        self._batch = -1
        self.read_counter += 1
        return (self._data,np_label)
def sig_kill(signal,frame):
    # SIGINT handler: gracefully stop every registered larcv_data filler.
    # NOTE: the 'signal' parameter shadows the signal module inside this
    # body; the module itself is only needed at registration time below.
    print '\033[95mSIGINT detected.\033[00m Finishing the program gracefully.'
    for name,ptr in larcv_data._instance_m.iteritems():
        print 'Terminating filler:',name
        ptr.reset()
# Install the handler for Ctrl-C at import time.
signal.signal(signal.SIGINT, sig_kill)
| StarcoderdataPython |
3216744 | # -*- coding: utf-8 -*-
### Python imports
import pathlib
### Third Party imports
import numpy as np
import pandas as pd
import pytest
### Project imports
from t4.formats import FormatRegistry
from t4.util import QuiltException
### Constants
### Code
def test_buggy_parquet():
    """Parquet handlers must tolerate bad Pandas metadata produced by old
    pyarrow libraries instead of crashing."""
    data_file = pathlib.Path(__file__).parent / 'data' / 'buggy_parquet.parquet'
    raw = data_file.read_bytes()
    for handler in FormatRegistry.for_format('parquet'):
        # Deserializing must not raise despite the buggy metadata.
        handler.deserialize(raw)
def test_formats_for_obj():
    """Object-based lookup: ndarrays map to the numpy format, strings to
    the text/json formats, and the chosen format round-trips its object."""
    arr = np.ndarray(3)
    numpy_fmt = FormatRegistry.for_obj(arr)[0]
    assert 'npz' in numpy_fmt.handled_extensions
    assert FormatRegistry.for_ext('npy')[0] is numpy_fmt
    string_fmt_names = [fmt.name for fmt in FormatRegistry.for_obj('blah')]
    assert string_fmt_names == ['utf-8', 'unicode', 'json']
    serialized = numpy_fmt.serialize(arr)[0]
    assert np.array_equal(numpy_fmt.deserialize(serialized), arr)
def test_formats_for_ext():
    """Extension-based lookup resolves the JSON format, which round-trips
    dicts through bytes."""
    json_fmt = FormatRegistry.for_ext('json')[0]
    serialized = json_fmt.serialize({'blah': 'blah'})[0]
    assert serialized == b'{"blah": "blah"}'
    assert json_fmt.deserialize(b'{"meow": "mix"}') == {'meow': 'mix'}
def test_formats_for_meta():
    """Metadata-based lookup honors the ``target`` key."""
    payload = b'["phlipper", "piglet"]'
    raw_fmt = FormatRegistry.for_meta({'target': 'bytes'})[0]
    assert raw_fmt.serialize(payload)[0] == payload
    json_fmt = FormatRegistry.for_meta({'target': 'json'})[0]
    assert json_fmt.deserialize(payload) == ['phlipper', 'piglet']
def test_formats_for_format():
    """Name-based lookup resolves the bytes and json formats."""
    payload = b'["phlipper", "piglet"]'
    raw_fmt = FormatRegistry.for_format('bytes')[0]
    assert raw_fmt.serialize(payload)[0] == payload
    json_fmt = FormatRegistry.for_format('json')[0]
    assert json_fmt.deserialize(payload) == ['phlipper', 'piglet']
def test_formats_serdes():
    """Round-trip several object types through the registry."""
    for obj in ({'blah': 'foo'}, b'blather', 'blip'):
        meta = {}
        data, format_meta = FormatRegistry.serialize(obj, meta)
        meta.update(format_meta)
        assert FormatRegistry.deserialize(data, meta) == obj
    # DataFrames round-trip too, modulo one CSV artifact:
    meta = {}
    df1 = pd.DataFrame([[1, 2], [3, 4]])
    data, format_meta = FormatRegistry.serialize(df1, meta)
    meta.update(format_meta)
    df2 = FormatRegistry.deserialize(data, meta)
    # Numeric header names come back from CSV as strings; cast them back to
    # int so the frame comparison is meaningful.
    df2.columns = df2.columns.astype(int, copy=False)
    assert df1.equals(df2)
def test_formats_csv_read():
    """A CSV file on disk deserializes to the same frame as its raw bytes,
    and serializes back to those exact bytes."""
    meta = {'format': {'name': 'csv'}}
    expected_bytes = b'a,b,c,d\n1,2,3,4\n5,6,7,8\n'
    expected_df = FormatRegistry.deserialize(expected_bytes, meta)
    csv_path = pathlib.Path(__file__).parent / 'data' / 'csv.csv'
    loaded_df = FormatRegistry.deserialize(csv_path.read_bytes(), meta)
    assert loaded_df.equals(expected_df)
    assert FormatRegistry.serialize(loaded_df, meta)[0] == expected_bytes
def _roundtrip_csv(raw, meta):
    """Deserialize *raw* under *meta*, serialize the frame back, then
    deserialize again; return (serialized_bytes, first_df, second_df)."""
    df1 = FormatRegistry.deserialize(raw, meta)
    serialized, format_meta = FormatRegistry.serialize(df1, meta)
    meta.update(format_meta)
    df2 = FormatRegistry.deserialize(serialized, meta)
    return serialized, df1, df2


def test_formats_csv_roundtrip():
    """CSV data round-trips byte-identically under every header/index option.

    Refactored: the original repeated the same deserialize/serialize/
    deserialize sequence four times and shadowed the ``bin`` builtin.
    """
    test_data = b'9,2,5\n7,2,6\n1,0,1\n'
    opts_variants = [
        None,                                     # roundtrip defaults
        {'use_header': True},                     # first row as header
        {'use_index': True},                      # first column as index
        {'use_index': True, 'use_header': True},  # both at once
    ]
    for opts in opts_variants:
        fmt = {'name': 'csv'}
        if opts is not None:
            fmt['opts'] = opts
        serialized, df1, df2 = _roundtrip_csv(test_data, {'format': fmt})
        assert serialized == test_data
        assert df1.equals(df2)
def test_formats_search_fail_notfound():
    """A search that matches nothing raises QuiltException with an
    explanation, whichever single criterion was supplied."""
    class Foo:
        pass
    for kwargs in (
        {'obj_type': Foo, 'meta': None, 'ext': None},
        {'obj_type': None, 'meta': {}, 'ext': None},
        {'obj_type': None, 'meta': None, 'ext': '.fizz'},
    ):
        with pytest.raises(QuiltException):
            FormatRegistry.search(**kwargs)
# Minimal EasyPost API usage example: retrieve one shipment by ID.
import easypost
import os
# Authenticate the client library from the environment; raises KeyError if
# EASYPOST_KEY is not set.
easypost.api_key=os.environ['EASYPOST_KEY']
# Fetch a single shipment object by its EasyPost shipment ID.
shipment=easypost.Shipment.retrieve('shp_sq2zuZ8d')
| StarcoderdataPython |
90205 | <reponame>joyliao07/401_midterm_wizard_game
import pytest
import io
# Login function
def login_for_test(app):
    """Authenticate the shared test user through the /login endpoint."""
    credentials = {
        'email': '<EMAIL>',
        'password': '<PASSWORD>',
    }
    app.post('/login', data=credentials, follow_redirects=True)
# test basics
def test_app_import(app):
assert app
def test_bad_route(app):
""" test 404 with bad route """
rv = app.test_client().get('/fake')
assert rv.status_code == 404
assert b'Page not found' in rv.data
# Home route
def test_home_get_no_login(app):
""" test login page for status code/correct message with no login """
rv = app.test_client().get('/')
assert rv.status_code == 200
assert b'You must be the new apprentice.' in rv.data
def test_home_get_with_login(app, db, session, account):
    """Home page welcomes back a logged-in apprentice."""
    with app.test_client() as client:
        login_for_test(client)
        response = client.get('/')
        assert response.status_code == 200
        assert b'Welcome back, my apprentice' in response.data
def test_home_bad_method(app):
""" test home route with unimplemented method for correct status code """
rv = app.test_client().delete('/')
assert rv.status_code == 405
assert b'Are you trying to pull a fast one' in rv.data
# Play route
def test_play_get_no_login(app):
""" test that going to /play while not logged in redirects to login
page """
rv = app.test_client().get('/play', follow_redirects=True)
assert b'You must be logged in' in rv.data
assert rv.status_code == 200
def test_play_get_with_login(app, session, db, account):
    """GET /play while logged in serves the drawing prompt."""
    with app.test_client() as client:
        login_for_test(client)
        rv = client.get('/play')
        assert rv.status_code == 200
        # FIX: the original asserted a bare non-empty bytes literal, which is
        # always truthy; the prompt text must actually appear in the body.
        assert b'I seem to have forgotten what a' in rv.data
def test_play_post_no_login(app):
""" test that trying to post to /play with no login redirects to login """
rv = app.test_client().post('/play', follow_redirects=True)
assert rv.status_code == 200
assert b'You must be logged in' in rv.data
def test_play_post_with_login(app, session, db, account):
    """Posting an image to /play while logged in shows the confirmation page."""
    with app.test_client() as client:
        login_for_test(client)
        payload = {'file_upload': (io.BytesIO(b'hi'), 'test_no_match.jpg')}
        response = client.post(
            '/play',
            data=payload,
            content_type='multipart/form-data',
            follow_redirects=True,
        )
        assert response.status_code == 200
        assert b'So, is this what a' in response.data
def test_play_post_with_login_no_data(app, session, db, account):
    """POST /play with no payload while logged in serves the play page."""
    with app.test_client() as client:
        login_for_test(client)
        rv = client.post('/play')
        assert rv.status_code == 200
        # FIX: the original asserted a bare non-empty bytes literal, which is
        # always truthy; the prompt text must actually appear in the body.
        assert b'I seem to have forgotten what a' in rv.data
def test_play_bad_method(app):
""" test home route with unimplemented method for correct status code """
rv = app.test_client().delete('/play')
assert rv.status_code == 405
assert b'Are you trying to pull a fast one' in rv.data
# Submission route
def test_submission_get_no_login(app):
""" tests that user is prompted to login when visiting submission page
when not logged in """
rv = app.test_client().get('/submission', follow_redirects=True)
assert b'You must be logged in' in rv.data
assert rv.status_code == 200
def test_submission_get_with_login_no_data(app, session, db, account):
    """/submission 404s when nothing has been submitted yet."""
    with app.test_client() as client:
        login_for_test(client)
        response = client.get('/submission', follow_redirects=True)
        assert response.status_code == 404
        assert b'Page not found' in response.data
def test_submission_get_with_login(app, db, session, account):
    """/submission shows the pending image before the player confirms it."""
    with app.test_client() as client:
        login_for_test(client)
        payload = {'file_upload': (io.BytesIO(b'hi'), 'test.jpg')}
        client.post(
            '/play',
            data=payload,
            content_type='multipart/form-data',
        )
        response = client.get('/submission')
        assert response.status_code == 200
        assert b'So, is this what a' in response.data
def test_submission_route_bad_method(app):
""" test submission route with unimplemented method for correct
status code """
rv = app.test_client().delete('/submission')
assert rv.status_code == 405
assert b'Are you trying to pull a fast one' in rv.data
# Feedback route
def test_feedback_get_no_login(app):
""" tests going to /feedback without being logged in to make sure
user is prompted to log in """
rv = app.test_client().get('/feedback', follow_redirects=True)
assert rv.status_code == 200
assert b'You must be logged in' in rv.data
def test_feedback_no_data(app, session, db, account):
    """/feedback 404s when nothing has been submitted yet."""
    with app.test_client() as client:
        login_for_test(client)
        response = client.get('/feedback', follow_redirects=True)
        assert response.status_code == 404
        assert b'Page not found' in response.data
def test_feedback_get_no_match(app, session, db, account, prompt):
    """Feedback text when the image matches neither color nor object type."""
    with app.test_client() as client:
        login_for_test(client)
        # FIX: close the image file instead of leaking the handle.
        with open('src/test/test_images/test_no_match.jpg', 'rb') as img:
            payload = {'file_upload': (io.BytesIO(img.read()), 'test.jpg')}
        client.post(
            '/play',
            data=payload,
            content_type='multipart/form-data',
            follow_redirects=True
        )
        rv = client.get('/feedback')
        assert rv.status_code == 200
        assert b'This is not a' in rv.data
        assert b'It\'s not even' in rv.data
def test_feedback_get_color_only_match(app, session, db, account, prompt):
    """Feedback text when the image matches only the prompted color."""
    with app.test_client() as client:
        login_for_test(client)
        # FIX: close the image file instead of leaking the handle.
        with open('src/test/test_images/test_color_match.jpg', 'rb') as img:
            payload = {'file_upload': (io.BytesIO(img.read()), 'test.jpg')}
        client.post(
            '/play',
            data=payload,
            content_type='multipart/form-data'
        )
        rv = client.get('/feedback')
        assert rv.status_code == 200
        assert b'Well... it\'s' in rv.data
        assert b'but it\'s not even a' in rv.data
def test_feedback_get_noun_only_match(app, session, db, account, prompt):
    """Feedback when the image matches the prompted object type only."""
    with app.test_client() as client:
        login_for_test(client)
        # FIX: close the image file instead of leaking the handle.
        with open('src/test/test_images/test_noun_match.jpg', 'rb') as img:
            payload = {'file_upload': (io.BytesIO(img.read()), 'test.jpg')}
        client.post(
            '/play',
            data=payload,
            content_type='multipart/form-data',
        )
        rv = client.get('/feedback')
        assert rv.status_code == 200
        # assert b'That looks like a' in rv.data
        # assert b'but it\'s not' in rv.data
def test_feedback_get_full_match(app, session, db, account, prompt):
    """Feedback when the image fully matches the prompt."""
    with app.test_client() as client:
        login_for_test(client)
        # FIX: close the image file instead of leaking the handle.
        with open('src/test/test_images/test_full_match.png', 'rb') as img:
            payload = {'file_upload': (io.BytesIO(img.read()), 'test.png')}
        client.post(
            '/play',
            data=payload,
            content_type='multipart/form-data'
        )
        rv = client.get('/feedback')
        assert rv.status_code == 200
        # assert b'Yes, that\'s a' in rv.data
def test_feedback_route_bad_method(app):
""" test feedback route with unimplemented method for correct ]
status code """
rv = app.test_client().delete('/feedback')
assert rv.status_code == 405
assert b'Are you trying to pull a fast one' in rv.data
# History route
def test_history_get_no_login(app):
""" tests going to /history without being logged in to make sure user is
redirected """
rv = app.test_client().get('/history', follow_redirects=True)
assert b'You must be logged in' in rv.data
assert rv.status_code == 200
def test_history_get_no_submissions(app, session, db, account):
    """/history shows the empty-state message before any submissions."""
    with app.test_client() as client:
        login_for_test(client)
        response = client.get('/history')
        assert response.status_code == 200
        assert b'Apprentice, you have no submissions yet.' in response.data
def test_history_get_with_submissions(app, session, db, account, prompt):
    """/history lists a submission after the player uploads an image."""
    with app.test_client() as client:
        login_for_test(client)
        # FIX: close the image file instead of leaking the handle.
        with open('src/test/test_images/test_color_match.jpg', 'rb') as img:
            payload = {'file_upload': (io.BytesIO(img.read()), 'test.jpg')}
        client.post(
            '/play',
            data=payload,
            content_type='multipart/form-data'
        )
        rv = client.get('/history')
        assert rv.status_code == 200
        # FIX: the bare bytes literal was always truthy; check the body.
        assert b'Apprentice, your past submissions are below.' in rv.data
        assert b'Blue Chair (Fail)' in rv.data
# Players route
def test_players_get_no_login(app):
""" tests that going to /players when not logged in prompts user to log
in """
rv = app.test_client().get('/players', follow_redirects=True)
assert b'You must be logged in' in rv.data
assert rv.status_code == 200
def test_players_get_no_submissions(app, session, db, account, prompt):
    """/players shows the empty-state message when nobody has succeeded."""
    with app.test_client() as client:
        login_for_test(client)
        response = client.get('/players')
        assert response.status_code == 200
        assert b'Apprentice, there are no successful submissions' in response.data
def test_players_get_with_submissions(app, session, db, account, prompt):
    """/players lists a player's successful submission."""
    with app.test_client() as client:
        login_for_test(client)
        # FIX: close the image file instead of leaking the handle.
        with open('src/test/test_images/test_full_match.png', 'rb') as img:
            payload = {'file_upload': (io.BytesIO(img.read()), 'test.png')}
        client.post(
            '/play',
            data=payload,
            content_type='multipart/form-data'
        )
        client.get('/feedback')
        rv = client.get('/players')
        assert rv.status_code == 200
        # FIX: the bare bytes literal was always truthy; check the body.
        assert b'Other player\'s submissions are below.' in rv.data
        # assert b'Blue Chair' in rv.data
| StarcoderdataPython |
1660206 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Support module generated by PAGE version 4.20
# in conjunction with Tcl version 8.6
# Feb 18, 2019 11:55:48 AM -03 platform: Windows NT
# Feb 19, 2019 09:09:42 AM -03 platform: Windows NT
"""
Created on Mon Feb 18 10:08:04 2019
@author: <NAME>
"""
import sys
import Controller as ctrl
import Model as md
import Estilos as es
import numpy as np
#from numpy import array, concatenate, ndarray, append, take, delete
import pandas as pd
from tkinter import filedialog, colorchooser, IntVar
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
def set_var():
    """(Re)initialize the module-level GUI state shared by all callbacks.

    Must run after ``init`` has bound ``w`` (widget container) and ``root``
    (toplevel), because ``ctrl.validation`` needs both.
    """
    global conn, qtdMin, curvePlot, curvesList, cBoxList, md_dpv, validation
    conn = 0  # 1 while the VStat potentiostat is connected
    qtdMin = 0  # number of miniature previews currently drawn
    curvePlot = np.ndarray([])  # curves shown in the main plot (index 0 is a placeholder)
    curvesList = np.ndarray([])  # every curve acquired/imported so far
    cBoxList = np.ndarray([])  # one tk IntVar per miniature checkbox
    md_dpv = md.dpv()  # DPV parameter model with defaults
    validation = ctrl.validation(w, root)
def init(top, gui, *args, **kwargs):
    """PAGE entry point: store the toplevel and widget container, reset the
    shared state, and show the default DPV parameter panel."""
    global w, top_level, root, font9
    w = gui
    top_level = top
    root = top
    #font9 = "-family {Segoe UI} -size 9 -weight bold -slant roman " \
    #    "-underline 0 -overstrike 0"
    set_var()
    painelDPV()
def destroy_window():
    """Close the application window and clear the module-level handle."""
    global top_level
    top_level.destroy()
    top_level = None
if __name__ == '__main__':
    # Allow running this support module directly: delegate to the main GUI.
    import VStat
    VStat.vp_start_gui()
########## Funções ###########
def createCanvas():
    """Build the white plotting canvas and the toolbar frame inside the main
    view; both are recreated from scratch on every redraw."""
    w.cv_curveGraph = tk.Canvas(w.fr_mainView)
    w.cv_curveGraph.place(relx=0.012, rely=0.119, relheight=0.857, relwidth=0.974)
    w.cv_curveGraph.configure(background="#ffffff")
    w.cv_curveGraph.configure(highlightbackground="#ffffff")
    w.cv_curveGraph.configure(highlightcolor="black")
    w.cv_curveGraph.configure(insertbackground="black")
    w.cv_curveGraph.configure(relief='ridge')
    w.cv_curveGraph.configure(selectbackground="#c4c4c4")
    w.cv_curveGraph.configure(selectforeground="black")
    w.cv_curveGraph.configure(width=823)
    # Container for the matplotlib navigation toolbar above the plot.
    w.fr_toolbar = tk.Frame(w.fr_mainView)
    w.fr_toolbar.place(relx=0.012, rely=0.02, height=38, relwidth=0.974)
    w.fr_toolbar.configure(relief='groove')
    w.fr_toolbar.configure(borderwidth="2")
    w.fr_toolbar.configure(relief='groove')
    w.fr_toolbar.configure(background="#f9f9f9")
    w.fr_toolbar.configure(highlightbackground="#f9f9f9")
    w.fr_toolbar.configure(highlightcolor="black")
    w.fr_toolbar.configure(width=823)
def btn_import(p1):
    """Toolbar handler: import a CSV curve, plot it and add a miniature.

    Clears any previously plotted/loaded curves first. *p1* is the Tk
    event object (unused).
    """
    global curvesList, curvePlot, spAt, cnvAt
    curvePlot = np.ndarray([])
    curvesList = np.ndarray([])
    imp = filedialog.askopenfilename(initialdir = "C:/", title = "Importar CSV...",
                                filetypes = (("Comma-separeted values", "*.csv"),
                                            ("All files", "*.*")))
    # Empty string means the user cancelled the dialog.
    if imp:
        csv = ctrl.file.importCsv(imp)
        top_level.title("VStat - " + csv.curveName + ".csv")
        curvePlot = np.append(curvePlot, csv, axis=None)
        spAt, cnvAt = drawCurve()
        cnvAt.draw()
        curvesList = np.append(curvesList, csv, axis=None)
        createMins()
def btn_export(p1):
    """Toolbar handler: export the currently plotted curve to a CSV file.

    Only a single plotted curve can be exported; merged plots and an empty
    plot only show a status message. *p1* is the Tk event object (unused).
    """
    global curvePlot
    # curvePlot keeps a placeholder at index 0, so size == 2 means exactly
    # one real curve is plotted.
    if curvePlot.size == 2:
        csvName = filedialog.asksaveasfilename(title='Exportar CSV...', defaultextension = 'csv', initialdir = "C:/", filetypes = (("Comma-separeted values", "*.csv"), ("All files", "*.*")))
        ctrl.file.exportCsv(np.take(curvePlot, 1), csvName)
    elif curvePlot.size > 2:
        w.lb_ConnInfo.configure(text="Ainda não é possível\nexportar curvas unidas")
    elif curvePlot.size < 2:
        w.lb_ConnInfo.configure(text="Sem curva para\nexportar")
def btn_connect(p1):
    """Toggle the serial connection to the VStat potentiostat.

    Updates the button label/color and the status label according to the
    connection result. *p1* is the Tk event object (unused).
    """
    global conn
    if conn:
        ctrl.connection.disconnect()
        conn = 0
        w.btn_connect.configure(text='''Conectar''', background="#738c8c")
        w.btn_connect.update()
        w.lb_ConnInfo.configure(text="VStat desconectado")
    else:
        # connect() returns the port name on success, falsy on failure.
        vstat = ctrl.connection.connect()
        if vstat:
            conn = 1
            w.lb_ConnInfo.configure(text="VStat conectado\nPorta "+vstat)
            w.btn_connect.configure(text='''Desconectar''', background="#00cccc")
            w.btn_connect.update()
        else:
            w.lb_ConnInfo.configure(text="VStat não encontrado")
def btn_iniciar(p1):
    """Start a DPV acquisition with the parameters currently on screen.

    Copies the entry values into the DPV model, opens the serial port,
    transmits the parameters, receives the curve live into the main plot
    and finally rebuilds the miniatures. *p1* is the Tk event (unused).
    """
    global curvePlot, curvesList, spAt, cnvAt, md_dpv
    # Persist the on-screen parameters into the model.
    md_dpv.pIni = w.et_PInicio.get()
    md_dpv.pFim = w.et_PFim.get()
    md_dpv.pPul = w.et_PPulso.get()
    md_dpv.pPas = w.et_PPasso.get()
    md_dpv.tPul = w.et_TPulso.get()
    md_dpv.tPas = w.et_tPasso.get()
    md_dpv.tEqu = w.et_tEquil.get()
    md_dpv.fEsc = w.cb_intCorrente.current()
    # Clear the miniatures frame.
    destroyChildren(w.fr_miniaturas)
    w.fr_miniaturas.update()
    # Check that the potentiostat is connected and start the analysis;
    # openPort() returns truthy on FAILURE.
    ini = ctrl.connection.openPort()
    if ini:
        w.lb_ConnInfo.configure(text="VStat não conectado")
        w.btn_connect.configure(background="#ff6666")
        w.btn_connect.update()
    else:
        """x = np.arange(float(w.et_PInicio.get()), float(w.et_PFim.get()), float(w.et_PPasso.get()))
        y = np.arange(0, x.size, 1)
        c = md.curve("live Plot", x, y)
        curvePlot = np.append(curvePlot, c)"""
        ctrl.transmition.transmit(str(w.cb_intCorrente.current()),
                                  w.et_PInicio.get(),
                                  w.et_PFim.get(),
                                  w.et_PPulso.get(),
                                  w.et_PPasso.get(),
                                  w.et_TPulso.get(),
                                  w.et_tPasso.get(),
                                  w.et_tEquil.get())
        destroyChildren(w.fr_mainView)
        # Full-scale current: 4096 ADC counts over 3.3 V reference.
        if w.cb_intCorrente.current() == 0:
            fe = 5/(4096/3.3)
            print("Escala: Automática")
            print("fundo de escala(inicial): ", fe)
        else:
            fe = int(w.cb_intCorrente.get()[4:-2])/(4096/3.3)
            print("Escala: ", w.cb_intCorrente.get()[4:-2])
            print("fundo de escala: ", fe)
        # Seed the plot with an empty placeholder curve, then stream data.
        curvePlot = np.ndarray([])
        curvePlot = np.append(curvePlot, md.curve("", np.array([]), np.array([])))
        spAt, cnvAt = drawCurve()
        curvesList = ctrl.transmition.receive(curvePlot, spAt, cnvAt, fe, float(w.et_PInicio.get()), float(w.et_PPasso.get()))#, canvas)
        #curvePlot = np.append(curvePlot, np.take(curvesList, 1))
        ctrl.connection.closePort()
        #if dpv:
        top_level.title("VStat - " + np.take(curvePlot, 1).curveName)#dpv.curveName)
        #createCanvas()
        #spAt, cnvAt = drawCurve()
        createMins()
def drawCurve():
    """Rebuild the main matplotlib figure from ``curvePlot``.

    Recreates the Tk canvas and toolbar, plots either the single loaded
    curve or every curve of a merge, and returns ``(subplot, canvas)`` so
    callers can keep drawing on the live figure.
    """
    global curvePlot, sp, fig
    createCanvas()
    fig = Figure(figsize=(10, 8), dpi = 100)
    sp = fig.add_subplot(111, xlabel="Potencial em Volts (V)", ylabel="Corrente em Microampere (µA)")#, title=cv.curveName)
    canvas = FigureCanvasTkAgg(fig, master = w.cv_curveGraph)
    toolbar = NavigationToolbar2Tk(canvas, w.fr_toolbar)
    # curvePlot keeps a placeholder at index 0, so size == 2 means exactly
    # one real curve; anything larger is a merge of several curves.
    if curvePlot.size == 2:
        cv = np.take(curvePlot, 1)
        sp.set_title(cv.curveName)
        sp.plot(cv.curveX, cv.curveY, color=cv.color)
    elif curvePlot.size > 2:
        sp.set_title("untitle merge")
        for i in range(1, curvePlot.size):
            cv = np.take(curvePlot, i)
            sp.plot(cv.curveX, cv.curveY, color=cv.color)
    toolbar.update()
    # FIX: the widget was packed twice with identical options; pack once.
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    return sp, canvas
def expandMin(curveIndex):
    """Show the curve behind miniature *curveIndex* alone in the main plot.

    *curveIndex* is the zero-based miniature position; the matching curve
    lives at ``curvesList[curveIndex + 1]`` (index 0 is a placeholder).
    """
    global curvesList, curvePlot, spAt, cnvAt
    cv = np.take(curvesList, curveIndex+1)
    curvePlot = np.ndarray([])
    curvePlot = np.append(curvePlot, cv, axis=None)
    spAt, cnvAt = drawCurve()
def createMins():
    """Rebuild the miniature previews, one per curve in ``curvesList``."""
    global cBoxList, curvesList, qtdMin
    # Wipe the existing miniatures and their bookkeeping.
    qtdMin = 0
    # FIX: cBoxList must be rebuilt together with the miniatures. Leaving
    # stale IntVars in it desynchronized checkbox positions from curve
    # indices after a second acquisition/import, because createMin appends
    # one IntVar per miniature and addresses them by position.
    cBoxList = np.ndarray([])
    destroyChildren(w.fr_miniaturas)
    # Draw one miniature per real curve (index 0 is a placeholder).
    for i in range(1, curvesList.size):
        curve = np.take(curvesList, i)
        createMin(curve)
def createMin(curve):
    """Draw one miniature preview for *curve* in the miniatures frame.

    Adds a selection IntVar to ``cBoxList``, places a title label, a small
    matplotlib canvas, a checkbox and a color swatch; clicking the title
    or the canvas expands the curve into the main plot.
    """
    global qtdMin, cBoxList
    cBoxList = np.append(cBoxList, IntVar(), axis=None)
    # Position of this miniature before qtdMin is bumped below.
    thisIndex = qtdMin
    relX = 0.01
    if qtdMin == 0:
        qtdMin += 1
    elif qtdMin > 0:
        # Each miniature occupies 0.152 of the frame width.
        relX = (0.152 * qtdMin) + 0.01
        qtdMin += 1
    # Title label above the miniature.
    w.lb_minCurve = tk.Label(w.fr_miniaturas)
    w.lb_minCurve.place(relx=relX, rely=0.058, height=21, width=133)
    w.lb_minCurve.configure(background="#d9d9d9")
    w.lb_minCurve.configure(disabledforeground="#a3a3a3")
    w.lb_minCurve.configure(foreground="#000000")
    w.lb_minCurve.configure(text=curve.curveName)
    w.lb_minCurve.configure(width=133)
    w.lb_minCurve.bind("<Button-1>", lambda x:expandMin(thisIndex))
    # Canvas holding the thumbnail plot.
    w.cv_minCurve = tk.Canvas(w.fr_miniaturas)
    w.cv_minCurve.place(relx=relX, rely=0.165, height=112, width=133)
    fig = Figure(figsize=(1, 1), dpi = 100)
    canvas = FigureCanvasTkAgg(fig, master = w.cv_minCurve)
    #toolbar = NavigationToolbar2Tk(canvas, w.fr_toolbar)
    sp = fig.add_subplot(111)
    sp.plot(curve.curveX, curve.curveY)
    #toolbar.update()
    #canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    canvas.draw()
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    canvas.mpl_connect('button_press_event', lambda x: expandMin(thisIndex))
    # Selection checkbox bound to this miniature's IntVar.
    w.cb_chooser = tk.Checkbutton(w.cv_minCurve)
    w.cb_chooser.place(relx=0.075, rely=0.097, relheight=0.243, relwidth=0.211)
    w.cb_chooser.configure(activebackground="#ececec")
    w.cb_chooser.configure(activeforeground="#000000")
    w.cb_chooser.configure(background="#d9d9d9")
    w.cb_chooser.configure(disabledforeground="#a3a3a3")
    w.cb_chooser.configure(foreground="#000000")
    w.cb_chooser.configure(highlightbackground="#d9d9d9")
    w.cb_chooser.configure(highlightcolor="black")
    w.cb_chooser.configure(justify='left')
    w.cb_chooser.configure(variable=np.take(cBoxList, thisIndex+1))
    # Color swatch; clicking it opens the color chooser for this curve.
    w.fr_color = tk.Frame(w.cv_minCurve)
    w.fr_color.place(relx=0.752, rely=0.097, relheight=0.243, relwidth=0.188)
    w.fr_color.configure(relief='groove')
    w.fr_color.configure(borderwidth="2")
    w.fr_color.configure(relief='groove')
    w.fr_color.configure(background="#1559c6")
    w.fr_color.configure(width=25)
    w.fr_color.bind("<Button-1>", lambda e:changeColor(e, thisIndex, sp, canvas))
def destroyChildren(frame):
    """Recursively destroy every descendant widget of *frame* (bottom-up);
    *frame* itself is kept alive."""
    for widget in frame.winfo_children():
        destroyChildren(widget)
        widget.destroy()
def changeColor(p1, curveIndex, sp, canvas):
    """Let the user pick a new color for curve *curveIndex* and apply it to
    the miniature, its swatch and the main plot.

    *p1* is the Tk click event on the swatch frame; *sp*/*canvas* are the
    miniature's subplot and matplotlib canvas.
    """
    global curvesList, curvePlot
    color = colorchooser.askcolor()
    # FIX: askcolor() returns (rgb_tuple, hex_string); take the hex string
    # directly instead of slicing str(color) with magic offsets, and bail
    # out when the user cancels the dialog (hex_string is None), which
    # previously produced a garbage color string.
    new_color = color[1]
    if new_color is None:
        return
    cv = np.take(curvesList, curveIndex+1)
    cv.color = new_color
    sp.plot(cv.curveX, cv.curveY, new_color)
    canvas.draw()
    p1.widget.configure(background=cv.color)
    p1.widget.update()
    drawCurve()
def curvesJoin():
    """Merge every checked miniature curve into the main plot.

    Requires at least two checked curves; otherwise only a status message
    is shown. All checkboxes are cleared as they are consumed.
    """
    global curvePlot
    count = 0
    for i in range(1, cBoxList.size):
        c = np.take(cBoxList, i)
        if c.get():
            if count < 1:
                # First selected curve: restart curvePlot from scratch.
                cv = np.take(curvesList, i)
                curvePlot = np.ndarray([])
                curvePlot = np.append(curvePlot, cv, axis=None)
                count += 1
            else:
                # Subsequent selections are appended to the merge.
                cv = np.take(curvesList, i)
                curvePlot = np.append(curvePlot, cv, axis=None)
                count += 1
            c.set(0)
    if count <= 1:
        w.lb_ConnInfo.configure(text="Selecione ao menos\nduas curvas")
    else:
        drawCurve()
def removeCurve():
    """Delete every curve whose miniature checkbox is checked, then rebuild
    the miniatures.

    Walks ``cBoxList`` with a manual index because deleting an entry shifts
    the following ones into the current position.
    """
    # FIX: removed leftover debug residue (unused `t`/`p` lookups and a
    # no-op `if t is p: pass`) and the now-unneeded curvePlot global.
    global curvesList, cBoxList
    i = 1
    while i < cBoxList.size:
        c = np.take(cBoxList, i)
        if c.get():
            # Remove the curve and its checkbox variable in lockstep;
            # do not advance i, the next entry slid into this slot.
            curvesList = np.delete(curvesList, i)
            cBoxList = np.delete(cBoxList, i)
        else:
            i += 1
    createMins()
#-----------------------------------------------------#
# PAINEIS #
#-----------------------------------------------------#
#---- Painel DPV ----#
def painelDPV():
    """Build the DPV parameter panel inside ``w.fr_analise``.

    Creates one labelled, validated entry per DPV parameter (pre-filled
    from the ``md_dpv`` model), the current-range combobox, read-only
    derived fields (scan rate, estimated time, point count) and the start
    button, then refreshes the derived info from the current values.
    """
    global md_dpv
    destroyChildren(w.fr_analise)
    w.fr_analise.configure(text='''DPV''')
    # Tk-registered validation callback shared by every entry below.
    vcmd = w.fr_analise.register(validation.entryValidate)
    # Pre-create the entries whose values feed validation.updateInfo.
    w.et_PInicio = tk.Entry(w.fr_analise)
    w.et_PFim = tk.Entry(w.fr_analise)
    w.et_PPasso = tk.Entry(w.fr_analise)
    w.et_tPasso = tk.Entry(w.fr_analise)
    # --- Initial potential (V) ---
    w.lb_PInicio = tk.Label(w.fr_analise, anchor="w")
    w.lb_PInicio.place(relx=0.053, y=17, height=21, width=91
            , bordermode='ignore')
    es.lbStyle(w.lb_PInicio)
    w.lb_PInicio.configure(text='''Pot. Inicial (V)''')
    w.et_PInicio.configure(validate="key")
    w.et_PInicio.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
    w.et_PInicio.place(relx=0.59, y=18, height=20, width=77
            , bordermode='ignore')
    es.etStyle(w.et_PInicio)
    ctrl.validation.entryInsert(w.et_PInicio, md_dpv.pIni)
    # --- Final potential (V) ---
    w.lb_PFim = tk.Label(w.fr_analise, anchor="w")
    w.lb_PFim.place(relx=0.053, y=43, height=21, width=91
            , bordermode='ignore')
    es.lbStyle(w.lb_PFim)
    w.lb_PFim.configure(text='''Pot. Final (V)''')
    w.lb_PFim.configure(width=71)
    w.et_PFim.configure(validate="key")
    w.et_PFim.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
    w.et_PFim.place(relx=0.59, y=44, height=20, width=77
            , bordermode='ignore')
    es.etStyle(w.et_PFim)
    ctrl.validation.entryInsert(w.et_PFim, md_dpv.pFim)
    # --- Potential step (V) ---
    w.lb_PPasso = tk.Label(w.fr_analise, anchor="w")
    w.lb_PPasso.place(relx=0.053, y=69, height=21, width=91
            , bordermode='ignore')
    es.lbStyle(w.lb_PPasso)
    w.lb_PPasso.configure(text='''Pot. Passo (V)''')
    w.lb_PPasso.configure(width=81)
    w.et_PPasso.configure(validate="key")
    w.et_PPasso.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
    w.et_PPasso.place(relx=0.59, y=70, height=20, width=77
            , bordermode='ignore')
    es.etStyle(w.et_PPasso)
    ctrl.validation.entryInsert(w.et_PPasso, md_dpv.pPas)
    # --- Pulse potential (V) ---
    w.lb_PPulso = tk.Label(w.fr_analise, anchor="w")
    w.lb_PPulso.place(relx=0.053, y=95, height=21, width=91
            , bordermode='ignore')
    es.lbStyle(w.lb_PPulso)
    w.lb_PPulso.configure(text='''Pot. Pulso (V)''')
    w.et_PPulso = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
    w.et_PPulso.place(relx=0.59, y=96, height=20, width=77
            , bordermode='ignore')
    es.etStyle(w.et_PPulso)
    ctrl.validation.entryInsert(w.et_PPulso, md_dpv.pPul)
    # --- Pulse time (s) ---
    w.lb_TPulso = tk.Label(w.fr_analise, anchor="w")
    w.lb_TPulso.place(relx=0.053, y=121, height=21, width=91
            , bordermode='ignore')
    es.lbStyle(w.lb_TPulso)
    w.lb_TPulso.configure(text='''Tem. Pulso (s)''')
    w.lb_TPulso.configure(width=91)
    w.et_TPulso = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
    w.et_TPulso.place(relx=0.59, y=122, height=20, width=77
            , bordermode='ignore')
    es.etStyle(w.et_TPulso)
    ctrl.validation.entryInsert(w.et_TPulso, md_dpv.tPul)
    # --- Step time (s) ---
    w.lb_tPasso = tk.Label(w.fr_analise, anchor="w")
    w.lb_tPasso.place(relx=0.053, y=147, height=21, width=91
            , bordermode='ignore')
    es.lbStyle(w.lb_tPasso)
    w.lb_tPasso.configure(text='''Tem. Passo (s)''')
    w.et_tPasso.configure(validate="key")
    w.et_tPasso.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
    w.et_tPasso.place(relx=0.59, y=148, height=20, width=77
            , bordermode='ignore')
    es.etStyle(w.et_tPasso)
    ctrl.validation.entryInsert(w.et_tPasso, md_dpv.tPas)
    # --- Equilibration time (s) ---
    w.lb_tEquil = tk.Label(w.fr_analise, anchor="w")
    w.lb_tEquil.place(relx=0.053, y=173, height=21, width=110
            , bordermode='ignore')
    es.lbStyle(w.lb_tEquil)
    w.lb_tEquil.configure(text='''Tem. equilíbrio (s)''')
    w.et_tEquil = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
    w.et_tEquil.place(relx=0.59, y=174, height=20, width=77
            , bordermode='ignore')
    es.etStyle(w.et_tEquil)
    ctrl.validation.entryInsert(w.et_tEquil, md_dpv.tEqu)
    # --- Current range combobox ---
    w.lb_currentRange = tk.Label(w.fr_analise, anchor="w")
    w.lb_currentRange.place(relx=0.053, y=199, height=21, width=91
            , bordermode='ignore')
    es.lbStyle(w.lb_currentRange)
    w.lb_currentRange.configure(text='''Int. Corrente''')
    w.cb_intCorrente = ttk.Combobox(w.fr_analise)
    w.cb_intCorrente.place(relx=0.59, y=183, height=20, width=77)
    w.cb_intCorrente.configure(values=["auto","+/- 5uA","+/- 10uA","+/- 20uA", "+/- 50uA"])
    w.cb_intCorrente.current(md_dpv.fEsc)
    # --- Read-only derived fields ---
    w.lb_sRate = tk.Label(w.fr_analise, anchor="w")
    w.lb_sRate.place(relx=0.053, y=225, height=21, width=110
            , bordermode='ignore')
    es.lbStyle(w.lb_sRate)
    w.lb_sRate.configure(text='''SRate (V/s)''')
    w.et_sRate = tk.Entry(w.fr_analise, state="disabled")
    w.et_sRate.place(relx=0.59, y=226, height=20, width=77
            , bordermode='ignore')
    w.lb_tEstimado = tk.Label(w.fr_analise, anchor="w")
    w.lb_tEstimado.place(relx=0.053, y=251, height=21, width=110
            , bordermode='ignore')
    es.lbStyle(w.lb_tEstimado)
    w.lb_tEstimado.configure(text='''Tem. Estimado (s)''')
    w.et_tEstimado = tk.Entry(w.fr_analise, state="disabled")
    w.et_tEstimado.place(relx=0.59, y=252, height=20, width=77
            , bordermode='ignore')
    w.lb_nPontos = tk.Label(w.fr_analise, anchor="w")
    w.lb_nPontos.place(relx=0.053, y=277, height=21, width=110
            , bordermode='ignore')
    es.lbStyle(w.lb_nPontos)
    w.lb_nPontos.configure(text='''Nº Pontos''')
    w.et_nPontos = tk.Entry(w.fr_analise, state="disabled")
    w.et_nPontos.place(relx=0.59, y=278, height=20, width=77
            , bordermode='ignore')
    # --- Start button ---
    w.btn_dpv = tk.Button(w.fr_analise)
    w.btn_dpv.place(relx=0.063, y=315, height=24, relwidth=0.88
            , bordermode='ignore')
    es.btnStyle(w.btn_dpv)
    w.btn_dpv.configure(text='''Iniciar''')
    w.btn_dpv.configure(width=167)
    w.btn_dpv.bind('<ButtonRelease-1>',lambda e:btn_iniciar(e))
    # Refresh scan rate / estimated time / point count from current values.
    validation.updateInfo(float(w.et_PInicio.get()), float(w.et_PFim.get()), float(w.et_PPasso.get()), float(w.et_tPasso.get()))
#---- Operações de tratamento da curva ----#
def op_frame2param(frName, p1Name, p1Value, p2Name, p2Value, callback):
    """Rebuild the analysis frame as a generic two-parameter operation panel.

    frName titles the frame; p1Name/p1Value and p2Name/p2Value label and
    pre-fill the two validated entry widgets; callback is the curve-processing
    function invoked (through ``aplicar``) when the "Aplicar" button is
    released.

    NOTE(review): relies on module-level globals ``w`` (widget tree), ``es``
    (style helpers), ``validation``/``ctrl`` and ``destroyChildren`` — confirm
    against the rest of the module.
    """
    # Clear widgets left over from the previously selected operation.
    destroyChildren(w.fr_analise)
    w.fr_analise.configure(text=frName)
    # Tk validation hook, fired on every keystroke in the entries below.
    vcmd = w.fr_analise.register(validation.entryValidate)
    # --- first parameter: label + validated, pre-filled entry ---
    w.lb_Param1 = tk.Label(w.fr_analise, anchor="w")
    w.lb_Param1.place(relx=0.053, y=17, height=21, width=91
        , bordermode='ignore')
    es.lbStyle(w.lb_Param1)
    w.lb_Param1.configure(text=p1Name)
    w.et_Param1 = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
    w.et_Param1.place(relx=0.59, y=18, height=20, width=74
        , bordermode='ignore')
    es.etStyle(w.et_Param1)
    w.et_Param1.configure(width=74)
    ctrl.validation.entryInsert(w.et_Param1, p1Value)
    # --- second parameter: label + validated, pre-filled entry ---
    w.lb_Param2 = tk.Label(w.fr_analise, anchor="w")
    w.lb_Param2.place(relx=0.053, y=43, height=21, width=91
        , bordermode='ignore')
    es.lbStyle(w.lb_Param2)
    w.lb_Param2.configure(text=p2Name)
    w.et_Param2 = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
    w.et_Param2.place(relx=0.59, y=44, height=20, width=74
        , bordermode='ignore')
    es.etStyle(w.et_Param2)
    ctrl.validation.entryInsert(w.et_Param2, p2Value)
    # --- "Aplicar" button: runs callback on the current entry values ---
    w.btn_Apply = tk.Button(w.fr_analise)
    w.btn_Apply.place(relx=0.063, y=315, height=24, relwidth=0.88
        , bordermode='ignore')
    es.btnStyle(w.btn_Apply)
    w.btn_Apply.configure(text='''Aplicar''')
    w.btn_Apply.configure(width=167)
    # Entry values are read lazily at click time, not at frame-build time.
    w.btn_Apply.bind('<ButtonRelease-1>',lambda x:aplicar(callback, (w.et_Param1.get(), w.et_Param2.get())))
def fd_PEAK():
    """Find and mark the peak of the currently selected curve.

    Scatters the peak point on the active subplot (``spAt``), redraws the
    canvas, and reports the peak's potential/current in the info label.
    Only runs when exactly one curve is selected: ``curvePlot.size == 2``
    (the array holds a placeholder plus the selected curve).
    """
    global curvePlot, spAt, cnvAt
    if curvePlot.size == 2:
        # Element 0 is a placeholder; element 1 is the selected curve object.
        cv = np.take(curvePlot, 1)
        i = ctrl.operations.findPeak(cv.curveY)
        spAt.scatter(cv.curveX[i],cv.curveY[i])
        cnvAt.draw()
        # Report peak coordinates: potential (V) to 4 decimals, current (uA) to 3.
        w.lb_ConnInfo.configure(text="PICO\nPotencial = "+str(float("{0:.4f}".format(cv.curveX[i])))+"V\nCorrente = "+str(float("{0:.3f}".format(cv.curveY[i])))+"uA")
    elif curvePlot.size > 2:
        # Multiple curves selected — joined curves are not supported yet.
        w.lb_ConnInfo.configure(text="Ainda não é possível\nanalisar curvas unidas")
    elif curvePlot.size < 2:
        # Nothing selected yet.
        w.lb_ConnInfo.configure(text="Selecione uma curva")
def aplicar(callback, args):
    """Apply a processing *callback* to the selected curve and plot the result.

    Builds a new curve named ``<original>_<callback name>`` from the
    transformed Y data, makes it the current plot, and appends it to the
    global curve list. Requires exactly one selected curve
    (``curvePlot.size == 2``).
    """
    global curvePlot, curvesList
    if curvePlot.size == 2:
        # Element 0 is a placeholder; element 1 is the selected curve object.
        c = np.take(curvePlot, 1)
        y = callback(c.curveY, *args)
        # New curve keeps the original X axis; only Y is transformed.
        c2 = md.curve(c.curveName+"_"+callback.__name__, c.curveX, y)
        c2.curveX = c.curveX
        c2.curveY = y
        # Replace the current selection with the freshly computed curve.
        curvePlot = np.ndarray([])
        curvePlot = np.append(curvePlot, c2, axis=None)
        drawCurve()
        curvesList = np.append(curvesList, c2, axis=None)
        createMins()
    elif curvePlot.size > 2:
        # Multiple curves selected — joined curves are not supported yet.
        w.lb_ConnInfo.configure(text="Ainda não é possível\nanalisar curvas unidas")
    elif curvePlot.size < 2:
        # Nothing selected yet.
        w.lb_ConnInfo.configure(text="Selecione uma curva")
""" apagar filhos de um frame
for child in infoFrame.winfo_children():
child.destroy()
""" | StarcoderdataPython |
1659020 | <gh_stars>10-100
############################################################################
# Copyright (c) 2015 Saint Petersburg State University
# Copyright (c) 2011-2014 Saint Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import os
import itertools
# There exists pyfasta package -- http://pypi.python.org/pypi/pyfasta/
# Use it !
def get_lengths_from_fastafile(filename):
    """
    Gets filename of FASTA-file.
    Returns list of lengths of sequences in FASTA-file.
    """
    lengths = []
    cur_len = 0
    # Context manager so the handle is always released (the original
    # relied on the garbage collector to close the file).
    with open(filename) as fastafile:
        for line in fastafile:
            if line[0] == '>':
                if cur_len:  # not the first sequence in FASTA
                    lengths.append(cur_len)
                    cur_len = 0
            else:
                cur_len += len(line.strip())
    # Length of the final sequence (0 for an empty file, as before).
    lengths.append(cur_len)
    return lengths
def split_fasta(filename, outputdir):
    """
    Gets filename of FASTA-file and directory to output.
    Creates separate FASTA-files for each sequence in FASTA-file.
    Returns nothing.
    Oops, similar to: pyfasta split --header "%(seqid).fasta" original.fasta
    """
    if not os.path.isdir(outputdir):
        os.mkdir(outputdir)
    out_file = None
    try:
        # 'with' closes the input even on error; the original leaked it.
        with open(filename) as in_file:
            for line in in_file:
                if line[0] == '>':
                    # New record: close the previous output file and open a
                    # fresh one named after the sequence header.
                    if out_file:
                        out_file.close()
                    out_file = open(
                        os.path.join(outputdir, line[1:].strip() + '.fa'), 'w')
                if out_file:
                    out_file.write(line)
    finally:
        if out_file:  # remains None if the input file is empty
            out_file.close()
def read_fasta(filename):
    """
    Returns list of FASTA entries (in tuples: name, seq).

    A '.gz' extension triggers transparent gzip decompression; *name*
    keeps the leading '>'.
    """
    res_name = []
    res_seq = []
    first = True
    seq = ''
    # Open in text mode in both cases: gzip.open defaults to binary on
    # Python 3, which would break the string comparisons below.
    if os.path.splitext(filename)[1] == ".gz":
        import gzip
        fastafile = gzip.open(filename, 'rt')
    else:
        fastafile = open(filename)
    with fastafile:
        for line in fastafile:
            if line[0] == '>':
                res_name.append(line.strip())
                if not first:
                    res_seq.append(seq)
                else:
                    first = False
                seq = ''
            else:
                seq += line.strip()
    res_seq.append(seq)
    # Materialize the pairing: Python 3's zip() is a lazy iterator, but the
    # docstring (and Python 2 behavior) promises a list.
    return list(zip(res_name, res_seq))
def write_fasta(fasta):
    """Print FASTA entries (name, seq) to stdout, wrapping sequences at 60 chars."""
    for name, seq in fasta:
        print (name)
        # range() replaces Python 2's xrange(), which is a NameError on Python 3.
        for i in range(0, len(seq), 60):
            print (seq[i:i+60])
def write_fasta_to_file(filename, fasta):
    """Append FASTA entries (name, seq) to *filename*, wrapping at 60 chars.

    NOTE: the file is opened in append mode ('a'), matching the original
    behavior — existing content is preserved.
    """
    # 'with' guarantees the handle is flushed and closed; range() replaces
    # Python 2's xrange(), which is a NameError on Python 3.
    with open(filename, 'a') as outfile:
        for name, seq in fasta:
            outfile.write(name + '\n')
            for i in range(0, len(seq), 60):
                outfile.write(seq[i:i+60] + '\n')
def comp(letter):
    """Return the complementary DNA base for *letter* (case-insensitive).

    Raises KeyError for characters outside A/C/G/T, as before.
    """
    complements = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return complements[letter.upper()]
def rev_comp(seq):
    """Return the reverse complement of DNA sequence *seq*.

    Uses a generator expression instead of itertools.imap, which was
    removed in Python 3.
    """
    return ''.join(comp(letter) for letter in seq[::-1])
def remove_nonACGT(seq):
    """Return *seq* with every character that is not A, C, G or T removed.

    The original called string.join(), which was never imported here and
    was removed in Python 3; ''.join() is the modern equivalent.
    """
    return ''.join(c for c in seq if c in 'ACGT')
| StarcoderdataPython |
# Demonstrates that `del` removes the *name binding*, not just the contents.
t1 = (5,6,2,1)
del t1
print(t1)  # NOTE: raises NameError — t1 no longer exists after `del`
| StarcoderdataPython |
1665328 | <gh_stars>1-10
''' Handles and formats chat events '''
class Handler():
    '''Dispatches incoming chat events to a per-type formatter and prints them.'''

    def __init__(self, config, chat):
        """Store config/chat handles and map event types to their handlers.

        Parameters
        ----------
        config : object
            Settings object; only its CHATDEBUG flag is read here.
        chat : object
            Chat connection used to send replies (e.g. the '!ping' response).
        """
        self.config = config
        self.event_types = {
            'reply': self.type_reply, 'event': self.type_event,
            'method': self.type_method, 'system': self.type_system}
        # Suppresses duplicate "poll started" notices until the poll ends.
        self.poll_switch = True
        self.chat = chat

    def formatting(self, data):
        '''
        Checks the event type and calls the function
        relating to that event type.
        '''
        func = self.event_types[data['type']]
        func(data)
        if self.config.CHATDEBUG:
            print(data)

    def type_reply(self, data):
        '''Handles 'reply' type data: authentication results and raw replies.'''
        if 'data' in data:
            if 'authenticated' in data['data']:
                if data['data']['authenticated']:
                    print('Authenticated with the server')
                else:
                    print('Authenticated Failed, Chat log restricted')
            else:
                print('Server Reply: {}'.format(str(data)))
        else:
            # Replies without a 'data' payload carry an 'error' field.
            print('Server Reply: {}'.format(str(data['error'])))

    def type_event(self, data):
        '''Handles 'event' messages: joins/leaves, chat, whispers and polls.'''
        event_string = {
            'WelcomeEvent': 'Connected to the channel chat...',
            'UserJoin': '{} has joined the channel.',
            'UserLeave': '{} has left the channel.',
            'ChatMessage': '{user} : {msg}',
            'whisper': '{user} → {target} : {msg}',
            'me': '{user} {msg}',
            'PollStart': '{} has started a poll',
            'PollEnd': 'The poll started by {} has ended'}
        if data['event'] == 'WelcomeEvent':
            print(event_string[data['event']])
        elif data['event'] == 'UserJoin' or data['event'] == 'UserLeave':
            # Events may arrive with username None; skip those.
            if data['data']['username'] is not None:
                print(event_string[data['event']].format(
                    data['data']['username']))
        elif data['event'] == 'PollStart':
            # Only announce the first PollStart until the poll ends.
            if self.poll_switch:
                print(event_string[data['event']].format(
                    data['data']['author']['user_name']))
                self.poll_switch = False
        elif data['event'] == 'PollEnd':
            print(event_string[data['event']].format(
                data['data']['author']['user_name']))
            self.poll_switch = True
        elif data['event'] == 'ChatMessage':
            # A message arrives as fragments; join the plain-text pieces.
            msg = ''.join(
                item["text"] for item in data['data']["message"]["message"])
            if 'whisper' in data['data']['message']['meta']:
                print(event_string['whisper'].format(
                    user=data['data']['user_name'],
                    target=data['data']['target'],
                    msg=msg))
            elif 'me' in data['data']['message']['meta']:
                print(event_string['me'].format(
                    user=data['data']['user_name'],
                    msg=msg))
            else:
                print(event_string[data['event']].format(
                    user=data['data']['user_name'],
                    msg=msg))
            if msg == '!ping':
                self.chat.message('Its ping pong time')

    def type_method(self, data):
        '''Handles 'method' messages; only reported when CHATDEBUG is set.'''
        if self.config.CHATDEBUG:
            if data['method'] == 'auth':
                print('Authenticating with the server...')
            elif data['method'] == 'msg':
                # The original code printed the same string in both branches
                # of a redundant inner CHATDEBUG check; collapsed to one print.
                print('METHOD MSG: {}'.format(str(data)))

    def type_system(self, data):
        '''Handles 'system' messages; only reported when CHATDEBUG is set.'''
        if self.config.CHATDEBUG:
            print('SYSTEM MSG: {}'.format(str(data['data'])))
| StarcoderdataPython |
1664885 | <gh_stars>1-10
from metrics.base_classification_scorer_factory import BaseClassificationScorerFactory
from metrics.result_scorer_auc_macro import ResultScorerAucMacro
from metrics.result_scorer_f1_macro import ResultScorerF1Macro
class ResultScorerAucMacroFactory(BaseClassificationScorerFactory):
    """
    Factory for the macro-averaged AUC result scorer.
    """
    def get(self):
        # Return a fresh scorer instance that computes macro-averaged AUC.
        return ResultScorerAucMacro()
| StarcoderdataPython |
4834806 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for magnum-driver-manage."""
import sys
from cliff import app
from cliff import commandmanager
from cliff import lister
import magnum.conf
from magnum.drivers.common import driver
from magnum import version
CONF = magnum.conf.CONF
class DriverList(lister.Lister):
    """List the cluster-driver templates registered via entry points."""

    def _print_rows(self, parsed_args, rows):
        # Build the column set dynamically: the --details and --paths flags
        # extend the base single-column ("Name") table.
        fields = ['name']
        field_labels = ['Name']
        if parsed_args.details:
            fields.extend(['server_type', 'os', 'coe'])
            field_labels.extend(['Server_Type', 'OS', 'COE'])
        if parsed_args.paths:
            fields.append('path')
            field_labels.append('Template Path')
        # cliff's Lister expects (column_headers, iterable_of_row_tuples).
        return field_labels, [tuple([row[field] for field in fields])
                              for row in rows]

    def get_parser(self, prog_name):
        """Extend the base parser with the --details/--paths flags."""
        parser = super(DriverList, self).get_parser(prog_name)
        parser.add_argument('-d', '--details',
                            action='store_true',
                            dest='details',
                            help=('display the cluster types provided by '
                                  'each template'))
        parser.add_argument('-p', '--paths',
                            action='store_true',
                            dest='paths',
                            help='display the path to each template file')
        return parser

    def take_action(self, parsed_args):
        """Collect one row per driver (or per cluster type with --details)."""
        rows = []
        for entry_point, cls in driver.Driver.load_entry_points():
            name = entry_point.name
            definition = cls().get_template_definition()
            template = dict(name=name, path=definition.template_path)
            if parsed_args.details:
                # One row per (driver, cluster type) combination.
                for cluster_type in cls().provides:
                    row = dict()
                    row.update(template)
                    row.update(cluster_type)
                    rows.append(row)
            else:
                rows.append(template)
        return self._print_rows(parsed_args, rows)
class DriverCommandManager(commandmanager.CommandManager):
    """Command manager exposing the built-in driver-management commands."""

    COMMANDS = {
        "list-drivers": DriverList,
    }

    def load_commands(self, namespace):
        """Register every entry of COMMANDS; *namespace* is ignored."""
        for command_name, command_cls in self.COMMANDS.items():
            self.add_command(command_name, command_cls)
class DriverManager(app.App):
    """Cliff application wiring the driver-management CLI together."""

    def __init__(self):
        # command_manager gets namespace=None because commands come from the
        # hard-coded COMMANDS table, not from setuptools entry points.
        super(DriverManager, self).__init__(
            description='Magnum Driver Manager',
            version=version.version_info,
            command_manager=DriverCommandManager(None),
            deferred_help=True)
def main(args=None):
    """Entry point: load magnum config and run the driver-manager app.

    Parameters
    ----------
    args : list of str, optional
        CLI arguments; defaults to ``sys.argv[1:]`` when omitted.
    """
    cli_args = sys.argv[1:] if args is None else args
    CONF([],
         project='magnum',
         version=version.version_info.release_string())
    return DriverManager().run(cli_args)
| StarcoderdataPython |
3399829 | import torch
from functions import create_model
class Checkpoint:
    """Serializable bundle of everything needed to rebuild a trained model."""

    def __init__(self, model_state_dict, class_to_idx, arch, hidden_units):
        # Architecture name and classifier size drive model reconstruction.
        self.architecture = arch
        self.hidden_units = hidden_units
        # Learned weights and the class-name -> index mapping.
        self.model_state_dict = model_state_dict
        self.class_to_idx = class_to_idx
def save_checkpoint(model, class_to_idx, save_directory, arch, hidden_units):
    """Serialize the model's weights plus rebuild metadata to *save_directory*."""
    bundle = Checkpoint(model.state_dict(), class_to_idx, arch, hidden_units)
    torch.save(bundle, save_directory)
def load_checkpoint(checkpoint_directory):
    """Rebuild a model from a checkpoint file written by save_checkpoint."""
    ckpt = torch.load(checkpoint_directory)
    # Recreate the architecture first, then restore weights and class mapping.
    model = create_model(ckpt.architecture, ckpt.hidden_units)
    model.load_state_dict(ckpt.model_state_dict)
    model.class_to_idx = ckpt.class_to_idx
    return model
| StarcoderdataPython |
1620445 | <reponame>voytekresearch/omapping<filename>om/meg/group.py
"""MEG-DATA Analysis Module - Group"""
import os
import pickle
import datetime
import numpy as np
import scipy.io as sio
from scipy.stats.stats import pearsonr
from om.meg.single import MegSubj
from om.core.osc import check_bands
from om.core.errors import DataNotComputedError, InconsistentDataError, UnknownDataTypeError
###################################################################################################
###################################################################################################
class MegGroup(MegSubj):
    """A class to store OMEGA data from multiple subjects.

    Holds all oscillations, regardless of spatial location.

    Attributes
    ----------
    n_subjs : int
        The number of subjects included in the group data.
    subjs : list of int
        List of the subject numbers included in current group data.
    bands : Osc() object
        Stores labels and band definitions of oscillation bands.
    n_oscs_tot : int
        Total number of oscillations found across all subjects.
    comment : str
        A note about the data, label about data that is loaded.
    gr_oscs : dict
        All oscillations, in bands, for all subjects [n_verts, n_oscs, n_subjs].
    osc_probs : dict
        Oscillation probability for each oscillation band, for each vertex.
    osc_pow_ratios : dict
        Oscillation power ratios for each oscillation band, for each vertex.
    osc_scores : dict
        Oscillation scores for each oscillation band, for each vertex.
    vert_exponents : 2d array
        Aperiodic exponent values for each subject, at each vertex [n_verts, n_subjs].
    exponent_gr_avg : 1d array
        Average aperiodic exponent values across subjects for each vertex.
    osc_prob_done : boolean
        Whether oscillation probability has been calculated.
    osc_power_done : boolean
        Whether oscillation power ratio has been calculated.
    osc_score_done : boolean
        Whether oscillation score has been calculated.
    """

    def __init__(self, db, osc):
        """Initialize object with omegamappin database, and oscillation definitions.

        Parameters
        ----------
        db : OMDB() object
            Database object for omegamappin project.
        osc : Osc() object
            Object to store oscillatory band definitions.
        """

        # Initialize from MegSubj() object
        MegSubj.__init__(self, db, 'both')

        # Initialize groups subject variables
        self.n_subjs = int()
        self.subjs = []

        # Update variable types for demographic
        self.sex = list()
        self.age = np.array([])

        # Set definition of oscillation bands used for the group
        self.bands = osc.bands

        # Initialize count of total oscillations, across all subjects
        self.n_oscs_tot = int()

        # Set comment for data, can be used for plotting
        self.comment = 'Group'

        # Initialize dictionary for oscillation band data
        self.gr_oscs = dict()

        # Initilaize dictionary to store oscillation probabilities
        self.osc_probs = dict()

        # Initialize dict to store oscillation power ratios
        self.osc_pow_ratios = dict()

        # Initialize to store oscillation scores
        self.osc_scores = dict()

        # Initialize vars to store exponent values
        self.vert_exponents = np.array([])
        self.exponent_gr_avg = np.array([])

        # Set booleans for what has been run
        self.osc_prob_done = False
        self.osc_power_done = False
        self.osc_score_done = False

    def __len__(self):
        """Group length is the number of subjects loaded."""
        return self.n_subjs

    def add_subject(self, new_subj, add_vertex_oscs=False, add_vertex_exponents=False,
                    add_all_oscs=False, add_vertex_bands=False, add_peak_freqs=False,
                    add_demo=False):
        """Adds a new subject to the MegGroup object.

        Parameters
        ----------
        new_subj : MegSubj() Object
            MEG subject (instance of MegSubj)
        add_vertex_oscs : boolean, optional (default: False)
            Whether to add all oscillations, across vertices.
        add_vertex_exponents : boolean, optional (default: False)
            Whether to add the aperiodic exponents.
        add_all_oscs : boolean, optional (default: False)
            Whether to add the vectors of all oscillations, collapsed across vertices.
        add_vertex_bands : boolean, optional (default: False)
            Whether to add the oscillation band data, across vertices.
        add_peak_freqs : boolean, optional (default: False)
            Whether to add peak frequencies.
        add_demo : boolean, optional (default: False)
            Whether to add demographic information.
        """

        # Check if subject has data
        if not new_subj.has_data:
            raise DataNotComputedError("Empty meg data object. Cannot add data.")

        # Add oscillations per vertex
        if add_vertex_oscs:

            # Check new subject has relevant data
            if not new_subj.has_vertex_oscs:
                raise DataNotComputedError('New subject does not have vertex osc data.')

            if not self.has_data:

                # Add data to group object
                self.centers = new_subj.centers
                self.powers = new_subj.powers
                self.bws = new_subj.bws

                # Update that group contains this data
                self.has_vertex_oscs = True

            else:

                # Check that group has data defined
                if not self.has_vertex_oscs:
                    raise DataNotComputedError('MEG Group does not include vertex osc data.')

                # Add data to group object
                self.centers = np.dstack([self.centers, new_subj.centers])
                self.powers = np.dstack([self.powers, new_subj.powers])
                self.bws = np.dstack([self.bws, new_subj.bws])

        # Add exponents per vertex
        if add_vertex_exponents:

            # Check new subject has relevant data
            if not new_subj.has_vertex_exponents:
                raise DataNotComputedError('New subject does not have vertex exponent data.')

            if not self.has_data:

                # Add data to group object
                self.vert_exponents = new_subj.exponents

                # Update that group contains this data
                self.has_vertex_exponents = True

            else:

                # Check that group has data defined
                if not self.has_vertex_exponents:
                    raise DataNotComputedError('MEG Group does not include vertex exponent data.')

                # Add data to group object
                self.vert_exponents = np.hstack([self.vert_exponents, new_subj.exponents])

        # Add All-Osc Data
        if add_all_oscs:

            # Check that new subject has all_osc data available
            if not new_subj.has_all_osc:
                raise DataNotComputedError('New subject does not have all osc data.')

            # Check that group has data defined
            if self.has_data:
                if not self.has_all_osc:
                    raise DataNotComputedError('MEG Group does not include all osc data.')

            # Add oscillation parameters to current data
            self.centers_all = np.append(self.centers_all, new_subj.centers_all)
            self.bws_all = np.append(self.bws_all, new_subj.bws_all)
            self.powers_all = np.append(self.powers_all, new_subj.powers_all)
            self.exponents = np.append(self.exponents, new_subj.exponents)

            # Add centers hist
            self.centers_hist.append(new_subj.centers_hist)

            # Update count of total number of oscillations
            self.n_oscs = np.append(self.n_oscs, new_subj.n_oscs)
            self.n_oscs_tot = len(self.centers_all)

            # If first subject, update what kind of data is loaded
            if not self.has_data:
                self.has_all_osc = True

        # Add band-specific data
        if add_vertex_bands:

            # Check that new subject has vertex bands data
            if not new_subj.has_vertex_bands:
                raise DataNotComputedError('New subject does not have vertex band data.')

            # Check that new subject has same bands defined
            _ = check_bands([self.bands, new_subj.bands])

            # Add new subject to group oscillations
            if not self.has_data:

                # Add data to group object
                self.gr_oscs = new_subj.oscs

                # Update that group contains this data
                self.has_vertex_bands = True

            else:

                # Check that group has data defined
                if not self.has_vertex_bands:
                    raise DataNotComputedError('MEG Group does not include vertex band data.')

                # Add data to group object
                for band in self.bands:
                    self.gr_oscs[band] = np.dstack([self.gr_oscs[band], new_subj.oscs[band]])

        # Add oscillation peak data
        if add_peak_freqs:

            # Check that new subject has peak frequency data
            if not new_subj.has_peak_freqs:
                raise DataNotComputedError('New subject does not have peak freq data.')

            # Check that new subject has same bands defined
            _ = check_bands([self.bands, new_subj.bands])

            # Add new subject to peak frequencies
            if not self.has_data:

                # Add data to group object
                self.peaks = new_subj.peaks

                # Update that group contains this data
                self.has_peak_freqs = True

            else:

                # Check that group has data defined
                if not self.has_peak_freqs:
                    raise DataNotComputedError('MEG Group does not include peak freq data.')

                # Add data to group object
                for band in self.bands:
                    self.peaks[band] = np.append(self.peaks[band], new_subj.peaks[band])

        # Add demographic data
        if add_demo:

            # Check that incoming subject has demo data
            if not new_subj.has_demo:
                raise DataNotComputedError('Demographic data not available')

            # Check that group has data defined
            if self.has_data:
                if not self.has_demo:
                    raise DataNotComputedError('MEG Group does not include demo data.')

            # Add demographic data to group object
            self.sex.append(new_subj.sex)
            self.age = np.append(self.age, new_subj.age)

            # If first subject, update what kind of data is loaded
            if not self.has_data:
                self.has_demo = True

        # If first subject, update that object has data
        if self.n_subjs == 0:
            self.has_data = True

        # Update subj count and subject number list
        self.n_subjs += 1
        self.subjs.append(new_subj.subnum)

        # Check consistency of group data
        self.check_consistency()

    def check_consistency(self):
        """Check for consistency of data loaded in group object."""

        n_vertices = 7501

        if self.n_subjs != len(self.subjs):
            raise InconsistentDataError('Discrepancy in subject numbers.')

        if self.has_vertex_oscs:
            if self.n_subjs > 1:
                assert self.centers.shape == (n_vertices, 8, self.n_subjs)
                assert self.powers.shape == (n_vertices, 8, self.n_subjs)
                assert self.bws.shape == (n_vertices, 8, self.n_subjs)

        if self.has_vertex_exponents:
            assert self.vert_exponents.shape == (n_vertices, self.n_subjs)

        if self.has_all_osc:
            pass

        if self.has_vertex_bands:
            pass

        if self.has_peak_freqs:
            pass

        if self.has_demo:
            pass

    def group_exponent(self, avg='mean'):
        """Calculates the average exponent value for each vertex, across subjects.

        Parameters
        ----------
        avg : {'mean', 'median'}, optional
            How to average across the group.
        """

        # Calculate the average exponent value per vertex.
        # NOTE: equality (==) replaces the original identity checks
        # ('avg is "mean"'), which are implementation-dependent for strings.
        if avg == 'mean':
            self.exponent_gr_avg = np.mean(self.vert_exponents, 1)
        elif avg == 'median':
            self.exponent_gr_avg = np.median(self.vert_exponents, 1)

    def osc_prob(self):
        """Calculates the probability of an osc in a specific band.

        This is done per vertex, across subjects.
        """

        # Check if vertex data is set
        if not self.has_vertex_bands:
            raise DataNotComputedError('Vertex oscillation bands data not available.')

        # For each oscillation band, compute the probability of an oscillation in that band
        for band in self.bands:
            self.osc_probs[band] = _osc_prob(self.gr_oscs[band])

        # Update boolean that oscillation probability has been computed
        self.osc_prob_done = True

    def osc_power(self):
        """Calculate the oscillation power ratio for each frequency band."""

        # Check if vertex data is set
        if not self.has_vertex_bands:
            raise DataNotComputedError('Vertex oscillation bands data not available.')

        # Compute power ratio for each oscillation band
        for band in self.bands:
            self.osc_pow_ratios[band] = _osc_pow_ratio(self.gr_oscs[band])

        # Set boolean that oscillation power has been computed.
        self.osc_power_done = True

    def osc_score(self):
        """Calculate the oscillation score for each frequency band.

        The oscillation score is the per-vertex product of the oscillation
        probability and the oscillation power ratio.
        """

        # Check if oscillation probability & power ratios are calculated.
        #  Can not proceed if they are not.
        _ = self._get_map_type('prob')
        _ = self._get_map_type('power')

        # Compute oscillation score for each oscillation band
        for band in self.bands:
            self.osc_scores[band] = self.osc_pow_ratios[band] * self.osc_probs[band]

        # Set boolean that oscillation score has been computed.
        self.osc_score_done = True

    def osc_map_corrs(self, map_type):
        """Calculates the correlations between oscillation probabilities or scores.

        Parameters
        ----------
        map_type : {'prob', 'score', 'power'}
            Which map data type to correlate.

        Returns
        -------
        corrs_mat : 2d array
            Correlation R-values matrix, across all oscillation bands.
        ps_mat : 2d array
            Correlations p-values matrix, across all oscillation bands.
        sorted_bands : list of str
            Oscillation band labels, sorted into order.
        """

        # Check how many oscillation bands are defined
        n_bands = len(self.bands)

        # Initialize matrices to store correlation results
        corrs_mat = np.zeros([n_bands, n_bands])
        ps_mat = np.zeros([n_bands, n_bands])

        # Get oscillation bands in order
        sorted_bands, sort_inds = _band_sort(self.bands)

        # Set which map to run
        dat = self._get_map_type(map_type)

        # Loop through all bands, computing correlations between them
        for i in range(n_bands):
            for j in range(n_bands):
                corrs_mat[i, j], ps_mat[i, j] = pearsonr(
                    dat[sorted_bands[i]],
                    dat[sorted_bands[j]])

        # Set diagonals to zero - where band is correlated with itself
        np.fill_diagonal(corrs_mat, 0)
        np.fill_diagonal(ps_mat, 0)

        return corrs_mat, ps_mat, sorted_bands

    def calc_osc_peak_age(self):
        """Compares age and peak frequency within frequency bands.

        NOTE: ADD CHECKS THAT REQUIRED DATA HAS BEEN COMPUTED.

        Returns
        -------
        corrs_mat : 1d array
            Correlations R-values comparing age to oscillations.
        ps_mat : 1d array
            Correlations p-values from comparing age to oscillations.
        sorted_bands : list of str
            Oscillation band labels, sorted into order.
        """

        # Check how many bands there are
        n_bands = len(self.bands)

        # Initialize matrices to store correlation results
        corrs_mat = np.zeros([n_bands])
        ps_mat = np.zeros([n_bands])

        # Get oscillation bands in order
        sorted_bands, sort_inds = _band_sort(self.bands)

        # Loop through all oscillation peaks, calculation correlation with age.
        # NOTE(review): indexing sorted_bands with sort_inds here (but not in
        #  osc_map_corrs) looks like a double sort — confirm intent.
        for i in range(n_bands):
            corrs_mat[i], ps_mat[i] = pearsonr(
                self.age, self.peaks[sorted_bands[sort_inds[i]]])

        return corrs_mat, ps_mat, sorted_bands

    def save_gr_exponent(self, file_name):
        """Saves out the average group exponent results.

        Parameters
        ----------
        file_name : str
            File name to save group exponent file as.
        """

        # Set up
        pickle_file_name = file_name + '.p'
        pickle_save_name = os.path.join(self.db.maps_path, 'Exponents', pickle_file_name)

        # Check current time for when file is saved
        cur_time = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        # Collect data together to save out
        dat_out = dict({'dat_source': self.dat_source,
                        'exponents': self.exponent_gr_avg,
                        'n_subjs': self.n_subjs,
                        'save_time': cur_time})

        # Save out with pickle
        pickle.dump(dat_out, open(pickle_save_name, 'wb'))

    def save_map(self, map_type, file_name):
        """Save oscillation map data out to disc.

        Parameters
        ----------
        map_type : {'prob', 'score', 'power'}
            Which map data type to save out.
        file_name : str
            String to add to the file name.
        """

        # Set which map to run
        dat = self._get_map_type(map_type)

        # Check current time for when file is saved
        cur_time = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        # Set file name, and create full file path
        pickle_file_name = file_name + '_Osc_' + map_type + '.p'
        pickle_save_name = os.path.join(self.db.maps_path, 'Oscs', pickle_file_name)

        # Collect data together to save out
        dat_out = dict({'dat_source': self.dat_source,
                        'map_type': map_type,
                        'osc_dat': dat,
                        'bands': self.bands,
                        'n_subjs': self.n_subjs,
                        'save_time': cur_time})

        # Save out with pickle
        pickle.dump(dat_out, open(pickle_save_name, 'wb'))

    def set_exponent_viz(self):
        """Saves out a matfile, of the group average exponent, for visualization."""

        # Set up paths to save to
        save_name = 'Group_Exponents'
        save_file = os.path.join(self.db.viz_path, save_name)

        # Save desired outputs into a dictionary
        save_dict = {}
        save_dict['exponents'] = self.exponent_gr_avg
        save_dict['dat_source'] = self.dat_source
        save_dict['n_subjs'] = self.n_subjs
        save_dict['save_time'] = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        # Save the dicionary out to a .mat file
        sio.savemat(save_file, save_dict)

    def set_fooof_viz(self):
        """Set FOOOF features to visualize.

        TODO: not yet implemented.
        """
        pass

    def set_map_viz(self, map_type, file_name):
        """Save out an oscillation map for visualization with Brainstorm.

        Parameters
        ----------
        map_type : {'prob', 'score', 'power'}
            Which map data type to set as viz.
        file_name : str
            Label to attach to file name to be saved out.
        """

        # Set which map to run
        dat = self._get_map_type(map_type)

        # Set up the save name
        save_name = file_name + '_group_osc_' + map_type + '_viz'

        # Set up paths to save to
        save_file = os.path.join(self.db.viz_path, save_name)

        # Initialize dictionary to save out, and save basic info
        save_dict = {}
        save_dict['dat_source'] = self.dat_source
        save_dict['map_type'] = map_type
        save_dict['n_subjs'] = self.n_subjs
        save_dict['save_time'] = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        # Add maps to save dictionary
        for band in self.bands:
            save_dict[band.lower() + '_' + map_type] = dat[band]

        # Save out the dictionary to a mat file
        sio.savemat(save_file, save_dict)

    def _get_map_type(self, map_type):
        """Pull out specific MEG map type.

        Parameters
        ----------
        map_type : {'prob', 'score', 'power'}
            Oscillation map type to pull out.

        Raises
        ------
        DataNotComputedError
            If the requested map has not been computed yet.
        UnknownDataTypeError
            If map_type is not one of the known map types.
        """

        # NOTE: equality (==) replaces the original identity checks
        #  ('map_type is "prob"'), which are implementation-dependent for strings.

        # Check if requested map is prob, and if it is calculated
        if map_type == 'prob':

            # Check if oscillation probabilities have been calculated.
            if not self.osc_prob_done:
                raise DataNotComputedError("Oscillation probability not computed - can not proceed.")
            dat = self.osc_probs

        # Check if requested map is score, and if it is calculated
        elif map_type == 'score':

            # Check if oscillation score has been calculated.
            if not self.osc_score_done:
                raise DataNotComputedError("Oscillation score not computed - can not proceed.")
            dat = self.osc_scores

        # Check if requested map is power ratio, and if it is calculated
        elif map_type == 'power':

            # Check if oscillation power map has been calculated.
            if not self.osc_power_done:
                raise DataNotComputedError("Oscillation power map not computed - can not proceed.")
            dat = self.osc_pow_ratios

        # Raise an error if requested type doesn't match a known map type
        else:
            raise UnknownDataTypeError('Map type not understood.')

        return dat
#########################################################################################################
################################## OMEGAMAPPIN - MEG GROUP - FUNCTIONS ##################################
#########################################################################################################
def freq_corr_group(centers, f_win, f_step=1):
    """Correlate oscillation probability between adjacent frequency windows.

    Parameters
    ----------
    centers : 3d array [n_verts, n_slots, n_subjs]
        Center frequencies of oscillations across all vertices & subjects.
    f_win : float
        Size of frequency window to use.
    f_step : float
        Increment to step by.

    Returns
    -------
    corr_vec : 1d array
        Correlation coefficients between all adjacent frequency bands.
    p_vec : 1d array
        P-values for the correlations between adjacent frequency bands.
    freqs : 1d array
        Start frequency of each window; entry 'f' reflects the correlation
        of [f:f+f_win, f+f_win:f+2*f_win].
    """

    # Window offset, in index units, between a band and its neighbor.
    ind_step = int(f_win / f_step)
    n_vertex, n_slots, n_subj = np.shape(centers)

    # Window start frequencies span 3 Hz up to (40 - f_win) Hz.
    freqs = np.arange(3, 40 - f_win, f_step)
    n_freqs = len(freqs)

    # Per-vertex count of subjects with >= 1 oscillation in each window.
    prob_mat = np.zeros([n_vertex, n_freqs])
    for vert in range(n_vertex):
        for subj in range(n_subj):
            subj_cens = centers[vert, :, subj]
            for f_ind, freq in enumerate(freqs):
                if len(_get_all_osc(subj_cens, freq, freq + f_win)) != 0:
                    prob_mat[vert, f_ind] += 1

    # Convert counts into per-window probabilities.
    prob_mat = prob_mat / n_subj

    # Correlate each window's probabilities with the window ind_step later.
    corr_vec = np.zeros([n_freqs - 1])
    p_vec = np.zeros([n_freqs - 1])
    for f_ind in range(n_freqs - ind_step):
        corr_vec[f_ind], p_vec[f_ind] = pearsonr(
            prob_mat[:, f_ind], prob_mat[:, f_ind + ind_step])

    # Drop the last start frequency, matching the correlation vector layout.
    return corr_vec, p_vec, freqs[:-1]
def osc_space_group(oscs, bands, verts, osc_param=0, space_param=1):
"""
Parameters
----------
oscs : dict()
xx
bands : ?
xx
verts : 2d array
Spatial coordinates for all vertices [n_verts, 3].
osc_param : ?
xx
space_param : ?
xx
Returns
-------
dat_out : 3d array
Correlation data for all subjects, all bands.
[n_subjs, n_bands, 2], where last dimension is [R-val, p-val].
labels : list of str
Labels of oscillation bands that were analyzed in dat_out.
"""
labels = list(bands.keys())
n_verts, n_bands, n_subjs = oscs[labels[0]].shape
space = verts[:, space_param]
dat_out = np.zeros(shape=(n_subjs, len(bands), 2))
for subj in range(n_subjs):
for ind, band in enumerate(bands):
cur_dat = oscs[band][:, osc_param, subj]
keep_inds = cur_dat > 0
dat_out[subj, ind, :] = pearsonr(cur_dat[keep_inds], space[keep_inds])
return dat_out, labels
#################################################################################################
############################ OMEGAMAPPIN - OM_MD - PRIVATE FUNCTIONS ############################
#################################################################################################
def _get_all_osc(centers, osc_low, osc_high):
"""Returns all the oscillations in a specified frequency band.
Parameters
----------
centers : 1d array
Vector of oscillation centers.
osc_low : int
Lower bound for frequency range.
osc_high : int
Upper bound for frequency range.
Returns
-------
osc_cens : 1d array
Osc centers in specified frequency band.
"""
# Get inds of desired oscs and pull out from input data
osc_inds = (centers >= osc_low) & (centers <= osc_high)
osc_cens = centers[osc_inds]
return osc_cens
def _osc_prob(osc_mat):
"""Takes a 3D matrix of oscillations across subjects, calculates probability of oscillation.
Parameters
----------
osc_mat : 3d array
Oscillations for each subject, [n_vertex, n_dim, n_subj].
Returns
-------
prob : 1d array
Vector with probability of given oscillation at each vertex.
"""
# Check how many vertices and subjects in group
[n_vertex, n_dim, n_subj] = np.shape(osc_mat)
# Initialize vector to store probabilities
prob = np.zeros([n_vertex])
# Loop through all vertices, calculating osc prob for each
for i in range(n_vertex):
prob[i] = (np.count_nonzero(osc_mat[i, 0, :]) / n_subj)
return prob
def _osc_pow_ratio(osc_mat):
"""Calculate the power ratio of an oscillation.
Power ratio is a score, bounded between [0, 1], reflecting power
in a given frequency band, relative to the max power in that
frequency band.
Parameters
----------
osc_mat : 3d array
Oscillations for each subject, [n_vertex, n_dim, n_subj].
Returns
-------
pow_ratio : 1d array
Vector with oscillation score of given oscillation at each vertex.
"""
# Check how many vertices and subjects in group
[n_vertex, n_dim, n_subj] = np.shape(osc_mat)
# Initialize vector to store average powers
avg_powers = np.zeros(n_vertex)
# Loop through all vertices
for vertex in range(n_vertex):
# Pull out temp vector of all oscillation powers
temp_pows = osc_mat[vertex, 1, :]
temp_pows = temp_pows[np.nonzero(temp_pows)]
# If there are oscillations get average power
if len(temp_pows) == 0:
avg_powers[vertex] = 0
else:
avg_powers[vertex] = np.mean(temp_pows)
# Get the maximum power across all vertices
max_all = max(avg_powers)
# Initialize vector to store power ratios
pow_ratio = np.zeros(n_vertex)
# Loop through all vertices, calculating power ratio
for vertex in range(n_vertex):
pow_ratio[vertex] = np.mean(osc_mat[vertex, 1, :]) / max_all
return pow_ratio
def _band_sort(osc_bands):
"""Sort oscillation dictionary into order.
Parameters
----------
osc_bands : dict
A dictionary containing the oscillation band definitions.
Returns
-------
ordered_bands : list of str
A list of the oscillation band names, in order.
sort_inds : list of int
A list of indices to sort oscillation bands.
"""
# Check how many oscillation bands there are
n_bands = len(osc_bands)
# Initialize to store names and lower bounds
band_names = []
lower_bounds = np.array([])
# Loop through, and grab name and lower bound for each band
for band in osc_bands:
band_names.append(band)
lower_bounds = np.append(lower_bounds, osc_bands[band][0])
# Get the indices to sort the lower bounds
sort_inds = np.argsort(lower_bounds)
# Use indices to sort band names into order
ordered_bands = []
ordered_bands[:] = [band_names[i] for i in sort_inds]
return ordered_bands, sort_inds
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# Local-development Django settings overlay for a BlueKing (blueapps) app.
from config import RUN_VER
if RUN_VER == 'open':
    from blueapps.patch.settings_open_saas import *  # noqa
else:
    from blueapps.patch.settings_paas_services import *  # noqa
# Local development environment
RUN_MODE = 'DEVELOP'
# Local static asset directory for the app
STATIC_URL = '/static/'
# URL of the remote static asset directory (disabled)
# REMOTE_STATIC_URL = '%sremote/' % STATIC_URL
# Celery message broker: RabbitMQ (disabled)
# BROKER_URL = 'amqp://guest:guest@localhost:5672//'
# Celery message broker: Redis
BROKER_URL = 'redis://localhost:6379/0'
DEBUG = True
# Local development database settings
# USE FOLLOWING SQL TO CREATE THE DATABASE NAMED APP_CODE
# SQL: CREATE DATABASE `framework_py` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci; # noqa: E501
# NOTE: '<PASSWORD>' is a redacted placeholder — set real credentials locally.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'bktest',
        'USER': 'root',
        'PASSWORD': '<PASSWORD>',
        'HOST': 'localhost',
        'PORT': '3306',
    },
    'cmdb_db': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'django_content_type',
        'USER': 'root',
        'PASSWORD': '<PASSWORD>',
        'HOST': 'localhost',
        'PORT': '3306',
    },
}
# Settings that cannot be shared between developers can go into a separate
# local_settings.py file, which should be added to the VCS ignore list.
try:
    from local_settings import *  # noqa
except ImportError:
    pass
| StarcoderdataPython |
1760693 | <reponame>Petr-By/qtpyvis
from .detector import Detector
from .landmarks import FacialLandmarks
| StarcoderdataPython |
3384380 | from enum import Enum
class UserGroupsEnum(Enum):
    """Closed set of user group names used for access control."""
    MODERATOR = "Moderator"
61479 | <gh_stars>1-10
from tornado import web
from tornado.log import app_log
#from jupyterhub.services.auth import HubOAuthenticated, HubOAuth
class BaseHandler(web.RequestHandler):  # HubOAuthenticated
    """Common base for this service's request handlers.

    Provides a shared logger and Jinja2 template rendering.  The commented
    HubOAuth pieces show how JupyterHub authentication would be re-enabled;
    with them disabled, no Hub-based identification is performed.
    """

    def initialize(self):
        super().initialize()
        # Expose tornado's application logger on every handler instance
        self.log = app_log
        # self.hub_auth = HubOAuth.instance(config=self.settings['traitlets_config'])

    @property
    def template_namespace(self):
        """Base variables made available to every rendered template."""
        extra = self.settings.get('template_variables', {})
        return dict(static_url=self.static_url, **extra)

    def render_template(self, name, **extra_ns):
        """Render the HTML template *name* and write it to the response.

        *extra_ns* entries override the defaults from `template_namespace`.
        """
        context = dict(self.template_namespace, **extra_ns)
        template = self.settings['jinja2_env'].get_template(name)
        self.write(template.render(**context))
192158 | import cv2
import os
import numpy as np
import pandas as pd
from scipy.ndimage import zoom
#from matplotlib import pyplot as plt
def clipped_zoom(img, zoom_factor, **kwargs):
    """Zoom into or out of ``img`` about its center, keeping the output shape.

    Zooming out pads the shrunken image with zeros; zooming in crops the
    central region before enlarging it.  Extra keyword arguments are passed
    through to :func:`scipy.ndimage.zoom`.

    Parameters
    ----------
    img : ndarray
        Input image; any trailing axes (e.g. color channels) are untouched.
    zoom_factor : float
        Magnification factor applied to the two spatial axes.

    Returns
    -------
    ndarray
        Image with the same height/width as the input.
    """
    # Identity zoom: hand back the input unchanged
    if zoom_factor == 1:
        return img

    height, width = img.shape[:2]
    # Zoom only the two spatial axes; any trailing (channel) axes keep factor 1
    per_axis = (zoom_factor,) * 2 + (1,) * (img.ndim - 2)

    if zoom_factor < 1:
        # Zooming out: place the shrunken image centered in a zero canvas
        zoomed_h = int(np.round(height * zoom_factor))
        zoomed_w = int(np.round(width * zoom_factor))
        pad_top = (height - zoomed_h) // 2
        pad_left = (width - zoomed_w) // 2
        result = np.zeros_like(img)
        result[pad_top:pad_top + zoomed_h, pad_left:pad_left + zoomed_w] = \
            zoom(img, per_axis, **kwargs)
        return result

    # Zooming in: enlarge the central crop, then trim rounding overshoot
    crop_h = int(np.round(height / zoom_factor))
    crop_w = int(np.round(width / zoom_factor))
    top = (height - crop_h) // 2
    left = (width - crop_w) // 2
    enlarged = zoom(img[top:top + crop_h, left:left + crop_w], per_axis, **kwargs)
    trim_top = (enlarged.shape[0] - height) // 2
    trim_left = (enlarged.shape[1] - width) // 2
    return enlarged[trim_top:trim_top + height, trim_left:trim_left + width]
def detect_dnn_frame(net, frame):
    """Run a detection DNN over one frame and draw boxes for confident hits.

    The frame is fed to *net* as a 300x300 blob with mean subtraction
    [104, 117, 123]; detections above a 0.7 confidence threshold are
    rectangled in green on a copy of the frame.

    Parameters
    ----------
    net : cv2.dnn network
        Loaded detection network.
    frame : ndarray
        BGR image to process (left unmodified).

    Returns
    -------
    annotated : ndarray
        Copy of the frame with detection rectangles drawn.
    bboxes : list of [x1, y1, x2, y2]
        Pixel coordinates of every confident detection.
    """
    annotated = frame.copy()
    frame_h = annotated.shape[0]
    frame_w = annotated.shape[1]
    conf_threshold = 0.7
    blob = cv2.dnn.blobFromImage(annotated, 1.0, (300, 300), [104, 117, 123], False, False)
    net.setInput(blob)
    detections = net.forward()
    bboxes = []
    # Line thickness scales with frame height
    thickness = int(round(frame_h / 150))
    for det_ind in range(detections.shape[2]):
        confidence = detections[0, 0, det_ind, 2]
        if confidence <= conf_threshold:
            continue
        # Detections are normalized [0, 1]; scale back to pixel coordinates
        x1 = int(detections[0, 0, det_ind, 3] * frame_w)
        y1 = int(detections[0, 0, det_ind, 4] * frame_h)
        x2 = int(detections[0, 0, det_ind, 5] * frame_w)
        y2 = int(detections[0, 0, det_ind, 6] * frame_h)
        bboxes.append([x1, y1, x2, y2])
        cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), thickness, 8)
    return annotated, bboxes
def show_labels(folder_path):
    """Interactively display training images with their ground-truth boxes.

    Reads ``sub-train-annotations-bbox.csv`` from *folder_path*, then for
    each annotation row loads the referenced image from
    ``<folder_path>/train``, draws its bounding box and shows it in an
    OpenCV window.  Press any key to advance to the next annotation.

    Parameters
    ----------
    folder_path : str
        Root folder containing the annotation CSV and a ``train`` sub-folder.
    """
    annot_file = os.path.join(folder_path, 'sub-train-annotations-bbox.csv')
    data = pd.read_csv(annot_file).values.tolist()
    train_folder = os.path.join(folder_path, 'train')
    for d in data:
        print(d)
        image_id = str(d[0])
        image_path = os.path.join(train_folder, image_id)
        print(image_path)
        thermal = cv2.imread(image_path)
        print(thermal.shape)
        # NOTE(review): assumes row layout [ImageID, XMin, YMin, XMax, YMax]
        # with absolute pixel coordinates, since (w, h) are used directly as
        # the rectangle's opposite corner — confirm against the CSV schema.
        (x, y, w, h) = (d[1], d[2], d[3], d[4])
        print((x, y, w, h))
        thermal = cv2.rectangle(thermal, (x, y), (w, h), (255, 0, 0), 2)
        cv2.imshow('Thermal', thermal)
        # Block until any key is pressed before showing the next annotation
        cv2.waitKey(0)
# Root folder of the dataset to inspect (alternative path kept for reference).
folder_path = 'data/CelebA/img_celeba_splitted'
# folder_path = 'data/Thermal_organized_splitted'

if __name__ == '__main__':
    # Guard the interactive viewer so importing this module has no side effects.
    show_labels(folder_path)
    # match_template(folder_path, rgb_folder_path)
| StarcoderdataPython |
9054 | <reponame>proofdock/chaos-azure
from unittest.mock import patch, MagicMock
from pdchaosazure.webapp.actions import stop, restart, delete
from tests.data import config_provider, secrets_provider, webapp_provider
@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_stop_webapp(init, fetch):
    """stop() should look up matching webapps and stop each one."""
    # Arrange: stub out the Azure client and the webapp lookup
    config = config_provider.provide_default_config()
    secrets = secrets_provider.provide_secrets_public()
    webapp = webapp_provider.default()
    mock_client = MagicMock()
    init.return_value = mock_client
    fetch.return_value = [webapp]
    query = "where resourceGroup=~'rg'"
    # Act
    stop(query, config, secrets)
    # Assert: the filter was forwarded and the matched webapp was stopped
    fetch.assert_called_with(query, config, secrets)
    mock_client.web_apps.stop.assert_called_with(webapp['resourceGroup'], webapp['name'])
@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_restart_webapp(init, fetch):
    """restart() should look up matching webapps and restart each one."""
    # Arrange: stub out the Azure client and the webapp lookup
    config = config_provider.provide_default_config()
    secrets = secrets_provider.provide_secrets_public()
    webapp = webapp_provider.default()
    mock_client = MagicMock()
    init.return_value = mock_client
    fetch.return_value = [webapp]
    query = "where resourceGroup=~'rg'"
    # Act
    restart(query, config, secrets)
    # Assert: the filter was forwarded and the matched webapp was restarted
    fetch.assert_called_with(query, config, secrets)
    mock_client.web_apps.restart.assert_called_with(webapp['resourceGroup'], webapp['name'])
@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_delete_webapp(init, fetch):
    """delete() should look up matching webapps and delete each one."""
    # Arrange: stub out the Azure client and the webapp lookup
    webapp = webapp_provider.default()
    config = config_provider.provide_default_config()
    secrets = secrets_provider.provide_secrets_public()
    mock_client = MagicMock()
    init.return_value = mock_client
    fetch.return_value = [webapp]
    query = "where resourceGroup=~'rg'"
    # Act
    delete(query, config, secrets)
    # Assert: the filter was forwarded and the matched webapp was deleted
    fetch.assert_called_with(query, config, secrets)
    mock_client.web_apps.delete.assert_called_with(webapp['resourceGroup'], webapp['name'])
| StarcoderdataPython |
1703355 | <filename>Crash/Fundamentals/code_snip/resize_live_video.py
def changeRes(width, height, capture):
    """Set the frame resolution of a live cv2.VideoCapture stream.

    Property ids 3 and 4 are OpenCV's CAP_PROP_FRAME_WIDTH and
    CAP_PROP_FRAME_HEIGHT; setting them only takes effect on live
    camera captures, not on video files.
    """
    # Live video
    capture.set(3, width)   # 3 == cv2.CAP_PROP_FRAME_WIDTH
    capture.set(4, height)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.