id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
261287 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import hashlib
import cbor
from sawtooth_sdk.processor.handler import TransactionHandler
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
LOGGER = logging.getLogger(__name__)
VALID_VERBS = 'open', 'delete', 'query', 'transfer'
MIN_VALUE = 0
MAX_VALUE = 4294967295
MAX_NAME_LENGTH = 20
FAMILY_NAME = 'simple'
SIMPLE_ADDRESS_PREFIX = hashlib.sha512(
FAMILY_NAME.encode('utf-8')).hexdigest()[0:6]
def make_simple_address(name):
    """Build the 70-char state address for *name*.

    The address is the 6-char family prefix followed by the last 64 hex
    digits of sha512(name).
    """
    name_hash = hashlib.sha512(name.encode('utf-8')).hexdigest()
    return SIMPLE_ADDRESS_PREFIX + name_hash[-64:]
class SimpleTransactionHandler(TransactionHandler):
    """Transaction handler for the 'simple' transaction family (v1.0)."""

    @property
    def family_name(self):
        # Family name that appears in the transaction header.
        return FAMILY_NAME

    @property
    def family_versions(self):
        # Only version 1.0 payloads are accepted.
        return ['1.0']

    @property
    def namespaces(self):
        # All state addresses written by this family share this 6-char prefix.
        return [SIMPLE_ADDRESS_PREFIX]

    def apply(self, transaction, context):
        """Entry point called by the validator for every transaction.

        Decodes and validates the payload, loads current state, applies
        the requested verb, and writes the updated state back.
        """
        verb, name, value = _unpack_transaction(transaction)
        state = _get_state_data(name, context)
        updated_state = _do_simple(verb, name, value, state)
        _set_state_data(name, updated_state, context)
def _unpack_transaction(transaction):
    """Decode a transaction payload and validate every field.

    Returns the (verb, name, value) triple; raises InvalidTransaction
    on any malformed field.
    """
    verb, name, value = _decode_transaction(transaction)
    for validator, field in ((_validate_verb, verb),
                             (_validate_name, name),
                             (_validate_value, value)):
        validator(field)
    return verb, name, value
def _decode_transaction(transaction):
    """Deserialize a CBOR payload into (verb, name, value).

    Raises:
        InvalidTransaction: if the payload cannot be parsed, or a
            required field is missing or malformed.
    """
    try:
        content = cbor.loads(transaction.payload)
    except Exception:
        raise InvalidTransaction(
            'Invalid payload serialization')

    # BUG FIX: subscripting a dict raises KeyError (or TypeError when the
    # payload is not a mapping), never AttributeError -- the original
    # handlers could not fire and a missing field crashed the processor.
    try:
        verb = content['verb']
    except (KeyError, TypeError):
        raise InvalidTransaction('Verb is required')
    try:
        name = content['account']
    except (KeyError, TypeError):
        raise InvalidTransaction('Name is required')
    try:
        # int() itself can raise ValueError/TypeError on junk values
        value = int(content['money'])
    except (KeyError, TypeError, ValueError):
        raise InvalidTransaction('Value is required')
    return verb, name, value
def _validate_verb(verb):
    """Raise InvalidTransaction unless *verb* is one of VALID_VERBS."""
    if verb in VALID_VERBS:
        return
    raise InvalidTransaction(
        'Verb must be "open", "delete", "query", or "transfer"')
def _validate_name(name):
    """Raise InvalidTransaction unless *name* is a short-enough string."""
    name_ok = isinstance(name, str) and len(name) <= MAX_NAME_LENGTH
    if not name_ok:
        raise InvalidTransaction(
            'Name must be a string of no more than {} characters'.format(
                MAX_NAME_LENGTH))
def _validate_value(value):
    """Raise InvalidTransaction unless *value* is an int in [MIN_VALUE, MAX_VALUE].

    CONSISTENCY FIX: compare against MIN_VALUE instead of the literal 0,
    matching the error message and the module constant.
    """
    if not isinstance(value, int) or value < MIN_VALUE or value > MAX_VALUE:
        raise InvalidTransaction(
            'Value must be an integer '
            'no less than {i} and no greater than {a}'.format(
                i=MIN_VALUE,
                a=MAX_VALUE))
def _get_state_data(name, context):
    """Load and CBOR-decode the state entry for *name*.

    Returns an empty dict when no entry exists yet.

    Raises:
        InternalError: if the stored data cannot be decoded.
    """
    address = make_simple_address(name)
    state_entries = context.get_state([address])
    try:
        return cbor.loads(state_entries[0].data)
    except IndexError:
        # no entry stored at this address yet
        return {}
    except Exception:
        # BUG FIX: a bare `except:` would also swallow KeyboardInterrupt
        # and SystemExit; catch Exception instead.
        raise InternalError(
            'Failed to load state data')
def _set_state_data(name, state, context):
    """CBOR-encode *state* and write it at the address derived from *name*.

    Raises:
        InternalError: if the context reports that nothing was written.
    """
    address = make_simple_address(name)
    encoded = cbor.dumps(state)
    addresses = context.set_state({address: encoded})
    # set_state returns the list of addresses actually written;
    # an empty result means the write failed.
    if not addresses:
        raise InternalError(
            'State error')
def _do_simple(verb, name, value, state):
    """Dispatch *verb* to its handler and return the updated state.

    Raises:
        InternalError: if the verb has no handler (programming error --
            _validate_verb should have rejected it earlier).
    """
    verbs = {
        'open': _do_open,
        'delete': _do_delete,
        'query': _do_query,
        'transfer': _do_transfer,
    }
    # BUG FIX: look the handler up *before* calling it. The original
    # wrapped the call itself in the try, so a KeyError raised inside a
    # handler was misreported as "Unhandled verb".
    try:
        handler = verbs[verb]
    except KeyError:
        # This would be a programming error.
        raise InternalError(
            'Unhandled verb: {}'.format(verb))
    return handler(name, value, state)
def _do_open(name, value, state):
    """Create account *name* with balance *value*.

    Raises InvalidTransaction when the account already exists.
    Returns a new state dict; the caller's dict is never mutated.
    """
    LOGGER.debug('Opening "{n}" to {v}'.format(n=name, v=value))
    if name in state:
        raise InvalidTransaction(
            'Account is "open", but already exists: Name: {n}, Value {v}'.format(
                n=name,
                v=state[name]))
    updated = dict(state)
    updated[name] = value
    return updated
def _do_delete(name, value, state):
    """Handler for the 'delete' verb.

    NOTE(review): despite its name this *increments* state[name] by
    value, and the log/error messages still say "inc" -- this looks like
    copy-pasted increment code; confirm the intended 'delete' semantics.
    """
    msg = 'Incrementing "{n}" by {v}'.format(n=name, v=value)
    LOGGER.debug(msg)
    if name not in state:
        raise InvalidTransaction(
            'Verb is "inc" but name "{}" not in state'.format(name))
    curr = state[name]
    incd = curr + value
    if incd > MAX_VALUE:
        raise InvalidTransaction(
            'Verb is "inc", but result would be greater than {}'.format(
                MAX_VALUE))
    # copy-on-write: never mutate the caller's state dict
    updated = {k: v for k, v in state.items()}
    updated[name] = incd
    return updated
def _do_query(name, value, state):
    # NOTE(review): returns None, so apply() will pass None to
    # _set_state_data and overwrite the stored entry with cbor-encoded
    # None. A read-only query should probably return `state` unchanged
    # -- confirm before relying on this verb.
    return
def _do_transfer(name, value, state):
    """Handler for the 'transfer' verb.

    NOTE(review): this only *decrements* state[name] by value (messages
    still say "dec"); no destination account is credited. Confirm the
    intended 'transfer' semantics.
    """
    msg = 'Decrementing "{n}" by {v}'.format(n=name, v=value)
    LOGGER.debug(msg)
    if name not in state:
        raise InvalidTransaction(
            'Verb is "dec" but name "{}" not in state'.format(name))
    curr = state[name]
    decd = curr - value
    if decd < MIN_VALUE:
        raise InvalidTransaction(
            'Verb is "dec", but result would be less than {}'.format(
                MIN_VALUE))
    # copy-on-write: never mutate the caller's state dict
    updated = {k: v for k, v in state.items()}
    updated[name] = decd
    return updated
| StarcoderdataPython |
#!/usr/bin/env python3
"""Display a GoPro UDP video stream with OpenCV."""
# import rospy
import socket
import cv2
import numpy as np
import time

WINDOW_NAME = "My Window"

# Open the UDP video stream through FFMPEG.
cap = cv2.VideoCapture("udp://10.5.5.9:10000", cv2.CAP_FFMPEG)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
last_message = time.time()
while True:
    # Get an image
    ret, img = cap.read()
    # BUG FIX: cap.read() returns (False, None) when no frame is
    # available; imshow(None) would crash. Stop the loop instead.
    if not ret:
        break
    # Do something with img
    cv2.imshow(WINDOW_NAME, img)
    cv2.waitKey(1)
# BUG FIX: the original called cv2.destroyWindow(window_name) with an
# undefined name (NameError); clean up with the actual window title.
cv2.destroyWindow(WINDOW_NAME)
cap.release()
| StarcoderdataPython |
6479247 | <gh_stars>0
lst = [1, 0, 1, 2, 1, 3, 7, 2]
lst1 = [8, 3, 9, 6, 4, 7, 5, 2, 1]
lst7 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
n = 20
k = 2000000000000000000
def main():
    """Demo driver: subtract 1 from [1, 2, 3] (n=3), then shift the result."""
    difference = subtract_4([1, 2, 3], 1, 3)
    shift_4(difference)
def shift_4(lst):
    """Puzzle helper: for each element, count how many smaller elements
    lie on one side of it in the reversed sequence; the scan direction
    depends on the parity of previous counts. Prints and returns the list
    of counts.

    NOTE(review): indentation reconstructed from a flattened source; the
    placement of `left`/`right` after the first loop (which reuses the
    loop's final `i`, i.e. limit-1) looks suspicious -- confirm against
    the original file.
    """
    line_one = []
    #print(lst)
    reverse_line_two = lst
    #print(reverse_line_two)
    shifted_num = []
    reverse_line_two = reverse_line_two[::-1]
    #print(reverse_line_two)
    limit = len(reverse_line_two)
    #print(limit)
    for i in range(limit):
        line_one.append(i + 1)
    line_one.pop(0)
    # uses the leftover loop variable `i` (== limit - 1)
    left = reverse_line_two[:i]
    right = reverse_line_two[i:]
    reverse_line_two1 = lst[1:]
    #print(len(reverse_line_two1))
    #print("line_one: ", line_one, "reverse_line_two: ", reverse_line_two, "reverse_line_two1: ", reverse_line_two1)
    for i in range(len(reverse_line_two1)):
        count = 0
        if i == 0:
            for j in left:
                if line_one[i] > j:
                    count+=1
            shifted_num.append(count)
        if i > 0:
            # scan side flips with the parity of earlier counts
            if line_one[i] % 2 == 1:
                if shifted_num[i-1]%2 ==0:
                    for j in reverse_line_two[:reverse_line_two.index(line_one[i])]:
                        if line_one[i] > j:
                            count+=1
                    shifted_num.append(count)
                elif shifted_num[i-1]%2 ==1:
                    for j in reverse_line_two[reverse_line_two.index(line_one[i]):]:
                        if line_one[i] > j:
                            count+=1
                    shifted_num.append(count)
            elif line_one[i] % 2 == 0:
                if (shifted_num[i-1]+shifted_num[i-2])%2 ==0:
                    for j in reverse_line_two[:reverse_line_two.index(line_one[i])]:
                        if line_one[i] > j:
                            count+=1
                    shifted_num.append(count)
                if (shifted_num[i-1]+shifted_num[i-2])%2 ==1:
                    for j in reverse_line_two[reverse_line_two.index(line_one[i]):]:
                        if line_one[i] > j:
                            count+=1
                    shifted_num.append(count)
    print(shifted_num)
    return shifted_num
def add_4(lst, k, n):
    """Add *k* to the mixed-radix number encoded by shift_4(lst), where
    digit i has base line_one[i]; returns the resulting digit list
    (most-significant first).

    NOTE(review): indentation reconstructed from a flattened source.
    The second `if` inside the while loop re-reads reverse_line_two[index]
    after the first branch may have advanced `index`; this can raise
    IndexError at the end of the list -- confirm.
    """
    line_two = shift_4(lst)
    #bound
    line_one = []
    #Easier to manipulate in list notation
    reverse_line_two = line_two[::-1]
    #subtracted list i want
    final_list = []
    #length of the bounds
    limit = len(reverse_line_two) + 1
    for i in range(limit):
        line_one.append(i + 1)
    line_one.pop(0)
    line_one = line_one[::-1]
    #print("line_one: ", line_one)
    #print("reverse_line_two: ", reverse_line_two)
    step_one = reverse_line_two[0] + k
    if step_one >= line_one[0]:
        first_carry = step_one//line_one[0]
        #print("first_carry: ", first_carry)
        first_append = step_one%line_one[0]
        #print("first_append: ", first_append)
        final_list.append(first_append)
    if step_one < line_one[0]:
        first_carry = 0
        first_append = step_one%line_one[0]
        final_list.append(first_append)
    index = 1
    current_carry = first_carry
    #print(reverse_line_two)
    #print(n)
    while index < (n -1):
        #print("current_carry: ", current_carry)
        #print("r: ",reverse_line_two,"index: ", index, "value: ",reverse_line_two[index])
        if (reverse_line_two[index]) + current_carry >= line_one[index]:
            res = reverse_line_two[index] + current_carry
            current_carry = res//line_one[index]
            #print("appending: ",res%line_one[index])
            final_list.append(res%line_one[index])
            index += 1
        if (reverse_line_two[index] + current_carry) < line_one[index]:
            res1 = reverse_line_two[index] + current_carry
            current_carry = 0
            #print("appending: ",res1%line_one[index])
            final_list.append(res1)
            index += 1
    #print("final_list: ", final_list)
    return final_list[::-1]
def subtract_4(lst, k, n):
    """Subtract *k* from the mixed-radix number encoded by shift_4(lst),
    where digit i has base line_one[i]; returns the resulting digit list
    (most-significant first).

    NOTE(review): indentation reconstructed from a flattened source.
    Like add_4, the second `if` inside the while loop re-reads the digit
    after `index` may have advanced; possible IndexError -- confirm.
    """
    line_two = shift_4(lst)
    #bound
    line_one = []
    #Easier to manipulate in list notation
    reverse_line_two = line_two[::-1]
    #subtracted list i want
    final_list = []
    #length of the bounds
    limit = len(reverse_line_two) + 1
    for i in range(limit):
        line_one.append(i + 1)
    line_one.pop(0)
    line_one = line_one[::-1]
    step_one = reverse_line_two[0] - k
    #print(step_one)
    if step_one < 0:
        # Python floor division makes the borrow positive here
        first_carry = -((step_one) // line_one[0])
        #print(first_carry)
        first_append = step_one%line_one[0]
        #print(first_append)
        final_list.append(first_append)
    if step_one >= 0:
        first_carry = 0
        first_append = step_one%line_one[0]
        final_list.append(first_append)
    index = 1
    current_carry = first_carry
    #print("final_list: ", final_list)
    print(reverse_line_two[index], current_carry)
    while index < (n-1):
        #print("current_carry: ", current_carry)
        #print("index: ", reverse_line_two[index])
        #print(reverse_line_two[index] - current_carry)
        if reverse_line_two[index] - current_carry < 0:
            res = reverse_line_two[index] - current_carry
            #print("res: ",res)
            final_list.append(res%line_one[index])
            current_carry = -(res//line_one[index])
            index += 1
        if reverse_line_two[index] - current_carry >= 0:
            res1 = reverse_line_two[index] - current_carry
            current_carry = 0
            final_list.append(res1)
            index += 1
    # print("line_one: ", line_one, "line_two: ", reverse_line_two)
    # print("append value: ",(reverse_line_two[0]-k)%line_one[0])
    # final_list.append((reverse_line_two[0]-k)%line_one[0])
    # print("carry value: ", first_carry, final_list)
    # print("carried: ", reverse_line_two[1] - first_carry )
    # carried = reverse_line_two[1] - first_carry
    # second_append = (carried)%line_one[1]
    # final_list.append(second_append)
    #print(final_list)
    return final_list[::-1]
def find_it_4(new_list, index, value):
    """Set the (index+1)-th occurrence of -1 in *new_list* to *value*.

    Mutates the list in place and returns it (unchanged when there are
    fewer than index+1 occurrences of -1).
    """
    seen = 0
    for pos, slot in enumerate(new_list):
        if slot == -1:
            seen += 1
            if seen == index + 1:
                new_list[pos] = value
    return new_list
def find_it_neg_4(new_list, index, value):
    """Like find_it_4, but counts occurrences of -1 from the right end.

    Returns a new list with the (index+1)-th -1 (counted from the right)
    replaced by *value*.
    """
    seen = 0
    mirrored = new_list[::-1]
    for pos, slot in enumerate(mirrored):
        if slot == -1:
            seen += 1
            if seen == index + 1:
                mirrored[pos] = value
    return mirrored[::-1]
def order_4(lst):
    """Reconstruct an ordering from the count list produced by shift_4:
    place each symbol b[i] into the slot given by lst (counted among the
    remaining empty -1 slots, left-to-right or right-to-left depending
    on parity rules). Remaining empty slots are filled with 1.

    NOTE(review): indentation reconstructed from a flattened source;
    parity branches mirror shift_4's -- confirm against the original.
    """
    lst1 = lst[::-1]
    limit = len(lst1)+1
    top = []
    mylist = []
    for i in range(limit):
        top.append(i + 1)
    top.pop(0)
    # mylist starts as `limit` empty (-1) slots
    for i in range(limit):
        mylist.append(-1)
    b = top[::-1]
    #print(b,lst1,mylist)
    for i in range(len(lst1)):
        #print(mylist)
        if b[i] != 2:
            if b[i]%2 == 1:
                if lst1[i+1]%2 == 1:
                    mylist = find_it_4(mylist, lst1[i], b[i])
                    #mylist[lst1[i]] = b[i]
                elif lst1[i+1]%2 ==0:
                    mylist = find_it_neg_4(mylist, lst1[i], b[i])
                    print("find neg: ", mylist, lst1[i], b[i])
            elif b[i]%2 == 0:
                if (lst1[i+1]+lst1[i+2])%2 == 1:
                    mylist = find_it_4(mylist, lst1[i], b[i])
                    #mylist[lst1[i]] = b[i]
                elif (lst1[i+1]+lst1[i+2])%2 == 0:
                    mylist = find_it_neg_4(mylist, lst1[i], b[i])
                    print("find neg: ", mylist, lst1[i], b[i])
            #print(mylist, lst1[i], b[i])
        elif b[i] == 2:
            mylist = find_it_neg_4(mylist, lst1[i], b[i])
    for i in range(len(mylist)):
        #print(mylist, lst1[i], b[i])
        if mylist[i] == -1:
            mylist[i] = 1
    print(mylist)
    return mylist
if __name__ == '__main__':
main()
| StarcoderdataPython |
91961 | <gh_stars>0
import numpy as np
from copy import *
import vtk
# the minimum square distance for two points to be considered distinct
tolerance = np.square(0.01)
# square distance of two points
def sqr_dist(p1, p2):
    """Squared Euclidean distance between two 3D points."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    dz = p1[2] - p2[2]
    return np.square(dx) + np.square(dy) + np.square(dz)
# this returns the cosine of the angle between two adjacent edges
# if they are fully aligned it returns 1.0 going to -1.0 with increasing angle
def angle(pts):
    """Cosine of the angle between the edges p0->p1 and p1->p2.

    Returns 1.0 for collinear edges, decreasing towards -1.0 as the
    bend increases. Raises when not given exactly 3 points.
    """
    if len(pts) != 3:
        raise Exception("need 3 points to compute an angle.")
    edge_a = pts[1] - pts[0]
    edge_b = pts[2] - pts[1]
    unit_a = edge_a / np.linalg.norm(edge_a)
    unit_b = edge_b / np.linalg.norm(edge_b)
    return np.dot(unit_a, unit_b)
def VisualizePointCloud(points):
    """
    Display a set of points in 3D space

    Opens an interactive VTK render window; blocks until it is closed.
    """
    # build a vtkPolyData with one vertex cell per point
    pts = vtk.vtkPoints()
    vertices = vtk.vtkCellArray()
    for p in points:
        id = pts.InsertNextPoint(p)
        vertices.InsertNextCell(1)
        vertices.InsertCellPoint(id)
    meshData = vtk.vtkPolyData()
    meshData.SetPoints(pts)
    meshData.SetVerts(vertices)
    # map the triangle meshs into the scene
    meshMapper = vtk.vtkPolyDataMapper()
    meshMapper.SetInputData(meshData)
    # add the actors to the scene
    meshActor = vtk.vtkActor()
    meshActor.SetMapper(meshMapper)
    meshActor.GetProperty().SetColor(vtk.vtkNamedColors().GetColor3d("Yellow"))
    # create a render window
    renderer = vtk.vtkRenderer()
    renderer.SetBackground(vtk.vtkNamedColors().GetColor3d("SlateGray"))
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetSize(800,600)
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    renderWindowInteractor.Initialize()
    style = vtk.vtkInteractorStyleTrackballCamera()
    style.SetDefaultRenderer(renderer)
    renderWindowInteractor.SetInteractorStyle(style)
    # add the actors to the scene
    renderer.AddActor(meshActor)
    # render and interact
    renderWindow.Render()
    renderWindowInteractor.Start()
    # now the interaction is running until we close the window
    # cleanup after closing the window
    del renderWindow
    del renderWindowInteractor
# We define a class for a polyhedron.
# Its starts empty and we can successively add faces keeping track of the number of points
# and the correct indices for faces.
# Finally we can output a solid geometry.
class SolidPolyhedron():
    def __init__(self):
        """
        Create an empty SolidPolyhedron object
        """
        self.points = []   # list of 3D points; list index == point id
        self.faces = []    # list of faces; each face is a list of point indices
        self.NP = 0        # number of points currently stored
def replace_point(self,old,new):
"""
Replace all references to the old point by a reference to the new point.
"""
for face in self.faces:
for k,f in enumerate(face):
if f==old:
# face is a reference, so we can modify it in place
face[k]=new
def remove_point(self,index):
"""
Remove one point with given index from the list.
This is only possible if there exist no references to it in the faces.
All face indexes are updated according to the shift in list positions.
"""
# first check if there are no references
for face in self.faces:
for f in face:
if f==index:
raise Exception("attempting to remove a point with existing references")
# delete the point from the list
del self.points[index]
self.NP = len(self.points)
# move the indexes of the faces
for i in range(index,self.NP):
self.replace_point(i+1,i)
    def add_triangle(self, new_points):
        """
        Add a triangle with given points to the faces of the solid.
        Points are only added if they are not yet present.
        """
        if len(new_points) != 3:
            raise Exception("triangles should be given with 3 points.")
        new_face = [0,0,0]
        # append the new points to the list
        for i,new in enumerate(new_points):
            is_new = True
            # check if this new point is already present
            # (O(NP) scan per point; fine for the mesh sizes used here)
            for k,p in enumerate(self.points):
                if sqr_dist(p,new)<tolerance:
                    new_face[i] = k
                    is_new = False
            # do not append points that are already present
            if is_new:
                new_face[i] = self.NP
                self.points.append(new)
                self.NP += 1
        self.faces.append(new_face)
def add_polygon(self,new_points):
"""
Add a face defined by a polygon.
Degenerated edges are removed.
The polygon is recursively split into triangles, always cutting off
the triangle with the sharpest corner.
"""
new_NP = len(new_points)
# remove degenerate edges
i=1
# we have to use while loops as the end may change during the execution
while i<new_NP:
p1 = new_points[i-1]
p2 = new_points[i]
if sqr_dist(p1,p2)<tolerance:
del new_points[i]
new_NP -= 1
print('removed one degenerate edge')
# if the edge was degenerate we have to try te same index again
else:
i += 1
# add the face
if new_NP<3: raise Exception("too few points for a polygon.")
if new_NP==3: self.add_triangle(new_points)
else:
# find the sharpest corner
min_angle = 1.0
# i is the index of the corner under consideration
for i in range(new_NP):
ind = [i-1,i,i+1]
# the positive wrap-around has to be handled explicitely, the -1 index works as built-in
if i+1==new_NP: ind = [i-1,i,0]
points = [new_points[k] for k in ind]
a = angle(points)
if a<min_angle:
tri_ind = i
tri_points = points
min_angle = a
self.add_triangle(tri_points)
# the rest is the origonal polygon with the sharpest corner dropped
rest_ind = range(new_NP)
rest_ind.remove(tri_ind)
rest = [new_points[i] for i in rest_ind]
# recursively add the rest polygon
self.add_polygon(rest)
    def add(self, new_points, new_faces):
        """
        Add a collection of faces to the lists
        """
        old_NP = self.NP
        new_NP = len(new_points)
        # the new points are appended after the existing ones
        for p in new_points:
            self.points.append(p)
        # all indices have to be corrected for the number of already existing points
        for f in new_faces:
            new_face = [i+old_NP for i in f]
            self.faces.append(new_face)
        self.NP += new_NP
        # now check if any of the new points were already present
        # the code is the same as unify() except that the ranges of the test
        # are limited to the old (NP) and new points, respectively
        # we have to use while loops as the end may change during the execution
        # NOTE(review): after remove_point shifts indices down, k is still
        # incremented -- the point that slid into position k is skipped;
        # confirm this is intentional.
        i=0
        while i<old_NP:
            # k starts with the first new point
            k=old_NP
            while k<self.NP:
                if sqr_dist(self.points[i],self.points[k])<tolerance:
                    # replace the new point with the already present
                    self.replace_point(k,i)
                    self.remove_point(k)
                k+=1
            i+=1
    def unify(self):
        """
        Check for duplicated points with less than tolerance distance.

        Duplicates are merged: faces are re-pointed at the first copy and
        the redundant point is removed.
        """
        # we have to use while loops as the end may change during the execution
        # NOTE(review): like add(), k is incremented even after a removal,
        # so the element that slid into position k is skipped -- confirm.
        i=0
        while i<self.NP:
            k=i+1
            while k<self.NP:
                if sqr_dist(self.points[i],self.points[k])<tolerance:
                    # replace the latter point with the former
                    self.replace_point(k,i)
                    self.remove_point(k)
                k+=1
            i+=1
    def summary(self):
        """Print the number of points and faces currently stored."""
        print("NP = %d" % self.NP)
        print("faces %d" % len(self.faces))
    def check(self, debug=False):
        """
        We want to check the correctness and completeness of the solid.
        It is a closed volume with all normals pointing to the same side if all edges
        exit exactly twice with opposite directions.
        The debug flag controls the generation of output about every flaw found.
        """
        # make a list of all edges starting from the points
        # edges[p] holds the end points of every directed edge leaving p
        count = 0
        edges = [[] for i in range(self.NP)]
        for f in self.faces:
            NF = len(f)
            for i in range(NF-1):
                edges[f[i]].append(f[i+1])
                count += 1
            # closing edge from the last vertex back to the first
            edges[f[NF-1]].append(f[0])
            count += 1
        print('found %d edges' % count)
        # check for duplicated edges
        count = 0
        for p1,e in enumerate(edges):
            set_e = set()
            for p2 in e:
                if p2 in set_e:
                    if debug: print('found duplicated edge from %d to %d.' % (p1,p2))
                    count += 1
                else:
                    set_e.add(p2)
        print('found %d duplicated edges' % count)
        # check for every edge if the opposite direction exists
        count = 0
        for p1 in range(self.NP):
            for p2 in edges[p1]:
                if not p1 in edges[p2]:
                    count = count+1
                    if debug: print('found free edge from %d to %d.' % (p1,p2))
        print('found %d free edges' % count)
    def getPolyData(self):
        """
        Return a vtkPolyData object

        Built from the current points and faces; no deduplication is done.
        """
        pts = vtk.vtkPoints()
        for p in self.points: pts.InsertNextPoint(p)
        cells = vtk.vtkCellArray()
        for f in self.faces:
            cells.InsertNextCell(len(f), f)
        meshData = vtk.vtkPolyData()
        meshData.SetPoints(pts)
        meshData.SetPolys(cells)
        return meshData
def writeSTL(self,filename):
pts = vtk.vtkPoints()
for p in self.points: pts.InsertNextPoint(p)
cells = vtk.vtkCellArray()
for f in self.faces:
cells.InsertNextCell(len(f), f)
meshData = vtk.vtkPolyData()
meshData.SetPoints(pts)
meshData.SetPolys(cells)
stlWriter = vtk.vtkSTLWriter()
stlWriter.SetFileTypeToASCII()
# stlWriter.SetFileTypeToBinary()
stlWriter.SetInputData(meshData)
stlWriter.SetFileName(filename)
stlWriter.Write()
    def visualize(self, showEdges=True, Opacity=0.9):
        """Show the solid in an interactive VTK window.

        Blocks until the window is closed.

        Parameters:
            showEdges: draw face edges when True.
            Opacity: surface opacity in [0, 1].
        """
        meshData = self.getPolyData()
        # map the triangle meshs into the scene
        meshMapper = vtk.vtkPolyDataMapper()
        meshMapper.SetInputData(meshData)
        # add the actors to the scene
        meshActor = vtk.vtkActor()
        meshActor.SetMapper(meshMapper)
        if showEdges:
            meshActor.GetProperty().EdgeVisibilityOn()
        else:
            meshActor.GetProperty().EdgeVisibilityOff()
        meshActor.GetProperty().SetColor(vtk.vtkNamedColors().GetColor3d("Yellow"))
        meshActor.GetProperty().SetOpacity(Opacity)
        # create a render window
        renderer = vtk.vtkRenderer()
        renderer.SetBackground(vtk.vtkNamedColors().GetColor3d("SlateGray"))
        renderWindow = vtk.vtkRenderWindow()
        renderWindow.SetSize(800,600)
        renderWindow.AddRenderer(renderer)
        renderWindowInteractor = vtk.vtkRenderWindowInteractor()
        renderWindowInteractor.SetRenderWindow(renderWindow)
        renderWindowInteractor.Initialize()
        style = vtk.vtkInteractorStyleTrackballCamera()
        style.SetDefaultRenderer(renderer)
        renderWindowInteractor.SetInteractorStyle(style)
        # add the actors to the scene
        renderer.AddActor(meshActor)
        # render and interact
        renderWindow.Render()
        renderWindowInteractor.Start()
        # now the interaction is running until we close the window
        # cleanup after closing the window
        del renderWindow
        del renderWindowInteractor
6488212 | <filename>pytorch_lightning_pbt/trainer/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------
# PyTorch Lightning PBT
# Authors: <NAME>
# <NAME>
# Updated: May. 2020
# ---------------------
"""
Population Based Trainer
=========
"""
from pytorch_lightning_pbt.trainer.trainer import Trainer
__all__ = [
'Trainer',
]
| StarcoderdataPython |
6574552 | <gh_stars>1-10
# import all the necessary libraries
import scipy
from scipy.io import wavfile
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
from pydub import AudioSegment
from pydub.utils import make_chunks
from pydub.silence import split_on_silence
import pyaudio
from queue import Queue
from threading import Thread
import sys
import time
import random
import librosa
import librosa.display
import os, shutil
import glob
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, BatchNormalization, Dropout
from keras.models import Sequential
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.models import model_from_json
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv1D, Input, MaxPooling1D, LSTM, GRU
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras import backend as K
from keras import optimizers, losses, activations, models
import time
K.clear_session()
# Defining Global Variables
labels = ['arabic', 'english', 'pashto', 'sindhi', 'urdu']
def getModel():
    """Build the Conv1D + LSTM language-ID network.

    Input: a (128, 32) log-mel spectrogram; output: 5-way softmax over
    the module-level `labels`.
    """
    db = 0.3  # dropout rate shared by both dropout layers
    inputs = Input(shape=(128,32))
    #First Conv1D layer
    conv = Conv1D(32,6, padding='valid', activation='relu', strides=1)(inputs)
    conv = MaxPooling1D(2)(conv)
    conv = Dropout(db)(conv)
    #Second Conv1D layer
    conv = Conv1D(64, 4, padding='valid', activation='relu', strides=1)(conv)
    # conv = MaxPooling1D(2)(conv)
    conv = Dropout(db)(conv)
    conv=LSTM(units=64, return_sequences=True)(conv)
    conv=LSTM(units=128, return_sequences=True)(conv)
    #Flatten layer
    conv = Flatten()(conv)
    #Dense Layer 1
    # conv = Dense(256, activation='relu')(conv)
    # conv = Dropout(0.3)(conv)
    # Dense Layer 2
    conv = Dense(128, activation='relu')(conv)
    outputs = Dense(5, activation='softmax')(conv)
    model_2 = Model(inputs, outputs)
    return model_2
# Function to load the model weights and archtitecture
def initModel(weights):
    """Build the network architecture and load trained weights from *weights*."""
    model = getModel()
    # load weights into the freshly built model
    model.load_weights(weights)
    return model
# Function to create a spectogram from a given file and save it in the desired
# destination
def clearFolder(folder):
    """Best-effort removal of every file, symlink and subdirectory in *folder*.

    Failures are reported on stdout but never raised.
    """
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        try:
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as err:
            print('Failed to delete %s. Reason: %s' % (entry_path, err))
# Function to split audio file into smaller samples
def splitAudio(input_file, extension, chunk_size, output_path):
    """Band-pass the audio (200 Hz - 3 kHz) and export it as
    chunk_size-second WAV chunks named chunk<i>.wav under output_path.

    NOTE(review): output_path is used as a raw string prefix, not joined
    with os.path -- it must end with a path separator.
    """
    myaudio = AudioSegment.from_file(input_file , extension)
    myaudio = myaudio.low_pass_filter(3000).high_pass_filter(200)
    chunk_length_ms = chunk_size * 1000
    chunks = make_chunks(myaudio, chunk_length_ms)
    #Export all of the individual chunks as wav files
    for i, chunk in enumerate(chunks):
        chunk_name = output_path + "chunk{0}.wav".format(i)
        chunk.export(chunk_name, format="wav")
# Function to process audio by splitting it and converting
# samples to MFCCs
def processAudio(input_file, extension, chunk_file):
    """Split *input_file* into 1-second chunks and convert each chunk into
    a (128, 32) log-mel spectrogram.

    Returns a numpy array of spectrograms (chunks with a different shape,
    e.g. a short trailing chunk, are dropped).
    """
    # Clearing Folders
    clearFolder(chunk_file)
    # Splitting the audio
    output_path = chunk_file
    splitAudio(input_file, extension, 1, output_path)
    input_paths = glob.glob(output_path + '*')
    X_pred = []
    data = input_paths
    for i, d in zip(range(len(data)), data):
        SR = 16000
        y, sr = librosa.load(d, sr = SR)
        S = librosa.feature.melspectrogram(y, sr)
        S = librosa.power_to_db(S, ref=np.max)
        # only full 1-second chunks produce exactly 32 frames
        if (S.shape == (128, 32)):
            X_pred.append(S)
        # End if
    # End for
    return np.array(X_pred)
def getPredictions(inputFile, extension, model, chunk_file):
    """Run the model on every 1-second chunk of *inputFile*.

    Returns one language label (from the module-level `labels`) per chunk.
    """
    # Processing the Audio file to get the final spectograms
    X_pred = processAudio(inputFile, extension, chunk_file)
    # Making Prediction
    pred = model.predict(X_pred)
    # Formating Outputs
    predicted_class_indices=np.argmax(pred, axis=1)
    # Labels
    predictions = [labels[k] for k in predicted_class_indices]
    return predictions
def languageLocalize(inputFile, extension, chunk_file):
    """Load the trained model ('weights.hdf5' in the working directory)
    and return per-second language predictions for *inputFile*."""
    model = initModel('weights.hdf5')
    model.compile(loss = 'categorical_crossentropy', optimizer = 'adamax', metrics = ['accuracy'])
    pred = getPredictions(inputFile, extension, model, chunk_file)
    return pred
def predict(data, model):
    """Classify a single waveform and return its language label.

    `data` is assumed to be a 1-second, 16 kHz mono sample array --
    the assert enforces the resulting (1, 128, 32) spectrogram shape.
    """
    S = librosa.feature.melspectrogram(y = data.astype(np.float32), sr = 16000)
    S = librosa.power_to_db(S,ref = np.max)
    S = np.expand_dims(S, axis = 0)
    assert(S.shape == (1, 128,32))
    label = model.predict(S)
    label = np.argmax(label)
    # NOTE(review): this local list shadows the module-level `labels`
    labels = ['arabic', 'english', 'pashto', 'sindhi', 'urdu']
    return labels[label]
3251645 | import tensorflow as tf
import numpy as np
from preprocess_encode_images import extract_cache_features
from train_data_preparation import tokenizer, train_max_length
from params import attention_features_shape
from config import DIRECTORIES
def predict_single(image, models, image_features_dir, tokenizer,
                   extract_features = False):
    """
    Parameters:
        models: tuple of Keras Model: (encoder, decoder)
    Returns:
        logits: Tensor of shape (caption_length, vocab_size), with
            caption_length variable and capped by train_max_length
        caption: list of predicted words, truncated before '<end>'
        attention_plot: attention weights, one row per caption word
    """
    attention_plot = np.zeros((train_max_length, attention_features_shape))
    encoder, decoder = models
    hidden = decoder.reset_state(batch_size=1)
    if extract_features:
        # compute and cache the CNN features for this image first
        extract_cache_features(image, image_features_dir)
    img_feature_filename = image_features_dir + \
        image.split('/')[-1] + '.npy'
    img_tensor = np.load(img_feature_filename)
    features = encoder(img_tensor)
    dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
    logits = []
    caption = []
    for i in range(train_max_length):
        predictions, hidden, attention_weights = decoder((dec_input, features, hidden))
        attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()
        logits.append(predictions)
        # sample the next word from the predicted distribution
        predicted_id = tf.random.categorical(predictions, 1)[0][0].numpy()
        caption.append(tokenizer.index_word.get(predicted_id))
        #
        # if tokenizer.index_word.get(predicted_id) == '<end>':
        #
        #     logits = tf.concat(logits, axis=0)
        #     return logits, caption, attention_plot
        dec_input = tf.expand_dims([predicted_id], 0)
    logits = tf.concat(logits, axis=0)
    # trim the caption at the first '<end>' token, if any
    try:
        end_index = caption.index('<end>')
    except ValueError:
        end_index = len(caption)
    finally:
        caption = caption[:end_index]
    attention_plot = attention_plot[:len(caption), :]
    return logits, caption, attention_plot
def predict_all(img_paths, models, image_features_dir, tokenizer):
    """Run predict_single over every image path.

    Returns two parallel lists: per-image logits and per-image captions.
    """
    results = [predict_single(path, models, image_features_dir, tokenizer)
               for path in img_paths]
    all_logits = [logits for logits, _, _ in results]
    all_captions = [caption for _, caption, _ in results]
    return all_logits, all_captions
def predict_batch(img_tensor_batch, models, tokenizer, train_max_length):
    """
    Predicts logits for the probability distribution of words, for the whole
    caption.
    Params:
        img_tensor_batch: tensor of shape (batch_size, 64, 2048) (latter two
            are attention_features_shape and features_shape from InceptionV3)
        models: tuple of (encoder, decoder)
        tokenizer: trained on train_captions
        train_max_length: int
            longest caption length in training dataset (all captions are padded
            to this length)
    Returns:
        logits: tensor of shape (batch_size, vocab_size, train_max_length)
            contains logits for each word and each instance on the batch
    """
    batch_size = img_tensor_batch.shape[0]
    encoder, decoder = models
    features_batch = encoder(img_tensor_batch, training = False)
    hidden = decoder.reset_state(batch_size = batch_size)
    dec_input = tf.expand_dims([tokenizer.word_index['<start>']]*batch_size, 1)
    logits = []
    captions = []
    # forces to predict up to caption_length
    for i in range(train_max_length):
        # not using attention_weights here
        predictions, hidden, _ = decoder((dec_input, features_batch, hidden), training = False)
        logits.append(predictions)
        # sampled ids feed the next decoder step
        predicted_id = tf.random.categorical(predictions, 1)
        # caption.append(tokenizer.index_word.get(predicted_id))
        dec_input = predicted_id
    logits = tf.stack(logits, axis=2)
    return logits
def translate_into_captions(logits, tokenizer):
    """
    Parameters:
        logits: tensor of shape (caption_length, vocab_size)

    NOTE(review): UNFINISHED -- this references undefined names
    (`predictions`, `batch_size`) and `tokenizer.index` (elsewhere the
    code uses `tokenizer.index_word`), then ends in `pass`. Calling it
    raises NameError; do not use until completed.
    """
    caption_length, vocab_size = logits.shape
    # using [[]]*batch_size creates multiple references to the same list
    captions = [[] for _ in range(caption_length)]
    predicted_ids = tf.random.categorical(predictions, 1).numpy()
    next_word = [tokenizer.index.get(id) for id in predicted_ids.flatten()]
    for i in range(batch_size):
        captions[i].extend(next_word[i])
    pass
def generate_captions_single(image, models, image_features_dir,
                             extract_features = False):
    """
    Parameters:
        models: tuple of Keras Model: (encoder, decoder)
    Returns:
        result: list of predicted words (including '<end>' when reached)
        attention_plot: attention weights, one row per generated word
    """
    attention_plot = np.zeros((train_max_length, attention_features_shape))
    encoder, decoder = models
    hidden = decoder.reset_state(batch_size=1)
    if extract_features:
        # compute and cache the CNN features for this image first
        extract_cache_features(image, image_features_dir)
    img_feature_filename = image_features_dir + \
        image.split('/')[-1] + '.npy'
    img_tensor_val = np.load(img_feature_filename)
    features = encoder(img_tensor_val)
    dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
    result = []
    for i in range(train_max_length):
        predictions, hidden, attention_weights = decoder((dec_input, features, hidden))
        attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()
        # sample the next word from the predicted distribution
        predicted_id = tf.random.categorical(predictions, 1)[0][0].numpy()
        result.append(tokenizer.index_word.get(predicted_id))
        if tokenizer.index_word.get(predicted_id) == '<end>':
            return result, attention_plot
        dec_input = tf.expand_dims([predicted_id], 0)
    attention_plot = attention_plot[:len(result), :]
    return result, attention_plot
def generate_captions_all(img_paths, models, image_features_dir):
    """Generate one caption per image path (attention plots are discarded)."""
    return [generate_captions_single(path, models, image_features_dir)[0]
            for path in img_paths]
def generate_train_captions(img_paths, models):
    # NOTE(review): IMGS_FEATURES_CACHE_DIR_TRAIN is not defined in this
    # module (only DIRECTORIES is imported from config) -- calling this
    # raises NameError; confirm the intended constant.
    return generate_captions_all(img_paths, models,
        image_features_dir = IMGS_FEATURES_CACHE_DIR_TRAIN)
def generate_valid_captions(img_paths, models):
    # NOTE(review): IMGS_FEATURES_CACHE_DIR_VAL is not defined in this
    # module -- same issue as generate_train_captions; confirm.
    return generate_captions_all(img_paths, models,
        image_features_dir = IMGS_FEATURES_CACHE_DIR_VAL)
| StarcoderdataPython |
11265280 | <gh_stars>0
import subprocess
from briefcase.commands import (
BuildCommand,
CreateCommand,
PackageCommand,
PublishCommand,
RunCommand,
UpdateCommand
)
from briefcase.config import BaseConfig
from briefcase.exceptions import BriefcaseCommandError
from briefcase.integrations.xcode import verify_xcode_install
from briefcase.platforms.macOS import (
macOSMixin,
macOSPackageMixin,
macOSRunMixin
)
class macOSXcodeMixin(macOSMixin):
    """Shared behaviour for macOS Xcode-format briefcase commands: host/tool
    verification plus the well-known project, binary and artefact paths."""
    output_format = 'Xcode'
    def verify_tools(self):
        """Ensure we are on macOS with a usable Xcode before doing any work."""
        if self.host_os != 'Darwin':
            raise BriefcaseCommandError(
                "macOS applications require the Xcode command line tools, "
                "which are only available on macOS."
            )
        # Require XCode 10.0.0. There's no particular reason for this
        # specific version, other than it's a nice round number that's
        # not *that* old at time of writing.
        verify_xcode_install(self, min_version=(10, 0, 0))
        # Verify superclass tools *after* xcode. This ensures we get the
        # git check *after* the xcode check.
        super().verify_tools()
    def binary_path(self, app):
        """Path of the built .app bundle inside the Xcode Release build dir."""
        return (
            self.platform_path
            / self.output_format
            / f'{app.formal_name}'
            / 'build' / 'Release'
            / f'{app.formal_name}.app'
        )
    def distribution_path(self, app, packaging_format):
        """Distributable artefact path: the DMG for 'dmg', else the bare .app."""
        if packaging_format == 'dmg':
            return self.platform_path / f'{app.formal_name}-{app.version}.dmg'
        else:
            return self.binary_path(app)
    def entitlements_path(self, app):
        """Path of the app's .entitlements file inside the project bundle."""
        return (
            self.bundle_path(app)
            / f'{app.formal_name}'
            / f'{app.app_name}.entitlements'
        )
class macOSXcodeCreateCommand(macOSXcodeMixin, CreateCommand):
    """`briefcase create` for the macOS Xcode output format."""
    description = "Create and populate a macOS Xcode project."
class macOSXcodeUpdateCommand(macOSXcodeMixin, UpdateCommand):
    """`briefcase update` for the macOS Xcode output format."""
    description = "Update an existing macOS Xcode project."
class macOSXcodeBuildCommand(macOSXcodeMixin, BuildCommand):
    """`briefcase build`: invoke xcodebuild on the generated project."""
    description = "Build a macOS Xcode project."
    def build_app(self, app: BaseConfig, **kwargs):
        """
        Build the Xcode project for the application.
        :param app: The application to build
        """
        # No-arg info() emits a blank separator line before the build banner.
        self.logger.info()
        self.logger.info(f'[{app.app_name}] Building XCode project...')
        # Previously-used custom build settings, kept commented for reference:
        # build_settings = [
        #     ('AD_HOC_CODE_SIGNING_ALLOWED', 'YES'),
        #     ('CODE_SIGN_IDENTITY', '-'),
        #     ('VALID_ARCHS', '"i386 x86_64"'),
        #     ('ARCHS', 'x86_64'),
        #     ('ONLY_ACTIVE_ARCHS', 'NO')
        # ]
        # build_settings_str = [f'{setting}={value}' for setting, value in build_settings]
        try:
            # -quiet suppresses per-step xcodebuild output; failures still
            # surface via check=True raising CalledProcessError.
            self.subprocess.run(
                [
                    'xcodebuild', # ' '.join(build_settings_str),
                    '-project', self.bundle_path(app) / f'{app.formal_name}.xcodeproj',
                    '-quiet',
                    '-configuration', 'Release',
                    'build'
                ],
                check=True,
            )
            self.logger.info('Build succeeded.')
        except subprocess.CalledProcessError as e:
            raise BriefcaseCommandError(f"Unable to build app {app.app_name}.") from e
class macOSXcodeRunCommand(macOSRunMixin, macOSXcodeMixin, RunCommand):
    """`briefcase run` for the macOS Xcode output format."""
    description = "Run a macOS app."
class macOSXcodePackageCommand(macOSPackageMixin, macOSXcodeMixin, PackageCommand):
    """`briefcase package` for the macOS Xcode output format."""
    description = "Package a macOS app for distribution."
class macOSXcodePublishCommand(macOSXcodeMixin, PublishCommand):
    """`briefcase publish` for the macOS Xcode output format."""
    description = "Publish a macOS app."
# Declare the briefcase command bindings
create = macOSXcodeCreateCommand # noqa
update = macOSXcodeUpdateCommand # noqa
build = macOSXcodeBuildCommand # noqa
run = macOSXcodeRunCommand # noqa
package = macOSXcodePackageCommand # noqa
publish = macOSXcodePublishCommand # noqa
| StarcoderdataPython |
110345 | from marshmallow import fields
from marshmallow import Schema
from marshmallow.validate import OneOf
class UsersListFilterSchema(Schema):
    """Query-string filters for the users list.

    ``sort_key`` must be one of username/email/phone_number (default
    ``username``); ``sort_order`` defaults to ``asc``.
    """
    # Bug fix: OneOf(...) was previously passed positionally, where marshmallow
    # treats the first positional argument as the field *default*, not a
    # validator -- so invalid sort keys were silently accepted. It belongs in
    # the ``validate=`` keyword.
    sort_key = fields.String(
        validate=OneOf(choices=['username', 'email', 'phone_number']),
        missing='username')
    sort_order = fields.String(missing='asc')
class UsersListSchema(Schema):
    """Serialized representation of one user row in the users list."""
    username = fields.String()
    email = fields.String()
    phone_number = fields.String()
class UserInputSchema(Schema):
    """User-creation payload: credentials plus confirmation fields.

    NOTE(review): no required/equality validation is declared here, so the
    confirm_* fields are not checked against their counterparts -- presumably
    done elsewhere; confirm.
    """
    username = fields.String()
    password = fields.String()
    confirm_password = fields.String()
    email = fields.String()
    confirm_email = fields.String()
    phone_number = fields.String()
| StarcoderdataPython |
146506 | from smartnlp.classfication.svm_classifier import SVMClassifier
if __name__ == '__main__':
    # Train an SVM text classifier on the aclImdb corpus (presumably movie-review
    # sentiment -- confirm) and persist the fitted model to model/svm/model.pkl.
    svm_model = SVMClassifier('model/svm/model.pkl',
                              './data/imdb/aclImdb.txt',
                              train=True)
    # Alternative: reload the previously trained model instead of retraining.
    # svm_model = SVMClassifier('model/svm/model.pkl')
    svm_model.predict(['i like it ! its very interesting', 'I don\'t like it, it\'s boring'])
| StarcoderdataPython |
3260914 | from tkinter import *
from tkinter import messagebox
from webbrowser import get
class CoursesWindow:
    """Tkinter listbox window that lets a student pick a class and opens the
    selected meeting link in the default browser."""
    # One background colour per course so its class entries group visually.
    LIST_COLORS= ["dark green", "dark blue", "red", "orange", "purple", "brown"]
    def __init__(self, database: dict):
        # database maps course name -> {class type -> meeting URL}.
        # NOTE(review): inferred from returnSelection()'s nested lookup -- confirm.
        self.database= database
        self.window = Tk(className=' Classes Selector')
        self.window.geometry('400x200+350+250')
        yscrollbar = Scrollbar(self.window)
        yscrollbar.pack(side = RIGHT, fill = Y)
        self.listing = Listbox(self.window, selectmode='unique', yscrollcommand = yscrollbar.set)
        self.listing.pack(padx = 10, pady = 10, expand = YES, fill = "both")
    def listingBox(self):
        """Fill the listbox with 'course classtype' rows, coloured per course."""
        counter=0
        for colorID, course in enumerate(self.database):
            for classtype in self.database[course]:
                self.listing.insert(END, course+' '+classtype)
                self.listing.itemconfig(counter, bg = self.LIST_COLORS[colorID], fg='white')
                counter+=1
        self.listing.bind("<<ListboxSelect>>", self.returnSelection)
    def returnSelection(self, event):
        """On selection: ask for confirmation, close the window and open the URL."""
        selection = event.widget.curselection()
        if selection:
            class_selected = event.widget.get(selection[0])
            confirmation = messagebox.askyesno(title='Confirmation', message='Do you want to enter in {} meeting?'.format(class_selected))
            if confirmation:
                class_selected= class_selected.split(' ')
                self.window.destroy()
                # webbrowser.get(None) yields the default browser; new=2 opens a new tab.
                get(using=None).open(self.database[class_selected[0]][class_selected[1]], new=2)
| StarcoderdataPython |
# Tutorial script: boolean expressions in Python.
# Comparison operators produce bool values:
x = 2
print(x == 2) # prints out True
print(x == 3) # prints out False
print(x < 3) # prints out True
# 'and' / 'or' combine conditions:
name = "John"
age = 23
if name == "John" and age == 23:
    print("Your name is John, and you are also 23 years old.")
if name == "John" or name == "Rick":
    print("Your name is either John or Rick.")
# The 'in' operator tests membership in a sequence:
name = "John"
if name in ["John", "Rick"]:
    print("Your name is either John or Rick.")
# if/else chooses between two branches:
x = 2
if x == 2:
    print("x equals two!")
else:
    print("x does not equal to two.")
# '==' compares values; 'is' compares object identity:
x = [1,2,3]
y = [1,2,3]
print(x == y) # Prints out True
print(x is y) # Prints out False
# 'not' inverts a boolean:
print(not False) # Prints out True
print((not False) == (False)) # Prints out False
| StarcoderdataPython |
5072859 | <filename>Scripts/simulation/sims/fixup/sim_info_perk_fixup_action.py
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\fixup\sim_info_perk_fixup_action.py
# Compiled at: 2019-09-16 22:31:25
# Size of source mod 2**32: 2924 bytes
from event_testing.tests import TunableTestSet
from sims.fixup.sim_info_fixup_action import _SimInfoFixupAction
from sims4.tuning.tunable import TunableRange, TunableList, TunableReference
import services, sims4.resources, random
class _SimInfoPerkFixupAction(_SimInfoFixupAction):
FACTORY_TUNABLES = {'potential_perks_to_grant':TunableList(description='\n Bucks perks to grant.\n ',
tunable=TunableReference(manager=(services.get_instance_manager(sims4.resources.Types.BUCKS_PERK)))),
'number_of_perks_to_grant':TunableRange(description='\n The number of perks that should be granted to the Sim. This is limited\n to a maximum now since we might have performance issue otherwise.\n ',
tunable_type=int,
default=1,
minimum=1,
maximum=50),
'tests':TunableTestSet(description='\n A set of tests that must pass for this action to be applied.\n ')}
    def __init__(self, *args, **kwargs):
        # Decompiler artifact: pure pass-through to the base fixup action.
        (super().__init__)(*args, **kwargs)
    def __call__(self, sim_info):
        """Grant up to number_of_perks_to_grant randomly chosen, currently
        unlockable perks from potential_perks_to_grant to this Sim."""
        if self.tests:
            resolver = sim_info.get_resolver()
            # Tuned tests gate whether the fixup applies to this Sim at all.
            if not self.tests.run_tests(resolver):
                return
        def perk_can_be_unlocked(perk):
            # A perk is grantable if it is still locked and every prerequisite
            # perk has already been unlocked.
            if bucks_tracker.is_perk_unlocked(perk):
                return False
            if perk.required_unlocks is not None:
                for required_perk in perk.required_unlocks:
                    if not bucks_tracker.is_perk_unlocked(required_perk):
                        return False
            return True
        bucks_tracker = sim_info.get_bucks_tracker(add_if_none=True)
        potential_perks_list = list(self.potential_perks_to_grant)
        available_bucks_perks = [perk for perk in potential_perks_list if perk_can_be_unlocked(perk)]
        num_unlocks_remaining = self.number_of_perks_to_grant
        while num_unlocks_remaining > 0 and available_bucks_perks:
            perk = random.choice(available_bucks_perks)
            bucks_tracker.unlock_perk(perk)
            num_unlocks_remaining -= 1
            potential_perks_list.remove(perk)
            # Recompute: unlocking one perk may satisfy another's prerequisites.
            available_bucks_perks = [perk for perk in potential_perks_list if perk_can_be_unlocked(perk)]
123209 | <reponame>Staberinde/data-hub-api
import csv
import io
from contextlib import contextmanager
from chardet import UniversalDetector
from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import FileExtensionValidator
from django.utils.translation import gettext_lazy
class BaseCSVImportForm(forms.Form):
    """Form used for loading a CSV file in the admin site."""
    UNICODE_DECODE_ERROR_MESSAGE = gettext_lazy('There was an error decoding the file contents.')
    MISSING_COLUMNS_MESSAGE = gettext_lazy(
        'This file is missing the following required columns: {missing_columns}.',
    )
    # Subclasses override the field label/help text and the set of column
    # names that must be present in the uploaded CSV header.
    csv_file_field_label = 'CSV file'
    csv_file_field_help_text = None
    required_columns = set()
    csv_file = forms.FileField(
        validators=[FileExtensionValidator(allowed_extensions=('csv',))],
    )
    def __init__(self, *args, **kwargs):
        """Initialises the form, dynamically setting the label of the csv_file field."""
        super().__init__(*args, **kwargs)
        self.fields['csv_file'].label = self.csv_file_field_label
        self.fields['csv_file'].help_text = self.csv_file_field_help_text
    def clean_csv_file(self):
        """Validates the uploaded CSV file and creates a CSV DictReader from it."""
        # This could be an instance of InMemoryUploadedFile or TemporaryUploadedFile
        # (depending on the file size)
        file_field = self.cleaned_data['csv_file']
        # Guess the file encoding (primarily to check for a UTF-8 BOM)
        encoding_detector = UniversalDetector()
        # Feed chunks only until chardet is confident, to avoid reading
        # the entire file when unnecessary.
        for chunk in file_field.chunks():
            encoding_detector.feed(chunk)
            if encoding_detector.done:
                break
        detection_result = encoding_detector.close()
        self.cleaned_data['csv_file_encoding'] = detection_result['encoding']
        # Check that the file can actually be decoded using the detected encoding so that
        # we don't need to worry about encoding errors when reading the CSV
        file_field.seek(0)
        self._validate_encoding()
        # Rewind so the column check reads from the start of the file.
        file_field.seek(0)
        # Check that the CSV file has the required column
        self._validate_columns()
        file_field.seek(0)
        return file_field
    @contextmanager
    def open_file_as_text_stream(self):
        """
        Opens the CSV file in the csv_file field as a text stream.
        Must only be called if is_valid() has returned True.
        """
        encoding = self.cleaned_data['csv_file_encoding']
        csv_file = self.cleaned_data['csv_file']
        csv_file.seek(0)
        stream = io.TextIOWrapper(csv_file, encoding=encoding)
        try:
            yield stream
        finally:
            # Detach the file from TextIOWrapper; this stops it being automatically closed
            stream.detach()
    @contextmanager
    def open_file_as_dict_reader(self):
        """
        Opens the CSV file in the csv_file field as a csv.DictReader.
        Must only be called if is_valid() has returned True.
        """
        with self.open_file_as_text_stream() as stream:
            yield csv.DictReader(stream)
    def _validate_encoding(self):
        """Raise ValidationError if the file cannot be decoded as detected."""
        try:
            # Read the entire file one line at a time to trigger any decoding errors.
            with self.open_file_as_text_stream() as stream:
                for _ in stream:
                    pass
        except UnicodeError as exc:
            raise ValidationError(
                self.UNICODE_DECODE_ERROR_MESSAGE,
                code='unicode-decode-error',
            ) from exc
    def _validate_columns(self):
        """Raise ValidationError if any required column is missing from the header."""
        with self.open_file_as_text_stream() as stream:
            csv_reader = csv.DictReader(stream)
            missing_columns = sorted(self.required_columns - set(csv_reader.fieldnames))
            if missing_columns:
                msg = self.MISSING_COLUMNS_MESSAGE.format(
                    missing_columns=', '.join(missing_columns),
                )
                raise ValidationError(msg, code='missing-columns')
| StarcoderdataPython |
9725369 | <filename>Exercicios/ex037.py
"""Escreva um programa em Python que leia um número inteiro qualquer e peça para o usuário
escolher qual será a base de conversão: 1 para binário, 2 para octal e 3 para hexadecimal."""
n = int(input('Digite um número inteiro'))
print('[1] Binario')
print('[2] Octal')
print('[3] Hexadecimal')
opção = int(input('Digite a sua jogador'))
# Map each menu option to the matching conversion builtin. bin/oct/hex
# prefix their output with '0b'/'0o'/'0x', which the [2:] slice removes.
conversores = {1: bin, 2: oct, 3: hex}
if opção in conversores:
    print(str(conversores[opção](n))[2:])
else:
    print('Você Digitou um Valor invalido')
| StarcoderdataPython |
213025 | <gh_stars>0
#If someone knows a better way to write the next 5 lines, lmk
import os
os.chdir(os.path.dirname(os.path.abspath(__file__)))
import sys
sys.path.append('../')
from libs import ARTracker
tracker = ARTracker.ARTracker(['/dev/video2'], write=False) #ARTracker requires a list of camera files
# Continuously search for AR marker id 0 and report its range and bearing.
while True:
    tracker.findMarker(0)#, id2 = 1)
    print('Distance (in cm): ', tracker.distanceToMarker)
    print('Angle: ', tracker.angleToMarker)
| StarcoderdataPython |
9767183 | from selenium import webdriver
import os
import subprocess
import sys
import json
import requests
path = 'D:\Chrome Driver\chromedriver.exe' # The path of chromedriver.exe(You can give your own path)
# Class
class GitScraper():
    """Selenium + requests helper: log into GitHub, show public profile info,
    create repositories via the web UI, and run git add/commit/push through
    PowerShell (Windows-only)."""
    driverPath = ''
    url = "https://github.com/login"
    add_command = "git add -A"
    commit_command = 'git commit -m "'
    push_command = 'git push -u origin '
    api_url = "https://api.github.com/users/"
    # Stores the chromedriver.exe path for later driver initialisation.
    def __init__(self, driverPath):
        self.driverPath = driverPath
    # Initialises the Chrome driver and opens the GitHub login page.
    def driver_init(self):
        self.driver = webdriver.Chrome(self.driverPath)
        self.driver.get(self.url)
    # Logs into GitHub with credentials typed at the console.
    def login(self):
        username_val = input("Enter your username: ")
        password_val = input("Enter your password: ")
        self.username = self.driver.find_element_by_id("login_field")
        self.password = self.driver.find_element_by_id("password")
        self.button = self.driver.find_element_by_name("commit")
        self.username.send_keys(username_val)
        # NOTE(review): '<PASSWORD>' is a redacted placeholder (not valid
        # Python); this presumably should be send_keys(password_val) -- confirm.
        self.password.send_keys(<PASSWORD>)
        self.button.click()
    # Fetches and prints public profile details from the GitHub REST API.
    # NOTE(review): the prompt text contains a typo ('ussername'); left
    # unchanged because it is runtime output.
    def github_profile(self):
        username_val = input("Enter your ussername: ")
        self.response = requests.get(self.api_url + username_val)
        self.data = json.loads(self.response.text)
        print("\nUsername : " + self.data['login'])
        print("\nName: " + self.data['name'])
        print("\nLocation: " + self.data['location'])
        print("\nBio : " + self.data['bio'])
        print("\nPublic Repos : " + str(self.data['public_repos']))
        print("\nFollowers : " + str(self.data['followers']))
        print("\nFollowing : " + str(self.data['following']))
    # Creates a new repository by filling the github.com 'new repository' form.
    # NOTE(review): no 'Create repository' submit click follows the form fill;
    # confirm the flow is completed elsewhere.
    def new_repo(self):
        repoName = input("Enter your new repository name: ")
        desc_val = input("Enter the description of your new repo.(You can keep it blank)\n:")
        type = input("Your repository should be Public or Private: ")
        self.driver.implicitly_wait(3)
        self.NEW = self.driver.find_element_by_xpath(
            '/html/body/div[6]/div/aside/div[2]/div[1]/div/h2/a')
        self.NEW.click()
        self.Repository = self.driver.find_element_by_id("repository_name")
        self.Repository.send_keys(repoName)
        self.Description = self.driver.find_element_by_id(
            "repository_description")
        self.Description.send_keys(desc_val)
        if type == "Public" or type == "public":
            self.Public = self.driver.find_element_by_id("repository_visibility_public")
            self.Public.click()
        else:
            self.Private = self.driver.find_element_by_id(
                "repository_visibility_private")
            self.Private.click()
    # Runs git add, commit and push via PowerShell; requires git to be
    # installed with a global configuration.
    # NOTE(review): the Popen calls are never waited on, so the three git
    # commands can race each other -- confirm this is acceptable.
    def add_commit_push(self):
        commit_msg = input("Enter your commit message: ")
        branch_name = input("Enter your branch name: ")
        decision = input("Do you want ti change directory: Yes(y) or No(n): ")
        if decision == 'y':
            directory = input("Enter the directory path where your repo is located: ")
            os.chdir(directory)
        completed1 = subprocess.Popen(["powershell.exe", self.add_command], stdout = sys.stdout)
        completed2 = subprocess.Popen(["powershell.exe", self.commit_command + commit_msg + '"'], stdout = sys.stdout)
        completed3 = subprocess.Popen(["powershell.exe", self.push_command + branch_name], stdout = sys.stdout)
        completed4 = subprocess.Popen(["powershell.exe", "cls"], stdout=sys.stdout)
# Object
bot = GitScraper(path)
# Fucntion with their specifications
#bot.driver_init()
#bot.login()
#bot.new_repo()
#bot.add_commit_push()
#bot.github_profile() | StarcoderdataPython |
1954031 | from random import randrange
from enum import Enum
import pyxel
class App:
    """Minimal Flappy Bird clone: a TITLE -> INGAME -> RESULT state machine
    driven by pyxel's update/draw callbacks on a 160x120 window."""
    class State(Enum):
        # Game phases; TITLE and RESULT both wait for a click to (re)start.
        TITLE = 0
        INGAME = 1
        RESULT = 2
    def __init__(self):
        """Create the window, load sprite/sound assets and start the loop."""
        pyxel.init(160, 120, caption="Flappy Bird")
        pyxel.load("assets/flappybird.pyxres")
        self.state = App.State.TITLE
        self.guide = "Click to start game!!"
        self.reset()
        pyxel.run(self.update, self.draw)
    def reset(self):
        """Reset score, player position/velocity and regenerate the pipes."""
        self.score = 0
        self.player_x = 72
        self.player_y = 32
        self.player_vy = 0
        self.player_is_alive = True
        # Four pipes spaced 80px apart; each is (x, gap_center_y, is_active).
        self.pipe = [((i+2) * 80, randrange(16, 88, 8), True) for i in range(4)]
        self.last_offset = 0
    def update(self):
        """Per-frame logic: input, physics, pipe movement, state transitions."""
        if pyxel.btnp(pyxel.KEY_0):
            pyxel.quit()
        if self.state in (App.State.TITLE, App.State.RESULT):
            # A click (re)starts the game from either menu screen.
            if pyxel.btnp(pyxel.MOUSE_LEFT_BUTTON):
                self.state = App.State.INGAME
                self.reset()
        else:
            if self.player_is_alive:
                self.update_player()
            for i, v in enumerate(self.pipe):
                self.pipe[i] = self.update_pipe(*v)
            self.update_gravity()
            # y >= 96 means the player has hit the ground.
            if self.player_y >= 96:
                if self.player_is_alive:
                    self.player_is_alive = False
                self.player_y = 96
                self.state = App.State.RESULT
    def update_gravity(self):
        """Apply gravity: clamp fall speed to 4 and the ground at y=96."""
        self.player_y = min(self.player_y + self.player_vy, 96)
        self.player_vy = min(self.player_vy + 1, 4)
    def update_player(self):
        """Flap: a click gives the player an upward impulse."""
        if pyxel.btnp(pyxel.MOUSE_LEFT_BUTTON) and self.player_is_alive:
            self.player_vy = -4
    def update_pipe(self, x, y, is_active):
        """Advance one pipe, handling pass/collision; returns its new state tuple."""
        if is_active:
            if x + 8 >= self.player_x >= x - 8:
                # Player overlaps the pipe column: inside the gap plays a
                # sound, outside the gap kills the player.
                if y + 16 >= self.player_y >= y - 16:
                    pyxel.play(3, 0)
                else:
                    self.player_is_alive = False
            elif self.player_x > x - 8:
                # Pipe cleared: score it once, then deactivate until recycled.
                is_active = False
                self.score += 1
        x -= 2
        if x < -40:
            # Recycle the off-screen pipe to the right with a fresh gap.
            x += 320
            y = randrange(16, 88, 8)
            is_active = True
        return x, y, is_active
    def draw(self):
        """Render background, pipes, player and the state-dependent text."""
        pyxel.cls(12)
        # draw forest (scrolls while alive; frozen at last_offset on death)
        offset = pyxel.frame_count % 160 if self.player_is_alive else self.last_offset
        for i in range(2):
            pyxel.blt(i * 160 - offset, 72, 0, 0, 16, 160, 48, 12)
        self.last_offset = offset
        # draw pipe (gap-edge tiles at y±16, solid tiles above/below the gap)
        for x, y, is_active in self.pipe:
            for y_offset in range(0, 104, 8):
                if y_offset == y-16:
                    pyxel.blt(x, y_offset, 0, 32, 8, 16, 8, 12)
                elif y_offset == y+16:
                    pyxel.blt(x, y_offset, 0, 32, 0, 16, 8, 12)
                elif y_offset > y+16 or y_offset < y-16:
                    pyxel.blt(x, y_offset, 0, 48, 0, 16, 8, 12)
        # draw player (sprite switches with vertical direction)
        pyxel.blt(
            self.player_x,
            self.player_y,
            0,
            16 if self.player_vy > 0 else 0,
            0,
            16,
            16,
            12,
        )
        # draw score (each string drawn twice, offset by 1px, as a drop shadow)
        if self.state is App.State.TITLE:
            pyxel.text(32, 16, self.guide, 1)
            pyxel.text(33, 16, self.guide, 7)
        elif self.state is App.State.INGAME:
            s = "SCORE {:>4}".format(self.score)
            pyxel.text(5, 4, s, 1)
            pyxel.text(4, 4, s, 7)
        elif self.state is App.State.RESULT:
            s = "RESULT {:>4}".format(self.score)
            pyxel.text(56, 48, s, 1)
            pyxel.text(57, 48, s, 7)
            pyxel.text(40, 64, self.guide, 1)
            pyxel.text(41, 64, self.guide, 7)
App()
| StarcoderdataPython |
1738110 | import os
import testinfra.utils.ansible_runner
import pytest
import yaml
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
with open('../../defaults/main.yml') as vars_yml:
vars = yaml.safe_load(vars_yml)
with open('converge.yml') as playbook_yml:
playbook = yaml.safe_load(playbook_yml)
vars.update(playbook[0]['vars'])
@pytest.mark.parametrize('name', [
    ('build-essential'),
    ('cmake'),
    ('mesa-common-dev'),
    ('libglu1-mesa-dev'),
    ('libglew-dev'),
    ('libxtst-dev'),
    ('libxrandr-dev'),
    ('libpng-dev'),
    ('libjpeg-dev'),
    ('zlib1g-dev'),
    ('libbz2-dev'),
    ('libogg-dev'),
    ('libvorbis-dev'),
    ('libc6-dev'),
    ('yasm'),
    ('libasound2-dev'),
    ('libpulse-dev'),
    ('binutils-dev'),
    ('libgtk2.0-dev'),
    ('libmad0-dev'),
    ('libudev-dev'),
    ('libva-dev'),
    ('nasm'),
    ('git')
])
def test_package_is_installed(host, name):
    """Every StepMania build dependency must be installed on the host."""
    package = host.package(name)
    assert package.is_installed
def test_stepmania_directory_ownership(host):
    """The install directory must exist and be owned by the Ansible user."""
    directory = host.file(vars['stepmania_install_path'])
    assert directory.exists
    assert directory.is_directory
    assert directory.user == vars['ansible_env']['USER']
    assert directory.group == vars['ansible_env']['USER']
def test_stepmania_is_installed(host):
    """The stepmania binary must be on the PATH."""
    assert host.exists('stepmania')
def test_stepmania_desktop_entry_exists(host):
    """A .desktop launcher entry must have been installed system-wide."""
    stepmania_desktop = host.file('/usr/share/applications/stepmania.desktop')
    assert stepmania_desktop.exists
    assert stepmania_desktop.is_file
| StarcoderdataPython |
3566704 | <gh_stars>0
import logging
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from playlist_creation.forms import PlaylistCreationForm
from playlist_creation.models import Playlist
logger = logging.getLogger(__name__)
def home(request):
    """Landing page: create a Playlist from the submitted form, stash its id
    in the session, then redirect into the Spotify OAuth flow."""
    if request.method == 'POST':
        form = PlaylistCreationForm(request.POST)
        if form.is_valid():
            playlist: Playlist = form.save()
            # Remember which playlist is being built across the OAuth redirect.
            request.session['playlist'] = str(playlist.id)
            return HttpResponseRedirect(reverse('social:begin',
                                                args=('spotify', )))
        else:
            # Surface both field-level and form-level errors as flash messages.
            for field in form:
                for error in field.errors:
                    messages.error(request, error)
            for error in form.non_field_errors():
                messages.error(request, error)
    else:
        logger.info('/home/ was called')
        form = PlaylistCreationForm()
    return render(request, 'home.html', {'form': form})
def thank_you(request):
    """Render the thank-you page, showing the created playlist URL at most once.

    The URL is popped from the session so it only appears on the first visit
    after creation; later visits render the page without it.
    """
    # pop() with a default replaces the previous try/except KeyError dance
    # (and the misleading `playlist_url: str = None` annotation).
    playlist_url = request.session.pop('playlist_url', None)
    return render(request, 'thank_you.html', {'playlist_url': playlist_url})
| StarcoderdataPython |
3523344 | <filename>day_10_1.py
from helper import get_input
def main():
    """Day 10 part 1: product of the 1-jolt and 3-jolt adapter differences."""
    # Sorted adapter joltages from the puzzle input (blank lines dropped).
    data = sorted(int(line) for line in get_input(10).split('\n') if line)
    # The device's built-in adapter is always 3 jolts above the maximum.
    data.append(data[-1] + 3)
    # Pairwise differences, starting from the 0-jolt outlet.
    diffs = [b - a for a, b in zip([0] + data, data)]
    print(diffs.count(1) * diffs.count(3))
if __name__ == '__main__':
main()
| StarcoderdataPython |
6469679 | <gh_stars>1-10
import pytest
from pyspark.sql import DataFrame
from pyspark.sql.types import StringType, IntegerType
import blizz.check
from blizz import Field, Relation
from test.conftest import get_or_create_spark_session, path_student_performance_test
from test.test_spark_feature_library.data_sources import StudentPerformance
class StudentPerformanceFaulty1(Relation):
    """
    Example of a defined field missing.
    """
    THIS_IS_MISSING = Field(name="I'm missing")  # deliberately absent from the CSV
    @classmethod
    @blizz.check.fields
    def load(cls) -> DataFrame:
        """Load the test CSV; @blizz.check.fields should reject the missing field."""
        return get_or_create_spark_session().read.csv(
            path=path_student_performance_test().as_posix(),
            inferSchema=True,
            header=True,
        )
class StudentPerformanceFaulty2(Relation):
    """
    Example of a defined field with faulty datatype.
    """
    # this is actually a DoubleType:
    MARKS = Field(name="Marks", datatype=StringType)
    @classmethod
    @blizz.check.fields
    @blizz.check.types
    def load(cls) -> DataFrame:
        """Load the test CSV; @blizz.check.types should flag MARKS' wrong type."""
        return get_or_create_spark_session().read.csv(
            path=path_student_performance_test().as_posix(),
            inferSchema=True,
            header=True,
        )
def test_field_existence_check() -> None:
    """
    Loading must fail because THIS_IS_MISSING does not exist in the CSV.
    """
    with pytest.raises(
        expected_exception=ValueError,
        match="Field\(s\) 'I'm missing' not part of loaded Relation 'StudentPerformanceFaulty1'",
    ):
        StudentPerformanceFaulty1.load()
def test_field_type_check() -> None:
    """
    Loading must fail because MARKS is declared StringType but is a DoubleType.
    """
    with pytest.raises(
        expected_exception=ValueError,
        match="Type error for 'StudentPerformanceFaulty2.Marks'*",
    ):
        StudentPerformanceFaulty2.load()
class StudentPerformanceFaulty3(Relation):
    """
    Example of a duplicated field defined as key.
    """
    STUDENT_ID = Field(name="Student_ID", datatype=StringType)
    # this is actually not the key:
    SEMSTER_NAME = Field("Semster_Name", datatype=StringType, key=True)
    PAPER_ID = Field(name="Paper_ID", datatype=StringType)
    MARKS = Field(name="Marks", datatype=IntegerType)
    @classmethod
    @blizz.check.fields
    @blizz.check.types
    @blizz.check.keys
    def load(cls) -> DataFrame:
        """Load the test CSV; @blizz.check.keys should reject the non-unique key."""
        return get_or_create_spark_session().read.csv(
            path=path_student_performance_test().as_posix(),
            inferSchema=True,
            header=True,
        )
def test_key_check() -> None:
    """
    Loading must fail because SEMSTER_NAME is not a unique key column.
    """
    with pytest.raises(
        expected_exception=ValueError,
        match="Key error for 'StudentPerformanceFaulty3'*",
    ):
        StudentPerformanceFaulty3.load()
def test_passes_checks() -> None:
    # The correctly-defined StudentPerformance relation loads without errors.
    sdf = StudentPerformance.load()
    assert sdf is not None
| StarcoderdataPython |
3213996 | # Crie uma lista chamada 'números' e duas funções: sorteia(), que vai sortear 5 números e colocá-los dentro da lista, e soma_par() que vai mostrar a soma entre todos números pares da função anterior
# não tem muita necessidade da primeira função, o sample() já faz isso
from random import sample
from time import sleep
def sorteia(numeros):
    """Print the 5 pre-drawn numbers one by one, 1 s apart for suspense."""
    print('Sorteando os 5 valores da lista: ', end='', flush=True)
    for n in numeros:
        sleep(1)
        print(n, end=' ', flush=True)
    print()
def soma_par(numeros):
    """Print the even numbers from `numeros`, then print their sum."""
    print('Somando os valores pares: ', end='', flush=True)
    tot_par = 0
    for n in numeros:
        if n % 2 == 0:
            sleep(1)
            print(n, end=' ', flush=True)
            tot_par += n
    sleep(1)
    print(f'resulta em {tot_par}')
    print()
numeros = sample(range(1, 10), 5)
numeros.sort()
sorteia(numeros)
soma_par(numeros)
| StarcoderdataPython |
6527976 | #!/usr/bin/env python
# Copyright (c) 2016-2017 Spotify AB.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import operator
def filterjson(event, filter, matchtype, listcondition="or"):
    """Recursively match a decoded-JSON event against a filter of the same shape.

    Dicts are matched key by key, lists according to ``listcondition``
    ("or": any element may satisfy the filter, "and": all must), and leaf
    strings are compared via matchstr() using ``matchtype``.
    Returns True when the event satisfies the filter.
    (Python 2 code: uses iteritems()/basestring.)
    """
    match = True
    if isinstance(filter, dict):
        for k, v in filter.iteritems():
            if k in event:
                if isinstance(v, dict):
                    match = filterjson(event[k], v, matchtype, listcondition)
                elif isinstance(v, list) and isinstance(event[k], list):
                    match = filterjson(event[k], v, matchtype, listcondition)
                else:
                    # Match filter string against object array
                    if isinstance(event[k], list):
                        for e in event[k]:
                            match = filterjson(e, v, matchtype)
                            if match:
                                break
                    # Match filter string against object string, int, bool
                    else:
                        match = matchstr(event[k], v, matchtype)
            else:
                # For count checks, handle a missing key
                # as if the key were an empty list
                if matchtype == "count":
                    match = matchstr([], v, matchtype)
                else:
                    match = False
            if not match:
                break
    elif isinstance(filter, list) and isinstance(event, list):
        # NOTE(review): this break only exits the inner loop, so a later
        # filter element can overwrite an earlier match -- confirm the
        # intended "or"/"and" semantics for multi-element filters.
        for f in filter:
            for e in event:
                match = filterjson(e, f, matchtype, listcondition)
                if ((listcondition == 'or' and match) or
                        (listcondition == 'and' and not match)):
                    break
    elif isinstance(filter, list):
        for v in filter:
            match = filterjson(event, v, matchtype, listcondition)
            if ((listcondition == 'or' and match) or
                    (listcondition == 'and' and not match)):
                break
    elif isinstance(filter, basestring):
        match = matchstr(event, filter, matchtype)
    else:
        # Bug fix: raising a bare string is itself a TypeError at runtime
        # (string exceptions were removed in Python 2.6, PEP 352) -- raise
        # a real exception so the intended message reaches the caller.
        raise TypeError("ERROR, unknown object encountered")
    return match
def matchstr(estr, fstr, matchtype):
    """Match one event value against a filter string.

    matchtype selects the comparison: 'exact', 'partial' (substring),
    'regex', 'numeric' ("<lt|gt|eq> N" against an int), or 'count'
    ("<lt|gt|eq> N" against the length of a list, 1 for scalars).
    Returns a bool; raises ValueError for an unknown matchtype.
    """
    if matchtype == 'count' or matchtype == 'numeric':
        ops = {"lt": operator.lt,
               "gt": operator.gt,
               "eq": operator.eq}
        # Filter strings look like "eq 3" / "lt 10" / "gt 0".
        op, val = fstr.split()
        val = int(val)
    if matchtype == 'exact':
        match = (fstr == estr)
    elif matchtype == 'partial':
        # Bug fix: str.find() returns an index, so a hit at position 0 was
        # falsy and a miss (-1) was truthy; test containment explicitly.
        match = (estr.find(fstr) != -1)
    elif matchtype == 'regex':
        match = (re.search(fstr, estr) is not None)
    elif matchtype == 'numeric':
        # Bug fix: a misplaced closing parenthesis previously produced
        # isinstance(estr, (int, long) and ops[op](estr, val)), i.e. an
        # isinstance check against a bool, instead of the intended
        # type-check AND comparison.
        match = isinstance(estr, (int, long)) and ops[op](estr, val)
    elif matchtype == 'count':
        # All other objects than lists are single objects
        if isinstance(estr, list):
            objlen = len(estr)
        else:
            objlen = 1
        match = ops[op](objlen, val)
    else:
        # Bug fix: raising a bare string is a TypeError at runtime
        # (string exceptions were removed in Python 2.6, PEP 352).
        raise ValueError("ERROR: unknown mode")
    return match
| StarcoderdataPython |
3448253 | <gh_stars>1-10
"""Unit testing of User Created Objects Model"""
from django.forms import ValidationError
from django.test import TestCase, tag
from BookClub.models import ForumPost
from BookClub.models.abstract_user_objects import UserCreatedObject
@tag('models', 'user_created')
class UserCreatedObjectTestCase(TestCase):
    """User Created Objects Model, Fields, Validation and Methods Testing"""
    fixtures = [
        'BookClub/tests/fixtures/default_user_created_objects.json',
        'BookClub/tests/fixtures/default_users.json'
    ]
    def setUp(self):
        # ForumPost serves as a concrete stand-in for the abstract UserCreatedObject.
        self.usercreobj = ForumPost.objects.get(pk=1)
    def assertValid(self):
        """Helper: fail the test if full_clean() raises ValidationError."""
        try:
            self.usercreobj.full_clean()
        except ValidationError:
            self.fail('Test user created object should be valid')
    def assertInvalid(self):
        """Helper: assert that full_clean() raises ValidationError."""
        with self.assertRaises(ValidationError):
            self.usercreobj.full_clean()
    def test_valid_usercreatedobject(self):
        self.assertValid()
    def test_forum_subclasses_user_created_object(self):
        assert issubclass(ForumPost, UserCreatedObject)
    def test_creator_cannot_be_blank(self):
        self.usercreobj.creator = None
        self.assertInvalid()
| StarcoderdataPython |
4946437 | import json
import random
import string
import requests
base_url = "https://accounts-new.dev.ukr.net"
url = "https://accounts-new.dev.ukr.net/api/v1/registration/reserve_login"
def test_used_login(get_registration_cookies):
    """An already-taken login ('test') must be reported as unavailable."""
    payload = "{\"login\":\"test\"}"
    headers = {
        'Content-Type': 'application/json',
        'Cookie': get_registration_cookies
    }
    response = requests.request("POST", base_url + "/api/v1/registration/reserve_login", headers=headers, data=payload)
    print(response.text.encode('utf8'))
    resp_body = response.json()
    assert resp_body['available'] is False
    # print(response.headers)
    # print(response.headers.get('Date'))
    # json.dumps()
def test_unused_login(get_registration_cookies):
    """A random 1-32 char alphanumeric login should be reported as available."""
    payload = "{\"login\":\"" + ''.join(random.choices(string.ascii_letters + string.digits, k = random.randrange(1, 32))) + "\"}"
    headers = {
        'Content-Type': 'application/json',
        'Cookie': get_registration_cookies
    }
    response = requests.request("POST", base_url + "/api/v1/registration/reserve_login", headers=headers, data=payload)
    print(payload)
    print(response.text.encode('utf8'))
    resp_body = response.json()
    assert resp_body['available'] is True
| StarcoderdataPython |
5098379 | <gh_stars>10-100
import logging
from datetime import timedelta
from decimal import Decimal
from typing import Dict
import pytest
from tinkoff.invest import (
CandleInterval,
MoneyValue,
PortfolioPosition,
PortfolioResponse,
Quotation,
)
from tinkoff.invest.strategies.base.account_manager import AccountManager
from tinkoff.invest.strategies.moving_average.plotter import (
MovingAverageStrategyPlotter,
)
from tinkoff.invest.strategies.moving_average.signal_executor import (
MovingAverageSignalExecutor,
)
from tinkoff.invest.strategies.moving_average.strategy import MovingAverageStrategy
from tinkoff.invest.strategies.moving_average.strategy_settings import (
MovingAverageStrategySettings,
)
from tinkoff.invest.strategies.moving_average.supervisor import (
MovingAverageStrategySupervisor,
)
from tinkoff.invest.strategies.moving_average.trader import MovingAverageStrategyTrader
from tinkoff.invest.typedefs import AccountId, ShareId
logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
@pytest.fixture()
def token() -> str:
    """Placeholder API token; these tests never contact the real service."""
    return "some"
@pytest.fixture()
def portfolio_positions() -> Dict[str, PortfolioPosition]:
    """One share position keyed by its FIGI, mimicking the portfolio API."""
    return {
        "BBG004730N88": PortfolioPosition(
            figi="BBG004730N88",
            instrument_type="share",
            quantity=Quotation(units=110, nano=0),
            average_position_price=MoneyValue(
                currency="rub", units=261, nano=800000000
            ),
            expected_yield=Quotation(units=-106, nano=-700000000),
            current_nkd=MoneyValue(currency="", units=0, nano=0),
            average_position_price_pt=Quotation(units=0, nano=0),
            current_price=MoneyValue(currency="rub", units=260, nano=830000000),
        )
    }
@pytest.fixture()
def balance() -> MoneyValue:
    """Available RUB currency balance (units + nano parts)."""
    return MoneyValue(currency="rub", units=20050, nano=690000000)
@pytest.fixture()
def portfolio_response(
    portfolio_positions: Dict[str, PortfolioPosition],
    balance: MoneyValue,
) -> PortfolioResponse:
    """Full portfolio snapshot built from the position and balance fixtures."""
    return PortfolioResponse(
        total_amount_shares=MoneyValue(currency="rub", units=28691, nano=300000000),
        total_amount_bonds=MoneyValue(currency="rub", units=0, nano=0),
        total_amount_etf=MoneyValue(currency="rub", units=0, nano=0),
        # Currency total doubles as the free balance fixture.
        total_amount_currencies=balance,
        total_amount_futures=MoneyValue(currency="rub", units=0, nano=0),
        expected_yield=Quotation(units=0, nano=-350000000),
        positions=list(portfolio_positions.values()),
    )
@pytest.fixture()
def figi() -> str:
    """FIGI of the instrument under test (distinct from the portfolio position's)."""
    return "BBG0047315Y7"
@pytest.fixture()
def account_id() -> AccountId:
    """Dummy account id; annotated with the AccountId typedef it actually returns."""
    return AccountId("1337007228")
@pytest.fixture()
def settings(figi: str, account_id: AccountId) -> MovingAverageStrategySettings:
    """Strategy settings: 1-minute candles, 100/20-minute long/short windows."""
    return MovingAverageStrategySettings(
        share_id=ShareId(figi),
        account_id=account_id,
        max_transaction_price=Decimal(10000),
        candle_interval=CandleInterval.CANDLE_INTERVAL_1_MIN,
        long_period=timedelta(minutes=100),
        short_period=timedelta(minutes=20),
        std_period=timedelta(minutes=30),
    )
class TestMovingAverageStrategyTrader:
    """Integration-style test: 50 trade iterations must move the balance."""

    @pytest.mark.freeze_time()
    def test_trade(
        self,
        moving_average_strategy_trader: MovingAverageStrategyTrader,
        strategy: MovingAverageStrategy,
        account_manager: AccountManager,
        signal_executor: MovingAverageSignalExecutor,
        plotter: MovingAverageStrategyPlotter,
        supervisor: MovingAverageStrategySupervisor,
        caplog,
        freezer,
    ):
        # Only the last set_level call takes effect; the original set DEBUG
        # and immediately overwrote it with INFO, so the DEBUG call is dropped.
        caplog.set_level(logging.INFO)
        initial_balance = account_manager.get_current_balance()
        for i in range(50):
            logger.info("Trade %s", i)
            moving_average_strategy_trader.trade()
        current_balance = account_manager.get_current_balance()
        # Trading activity should have changed the account balance.
        assert initial_balance != current_balance
        logger.info("Initial balance %s", initial_balance)
        logger.info("Current balance %s", current_balance)
        events = supervisor.get_events()
        plotter.plot(events)
| StarcoderdataPython |
6544155 | <reponame>machism0/bimodal-qd-micropillars
import numpy as np
import pandas as pd
def variable_names():
return ['Re(Es)', 'Im(Es)', '|Ew|^2', 'rho', 'n']
def param_names():
return ['kappa_s', 'kappa_w', 'mu_s', 'mu_w', 'epsi_ss', 'epsi_ww', 'epsi_sw', 'epsi_ws', 'beta', 'J_p', 'eta',
'tau_r', 'S_in', 'V', 'Z_QD', 'n_bg', 'tau_sp', 'T_2', 'A', 'hbar_omega', 'epsi_tilda', 'J', 'feed_phase',
'feed_ampli', 'tau_fb', 'epsi0', 'hbar', 'e0', 'alpha_par', 'omega1']
def param_plot_names():
return ['\\kappa_s', '\\kappa_w', '\\mu_s', '\\mu_w', '\\epsilon_{ss}', '\\epsilon_{ww}', '\\epsilon_{sw}',
'\\epsilon_{ws}', '\\beta', 'J_p', '\\eta', '\\tau_r', 'S^{in}', 'V', 'Z^{QD}', 'n_{bg}', '\\tau_{sp}',
'T_2', 'A', 'hbar\\omega', '\\epsilon_0n_{bg}c_{0}', 'J', 'Feedback Phase', 'Feedback Amp', '\\tau_{fb}',
'\\epislon0', 'hbar', 'e_0', '\\alpha', '\\omega1', '\\omega2']
class Branch:
def __init__(self, matlab_branch):
points = matlab_branch['point'][0, 0]
x = np.concatenate(points['x'].squeeze(), axis=1).astype(np.float).T
params = np.concatenate(points['parameter'].squeeze(), axis=0).astype(np.float)
nunst = matlab_branch['nunst'][0, 0]
ind_hopf = matlab_branch['indHopf'][0, 0] - 1
ind_fold = matlab_branch['indFold'][0, 0] - 1
hopf, fold = np.zeros_like(nunst, dtype=np.bool), np.zeros_like(nunst, dtype=np.bool)
hopf[ind_hopf, :] = True
fold[ind_fold, :] = True
intensity_x1x2 = np.square(np.linalg.norm(x[:, 0:2], axis=1))[:, np.newaxis]
self.variable_names = variable_names()
self.param_names = param_names()
self.extended_names = ['|Es|^2', 'nunst']
data = np.concatenate([x, params, intensity_x1x2, nunst], axis=1)
col_names = self.variable_names + self.param_names + self.extended_names
self._df = pd.DataFrame(data=data, columns=col_names)
self._df = self._df.assign(hopf=hopf, fold=fold)
self._active_params = self._get_active_params()
def _get_active_params(self):
names = []
for name in self.param_names:
if np.allclose(self._df.loc[0, name], self._df.loc[:, name]):
pass
else:
names.append(name)
return self.df.columns.intersection(names)
@property
def df(self):
return self._df
@property
def params(self):
return self.df.loc[:, self.param_names]
@property
def cont_params(self):
return self._active_params
@property
def cont(self):
return self._df.loc[:, self.cont_params]
@property
def vars(self):
return self._df.loc[:, self.variable_names]
@property
def x(self):
cols = self.cont_params.drop('omega1', errors='ignore')
cols = cols.insert(loc=0, item='|Es|^2')
return self._df.loc[:, cols]
| StarcoderdataPython |
4953234 | <gh_stars>1-10
import tkinter as tk
from .menu import MenuContainer
class Menu(tk.Toplevel):
    """Borderless popup window that behaves like a dropdown menu.

    Wraps a MenuContainer in an undecorated Toplevel anchored below its
    master widget; hidden by default and toggled via show()/hide().
    """
    def __init__(self, master, name, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.base = master.base
        self.master = master
        self.configure(bg='#e8e8e8')
        # Start hidden; show() deiconifies on demand.
        self.withdraw()
        # Strip window decorations so the Toplevel looks like a menu.
        self.overrideredirect(True)
        # self.wm_attributes("-topmost", 1)
        self.menu = MenuContainer(self, name)
        self.menu.pack(fill=tk.BOTH, expand=True, padx=1, pady=1)
        self.configure_bindings()
    def configure_bindings(self):
        """Hide the menu when it loses focus or Escape is pressed."""
        self.bind("<FocusOut>" , self.hide)
        self.bind("<Escape>", self.hide)
    def show(self, *args):
        """Position the menu just below the master widget and focus it."""
        self.update_idletasks()
        # Anchor at the master's bottom-left corner (screen coordinates).
        x = self.master.winfo_rootx()
        y = self.master.winfo_rooty() + self.master.winfo_height()
        self.wm_geometry(f"+{x}+{y}")
        self.deiconify()
        self.focus_set()
    def hide(self, *args):
        """Withdraw the menu window (it is not destroyed)."""
        self.withdraw()
    def add_first_item(self, text, command):
        """Delegate: add an item at the top of the contained menu."""
        self.menu.add_first_item(text, command)
    def add_item(self, text, command):
        """Delegate: append an item to the contained menu."""
        self.menu.add_item(text, command)
    def add_last_item(self, text, command):
        """Delegate: add an item at the bottom of the contained menu."""
        self.menu.add_last_item(text, command)
    def add_separator(self):
        """Delegate: append a separator to the contained menu."""
        self.menu.add_separator()
| StarcoderdataPython |
3550524 | <reponame>dlu-ch/trac-ticketdependencyplugin
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import pkg_resources
import trac.core
import trac.web.api
import trac.web.chrome
import trac.resource
import trac.ticket.model
import trac.util.text
import trac.util.translation
import genshi.builder
import model
_, add_domain = trac.util.translation.domain_functions("ticketdependency", ("_", "add_domain"))
def hyperlink_to_ticket(req, ticket, text_format):
    """Return an <a> element linking to *ticket*.

    *text_format* may use the {id} and {title} placeholders; the link is
    styled with the ticket's status class and titled with its summary.
    """
    # ticket: trac.ticket.model.Ticket
    summary = trac.util.text.shorten_line(ticket["summary"])
    label = text_format.format(id=ticket.id, title=summary)
    return genshi.builder.tag.a(
        label,
        title=summary,
        class_=ticket["status"],
        href=req.href.ticket(ticket.id),
    )
class TicketDependencyTemplateStreamFilter(trac.core.Component):
    """Trac component rendering ticket-dependency information.

    Implements ITemplateStreamFilter to inject dependency links into the
    ticket view, the ticket change history and the custom-query pages.
    """
    trac.core.implements(trac.web.api.ITemplateStreamFilter)
    def __init__(self):
        # Register this plugin's own translation catalog with Trac.
        locale_dir = pkg_resources.resource_filename(__name__, "locale") # path of locale directory
        add_domain(self.env.path, locale_dir)
    # ITemplateStreamFilter methods
    def filter_stream(self, req, method, filename, stream, data):
        """Dispatch on the template being rendered; mutates *data* in place."""
        if filename in ('ticket.html', 'ticket_box.html', 'ticket_preview.html'):
            self._render_ticket_fields(req, data)
        elif filename in ('query.html', 'query_results.html'):
            self._render_query(req, data)
        return stream
    def _render_ticket_fields(self, req, data):
        """Render the dependency field on the ticket page.

        Shows outgoing dependencies (tickets this one depends on) and
        incoming ones (supertickets), plus a short arrow legend.
        """
        for field in data.get("fields", []):
            if field["name"] == model.TICKETREF:
                field["label"] = _(model.TICKETREF_LABEL)
        ticket = data["ticket"]
        rendered_lines = []
        subticket_ids = model.ticket_ids_from_field_value(ticket[model.TICKETREF]) # None if new
        if subticket_ids:
            rendered_lines.append(self._link_ticket_list(req, subticket_ids))
        superticket_ids = model.query_supertickets(self.env, ticket.id)
        if superticket_ids:
            rendered_lines.append(
                self._link_ticket_list(req, superticket_ids, is_super=True,
                                       element_id='tref_ticketid_super'))
        # Interleave the rendered lines with <br/> separators (drop trailing one).
        rendered_lines = sum([[i, genshi.builder.tag.br()] for i in rendered_lines], [])[:-1]
        if rendered_lines:
            field["rendered"] = genshi.builder.tag([
                genshi.builder.tag.span(rendered_lines),
                # U+25BA: BLACK RIGHT-POINTING POINTER
                genshi.builder.tag.a(_(u'(A \u25BA B: A depends on B)'), style='float: right')
            ])
        for changes in data.get("changes", []):
            # yellow box in tickets's Change History
            ticketref_change = changes.get("fields", {}).get(model.TICKETREF)
            if ticketref_change:
                self._render_ticketref_change(req, ticketref_change)
    def _render_ticketref_change(self, req, ticketref_change):
        """Render one change of the dependency field as added/removed links.

        Mutates *ticketref_change*: sets "rendered" and localizes "label".
        """
        # req: Request
        # ticketref_change: Dictionary describing modification of field model.TICKETREF
        old_ids = model.ticket_ids_from_field_value(ticketref_change.get("old"))
        new_ids = model.ticket_ids_from_field_value(ticketref_change.get("new"))
        comma = genshi.builder.tag.span(u', ')
        added_elements = []
        for ticket_id in sorted(new_ids - old_ids):
            ticket = self._create_ticket_from_id(ticket_id)
            if ticket:
                added_elements.append(hyperlink_to_ticket(req, ticket, '#{id}'))
        if added_elements:
            # Comma-separate the links and append the localized "added" suffix.
            added_elements = sum([[i, comma] for i in added_elements], [])[:-1] + [
                genshi.builder.tag.span(' ' + _('added'))
            ]
        removed_elements = []
        for ticket_id in sorted(old_ids - new_ids):
            ticket = self._create_ticket_from_id(ticket_id)
            if ticket:
                removed_elements.append(hyperlink_to_ticket(req, ticket, '#{id}'))
        if removed_elements:
            removed_elements = sum([[i, comma] for i in removed_elements], [])[:-1] + [
                genshi.builder.tag.span(' ' + _('removed'))
            ]
        elements = added_elements
        if elements and removed_elements:
            elements.append(comma)
        elements.extend(removed_elements)
        if elements:
            ticketref_change["rendered"] = genshi.builder.tag.span(elements)
        ticketref_change["label"] = _(model.TICKETREF_LABEL)
    # for "Custom Query"
    def _render_query(self, req, data):
        """Localize the dependency field/column on the custom-query pages."""
        fields_tref = data.get("fields", {}).get(model.TICKETREF)
        if fields_tref:
            # name of checkbox for filter and column title with "Show under each result"
            fields_tref["label"] = _(model.TICKETREF_LABEL)
            if fields_tref["type"] == u"textarea":
                if isinstance(data.get("all_columns"), list):
                    data["all_columns"].append(model.TICKETREF)
        # column title of result
        for header in data.get("headers", []):
            if header["name"] == model.TICKETREF:
                header["label"] = _(model.TICKETREF_LABEL)
    def _link_ticket_list(self, req, ticket_ids, is_super=False, element_id='tref_ticketid'):
        """Return a span of <br/>-separated ticket links, or None if empty.

        *is_super* selects the arrow direction (incoming vs outgoing
        dependency).
        """
        if not ticket_ids:
            return None
        if is_super:
            text_format = u'\u25C4 #{id}' # U+25C4: BLACK LEFT-POINTING POINTER
        else:
            text_format = u'\u25BA #{id}' # U+25BA: BLACK RIGHT-POINTING POINTER
        text_format += ' - {title}'
        separator = genshi.builder.tag.br()
        elements = []
        for ticket_id in sorted(ticket_ids):
            ticket = self._create_ticket_from_id(ticket_id)
            if ticket:
                elements.append(hyperlink_to_ticket(req, ticket, text_format))
                elements.append(separator)
        # Drop the trailing separator.
        elements = elements[:-1]
        return genshi.builder.tag.span(elements, id=element_id)
    def _create_ticket_from_id(self, ticket_id):
        """Return the Ticket for *ticket_id*, or None if it does not exist."""
        try:
            return trac.ticket.model.Ticket(self.env, ticket_id)
        except trac.resource.ResourceNotFound:
            # Dangling references are logged and skipped, not fatal.
            self.log.warn("ticket not found (ignored): {}".format(ticket_id))
| StarcoderdataPython |
242392 | <reponame>robertogoes/exercicios-coursera-python
# Read an integer and report whether it contains two equal adjacent digits.
n = int(input("Digite o valor a ser avaliado:"))
# Work on the absolute value: Python's floor division keeps negative numbers
# negative (e.g. -1 // 10 == -1), so the digit-extraction loop below would
# compare garbage digits for negative input.
n = abs(n)
ac1 = n % 10                  # least-significant digit
achouadjacente = False
n = n // 10
while n != 0 and not achouadjacente:
    ac2 = n % 10              # next digit, scanning right to left
    if ac2 == ac1:
        achouadjacente = True
    n = n // 10
    ac1 = ac2
if achouadjacente:
    print("Nesse número há dois números adjacentes!")
else:
    print("Nesse número não há dois números adjacentes!")
| StarcoderdataPython |
25690 | <reponame>boyuhou/security-data
import click
import logging
import datetime
import pandas as pd
from security_data import SecurityService
DATE_FORMAT = '%Y%m%d'
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %I:%M:%S')
logger = logging.getLogger(__name__)
@click.command()
@click.option('--input_file', default=r'.\config\FrogBoxList.csv', help='Trade data input folder')
# pandas removed the `pd.datetime` alias in 2.0; use the stdlib datetime
# module (already imported) for today's date.
@click.option('--start_date', default=datetime.datetime.today().strftime(DATE_FORMAT))
def main(input_file, start_date):
    """Load the ticker list from *input_file* and refresh intraday security
    data starting at *start_date* (YYYYMMDD)."""
    logger.info('input file: {0}'.format(input_file))
    logger.info('start date: {0}'.format(start_date))
    # Interpret the date string as midnight of that day.
    start_date = datetime.datetime.strptime(start_date+'000000', '%Y%m%d%H%M%S')
    ticker_list = get_ticker_list(input_file).tolist()
    security_service = SecurityService()
    # Daily refresh intentionally disabled; re-enable if needed.
    # security_service.update_daily_data(ticker_list, start_date)
    security_service.update_intraday_data(ticker_list, start_date)
def get_ticker_list(input_file):
    """Read the 'ticker' column from the given CSV file as a Series."""
    return pd.read_csv(input_file)['ticker']
if __name__ == '__main__':
main() | StarcoderdataPython |
5162513 | from math import radians, cos, sin, asin, sqrt
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import numpy as np
from datetime import date, timedelta
from pandas.tseries.offsets import DateOffset
from math import radians, cos, sin, asin, sqrt
import folium
import plotly.graph_objects as go
import json
# Load the Seattle PD sample incident data once at import time.
SPD_data = pd.read_csv('sample_2018_2019.csv',delimiter = ',')
SPD_data.sort_values(by='Report DateTime', ascending = True, inplace = True)
# Pair latitude/longitude into a single [lat, lon] list per row for the
# radius-search helpers below.
SPD_data['coordinates'] = SPD_data[['Latitude', 'Longitude']].values.tolist()
# Cap at 100k rows to bound memory/compute for the dashboard.
SPD_data = SPD_data.iloc[:100000,:]
def crimes_in_radius_dates(coord, radius, start_date, end_date):
    """Return SPD_data rows within `radius` miles of `coord` ([lat, lon])
    reported between start_date and end_date (inclusive).

    NOTE(review): this mutates the module-level SPD_data in place (the
    'Report DateTime' column is overwritten with date objects) — verify
    that is intended before reuse.
    """
    df = SPD_data
    df['Report DateTime']=pd.to_datetime(df['Report DateTime']).dt.date
    date_mask = (pd.to_datetime(df['Report DateTime']) >= start_date) & (pd.to_datetime(df['Report DateTime']) <= end_date)
    dff = df[date_mask]
    # Boolean mask: rows whose [lat, lon] pair falls inside the search radius.
    result = [point_in_radius(value[0],value[1],coord[0],coord[1],radius)
    for value in dff['coordinates']]
    return dff[result]
def point_in_radius(lat1, lon1, lat2, lon2, radius):
    """Return True if the great-circle distance between two points
    (given in decimal degrees) is at most `radius` miles.

    Uses the haversine formula. `radius` is truncated with int() —
    presumably because it can arrive as a string/float from the dashboard
    controls; confirm with callers before removing.
    """
    # Convert decimal degrees to radians.
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    # Haversine formula.
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    c = 2 * asin(sqrt(a))
    r = 3956  # Earth radius in miles (use 6371 for kilometers).
    # Same comparison as before; the manual if/else returning True/False
    # collapses to the boolean expression itself.
    return c * r <= int(radius)
def address_to_coord(address_string):
    """Geocode a free-form address via the Nominatim search API and return
    the parsed GeoJSON response as a dict.
    """
    import urllib.request

    # Nominatim accepts '+' as the space separator in the q= parameter.
    result = address_string.replace(' ', '+')
    query = f'https://nominatim.openstreetmap.org/search?q={result}&format=geojson'
    # The previous version embedded the fully-built URL back into a second
    # URL's q= parameter (double encoding) and used `requests`, which is
    # never imported in this module; fetch the query URL directly instead.
    with urllib.request.urlopen(query) as response:
        return json.load(response)
def crime_marker(coord, category, map):
    """Add colored circle markers to `map`, grouped into one toggleable
    FeatureGroup per crime category (PROPERTY / PERSON / SOCIETY).
    """
    colors = {'PROPERTY': 'Blue', 'PERSON': 'Red', 'SOCIETY': '#009933'}
    # One layer per category, created in the same fixed order as the colors.
    groups = {name: folium.FeatureGroup(name) for name in colors}
    for location, cat in zip(coord, category):
        marker = folium.CircleMarker(
            location=location,
            radius=3,
            popup=cat,
            color=colors[cat],
            fill=True,
            fill_color=colors[cat],
        )
        marker.add_to(groups[cat])
    for layer in groups.values():
        layer.add_to(map)
def crime_table(data, type, start, end):
    """Count offenses of the given crime category reported within
    [start, end], most frequent first.
    """
    subset = data[data['Crime Against Category'] == type]
    subset = subset.sort_values('Report DateTime', ascending=True)
    reported = pd.to_datetime(subset['Report DateTime'])
    in_range = subset[(reported >= start) & (reported <= end)]
    counts = in_range.groupby('Offense').count()['Report Number']
    return counts.sort_values(ascending=False).reset_index()
def crime_trend_data(data, type, end_date):
    """Build a line figure of monthly incident counts per offense for the
    given crime category over the 180 days ending at end_date.
    """
    df = data[data['Crime Against Category'] == type].sort_values('Report DateTime', ascending=True)
    # Keep only the trailing 180-day window ending at end_date.
    date_mask = (pd.to_datetime(df['Report DateTime']) <= end_date) & (pd.to_datetime(df['Report DateTime']) >= pd.to_datetime(end_date) - timedelta(days=180))
    df = df[date_mask]
    offense_names = df['Offense'].unique()
    fig = go.Figure()
    for o_type in offense_names:
        # .copy() avoids pandas' chained-assignment warning when the
        # datetime column is overwritten below; the unused empty DataFrame
        # the original allocated here has been dropped.
        df_off = df[df['Offense'] == o_type].copy()
        df_off['Report DateTime'] = pd.to_datetime(df_off['Report DateTime'])
        # Monthly incident counts for this offense.
        df_off = df_off.resample('M', on='Report DateTime').count()['Report Number'].reset_index()
        fig.add_trace(go.Scatter(x=df_off['Report DateTime'], y=df_off['Report Number'], mode='lines+markers', name=o_type))
    # Place the legend below the plot area.
    fig.update_layout(legend=dict(
        yanchor="top",
        y=-0.5,
        xanchor="left",
        x=0.0
    ))
    return fig
def slider_marks(marks, start_date):
    """Build slider tick labels for `marks` consecutive month-ends starting
    at start_date.

    Returns (tags, dlist): tags maps the 1-based slider position to a
    'Mon YY' label (e.g. 'Apr 20'); dlist is the matching normalized
    DatetimeIndex. The unused `datevalues` dict the original built has
    been removed.
    """
    datelist = pd.date_range(start_date, periods=marks, freq='M')  # month-end dates
    dlist = pd.DatetimeIndex(datelist).normalize()
    # Slider positions are 1-based, hence enumerate(..., start=1).
    tags = {pos: day.strftime('%b %y') for pos, day in enumerate(dlist, start=1)}
    return tags, dlist
def site_names(col_name):
    """Return dropdown options (label/value dicts) for each distinct value
    of the given SPD_data column, sorted ascending.
    """
    unique_sites = np.sort(SPD_data[col_name].unique())
    return [{'label': site, 'value': site} for site in unique_sites]
def histogram_plot(dff_n1, dff_n2, n1, n2):
    """Overlay two percent-normalized frequency histograms (bin width 1).

    dff_n1/dff_n2 are the two value series; n1/n2 their legend labels.
    """
    fig = go.Figure()
    # (series, legend label, bar color) for the two overlaid traces,
    # added in the same order as before so legend/stacking match.
    trace_specs = [(dff_n1, n1, 'firebrick'), (dff_n2, n2, 'royalblue')]
    for series, label, color in trace_specs:
        fig.add_trace(go.Histogram(
            x=series,
            histnorm='percent',
            name=label,  # name used in legend and hover labels
            xbins=dict(start=0, end=series.max(), size=1),
            marker_color=color,
            opacity=0.75,
        ))
    fig.update_layout(
        title_text='Frequency Histograms',             # title of plot
        xaxis_title_text='# of Incidences per Month',  # xaxis label
        yaxis_title_text='Frequency',                  # yaxis label
        bargap=0.2,        # gap between bars of adjacent location coordinates
        bargroupgap=0.05,  # gap between bars of the same location coordinates
    )
    return fig
| StarcoderdataPython |
3483205 | <reponame>sebtelko/pulumi-azure-native
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .access_control_record import *
from .backup_policy import *
from .backup_schedule import *
from .bandwidth_setting import *
from .get_access_control_record import *
from .get_backup_policy import *
from .get_backup_schedule import *
from .get_bandwidth_setting import *
from .get_manager import *
from .get_manager_extended_info import *
from .get_storage_account_credential import *
from .get_volume import *
from .get_volume_container import *
from .list_device_failover_sets import *
from .list_device_failover_tars import *
from .list_manager_activation_key import *
from .list_manager_public_encryption_key import *
from .manager import *
from .manager_extended_info import *
from .storage_account_credential import *
from .volume import *
from .volume_container import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_azure_native.storsimple.v20161001 as v20161001
import pulumi_azure_native.storsimple.v20170601 as v20170601
else:
v20161001 = _utilities.lazy_import('pulumi_azure_native.storsimple.v20161001')
v20170601 = _utilities.lazy_import('pulumi_azure_native.storsimple.v20170601')
| StarcoderdataPython |
6539036 | """Commands CLI."""
from typing import List, Optional
import click
from statue.cli.cli import statue_cli
from statue.cli.util import (
allow_option,
contexts_option,
deny_option,
silent_option,
verbose_option,
verbosity_option,
)
from statue.configuration import Configuration
from statue.exceptions import InvalidCommand, UnknownCommand
@statue_cli.group("command")
def commands_cli() -> None:
    """Commands related actions such as list, install, show, etc.

    Pure click group: it only namespaces the sub-commands registered below
    and performs no work of its own.
    """
@commands_cli.command("list")
@contexts_option
@allow_option
@deny_option
def list_commands_cli(
context: Optional[List[str]],
allow: Optional[List[str]],
deny: Optional[List[str]],
) -> None:
"""List matching commands to contexts, allow list and deny list."""
commands = Configuration.read_commands(
contexts=context,
allow_list=allow,
deny_list=deny,
)
for command_instance in commands:
click.echo(f"{command_instance.name} - {command_instance.help}")
@commands_cli.command("install")
@contexts_option
@allow_option
@deny_option
@silent_option
@verbose_option
@verbosity_option
def install_commands_cli(
context: Optional[List[str]],
allow: Optional[List[str]],
deny: Optional[List[str]],
verbosity: str,
) -> None:
"""Install missing commands."""
commands_list = Configuration.read_commands(
contexts=context, allow_list=allow, deny_list=deny
)
for command in commands_list:
command.install(verbosity=verbosity)
@commands_cli.command("show")
@click.pass_context
@click.argument("command_name", type=str)
@contexts_option
@allow_option
@deny_option
def show_command_cli(
ctx: click.Context,
command_name: str,
context: Optional[List[str]],
allow: Optional[List[str]],
deny: Optional[List[str]],
) -> None:
"""Show information about specific command."""
try:
command_instance = Configuration.read_command(
command_name=command_name,
contexts=context,
allow_list=allow,
deny_list=deny,
)
click.echo(f"Name - {command_instance.name}")
click.echo(f"Description - {command_instance.help}")
click.echo(f"Arguments - {command_instance.args}")
except (UnknownCommand, InvalidCommand) as error:
click.echo(str(error))
ctx.exit(1)
| StarcoderdataPython |
1913510 | <reponame>smueksch/pdf2txt
import re
class NumberFlagger:
    """Flag up lines that contain numbers.

    Use this to point out any lines that potentially have left-over footnote
    numbers in them but in a way they cannot be removed automatically.
    """
    name = 'NumberFlagger'
    desc = 'Flag up lines that contain numbers'

    def __call__(self, text: str) -> None:
        """Print a report for every line of *text* that contains a digit run."""
        lines = text.splitlines()
        for i in range(len(lines)):
            match = self.find_number(lines[i])
            if match:
                context = self.get_context(match.start(), match.end(), lines[i])
                print('Number found, line {}: {}'.format((i+1), context))

    @staticmethod
    def find_number(line: str):
        """Return the match object for the first number in *line*.

        Args:
            line (str): Line to be searched.

        Returns:
            Match object if a number is found, None otherwise.
        """
        return re.search(r'([0-9]+)', line)

    @staticmethod
    def get_context(start: int, end: int, line: str, radius: int = 8) -> str:
        """Return the context for the term at [start, end) within *line*.

        The context is the term plus up to *radius* characters on each side,
        wrapped in '<' and '>'. If fewer characters are available on either
        side, the maximum available number is taken instead.

        Example: for "Sore was I ere I saw Eros." with the term "ere"
        (start=11, end=14) and radius 8, the context is "<e was I ere I saw E>".
        """
        context_start = max(0, start - radius)
        # Cap at len(line), not len(line) - 1: slice end bounds are
        # exclusive, so the old cap silently dropped the final character
        # whenever the context window reached the end of the line.
        context_end = min(len(line), end + radius)
        return '<' + line[context_start:context_end] + '>'
| StarcoderdataPython |
1832189 | <filename>eval/ClassifierTester.py
import tensorflow.compat.v1 as tf
from utils import get_checkpoint_path
import numpy as np
class ClassifierTester:
    """Evaluate linear classifier heads on pretrained features (TF1 graph mode).

    Builds a tf.data input pipeline from `data_generator`, restores the
    latest checkpoint from a directory and reports per-head accuracy over
    the full test set (remainder samples beyond whole batches are dropped).
    """
    def __init__(self, model, data_generator, pre_processor):
        self.model = model
        self.pre_processor = pre_processor
        self.data_generator = data_generator
        # Number of whole batches covering the evaluation set.
        self.num_eval_steps = self.data_generator.num_samples//self.model.batch_size
    def get_data_queue(self):
        """Return a one-shot iterator over single-crop test batches."""
        data = self.data_generator.get_dataset()
        data = data.map(self.pre_processor.process_test, num_parallel_calls=1)
        data = data.batch(self.model.batch_size)
        data = data.prefetch(100)
        iterator = tf.data.make_one_shot_iterator(data)
        return iterator
    def get_data_queue_multicrop(self):
        """Return a one-shot iterator over test batches prepared for
        multi-crop evaluation (crops are extracted later in the graph)."""
        data = self.data_generator.get_dataset()
        data = data.map(self.pre_processor.process_test_multicrop, num_parallel_calls=1)
        data = data.batch(self.model.batch_size)
        data = data.prefetch(100)
        iterator = tf.data.make_one_shot_iterator(data)
        return iterator
    def preprocess(self, img, label):
        """Apply the single-crop test preprocessing; label passes through."""
        img = self.pre_processor.process_test(img)
        return img, label
    def make_test_summaries(self, names_to_values):
        """Build scalar summary ops that also print their value to stdout."""
        # Create the summary ops such that they also print out to std output:
        summary_ops = []
        # .items() -- dict.iteritems() is Python 2 only and raises
        # AttributeError on Python 3.
        for metric_name, metric_value in names_to_values.items():
            op = tf.summary.scalar(metric_name, metric_value)
            op = tf.Print(op, [metric_value], metric_name)
            summary_ops.append(op)
        return summary_ops
    def test_classifier(self, ckpt_dir):
        """Restore from `ckpt_dir` and return per-head accuracy over the
        test set using a single center crop per image."""
        print('Restoring from: {}'.format(ckpt_dir))
        g = tf.Graph()
        with g.as_default():
            # Get test batches
            batch_queue = self.get_data_queue()
            imgs_test, labels_test = batch_queue.get_next()
            imgs_test.set_shape([self.model.batch_size, ]+self.model.im_shape)
            # Get predictions
            predictions = self.model.linear_classifiers(imgs_test, self.data_generator.num_classes, training=False)
            # One correct-count op per classifier head.
            num_corrects_list = []
            for preds, f_id in zip(predictions, self.model.feats_IDs):
                preds_test = tf.argmax(preds, 1)
                correct_preds = tf.equal(preds_test, labels_test)
                num_correct = tf.reduce_sum(tf.to_float(correct_preds))
                num_corrects_list.append(num_correct)
            # Start running operations on the Graph.
            init = tf.global_variables_initializer()
            sess = tf.Session()
            sess.run(init)
            prev_ckpt = get_checkpoint_path(ckpt_dir)
            print('Restoring from previous checkpoint: {}'.format(prev_ckpt))
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, prev_ckpt)
            # Accumulate correct counts over all evaluation batches.
            n_cor_np = np.zeros([len(self.model.feats_IDs)])
            for i in range(self.num_eval_steps):
                n_correct = sess.run(num_corrects_list)
                n_cor_np += n_correct
            acc = n_cor_np/self.data_generator.num_samples
            print('Accuracy: {}'.format(acc))
            return acc
    def test_classifier_multicrop(self, ckpt_dir):
        """Restore from `ckpt_dir` and return per-head accuracy averaging
        softmax predictions over 10 crops (center + 4 corners, each flipped)."""
        print('Restoring from: {}'.format(ckpt_dir))
        g = tf.Graph()
        with g.as_default():
            # Get test batches
            batch_queue = self.get_data_queue_multicrop()
            imgs_test, labels_test = batch_queue.get_next()
            imgs_test.set_shape((self.model.batch_size,) + self.pre_processor.src_shape + (3,))
            print('imgs_test: {}'.format(imgs_test.get_shape().as_list()))
            # Extract crops
            imgs_rcrop = []
            # Offset that centers the target crop inside the source image.
            dp = int((self.pre_processor.src_shape[0] - self.pre_processor.target_shape[0]) / 2)
            imgs_ccrop = imgs_test[:, dp:dp + self.pre_processor.target_shape[0], dp:dp + self.pre_processor.target_shape[1], :]
            imgs_rcrop.append(imgs_ccrop)
            # Four corner crops: upper-left, upper-right, lower-left, lower-right.
            imgs_ulcrop = imgs_test[:, :self.pre_processor.target_shape[0], :self.pre_processor.target_shape[1], :]
            imgs_rcrop.append(imgs_ulcrop)
            imgs_urcrop = imgs_test[:, :self.pre_processor.target_shape[0], -self.pre_processor.target_shape[1]:, :]
            imgs_rcrop.append(imgs_urcrop)
            imgs_blcrop = imgs_test[:, -self.pre_processor.target_shape[0]:, :self.pre_processor.target_shape[1], :]
            imgs_rcrop.append(imgs_blcrop)
            imgs_brcrop = imgs_test[:, -self.pre_processor.target_shape[0]:, -self.pre_processor.target_shape[1]:, :]
            imgs_rcrop.append(imgs_brcrop)
            imgs_rcrop_stack = tf.concat(imgs_rcrop, 0)
            # Add flipped crops
            imgs_rcrop_stack = tf.concat([imgs_rcrop_stack, tf.reverse(imgs_rcrop_stack, [2])], 0)
            preds_rcrop_stack = self.model.linear_classifiers(imgs_rcrop_stack, self.data_generator.num_classes, training=False)
            num_corrects_list = []
            for preds_stack, f_id in zip(preds_rcrop_stack, self.model.feats_IDs):
                # Average the per-crop softmax distributions (10 crops).
                stack_preds = tf.stack(tf.split(preds_stack, 10))
                stack_preds = tf.nn.softmax(stack_preds, axis=-1)
                preds = tf.reduce_mean(stack_preds, 0)
                preds_test = tf.argmax(preds, 1)
                correct_preds = tf.equal(preds_test, labels_test)
                num_correct = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
                num_corrects_list.append(num_correct)
            # Start running operations on the Graph.
            init = tf.global_variables_initializer()
            sess = tf.Session()
            sess.run(init)
            prev_ckpt = get_checkpoint_path(ckpt_dir)
            print('Restoring from previous checkpoint: {}'.format(prev_ckpt))
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, prev_ckpt)
            n_cor_np = np.zeros([len(self.model.feats_IDs)])
            for i in range(self.num_eval_steps):
                n_correct = sess.run(num_corrects_list)
                n_cor_np += n_correct
            acc = n_cor_np/self.data_generator.num_samples
            print('Accuracy: {}'.format(acc))
            return acc
| StarcoderdataPython |
6600053 | """
Goal: store application settings.
Notes:
- This file serves as an example only and should be changed
for production deployments.
- The *SALT* value will be provided over the phone.
"""
# Set to False if the target server has a self-signed certificate
VERIFY_SSL_CERT = False

# OAuth2 client credentials used to obtain an access token
CLIENT_ID = 'client_1'
CLIENT_SECRET = 'secret_1'

# Application endpoints
TOKEN_URL = 'https://localhost:5000/oauth/token'
SAVE_URL = 'https://localhost:5000/api/save'

# MySQL connection settings for the local client database
DB_HOST = 'localhost'
DB_PORT = 3306
DB_NAME = 'olassc'
DB_USER = 'olassc'
DB_PASS = '<PASSWORD>'

# Hashing salt shared with the server (provided out of band).
# Expected length: 64 characters
# Derived from 'himalayan_salt'
SALT = '5ce3c76fae7161e7d45a5c96fb6a2b2131134af739fc1c85465e659aded4e431'

# Partners should only add/remove elements to this array
# to activate/deactivate hashing rules
ENABLED_RULES = ['F_L_D_Z', 'L_F_D_Z', 'F_L_D_C', 'L_F_D_C']
| StarcoderdataPython |
208739 | """
Tests for FileCollectionController
"""
import os
import shutil
import tempfile
import platform
from io import open
# Python 2/3 compatibility shims for text and byte conversion.
try:
    # Python 2: the `unicode` builtin exists.
    to_unicode = unicode
except NameError:
    # Python 3: `str` is already unicode text.
    to_unicode = str
try:
    def to_bytes(val):
        # Python 2: bytes(text) works without an encoding.
        return bytes(val)
    to_bytes("test")
except TypeError:
    def to_bytes(val):
        # Python 3: bytes() requires an explicit encoding for str input.
        return bytes(val, "utf-8")
    to_bytes("test")
from datmo.config import Config
from datmo.core.controller.project import ProjectController
from datmo.core.controller.file.file_collection import \
FileCollectionController
from datmo.core.util.exceptions import EntityNotFound, UnstagedChanges
class TestFileCollectionController():
def setup_method(self):
# provide mountable tmp directory for docker
tempfile.tempdir = "/tmp" if not platform.system(
) == "Windows" else None
test_datmo_dir = os.environ.get('TEST_DATMO_DIR',
tempfile.gettempdir())
self.temp_dir = tempfile.mkdtemp(dir=test_datmo_dir)
Config().set_home(self.temp_dir)
self.project_controller = ProjectController()
self.file_collection_controller = FileCollectionController()
def teardown_method(self):
pass
def __setup(self):
# Create the files in the project files directory
dirpath1 = os.path.join(
self.file_collection_controller.file_driver.files_directory,
"dirpath1")
os.makedirs(dirpath1)
filepath1 = os.path.join(
self.file_collection_controller.file_driver.files_directory,
"filepath1")
with open(filepath1, "wb") as _:
pass
return filepath1, dirpath1
def test_create(self):
self.project_controller.init("test3", "test description")
# Test failure creation of collection if no path given
failed = False
try:
self.file_collection_controller.create()
except TypeError:
failed = True
assert failed
# Test create success with paths
paths = self.__setup()
file_collection_obj = self.file_collection_controller.create(paths)
assert file_collection_obj
assert file_collection_obj.id
assert file_collection_obj.path
assert file_collection_obj.driver_type
assert file_collection_obj.filehash == "74be16979710d4c4e7c6647856088456"
# Test create success without paths (should be the same as previous)
file_collection_obj_1 = self.file_collection_controller.create([])
assert file_collection_obj_1 == file_collection_obj
assert file_collection_obj_1.id == file_collection_obj.id
assert file_collection_obj_1.path == file_collection_obj.path
assert file_collection_obj_1.driver_type == file_collection_obj.driver_type
assert file_collection_obj_1.filehash == file_collection_obj.filehash
# Test create success with paths again (should be same as previous)
file_collection_obj_2 = self.file_collection_controller.create(paths)
assert file_collection_obj_2 == file_collection_obj_1
assert file_collection_obj_2.id == file_collection_obj_1.id
assert file_collection_obj_2.path == file_collection_obj_1.path
assert file_collection_obj_2.driver_type == file_collection_obj_1.driver_type
assert file_collection_obj_2.filehash == file_collection_obj_1.filehash
# Test file collection with empty paths (should be same as previous)
file_collection_obj_3 = self.file_collection_controller.create([])
assert file_collection_obj_3 == file_collection_obj_2
assert file_collection_obj_3.id == file_collection_obj_2.id
assert file_collection_obj_3.path == file_collection_obj_2.path
assert file_collection_obj_3.driver_type == file_collection_obj_2.driver_type
assert file_collection_obj_3.filehash == file_collection_obj_2.filehash
    def test_list(self):
        """list() returns every file collection created in the project."""
        self.project_controller.init("test4", "test description")
        paths_1 = self.__setup()
        # A second, distinct collection containing one non-empty file
        filepath2 = os.path.join(self.file_collection_controller.home,
                                 "filepath2")
        with open(filepath2, "wb") as f:
            f.write(to_bytes("test" + "\n"))
        paths_2 = [filepath2]
        file_collection_obj_1 = self.file_collection_controller.create(paths_1)
        file_collection_obj_2 = self.file_collection_controller.create(paths_2)
        # List all code and ensure they exist
        result = self.file_collection_controller.list()
        assert len(result) == 2 and \
            file_collection_obj_1 in result and \
            file_collection_obj_2 in result
def test_delete(self):
self.project_controller.init("test5", "test description")
paths = self.__setup()
file_collection_obj = self.file_collection_controller.create(paths)
# Delete code in the project
result = self.file_collection_controller.delete(file_collection_obj.id)
# Check if code retrieval throws error
thrown = False
try:
self.file_collection_controller.dal.file_collection.get_by_id(
file_collection_obj.id)
except EntityNotFound:
thrown = True
assert result == True and \
thrown == True
def test_exists_file(self):
self.project_controller.init("test6", "test description")
paths = self.__setup()
file_collection_obj = self.file_collection_controller.create(paths)
# check for file_collection_id
result = self.file_collection_controller.exists(
file_collection_id=file_collection_obj.id)
assert result
# check for file_hash in file_collection
result = self.file_collection_controller.exists(
file_hash=file_collection_obj.filehash)
assert result
# check for not proper file_collection_id
result = self.file_collection_controller.exists(
file_collection_id="test_file_collection_id")
assert not result
def test_calculate_project_files_hash(self):
self.project_controller.init("test7", "test description")
filepath1, dirpath1 = self.__setup()
# Test if hash is for 1 blank filepath and empty directory
result = self.file_collection_controller._calculate_project_files_hash(
)
assert result == "74be16979710d4c4e7c6647856088456"
    def test_has_unstaged_changes(self):
        """_has_unstaged_changes() is True before a save, False after, and True
        again once a saved file is modified."""
        self.project_controller.init("test8", "test description")
        # Create the files in the project files directory
        paths = self.__setup()
        # Test when there are unstaged changes
        result = self.file_collection_controller._has_unstaged_changes()
        assert result
        # Save the file collection
        self.file_collection_controller.create(paths)
        # Test when there are no unstaged changes
        result = self.file_collection_controller._has_unstaged_changes()
        assert not result
        # Change the file contents
        with open(paths[0], "wb") as f:
            f.write(to_bytes("hello"))
        # Test when there are unstaged changes again
        result = self.file_collection_controller._has_unstaged_changes()
        assert result
    def test_check_unstaged_changes(self):
        """check_unstaged_changes() raises UnstagedChanges while files are dirty,
        returns falsy when clean, and treats an empty project as staged."""
        self.project_controller.init("test9", "test description")
        # Create the files in the project files directory
        paths = self.__setup()
        # Test when there are unstaged changes
        failed = False
        try:
            _ = self.file_collection_controller.check_unstaged_changes()
        except UnstagedChanges:
            failed = True
        assert failed
        # Save the file collection
        self.file_collection_controller.create(paths)
        # Test when there are no unstaged changes
        result = self.file_collection_controller.check_unstaged_changes()
        assert not result
        # Change the file contents
        with open(paths[0], "wb") as f:
            f.write(to_bytes("hello"))
        # Test when there are unstaged changes again
        failed = False
        try:
            _ = self.file_collection_controller.check_unstaged_changes()
        except UnstagedChanges:
            failed = True
        assert failed
        # Test when there are no files (should be staged)
        os.remove(paths[0])
        shutil.rmtree(paths[1])
        result = self.file_collection_controller.check_unstaged_changes()
        assert not result
    def test_checkout(self):
        """checkout() restores a saved collection's files, refuses to run with
        unstaged changes, and can switch back after a new save."""
        self.project_controller.init("test9", "test description")
        # Create the files in the project files directory
        paths = self.__setup()
        # Create a file collection to checkout to with paths
        file_collection_obj = self.file_collection_controller.create(paths)
        # Checkout success when there are no unstaged changes
        result = self.file_collection_controller.checkout(
            file_collection_obj.id)
        assert result
        current_hash = self.file_collection_controller._calculate_project_files_hash(
        )
        assert current_hash == "74be16979710d4c4e7c6647856088456"
        assert file_collection_obj.filehash == current_hash
        # Check the filenames as well because the hash does not take this into account
        assert os.path.isfile(paths[0])
        # Change file contents to make it unstaged
        with open(paths[0], "wb") as f:
            f.write(to_bytes("hello"))
        # Checkout failure when there are unstaged changes
        failed = False
        try:
            _ = self.file_collection_controller.checkout(
                file_collection_obj.id)
        except UnstagedChanges:
            failed = True
        assert failed
        # Create a new file collection with paths
        file_collection_obj_1 = self.file_collection_controller.create(paths)
        # Checkout success back when there are no unstaged changes
        result = self.file_collection_controller.checkout(
            file_collection_obj.id)
        assert result
        current_hash = self.file_collection_controller._calculate_project_files_hash(
        )
        assert current_hash == "74be16979710d4c4e7c6647856088456"
        assert file_collection_obj.filehash == current_hash
        assert file_collection_obj_1.filehash != current_hash
        # Check the filenames as well because the hash does not take this into account
        assert os.path.isfile(paths[0])
| StarcoderdataPython |
# Streamlit Timeline Component Example
import streamlit as st
from streamlit_timeline import timeline

# Use the full page width for the timeline canvas.
st.set_page_config(page_title="Timeline Example", layout="wide")

# Load the timeline JSON payload. The encoding is given explicitly so the
# read does not depend on the platform's locale default.
with open('example.json', "r", encoding="utf-8") as f:
    data = f.read()

# Render the timeline component.
timeline(data, height=800)
# Print statements converted to the print() function: the statement form is a
# SyntaxError on Python 3, while the function form runs on both 2 and 3.
print("hello")
print("whatever")
| StarcoderdataPython |
from flask import Flask, escape, request
import requests
import datetime
from dateutil import parser
application = Flask(__name__)
@application.route('/has_peers')
def peer_checker():
    """Health endpoint: return the node's peer count, or a (message, 500) tuple
    when the node is unreachable or has no peers."""
    try:
        resp = requests.get('http://127.0.0.1:8732/network/peers')
    except requests.exceptions.RequestException as exc:
        err = "Could not connect to node, %s" % repr(exc), 500
        print(err)
        return err
    peer_count = len(resp.json())
    if peer_count < 1:
        err = "We don't have any peer", 500
        print(err)
        return err
    return str(peer_count)
@application.route('/is_synced')
def sync_checker():
    """Health endpoint: report whether the local node's main chain is
    bootstrapped, or a (message, 500) tuple when it is not."""
    try:
        resp = requests.get('http://127.0.0.1:8732/chains/main/is_bootstrapped')
    except requests.exceptions.RequestException as exc:
        err = "Could not connect to node, %s" % repr(exc), 500
        print(err)
        return err
    if not resp.json()["bootstrapped"]:
        err = "Chain is not bootstrapped", 500
        print(err)
        return err
    return "Chain is bootstrapped"
| StarcoderdataPython |
# modules/blocks/pna/__init__.py
from .pna import PNAConv, PNAConvSimple
from pathlib import Path
from shutil import which
from geopandas.geodataframe import GeoDataFrame
from ipywidgets.widgets import widget
from keplergl import KeplerGl
import pandas as pd
import geopandas as gpd
from typing import Union
from .config import load_config
import contextily as ctx
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram
import numpy as np
from src.data.make_dataset import h3_to_polygon
from src.settings import FIGURES_DIR
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
from src.data.download import ensure_geometry_type
def visualize_kepler(data: Union[pd.DataFrame, gpd.GeoDataFrame], name="data", config_name: str=None) -> KeplerGl:
    """Build a KeplerGl map for *data* under layer *name*.

    When *config_name* resolves to a saved config, the map is created with that
    config and a fixed height of 900; otherwise a default map is returned.
    """
    if config_name is None:
        return KeplerGl(data={name: data})
    config = load_config(config_name)
    if config is None:
        return KeplerGl(data={name: data})
    return KeplerGl(data={name: data}, config=config, height=900)
def visualize_clusters_kepler(data: Union[pd.DataFrame, gpd.GeoDataFrame], name="data") -> KeplerGl:
    """Kepler map for *data* preconfigured with the saved "clusters" style."""
    return visualize_kepler(data, name=name, config_name="clusters")
def visualize_df(df: Union[pd.DataFrame, GeoDataFrame], map_source=ctx.providers.CartoDB.Positron, column="label", alpha=0.6, figsize=(15,15), **kwargs):
    """Plot *df* on a contextily basemap, colored by *column*.

    A plain DataFrame (or a frame without a geometry column) must carry an "h3"
    column; its cells are converted to polygons and the frame is promoted to a
    GeoDataFrame in EPSG:4326. Returns a (figure, axes) tuple.
    """
    # NOTE(review): `type(df) == pd.DataFrame` deliberately excludes subclasses
    # such as GeoDataFrame; switching to isinstance() would change which frames
    # take the h3 branch.
    if type(df) == pd.DataFrame or "geometry" not in df.columns:
        if "h3" in df.columns:
            # Shallow copy so the geometry column is added to a new frame,
            # not to the caller's object.
            df = df.copy(deep=False)
            df['geometry'] = df['h3'].apply(h3_to_polygon)
            df = gpd.GeoDataFrame(df, crs="EPSG:4326")
        else:
            raise ValueError("Passed dataframe must either be GeoDataFrame with geometry column or have h3 column")
    # Reproject to Web Mercator (EPSG:3857) so the data lines up with the tiles.
    ax = df.to_crs(epsg=3857).plot(column=column, legend=True, alpha=alpha, figsize=figsize, **kwargs)
    ctx.add_basemap(ax, source=map_source, attribution_size=4)
    ax.axis("off")
    ax.tick_params(axis='both', which='both', bottom=False, labelbottom=False, top=False, labeltop=False, left=False, labelleft=False, right=False, labelright=False)
    return ax.get_figure(), ax
def visualize_dendrogram(model, **kwargs):
    """Plot a dendrogram for a fitted hierarchical clustering *model*.

    The model must expose ``children_``, ``labels_`` and ``distances_``. A
    scipy-style linkage matrix is assembled from them and handed to
    ``scipy.cluster.hierarchy.dendrogram`` together with **kwargs.
    """
    n_samples = len(model.labels_)
    leaf_counts = np.zeros(model.children_.shape[0])
    for merge_idx, children in enumerate(model.children_):
        total = 0
        for child in children:
            # Indices below n_samples are leaves; the rest are earlier merges.
            if child < n_samples:
                total += 1
            else:
                total += leaf_counts[child - n_samples]
        leaf_counts[merge_idx] = total
    linkage_matrix = np.column_stack(
        [model.children_, model.distances_, leaf_counts]).astype(float)
    # Plot the corresponding dendrogram
    dendrogram(linkage_matrix, **kwargs)
def save_kepler_map(kepler_map: KeplerGl, figure_subpath: Path, remove_html=False):
    """Render *kepler_map* to an image at FIGURES_DIR/figure_subpath.

    The map is exported to an HTML file next to the target path, opened in
    headless Chrome sized to the map, and screenshotted to the image path.
    Set *remove_html* to delete the intermediate HTML afterwards.
    """
    result_path = FIGURES_DIR.joinpath(figure_subpath)
    result_path.parent.mkdir(parents=True, exist_ok=True)
    html_file = result_path.with_suffix(".html")
    # NOTE(review): presumably normalizes each layer's geometry column before
    # export — confirm against src.data.download.ensure_geometry_type.
    for gdf in kepler_map.data.values():
        ensure_geometry_type(gdf)
    kepler_map.save_to_html(file_name=html_file)
    options = Options()
    height = kepler_map.height
    width = 1300
    options.add_argument("--headless")
    options.add_argument(f"--window-size={width},{height}")
    driver = webdriver.Chrome(options=options)
    try:
        driver.get(str(html_file.resolve()))
        # Give the map time to load tiles before the screenshot.
        time.sleep(3)
        driver.save_screenshot(str(result_path))
    finally:
        # Fix: the browser was never shut down, leaking a Chrome process per
        # call; always release it, even when the screenshot fails.
        driver.quit()
    if remove_html:
        html_file.unlink()
# prep.py
from argparse import ArgumentParser
from tempfile import NamedTemporaryFile
from pprint import pprint
import csv, os, pandas
"""
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Maps the "Cost Center" code found in the budget export to a display name;
# unknown codes fall back to "Other" at lookup time.
cost_center_name_map = {
    "1": "District Office",
    "125": "Grade School",
    "571": "Junior High School",
    "608": "High School",
    "0": "Other"
}
if __name__ == "__main__":
ap = ArgumentParser()
ap.add_argument("filename")
ap.add_argument("-o", "--output")
args = ap.parse_args()
amt_cols = ["2018-2019 Actual", "2019-2020 Actual", "2020-2021 Budget", "2021-2022 Proposed"]
# Remove "$-" values (happens by default here) and split "Account" and "Budget Organization" columns
df = pandas.read_csv(args.filename) if args.filename.endswith('csv') else pandas.read_excel(args.filename)
df[['Account Number','Account Name']] = df.Account.str.split(pat=' - ', n=1, expand=True)
df[['Budget Organization Number','Budget Organization Name']] = df['Budget Organization'].str.split(pat=' - ', n=1, expand=True)
# Break out Budget Terms into Rows and leave Amount Field as Metric
tmp_file = NamedTemporaryFile(mode='w', delete=False)
df.to_csv(tmp_file, index=False)
tmp_file.flush()
tmp_file.close()
in_fp = open(tmp_file.name, 'r')
reader = csv.DictReader(in_fp)
output_filename = args.output if args.output is not None else "output.csv"
with open(output_filename, 'w') as out_fp:
out_cols = list(df.columns)
for col in amt_cols:
out_cols.remove(col)
out_cols += ['Budget Term','Cost Center Name','Amount']
writer = csv.DictWriter(out_fp, fieldnames=out_cols)
writer.writeheader()
for row in reader:
out_row_base = row.copy()
for col in amt_cols:
del out_row_base[col]
for col in amt_cols:
out_row = out_row_base.copy()
out_row['Cost Center Name'] = cost_center_name_map.get(row['Cost Center'], 'Other')
out_row['Budget Term'] = col
out_row['Amount'] = row[col]
writer.writerow(out_row)
os.unlink(tmp_file.name)
| StarcoderdataPython |
# chrisjsherm/jira-status
"""
Math-related utility functions.
"""
def get_percentage(numerator, denominator, precision = 2):
    """Return numerator/denominator as a percentage, rounded to *precision* digits."""
    ratio = float(numerator) / float(denominator)
    return round(ratio * 100, precision)
# dashboard/migrations/0012_auto_20200222_2026.py
# Generated by Django 2.2.5 on 2020-02-22 19:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes csvdata.fall_asleep and
    # csvdata.wake_up so both columns accept NULL/blank values.
    dependencies = [
        ('dashboard', '0011_auto_20200222_2017'),
    ]
    operations = [
        migrations.AlterField(
            model_name='csvdata',
            name='fall_asleep',
            field=models.DateTimeField(blank=True, null=True, verbose_name='fall asleep'),
        ),
        migrations.AlterField(
            model_name='csvdata',
            name='wake_up',
            field=models.DateTimeField(blank=True, null=True, verbose_name='wake up'),
        ),
    ]
| StarcoderdataPython |
# G_martix_spiral.py
"""
given a m*n matrix, print the matrix spirally clockwise
"""
def print_spiral(matrix):
    """Print an m*n matrix in clockwise spiral order, one element per line.

    Args:
        matrix: non-empty list of equal-length rows.

    Fix: the body used Python 2's ``xrange`` alongside the ``print()``
    function, which raises NameError on Python 3; ``range`` behaves the same
    here on both versions.
    """
    m, n = len(matrix), len(matrix[0])
    top, down = 0, m - 1
    left, right = 0, n - 1
    while True:
        # left-to-right across the current top row
        for j in range(left, right + 1):
            print(matrix[top][j])
        top += 1
        if top > down or left > right:
            break
        # top-to-bottom down the current right column
        for i in range(top, down + 1):
            print(matrix[i][right])
        right -= 1
        if top > down or left > right:
            break
        # right-to-left across the current bottom row
        for j in reversed(range(left, right + 1)):
            print(matrix[down][j])
        down -= 1
        if top > down or left > right:
            break
        # bottom-to-top up the current left column
        for i in reversed(range(top, down + 1)):
            print(matrix[i][left])
        left += 1
        if top > down or left > right:
            break
if __name__ == '__main__':
    # Demo: a 3x3 grid spirals out as 1 2 3 6 9 8 7 4 5.
    demo_grid = [
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9],
    ]
    print_spiral(demo_grid)
| StarcoderdataPython |
import math
# Mean Earth radius in metres, and the magnetic pole's lat/lon in degrees.
EARTH_RADIUS = 6370000.
MAG_LAT = 82.7
MAG_LON = -114.4
# Sixteen-point compass rose, clockwise from north, 22.5 degrees apart.
direction_names = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE", "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"]
directions_num = len(direction_names)
directions_step = 360. / directions_num
def xyz(lat, lon, r=EARTH_RADIUS):
    """Convert spherical coordinates (degrees, radius) to a cartesian (x, y, z) triple."""
    lat_rad = math.radians(lat)
    lon_rad = math.radians(lon)
    cos_lat = math.cos(lat_rad)
    return r * cos_lat * math.cos(lon_rad), r * cos_lat * math.sin(lon_rad), r * math.sin(lat_rad)
def dot(p1, p2):
    """Dot product of two 3-vectors."""
    a1, a2, a3 = p1[0], p1[1], p1[2]
    b1, b2, b3 = p2[0], p2[1], p2[2]
    return a1 * b1 + a2 * b2 + a3 * b3
def cross(p1, p2):
    """Cross product p1 x p2 of two 3-vectors, returned as an (x, y, z) tuple."""
    a1, a2, a3 = p1[0], p1[1], p1[2]
    b1, b2, b3 = p2[0], p2[1], p2[2]
    return (a2 * b3 - a3 * b2,
            a3 * b1 - a1 * b3,
            a1 * b2 - a2 * b1)
def determinant(p1, p2, p3):
    """Determinant of three vectors (the scalar triple product p1 . (p2 x p3))."""
    return dot(p1, cross(p2, p3))
def normalize_angle(angle):
    """Map an angle in degrees onto the range [0, 360)."""
    turns = angle / 360.
    fractional_turns = turns - math.floor(turns)
    return fractional_turns * 360.
def sgn(x):
    """Sign of x as a float: 0. for zero, 1. for positive, otherwise -1.."""
    if x == 0:
        return 0.
    return 1. if x > 0 else -1.
def angle(v1, v2, n=None):
    """Return the angle between v1 and v2 in degrees, normalized to [0, 360).

    n may be a vector that points to an observer looking at the plane
    containing v1 and v2; it fixes the sign of the angle. When omitted it
    defaults to the cross product v1 x v2.
    """
    if n is None:  # fix: compare to None by identity, not equality
        n = cross(v1, v2)
    # Cosine of the angle from the normalized dot product
    prod = dot(v1, v2) / math.sqrt(dot(v1, v1) * dot(v2, v2))
    # The determinant's sign orients the angle relative to the observer n
    rad = sgn(determinant(v1, v2, n)) * math.acos(prod)
    deg = math.degrees(rad)
    return normalize_angle(deg)
def great_circle_angle(p1, p2, p3):
    """Angle w(p1, p2, p3) in degrees at p2 between the great circles through
    (p1, p2) and (p3, p2). Needs p1 != p2 and p2 != p3."""
    plane_a = cross(p1, p2)
    plane_b = cross(p3, p2)
    return angle(plane_a, plane_b, p2)
def distance(p1, p2, r=EARTH_RADIUS):
    """Length of the curved way between points p1 and p2 on a sphere of radius r."""
    central_angle_deg = angle(p1, p2)
    return math.radians(central_angle_deg) * r
def direction_name(angle):
    """Compass name for a direction given in degrees.

    With the module's 16-point rose: 0.0 -> "N", 90.0 -> "E", 152.0 -> "SSE".
    """
    slot = int(round(normalize_angle(angle) / directions_step)) % directions_num
    return direction_names[slot]
# Cartesian positions of the magnetic and geographic north poles.
magnetic_northpole = xyz(MAG_LAT, MAG_LON)
geographic_northpole = xyz(90, 0)
### Part two - A tolerant parser for position strings ###
import re
class Parser:
    """A small recursive parser built from named regular-expression patterns.

    Patterns are registered with add() and may reference previously added
    patterns via %(pattern_name)s. parse() matches a named pattern against a
    string and returns a nested parse tree: dicts keyed by sub-pattern name,
    with the matched text stored under "TEXT".
    """
    def __init__(self):
        self.patterns = {}       # fully expanded patterns, by name
        self.raw_patterns = {}   # unexpanded patterns (with %(...)s refs), by name
        self.virtual = {}        # names flagged virtual (hidden from the tree)
    def add(self, name, pattern, virtual=False):
        """Adds a new named pattern (regular expression) that can reference previously added patterns by %(pattern_name)s.
        Virtual patterns can be used to make expressions more compact but don't show up in the parse tree. """
        self.raw_patterns[name] = "(?:" + pattern + ")"
        self.virtual[name] = virtual
        try:
            self.patterns[name] = ("(?:" + pattern + ")") % self.patterns
        except KeyError as e:
            raise Exception("Unknown pattern name: %s" % str(e))
    def parse(self, pattern_name, text):
        """Parses 'text' with pattern 'pattern_name' and returns the parse tree, or None on no match."""
        # build pattern with subgroups
        sub_dict = {}
        subpattern_names = []
        for s in re.finditer(r"%\(.*?\)s", self.raw_patterns[pattern_name]):
            subpattern_name = s.group()[2:-2]
            if not self.virtual[subpattern_name]:
                # Non-virtual references become capturing groups in order
                sub_dict[subpattern_name] = "(" + self.patterns[subpattern_name] + ")"
                subpattern_names.append(subpattern_name)
            else:
                sub_dict[subpattern_name] = self.patterns[subpattern_name]
        pattern = "^" + (self.raw_patterns[pattern_name] % sub_dict) + "$"
        # do matching
        m = re.match(pattern, text)
        if m is None:  # fix: identity comparison with None
            return None
        # build tree recursively by parsing subgroups
        tree = {"TEXT": text}
        for i in range(len(subpattern_names)):  # fix: xrange is Python 2 only
            text_part = m.group(i + 1)
            if text_part is not None:
                subpattern = subpattern_names[i]
                tree[subpattern] = self.parse(subpattern, text_part)
        return tree
# Grammar for tolerant position-string parsing, assembled from named patterns.
position_parser = Parser()
# Hemisphere letters (both E and O are accepted for east — presumably to
# cover German "Ost"; confirm)
position_parser.add("direction_ns", r"[NSns]")
position_parser.add("direction_ew", r"[EOWeow]")
position_parser.add("decimal_separator", r"[\.,]", True)
position_parser.add("sign", r"[+-]")
# NMEA style writes degrees and minutes run together: ddmm.mmm
position_parser.add("nmea_style_degrees", r"[0-9]{2,}")
position_parser.add("nmea_style_minutes", r"[0-9]{2}(?:%(decimal_separator)s[0-9]*)?")
position_parser.add("nmea_style", r"%(sign)s?\s*%(nmea_style_degrees)s%(nmea_style_minutes)s")
position_parser.add("number", r"[0-9]+(?:%(decimal_separator)s[0-9]*)?|%(decimal_separator)s[0-9]+")
position_parser.add("plain_degrees", r"(?:%(sign)s\s*)?%(number)s")
# Degree/minute/second markers; several look-alike characters are accepted
# (the escapes are the byte sequences of the unicode symbols)
position_parser.add("degree_symbol", r"\xc2\xb0", True)
position_parser.add("minutes_symbol", r"'|\xe2\x80\xb2|`|\xc2\xb4", True)
position_parser.add("seconds_symbol", r"%(minutes_symbol)s%(minutes_symbol)s|\xe2\x80\xb3|\"", True)
position_parser.add("degrees", r"%(number)s\s*%(degree_symbol)s")
position_parser.add("minutes", r"%(number)s\s*%(minutes_symbol)s")
position_parser.add("seconds", r"%(number)s\s*%(seconds_symbol)s")
position_parser.add("degree_coordinates",
                    "(?:%(sign)s\s*)?%(degrees)s(?:[+\s]*%(minutes)s)?(?:[+\s]*%(seconds)s)?|(?:%(sign)s\s*)%(minutes)s(?:[+\s]*%(seconds)s)?|(?:%(sign)s\s*)%(seconds)s")
position_parser.add("coordinates_ns", r"%(nmea_style)s|%(plain_degrees)s|%(degree_coordinates)s")
position_parser.add("coordinates_ew", r"%(nmea_style)s|%(plain_degrees)s|%(degree_coordinates)s")
# A full position: lat/lon in either order, hemisphere letters before or
# after the numbers, or no letters at all
position_parser.add("position", """\
\s*%(direction_ns)s\s*%(coordinates_ns)s[,;\s]*%(direction_ew)s\s*%(coordinates_ew)s\s*|\
\s*%(direction_ew)s\s*%(coordinates_ew)s[,;\s]*%(direction_ns)s\s*%(coordinates_ns)s\s*|\
\s*%(coordinates_ns)s\s*%(direction_ns)s[,;\s]*%(coordinates_ew)s\s*%(direction_ew)s\s*|\
\s*%(coordinates_ew)s\s*%(direction_ew)s[,;\s]*%(coordinates_ns)s\s*%(direction_ns)s\s*|\
\s*%(coordinates_ns)s[,;\s]+%(coordinates_ew)s\s*\
""")
def get_number(b):
    """Convert the text of a parse-tree branch to float, accepting ',' as decimal separator."""
    return float(b["TEXT"].replace(",", "."))

def get_coordinate(b):
    """Convert a coordinates branch of the parse tree to decimal degrees (float).

    Supports NMEA style (degrees + minutes), plain degrees, and
    degree/minute/second notation; a '-' sign branch negates the result.

    Fix: replaced dict.has_key() (removed in Python 3) with the `in` operator,
    which behaves identically on Python 2.
    """
    r = 0.
    if "nmea_style" in b:
        nmea = b["nmea_style"]
        if "nmea_style_degrees" in nmea:
            r += get_number(nmea["nmea_style_degrees"])
        if "nmea_style_minutes" in nmea:
            r += get_number(nmea["nmea_style_minutes"]) / 60.
        if "sign" in nmea and nmea["sign"]["TEXT"] == "-":
            r *= -1.
    elif "plain_degrees" in b:
        plain = b["plain_degrees"]
        r += get_number(plain["number"])
        if "sign" in plain and plain["sign"]["TEXT"] == "-":
            r *= -1.
    elif "degree_coordinates" in b:
        deg = b["degree_coordinates"]
        if "degrees" in deg:
            r += get_number(deg["degrees"]["number"])
        if "minutes" in deg:
            r += get_number(deg["minutes"]["number"]) / 60.
        if "seconds" in deg:
            r += get_number(deg["seconds"]["number"]) / 3600.
        if "sign" in deg and deg["sign"]["TEXT"] == "-":
            r *= -1.
    return r
def parse_position(s):
    """Parse a (utf8-encoded) position string into a (latitude, longitude)
    tuple of floats in degrees.

    Tries to be as tolerant as possible with input. Returns None if parsing
    doesn't succeed.

    Fix: replaced dict.has_key() (removed in Python 3) with `in`, and `== None`
    with the idiomatic identity check.
    """
    parse_tree = position_parser.parse("position", s)
    if parse_tree is None:
        return None
    # Hemisphere letters flip the sign of the corresponding coordinate
    lat_sign = +1.
    if "direction_ns" in parse_tree and parse_tree["direction_ns"]["TEXT"] in ("S", "s"):
        lat_sign = -1.
    lon_sign = +1.
    if "direction_ew" in parse_tree and parse_tree["direction_ew"]["TEXT"] in ("W", "w"):
        lon_sign = -1.
    lat = lat_sign * get_coordinate(parse_tree["coordinates_ns"])
    lon = lon_sign * get_coordinate(parse_tree["coordinates_ew"])
    return lat, lon
| StarcoderdataPython |
# coding=utf-8
from gevent import monkey
monkey.patch_all()
import logging.config
import gevent
from gevent import spawn
from gevent.queue import Queue
from retrying import retry
from munch import munchify
from datetime import datetime
from gevent.hub import LoopExit
from restkit import ResourceError
from base_worker import BaseWorker
from bot.dfs.bridge.utils import journal_context
from bot.dfs.bridge.journal_msg_ids import (DATABRIDGE_SUCCESS_UPLOAD_TO_TENDER,
DATABRIDGE_UNSUCCESS_UPLOAD_TO_TENDER,
DATABRIDGE_ITEM_STATUS_CHANGED_WHILE_PROCESSING)
from bot.dfs.bridge.constants import retry_mult, DOC_TYPE
logger = logging.getLogger(__name__)
class UploadFileToTender(BaseWorker):
    """Gevent worker that uploads DFS result documents to tender awards.

    Items are consumed from upload_to_tender_queue; failed uploads move to an
    internal retry queue served by a second greenlet with a retrying wrapper.
    Queue items are peeked (not popped) so an item survives until its upload
    is resolved one way or the other.
    """
    def __init__(self, client, upload_to_tender_queue, process_tracker, services_not_available, sleep_change_value,
                 delay=15):
        super(UploadFileToTender, self).__init__(services_not_available)
        self.start_time = datetime.now()
        self.delay = delay
        self.process_tracker = process_tracker
        # init clients
        self.client = client
        # init queues for workers
        self.upload_to_tender_queue = upload_to_tender_queue
        self.retry_upload_to_tender_queue = Queue(maxsize=500)
        # blockers
        self.sleep_change_value = sleep_change_value
    def upload_worker(self):
        # Greenlet loop: drain the main upload queue while services are up.
        while not self.exit:
            self.services_not_available.wait()
            self.try_peek_data_and_upload_to_tender(False)
            gevent.sleep(self.sleep_change_value.time_between_requests)
    def retry_upload_worker(self):
        # Greenlet loop: drain the retry queue while services are up.
        while not self.exit:
            self.services_not_available.wait()
            self.try_peek_data_and_upload_to_tender(True)
            gevent.sleep(self.sleep_change_value.time_between_requests)
    def try_peek_data_and_upload_to_tender(self, is_retry):
        # Peek one item from the chosen queue; LoopExit means it is empty,
        # in which case we just yield to other greenlets.
        try:
            tender_data = self.peek_from_tender_queue(is_retry)
        except LoopExit:
            gevent.sleep(0)
        else:
            self.try_upload_to_tender(tender_data, is_retry)
    def peek_from_tender_queue(self, is_retry):
        # Peek (not get) so the item stays queued until its upload resolves.
        return self.retry_upload_to_tender_queue.peek() if is_retry else self.upload_to_tender_queue.peek()
    def try_upload_to_tender(self, tender_data, is_retry):
        # Upload one document; API errors are dispatched by status code and
        # anything else goes through the generic error handler.
        try:
            self.update_headers_and_upload_to_tender(tender_data, is_retry)
        except ResourceError as re:
            self.remove_data_or_increase_wait(re, tender_data, is_retry)
        except Exception as e:
            self.handle_error(e, tender_data, is_retry)
        else:
            self.successfully_uploaded_to_tender(tender_data, is_retry)
    def update_headers_and_upload_to_tender(self, tender_data, is_retry):
        # Retry-queue items go through the retrying wrapper.
        if is_retry:
            self.do_upload_to_tender_with_retry(tender_data)
        else:
            self.do_upload_to_tender(tender_data)
    def do_upload_to_tender(self, tender_data):
        # Attach the document to the award's documents collection on the tender.
        document_data = tender_data.file_content.get('data', {})
        document_data["documentType"] = DOC_TYPE
        self.client.headers.update({'X-Client-Request-ID': tender_data.doc_id()})
        self.client._create_tender_resource_item(munchify({'data': {'id': tender_data.tender_id}}),
                                                 {'data': document_data},
                                                 'awards/{}/documents'.format(tender_data.award_id))
    @retry(stop_max_attempt_number=5, wait_exponential_multiplier=retry_mult)
    def do_upload_to_tender_with_retry(self, tender_data):
        """Process upload to tender request for retry queue objects."""
        self.do_upload_to_tender(tender_data)
    def remove_data_or_increase_wait(self, re, tender_data, is_retry):
        # Dispatch on the API status: 403/422/None -> drop the item,
        # 429 -> slow down, anything else -> generic error handling.
        if re.status_int == 403 or re.status_int == 422 or re.status_int is None:
            self.removing_data(re, tender_data, is_retry)
        elif re.status_int == 429:
            self.decrease_request_frequency(re, tender_data)
        else:
            self.handle_error(re, tender_data, is_retry)
    def removing_data(self, re, tender_data, is_retry):
        # The tender/award no longer accepts the upload; log and drop the item.
        logger.warning("Accept {} while uploading to {} doc_id: {}. Message {}".format(
            re.status_int, tender_data, tender_data.doc_id(), re.msg),
            extra=journal_context({"MESSAGE_ID": DATABRIDGE_ITEM_STATUS_CHANGED_WHILE_PROCESSING},
                                  tender_data.log_params()))
        self.remove_data(tender_data, is_retry)
    def decrease_request_frequency(self, re, tender_data):
        # 429 (rate limited): keep the item queued and widen the request gap.
        logger.info("Accept 429 while uploading to {} doc_id: {}. Message {}".format(
            tender_data, tender_data.doc_id(), re.msg),
            extra=journal_context({"MESSAGE_ID": DATABRIDGE_ITEM_STATUS_CHANGED_WHILE_PROCESSING},
                                  tender_data.log_params()))
        self.sleep_change_value.increment()
    def handle_error(self, re, tender_data, is_retry):
        # Unexpected failure: move a first-attempt item onto the retry queue.
        logger.info('Error while uploading file to {} doc_id: {}. Status: {}. Message: {}'.format(
            tender_data, tender_data.doc_id(), getattr(re, "status_int", None), re.message),
            extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESS_UPLOAD_TO_TENDER}, tender_data.log_params()))
        self.sleep_change_value.decrement()
        if not is_retry:
            self.retry_upload_to_tender_queue.put(tender_data)
        self.upload_to_tender_queue.get()
    def successfully_uploaded_to_tender(self, tender_data, is_retry):
        logger.info('Successfully uploaded file to {} doc_id: {}'.format(tender_data, tender_data.doc_id()),
                    extra=journal_context({"MESSAGE_ID": DATABRIDGE_SUCCESS_UPLOAD_TO_TENDER},
                                          tender_data.log_params()))
        self.remove_data(tender_data, is_retry)
    def remove_data(self, tender_data, is_retry):
        # Record the processed item, then pop it from the queue it came from.
        self.process_tracker.update_items_and_tender(tender_data.tender_id, tender_data.award_id, tender_data.doc_id()) # TODO: PLACEHOLDER!
        self.sleep_change_value.decrement()
        if is_retry:
            self.retry_upload_to_tender_queue.get()
        else:
            self.upload_to_tender_queue.get()
    def _start_jobs(self):
        # Spawn both queue-draining greenlets; BaseWorker supervises them.
        return {'upload_worker': spawn(self.upload_worker),
                'retry_upload_worker': spawn(self.retry_upload_worker)}
| StarcoderdataPython |
# source/gateway/runtime/app.py
import os
from chalice import IAMAuthorizer
from chalice import Chalice, AuthResponse
from chalice import ChaliceViewError, BadRequestError, NotFoundError
import requests
import boto3
from chalice import Response
import json
import jwt
app = Chalice(app_name='aws-mre-gateway-api')
authorizer = IAMAuthorizer()
sd_client = boto3.client('servicediscovery')
session = boto3.session.Session()
sec_client = session.client(
service_name='secretsmanager'
)
SERVICE_DISCOVERY_SERVICE_ID = os.environ['SERVICE_DISC_SERVICE_ID']
API_AUTH_SECRET_KEY_NAME = os.environ['API_AUTH_SECRET_KEY_NAME']
def get_iam_auth():
    """Build SigV4 request auth for execute-api from the Lambda's env credentials."""
    from requests_aws4auth import AWS4Auth
    return AWS4Auth(
        os.environ['AWS_ACCESS_KEY_ID'],
        os.environ['AWS_SECRET_ACCESS_KEY'],
        os.environ['AWS_REGION'],
        'execute-api',
        session_token=os.getenv('AWS_SESSION_TOKEN'),
    )
@app.authorizer()
def token_auth(auth_request):
    """Custom authorizer: validates an HS512 (HMAC) Bearer JWT against the
    shared secret held in Secrets Manager. A valid token unlocks the
    /external/* routes; any decode failure yields a deny (empty routes)."""
    secret = sec_client.get_secret_value(
        SecretId=API_AUTH_SECRET_KEY_NAME
    )
    try:
        bearer = auth_request.token.replace("Bearer", '').strip()
        jwt.decode(bearer, secret['SecretString'], algorithms=["HS512"])
    except Exception:
        # Bad signature, expired, or malformed token -> no routes allowed.
        return AuthResponse(routes=[''], principal_id='user')
    return AuthResponse(routes=[
        f'/external/*'
    ], principal_id='user')
@app.route('/external/{proxy+}', cors=True, methods=['GET'], authorizer=token_auth)
def get_external_payload():
    """
    Invokes the ControlPlane APIs with a GET request. This API is meant for integration with external systems
    that send Bearer JWT tokens for authentication.
    Returns:
        Controlplane API result.
    """
    # Renamed from get_payload: an IAM-authorized handler elsewhere in this file
    # reused the same function name, shadowing this one at module level (F811).
    # Chalice registers routes at decoration time, so the URL is unchanged.
    return invoke_destination_api("GET", app.current_request.uri_params['proxy'], app.current_request.headers)
@app.route('/external/{proxy+}', cors=True, methods=['DELETE'], authorizer=token_auth)
def delete_external_payload():
    """
    Invokes the ControlPlane APIs with a DELETE request. This API is meant for integration with external systems
    that send Bearer JWT tokens for authentication.
    Returns:
        Controlplane API result.
    """
    # Renamed from delete_payload: an IAM-authorized handler elsewhere in this
    # file reused the same function name, shadowing this one (F811). Chalice
    # registers routes at decoration time, so the URL is unchanged.
    return invoke_destination_api("DELETE", app.current_request.uri_params['proxy'])
@app.route('/external/{proxy+}', cors=True, methods=['PUT'], authorizer=token_auth)
def put_external_payload():
    """
    Invokes the ControlPlane APIs with a PUT request. This API is meant for integration with external systems
    that send Bearer JWT tokens for authentication.
    Returns:
        Controlplane API result.
    """
    # Renamed from put_payload: an IAM-authorized handler elsewhere in this
    # file reused the same function name, shadowing this one (F811). Chalice
    # registers routes at decoration time, so the URL is unchanged.
    return invoke_destination_api("PUT", app.current_request.uri_params['proxy'], api_body=app.current_request.raw_body)
@app.route('/external/{proxy+}', cors=True, methods=['POST'], authorizer=token_auth)
def post_external_payload():
    """
    Invokes the ControlPlane APIs with a POST request. This API is meant for integration with external systems
    that send Bearer JWT tokens for authentication.
    Returns:
        Controlplane API result.
    """
    # Renamed from post_payload: an IAM-authorized handler elsewhere in this
    # file reused the same function name, shadowing this one (F811). Chalice
    # registers routes at decoration time, so the URL is unchanged.
    return invoke_destination_api("POST", app.current_request.uri_params['proxy'], api_body=app.current_request.raw_body)
@app.route('/{proxy+}', cors=True, methods=['GET'], authorizer=authorizer)
def get_payload():
    """Proxy a GET request to the matching ControlPlane API (IAM-authenticated)."""
    proxy_path = app.current_request.uri_params['proxy']
    return invoke_destination_api("GET", proxy_path, app.current_request.headers)
@app.route('/{proxy+}', cors=True, methods=['DELETE'], authorizer=authorizer)
def delete_payload():
    """Proxy a DELETE request to the matching ControlPlane API (IAM-authenticated)."""
    proxy_path = app.current_request.uri_params['proxy']
    return invoke_destination_api("DELETE", proxy_path)
@app.route('/{proxy+}', cors=True, methods=['PUT'], authorizer=authorizer)
def put_payload():
    """Proxy a PUT request (with body) to the matching ControlPlane API (IAM-authenticated)."""
    proxy_path = app.current_request.uri_params['proxy']
    return invoke_destination_api("PUT", proxy_path, api_body=app.current_request.raw_body)
@app.route('/{proxy+}', cors=True, methods=['POST'], authorizer=authorizer)
def post_payload():
    """Proxy a POST request (with body) to the matching ControlPlane API (IAM-authenticated)."""
    proxy_path = app.current_request.uri_params['proxy']
    return invoke_destination_api("POST", proxy_path, api_body=app.current_request.raw_body)
def get_url_from_cloudmap(entity):
    """Return *entity*'s URL attribute from the CloudMap service instances,
    raising when no instance carries that attribute."""
    instances = sd_client.list_instances(ServiceId=SERVICE_DISCOVERY_SERVICE_ID)['Instances']
    for inst in instances:
        attributes = inst['Attributes']
        if entity in attributes:
            return attributes[entity]
    raise Exception("Service not found")
def get_api_url_by_route(uri_params):
    """Resolve the destination API endpoint URL for the requested proxy path.

    Returns an empty string when the path matches no known service prefix.
    """
    # Ordered (prefix, CloudMap attribute) pairs; first match wins, exactly
    # like the original if/elif chain. Each prefix may appear with or without
    # a leading slash.
    route_table = (
        ('plugin', 'PluginUrl'),
        ('system', 'SystemUrl'),
        ('profile', 'ProfileUrl'),
        ('model', 'ModelUrl'),
        ('event', 'EventUrl'),
        ('contentgroup', 'ContentGroupUrl'),
        ('program', 'ProgramUrl'),
        ('workflow', 'WorkflowUrl'),
        ('replay', 'ReplayUrl'),
    )
    params = uri_params['proxy'].lower()
    for prefix, attribute in route_table:
        if params.startswith((prefix, '/' + prefix)):
            return get_url_from_cloudmap(attribute)
    return ""
def invoke_destination_api(api_method, uri_params, api_headers=None, api_body=None):
    """Proxy an API call to the ControlPlane service resolved via CloudMap.

    Args:
        api_method: HTTP verb — one of GET, DELETE, PUT, POST.
        uri_params: The '{proxy+}' path capture, appended to the destination URL.
        api_headers: Optional request headers. For GET, an 'accept' header
            containing 'application/octet-stream' makes the JSON 'BlobContent'
            field be returned as a binary Chalice Response.
        api_body: Optional raw request body for PUT/POST.

    Returns:
        Raw response content, or a binary Response for octet-stream GETs.

    Raises:
        Exception: when no route matches the request path.
        NotFoundError / BadRequestError / ChaliceViewError: mapped from the
            downstream HTTP status or connection failure.
    """
    dest_url = get_api_url_by_route(app.current_request.uri_params)
    if not dest_url:
        raise Exception("No route found")
    res = None
    try:
        if api_method in ('GET', 'DELETE'):
            res = requests.request(
                method=api_method,
                url=f"{dest_url}{uri_params}",
                verify=True,
                auth=get_iam_auth()
            )
            if (api_headers and 'accept' in api_headers
                    and 'application/octet-stream' in api_headers['accept']):
                blob_content = json.loads(res.text)
                return Response(body=bytes(blob_content['BlobContent'], 'utf-8'),
                                status_code=200,
                                headers={'Content-Type': 'application/octet-stream'})
        elif api_method in ('PUT', 'POST'):
            res = requests.request(
                method=api_method,
                url=f"{dest_url}{uri_params}",
                headers=api_headers,
                data=api_body,
                verify=True,
                auth=get_iam_auth()
            )
            res.raise_for_status()
    except requests.exceptions.RequestException as e:
        # Bug fix: previously both except blocks dereferenced `res` even when the
        # request call itself failed before `res` was bound (UnboundLocalError),
        # and HTTP statuses outside {400, 404, 5xx} (e.g. 401/403) were silently
        # swallowed, making the function return None.
        # HTTPError is a subclass of RequestException, so one handler covers both
        # of the original (identical) except clauses.
        if res is None:
            raise ChaliceViewError(str(e))
        if res.status_code == 404:
            raise NotFoundError(res.reason)
        if res.status_code == 400:
            raise BadRequestError(res.reason)
        if res.status_code >= 500:
            raise ChaliceViewError(res.reason)
        raise
    else:
        return res.content
| StarcoderdataPython |
6633585 | """Django settings for django-generic-filters demo project."""
from os import environ
from os.path import abspath, dirname, join
# Configure some relative directories.
demoproject_dir = dirname(abspath(__file__))
demo_dir = dirname(demoproject_dir)
root_dir = dirname(demo_dir)
data_dir = join(root_dir, 'var')
# Mandatory settings.
ROOT_URLCONF = 'demoproject.urls'
WSGI_APPLICATION = 'demoproject.wsgi.application'
# Database.
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "test",
"USER": environ.get("DJANGO_DB_USER", environ.get("USER")),
'PASSWORD': environ.get("DJANGO_DB_PASSWORD", ""),
'HOST': '',
'PORT': '',
}
}
# Template.
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
'django.template.context_processors.request',
"django.contrib.messages.context_processors.messages"),
}
},
]
# Required.
SECRET_KEY = "This is a secret made public on project's repository."
# Media and static files.
MEDIA_ROOT = join(data_dir, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = join(data_dir, 'static')
STATIC_URL = '/static/'
# Applications.
INSTALLED_APPS = (
# Standard Django applications.
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# The actual django-generic-filters demo.
'django_genericfilters',
'demoproject',
'demoproject.filter',
)
# Default middlewares. You may alter the list later.
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Development configuration.
DEBUG = True
| StarcoderdataPython |
6680193 | """Init file for reader module"""
from typing import AnyStr
import feedparser
from .constants import RSS_FEEDS, \
FEEDPARSER_AGENT
from .utils import get_source, \
send_empty_response, \
send_heading, \
send_response
from ...utils import get_slack_client
def generic_feed_parser(slack_signing_key: AnyStr, source_key: AnyStr):
    """Fetch the RSS feed configured under *source_key* and post its heading
    and entries to the #ideas Slack channel.

    :param slack_signing_key: Signing key to initialize slack client
    :param source_key: String to find the source where to fetch RSS from
    :return:
    """
    slack_client = get_slack_client(slack_signing_key)
    source = get_source(source_key)
    # Guard clause: unknown source key -> notify with an empty response and stop.
    if not source:
        send_empty_response(slack_client)
        return
    parsed_feed = feedparser.parse(source['url'],
                                   agent=FEEDPARSER_AGENT)
    send_heading(slack_client, "#ideas", source['heading'])
    send_response(slack_client, "#ideas", parsed_feed['entries'])
| StarcoderdataPython |
11217805 | <filename>server.py
from flask import Flask, request, send_from_directory
from flask_socketio import SocketIO
from gibbon.project import Project
import configparser
import os, sys, re, json
# Global project handle; initialized to None here (populated elsewhere at runtime).
project = None
app = Flask(__name__)
# global project parameter
# Directory that static HTML templates are served from.
DIRECTORY = 'templates/html'
@app.route('/connect')
def connect():
    """Serve the welcome page from the static HTML directory."""
    # Bug fix: removed a stray bare-name expression statement
    # (`send_from_directory` on its own line) that evaluated the function
    # object and discarded it — a no-op left over from editing.
    return send_from_directory(DIRECTORY, 'welcome.html')
@app.route('/display')
def display():
    """Stub endpoint: currently returns an empty body regardless of query params."""
    # url= http://127.0.0.1:5000/display?basemap=satellite&poi=null&environment=null
    return ''
if __name__ == '__main__':
    # Run the Flask development server with default host/port.
    app.run()
| StarcoderdataPython |
3216031 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag.bzcleaner import BzCleaner
class Intermittents(BzCleaner):
    """Cleaner that finds intermittent test-failure bugs left unchanged for
    three weeks and auto-resolves them as INCOMPLETE."""

    def __init__(self):
        super(Intermittents, self).__init__()

    def description(self):
        """One-line description used by the tool's report."""
        return "Intermittent test failure bugs unchanged in 21 days"

    def get_bz_params(self, date):
        """Build the Bugzilla query parameters selecting stale intermittents.

        Matches P5 unresolved bugs filed by a specific reporter with no comment
        in 3 weeks, no blockers, no pending needinfo, not reopened in the last
        7 days, not critical, excluding certain components/keywords/whiteboard
        markers.
        """
        # NOTE(review): "<EMAIL>" looks like a redaction artifact from the data
        # dump — the real query needs the intermittent-failure bot's address here.
        params = {
            "email1": "<EMAIL>",
            "emailreporter1": "1",
            "emailtype1": "exact",
            # n1 negates f1: NOT commented on after -3w (i.e. unchanged 21 days).
            "n1": "1",
            "f1": "longdescs.count",
            "o1": "changedafter",
            "v1": "-3w",
            "f2": "blocked",
            "o2": "isempty",
            "f3": "flagtypes.name",
            "o3": "notequals",
            "v3": "needinfo?",
            # n4 negates the OP..CP group: NOT reopened within the last 7 days.
            "f4": "OP",
            "n4": "1",
            "f5": "bug_status",
            "o5": "changedto",
            "v5": "REOPENED",
            "f6": "bug_status",
            "o6": "changedafter",
            "v6": "-7d",
            "f7": "CP",
            "f8": "bug_severity",
            "o8": "notequals",
            "v8": "critical",
            "f9": "component",
            "o9": "nowordssubstr",
            "v9": "new tab page, messaging system",
            "keywords_type": "nowords",
            "keywords": "leave-open",
            "priority": "P5",
            "resolution": "---",
            "status_whiteboard_type": "notregexp",
            "status_whiteboard": "(leave open|leave-open|leaveopen|test disabled|test-disabled|testdisabled)",
        }
        return params

    def get_autofix_change(self):
        """Return the automatic change applied to each matched bug:
        resolve as INCOMPLETE with an explanatory comment."""
        return {
            "status": "RESOLVED",
            "resolution": "INCOMPLETE",
            "comment": {
                "body": f"https://wiki.mozilla.org/Bug_Triage#Intermittent_Test_Failure_Cleanup\n{self.get_documentation()}"
            },
        }
if __name__ == "__main__":
Intermittents().run()
| StarcoderdataPython |
372022 | <reponame>domingoesteban/robolearn<gh_stars>1-10
from robolearn.models.transitions.transition import Transition
| StarcoderdataPython |
12847034 | <filename>main.py<gh_stars>1-10
import discord as dc
from dotenv import load_dotenv
from os import getenv
import datetime as dt
import json, string
# Load .env so the paths/token below are available via getenv.
load_dotenv()
#*#*#*# variables #*#*#*#
config_relative_path = getenv("CONFIG")
database_relative_path = getenv("DATABASE")
token = getenv("TOKEN")
#*#*#*#*#*#*#*#*#*#*#*#*#
# Load bot configuration and the mutable JSON "database" once at startup;
# both dicts are shared module-level state mutated by the BOT class below.
with open(config_relative_path) as f:
    cfg = json.load(f)
with open(database_relative_path) as f:
    db = json.load(f)
class BOT(dc.Client):
    """Discord client for a course server.

    Handles prefixed chat commands (moderation, permissions, logging,
    leaderboard), plus semester lifecycle management: creating/clearing
    lab and math group roles and registering users to them via a dedicated
    registration channel. Persistent state lives in the module-level `db`
    dict, flushed to disk via saveDatabase().
    """

    def __init__(self, intents=None, *args, **kwargs):
        super().__init__(*args, **kwargs, intents=intents)
        # Cached copies of config/database fields used on every message.
        self.prefix = cfg['prefix']
        self.perms = cfg['perms']
        self.debugging = db['debugMode']

    async def on_ready(self):
        """Resolve the logs channel and announce connected guilds."""
        await self.loadLogsChannel()
        for guild in self.guilds:
            print(f"{self.user} connected to {guild.name}, id: {guild.id}")
        print(f"{self.user.name} is alive!")

    async def on_message(self, message):
        """Route incoming messages: group registration, commands, or easter egg."""
        if message.author == self.user:
            return
        elif db["groupReg"]["active"] and message.channel.id == db["groupReg"]["channel_id"]:
            # Registration channel: only react to messages mentioning lab/mat.
            if "lab" in message.content.lower() or "mat" in message.content.lower():
                await self.groupReg(message)
        elif message.content.startswith(self.prefix):
            await self.command(message)
        elif (self.user.name + " ssie") in message.content or (self.user.name + " sucks") in message.content:
            await message.reply("૮( ᵒ̌▱๋ᵒ̌ )ა ?!")

    async def command(self, message):
        """Parse a prefixed message and dispatch the matching bot command."""
        content = message.content[len(self.prefix):]
        # args is [None] (not []) when the command has no arguments.
        args = content.split()[1::] if len(content.split()) > 1 else [None]
        command = content.split()[0]
        # say command
        if command == "say" and await self.checkPerms(message, "say"):
            await message.delete()
            if any(args):
                await message.channel.send(" ".join([arg for arg in args]))
        # message purge
        elif command == "purge" and await self.checkPerms(message, "purge"):
            try:
                delRan = int(args[0])
            except:
                await message.reply("Please specify how many messages to purge.")
            else:
                if delRan in range(1,51):
                    # +1 so the purge command message itself is removed too.
                    await message.channel.purge(limit=delRan+1, bulk=True)
                    if self.logsActive: await self.log(message)
                else:
                    await message.reply("Purge amount must be in range from `1` to `50`.")
        # user info embed getter
        elif command == "me" and await self.checkPerms(message, "me"):
            if len(message.mentions) == 1:
                await message.channel.send(embed=self.getMeEmbed(message, message.mentions[0]))
            else:
                await message.channel.send(embed=self.getMeEmbed(message))
        # role/channel ID getter
        elif command == "id" and await self.checkPerms(message, "id"):
            if len(args) == 1:
                if len(message.role_mentions) == 1:
                    await message.channel.send(f"id: `{message.role_mentions[0].id}`")
                elif len(message.channel_mentions) == 1:
                    await message.channel.send(f"id: `{message.channel_mentions[0].id}`")
                elif len(message.mentions) == 1:
                    await message.channel.send(f"id: `{message.mentions[0].id}`")
        # avatar getter
        # NOTE(review): `and` binds tighter than `or`, so the permission check is
        # only awaited for the "av" alias; `avatar` bypasses it. Probably should be
        # (command == "avatar" or command == "av") and await self.checkPerms(...).
        elif command == "avatar" or command == "av" and await self.checkPerms(message, "avatar"):
            if message.mentions:
                avatar_url = self.getAvatarURL(message.mentions[0])
            else:
                avatar_url = self.getAvatarURL(message.author)
            await message.reply(avatar_url)
        # perms getter/setter
        # NOTE(review): same operator-precedence issue as the avatar command above.
        elif command == "perms" or command == "permissions" and await self.checkPerms(message, "permissions"):
            if args[0] == "set" and len(args) == 3 and await self.checkPerms(message, "permissions_manage"):
                try:
                    lvl = int(args[2])
                    if len(message.role_mentions) == 1:
                        role_id = message.raw_role_mentions[0]
                    else:
                        role_id = args[1]
                except:
                    await message.reply(f"Please specify a permission level and role to assign the permission to.")
                else:
                    if lvl not in range(1,3):
                        await message.reply("Perms level can only be 1 or 2")
                    else:
                        if self.managePerms("set", level=lvl, role=role_id):
                            await message.reply("Role permission changed successfully")
                            if self.logsActive: await self.log(message)
                        else:
                            await message.reply("Error occured while changing role permissions.")
            elif (args[0] == "delete" or args[0] == "del") and await self.checkPerms(message, "permissions_manage"):
                if len(args) == 2:
                    if len(message.role_mentions) == 1:
                        role_id = message.raw_role_mentions[0]
                    else:
                        role_id = args[1]
                    if self.managePerms("delete", role=role_id):
                        if self.logsActive: await self.log(message)
                        await message.reply("Role permission deleted successfully")
                    else:
                        await message.reply("Error occured while deleting role permissions.")
                else:
                    await message.reply(f"Please specify a role to delete the permission from.")
            elif not any(args):
                perm_lvl = self.getUserPerms(message.author)
                await message.reply(f"Your permission level: `{perm_lvl if perm_lvl < 3 else 'GOD'}`")
        # bot prefix setter
        elif command == "prefix" and await self.checkPerms(message, "prefix"):
            if args[0]:
                self.setPrefix(args[0])
                await message.channel.send(f"Prefix successfully set to: `{args[0]}`")
                if self.logsActive: await self.log(message)
        # leaderboard getter
        elif command == "leaderboard" and await self.checkPerms(message, "leaderboard"):
            lb_len = 5
            if args[0]:
                try:
                    lb_len = int(args[0])
                except:
                    await message.reply(f"Please specify the leaderboard lenght like: `{self.prefix}leaderboard 10`")
            lb = self.getLeaderboard(message.guild, lb_len)
            await message.channel.send(lb)
        # debug mode
        elif (command == "debug" or command == "debugging") and await self.checkPerms(message, "debugging"):
            if args[0] == "on" or args[0] == "true" or args[0] == "1":
                if self.debugging:
                    await message.reply("Debugging mode is already `on`")
                else:
                    self.debugging = db['debugMode'] = True
                    self.saveDatabase()
                    if self.logsActive: await self.log(message)
                    await message.reply("Debugging mode has been successfully turned `on`")
            elif args[0] == "off" or args[0] == "false" or args[0] == "0":
                if not self.debugging:
                    await message.reply("Debugging mode is already `off`")
                else:
                    self.debugging = db['debugMode'] = False
                    self.saveDatabase()
                    if self.logsActive: await self.log(message)
                    await message.reply("Debugging mode has been successfully turned `off`")
        # logs management
        elif command == "logs" and await self.checkPerms(message, "logs"):
            if args[0] == "set":
                if len(args) == 2 and len(message.channel_mentions) == 1:
                    await self.setLogsChannel(message.channel_mentions[0].id)
                    await message.reply(f"Logs channel successfully set to {message.channel_mentions[0].mention}")
                else:
                    await message.reply(f"Please specify a log channel like: `{self.prefix}logs set #someLogsChannel`")
            elif len(args) == 1 and (args[0] == "on" or args[0] == "true" or args[0] == "1"):
                self.logsActive = True
                db['logs']['active'] = True
                self.saveDatabase()
                if self.logsActive: await self.log(message)
                await message.reply("Logs are now turned `on`")
            elif len(args) == 1 and (args[0] == "off" or args[0] == "false" or args[0] == "0"):
                # Log the turn-off command itself before disabling logging.
                if self.logsActive: await self.log(message)
                self.logsActive = False
                db['logs']['active'] = False
                self.saveDatabase()
                await message.reply("Logs are now turned `off`")
        # semester management
        elif (command == "semester" or command == "sem") and await self.checkPerms(message, "semester_manage"):
            if args[0] == "new" or args[0] == "start":
                if not db["groupReg"]["active"]:
                    try:
                        group_count = int(args[1])
                    except:
                        await message.reply(f"Please specify the number of groups like: `{self.prefix}semester new 8`")
                    else:
                        if await self.openGroupReg(message, group_count):
                            await message.reply("New semester started successfully!")
                            if self.logsActive: await self.log(message)
                        else:
                            await message.reply("An error has occured while creating new semester.")
                else:
                    await message.reply("Group registration is already open!")
            elif args[0] == "close" or args[0] == "end":
                if db["groupReg"]["active"]:
                    await self.closeGroupReg(message)
                    if self.logsActive: await self.log(message)
                    await message.reply("Group registration has successfully been closed.")
                else:
                    await message.reply("There's no group registration currently ongoing to close!")

    # *=*=*=*=*=*=*=*=* COMMANDS *=*=*=*=*=*=*=*=* #

    def saveDatabase(self):
        """Flush the in-memory `db` dict to its JSON file on disk."""
        with open(database_relative_path, mode="w") as f:
            json.dump(db, f, indent=4)

    async def loadLogsChannel(self):
        """Resolve the logs channel from db; disable logging if not found."""
        channel = await self.fetch_channel(db['logs']['id'])
        if channel:
            self.logsChannel = channel
            self.logsActive = db['logs']['active']
        else:
            self.logsActive = db['logs']['active'] = False
            self.saveDatabase()
            print("Logs channel could not be found by id -- Logs were turned off.")

    async def setLogsChannel(self, channel_id):
        """Persist a new logs channel id and re-resolve the channel object."""
        db['logs']['id'] = channel_id
        self.saveDatabase()
        await self.loadLogsChannel()

    def getUserPerms(self, user):
        """Return the highest permission level among the user's roles.

        Returns -1 for level-0 users while debugging mode is on (blocks them).
        """
        lvls = [0]
        for pLvl, pRoles in db['permRoles'].items():
            if any([role.id in pRoles for role in user.roles]):
                lvls.append(int(pLvl))
        permLevel = max(lvls)
        if permLevel == 0 and self.debugging: return -1
        return permLevel

    async def checkPerms(self, message, command):
        """Return True iff the author's level meets the command's requirement.

        Unknown commands require infinity (i.e. are denied); a denial message
        is sent as a side effect.
        """
        perm_lvl = self.getUserPerms(message.author)
        if self.debugging and perm_lvl == -1:
            await message.reply("Can't use commands while bot is in debugging mode.")
            return False
        try:
            required = cfg["perms"][command]
        except:
            required = float('infinity')
        if self.getUserPerms(message.author) >= required:
            return True
        else:
            await message.reply("You don't have the permission to use this command.")
            return False

    def getAvatarURL(self, user):
        """Build the CDN URL for a user's avatar."""
        base = "https://cdn.discordapp.com/avatars/"
        return base + str(user.id) + "/" + str(user.avatar)

    def getMeEmbed(self, message, user = None):
        """Build a 'User info' embed for *user* (defaults to the author)."""
        embed = dc.Embed(title="User info")
        if not user:
            user = message.author
        embed.color = user.color
        embed.set_image(url=self.getAvatarURL(user))
        joined_info = f"Joined server on `{user.joined_at.strftime('%d/%m/%Y')}`"
        joined_info += f"\nBeen here for: `{str(dt.datetime.now() - user.joined_at).split(',')[0]}`"
        user_roles = [role.mention for role in user.roles if role.name != "@everyone"]
        if not any(user_roles):
            roles_info = "No roles to see here!"
        else:
            roles_info = ", ".join(user_roles)
        # ranking_info =
        embed.add_field(name="Join Date", value=joined_info, inline=False)
        embed.add_field(name="User Roles", value=roles_info, inline=False)
        # embed.add_field(name="Ranking", value=ranking_info, inline=False)
        return embed

    def setPrefix(self, new_prefix):
        """Persist a new command prefix to the config file and apply it."""
        cfg["prefix"] = new_prefix
        with open(config_relative_path, mode="w") as f:
            json.dump(cfg, f, indent=4)
        self.prefix = new_prefix

    def getLeaderboard(self, guild, lenght = 5):
        """Format the top entries of db['ranking'] (capped at 15 rows).

        NOTE(review): the `if not guild.get_member(...)` condition looks
        inverted — as written, only users NOT resolvable in the guild are
        listed. Confirm intent before changing.
        """
        ranking = db["ranking"]
        ranking.sort(key = lambda x: x["exp"], reverse = True)
        lb = ""
        r=1
        for i in range(min(len(ranking), lenght, 15)):
            user = ranking[i]
            if not guild.get_member(user['id']):
                lb+=f"#{r} {guild.get_member(user['id'])}: {user.get('exp')}\n"
                r+=1
        print(lb)
        return lb

    def managePerms(self, command, **args):
        """Set or delete a role's permission level in db['permRoles'].

        Returns True on success, False on missing kwargs or no-op delete.
        """
        if command == "set":
            try:
                level = args["level"]
                role = args["role"]
            except:
                return False
            else:
                # Move the role out of any previous level before adding it.
                for pLvl, pRoles in db["permRoles"].items():
                    if role in pRoles:
                        if int(pLvl) == level:
                            return True
                        db["permRoles"][pLvl] = [r for r in db["permRoles"][pLvl] if r != role]
                        break
                db["permRoles"][str(level)].append(role)
                self.saveDatabase()
                return True
        elif command == "delete":
            try:
                role = args["role"]
            except:
                return False
            else:
                for pLvl, pRoles in db["permRoles"].items():
                    if role in pRoles:
                        db["permRoles"][pLvl] = [r for r in db["permRoles"][pLvl] if r != role]
                        self.saveDatabase()
                        return True
        return False

    async def log(self, message, custom = False):
        """Send a log entry to the logs channel.

        With custom=False, *message* is a discord message and a numbered
        case embed is built from it; with custom=True, *message* is sent
        verbatim as channel content.
        """
        if not custom:
            case = db['logs']['cases']
            db['logs']['cases'] = case+1
            self.saveDatabase()
            embed = dc.Embed(title=f"Log Case #{case}")
            embed.color = message.author.color
            embed.add_field(name="Author", value=message.author.mention, inline=True)
            embed.add_field(name="Channel", value=message.channel.mention, inline=True)
            embed.add_field(name="Date", value=dt.datetime.now().strftime("%d/%m/%Y %H:%M:%S"), inline=True)
            embed.add_field(name="Command", value=f"`{message.content}`", inline=True)
            await self.logsChannel.send(embed=embed)
        else:
            await self.logsChannel.send(message)

    async def resetGroupRoles(self, channel, group_count):
        """Clear all lab/math group roles, archive who had them, and
        create/delete roles so exactly *group_count* lab groups (and
        (group_count-1)//2 + 1 math groups) exist for the new semester.

        Returns True on success, False on invalid role-name templates.
        """
        # Templates like "Lab #" split on '#' into prefix/suffix pairs.
        role_template = cfg["nameSpace"]["labRoleTemplate"].split('#')
        math_role_template = cfg["nameSpace"]["mathRoleTemplate"].split('#')
        if len(role_template) != 2:
            print("config group role template invalid: missing '#'?")
            return False
        elif len(math_role_template) != 2:
            print("config math group role template invalid: missing '#'?")
            return False
        # initialize flags to see which roles exist and create the nonexistent ones later
        lab_flags = [0 for _ in range(group_count)]
        mat_flags = [0 for _ in range((group_count-1)//2 + 1)]
        records = {} # keep record of removed data to save and log it later
        for role in await channel.guild.fetch_roles():
            if (role.name.startswith(role_template[0]) and role.name.endswith(role_template[1])) or (role.name.startswith(math_role_template[0]) and role.name.endswith(math_role_template[1])):
                role_type = "LAB" if role.name.startswith(role_template[0]) else "MAT"
                records[str(role.name)] = []
                members = role.members
                # g_id determines the current group's number
                if role_type == "LAB":
                    g_id = int(role.name[len(role_template[0]):-len(role_template[1])])
                elif role_type == "MAT":
                    g_id = int(role.name[len(math_role_template[0]):-len(math_role_template[1])])
                # clear role from every user and store the changes in records
                await channel.send(f"Clearing `{role.name}` from `{len(members)}` users..")
                for member in members:
                    records[role.name].append(str(member.name + '#' + member.discriminator))
                    await member.remove_roles(role)
                # remove the role entirely if it's not in range of new semester's group length
                if g_id not in range(1,group_count+1):
                    await channel.send(f"Removing `{role.name}`..")
                    await role.delete()
                elif role_type == "MAT" and g_id not in range(1,len(mat_flags)+1):
                    await channel.send(f"Removing `{role.name}`..")
                    await role.delete()
                else:
                    # set flags for roles kept for next semester and save their id's in db for future registration management
                    if role_type == "LAB":
                        lab_flags[g_id-1] = 1
                        db["groupReg"]["role_ids"][str(g_id)] = role.id
                    elif role_type == "MAT":
                        mat_flags[g_id-1] = 1
                        db["groupReg"]["math_role_ids"][str(g_id)] = role.id
        self.saveDatabase()
        # create nonexistent roles based on gaps in flags
        for ID, flag in enumerate(lab_flags):
            if not flag:
                name = f"{role_template[0]}{ID+1}{role_template[1]}"
                await channel.send(f"Creating `{name}`..")
                role = await channel.guild.create_role(name=name,mentionable=True,hoist=True,color=dc.Color.random())
                db["groupReg"]["role_ids"][str(ID+1)] = role.id
        for ID, flag in enumerate(mat_flags):
            if not flag:
                name = f"{math_role_template[0]}{ID+1}{math_role_template[1]}"
                await channel.send(f"Creating `{name}`..")
                role = await channel.guild.create_role(name=name,mentionable=True,color=dc.Color.random())
                db["groupReg"]["math_role_ids"][str(ID+1)] = role.id
        self.saveDatabase()
        # save records to file and log them to logs channel if active
        with open('archives.txt', 'a') as f:
            json.dump(records, f, indent=4)
        # if self.logsActive:
        #     await self.log(f'```json\n{json.dumps(records,indent=4)}\n```', custom=True)
        #     await channel.send(f'`Archive sent to logs channel and saved on machine.`')
        # else:
        await channel.send(f'`Archive saved on machine.`')
        return True

    async def openGroupReg(self, message, group_count):
        """Start a new semester: reset group roles, rebuild the registration
        category/channels, persist registration state and announce it.

        Returns True on success, False if the role reset failed.
        """
        if await self.resetGroupRoles(message.channel, group_count):
            db["groupReg"]["active"] = True
            db["groupReg"]["groupCount"] = group_count # group_count determines the len of lab groups in new semester
            # rid of registration category and text channels if they exist
            for category in message.guild.categories:
                if category.name == cfg["nameSpace"]["groupsRegCategory"]:
                    for channel in category.channels:
                        await channel.delete()
                    await category.delete()
                    break
            # create new category with its text channels for registration
            GRC = await message.guild.create_category(name=cfg["nameSpace"]["groupsRegCategory"], position=2)
            GRIC = await GRC.create_text_channel(name=cfg["nameSpace"]["groupsRegInfoChannel"])
            await GRIC.set_permissions(message.guild.roles[0], send_messages = False, read_messages = True)
            GRC = await GRC.create_text_channel(name=cfg["nameSpace"]["groupsRegChannel"])
            # save the channel id used for registration for command management purposes
            db["groupReg"]["channel_id"] = GRC.id
            self.saveDatabase()
            # send registration opening notification to GRIC
            await message.channel.send(f'`Group registration channel created.`')
            info = f''':warning: @everyone Rejestracja do grup w nowym semestrze została otwarta! :warning: \n
**Aby poprawnie zarejestrować się do grupy LAB oraz MAT wyślij** `lab #numerGrupy` **oraz** `mat #numerGrupy` **na kanale** {GRC.mention}, np. `lab 4`; `mat 2` lub `lab 4 mat 2`.
Dla osób będących w kilku grupach laboratoryjnych jednocześnie - proszę kontaktować się z administracją serwera.'''
            await GRIC.send(info)
            # send new semester decorator on all group channels
            for channel in message.guild.channels:
                if channel.name.endswith(cfg["nameSpace"]["generalChannelTemplate"]):
                    await channel.send(cfg["nameSpace"]["newSemesterDecorator"])
                elif channel.name.endswith(cfg["nameSpace"]["datesChannelTemplate"]):
                    await channel.send(cfg["nameSpace"]["newSemesterDecorator"])
                elif channel.name.startswith(cfg["nameSpace"]["mathChannelTemplate"]):
                    await channel.send(cfg["nameSpace"]["newSemesterDecorator"])
            return True
        return False

    async def groupReg(self, message):
        """Parse a registration message ('lab N', 'mat N' or both) in the
        registration channel and assign the corresponding group roles."""
        user = message.author
        content = message.content.lower()
        l_id = content.find('lab')
        m_id = content.find('mat')
        digits = string.digits
        lab_gr = mat_gr = None
        # do some string magic to extract lab group number from message if it inclues "lab" keyword
        if l_id >= 0:
            if m_id > l_id: # dont include the "mat" keyword if it appears after "lab"
                cntnt = content[l_id+3:m_id].lstrip()
            else: cntnt = content[l_id+3:].lstrip()
            # Take the leading run of digits only (stops at the first non-digit).
            lab_gr = int("".join([v for vID, v in enumerate(cntnt) if v in digits and not any([c not in digits for c in cntnt[:vID]])]))
            # return with an exception if the number is not in current lab groups range
            if lab_gr not in range(1,db["groupReg"]["groupCount"]+1):
                await message.reply(f"Lab group needs to be between `1` and `{db['groupReg']['groupCount']}`.")
                return
        # same string magic for mat group number
        if m_id >= 0:
            if l_id > m_id: # dont include the "lab" keyword if it appears after "mat"
                cntnt = content[m_id+3:l_id].lstrip()
            else: cntnt = content[m_id+3:].lstrip()
            mat_gr = int("".join([v for vID, v in enumerate(cntnt) if v in digits and not any([c not in digits for c in cntnt[:vID]])]))
            # return with an exception if the number is not in current mat groups range
            if mat_gr not in range(1,(db["groupReg"]["groupCount"]-1)//2 + 2):
                await message.reply(f"Mat group needs to be between `1` and `{(db['groupReg']['groupCount']-1)//2 + 1}`.")
                return
        # assign group roles to user and catch the output
        out = await self.regToGroups(user, lab_gr, mat_gr)
        if out:
            await message.reply(f"Successfully registered to: `{'`, `'.join(out)}`")
        else:
            await message.reply("An error occured while registering to group, please try again.")

    async def regToGroups(self, user, labGroup=None, matGroup=None):
        """Remove the user's existing lab/mat group roles (per requested kind)
        and assign the requested ones; returns the list of applied role names,
        or False when neither group was requested."""
        if not (labGroup or matGroup): return False
        for role in user.roles:
            if labGroup and role.id in tuple(db["groupReg"]["role_ids"].values()):
                await user.remove_roles(role)
            elif matGroup and role.id in tuple(db["groupReg"]["math_role_ids"].values()):
                await user.remove_roles(role)
        output = [] # store successfully applied roles in output
        if labGroup:
            lab_id = db["groupReg"]["role_ids"][str(labGroup)]
            role = user.guild.get_role(lab_id)
            output.append(role.name)
            await user.add_roles(role)
        if matGroup:
            mat_id = db["groupReg"]["math_role_ids"][str(matGroup)]
            role = user.guild.get_role(mat_id)
            output.append(role.name)
            await user.add_roles(role)
        return output

    async def closeGroupReg(self, message):
        """End registration: reset persisted registration state and delete
        the registration category with its channels."""
        # reset group registration database
        db["groupReg"]["active"] = False
        db["groupReg"]["channel_id"] = None
        db["groupReg"]["groupCount"] = 0
        db["groupReg"]["role_ids"] = {}
        db["groupReg"]["math_role_ids"] = {}
        self.saveDatabase()
        # rid of registration category and text channels if they exist
        for category in message.guild.categories:
            if category.name == cfg["nameSpace"]["groupsRegCategory"]:
                for channel in category.channels:
                    await channel.delete()
                await category.delete()
                break
# Start the bot with all gateway intents enabled (required for member/role access).
intents = dc.Intents.all()
bot_client = BOT(intents=intents)
bot_client.run(token)
330916 | <filename>methods/tools/controller.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
# Some methods used by main methods.
import os
from utils.helpers.file_helper import FileHelper
from utils.tools.logger import Logger as Log
class Controller(object):
    """Static helpers that drive a runner through the init/train/debug/test
    phases of an experiment."""

    @staticmethod
    def init(runner):
        """Reset the runner's bookkeeping counters before a fresh run."""
        runner.runner_state.update({
            'iters': 0,
            'last_iters': 0,
            'epoch': 0,
            'last_epoch': 0,
            'performance': 0,
            'val_loss': 0,
            'max_performance': 0,
            'min_val_loss': 0,
        })

    @staticmethod
    def _run_until(runner, counter_key, limit_key):
        """Train until runner_state[counter_key] reaches the configured solver
        limit, running a final validation pass when the limit is hit."""
        while runner.runner_state[counter_key] < runner.configer.get('solver', limit_key):
            runner.train()
            if runner.runner_state[counter_key] == runner.configer.get('solver', limit_key):
                runner.val()
                break

    @staticmethod
    def train(runner):
        """Run the training loop, metered by epochs or iterations depending on
        the solver's lr metric; optionally validates first when resuming."""
        Log.info('Training start...')
        if runner.configer.get('network', 'resume') is not None and runner.configer.get('network', 'resume_val'):
            runner.val()
        if runner.configer.get('solver', 'lr')['metric'] == 'epoch':
            Controller._run_until(runner, 'epoch', 'max_epoch')
        else:
            Controller._run_until(runner, 'iters', 'max_iters')
        Log.info('Training end...')

    @staticmethod
    def debug(runner):
        """Run the runner's debug pass, writing visualizations under out/vis."""
        Log.info('Debugging start..')
        vis_dir = os.path.join(runner.configer.get('project_dir'), 'out/vis',
                               runner.configer.get('task'),
                               runner.configer.get('network', 'model_name'))
        if not os.path.exists(vis_dir):
            os.makedirs(vis_dir)
        runner.debug(vis_dir)
        Log.info('Debugging end...')

    @staticmethod
    def test(runner):
        """Run inference on a single image and/or a directory, as configured.

        Exits with status 1 when neither test_img nor test_dir is set.
        """
        Log.info('Testing start...')
        out_dir = os.path.join(runner.configer.get('project_dir'),
                               'results', runner.configer.get('task'),
                               runner.configer.get('dataset'),
                               runner.configer.get('network', 'checkpoints_name'),
                               runner.configer.get('test', 'out_dir'))
        img_path = runner.configer.get('test', 'test_img')
        dir_path = runner.configer.get('test', 'test_dir')
        if img_path is None and dir_path is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)
        if img_path is not None:
            runner.test_img(img_path, out_dir)
        if dir_path is not None:
            runner.test(dir_path, out_dir)
        Log.info('Testing end...')
| StarcoderdataPython |
11244031 | <reponame>GrowingData/hyper-model
import click
import logging
# from hypermodel.platform.gcp.services import GooglePlatformServices
# from hypermodel.ml.model_container import ModelContainer
from titanic.pipeline.tragic_titanic_training_pipeline import (
FEATURE_COLUMNS,
TARGET_COLUMN,
)
from hypermodel.platform.local.services import LocalServices
from titanic.tragic_titanic_config import (
DB_LOCATION,
DB_TABLE,
DB_TRAINING_TABLE,
DB_TESTING_TABLE,
TRAINING_CSV_LOCATION,
TESTING_CSV_LOCATION)
@click.group()
def transform():
    """ Pipeline operations relating to transforming data"""
    # NOTE(review): f-string below has no placeholders; a plain string would do.
    logging.info(f"Created transform:transform")
    pass
@transform.command()
@click.pass_context
def create_training(ctx):
    """Import the training CSV into the local warehouse's training table.

    Reads services from the click context (set up by the CLI entry point).
    Removed the superseded, commented-out select_into/warehouse-query code.
    """
    logging.info("Entering transform:create_training")
    services: LocalServices = ctx.obj["services"]
    services.warehouse.import_csv(TRAINING_CSV_LOCATION, DB_LOCATION, DB_TRAINING_TABLE)
    logging.info(f"Wrote training set to {DB_TRAINING_TABLE}. Success!")
@transform.command()
@click.pass_context
def create_test(ctx):
    """Import the testing CSV into the local warehouse's testing table.

    Bug fix: previously imported TRAINING_CSV_LOCATION into the testing table,
    although TESTING_CSV_LOCATION is imported by this module for exactly this
    purpose. Also removed the superseded, commented-out select_into code.
    """
    logging.info("Entering transform:create_test")
    services: LocalServices = ctx.obj["services"]
    services.warehouse.import_csv(TESTING_CSV_LOCATION, DB_LOCATION, DB_TESTING_TABLE)
    logging.info(f"Wrote test set to {DB_TESTING_TABLE}. Success!")
| StarcoderdataPython |
def ensure_bool(x):
    """Coerce *x* to a bool.

    Accepts real bools, strings whose lowercase form starts with 't'/'f',
    and ints (truthiness). Anything else raises ValueError.
    """
    if isinstance(x, bool):
        return x
    if isinstance(x, str):
        lowered = x.lower()
        if lowered.startswith('t'):
            return True
        if lowered.startswith('f'):
            return False
    elif isinstance(x, int):
        return bool(x)
    raise ValueError(f"Couldn't convert to a boolean: {x}")
9649583 | <reponame>qinjidong/esp8266-v3.0-msys32<filename>mingw32/bin/doesitcache2-script.py
#!C:/msys32/mingw32/bin/python2.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'CacheControl==0.12.5','console_scripts','doesitcache'
# Auto-generated setuptools console-script wrapper for CacheControl's
# 'doesitcache' entry point; do not hand-edit.
__requires__ = 'CacheControl==0.12.5'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py(w)'/'.exe' suffix so argv[0] matches the console name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('CacheControl==0.12.5', 'console_scripts', 'doesitcache')()
    )
| StarcoderdataPython |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
from common.waterfall import failure_type
from libs.gitiles.blame import Blame
from libs.gitiles.blame import Region
from libs.gitiles.change_log import Contributor
from libs.gitiles.change_log import FileChangeInfo
from libs.gitiles.diff import ChangeType
from libs.gitiles.gitiles_repository import GitilesRepository
from waterfall import build_failure_analysis
from waterfall.failure_signal import FailureSignal
from waterfall.test import wf_testcase
class BuildFailureAnalysisTest(wf_testcase.WaterfallTestCase):
def _MockGetChangeLog(self, revision):
class MockChangeLog(object):
def __init__(self, date, touched_files):
self.author = Contributor(
'name', '<EMAIL>',
datetime.strptime(
'Jun %s 04:35:32 2015' % date, '%b %d %H:%M:%S %Y'))
self.touched_files = touched_files
MOCK_CHANGE_LOGS = {}
MOCK_CHANGE_LOGS['1'] = MockChangeLog('1', [])
MOCK_CHANGE_LOGS['2'] = MockChangeLog('2', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.ADD,
'old_path': 'dev/null',
'new_path': 'third_party/dep/f.cc'})])
MOCK_CHANGE_LOGS['3'] = MockChangeLog('3', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.MODIFY,
'old_path': 'third_party/dep/f.cc',
'new_path': 'third_party/dep/f.cc'})])
MOCK_CHANGE_LOGS['4'] = MockChangeLog('4', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.MODIFY,
'old_path': 'third_party/dep/f2.cc',
'new_path': 'third_party/dep/f2.cc'})])
MOCK_CHANGE_LOGS['5'] = MockChangeLog('5', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.MODIFY,
'old_path': 'third_party/dep/f3.cc',
'new_path': 'third_party/dep/f3.cc'})])
MOCK_CHANGE_LOGS['6'] = MockChangeLog('6', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.MODIFY,
'old_path': 'third_party/dep/f.cc',
'new_path': 'third_party/dep/f.cc'})])
MOCK_CHANGE_LOGS['7'] = MockChangeLog('7', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.MODIFY,
'old_path': 'third_party/dep/f.cc',
'new_path': 'third_party/dep/f.cc'})])
MOCK_CHANGE_LOGS['8'] = MockChangeLog('8', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.MODIFY,
'old_path': 'third_party/dep/f.cc',
'new_path': 'third_party/dep/f.cc'})])
MOCK_CHANGE_LOGS['9'] = MockChangeLog('9', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.DELETE,
'old_path': 'third_party/dep/f.cc',
'new_path': 'dev/null'})])
MOCK_CHANGE_LOGS['10'] = MockChangeLog('10', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.MODIFY,
'old_path': 'third_party/dep/f2.cc',
'new_path': 'third_party/dep/f2.cc'})])
MOCK_CHANGE_LOGS['11'] = MockChangeLog('11', [
FileChangeInfo.FromDict(
{'change_type': ChangeType.MODIFY,
'old_path': 'third_party/dep/f2.cc',
'new_path': 'third_party/dep/f2.cc'})])
return MOCK_CHANGE_LOGS.get(revision, MockChangeLog('12', []))
  def _MockGetCommitsBetweenRevisions(self, start_revision, end_revision):
    """Return the revisions after start_revision up to and including
    end_revision, as strings.

    NOTE: under Python 2 (this file's target) ``map`` returns a list; the
    callers rely on that.
    """
    return map(str, range(int(start_revision) + 1, int(end_revision) + 1))
def _MockGetBlame(self, path, revision):
if revision == '10' or revision == '11' or path == 'f_not_exist.cc':
return None
blame = Blame(revision, path)
blame.AddRegions([
Region(1, 2, '7',
u'<EMAIL>', u'<EMAIL>',
datetime(2015, 06, 07, 04, 35, 32)),
Region(3, 3, '5',
u'<EMAIL>', u'<EMAIL>',
datetime(2015, 06, 05, 04, 35, 32)),
Region(7, 1, '8',
u'<EMAIL>', u'<EMAIL>',
datetime(2015, 06, 8, 04, 35, 32)),
Region(8, 1, '7',
u'<EMAIL>', u'<EMAIL>',
datetime(2015, 06, 07, 21, 35, 32)),
Region(9, 10, '12',
u'<EMAIL>', u'<EMAIL>',
datetime(2015, 06, 12, 04, 35, 32))])
return blame
  def testIsSameFile(self):
    """_IsSameFile matches on path-suffix, case-insensitively.

    A match requires whole path components: a shorter path must be a
    component-aligned suffix of the longer one.
    """
    self.assertTrue(build_failure_analysis._IsSameFile('a/b/x.cc', 'x.cc'))
    self.assertTrue(build_failure_analysis._IsSameFile('a/b/x.cc', 'b/x.cc'))
    self.assertTrue(build_failure_analysis._IsSameFile('a/b/x.cc', 'a/b/x.cc'))
    # Comparison is case-insensitive.
    self.assertTrue(build_failure_analysis._IsSameFile('A/B/X.cc', 'a/b/x.cc'))
    # Partial component names must not match.
    self.assertFalse(
        build_failure_analysis._IsSameFile('a/prefix_x.cc.', 'x.cc'))
    self.assertFalse(
        build_failure_analysis._IsSameFile('prefix_a/x.cc.', 'a/x.cc'))
    # The first path being a suffix of the second is not a match either.
    self.assertFalse(
        build_failure_analysis._IsSameFile('c/x.cc.', 'a/b/c/x.cc'))
    self.assertFalse(build_failure_analysis._IsSameFile('a/x.cc.', 'a/y.cc'))
  def testNormalizeObjectFile(self):
    """_NormalizeObjectFilePath strips the obj/ prefix and 'T.' target tag."""
    cases = {
        # object file path -> expected normalized path
        'obj/a/T.x.o': 'a/x.o',
        'obj/a/T.x.y.o': 'a/x.y.o',
        'x.o': 'x.o',
        'obj/a/x.obj': 'a/x.obj',
        'a.cc.obj': 'a.cc.obj',
        'T.a.c.o': 'a.c.o',
        'T.a.o': 'a.o'
    }
    for obj_file, expected_file in cases.iteritems():
      self.assertEqual(
          expected_file,
          build_failure_analysis._NormalizeObjectFilePath(obj_file))
def testStripCommonSuffix(self):
cases = {
'a_file':
'a_file_%s.cc' % '_'.join(build_failure_analysis._COMMON_SUFFIXES),
'src/b_file': 'src/b_file_impl_mac.h',
'c_file': 'c_file_browsertest.cc',
'xdtest': 'xdtest.cc',
}
for expected_file, file_path in cases.iteritems():
self.assertEqual(
expected_file,
build_failure_analysis._StripExtensionAndCommonSuffix(file_path))
def testIsRelated(self):
self.assertTrue(build_failure_analysis._IsRelated('a.py', 'a_test.py'))
self.assertTrue(
build_failure_analysis._IsRelated('a.h', 'a_impl_test.o'))
self.assertTrue(
build_failure_analysis._IsRelated('a.h', 'target.a_impl_test.obj'))
self.assertFalse(
build_failure_analysis._IsRelated('a/x.cc', 'a/b/y.cc'))
self.assertFalse(
build_failure_analysis._IsRelated('a/x.cc', 'xdtest.cc'))
self.assertFalse(
build_failure_analysis._IsRelated('a_tests.cc', 'a_browsertests.cc'))
self.assertFalse(
build_failure_analysis._IsRelated('cc_unittests.isolate', 'a.cc.obj'))
self.assertFalse(
build_failure_analysis._IsRelated('a.h', 'a.pyc'))
self.assertFalse(
build_failure_analysis._IsRelated('a', 'b'))
self.assertFalse(build_failure_analysis._IsRelated('a', 'a'))
def testCheckFilesAgainstSuspectedCL(self):
failure_signal_json = {
'files': {
'src/a/b/f1.cc': [],
'd/e/a2_test.cc': [],
'b/c/f2.cc': [10, 20],
'd/e/f3.h': [],
'x/y/f4.py': [],
'f5_impl.cc': []
}
}
change_log_json = {
'revision': 'rev',
'touched_files': [
{
'change_type': ChangeType.ADD,
'old_path': '/dev/null',
'new_path': 'a/b/f1.cc'
},
{
'change_type': ChangeType.ADD,
'old_path': '/dev/null',
'new_path': 'd/e/a2.cc'
},
{
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/c/f2.h',
'new_path': 'a/b/c/f2.h'
},
{
'change_type': ChangeType.MODIFY,
'old_path': 'd/e/f3.h',
'new_path': 'd/e/f3.h'
},
{
'change_type': ChangeType.DELETE,
'old_path': 'x/y/f4.py',
'new_path': '/dev/null'
},
{
'change_type': ChangeType.DELETE,
'old_path': 'h/f5.h',
'new_path': '/dev/null'
},
{
'change_type': ChangeType.RENAME,
'old_path': 't/y/x.cc',
'new_path': 's/z/x.cc'
},
]
}
deps_info = {}
justification = build_failure_analysis._CheckFiles(
FailureSignal.FromDict(failure_signal_json),
change_log_json, deps_info)
self.assertIsNotNone(justification)
# The score is 15 because:
# +5 added a/b/f1.cc (same file src/a/b/f1.cc in failure_signal log)
# +1 added d/e/a2.cc (related file a2_test.cc in failure_signal log)
# +1 modified b/c/f2.h (related file a/b/c/f2.cc in failure_signal log)
# +2 modified d/e/f3.h (same file d/e/f3.h in failure_signal log)
# +5 deleted x/y/f4.py (same file x/y/f4.py in failure_signal log)
# +1 deleted h/f5.h (related file f5_impl.cc in failure_signal log)
# +0 renamed t/y/x.cc -> s/z/x.cc (no related file in failure_signal log)
self.assertEqual(15, justification['score'])
def testCheckFilesAgainstUnrelatedCL(self):
failure_signal_json = {
'files': {
'src/a/b/f.cc': [],
}
}
change_log_json = {
'revision': 'rev',
'touched_files': [
{
'change_type': ChangeType.ADD,
'old_path': '/dev/null',
'new_path': 'a/d/f1.cc'
},
]
}
deps_info = {}
justification = build_failure_analysis._CheckFiles(
FailureSignal.FromDict(failure_signal_json),
change_log_json, deps_info)
self.assertIsNone(justification)
def _testCheckFileInDependencyRoll(
self, file_path_in_log, rolls, expected_score, line_numbers,
expected_hints=None):
self.mock(GitilesRepository, 'GetChangeLog', self._MockGetChangeLog)
self.mock(GitilesRepository, 'GetBlame', self._MockGetBlame)
self.mock(GitilesRepository, 'GetCommitsBetweenRevisions',
self._MockGetCommitsBetweenRevisions)
justification = build_failure_analysis._Justification()
build_failure_analysis._CheckFileInDependencyRolls(
file_path_in_log, rolls, justification, line_numbers)
self.assertEqual(expected_score, justification.score)
if expected_hints:
self.assertEqual(expected_hints, justification._hints)
def testCheckFileInDependencyRollWhenUnrelatedDependencyIsRolled(self):
file_path_in_log = 'third_party/dep/f.cc'
rolls = [
{ # An unrelated dependency was rolled to a new revision.
'path': 'src/third_party/dep2/',
'repo_url': 'https://url_dep2',
'old_revision': '6',
'new_revision': '8',
},
]
expected_score = 0
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
None)
def testCheckFileInDependencyRollWhenRelatedDependencyIsRolled(self):
file_path_in_log = 'third_party/dep/f.cc'
rolls = [
{ # Dependency was rolled to a new revision.
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '6',
'new_revision': '8',
},
]
expected_score = 1
expected_hints = {
('rolled dependency third_party/dep/ with changes in '
'https://url_dep/+log/6..8?pretty=fuller (and f.cc was in log)'): 1
}
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
None, expected_hints)
def testCheckFileInDependencyRollWhenRelatedDependencyIsAdded(self):
file_path_in_log = 'third_party/dep/f.cc'
rolls = [
{ # Dependency was newly-added.
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': None,
'new_revision': '9',
},
]
expected_score = 5
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
None)
def testCheckFileInDependencyRollWhenRelatedDependencyIsDeleted(self):
file_path_in_log = 'third_party/dep/f.cc'
rolls = [
{ # Dependency was deleted.
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '7',
'new_revision': None,
},
]
expected_score = 5
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
None)
def testCheckFileInDependencyRollWhenFileIsAddedWithinTheRoll(self):
rolls = [
{ # One region in blame.
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '1',
'new_revision': '2',
}
]
file_path_in_log = 'third_party/dep/f.cc'
expected_score = 5
expected_hints = {
('rolled dependency third_party/dep/ with changes in '
'https://url_dep/+log/1..2?pretty=fuller '
'(and f.cc(added) was in log)'): 5
}
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
None, expected_hints)
def testCheckFileInDependencyRollWhenFileIsAddedAndChangedWithinTheRoll(self):
file_path_in_log = 'third_party/dep/f.cc'
rolls = [
{ # Multiple regions in blame, but they are all after old revision.
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '1',
'new_revision': '3',
},
]
expected_score = 5
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
None)
def testCheckFileInDependencyRollWhenFileIsNotTouchedWithinTheRoll(self):
rolls = [{
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '3',
'new_revision': '5',
}]
file_path_in_log = 'third_party/dep/f.cc'
expected_score = 0
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
None)
def testCheckFileInDependencyRollWhenLinesAreChangedWithinTheRoll(self):
rolls = [{
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '6',
'new_revision': '8',
}]
file_path_in_log = 'third_party/dep/f.cc'
line_numbers = [2, 7, 12]
expected_score = 4
expected_hints = {
('rolled dependency third_party/dep/ with changes in '
'https://url_dep/+log/6..8?pretty=fuller '
'(and f.cc[2, 7] was in log)'): 4
}
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
line_numbers, expected_hints)
def testCheckFileInDependencyRollWhenFileIsNotChanged(self):
rolls = [{
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '8',
'new_revision': '9',
}]
file_path_in_log = 'third_party/dep/not_this_file.cc'
line_numbers = [2, 7, 8]
expected_score = 0
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
line_numbers)
def testCheckFileInDependencyRollWhenFileIsDeleted(self):
rolls = [
{
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '8',
'new_revision': '10',
}
]
file_path_in_log = 'third_party/dep/f.cc'
expected_score = 5
expected_hints = {
('rolled dependency third_party/dep/ with changes in '
'https://url_dep/+log/8..10?pretty=fuller '
'(and f.cc(deleted) was in log)'): 5
}
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
None, expected_hints)
def testCheckFileInDependencyRollWhenFileIsModifiedWithoutBlame(self):
rolls = [
{
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '10',
'new_revision': '11',
}
]
file_path_in_log = 'third_party/dep/f2.cc'
line_numbers = [2, 7, 8]
expected_score = 1
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
line_numbers)
def testCheckFileInDependencyRollRolledDowngrade(self):
rolls = [{
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '8',
'new_revision': '6',
}]
file_path_in_log = 'third_party/dep/f.cc'
line_numbers = [2, 7, 8]
expected_score = 0
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
line_numbers)
def testCheckFileInDependencyRollFileNotExist(self):
rolls = [{
'path': 'src/third_party/dep/',
'repo_url': 'https://url_dep',
'old_revision': '6',
'new_revision': '8',
}]
file_path_in_log = 'third_party/dep/f_not_exist.cc'
line_numbers = [2, 7, 8]
expected_score = 0
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
line_numbers)
def testCheckFileInDependencyRollOnV8(self):
rolls = [{
'path': 'src/v8/',
'repo_url': 'https://chromium.googlesource.com/v8/v8.git',
'old_revision': '6',
'new_revision': '8',
}]
file_path_in_log = 'v8/f.cc'
line_numbers = [2, 7, 8]
expected_score = 0
self._testCheckFileInDependencyRoll(file_path_in_log, rolls, expected_score,
line_numbers)
def testCheckFilesAgainstDEPSRollWithUnrelatedLinesChanged(self):
failure_signal_json = {
'files': {
'src/third_party/dep1/f.cc': [123],
}
}
change_log_json = {
'revision': 'rev',
'touched_files': [
{
'change_type': ChangeType.MODIFY,
'old_path': 'DEPS',
'new_path': 'DEPS'
},
]
}
deps_info = {
'deps_rolls': {
'rev': [
{
'path': 'src/third_party/dep1/',
'repo_url': 'https://url_dep1',
'old_revision': '7',
'new_revision': '9',
},
]
}
}
self.mock(GitilesRepository, 'GetChangeLog', self._MockGetChangeLog)
self.mock(GitilesRepository, 'GetCommitsBetweenRevisions',
self._MockGetCommitsBetweenRevisions)
self.mock(GitilesRepository, 'GetBlame', self._MockGetBlame)
justification = build_failure_analysis._CheckFiles(
FailureSignal.FromDict(failure_signal_json),
change_log_json, deps_info)
self.assertIsNotNone(justification)
# The score is 1 because:
# +1 rolled third_party/dep1/ and src/third_party/dep1/f.cc was in log.
self.assertEqual(1, justification['score'])
def testAnalyzeSuccessfulBuild(self):
failure_info = {
'failed': False,
}
result, suspected_cls = build_failure_analysis.AnalyzeBuildFailure(
failure_info, change_logs=None, deps_info=None, failure_signals=None)
self.assertEqual(0, len(result['failures']))
self.assertEqual([], suspected_cls)
def testAnalyzeBuildWithoutValidChromiumRevision(self):
failure_info = {
'failed': True,
'chromium_revision': None,
}
result, suspected_cls = build_failure_analysis.AnalyzeBuildFailure(
failure_info, change_logs=None, deps_info=None, failure_signals=None)
self.assertEqual(0, len(result['failures']))
self.assertEqual([], suspected_cls)
def testAnalyzeBuildWithInfraFailure(self):
failure_info = {
'failed': True,
'failure_type': failure_type.INFRA,
'chromium_revision': '00baf00ba',
}
result, suspected_cls = build_failure_analysis.AnalyzeBuildFailure(
failure_info, change_logs=None, deps_info=None, failure_signals=None)
self.assertEqual(0, len(result['failures']))
self.assertEqual([], suspected_cls)
def testAnalyzeBuildFailure(self):
failure_info = {
'master_name': 'm',
'builder_name': 'b',
'build_number': 99,
'failure_type': failure_type.TEST,
'failed': True,
'chromium_revision': 'r99_2',
'failed_steps': {
'a': {
'current_failure': 99,
'first_failure': 98,
},
'b': {
'current_failure': 99,
'first_failure': 98,
'last_pass': 96,
},
},
'builds': {
'99': {
'blame_list': ['r99_1', 'r99_2'],
},
'98': {
'blame_list': ['r98_1'],
},
'97': {
'blame_list': ['r97_1'],
},
'96': {
'blame_list': ['r96_1', 'r96_2'],
},
}
}
change_logs = {
'r99_1': {
'revision': 'r99_1',
'touched_files': [
{
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/f99_1.cc',
'new_path': 'a/b/f99_1.cc'
},
],
},
'r99_2': {
'revision': 'r99_2',
'touched_files': [
{
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/f99_2.cc',
'new_path': 'a/b/f99_2.cc'
},
],
},
'r98_1': {
'revision': 'r98_1',
'touched_files': [
{
'change_type': ChangeType.MODIFY,
'old_path': 'y/z/f98.cc',
'new_path': 'y/z/f98.cc'
},
],
},
'r97_1': {
'revision': 'r97_1',
'touched_files': [
{
'change_type': ChangeType.ADD,
'old_path': '/dev/null',
'new_path': 'x/y/f99_1.cc'
},
{
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/f99_1.cc',
'new_path': 'a/b/f99_1.cc'
},
],
},
'r96_1': {
'revision': 'r96_1',
'touched_files': [
{
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/f96_1.cc',
'new_path': 'a/b/f96_1.cc'
},
],
},
}
deps_info = {}
failure_signals_json = {
'a': {
'files': {
'src/a/b/f99_2.cc': [],
},
},
'b': {
'files': {
'x/y/f99_1.cc': [],
},
},
}
expected_analysis_result = {
'failures': [
{
'step_name': 'a',
'supported': True,
'first_failure': 98,
'last_pass': None,
'suspected_cls': [
{
'build_number': 99,
'repo_name': 'chromium',
'revision': 'r99_2',
'commit_position': None,
'url': None,
'score': 2,
'hints': {
'modified f99_2.cc (and it was in log)': 2,
},
}
],
},
{
'step_name': 'b',
'supported': True,
'first_failure': 98,
'last_pass': 96,
'suspected_cls': [
{
'build_number': 97,
'repo_name': 'chromium',
'revision': 'r97_1',
'commit_position': None,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
},
}
],
}
]
}
expected_suspected_cl = [
{
'repo_name': 'chromium',
'revision': 'r99_2',
'commit_position': None,
'url': None,
'failures': {
'a': []
},
'top_score': 2
},
{
'repo_name': 'chromium',
'revision': 'r97_1',
'commit_position': None,
'url': None,
'failures': {
'b': []
},
'top_score': 5
}
]
analysis_result, suspected_cls = build_failure_analysis.AnalyzeBuildFailure(
failure_info, change_logs, deps_info, failure_signals_json)
self.assertEqual(expected_analysis_result, analysis_result)
self.assertEqual(sorted(expected_suspected_cl), sorted(suspected_cls))
def testAnalyzeBuildFailureTestLevel(self):
failure_info = {
'failed': True,
'chromium_revision': 'r99_2',
'master_name': 'm',
'builder_name': 'b',
'build_number': 99,
'failure_type': failure_type.TEST,
'failed_steps': {
'a': {
'current_failure': 99,
'first_failure': 98,
},
'b': {
'current_failure': 99,
'first_failure': 98,
'last_pass': 96,
'list_isolated_data': [
{
'isolatedserver': 'https://isolateserver.appspot.com',
'namespace': 'default-gzip',
'digest': 'isolatedhashabctest-223'
}
],
'tests': {
'Unittest1.Subtest1': {
'current_failure': 99,
'first_failure': 98,
'last_pass': 97
},
'Unittest2.Subtest1': {
'current_failure': 99,
'first_failure': 98,
'last_pass': 97
},
'Unittest3.Subtest2': {
'current_failure': 99,
'first_failure': 98,
'last_pass': 96
},
'Unittest3.Subtest3': {
'current_failure': 99,
'first_failure': 98,
'last_pass': 96
}
}
},
},
'builds': {
'99': {
'blame_list': ['r99_1', 'r99_2'],
},
'98': {
'blame_list': ['r98_1'],
},
'97': {
'blame_list': ['r97_1'],
},
'96': {
'blame_list': ['r96_1', 'r96_2'],
},
}
}
change_logs = {
'r99_1': {
'revision': 'r99_1',
'touched_files': [
{
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/f99_1.cc',
'new_path': 'a/b/f99_1.cc'
},
],
},
'r99_2': {
'revision': 'r99_2',
'touched_files': [
{
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/f99_2.cc',
'new_path': 'a/b/f99_2.cc'
},
],
},
'r98_1': {
'revision': 'r98_1',
'touched_files': [
{
'change_type': ChangeType.MODIFY,
'old_path': 'y/z/f98.cc',
'new_path': 'y/z/f98.cc'
},
],
},
'r97_1': {
'revision': 'r97_1',
'touched_files': [
{
'change_type': ChangeType.ADD,
'old_path': '/dev/null',
'new_path': 'x/y/f99_1.cc'
},
{
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/f99_1.cc',
'new_path': 'a/b/f99_1.cc'
},
],
},
'r96_1': {
'revision': 'r96_1',
'touched_files': [
{
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/f96_1.cc',
'new_path': 'a/b/f96_1.cc'
},
],
},
}
deps_info = {}
failure_signals_json = {
'a': {
'files': {
'src/a/b/f99_2.cc': [],
},
},
'b': {
'files': {
'x/y/f99_1.cc': [],
'y/z/f98.cc': [123, 456],
},
'tests': {
'Unittest1.Subtest1': {
'files': {
'x/y/f99_1.cc': [],
},
},
'Unittest2.Subtest1': {
'files': {
'y/z/f98.cc': [123],
},
},
'Unittest3.Subtest2': {
'files': {
'y/z/f98.cc': [456],
},
}
}
}
}
def MockGetChangedLines(repo_info, touched_file, line_numbers, _):
# Only need line_numbers, ignoring the first two parameters.
del repo_info, touched_file
if line_numbers:
return line_numbers
self.mock(build_failure_analysis, '_GetChangedLinesForChromiumRepo',
MockGetChangedLines)
expected_analysis_result = {
'failures': [
{
'step_name': 'a',
'first_failure': 98,
'last_pass': None,
'supported': True,
'suspected_cls': [
{
'build_number': 99,
'repo_name': 'chromium',
'revision': 'r99_2',
'commit_position': None,
'url': None,
'score': 2,
'hints': {
'modified f99_2.cc (and it was in log)': 2,
},
}
],
},
{
'step_name': 'b',
'first_failure': 98,
'last_pass': 96,
'supported': True,
'suspected_cls': [
{
'build_number': 97,
'repo_name': 'chromium',
'revision': 'r97_1',
'commit_position': None,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
},
},
{
'build_number': 98,
'repo_name': 'chromium',
'revision': 'r98_1',
'commit_position': None,
'url': None,
'score': 4,
'hints': {
'modified f98.cc[123, 456] (and it was in log)': 4,
},
}
],
'tests': [
{
'test_name': 'Unittest1.Subtest1',
'first_failure': 98,
'last_pass': 97,
'suspected_cls': [
{
'build_number': 97,
'repo_name': 'chromium',
'revision': 'r97_1',
'commit_position': None,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
},
}
]
},
{
'test_name': 'Unittest2.Subtest1',
'first_failure': 98,
'last_pass': 97,
'suspected_cls': [
{
'build_number': 98,
'repo_name': 'chromium',
'revision': 'r98_1',
'commit_position': None,
'url': None,
'score': 4,
'hints': {
('modified f98.cc[123] '
'(and it was in log)'): 4,
},
}
]
},
{
'test_name': 'Unittest3.Subtest2',
'first_failure': 98,
'last_pass': 96,
'suspected_cls': [
{
'build_number': 98,
'repo_name': 'chromium',
'revision': 'r98_1',
'commit_position': None,
'url': None,
'score': 4,
'hints': {
('modified f98.cc[456] '
'(and it was in log)'): 4,
},
}
]
},
{
'test_name': 'Unittest3.Subtest3',
'first_failure': 98,
'last_pass': 96,
'suspected_cls': []
}
]
}
]
}
expected_suspected_cl = [
{
'repo_name': 'chromium',
'revision': 'r99_2',
'commit_position': None,
'url': None,
'failures': {
'a': []
},
'top_score': 2
},
{
'repo_name': 'chromium',
'revision': 'r97_1',
'commit_position': None,
'url': None,
'failures': {
'b': ['Unittest1.Subtest1']
},
'top_score': 5
},
{
'repo_name': 'chromium',
'revision': 'r98_1',
'commit_position': None,
'url': None,
'failures': {
'b': ['Unittest2.Subtest1', 'Unittest3.Subtest2']
},
'top_score': 4
}
]
analysis_result, suspected_cls = build_failure_analysis.AnalyzeBuildFailure(
failure_info, change_logs, deps_info, failure_signals_json)
self.assertEqual(expected_analysis_result, analysis_result)
self.assertEqual(sorted(expected_suspected_cl), sorted(suspected_cls))
def testAnalyzeBuildFailureForUnsupportedStep(self):
failure_info = {
'master_name': 'master1',
'builder_name': 'b',
'build_number': 99,
'failure_type': failure_type.TEST,
'failed': True,
'chromium_revision': 'r99_2',
'failed_steps': {
'unsupported_step1': {
'current_failure': 99,
'first_failure': 98,
},
},
'builds': {
'99': {
'blame_list': ['r99_1', 'r99_2'],
},
'98': {
'blame_list': ['r98_1'],
},
}
}
change_logs = {}
deps_info = {}
failure_signals_json = {
'not_supported': {
'files': {
'src/a/b/f99_2.cc': [],
},
}
}
expected_analysis_result = {
'failures': [
{
'step_name': 'unsupported_step1',
'supported': False,
'first_failure': 98,
'last_pass': None,
'suspected_cls': [],
},
]
}
analysis_result, suspected_cls = build_failure_analysis.AnalyzeBuildFailure(
failure_info, change_logs, deps_info, failure_signals_json)
self.assertEqual(expected_analysis_result, analysis_result)
self.assertEqual([], suspected_cls)
  def testGetGitBlame(self):
    """_GetGitBlame returns blame data when repo_info is populated."""
    repo_info = {
        'repo_url': 'https://chromium.googlesource.com/chromium/src.git',
        'revision': '8'
    }
    file_path = 'a/b/c.cc'
    # Route Gitiles blame fetches to the canned in-memory blame data;
    # revision '8' is one for which _MockGetBlame returns regions.
    self.mock(GitilesRepository, 'GetBlame', self._MockGetBlame)
    blame = build_failure_analysis._GetGitBlame(repo_info, file_path)
    self.assertIsNotNone(blame)
  def testGetGitBlameEmpty(self):
    """_GetGitBlame returns None when repo_info carries no repo data."""
    repo_info = {}
    file_path = 'a/b/c.cc'
    self.mock(GitilesRepository, 'GetBlame', self._MockGetBlame)
    blame = build_failure_analysis._GetGitBlame(repo_info, file_path)
    self.assertIsNone(blame)
def testGetChangedLinesTrue(self):
repo_info = {
'repo_url': 'https://chromium.googlesource.com/chromium/src.git',
'revision': '8'
}
touched_file = {
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/c.cc',
'new_path': 'a/b/c.cc'
}
line_numbers = [2, 7, 8]
commit_revision = '7'
self.mock(GitilesRepository, 'GetBlame', self._MockGetBlame)
changed_line_numbers = (
build_failure_analysis._GetChangedLinesForChromiumRepo(
repo_info, touched_file, line_numbers, commit_revision))
self.assertEqual([2, 8], changed_line_numbers)
def testGetChangedLinesDifferentRevision(self):
repo_info = {
'repo_url': 'https://chromium.googlesource.com/chromium/src.git',
'revision': '9'
}
touched_file = {
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/c.cc',
'new_path': 'a/b/c.cc'
}
line_numbers = [2, 7, 8]
commit_revision = '9'
self.mock(GitilesRepository, 'GetBlame', self._MockGetBlame)
changed_line_numbers = (
build_failure_analysis._GetChangedLinesForChromiumRepo(
repo_info, touched_file, line_numbers, commit_revision))
self.assertEqual([], changed_line_numbers)
def testGetChangedLinesDifferentLine(self):
repo_info = {
'repo_url': 'https://chromium.googlesource.com/chromium/src.git',
'revision': '8'
}
touched_file = {
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/c.cc',
'new_path': 'a/b/c.cc'
}
line_numbers = [15]
commit_revision = '7'
self.mock(GitilesRepository, 'GetBlame', self._MockGetBlame)
changed_line_numbers = (
build_failure_analysis._GetChangedLinesForChromiumRepo(
repo_info, touched_file, line_numbers, commit_revision))
self.assertEqual([], changed_line_numbers)
def testGetChangedLinesNoneBlame(self):
repo_info = {
'repo_url': 'https://chromium.googlesource.com/chromium/src.git',
'revision': '10'
}
touched_file = {
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/c.cc',
'new_path': 'a/b/c.cc'
}
line_numbers = [2, 7, 8]
commit_revision = '7'
self.mock(GitilesRepository, 'GetBlame', self._MockGetBlame)
changed_line_numbers = (
build_failure_analysis._GetChangedLinesForChromiumRepo(
repo_info, touched_file, line_numbers, commit_revision))
self.assertEqual([], changed_line_numbers)
def testCheckFileSameLineChanged(self):
def MockGetChangedLines(*_):
return [1, 3]
self.mock(build_failure_analysis, '_GetChangedLinesForChromiumRepo',
MockGetChangedLines)
touched_file = {
'change_type': ChangeType.MODIFY,
'old_path': 'a/b/c.cc',
'new_path': 'a/b/c.cc'
}
file_path_in_log = 'a/b/c.cc'
justification = build_failure_analysis._Justification()
file_name_occurrences = {'c.cc': 1}
line_numbers = [1, 3]
repo_info = {
'repo_url': 'https://chromium.googlesource.com/chromium/src.git',
'revision': 'dummy_abcd1234'
}
commit_revision = 'dummy_1'
build_failure_analysis._CheckFile(
touched_file, file_path_in_log, justification, file_name_occurrences,
line_numbers, repo_info, commit_revision)
expected_justification = {
'score': 4,
'hints': {
'modified c.cc[1, 3] (and it was in log)': 4
}
}
self.assertEqual(expected_justification, justification.ToDict())
| StarcoderdataPython |
import numpy as np
import matplotlib.pyplot as plt
data = np.load("uv-coverage.npy")
print(data.shape)
11374360 | <reponame>cu-swe4s-fall-2019/final-project-swe4s_mc_params_optimization
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 21:44:21 2019
@author: owenmadin
"""
# Create functions that return properties for a given model, eps, sig
def rhol_hat_models(compound_2CLJ, Temp, eps, sig, L, Q):
    """Saturated liquid density predicted by the 2CLJQ model.

    Args:
        compound_2CLJ: model object exposing ``rhol_hat_2CLJQ``.
        Temp: temperature.
        eps, sig, L, Q: 2CLJQ model parameters.

    Returns:
        Predicted liquid density [kg/m3].
    """
    # Removed a leftover triple-quoted string (dead unit-conversion notes)
    # that executed as a no-op expression statement.
    return compound_2CLJ.rhol_hat_2CLJQ(Temp, eps, sig, L, Q)
def Psat_hat_models(compound_2CLJ, Temp, eps, sig, L, Q):
    """Saturated vapor pressure [kPa] predicted by the 2CLJQ model."""
    return compound_2CLJ.Psat_hat_2CLJQ(Temp, eps, sig, L, Q)
def SurfTens_hat_models(compound_2CLJ, Temp, eps, sig, L, Q):
    """Surface tension predicted by the 2CLJQ model.

    Args:
        compound_2CLJ: model object exposing ``ST_hat_2CLJQ``.
        Temp: temperature.
        eps, sig, L, Q: 2CLJQ model parameters.

    Returns:
        Predicted surface tension.
    """
    # Removed a leftover triple-quoted string (dead unit-conversion notes)
    # that executed as a no-op expression statement.
    return compound_2CLJ.ST_hat_2CLJQ(Temp, eps, sig, L, Q)
def T_c_hat_models(compound_2CLJ, eps, sig, L, Q):
    """Critical temperature predicted by the 2CLJQ model.

    Args:
        compound_2CLJ: model object exposing ``T_c_hat_2CLJQ``.
        eps, sig, L, Q: 2CLJQ model parameters.

    Returns:
        Predicted critical temperature.
    """
    # Removed a leftover triple-quoted string (dead unit-conversion notes)
    # that executed as a no-op expression statement.
    return compound_2CLJ.T_c_hat_2CLJQ(eps, sig, L, Q)
| StarcoderdataPython |
217091 | <gh_stars>1-10
# Read 11 integers from stdin (one per line) and print their arithmetic mean.
num = []
soma = 0  # running sum ("soma" = sum in Portuguese)
for i in range(11):
    num.append(int(input()))
n = len(num)  # always 11; kept general in case the count changes
for i in num:
    soma = soma + i
media = soma / n  # arithmetic mean ("media" = mean)
print(media)
| StarcoderdataPython |
11388173 | <reponame>rizwanniazigroupdocs/aspose-slides-cloud-python
from slides_configuration import *
request=PutSlidesSlideSizeRequest("test.pptx", width="100", height="100", size_type="OnScreen", scale_type="DoNotScale")
response = slides_api.put_slides_slide_size(request)
print(response)
3391971 | <reponame>merkrafter/CopasiTool<gh_stars>0
import logging
def setup_logger(args):
    """Configure and return the module logger from parsed CLI arguments.

    Args:
        args: argparse namespace with an integer ``verbose`` attribute
            (0 -> WARNING, 1 -> INFO, >=2 -> DEBUG).

    Returns:
        logging.Logger: the shared module-level logger.
    """
    logger = logging.getLogger(__name__)
    # getLogger returns the same object on every call, so guard against
    # attaching a second StreamHandler (which would duplicate every line).
    if not logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter('[%(levelname)s] %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    if args.verbose >= 2:
        logger.setLevel(logging.DEBUG)
    elif args.verbose == 1:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)
    return logger
| StarcoderdataPython |
from app import create_app
from flask_script import Manager, Server  # CLI helpers for launching the server

# Create the application instance in the development configuration.
app = create_app('development')
manager = Manager(app)
manager.add_command('server', Server)  # `python manage.py server` starts the app


@manager.command
def test():
    """Run the unit tests."""
    # Local import keeps unittest out of the normal server startup path.
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)


if __name__ == '__main__':  # only run the CLI when executed directly
    manager.run()
| StarcoderdataPython |
321843 | """All constants related to the ZHA component."""
import enum
import logging
from typing import List
import bellows.zigbee.application
from zigpy.config import CONF_DEVICE_PATH # noqa: F401 # pylint: disable=unused-import
import zigpy_cc.zigbee.application
import zigpy_deconz.zigbee.application
import zigpy_xbee.zigbee.application
import zigpy_zigate.zigbee.application
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from .typing import CALLABLE_T
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_ATTRIBUTE_ID = "attribute_id"
ATTR_ATTRIBUTE_NAME = "attribute_name"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND = "command"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE_IEEE = "device_ieee"
ATTR_DEVICE_TYPE = "device_type"
ATTR_ENDPOINTS = "endpoints"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_IN_CLUSTERS = "in_clusters"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MEMBERS = "members"
ATTR_MODEL = "model"
ATTR_NAME = "name"
ATTR_NODE_DESCRIPTOR = "node_descriptor"
ATTR_NWK = "nwk"
ATTR_OUT_CLUSTERS = "out_clusters"
ATTR_POWER_SOURCE = "power_source"
ATTR_PROFILE_ID = "profile_id"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_UNIQUE_ID = "unique_id"
ATTR_VALUE = "value"
ATTR_WARNING_DEVICE_DURATION = "duration"
ATTR_WARNING_DEVICE_MODE = "mode"
ATTR_WARNING_DEVICE_STROBE = "strobe"
ATTR_WARNING_DEVICE_STROBE_DUTY_CYCLE = "duty_cycle"
ATTR_WARNING_DEVICE_STROBE_INTENSITY = "intensity"
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
BINDINGS = "bindings"
CHANNEL_ACCELEROMETER = "accelerometer"
CHANNEL_ANALOG_INPUT = "analog_input"
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_COVER = "window_covering"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_HUMIDITY = "humidity"
CHANNEL_IAS_WD = "ias_wd"
CHANNEL_IDENTIFY = "identify"
CHANNEL_ILLUMINANCE = "illuminance"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_MULTISTATE_INPUT = "multistate_input"
CHANNEL_OCCUPANCY = "occupancy"
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_PRESSURE = "pressure"
CHANNEL_SHADE = "shade"
CHANNEL_SMARTENERGY_METERING = "smartenergy_metering"
CHANNEL_TEMPERATURE = "temperature"
CHANNEL_THERMOSTAT = "thermostat"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
COMPONENTS = (
BINARY_SENSOR,
CLIMATE,
COVER,
DEVICE_TRACKER,
FAN,
LIGHT,
LOCK,
SENSOR,
SWITCH,
)
CONF_BAUDRATE = "baudrate"
CONF_DATABASE = "database_path"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_FLOWCONTROL = "flow_control"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONF_ZIGPY = "zigpy_config"
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
DATA_ZHA_PLATFORM_LOADED = "platform_loaded"
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_CC = "zigpy_cc"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
DEBUG_LEVELS = {
DEBUG_COMP_BELLOWS: logging.DEBUG,
DEBUG_COMP_ZHA: logging.DEBUG,
DEBUG_COMP_ZIGPY: logging.DEBUG,
DEBUG_COMP_ZIGPY_CC: logging.DEBUG,
DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
GROUP_ID = "group_id"
GROUP_IDS = "group_ids"
GROUP_NAME = "group_name"
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
class RadioType(enum.Enum):
    """Possible options for radio type.

    Each member's value is a tuple of
    (human readable description, zigpy ControllerApplication class).
    """

    # NOTE(review): "ESZP" below looks like a typo for "EZSP", but the string
    # is matched verbatim by get_by_description() (and may be persisted in
    # config entries), so it is deliberately left unchanged.
    ezsp = (
        "ESZP: HUSBZB-1, Elelabs, Telegesis, Silabs EmberZNet protocol",
        bellows.zigbee.application.ControllerApplication,
    )
    deconz = (
        "Conbee, Conbee II, RaspBee radios from dresden elektronik",
        zigpy_deconz.zigbee.application.ControllerApplication,
    )
    ti_cc = (
        "TI_CC: CC2531, CC2530, CC2652R, CC1352 etc, Texas Instruments ZNP protocol",
        zigpy_cc.zigbee.application.ControllerApplication,
    )
    zigate = "ZiGate Radio", zigpy_zigate.zigbee.application.ControllerApplication
    xbee = (
        "Digi XBee S2C, XBee 3 radios",
        zigpy_xbee.zigbee.application.ControllerApplication,
    )

    @classmethod
    def list(cls) -> List[str]:
        """Return a list of all radio type descriptions."""
        return [e.description for e in RadioType]

    @classmethod
    def get_by_description(cls, description: str) -> str:
        """Return the member name whose description equals *description*.

        Raises:
            ValueError: if no radio type matches.
        """
        for radio in cls:
            if radio.description == description:
                return radio.name
        # Bug fix: the original raised a bare ValueError with no message,
        # which made configuration failures hard to diagnose.
        raise ValueError(
            "Unknown radio type description: {!r}".format(description))

    def __init__(self, description: str, controller_cls: CALLABLE_T):
        """Init instance from the (description, controller class) tuple."""
        self._desc = description
        self._ctrl_cls = controller_cls

    @property
    def controller(self) -> CALLABLE_T:
        """Return the zigpy controller application class."""
        return self._ctrl_cls

    @property
    def description(self) -> str:
        """Return radio type description."""
        return self._desc
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
REPORT_CONFIG_DEFAULT = (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
REPORT_CONFIG_MIN_INT_ASAP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
REPORT_CONFIG_MIN_INT_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
REPORT_CONFIG_MIN_INT_OP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = CHANNEL_ELECTRICAL_MEASUREMENT
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = CHANNEL_HUMIDITY
SENSOR_ILLUMINANCE = CHANNEL_ILLUMINANCE
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = CHANNEL_OCCUPANCY
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = CHANNEL_PRESSURE
SENSOR_TEMPERATURE = CHANNEL_TEMPERATURE
SENSOR_TYPE = "sensor_type"
SIGNAL_ADD_ENTITIES = "zha_add_new_entities"
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_UPDATE_DEVICE = "{}_zha_update_device"
SIGNAL_REMOVE_GROUP = "remove_group"
SIGNAL_GROUP_ENTITY_REMOVED = "group_entity_removed"
SIGNAL_GROUP_MEMBERSHIP_CHANGE = "group_membership_change"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
WARNING_DEVICE_MODE_STOP = 0
WARNING_DEVICE_MODE_BURGLAR = 1
WARNING_DEVICE_MODE_FIRE = 2
WARNING_DEVICE_MODE_EMERGENCY = 3
WARNING_DEVICE_MODE_POLICE_PANIC = 4
WARNING_DEVICE_MODE_FIRE_PANIC = 5
WARNING_DEVICE_MODE_EMERGENCY_PANIC = 6
WARNING_DEVICE_STROBE_NO = 0
WARNING_DEVICE_STROBE_YES = 1
WARNING_DEVICE_SOUND_LOW = 0
WARNING_DEVICE_SOUND_MEDIUM = 1
WARNING_DEVICE_SOUND_HIGH = 2
WARNING_DEVICE_SOUND_VERY_HIGH = 3
WARNING_DEVICE_STROBE_LOW = 0x00
WARNING_DEVICE_STROBE_MEDIUM = 0x01
WARNING_DEVICE_STROBE_HIGH = 0x02
WARNING_DEVICE_STROBE_VERY_HIGH = 0x03
WARNING_DEVICE_SQUAWK_MODE_ARMED = 0
WARNING_DEVICE_SQUAWK_MODE_DISARMED = 1
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_GROUP_ADDED = "group_added"
ZHA_GW_MSG_GROUP_INFO = "group_info"
ZHA_GW_MSG_GROUP_MEMBER_ADDED = "group_member_added"
ZHA_GW_MSG_GROUP_MEMBER_REMOVED = "group_member_removed"
ZHA_GW_MSG_GROUP_REMOVED = "group_removed"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
EFFECT_BLINK = 0x00
EFFECT_BREATHE = 0x01
EFFECT_OKAY = 0x02
EFFECT_DEFAULT_VARIANT = 0x00
| StarcoderdataPython |
4983838 | <filename>hackerrank/algorithms/implementation/easy/lisas_workbook/py/solution.py
def solution(chapters, k):
    """Lisa's Workbook: count the "special" problems.

    Problems are numbered 1..n within each chapter, printed k per page,
    and every chapter starts on a fresh page.  A problem is special when
    its number equals the number of the page it is printed on.
    """
    special = 0
    page = 0
    for chapter_size in chapters:
        for prob in range(1, chapter_size + 1):
            # A new page starts on the first problem of each chapter and
            # after every k problems thereafter.
            if (prob - 1) % k == 0:
                page += 1
            if prob == page:
                special += 1
    return special
# Read "n k" (chapter count, max problems per page) from stdin.
n, k = map(int, input().split())
# Per-chapter problem counts.
t = map(int, input().split())
count = solution(t, k)
print(count)
| StarcoderdataPython |
121397 | import unittest
import pickle
import sys
import tempfile
from pathlib import Path
class TestUnpickleDeletedModule(unittest.TestCase):
    def test_loading_pickle_with_no_module(self):
        """Create a module that uses Numba, import a function from it.
        Then delete the module and pickle the function. The function
        should load from the pickle without a problem.
        Note - This is a simplified version of how Numba might be used
        on a distributed system using e.g. dask distributed. With the
        pickle being sent to the worker but not the original module.
        """
        # Source code for temporary module we will make
        source = "\n".join(
            [
                "from numba import vectorize",
                "@vectorize(['float64(float64)'])",
                "def inc1(x):",
                " return x + 1",
            ]
        )
        modname = "tmp_module"
        with tempfile.TemporaryDirectory() as tmp_dir:
            sys.path.append(tmp_dir)
            try:
                # Fix: write the module via a context manager; the original
                # used a bare open(..., "a") and leaked the handle on error.
                filename = Path(f"{tmp_dir}/{modname}.py")
                with open(filename, "w") as f:
                    f.write(source)
                # Import the temporary module before the file is deleted
                from tmp_module import inc1
                # Remove from imported libraries
                del sys.modules[modname]
                # Pickle function and assert that it loads correctly
                pkl = pickle.dumps(inc1)
                f = pickle.loads(pkl)
                self.assertEqual(f(2), 3)
            finally:
                # Fix: the original left tmp_dir on sys.path forever,
                # polluting the import path for any later test.
                sys.path.remove(tmp_dir)
| StarcoderdataPython |
356908 | """Utility functions for NumPy-based Reinforcement learning algorithms."""
import numpy as np
from metarl.misc import tensor_utils
def paths_to_tensors(paths, max_path_length, baseline_predictions, discount):
    """Return processed sample data based on the collected paths.

    Args:
        paths (list[dict]): A list of collected paths.
        max_path_length (int): Maximum length of a single rollout.
        baseline_predictions (numpy.ndarray): Predicted value of GAE
            (Generalized Advantage Estimation) Baseline.
        discount (float): Environment reward discount.

    Returns:
        dict: Processed sample data, with keys
            * observations (numpy.ndarray): Padded array of the observations of
                the environment
            * actions (numpy.ndarray): Padded array of the actions fed to
                the environment
            * rewards (numpy.ndarray): Padded array of the acquired rewards
            * agent_infos (dict): a dictionary of {stacked tensors or
                dictionary of stacked tensors}
            * env_infos (dict): a dictionary of {stacked tensors or
                dictionary of stacked tensors}
            * valids (numpy.ndarray): Padded array of the validity information
    """
    # NOTE: mutates each path dict in place, adding 'baselines' and
    # 'returns' keys.
    baselines = []
    returns = []
    for idx, path in enumerate(paths):
        # baselines
        path['baselines'] = baseline_predictions[idx]
        baselines.append(path['baselines'])
        # returns
        path['returns'] = tensor_utils.discount_cumsum(path['rewards'],
                                                       discount)
        returns.append(path['returns'])
    # Pad every per-path tensor out to max_path_length so they stack.
    obs = [path['observations'] for path in paths]
    obs = tensor_utils.pad_tensor_n(obs, max_path_length)
    actions = [path['actions'] for path in paths]
    actions = tensor_utils.pad_tensor_n(actions, max_path_length)
    rewards = [path['rewards'] for path in paths]
    rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
    agent_infos = [path['agent_infos'] for path in paths]
    agent_infos = tensor_utils.stack_tensor_dict_list([
        tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos
    ])
    env_infos = [path['env_infos'] for path in paths]
    env_infos = tensor_utils.stack_tensor_dict_list(
        [tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos])
    # 1 for real timesteps, 0 for padding.
    valids = [np.ones_like(path['returns']) for path in paths]
    valids = tensor_utils.pad_tensor_n(valids, max_path_length)
    samples_data = dict(observations=obs,
                        actions=actions,
                        rewards=rewards,
                        agent_infos=agent_infos,
                        env_infos=env_infos,
                        valids=valids)
    return samples_data
| StarcoderdataPython |
61518 | #Automation
#Specifically used for small subsets with int64 as their astype
import pandas as pd

# Load the subset and restrict it to question Q01 of the 2020 survey.
my_df = pd.read_csv("subset-1-sous-ensemble-1.csv", encoding = "latin-1")
my_df = my_df.loc[my_df['QUESTION'] == 'Q01']
my_df = my_df.loc[my_df['SURVEYR'] == 2020]
# Keep only the two answer columns (positions 20 and 22).
my_df = my_df.iloc[0:,[20, 22]]
print (my_df)
count = my_df['MOST_POSITIVE_OR_LEAST_NEGATIVE'].count()
print ('Count: ' + str(count))
my_df = my_df.astype({"MOST_POSITIVE_OR_LEAST_NEGATIVE": "int64", "MOST_NEGATIVE_OR_LEAST_POSITIVE": "int64"}, copy = False)
# Fix: compute the column sums once. The original evaluated (and discarded)
# two extra ``my_df.sum()[...]`` expressions and then summed again for each
# average.
sums = my_df.sum()
average_mpln = sums["MOST_POSITIVE_OR_LEAST_NEGATIVE"] / count
average_mnlp = sums["MOST_NEGATIVE_OR_LEAST_POSITIVE"] / count
print ('Average for MOST_POSITIVE_OR_LEAST_NEGATIVE: ' + str(average_mpln))
print ('Average for MOST_NEGATIVE_OR_LEAST_POSITIVE: ' + str(average_mnlp))
| StarcoderdataPython |
11221909 | import bleach
import markdown as md
from bleach.linkifier import LinkifyFilter
from django import template
allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'li', 'ol', 'p', 'pre', 'strong', 'ul']
register = template.Library()
cleaner = bleach.Cleaner(tags=allowed_tags, filters=[LinkifyFilter])
@register.filter(is_safe=True)
def markdown(value):
    """Template filter: render *value* as Markdown and strip disallowed HTML.

    Falsy input (None, empty string) yields an empty string.
    """
    if value:
        return cleaner.clean(md.markdown(value))
    return ""
@register.tag()
def markdownify(parser, token):
    """Block tag: {% markdownify %}...{% endmarkdownify %} renders the
    enclosed template content as sanitized Markdown via Markdownify."""
    nodelist = parser.parse(('endmarkdownify', ))
    parser.delete_first_token()
    return Markdownify(nodelist)
class Markdownify(template.Node):
    """Template node that renders its children and converts the result from
    Markdown to sanitized HTML."""
    def __init__(self, nodelist):
        self.nodelist = nodelist
    def render(self, context):
        output = self.nodelist.render(context)
        # NOTE(review): markdown() already runs cleaner.clean() on its
        # output, so the outer clean() here sanitizes a second time.
        # Presumably harmless but redundant — confirm before simplifying.
        return cleaner.clean(markdown(output))
| StarcoderdataPython |
6487370 | <reponame>krislindgren/padre
import fractions
class ManualProgressBar(object):
    """A progress bar you update yourself."""
    def reset(self):
        # No internal state to reset; subclasses may override.
        pass
    def update(self, done_text):
        # done_text: free-form progress description supplied by the caller.
        pass
class AutoProgressBar(object):
    """A progress bar that updates itself (ie. wrapping some iterator).

    Fires _trigger_change() at most once per ``update_period`` units of
    progress, plus once on the very first update and once on completion.
    """

    def __init__(self, max_am, update_period=1):
        self.max_am = max_am                # total amount of work
        self.update_period = update_period  # min progress between triggers
        self._last_am = -1                  # amount at the last trigger

    def _trigger_change(self, percent_done):
        # Hook for subclasses; percent_done is an exact fractions.Fraction.
        pass

    def reset(self):
        self._last_am = -1

    def update(self, curr_am):
        # Clamp the reported amount into [0, max_am].
        curr_am = min(max(0, curr_am), self.max_am)
        fire = self._last_am == -1
        if not fire and self._last_am >= 0:
            delta = max(0, curr_am - self._last_am)
            # Trigger when enough progress accumulated, or on completion.
            fire = (delta >= self.update_period
                    or (curr_am == self.max_am and delta != 0))
        if fire:
            percent_done = fractions.Fraction(curr_am, self.max_am) * 100
            self._trigger_change(percent_done)
            self._last_am = curr_am

    def wrap_iter(self, it):
        # Yield items from *it*, reporting progress after each one.
        self.reset()
        self.update(0)
        for done, item in enumerate(it, start=1):
            yield item
            self.update(done)
| StarcoderdataPython |
5060935 | from .attribute_builder import AttributeBuilder
class SourceLanguage(AttributeBuilder):
    """
    Represents 'srclang' attribute.
    """
    def __init__(self):
        super().__init__()
        # The only attribute this builder handles.
        self.attributes = ["srclang"]
| StarcoderdataPython |
11339179 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Benchmark for the quality of the joint space"""
from argparse import ArgumentParser
import logging
from io import open
from collections import defaultdict
from copy import deepcopy
import cPickle as pickle
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import NearestNeighbors
import numpy
from numpy import asarray
from LocalLinearRegression import LocalLinearRegression
__author__ = "<NAME>"
__email__ = "<EMAIL>"
LOGFORMAT = "%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s"
reg_model = None
K_NN = 1000
class Mapping(object):
    """Mapping between terms/phrases of a source and a target language."""

    def __init__(self, source=None, target=None):
        # Language codes; subclasses populate ``map`` with
        # source-term -> target-term pairs.
        self.s_lang = source
        self.t_lang = target
        self.map = None
class IdentityTranslations(Mapping):
    """A trivial mapping: every word shared by both vocabularies maps to
    itself."""

    def __init__(self, source, target, se, te):
        super(IdentityTranslations, self).__init__(source, target)
        # Intersect the two vocabularies and map each shared word onto itself.
        shared = set(se.word_id.keys()) & set(te.word_id.keys())
        self.map = {word: word for word in shared}
class Embeddings(object):
    """A list of words and their vector representations.

    We assume that the given words are sorted by their frequency, so a
    word's id doubles as its frequency rank.  (Python 2 code: uses
    ``iteritems`` and tuple-parameter lambdas.)
    """
    def __init__(self, lang, filename=None, vectors=None, words=None):
        # Build either from a file (via read_file, implemented by
        # subclasses) or from explicit vectors/words.
        self.lang = lang
        if filename:
            self.filename = filename
            self.read_file()
        if vectors is not None:
            self.vectors = asarray(vectors)
        if words:
            if len(set(words)) == len(words):
                self.word_id = {w: i for i, w in enumerate(words)}
            else:
                logging.debug("We have duplicate words.")
                # Disambiguate duplicates by suffixing the word's position.
                self.word_id = {u'{}_{}'.format(w, i): i for i, w in enumerate(words)}
            self.id_word = {i: w for w, i in self.word_id.iteritems()}
            self.words = [w for w, i in Embeddings.sorted_words(self.word_id)]
    def read_file(self):
        # Subclasses supply the concrete on-disk format reader.
        raise NotImplementedError("Implement an embeddings reader.")
    def get_vectors(self, words=None):
        """Return vectors for *words* (in that order), or all vectors."""
        if words:
            return asarray([self.vectors[self.word_id[w]] for w in words])
        return self.vectors
    def __most_frequent(self, n, start=0):
        # Words with frequency ranks in [start, n).
        return [x for x, y in sorted(self.word_id.iteritems(), key=lambda(x, y): y)[start:n]]
    def most_frequent(self, n, start=0):
        """Return a new Embeddings restricted to frequency ranks [start, n)."""
        return Embeddings(lang=self.lang, words=self.words[start:n],
                          vectors=self.vectors[start:n])
    def least_frequent_n(self, n):
        """Return the n least frequent words."""
        return [x for x, y in sorted(self.word_id.iteritems(),
                                     key=lambda(x, y): y, reverse=True)[:n]]
    def words_translations(self, other, mapping, segment):
        """Pair source words in rank range *segment* = (start, end) with
        their translations in *other*; returns (source, target) Embeddings
        aligned row-for-row."""
        start, end = segment
        s_words = self.__most_frequent(n=end, start=start)
        map_ = mapping.map
        t_words = [map_[w] for w in s_words]
        exact = [(w1, w2) for (w1, w2) in zip(s_words, t_words) if w1.lower() == w2.lower()]
        logging.info("{} exact words translations in between {}-{} for "
                     "{}-{} languages.".format(len(exact), start, end, mapping.s_lang, mapping.t_lang))
        s_new_vectors = self.vectors[start:end]
        t_new_vectors = asarray([other.vectors[other.word_id[w]] for w in t_words])
        source = Embeddings(vectors=s_new_vectors, words=s_words, lang=self.lang)
        target = Embeddings(vectors=t_new_vectors, words=t_words, lang=other.lang)
        return (source, target)
    @staticmethod
    def sorted_words(word_id):
        # (word, id) pairs sorted by id, i.e. frequency rank.
        return sorted(word_id.iteritems(), key=lambda(x, y): y)
    def get_common(self, other, mapping):
        """ Limit the two embeddings to the terms that are covered by the mapping."""
        self_oov = defaultdict(lambda: 0)
        other_oov = defaultdict(lambda: 0)
        self_word_id = deepcopy(self.word_id)
        other_word_id = deepcopy(other.word_id)
        new_words = []  # NOTE(review): populated nowhere — dead variable
        map_ = mapping.map
        for i, w in enumerate(self.word_id):
            if w not in map_:
                self_oov[w] += 1
                del self_word_id[w]
                continue
            if map_[w] not in other.word_id:
                other_oov[map_[w]] += 1
                del self_word_id[w]
        for i, w in enumerate(other.word_id):
            if w not in map_:
                del other_word_id[w]
        logging.info("We could not find {} {} words in our dictionary.".format(
            len(self_oov), self.lang))
        logging.info("We could not find {} {} words in our target words.".format(
            len(other_oov), other.lang))
        logging.info("Our {} vocabulary has {} valid words.".format(
            self.lang, len(self_word_id)))
        sorted_self_word_id = Embeddings.sorted_words(self_word_id)
        self_vectors = asarray([self.vectors[i] for w, i in sorted_self_word_id])
        self_words = [w for w, i in sorted_self_word_id]
        new_self = Embeddings(lang=self.lang, vectors=self_vectors, words=self_words)
        sorted_other_word_id = Embeddings.sorted_words(other_word_id)
        other_vectors = asarray([other.vectors[i] for w, i in sorted_other_word_id])
        other_words = [w for w, i in sorted_other_word_id]
        # NOTE(review): lang=self.lang here (not other.lang) — looks like a
        # copy/paste slip; confirm before relying on the language tag.
        new_other = Embeddings(lang=self.lang, vectors=other_vectors, words=other_words)
        return (new_self, new_other)
    def split(self, mapping, ignore_exact=True):
        """ Generates two embeddings that cover the mapping terms.
        If we have a1: b1, a2: b2 mappings in an embeddings space where {a1, b1,
        a2, b2} exists, we would like to generates two embeddings spaces one for
        {a1, a2} and another for {b1, b2}.
        Sometimes it is not desirable to include exact terms a3:a3 in the new
        embeddings. Hence, you need to ignore the exact terms.
        """
        source_oov = defaultdict(lambda: 0)
        target_oov = defaultdict(lambda: 0)
        w_exact = defaultdict(lambda: 0)
        source_words = []
        target_words = []
        map_ = mapping.map
        for w, id_ in self.word_id.iteritems():
            if w not in map_:
                source_oov[w] += 1
                continue
            if map_[w] not in self.word_id:
                target_oov[map_[w]] += 1
                continue
            if w.lower() == map_[w].lower():
                w_exact[w] += 1
                if ignore_exact:
                    continue
            source_words.append(w)
            target_words.append(map_[w])
        logging.debug("We could not find {} source words in our dictionary.".format(
            len(source_oov)))
        logging.debug("We could not find {} target words in our target words.".format(
            len(target_oov)))
        logging.debug("{} words are exact between languages".format(len(w_exact)))
        logging.debug("We found {} pairs of words valid for testing.".format(len(source_words)))
        new_s_vectors = asarray([self.vectors[self.word_id[w]] for w in source_words])
        source = Embeddings(vectors=new_s_vectors, words=source_words,
                            lang=mapping.s_lang)
        new_t_vectors = asarray([self.vectors[self.word_id[w]] for w in target_words])
        target = Embeddings(vectors=new_t_vectors, words=target_words,
                            lang=mapping.t_lang)
        new_mapping = Mapping(source=mapping.s_lang, target=mapping.t_lang)
        new_mapping.map = dict(zip(source.words, target.words))
        return (source, target, new_mapping)
    def common(self, other):
        """ Find common terms between languages.
        The post condition is that both embeddings vocabulary are in the same
        order.
        """
        common_words = []
        for word in self.word_id:
            if word in other.word_id:
                common_words.append(word)
        new_self_vectors = []
        new_other_vectors = []
        for word in common_words:
            new_self_vectors.append(self.vectors[self.word_id[word]])
            new_other_vectors.append(other.vectors[other.word_id[word]])
        new_self = Embeddings(vectors=asarray(new_self_vectors), words=common_words,
                              lang=self.lang)
        new_other = Embeddings(vectors=asarray(new_other_vectors), words=common_words,
                               lang=self.lang)
        return (new_self, new_other)
class Word2VecEmbeddings(Embeddings):
    """Reader for word2vec text-format embedding files.

    First line is "<word_count> <dimension>"; each following line is
    "<word ...> <dimension floats>".
    """
    def read_file(self, limit=-1):
        # When limit >= 0 parsing stops after that many data lines (the
        # matching line itself is still kept).
        words = []
        embeddings = []
        with open(self.filename, 'rb') as f:
            words_number, size = [int(x) for x in f.readline().strip().split()][:2]
            for i, line in enumerate(f):
                try:
                    ws = line.decode('utf-8').strip().split()
                    # Everything before the last ``size`` fields is the
                    # word/phrase (phrases may contain spaces).
                    words.append(' '.join(ws[:-size]))
                    embeddings.append([float(x) for x in ws[-size:]])
                    if i == limit:
                        break
                except Exception, e:
                    # Malformed lines are reported and skipped.
                    print "Exception", i
                    print "Exception", line
        self.word_id = {w: i for i, w in enumerate(words)}
        self.vectors = asarray(embeddings)
        assert len(self.word_id) == self.vectors.shape[0]
class Evaluator(object):
    """Evaluator of the alignment between two languages."""
    def __init__(self, source_embeddings, target_embeddings, metric='l2', k=5):
        # metric: 'l2' or 'cosine'; k: neighbourhood size for precision@k.
        self.metric = metric
        self.source_embeddings = source_embeddings
        self.target_embeddings = target_embeddings
        self.k = k
        self.row_normalize = True
        self.col_normalize = False
    @staticmethod
    def cosine_knn(vectors, point, k):
        # k nearest rows of *vectors* by dot product (assumes rows are
        # already length-normalised, so dot == cosine similarity).
        distances = numpy.dot(vectors, point)
        indices = list(reversed(distances.argsort()))[:k]
        return distances[indices], [indices]
    def norm(self, vectors):
        """Row- and/or column-normalise *vectors* per the instance flags;
        NaNs (from zero rows) are replaced with zeros."""
        out = vectors
        if self.row_normalize:
            norms = (vectors ** 2).sum(axis=1) ** 0.5
            out = (vectors.T / norms).T
        if self.col_normalize:
            norms = (vectors ** 2).sum(axis=0) ** 0.5
            norms[norms == 0] = 1
            out = vectors / norms
        out = numpy.nan_to_num(out)
        return out
    def precision_at_k(self, test_pairs):
        """Dispatch to the cosine or L2 precision@k implementation."""
        if self.metric == 'cosine':
            return self.precision_at_k_cosine(test_pairs)
        return self.precision_at_k_l2(test_pairs)
    def precision_at_k_l2(self, test_pairs):
        """Precision@k using euclidean nearest neighbours (ball tree)."""
        t_knn = NearestNeighbors(n_neighbors=self.k, algorithm='ball_tree', p=2)
        t_knn.fit(self.target_embeddings.vectors)
        right = 0
        index = 0  # NOTE(review): incremented but never read
        for s, t in test_pairs:
            assert(s == t)
            point = self.source_embeddings.vectors[self.source_embeddings.word_id[s]]
            distances, indices = t_knn.kneighbors(point)
            t_words = [self.target_embeddings.id_word[i] for i in indices[0]]
            # Strip the "_<rank>" suffix added for duplicate words.
            t = t.rsplit('_', 1)[0]
            t_words = [x.rsplit('_', 1)[0] for x in t_words]
            line = u"{: <20}{:<20}{:<50}".format(s, t, u' '.join(t_words))
            logging.debug(line.encode('utf-8'))
            if t in t_words:
                right += 1
            index = index + 1
        return right / float(len(test_pairs))
    def precision_at_k_cosine(self, test_pairs):
        """Precision@k using cosine similarity over normalised vectors."""
        s_vectors = self.norm(self.source_embeddings.vectors)
        t_vectors = self.norm(self.target_embeddings.vectors)
        right = 0
        for s, t in test_pairs:
            point = self.source_embeddings.vectors[self.source_embeddings.word_id[s]]
            distances, indices = Evaluator.cosine_knn(t_vectors, point, self.k)
            t_words = [self.target_embeddings.id_word[i] for i in indices[0]]
            t = t.rsplit('_', 1)[0]
            t_words = [x.rsplit('_', 1)[0] for x in t_words]
            line = u"{: <20}{:<20}{:<50}".format(s, t, u' '.join(t_words))
            logging.debug(line.encode('utf-8'))
            if t in t_words:
                right += 1
        return right / float(len(test_pairs))
    def evaluate(self, mapping, operation, training_segment, test_segment):
        """Extract and normalise train/test word pairs, then apply
        *operation* (e.g. linear_regression) to them.

        NOTE(review): always returns None and discards s_new/t_new —
        presumably precision is computed via the persisted model elsewhere;
        confirm.
        """
        (s_train, t_train) = self.source_embeddings.words_translations(self.target_embeddings, mapping, training_segment)
        (s_test, t_test) = self.source_embeddings.words_translations(self.target_embeddings, mapping, test_segment)
        s_train.vectors = self.norm(s_train.vectors)
        t_train.vectors = self.norm(t_train.vectors)
        s_test.vectors = self.norm(s_test.vectors)
        t_test.vectors = self.norm(t_test.vectors)
        if set(s_train.words).intersection(set(s_test.words)):
            print (u"Train and test words are overlapping")
        s_new, t_new = operation((s_train, t_train), (s_test, t_test))
        return None
def linear_regression(train_embeddings, test_embeddings):
    """Fit an ordinary least-squares map from source to target embedding
    space on the training pairs, persist it to the global ``reg_model``
    path, and return (mapped test sources, untouched test targets)."""
    global reg_model
    s_embeddings, t_embeddings = train_embeddings
    s_test, t_test = test_embeddings
    reg = LinearRegression()
    reg.fit(s_embeddings.vectors, t_embeddings.vectors)
    # Persist the fitted model for reuse outside this run.
    pickle.dump(reg, open(reg_model, 'wb'))
    s = Embeddings(vectors=reg.predict(s_test.vectors),
                   words=s_test.words, lang=s_embeddings.lang)
    return s, t_test
def local_linear_regression(train_embeddings, test_embeddings):
    """Fit a local linear regression (k = K_NN neighbours) on the training
    pairs and persist it to the global ``reg_model`` path.

    NOTE(review): unlike linear_regression this returns (None, None), so
    the caller cannot evaluate mapped test vectors directly — confirm that
    only the pickled model is consumed downstream.
    """
    global reg_model
    print "Using local linear regression with k = ", K_NN
    s_embeddings, t_embeddings = train_embeddings
    s_test, t_test = test_embeddings
    reg = LocalLinearRegression(k_nn=K_NN)
    reg.fit(s_embeddings.vectors, t_embeddings.vectors)
    pickle.dump(reg, open(reg_model, 'wb'))
    return None, None
def identity(train_vectors, all_vectors):
    """No-op mapping: ignore the training vectors and hand back
    *all_vectors* unchanged."""
    return all_vectors
def evaluate_word2vec(sl, tl, source_file, target_file, method):
    """Load two word2vec text files, align them on their shared vocabulary
    via an identity mapping, and run the requested regression method
    ('linear' or 'locallinear') over the whole common vocabulary."""
    print "Proceeding to load embeddings"
    s_ = Word2VecEmbeddings(lang=sl, filename=source_file)
    t_ = Word2VecEmbeddings(lang=tl, filename=target_file)
    print "Loaded word embeddings"
    mapping = IdentityTranslations(source=sl, target=tl, se=s_, te=t_)
    print "Mapping done"
    s, t = s_.get_common(t_, mapping)
    print "Common vocab done"
    evaluator = Evaluator(source_embeddings=s, target_embeddings=t, metric='l2')
    print "Evaluator constructed"
    assert(s.vectors.shape == t.vectors.shape)
    print "Evaluating"
    # Train and test segments are identical here: the whole vocabulary.
    if method == 'linear':
        p1 = evaluator.evaluate(mapping, linear_regression, (0, s.vectors.shape[0]), (0, s.vectors.shape[0]))
    elif method == 'locallinear':
        p1 = evaluator.evaluate(mapping, local_linear_regression, (0, s.vectors.shape[0]), (0, s.vectors.shape[0]))
def main(args):
    """Entry point: set the module globals from the parsed CLI arguments
    and dispatch to the chosen regression method."""
    global reg_model
    global K_NN
    # Output path for the pickled regression model.
    reg_model = args.filename
    if args.method == 'linear':
        evaluate_word2vec('old', 'new', args.old_model, args.new_model, 'linear')
    elif args.method == 'locallinear':
        K_NN = int(args.knn_val)
        evaluate_word2vec('old', 'new', args.old_model, args.new_model, 'locallinear')
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("-f", "--file", dest="filename", help="Input file")
    parser.add_argument("-o", "--old_model", dest="old_model", help="old model")
    parser.add_argument("-n", "--new_model", dest="new_model", help="new model")
    parser.add_argument("-k", "--knn", dest="knn_val", default=1000, type=int, help="K in KNN for local linear regression")
    parser.add_argument("-m", "--method", dest="method", help="method")
    parser.add_argument("-l", "--log", dest="log", help="log verbosity level",
                        default="INFO")
    args = parser.parse_args()
    # if args.log == 'DEBUG':
    #     sys.excepthook = debug
    # Map the textual level (e.g. "INFO") onto the logging constant.
    numeric_level = getattr(logging, args.log.upper(), None)
    logging.basicConfig(level=numeric_level, format=LOGFORMAT)
    main(args)
| StarcoderdataPython |
1769126 | # Copyright 2010-2018, Sikuli.org, sikulix.<EMAIL>
# Released under the MIT License.
from Sikuli import *
| StarcoderdataPython |
3374467 | <filename>jobs/zip+tar/zipped_python_job/entry.py
import os
import sys
import datetime
from utils.log import Logging
# Environment-variable names injected by the job-run service.
JOB_RUN_OCID_KEY = "JOB_RUN_OCID"
LOG_OBJECT_OCID_KEY = "LOG_OBJECT_OCID"  # NOTE(review): unused here — presumably consumed by utils.log; confirm
if __name__ == "__main__":
    try:
        job = Logging()
        # Emit a start banner: job-run OCID ("LOCAL" when run outside the
        # service) and the current UTC timestamp.
        job.log(
            [
                "Start logging for job run: {}".format(
                    os.environ.get(JOB_RUN_OCID_KEY, "LOCAL")
                ),
                "Current timestamp in UTC: {}".format(str(datetime.datetime.utcnow())),
            ]
        )
        job.log(["Job Done."])
    except Exception as e:
        # Surface the failure on stdout and re-raise so the run fails.
        print(e)
        raise e
| StarcoderdataPython |
3460124 | <reponame>yswtrue/pycrunch-trace
from random import Random
from . import AbstractFileFilter
class DefaultFileFilter(AbstractFileFilter):
    """Default tracing filter: trace every file except a hard-coded set of
    excluded path prefixes/suffixes."""

    def should_trace(self, filename: str) -> bool:
        """Return False for files matching an exclusion, True otherwise."""
        # Matched as either a prefix or a suffix of the file name.
        exclusions = (
            '/Users/gleb/code/pycrunch_tracing/',
            '/Users/gleb/code/bc/briteapps-admin/',
            'module_a.py',
            'module_b.py',
            'module_c.py',
            'invalid_picker_with_exception.py',
            # NOTE(review): the next two lines are a single implicitly
            # concatenated string (a trailing comma appears to be missing
            # after '.../python3.6') — kept as-is to preserve behavior.
            '/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6'
            '/Users/gleb/venv/PyCrunch/lib/python3.6/site-packages/',
            '/Users/gleb/venv/PyCrunch/lib/python3.6/site-packages/py/',
            'api/tracing.py'
        )
        # Fix: removed the unreachable code that followed the final return
        # (it referenced the undefined names end_patterns/start_patterns)
        # and the commented-out Random sampling experiment.
        if filename.endswith(exclusions) or filename.startswith(exclusions):
            return False
        return True
3448428 | <gh_stars>0
import argparse
def eval_args():
parser = argparse.ArgumentParser(
description=help_message
) | StarcoderdataPython |
6620906 | #coding=utf-8
from __future__ import print_function
import traceback
import sys
from eccodes import *
# Return True when the string Str contains every substring in SubStrList.
def IsSubString(SubStrList, Str):
    """Return True iff *Str* contains every substring in *SubStrList*.

    An empty *SubStrList* vacuously returns True, matching the original
    flag-loop behavior.
    """
    # all() short-circuits on the first missing substring, unlike the
    # original loop which always scanned the whole list.
    return all(substr in Str for substr in SubStrList)
# Collect all files of the given type from the given directory.
def GetFileList(FindPath, FlagStr=[]):
    """Return sorted full paths of the entries in *FindPath*.

    When *FlagStr* is non-empty, only names containing every substring in
    it are kept; otherwise every entry is returned.
    """
    import os
    FileList = []
    for entry in os.listdir(FindPath):
        # Keep the entry when no filter is given, or when the name
        # contains every required substring.
        if FlagStr and not all(sub in entry for sub in FlagStr):
            continue
        FileList.append(os.path.join(FindPath, entry))
    # Sort the collected paths (a no-op for an empty result).
    FileList.sort()
    return FileList
def example(filename):
    """Scan every GRIB message in *filename*, tracking changes of the
    'dataDate' key, then rename the file to
    era5.CHV.levels.<last dataDate>.grib in the same directory."""
    f = open(filename, 'rb')
    keys = [
        ## 'Ni',
        ## 'Nj',
        ## 'latitudeOfFirstGridPointInDegrees',
        ## 'longitudeOfFirstGridPointInDegrees',
        ## 'latitudeOfLastGridPointInDegrees',
        ## 'longitudeOfLastGridPointInDegrees',
        'dataDate',
        ## 'dataTime'
    ]
    dataDate=0
    while 1:
        gid = codes_grib_new_from_file(f)
        if gid is None:
            break
        for key in keys:
            try:
                ## print(' %s: %s' % (key, codes_get(gid, key)))
                dataDate2=codes_get(gid, key)
                # Report (and remember) every change of dataDate.
                if dataDate != dataDate2:
                    print(key,dataDate,dataDate2)
                    dataDate=dataDate2
                ## input()
                continue
            except KeyValueNotFoundError as err:
                # Full list of exceptions here:
                # https://confluence.ecmwf.int/display/ECC/Python+exception+classes
                print(' Key="%s" was not found: %s' % (key, err.msg))
            except CodesInternalError as err:
                print('Error with key="%s" : %s' % (key, err.msg))
        ## print('There are %d values, average is %f, min is %f, max is %f' % (
        ##     codes_get_size(gid, 'values'),
        ##     codes_get(gid, 'average'),
        ##     codes_get(gid, 'min'),
        ##     codes_get(gid, 'max')
        ## ))
        codes_release(gid)
    f.close()
    # Rename the file using the last dataDate seen.
    newname=os.path.dirname(filename)+"/era5.CHV.levels."+str(dataDate)+".grib"
    os.rename(filename,newname)
    print(filename,'======>',newname)
import os
from eccodes import *

# Directory to scan and the name-fragment filter used below.
Path='/mnt/d/Downloads/'
SubStrList=['.grib']
FileList=GetFileList(Path,SubStrList)  # list of files of the requested type (.grib)
##
##
for eachfile in FileList:  # inspect and rename each matching file
    print(eachfile)
    example(eachfile)
| StarcoderdataPython |
11305210 | <filename>src/common/file/ext_xlsx.py<gh_stars>0
# ext_xlsx.py
# Simple tool functions for xlsx files
#
from ..base import *
import openpyxl
def inXlsx(file_, data, level=error, debug=False):
wb = file_
for key in data.keys():
print("Read worksheet %s" % key)
ws = wb.get_sheet_by_name(key)
data[key] = ReadOnlySheet(ws)
return
#---------------------------------------------------------------------- Simplified Excel classes
class ReadOnlyCell:
# openpyxl.cell.Cell 간략화
def __init__(self, openpyxl_cell):
self.coordinate = openpyxl_cell.coordinate
self.value = safeCellValue(openpyxl_cell, level=mute)
return
class ReadOnlySheet:
# openpyxl.worksheet.Worksheet 간편화
lastFounds = []
def __init__(self, openpyxl_worksheet):
def convertRows(rows, cols, cells):
sameColList = {}
for row in range(len(rows)):
for col in range(len(rows[row])):
col_idx = rows[row][col].col_idx
# 변환
rows[row][col] = ReadOnlyCell(rows[row][col])
# cols 생성
if sameColList.get(col_idx) is None:
sameColList[col_idx] = [rows[row][col], ]
else:
sameColList[col_idx].append(rows[row][col])
# cells 생성
coord = rows[row][col].coordinate
value = rows[row][col].value
cells[coord] = value
for mergedCol in sameColList.values():
cols.append(mergedCol.copy())
return
self.mergedRows = []
self.mergedCols = []
self.mergedCells = {}
self._regMergedRows(openpyxl_worksheet)
convertRows(self.mergedRows, self.mergedCols, self.mergedCells)
self.commonRows = []
self.commonCols = []
self.commonCells = {}
self._regCommonRows(openpyxl_worksheet)
convertRows(self.commonRows, self.commonCols, self.commonCells)
return
def _regMergedRows(self, openpyxl_worksheet):
"""
병합 셀을 보다 쉽게 다루기 위해 정렬하는 함수
- 모든 병합 셀 시작 점을 잡아, 행 기준으로 정렬\n
- e.g. [[A1, A3], [B4], [C2], [D1, D4]]\n
"""
ws = openpyxl_worksheet
self.mergedRows.clear()
# 워크시트에서 병합 셀 위치 추출
# (e.g. J3, C1, A3, ...)
unorderedList = []
for cellRange in ws.merged_cell_ranges:
# 시작점의 위치 만을 추출
coord = list(openpyxl.utils.range_boundaries(cellRange)[0:2])
# 내부는 col, row 로 되어있기에 리스트 반전
# (e.g. 3J, 1C, 3A, ...)
coord.reverse()
unorderedList.append(coord)
# 병합 셀 위치를 순차적으로 정렬
# (e.g. 1A, 3C, 3J, ...)
unorderedList.sort()
# 같은 행 끼리 묶어줌
previousRow = unorderedList[0][0]
sameRowList = []
orderedList = []
for coord in unorderedList:
# 행 번호가 이전과 다르면(=새로운 행이면),
if coord[0] != previousRow:
orderedList.append(sameRowList.copy())
sameRowList.clear()
previousRow = coord[0]
sameRowList.append(coord.copy())
orderedList.append(sameRowList.copy())
# 위치로부터 셀 객체를 얻음
# (e.g. cell[1C], cell[3A], cell[3J], ...)
sameRowList.clear()
for row in orderedList:
for coord in row:
sameRowList.append(ws[coord[0]][coord[1]-1])
self.mergedRows.append(sameRowList.copy())
sameRowList.clear()
return
def _regCommonRows(self, openpyxl_worksheet):
"""
일반 셀을 보다 쉽게 다루기 위해 정리하는 함수
- 내용이 있는 셀만 포함되며, 병합 셀은 제외된다\n
"""
ws = openpyxl_worksheet
self.commonRows.clear()
# 정렬은 이미 되어있으므로, 값과 병합 셀의 유무 만을 확인
orderedList = []
for row in ws.iter_rows():
for cell in row:
if cell.value is not None:
if cell.coordinate not in self.mergedCells.keys():
orderedList.append(cell)
if len(orderedList) != 0:
self.commonRows.append(orderedList.copy())
orderedList.clear()
return
def findInCommonRows(self, *args):
return self._findIn(self.commonRows, *args)
def findInCommonCols(self, *args):
return self._findIn(self.commonCols, *args)
def findInMergedRows(self, *args):
return self._findIn(self.mergedRows, *args)
def findInMergedCols(self, *args):
return self._findIn(self.mergedCols, *args)
def _findIn(self, cellLists, *args):
"""
정렬된 셀 리스트에서 특정 이름을 검색
- 행/열 기준 검색
- 행/열의 첫 번째 셀(태그)의 값 만을 검색
- 같은 조건의 태그를 지닌 모든 행/열을 리스트로 반환
- 매개변수 *args 를 통해, 1차 검색에 실패한 경우 n차 검색 시도 가능
"""
founds = []
string = str(args[0])
# 여러 속성의 일괄적 처리를 위해 리스트로 가져옴
# [속성 명 #1] [ ... ]
# [속성 명 #2] [ ... ]
# [속성 명 #3] [ ... ]
for cells in cellLists:
if cells[0].value.find(string) != -1:
founds.append(cells)
ReadOnlySheet.lastFounds = founds
if len(founds) == 0:
if len(args) > 1:
return self._findIn(cellLists, *(args[1:]))
return founds
#------------------------------------------------------------------------ Tag finding tool class
class TagFinder:
createdList = []
targetSheet = None
def __init__(self, *keywords):
if TagFinder.targetSheet is not None:
if keywords is not None:
self.isDerived = False
self.keywords = keywords
self.cellList = TagFinder.targetSheet.findInMergedRows(*keywords)
TagFinder.createdList.append(self)
return
def __eq__(self, other):
return self.keywords == other.keywords
def __ne__(self, other):
return self.keywords != other.keywords
def fillBlanks(level=warning):
group = PrintGroup("-- 찾지 못한 행이 있어, 다른 행으로부터 재검색 합니다", file=sys.stderr)
level.joinGroup(group)
for finder in TagFinder.createdList:
if len(finder.cellList) == 0:
empty = finder
# 자기 자신을 검색 대상에서 제외하기 위해 isDerived 사용
empty.isDerived = True
for other in TagFinder.createdList:
empty._fillIn(other)
if len(empty.cellList) != 0:
level.print("'%s' 옆에서 '%s'을 찾았습니다" % (other.keywords, empty.keywords))
break # 하나라도 있으면 break (전부 다시 찾으려면 비활성화)
# end for
# end for
level.leaveGroup()
return
def _fillIn(self, other):
if not other.isDerived and len(other.cellList) > 0 and other != self:
for otherRow in other.cellList:
if len(otherRow) > 2:
# 키워드로 검색
for keyword in self.keywords:
if str(otherRow[2].value).find(keyword) != -1:
v = otherRow.pop()
k = otherRow.pop()
self.cellList.append([k, v])
break
# end if
# end if
return
#---------------------------------------------------------------- Tool functions for excel cells
def safeCellValue(cell, level=warning):
"""
셀 객체로부터 값을 안전하게 얻기 위한 함수
- 안전하지 않은 경우, 공백 문자열을 반환
"""
cellType = type(cell)
if cellType is openpyxl.cell.Cell or cellType is ReadOnlyCell:
noneSpaceString = str(cell.value).strip()
if noneSpaceString not in kIgnoreCharacters:
return noneSpaceString
else:
# level.print("셀 값이 존재하지 않습니다 (%s)" % cell.coordinate)
pass
else:
level.print("셀이 아닌 객체를 호출하고 있습니다 (safeCellValue)")
return ""
def safeCellValueAlter(cells, org, alt, level=warning):
"""
셀 객체로부터 값을 안전하게 얻기 위한 함수
- 원본 데이터가 없는 경우, 대체 데이터를 사용
- 둘 다 없는 경우, 공백 문자열을 반환
"""
valueOrg = safeCellValue(cells[org], level)
valueAlt = safeCellValue(cells[alt], level)
if valueOrg != "":
return valueOrg
elif valueAlt == "":
# level.print("원본 셀과 참조 셀 모두 값이 없습니다 (%s, %s)" % (cells[org].coordinate, cells[alt].coordinate))
pass
return valueAlt
| StarcoderdataPython |
12825354 | """
Komponent töötajate nimede hägusaks eraldamiseks.
Loodud Rasa Open Source komponendi RegexEntityExtractor põhjal.
https://github.com/RasaHQ/rasa/blob/main/rasa/nlu/extractors/regex_entity_extractor.py
"""
import typing
from components.helper_functions import parse_nlu
from components.levenshtein import manual_levenshtein
from typing import Any, Optional, Text, Dict, List
from rasa.nlu.extractors.extractor import EntityExtractor
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.constants import (
ENTITIES,
ENTITY_ATTRIBUTE_VALUE,
TEXT,
ENTITY_ATTRIBUTE_TYPE,
INTENT,
PREDICTED_CONFIDENCE_KEY
)
from fuzzywuzzy import process
if typing.TYPE_CHECKING:
from rasa.nlu.model import Metadata
class EmployeeExtractor(EntityExtractor):
    """Fuzzy extractor of employee names (custom Rasa NLU component).

    Based on Rasa's RegexEntityExtractor (see module docstring).
    Comments below are translated from Estonian.
    """

    # Default configuration values
    defaults = {
        # minimum fuzzy-match score between an employee name and the text
        "match_threshold": 80,
        # location of the employee-name data table
        "employee_file_path": "data/employee.yml",
    }

    def __init__(self, component_config: Optional[Dict[Text, Any]] = None):
        super().__init__(component_config)
        self.employees = []
        self.match_threshold = self.component_config["match_threshold"]
        # Read the employee names into memory (skips the first 4 header
        # lines and strips the YAML "- " list markup).
        # NOTE(review): the path is read from the class-level ``defaults``
        # rather than ``self.component_config`` (unlike match_threshold),
        # so a per-pipeline override of employee_file_path is ignored —
        # confirm whether that is intended.
        with open(self.defaults['employee_file_path'], "r") as f:
            for line in f.readlines()[4:]:
                self.employees.append(line.replace(" - ", "").replace("\n", ""))
        # Read the words occurring in the intent's training examples.
        self.intent_words = parse_nlu(["- intent: request_employee_office\n"])

    def remove_intent_words(self, text):
        # Strip tokens that are within Levenshtein distance 1 of any
        # intent word, leaving (mostly) the name candidates behind.
        text_list = text.split(" ")
        for word in text.split(" "):
            # best_match = process.extractOne(word, self.intent_words)
            best_match = manual_levenshtein(word, self.intent_words)
            if best_match[1] < 2:
                text_list.remove(word)
        return " ".join(text_list)

    def train(
        self,
        training_data: TrainingData,
        config: Optional[RasaNLUModelConfig] = None,
        **kwargs: Any,
    ) -> None:
        # No training: matching happens against the employee list at runtime.
        pass

    def _extract_entities(self, message: Message) -> List[Dict[Text, Any]]:
        """Fuzzy-match the message text against the employee list."""
        entities = []
        # Avoid unnecessary extraction by checking the predicted intent.
        if message.get(INTENT)['name'] not in {"request_employee_office"}:
            return entities
        best_match = process.extractOne(self.remove_intent_words(message.get(TEXT)), self.employees)
        if best_match[1] >= self.match_threshold:
            entities.append({
                ENTITY_ATTRIBUTE_TYPE: "employee",
                ENTITY_ATTRIBUTE_VALUE: best_match[0],
                PREDICTED_CONFIDENCE_KEY: best_match[1]
            })
        return entities

    def process(self, message: Message, **kwargs: Any) -> None:
        """Attach extracted employee entities to the message."""
        extracted_entities = self._extract_entities(message)
        extracted_entities = self.add_extractor_name(extracted_entities)
        message.set(ENTITIES, message.get(ENTITIES, []) + extracted_entities, add_to_output=True)

    def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
        # Nothing is persisted; __init__ re-reads the employee file.
        pass

    @classmethod
    def load(
        cls,
        meta: Dict[Text, Any],
        model_dir: Optional[Text] = None,
        model_metadata: Optional["Metadata"] = None,
        cached_component: Optional["EntityExtractor"] = None,
        **kwargs: Any,
    ) -> "EntityExtractor":
        if cached_component:
            return cached_component
        else:
            return cls(meta)
| StarcoderdataPython |
3210196 | <gh_stars>0
from app import app

if __name__ == "__main__":
    # Do not add parameters here; configure the app directly in Config.
    app.run()
| StarcoderdataPython |
1970388 | """
LBPH-based Face recognition module
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import cv2
import tqdm
import numpy as np
class FaceRecognizer():
    """
    Face recognition class implementing the LBPH (Local Binary Patterns
    Histograms) algorithm via OpenCV's ``cv2.face`` module.
    """

    def __init__(self):
        """
        Instantiate a FaceRecognizer with a fresh LBPH model and a
        module-level logger.
        """
        self.model = cv2.face.LBPHFaceRecognizer_create()
        self.logger = logging.getLogger(__name__)

    def train(self, images, labels):
        """
        Train the recognizer on the training set.

        :param images: the images to train on
        :type images: numpy.ndarray shape: (num_images, image_height, image_width)
        :param labels: the labels/subjects the corresponding faces belong to
        :type labels: numpy.ndarray shape: (num_images,)
        """
        self.model.train(images, labels)

    def predict(self, images):
        """
        Predict the labels of the given images.

        :param images: the images to test on
        :type images: numpy.ndarray shape: (num_images, image_height, image_width)
        :returns: one (label, confidence) pair per image
        :rtype: list
        """
        # tqdm.trange provides the progress bar; the original loop also
        # incremented the index by hand, which had no effect and was removed.
        return [self.model.predict(images[i]) for i in tqdm.trange(len(images))]

    def evaluate(self, predictions, ground_truths):
        """
        Log Precision@1 of *predictions* against *ground_truths*.

        :param predictions: (label, confidence) pairs from :meth:`predict`
        :param ground_truths: expected labels, same length as predictions
        :raises ValueError: when the two sequences differ in length
            (was a bare ``assert``, which is stripped under ``python -O``)
        """
        if len(predictions) != len(ground_truths):
            raise ValueError(
                "predictions and ground_truths must have the same length")
        # Count exact top-1 label matches (column 0 of predictions is the label).
        true_positive = np.count_nonzero(
            np.equal(ground_truths, np.array(predictions)[:, 0]))
        precision_perc = true_positive / len(predictions)
        self.logger.info(
            "Precision@1: {0}/{1}={2:.3%}".format(
                true_positive, len(predictions), precision_perc))

    def save(self, name):
        """Persist the trained model to the file *name*."""
        self.model.write(name)

    def load(self, name):
        """Load a previously saved model from the file *name*."""
        self.model.read(name)
| StarcoderdataPython |
8147216 | <reponame>astooke/gtimer
"""
All gtimer-specific exception classes.
"""
class GTimerError(Exception):
    """Root of the gtimer exception hierarchy."""


class StoppedError(GTimerError):
    """gtimer error subtype for stopped-timer conditions."""


class PausedError(GTimerError):
    """gtimer error subtype for paused-timer conditions."""


class UniqueNameError(GTimerError):
    """gtimer error subtype for duplicate-name conditions."""


class LoopError(GTimerError):
    """gtimer error subtype for loop-related misuse."""


class StartError(GTimerError):
    """gtimer error subtype for start-related misuse."""


class BackdateError(GTimerError):
    """gtimer error subtype for backdating misuse."""
| StarcoderdataPython |
5013224 | # -*- coding: utf-8 -*-
import unittest
class TestList(unittest.TestCase):
    """List basics: indexing, slicing, mutation and the list methods.

    Written for Python 2 — ``test_sort`` relies on ``cmp`` and the
    positional ``list.sort(cmp, key, reverse)`` signature removed in
    Python 3.
    """

    def test_indexed(self):
        arr = [1, 2, 3]
        self.assertEqual(3, arr[2])
        self.assertEqual(2, arr[-2])
        self.assertEqual(1, [1, 4, 5][0])

    def test_slicing(self):
        arr = [1, 2, 3, 4, 5]
        self.assertEqual([1, 2], arr[0:2])
        self.assertEqual([3, 4, 5], arr[-3:])
        self.assertEqual(arr, arr[:])

    def test_concatenate(self):
        arr = [1, 2]
        self.assertEqual([1, 2, 3, 4], arr + [3, 4])

    def test_mutable(self):
        arr = [1, 2, 3]
        arr[2] = 4
        self.assertEqual([1, 2, 4], arr)
        arr[1:3] = [3, 6]
        self.assertEqual([1, 3, 6], arr)
        arr[1:3] = []
        self.assertEqual([1], arr)

    def test_nested(self):
        arr = [1, 2]
        arr.append([3, 4])
        self.assertEqual([1, 2, [3, 4]], arr)
        self.assertEqual(4, arr[2][1])

    def test_append(self):
        arr = [1, 2]
        arr.append(3)
        self.assertEqual([1, 2, 3], arr)
        arr[len(arr):] = [4]
        self.assertEqual([1, 2, 3, 4], arr)

    def test_extend(self):
        arr = [1, 2]
        arr.extend([3, 4])
        self.assertEqual([1, 2, 3, 4], arr)
        arr[len(arr):] = [5, 6]
        self.assertEqual([1, 2, 3, 4, 5, 6], arr)

    def test_insert(self):
        arr = [1, 2]
        arr.insert(0, -1)
        self.assertEqual([-1, 1, 2], arr)
        arr.insert(1, 0)
        self.assertEqual([-1, 0, 1, 2], arr)
        arr.insert(len(arr), 3)
        self.assertEqual([-1, 0, 1, 2, 3], arr)

    def test_remove(self):
        arr = [1, 2]
        with self.assertRaises(ValueError):
            arr.remove(3)  # removing a missing element raises ValueError
        arr.remove(2)
        self.assertEqual([1], arr)

    def test_pop(self):
        arr = [1, 2, 3, 4]
        self.assertEqual(4, arr.pop())
        self.assertEqual([1, 2, 3], arr)
        self.assertEqual(2, arr.pop(1))
        self.assertEqual([1, 3], arr)

    def test_index(self):
        arr = [1, 2, 3]
        with self.assertRaises(ValueError):
            arr.index(4)  # ValueError when the value is not found
        self.assertEqual(1, arr.index(2))

    def test_count(self):
        arr = [1, 1, 1, 2]
        self.assertEqual(0, arr.count(3))
        self.assertEqual(3, arr.count(1))

    def test_sort(self):
        # Python 2 only: sort(cmp[, key[, reverse]]) with the cmp builtin.
        arr = [2, 1, 3, 4]
        arr.sort(lambda x,y: cmp(y, x))
        self.assertEqual([4, 3, 2, 1], arr, "cmp는 -1, 0, 1 함수를 사용해야 한다")
        arr.sort(lambda x,y: cmp(y, x), lambda x: -x)
        self.assertEqual([1, 2, 3, 4], arr, "key func로 비교 전 가공을 할 수 있다")
        arr.sort(lambda x,y: cmp(y, x), lambda x: -x, True)
        self.assertEqual([4, 3, 2, 1], arr, "reverse로 비교 결과를 뒤집을 수 있다")

    def test_reverse(self):
        arr = [2, 1, 3]
        arr.reverse()
        self.assertEqual([3, 1, 2], arr)

    def test_del(self):
        a = [1, 2, 3, 4]
        del a[0]
        self.assertEqual([2, 3, 4], a)
        del a[1:3]
        self.assertEqual([2], a)
        del a[:]
        self.assertEqual([], a)
class TestFunctionalProgrammingTools(unittest.TestCase):
    """filter/map/reduce — Python 2 semantics.

    In Python 2, ``filter`` and ``map`` return lists (not iterators),
    ``map`` pads the shorter iterable with ``None``, and ``reduce`` is
    a builtin (moved to ``functools`` in Python 3).
    """

    def test_filter(self):
        l = filter(lambda x: x % 2 == 0, range(5))
        self.assertEqual([0, 2, 4], l)

    def test_map(self):
        l = map(lambda x: x ** 2, range(5))
        self.assertEqual([0, 1, 4, 9, 16], l)
        l = map(lambda x,y: (x or 0) + (y or 0), range(5), range(4))
        self.assertEqual([0, 2, 4, 6, 4], l,
                         "map에서 길이가 다를 경우 None으로 들어온다")

    def test_reduce(self):
        s = reduce(lambda x,y: x + y, range(5))
        self.assertEqual(0 + 1 + 2 + 3 + 4, s)
class TestListComprehensions(unittest.TestCase):
    """List comprehensions: creation, filtering, and nesting."""

    def test_for_create(self):
        self.assertEqual([0, 1, 4, 9], [x**2 for x in range(4)])
        self.assertEqual([(1, 2), (2, 1)],
                         [(x, y) for x in [1, 2] for y in [2, 1] if x != y])
        self.assertEqual([2, 1, 0, 1, 2], [abs(x) for x in [-2, -1, 0, 1, 2]])

    def test_for_nested(self):
        # Transpose a 3x4 matrix with a nested comprehension.
        matrix = [
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
        ]
        self.assertEqual([[1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12]],
                         [[row[i] for row in matrix] for i in range(4)])
class TestTupleAndSequnces(unittest.TestCase):
    """Tuple packing/unpacking and immutability."""

    def test_tuples(self):
        t = "hello", 123, True
        self.assertEqual(123, t[1])
        x, y, z = t
        self.assertEqual(True, z, "tuple unpacking")

    def test_tuples_immutable(self):
        t = "hello", 123
        with self.assertRaises(TypeError):
            t[1] = 3  # immutable
        self.assertEqual(123, t[1])
class TestSet(unittest.TestCase):
    """Set construction, membership, algebra, and comprehensions."""

    def test_set(self):
        r = [1, 2, 3, 2, 1, 3, 4, 2, 1]
        s = set(r)
        self.assertEqual(set([1, 2, 3, 4]), s)
        self.assertTrue(1 in s)
        self.assertTrue(8 not in s)

    def test_set_op(self):
        # difference, union, intersection, symmetric difference
        a = set('abcde')
        b = set('cdefg')
        self.assertEqual(set(['a', 'b']), a - b)
        self.assertEqual(set(['a', 'b', 'c', 'd', 'e', 'f', 'g']), a | b)
        self.assertEqual(set(['c', 'd', 'e']), a & b)
        self.assertEqual(set(['a', 'b', 'f', 'g']), a ^ b)

    def test_comprehensions(self):
        self.assertEqual(set(['d', 'e']),
                         {x for x in 'abcdedede' if x not in 'abc'})
class TestDictionary(unittest.TestCase):
    """Dict basics: lookup, insertion, deletion, and comprehensions."""

    def test_dic(self):
        d = {'a':1, 'b':2}
        self.assertEqual(1, d['a'])
        d['c'] = 3
        self.assertEqual(3, len(d))
        del d['b']
        self.assertFalse('b' in d)

    def test_comprehensions(self):
        self.assertEqual({2: 4, 4: 16, 5: 25},
                         {x: x**2 for x in [2, 4, 5]})
class TestLoopingTechnique(unittest.TestCase):
    """Looping helpers: enumerate, zip, reversed, sorted.

    Python 2 only: uses ``xrange`` and ``dict.iteritems``, both removed
    in Python 3.
    """

    def test_enumerate(self):
        l = [(i, j) for i, j in enumerate(['a', 'b', 'c'])]
        self.assertEqual([(0, 'a'), (1, 'b'), (2, 'c')], l)
        l2 = [(i, j) for i, j in enumerate(['a', 'b', 'c'], 1)]
        self.assertEqual([(1, 'a'), (2, 'b'), (3, 'c')], l2,
                         "enumerate 두번째 인자로 시작 index를 넣을 수 있다.")

    def test_zip(self):
        a = ['a', 'b', 'c', 'd']
        b = [1, 2, 3]
        r = [(x, y) for x, y in zip(a, b)]
        self.assertEqual([('a', 1), ('b', 2), ('c', 3)], r,
                         "짧은 리스트 길이만큼 zip!")
        # zip(*r) un-zips back into the two (truncated) sequences.
        a2, b2 = zip(*r)
        self.assertEqual(('a', 'b', 'c'), a2)
        self.assertEqual((1, 2, 3), b2)

    def test_reversed(self):
        self.assertEqual([3, 2, 1, 0], list(reversed(xrange(4))))

    def test_sorted(self):
        a = [1, 3, 2]
        self.assertEqual([1, 2, 3], sorted(a), "a를 변경하지 않는다.")
        self.assertEqual([1, 3, 2], a)

    def test_iteritems(self):
        a = {'a': 1, 'b': 2}
        self.assertEqual(['a1', 'b2'], ["%s%s" %(x, y) for x, y in a.iteritems()])
if __name__ == '__main__':
    # Discover and run every TestCase class defined in this module.
    unittest.main()
| StarcoderdataPython |
1747707 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from oca.models.base_model_ import Model
from oca import util
class HomeScreenContent(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, type=None, embedded_app=None, service_email=None):  # noqa: E501
        """HomeScreenContent - a model defined in OpenAPI

        :param type: The type of this HomeScreenContent.  # noqa: E501
        :type type: str
        :param embedded_app: The embedded_app of this HomeScreenContent.  # noqa: E501
        :type embedded_app: str
        :param service_email: The service_email of this HomeScreenContent.  # noqa: E501
        :type service_email: str
        """
        self.openapi_types = {
            'type': str,
            'embedded_app': str,
            'service_email': str
        }

        self.attribute_map = {
            'type': 'type',
            'embedded_app': 'embedded_app',
            'service_email': 'service_email'
        }

        # NOTE: the private attributes are assigned directly here, so the
        # allowed-values validation in the ``type`` setter below is NOT
        # applied during construction (standard openapi-generator output).
        self._type = type
        self._embedded_app = embedded_app
        self._service_email = service_email

    @classmethod
    def from_dict(cls, dikt) -> 'HomeScreenContent':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The HomeScreenContent of this HomeScreenContent.  # noqa: E501
        :rtype: HomeScreenContent
        """
        return util.deserialize_model(dikt, cls)

    @property
    def type(self):
        """Gets the type of this HomeScreenContent.

        :return: The type of this HomeScreenContent.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this HomeScreenContent.

        :param type: The type of this HomeScreenContent.
        :type type: str
        """
        allowed_values = ["native", "embedded_app"]  # noqa: E501
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"
                .format(type, allowed_values)
            )

        self._type = type

    @property
    def embedded_app(self):
        """Gets the embedded_app of this HomeScreenContent.

        Only set when 'type' is embedded_app  # noqa: E501

        :return: The embedded_app of this HomeScreenContent.
        :rtype: str
        """
        return self._embedded_app

    @embedded_app.setter
    def embedded_app(self, embedded_app):
        """Sets the embedded_app of this HomeScreenContent.

        Only set when 'type' is embedded_app  # noqa: E501

        :param embedded_app: The embedded_app of this HomeScreenContent.
        :type embedded_app: str
        """
        self._embedded_app = embedded_app

    @property
    def service_email(self):
        """Gets the service_email of this HomeScreenContent.

        This service will be used as 'context' in the embedded app, to set user data / service data.  # noqa: E501

        :return: The service_email of this HomeScreenContent.
        :rtype: str
        """
        return self._service_email

    @service_email.setter
    def service_email(self, service_email):
        """Sets the service_email of this HomeScreenContent.

        This service will be used as 'context' in the embedded app, to set user data / service data.  # noqa: E501

        :param service_email: The service_email of this HomeScreenContent.
        :type service_email: str
        """
        self._service_email = service_email
| StarcoderdataPython |
12841832 | <gh_stars>0
from rest_framework.routers import DefaultRouter
from django.urls import path, include
from .views import *
router = DefaultRouter()
# NOTE(review): ``User`` comes from ``from .views import *`` and is
# registered as a viewset here — presumably a ViewSet class despite the
# model-like name; confirm in views.py.
router.register('user', User, basename='user')

urlpatterns = [
    path('', include(router.urls))
]
| StarcoderdataPython |
4994279 | <filename>migrations/versions/175c80bee699_modelos_actualizado.py
"""modelos actualizado
Revision ID: 1<PASSWORD>
Revises: None
Create Date: 2016-05-19 10:38:47.632650
"""
# revision identifiers, used by Alembic.
# NOTE(review): these strings look secret-scrubbed ('<PASSWORD>'
# placeholders); per the file header the revision should be
# '175c80bee699' — restore the real IDs before running this migration.
revision = '175c<PASSWORD>'
down_revision = None

from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create companies, skills, students and the two M2M join tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('companies',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.Unicode(length=100), nullable=True),
    sa.Column('address', sa.Unicode(length=255), nullable=True),
    sa.Column('phone', sa.Unicode(length=50), nullable=True),
    sa.Column('website', sa.Unicode(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name'),
    sa.UniqueConstraint('website')
    )
    op.create_table('skills',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.Unicode(length=50), nullable=True),
    sa.Column('description', sa.UnicodeText(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('students',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.Unicode(length=255), nullable=True),
    sa.Column('last_name', sa.Unicode(length=255), nullable=True),
    sa.Column('age', sa.Integer(), nullable=True),
    sa.Column('email', sa.Unicode(length=50), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    # Join tables carry only the two foreign keys (no surrogate PK).
    op.create_table('company_skills',
    sa.Column('company_id', sa.Integer(), nullable=True),
    sa.Column('skills_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['company_id'], ['companies.id'], ),
    sa.ForeignKeyConstraint(['skills_id'], ['skills.id'], )
    )
    op.create_table('student_skills',
    sa.Column('student_id', sa.Integer(), nullable=True),
    sa.Column('skills_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['skills_id'], ['skills.id'], ),
    sa.ForeignKeyConstraint(['student_id'], ['students.id'], )
    )
    ### end Alembic commands ###
def downgrade():
    """Drop all tables created in upgrade(), join tables first."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('student_skills')
    op.drop_table('company_skills')
    op.drop_table('students')
    op.drop_table('skills')
    op.drop_table('companies')
    ### end Alembic commands ###
| StarcoderdataPython |
6464473 | from flask import Blueprint
welcome = Blueprint('welcome', __name__, url_prefix='/')

# Imported after the blueprint exists so views/error handlers can
# register on ``welcome`` without a circular import.
from . import views, errors
1963466 | # _*_ coding: utf-8 _*_
"""
@copyright Copyright (c) 2014 Submit Consulting
@author <NAME> (@asullom)
@package sad
@Descripcion Registro de los modelos de la app
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst, get_text_list
from unicodedata import normalize
# models
# others
# Yes/no flags (Spanish) used by Cobranza.cobranza_interes choices.
N = 'NO'
S = 'SI'
COBRANZA_INTERES_CHOICES = (
    (N, 'NO'),
    (S, 'SI'),
)
class ConceptoCobranza(models.Model):
    """Catalog of collection concepts.

    No explicit fields are declared yet; only Django's implicit auto
    ``id`` primary key exists.
    """

    class Meta:
        verbose_name = "ConceptoCobranza"
        verbose_name_plural = "ConceptoCobranzas"

    def __str__(self):
        # The original body was ``pass``, so __str__ returned None and
        # ``str(obj)`` raised TypeError (e.g. in the Django admin and in
        # Cobranza.__str__ below); return a stable textual identity.
        return "ConceptoCobranza %s" % self.pk
class Cobranza(models.Model):
    """Collections (Cobranza) table.

    Enforces an accent-/case-insensitive duplicate check across all of
    its fields in ``save()``, in addition to the DB-level
    ``unique_together`` constraint.
    """
    '''
    concepto_cobranza = models.CharField(
        _('concepto cobranza'),
        max_length=20, null=False, blank=False,
        error_messages={'unique': "eeeee ee"})
    '''
    # Optional FK to the collection-concept catalog.
    concepto_cobranza = models.ForeignKey(
        ConceptoCobranza, blank=True, null=True)
    importe = models.CharField(
        capfirst(_('importe')), max_length=50, null=False, blank=False)
    detalle = models.TextField(
        capfirst(_('detalle')), max_length=300, null=False, blank=False)
    cobranza_interes = models.CharField(
        _('cobranza interes '),
        max_length=50, choices=COBRANZA_INTERES_CHOICES, default=N)
    fecha_inicio = models.DateField(_('fecha inicio'), null=False, blank=False)
    fecha_fin = models.DateField(_('fecha fin'), null=False, blank=False)

    class Meta:
        verbose_name = _('Cobranza')
        verbose_name_plural = _('Cobranzas')
        unique_together = (
            (
                'concepto_cobranza',
                'importe',
                'detalle',
                'cobranza_interes',
                'fecha_inicio',
                'fecha_fin'
            ))

    def __str__(self):
        cc = self.concepto_cobranza
        i = self.importe
        moneda = 'S/ '  # currency prefix prepended to the amount
        cadena = "%s %s %s" % (cc, moneda, i)
        return cadena
        # return '%s + %s' % (
        #     self.concepto_cobranza,
        #     self.importe)

    # (translated) For transactions it is necessary to call
    # transaction.savepoint_rollback(sid)
    def save(self, *args, **kwargs):
        # TODO (translated): raise with Exception, not ValidationError
        # Accent-/case-insensitive duplicate check: NFKD-normalize and
        # ascii-fold the concatenation of all field values, then compare
        # against every other row sharing the same concepto_cobranza.
        if normalize('NFKD', u'%s %s %s %s %s %s' % (
            self.concepto_cobranza,
            self.importe,
            self.detalle,
            self.cobranza_interes,
            self.fecha_inicio,
            self.fecha_fin
        )).encode('ascii', 'ignore').lower() in list(
            normalize('NFKD', u'%s %s %s %s %s %s' % (
                c['concepto_cobranza'],
                c['importe'],
                c['detalle'],
                c['cobranza_interes'],
                c['fecha_inicio'],
                c['fecha_fin'],
            )).encode(
                'ascii', 'ignore').lower()
            for c in Cobranza.objects.values(
                'concepto_cobranza',
                'importe',
                'detalle',
                'cobranza_interes',
                'fecha_inicio',
                'fecha_fin'
            ).exclude(pk=self.pk).filter(concepto_cobranza=self.concepto_cobranza)
        ):
            raise Exception(_(u'%(model_name)s with this %(field_label)s already exists.') % {
                'model_name': _('Cobranza'),
                'field_label': get_text_list((
                    capfirst(_('concepto cobranza')),
                    capfirst(_('importe')),
                    capfirst(_('detalle')),
                    capfirst(_('cobranza interes')),
                    capfirst(_('fecha inicio')),
                    capfirst(_('fecha fin'))),
                    _('and')),
            })
        # NOTE(review): this second check rejects ANY other row with the
        # same concepto_cobranza, which appears to make the detailed check
        # above unreachable; its error message also mentions
        # 'number'/'Type', which does not match these fields — confirm.
        if Cobranza.objects.exclude(id=self.id).filter(concepto_cobranza=self.concepto_cobranza).count() > 0:
            raise Exception(_(u'%(model_name)s with this %(field_label)s already exists.') % {
                'model_name': _('Cobranza'),
                'field_label': get_text_list((capfirst(_('number')),
                                              capfirst(_('Type'))),
                                             _('and')),
            })
        super(Cobranza, self).save(*args, **kwargs)
| StarcoderdataPython |
1808397 | <reponame>Matrixchung/EDAutopilot<filename>.vscode/robigo.py
from game import *
import transitions
# Disable pyautogui's move-to-corner failsafe abort; the mouse is driven
# programmatically across the whole screen below.
pyautogui.FAILSAFE=False

# --- Galaxy-map template images (normal / highlighted button states) ---
map_bookmark = str(fileRootPath.joinpath("templates/map_bookmark.png"))
map_bookmarkHL = str(fileRootPath.joinpath("templates/map_bookmark_highlight.png"))
map_sothis = str(fileRootPath.joinpath("templates/robigo/map_sothis_a_5.png"))
map_sothisHL = str(fileRootPath.joinpath("templates/robigo/map_sothis_a_5_highlight.png"))
map_robigo = str(fileRootPath.joinpath("templates/robigo/map_robigom.png"))
map_robigoHL = str(fileRootPath.joinpath("templates/robigo/map_robigom_highlight.png"))
map_plotroute = str(fileRootPath.joinpath("templates/map_plot_route.png"))
map_plotrouteHL = str(fileRootPath.joinpath("templates/map_plot_route_highlight.png"))
# --- On-screen HUD sign templates ---
sign_scassist = str(fileRootPath.joinpath('templates/sign_scassist.png'))
sign_align_with_target = str(fileRootPath.joinpath('templates/sign_align_with_target.png'))
sign_autodock = str(fileRootPath.joinpath('templates/sign_auto_dock.png'))
sign_throttle_up = str(fileRootPath.joinpath('templates/sign_throttle_up.png'))
sign_obscured = str(fileRootPath.joinpath('templates/sign_target_obscured.png'))
sign_fuel_filled = str(fileRootPath.joinpath('templates/sign_fuel_filled.png'))
sign_mission = str(fileRootPath.joinpath('templates/sign_has_mission.png'))
sign_pause_menu = str(fileRootPath.joinpath('templates/sign_pause_menu.png'))
# --- Station UI tab templates ---
tab_contacts = str(fileRootPath.joinpath('templates/contacts.png'))
tab_contactsHL = str(fileRootPath.joinpath('templates/contacts_highlight.png'))
tab_sirius = str(fileRootPath.joinpath('templates/robigo/tab_sirius.png'))
tab_siriusHL = str(fileRootPath.joinpath('templates/robigo/tab_sirius_highlight.png'))
tab_robigomines = str(fileRootPath.joinpath('templates/robigo/tab_robigo_mines_mission.png'))
tab_robigominesHL = str(fileRootPath.joinpath('templates/robigo/tab_robigo_mines_mission_highlight.png'))
# --- Button templates ---
exitButton = str(fileRootPath.joinpath("templates/exit.png"))
exitButtonHL = str(fileRootPath.joinpath("templates/exit_highlight.png"))
launchButton = str(fileRootPath.joinpath("templates/autolaunch.png"))
launchButtonHL = str(fileRootPath.joinpath("templates/autolaunch_highlight.png"))
button_requestDock = str(fileRootPath.joinpath('templates/button_request_docking.png'))
button_requestDockHL = str(fileRootPath.joinpath('templates/button_request_docking_highlight.png'))
button_fuel = str(fileRootPath.joinpath('templates/button_fuel.png'))
button_complete_mission = str(fileRootPath.joinpath('templates/button_complete_mission.png'))
button_complete_missionHL = str(fileRootPath.joinpath('templates/button_complete_mission_highlight.png'))
button_starport_service = str(fileRootPath.joinpath('templates/button_starport_services.png'))
def setDest(session,dest):
    """Plot a galaxy-map route to a bookmarked destination.

    Opens the galaxy map (unless already focused), opens the bookmarks
    panel, clicks the requested bookmark, then clicks 'plot route' (if
    found) and closes the map again.

    :param session: game session (provides sendKey/sleep/guiFocus)
    :param dest: 'Sothis' or 'Robigo'
    :return: True on success; False when the bookmark button is missing
        or *dest* is not one of the two known names.
    """
    if session.guiFocus != 'GalaxyMap':
        session.sendKey('UI_OpenGalaxyMap')  # Toggle Map
        session.sleep(3)
    bookmarkLoc = locateButtons(map_bookmark,map_bookmarkHL,confidence1=0.7,confidence2=0.7)
    if bookmarkLoc[0] == -1:
        print("Error in setDest(): Cannot find any bookmark button")
        return False
    pyautogui.moveTo(bookmarkLoc)
    pyautogui.click()
    session.sleep(2)
    pyautogui.doubleClick(bookmarkLoc)
    session.sleep(2)
    pyautogui.move(50,0)
    if dest == 'Sothis': destLoc = locateButtons(map_sothis,map_sothisHL,confidence1=0.8,confidence2=0.8)
    elif dest == 'Robigo': destLoc = locateButtons(map_robigo,map_robigoHL,confidence1=0.7,confidence2=0.7)
    else : return False
    session.sleep(1)
    # NOTE(review): unlike bookmarkLoc above, destLoc is not checked for
    # the (-1, ...) "not found" sentinel before clicking — confirm.
    pyautogui.click(destLoc)
    session.sleep(1)
    session.sendKey('space')
    session.sleep(3)
    plotRoute = locateButtons(map_plotroute,map_plotrouteHL,confidence1=0.8,confidence2=0.8)
    if plotRoute[0] != -1:
        session.sleep(1)
        pyautogui.click(plotRoute)
        session.sleep(2)
        session.sendKey('space')
        session.sleep(3)
    # Close the map regardless of whether the route was plotted.
    session.sendKey('UI_OpenGalaxyMap')
    return True
class p(object):
    # Empty attribute holder: used below as the "model" object that
    # transitions.Machine attaches its state attribute to.
    pass

progress = p()
if __name__ == '__main__': # Test
isDebug = True
stateOverride = '' # Debugging Options
states = ['initial','get-mission','mission-received','select-target-sothis','undock','thrust-up','first-align','first-jump', # in Robigo
'first-sc','second-align','second-jump', # in Wredguia TH-U c16-19
'second-sc','third-align','first-approaching','first-enable-assist','first-waiting-for-arrive','first-auxiliary-align', # in sothis and sothis 5 (Sirius Atmospherics)
'target-beacon','waiting-for-beacon','select-target-robigo','sothis-a-5-avoiding','fourth-align','third-jump', # in sirius atmospherics
'third-sc','fifth-align','fourth-jump', # in Wredguia WD-K d8-65
'fourth-sc','sixth-align','second-enable-assist','second-auxiliary-align','second-waiting-for-arrive','approach-station','trigger-autodock','waiting-for-docked','claim-task-reward' # back to robigo
]
initialState = 'initial' # do not change!
if stateOverride != '':initialState=stateOverride
machine = transitions.Machine(model=progress,states=states,initial=initialState)
session = gameSession(debug=isDebug)
align = False
auto = False
startTime = datetime.now()
elapsedTime = datetime.now()-startTime
failsafeState = ''
if isDebug:
statusImg = np.zeros((70,1600,3),np.uint8)
    # Main loop: refresh game state, poll hotkeys, then run at most one
    # state handler per iteration, until the 'end' key is pressed.
    while not keyboard.is_pressed('end'):
        try:
            session.update()
            # --- input handling (hotkeys) ---
            if keyboard.is_pressed('o'): align = True
            if keyboard.is_pressed('home'):
                auto = True
                startTime = datetime.now()
            if isDebug and keyboard.is_pressed('capslock+space'): screenCapture()
            # --- automation logic ---
            if auto:
                if progress.state!='initial':
                    elapsedTime = datetime.now()-startTime
                if keyboard.is_pressed('f10'): # Emergency Break
                    auto=False
                    failsafeState = progress.state
                    continue
                # Resume from the state saved by the emergency break, if any.
                if failsafeState != '':machine.set_state(failsafeState)
                if session.status == 'Docked' and progress.state == 'initial': # in while loop
                    if len(session.missionList) == 0 : # 'get-mission'
                        # machine.set_state('get-mission')
                        pass
                        if isDebug: machine.set_state('mission-received') # allow launch without missions (isDebug)
                    else :
                        machine.set_state('mission-received')
                elif progress.state == 'get-mission':
                    pass # WIP
                elif progress.state == 'mission-received': # elif chain: run only one state handler per while pass, so a transition never acts on stale session data
                    if session.shipTarget != 'Wredguia TH-U c16-19': # select-target-sothis
                        session.sleep(1)
                        setDest(session,'Sothis')
                        session.sleep(2)
                    if session.shipTarget == 'Wredguia TH-U c16-19':
                        machine.set_state('undock')
                elif progress.state == 'undock':
                    if session.status == 'Docked':
                        if session.guiFocus != 'NoFocus': # back out to the main HUD first
                            session.sendKey('esc')
                            session.sleep(2)
                        session.sendKey('UI_Down',repeat=3) # choose AUTO LAUNCH
                        session.sleep(1)
                        session.sendKey('space')
                        session.sendKey('SpeedZero')
                        session.sleep(1)
                        machine.set_state('thrust-up')
                elif progress.state=='thrust-up':
                    # Keep thrusting up until clear of the station's mass lock.
                    session.sendKey('ThrustUp')
                    if 'FSDMassLocked' not in session.stateList:
                        session.sendKey('ThrustUp',hold=3,block=True)
                        session.sendKey('SpeedZero')
                        machine.set_state('first-align')
                elif progress.state=='first-align':
                    if 'FSDMassLocked' in session.stateList:
                        machine.set_state('thrust-up')
                    # if not align: align = True # pass true segment to next loop
                    if not session.align(): # align complete
                        # session.sendKey('TargetAhead')
                        align=False
                        machine.set_state('first-jump')
                elif progress.state=='first-jump':
                    # Enable FSD
                    if (('FSDJump' not in session.stateList and 'FSDCharging' not in session.stateList) and
                        'Supercruise' in session.stateList or 'FSDCooldown' in session.stateList) and session.shipLoc!='Robigo': # Waiting for jump complete
                        machine.set_state('first-sc')
                    elif 'FSDCharging' not in session.stateList and session.shipLoc=='Robigo' and locateImageOnScreen(sign_throttle_up,confidence=0.6)[0]==-1: # need charge
                        session.sendKey('EnableFSD')
                        session.sendDelay(1,block=True) # Just for update the stateList
                        session.sendDelay(15,block=True)
                        session.sendKey('EngineBoost')
                        session.sendDelay(0.5,block=True)
                        session.sendKey('SpeedZero')
                elif progress.state=='first-sc':
                    # Just arrived in supercruise: steer away from the star before aligning.
                    session.sendDelay(1,block=True)
                    session.sunAvoiding(fwdDelay=25,turnDelay=11)
                    session.sendDelay(1,block=True)
                    session.sendKey('PitchUpButton',repeat=3) # trick
                    machine.set_state('second-align')
                elif progress.state=='second-align':
                    if not session.align():
                        align=False
                        machine.set_state('second-jump')
                elif progress.state=='second-jump':
                    # Enable FSD
                    if (('FSDJump' not in session.stateList and 'FSDCharging' not in session.stateList) and
                        'Supercruise' in session.stateList or 'FSDCooldown' in session.stateList) and session.shipLoc!='Wredguia TH-U c16-19': # Waiting for jump complete
                        machine.set_state('second-sc')
                    elif 'FSDCharging' not in session.stateList and session.shipLoc=='Wredguia TH-U c16-19' and locateImageOnScreen(sign_throttle_up,confidence=0.6)[0]==-1: # need charge
                        session.sendKey('EnableFSD')
                        session.sendDelay(1,block=True) # Just for update the stateList
                        session.sendDelay(15,block=True)
                        session.sendKey('Speed100') # This time it is in supercruise,so no boost can be applied
                        session.sendDelay(2,block=True)
                        session.sendKey('SpeedZero')
                elif progress.state=='second-sc':
                    session.sendDelay(1,block=True)
                    session.sunAvoiding(fwdDelay=25)
                    session.sendDelay(1,block=True)
                    machine.set_state('third-align')
                elif progress.state=='third-align':
                    if not session.align():
                        align=False
                        machine.set_state('first-approaching')
                elif progress.state=='first-approaching':
                    if not session.align():
                        session.sendKey('Speed100')
                        session.sendDelay(58,block=True) # magic number:wait the ship approaching Sirius Atmospherics
                        session.align()
                        session.sendKey('SpeedZero')
                        machine.set_state('first-enable-assist')
                elif progress.state == 'first-enable-assist':
                    # Change the navigation target to Sirius Atmospherics and enable Supercruise Assist
                    result1 = locateImageOnScreen(sign_scassist,confidence=0.8)
                    result2 = locateImageOnScreen(sign_align_with_target,confidence=0.8)
                    if result2[0]!=-1 or result1[0]!=-1: # Supercruise Assist active
                        machine.set_state('first-waiting-for-arrive')
                        print('first-enable-assist:Assist Already Active!')
                    elif result1[0]==-1 and result2[0]==-1: # Supercruise Assist not enabled
                        session.sendKey('SpeedZero')
                        if session.guiFocus != 'NoFocus':
                            session.sendKey('esc') # back to main panel
                            session.sendDelay(1,block=True)
                        if session.guiFocus != 'Panel_1':
                            session.sendKey('UI_1')
                            session.sendDelay(1,block=True)
                        session.sendKey('UI_Left',repeat=3)
                        session.sendKey('UI_Up',repeat=5) # To Left-Up Corner
                        # Now start from FILTER button
                        # Select Sirius Atmospherics (This time it should be the nearest POI)
                        session.sendKey('UI_Right')
                        session.sendDelay(1,block=True)
                        session.sendKey('UI_Up',hold=4,block=True) # small trick:hold the button to get to the top
                        session.sendDelay(1,block=True)
                        for i in range(20):
                            # The nearest-POI ordering is unreliable, so walk the nav-panel
                            # entries and pick Sirius Atmospherics by template matching.
                            res1 = locateImageOnScreen(tab_sirius,confidence=0.6)
                            res2 = locateImageOnScreen(tab_siriusHL,confidence=0.6)
                            if res2[0]!=-1: # Match Found
                                break
                            if res2[0]==-1 or res1[0]!=-1:
                                session.sendKey('UI_Down')
                                session.sendDelay(2.5,block=True)
                        session.sendKey('space')
                        session.sendDelay(1,block=True)
                        session.sendKey('UI_Right')
                        session.sendKey('space')
                        session.sendDelay(1,block=True)
                        session.sendKey('esc') # back to main panel
                        session.sendDelay(3,block=True)
                        result1 = locateImageOnScreen(sign_scassist,confidence=0.6) # re-check assist status
                        result2 = locateImageOnScreen(sign_align_with_target,confidence=0.6)
                        if result2[0]!=-1 or result1[0]!=-1: # Supercruise Assist active
                            # machine.set_state('first-waiting-for-arrive')
                            machine.set_state('first-auxiliary-align')
                elif progress.state == 'first-auxiliary-align':
                    if not session.align():
                        machine.set_state('first-waiting-for-arrive')
                elif progress.state=='first-waiting-for-arrive':
                    # Let Supercruise Assist fly in; keep the target centered while it does.
                    session.sendDelay(0.1,block=True)
                    result2 = locateImageOnScreen(sign_align_with_target,confidence=0.7)
                    if result2[0]!=-1 and 'Supercruise' in session.stateList :
                        session.align()
                    if not ('Supercruise' in session.stateList) and session.status == 'normal': # add more condition
                        session.sendDelay(1,block=True)
                        session.sendKey('SpeedZero')
                        machine.set_state('target-beacon')
                elif progress.state=='target-beacon':
                    session.sendKey('TargetAhead')
                    session.sendDelay(1,block=True)
                    machine.set_state('waiting-for-beacon')
                elif progress.state == 'waiting-for-beacon':
                    if 'FSDCooldown' not in session.stateList: # About the same time
                        session.sendDelay(2,block=True)
                        machine.set_state('select-target-robigo')
                elif progress.state=='select-target-robigo':
                    if session.shipTarget != 'Wredguia WD-K d8-65': # select-target-sothis
                        session.sleep(1)
                        setDest(session,'Robigo')
                        session.sleep(2)
                    if session.shipTarget == 'Wredguia WD-K d8-65':
                        machine.set_state('sothis-a-5-avoiding')
                elif progress.state == 'sothis-a-5-avoiding':
                    session.sunAvoiding(turnDelay=18,fwdDelay=22) # Avoid the blue planet which affects the Template Matching
                    session.sendDelay(2,block=True)
                    machine.set_state('fourth-align')
                elif progress.state=='fourth-align':
                    if not session.align():
                        align=False
                        machine.set_state('third-jump')
                elif progress.state=='third-jump':
                    # Enable FSD
                    if (('FSDJump' not in session.stateList and 'FSDCharging' not in session.stateList) and
                        'Supercruise' in session.stateList or 'FSDCooldown' in session.stateList) and session.shipLoc!='Sothis': # Waiting for jump complete
                        machine.set_state('third-sc')
                    elif 'FSDCharging' not in session.stateList and session.shipLoc=='Sothis' and locateImageOnScreen(sign_throttle_up,confidence=0.6)[0]==-1: # need charge
                        session.sendKey('EnableFSD')
                        session.sendDelay(1,block=True) # Just for update the stateList
                        session.sendDelay(15,block=True)
                        session.sendKey('EngineBoost') # in normal space
                        session.sendDelay(0.5,block=True)
                        session.sendKey('SpeedZero')
                elif progress.state=='third-sc':
                    session.sendDelay(1,block=True)
                    session.sunAvoiding()
                    session.sendDelay(1,block=True)
                    machine.set_state('fifth-align')
                elif progress.state=='fifth-align':
                    if not session.align():
                        align=False
                        machine.set_state('fourth-jump')
                elif progress.state=='fourth-jump':
                    # Enable FSD
                    if (('FSDJump' not in session.stateList and 'FSDCharging' not in session.stateList) and
                        'Supercruise' in session.stateList or 'FSDCooldown' in session.stateList) and session.shipLoc !='Wredguia WD-K d8-65' : # Waiting for jump complete
                        machine.set_state('fourth-sc')
                    elif 'FSDCharging' not in session.stateList and session.shipLoc=='Wredguia WD-K d8-65' and locateImageOnScreen(sign_throttle_up,confidence=0.6)[0]==-1: # need charge
                        session.sendKey('EnableFSD')
                        session.sendDelay(1,block=True) # Just for update the stateList
                        session.sendDelay(15,block=True)
                        session.sendKey('Speed100') # This time it is in supercruise,so no boost can be applied
                        session.sendDelay(2,block=True)
                        session.sendKey('SpeedZero')
                elif progress.state == 'fourth-sc':
                    session.sendDelay(1,block=True)
                    session.sunAvoiding()
                    session.sendDelay(1,block=True)
                    machine.set_state('sixth-align')
                elif progress.state == 'sixth-align':
                    if not session.align():
                        align=False
                        machine.set_state('second-enable-assist')
                elif progress.state == 'second-enable-assist':
                    # just enable Supercruise Assist to Robigo Mines
                    session.sendDelay(2,block=True)
                    result1 = locateImageOnScreen(sign_scassist,confidence=0.8)
                    result2 = locateImageOnScreen(sign_align_with_target,confidence=0.8)
                    if result2[0]!=-1 or result1[0]!=-1: # Supercruise Assist active
                        machine.set_state('second-waiting-for-arrive')
                        print('second-enable-assist:Assist Already Active!')
                    elif result1[0]==-1 and result2[0]==-1: # Supercruise Assist not enabled
                        if session.guiFocus != 'NoFocus':
                            session.sendKey('esc') # back to main panel
                            session.sendDelay(1,block=True)
                        if session.guiFocus != 'Panel_1':
                            session.sendKey('UI_1')
                            session.sendDelay(1,block=True)
                        session.sendKey('UI_Left',repeat=3)
                        session.sendKey('UI_Up',repeat=5) # To Left-Up Corner
                        # Now start from FILTER button
                        # Select <NAME> (This time it should be the second POI/Station while the first is NAV BEACON)
                        session.sendKey('UI_Right')
                        session.sendDelay(1,block=True)
                        session.sendKey('UI_Up',hold=2,block=True) # small trick:hold the button to get to the top
                        for i in range(10): # maximum 10 targets in a single tab
                            # Walk the nav-panel entries and pick the station by template matching.
                            res1 = locateImageOnScreen(tab_robigomines,confidence=0.6)
                            res2 = locateImageOnScreen(tab_robigominesHL,confidence=0.6)
                            if res2[0]!=-1: # Match Found
                                break
                            if res2[0]==-1 or res1[0]!=-1:
                                session.sendKey('UI_Down')
                                session.sendDelay(2.5,block=True)
                        session.sendKey('space')
                        session.sendDelay(1,block=True)
                        session.sendKey('UI_Right')
                        session.sendKey('space')
                        session.sendDelay(1,block=True)
                        session.sendKey('esc') # back to main panel
                        session.sendDelay(3,block=True)
                        result1 = locateImageOnScreen(sign_scassist,confidence=0.6) # re-check assist status
                        result2 = locateImageOnScreen(sign_align_with_target,confidence=0.6)
                        if result2[0]!=-1 or result1[0]!=-1: # Supercruise Assist active
                            # machine.set_state('second-waiting-for-arrive')
                            machine.set_state('second-auxiliary-align')
                elif progress.state == 'second-auxiliary-align': # make sure the supercruise assist active
                    if not session.align():
                        machine.set_state('second-waiting-for-arrive')
                elif progress.state=='second-waiting-for-arrive':
                    if 'Supercruise' in session.stateList and locateImageOnScreen(sign_obscured,confidence=0.8)[0]!=-1: # target obscured
                        print('second-waiting-for-arrive:Destination Target Obscured!') # destination is blocked by a celestial body
                        session.sunAvoiding(turnDelay=9,fwdDelay=30)
                        machine.set_state('second-auxiliary-align')
                    if not ('Supercruise' in session.stateList) and session.status == 'normal': # add more condition
                        session.sendDelay(1,block=True)
                        session.sendKey('SpeedZero')
                        machine.set_state('approach-station')
                elif progress.state=='approach-station':
                    session.sendKey('EngineBoost') # trick:boost
                    session.sendDelay(5,block=True) # magic number : wait for approaching to 7.5km
                    session.sendKey('TargetAhead') # trick: select the station so that it can be directly selected in CONTACTS Tab
                    session.sendKey('SpeedZero')
                    machine.set_state('trigger-autodock')
                elif progress.state=='trigger-autodock':
                    # TRIGGER Autodock: open the left panel, find the CONTACTS tab,
                    # and press REQUEST DOCKING; fall back to a retry sleep if denied.
                    if session.guiFocus != 'Panel_1':
                        if session.guiFocus != 'NoFocus':
                            session.sendKey('esc')
                            session.sendDelay(1,block=True)
                        if session.guiFocus == 'NoFocus':
                            session.sendKey('UI_1')
                            session.sendDelay(1,block=True)
                    result1 = locateImageOnScreen(tab_contactsHL,confidence=0.6)
                    if result1[0] == -1: # Not in contacts Tab
                        session.sendKey('UI_PrevTab') # trick : often in navigation tab,so previous tab is contact
                        session.sendDelay(0.5,block=True)
                        result1 = locateImageOnScreen(tab_contactsHL,confidence=0.6)
                        if result1[0] == -1: # in Transaction tab initially
                            session.sendKey('UI_PrevTab')
                            session.sendDelay(0.5,block=True)
                    # now the cursor should be in the contact tab
                    # WIP: give it a second check for sure
                    session.sendKey('UI_Left',repeat=2)
                    session.sendKey('UI_Right',repeat=2)
                    session.sendDelay(1,block=True)
                    result1=locateImageOnScreen(button_requestDockHL,confidence=0.6)
                    if result1[0]!=-1:
                        session.sendKey('space')
                        session.sendDelay(5,block=True)
                        session.sendKey('esc') # back to main panel and let's check if the docking computer is active
                        session.sendDelay(3,block=True)
                        result1=locateImageOnScreen(sign_autodock,confidence=0.6)
                        if result1[0]!=-1 or session.status == 'docking': # Autodock active
                            machine.set_state('waiting-for-docked')
                        else: # docking request denied
                            session.sleep(10) # sleep for 10s
                elif progress.state=='waiting-for-docked':
                    if (session.status=='Docked'):
                        session.sendDelay(2,block=True)
                        machine.set_state('claim-task-reward')
                elif progress.state=='claim-task-reward': # Auto claim task rewards
                    if session.guiFocus != 'NoFocus' and session.guiFocus != 'StationServices':
                        session.sendKey('esc')
                        session.sendDelay(2,block=True)
                    if session.guiFocus == 'NoFocus':
                        session.sendKey('UI_Up',repeat=3)
                        if locateImageOnScreen(button_fuel,confidence=0.6)[0]!=-1: # Fuel Button
                            session.sendKey('space')
                            session.sendDelay(3,block=True)
                        session.sendDelay(1,block=True)
                        session.sendKey('UI_Down')
                        session.sendDelay(2,block=True)
                        session.sendKey('space') # auto fuel and go to Station Services
                        session.sendDelay(5,block=True)
                    if session.guiFocus == 'StationServices':
                        session.sendKey('UI_Down',hold=3) # trick : make cursor stops at EXIT
                        session.sendDelay(1,block=True)
                        session.sendKey('UI_Up',repeat=3) # goto passenger lounge
                        session.sendDelay(0.5,block=True)
                        session.sendKey('space') # enter passenger lounge
                        # WIP: automated reward collection below is disabled for now.
                        # session.sendDelay(10,block=True)
                        # session.sendKey('UI_Left',repeat=2)
                        # session.sendDelay(1,block=True)
                        # session.sendKey('UI_Down',repeat=5) # at back button
                        # for i in range(3): # 3 mission providers
                        #     session.sendKey('UI_Up')
                        #     session.sendDelay(1,block=True)
                        #     session.sendKey('space')
                        #     session.sendDelay(1,block=True)
                        #     for j in range(10): # failsafe number 10 (in fact the max mission number is 7)
                        #         session.sleep(0.5)
                        #         result = locateImageOnScreen(button_complete_mission,confidence=0.6)
                        #         if result[0]==-1: break # No more mission
                        #         pyautogui.moveTo(result)
                        #         session.sendDelay(2,block=True)
                        #         result1 = locateImageOnScreen(button_complete_missionHL,confidence=0.6)
                        #         if result1[0]==-1 : continue
                        #         pyautogui.click(result1)
                        #         session.sendKey('UI_Left',repeat=4)
                        #         session.sendDelay(0.5,block=True)
                        #         session.sendKey('space')
                        #         session.sendDelay(3,block=True)
                        #         session.sendKey('space')
                        #     session.sendKey('UI_Left')
                        #     session.sendDelay(1,block=True)
                        # print(len(session.missionList))
                        # One full round trip complete: stop automation and reset.
                        auto=False
                        failsafeState = ''
                        machine.set_state('initial')
                # else:
                #     print('claim-task-reward:enter stationservice failed')
                #     auto=False
                #     machine.set_state('initial')
                #     continue
            if align: align = session.align()
            if isDebug:
                # Render the status overlay: state, align flag, ship status/location/target, elapsed time.
                cv2.putText(statusImg,'%s'%progress.state,(10,30),cv2.FONT_HERSHEY_DUPLEX,1,(0,255,0))
                # cv2.putText(statusImg,'GUIFocus:%s'%session.guiFocus,(10,30),cv2.FONT_HERSHEY_DUPLEX,1,(0,255,0))
                cv2.putText(statusImg,"align:%s"%session.isAligned,(500,30),cv2.FONT_HERSHEY_DUPLEX,1,(0,255,0))
                cv2.putText(statusImg,'Status:%s'%session.status,(650,30),cv2.FONT_HERSHEY_DUPLEX,1,(0,255,0))
                cv2.putText(statusImg,'Loc:%s'%session.shipLoc,(900,30),cv2.FONT_HERSHEY_DUPLEX,1,(0,255,0))
                cv2.putText(statusImg,'Target:%s'%session.shipTarget,(1300,30),cv2.FONT_HERSHEY_DUPLEX,1,(0,255,0))
                # cv2.putText(statusImg,'state:%s'%session.stateList,(10,60),cv2.FONT_HERSHEY_DUPLEX,1,(0,255,0))
                cv2.putText(statusImg,'%s'%elapsedTime,(10,60),cv2.FONT_HERSHEY_DUPLEX,1,(0,255,0))
                cv2.imshow('status',statusImg)
                statusImg.fill(0)
                cv2.waitKey(1)
        # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
        # `except Exception:` would be safer — confirm before changing behavior.
        except:
            traceback.print_exc()
session.stop() | StarcoderdataPython |
1621455 | '''
## Problem 🤔
You are given an array of k linked-lists lists, each linked-list is sorted in ascending order.
Merge all the linked-lists into one sorted linked-list and return it.
**Example 1**
`Input: lists = [[1,4,5],[1,3,4],[2,6]]`
`Output: [1,1,2,3,4,4,5,6]`
_Explanation_
The linked-lists are:
```
[
1->4->5,
1->3->4,
2->6
]
```
merging them into one sorted list:
```
1->1->2->3->4->4->5->6
```
**Example 2**
`Input: lists = []`
`Output: []`
**Example 3**
`Input: lists = [[]]`
`Output: []`
**Note**
- k == lists.length
- 0 <= k <= 10^4
- 0 <= lists[i].length <= 500
- -10^4 <= lists[i][j] <= 10^4
- lists[i] is sorted in ascending order.
- The sum of lists[i].length won't exceed 10^4
'''
class ListNode:
    """Node of a singly linked list: a value plus a reference to the next node."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class NaiveSolution(object):
    """Merge k sorted linked lists by collecting and sorting all values.

    Runs in O(N log N) time and O(N) extra space, where N is the total
    number of nodes across all input lists.
    """

    def mergeKLists(self, lists):
        """Return the head of one sorted list built from all nodes in *lists*."""
        # Gather every value from every input list.
        values = []
        for node in lists:
            while node:
                values.append(node.val)
                node = node.next
        # Rebuild a fresh sorted chain behind a dummy head.
        head = tail = ListNode(None)
        for value in sorted(values):
            tail.next = ListNode(value)
            tail = tail.next
        return head.next
class Solution:
    """Merge k sorted linked lists with a min-heap (O(N log k))."""

    def mergeKLists(self, lists):
        """Return the head of one sorted list built from all nodes in *lists*.

        Keeps one (value, list-index) entry per list in a min-heap; the index
        also breaks ties between equal values so tuples always compare.
        """
        # BUG FIX: this module never imported heapq, so the original raised
        # NameError at runtime; import it locally here.
        import heapq

        heap = [(l.val, idx) for idx, l in enumerate(lists) if l]
        heapq.heapify(heap)
        cur = ListNode(None)  # dummy head
        dummy_node = cur
        while heap:
            val, idx = heapq.heappop(heap)
            cur.next = ListNode(val)
            cur = cur.next
            # Advance the source list and push its next value, if any.
            lists[idx] = lists[idx].next
            if lists[idx]:
                heapq.heappush(heap, (lists[idx].val, idx))
        return dummy_node.next
| StarcoderdataPython |
12851078 | <reponame>thedrow/ray<filename>doc/examples/doc_code/raysgd_torch_signatures.py
# flake8: noqa
"""
This file holds code for the Pytorch Trainer creator signatures.
It ignores yapf because yapf doesn't allow comments right after code blocks,
but we put comments right after code blocks to prevent large white spaces
in the documentation.
"""
# yapf: disable
# __torch_model_start__
import torch.nn as nn
def model_creator(config):
    """Build the model(s) that the trainer will optimize.

    Returning more than one model requires supplying a custom training
    function that defines how the models are optimized together.

    Args:
        config (dict): Configuration dictionary passed into ``PyTorchTrainer``.

    Returns:
        One or more torch.nn.Module objects.
    """
    model = nn.Linear(1, 1)
    return model
# __torch_model_end__
# __torch_optimizer_start__
import torch
def optimizer_creator(model, config):
    """Build the Torch optimizer(s) for the given model(s).

    Args:
        model: The return value(s) from ``model_creator`` — one or more
            torch nn modules.
        config (dict): Configuration dictionary passed into ``PyTorchTrainer``.

    Returns:
        One or more Torch optimizer objects.
    """
    learning_rate = config.get("lr", 1e-4)
    return torch.optim.SGD(model.parameters(), lr=learning_rate)
# __torch_optimizer_end__
# __torch_data_start__
from ray.util.sgd.pytorch.examples.train_example import LinearDataset
def data_creator(config):
    """Build the torch.utils.data.Dataset objects for training/validation.

    Two datasets may be returned, but only the first is used for training.

    Args:
        config: Configuration dictionary passed into ``PyTorchTrainer``.

    Returns:
        One or two Dataset objects. With only one dataset,
        ``trainer.validate()`` raises a ValueError.
    """
    train_set = LinearDataset(2, 5)
    validation_set = LinearDataset(2, 5, size=400)
    return train_set, validation_set
# __torch_data_end__
# __torch_loss_start__
import torch
def loss_creator(config):
"""Constructs the Torch Loss object.
Note that optionally, you can pass in a Torch Loss constructor directly
into the PyTorchTrainer (i.e., ``PyTorchTrainer(loss_creator=nn.BCELoss, ...)``).
Args:
config: Configuration dictionary passed into ``PyTorchTrainer``
Returns:
Torch Loss object.
"""
return torch.nn.BCELoss()
# __torch_loss_end__
# __torch_scheduler_start__
import torch
def scheduler_creator(optimizer, config):
    """Build the learning-rate scheduler(s) for the given optimizer(s).

    Args:
        optimizer: The return value(s) from ``optimizer_creator`` — one or
            more torch optimizer objects.
        config: Configuration dictionary passed into ``PyTorchTrainer``.

    Returns:
        One or more Torch scheduler objects.
    """
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)
    return scheduler
# __torch_scheduler_end__
# __torch_ray_start__
import ray
# Start a local Ray runtime before constructing the trainer.
ray.init()
# or ray.init(address="auto") to connect to a running cluster.
# __torch_ray_end__
# __torch_trainer_start__
from ray.util.sgd import PyTorchTrainer
# Assemble the distributed trainer from the creator functions defined above.
trainer = PyTorchTrainer(
    model_creator,
    data_creator,
    optimizer_creator,
    loss_creator=nn.MSELoss,
    scheduler_creator=scheduler_creator,
    config={"lr": 0.001})
# __torch_trainer_end__
| StarcoderdataPython |
1822691 | <gh_stars>1-10
# pip install Pillow
from PIL import Image

# The image we sent
im = Image.open("orig.png")
pix = im.load()

# The image they return (indexed); convert so pixels compare as RGB tuples.
im_mod = Image.open("mod.png")
im_mod = im_mod.convert("RGB")
pix_mod = im_mod.load()

# Print each pixel of both images side by side for comparison.
# BUG FIX: `xrange` is Python 2 only and raises NameError on Python 3
# (which Pillow targets); use `range` instead.
for i in range(im.size[0]):
    for j in range(im.size[1]):
        print("{0} x {1}: {2} {3}".format(i, j, pix[i, j], pix_mod[i, j]))
| StarcoderdataPython |
3391400 | from typing import Any, Final, TypedDict
import numpy as np
import numpy.typing as npt
# Runtime-created TypedDict with one required key, "Hello" (a str).
# NOTE(review): annotating as Final[Any] hides the actual type from
# checkers — presumably intentional here; confirm.
HelloWorldType: Final[Any] = TypedDict("HelloWorldType", {"Hello": str})
# Alias for a NumPy ndarray of the default integer dtype.
IntegerArrayType: Final[Any] = npt.NDArray[np.int_]
| StarcoderdataPython |
1873812 | import signal
def sigterm(x, y):
pass
signal.signal(signal.SIGTERM, sigterm)
print("hello pipenv-docker-development world!", flush=True)
signal.sigwait([signal.SIGTERM])
print("shutdown...", flush=True)
| StarcoderdataPython |
4885236 | <reponame>jaypirates/Pull-Request-Predictor
# Author: <NAME>
import json
import os

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

# Locate ../data/norm_output.json, which holds the normalized PR data.
# NOTE(review): os.chdir mutates the process-wide CWD; kept for backward
# compatibility with the original script layout.
os.chdir('..')
filepath = os.path.join(os.getcwd(), "data", "norm_output.json")

# Load the records; `with` guarantees the handle is closed
# (the original opened the file and never closed it).
with open(filepath, "r") as f:
    json_data = json.load(f)

# One list per feature, plus the target (whether the PR was merged).
x_status_check = []
x_user_type = []
x_user_contr = []
x_comment_count = []
x_time_duration = []
y = []

# Parse each record into the feature lists.
for data_object in json_data:
    x_status_check.append(data_object['pr_status_check'])
    x_user_type.append(data_object['pr_user_type'])
    x_user_contr.append(data_object['pr_user_contr'])
    x_comment_count.append(data_object['pr_comment_count'])
    x_time_duration.append(data_object['pr_time_duration'])
    y.append(data_object['pr_is_merged'])

# Convert to (n_samples, 1) column vectors as scikit-learn expects.
x_array_status_check = np.array(x_status_check).reshape(-1, 1)
x_array_user_type = np.array(x_user_type).reshape(-1, 1)
x_array_user_contr = np.array(x_user_contr).reshape(-1, 1)
x_array_comment_count = np.array(x_comment_count).reshape(-1, 1)
x_array_time_duration = np.array(x_time_duration).reshape(-1, 1)
y_array = np.array(y).reshape(-1, 1)

# Fit one single-feature linear regression per feature and report R-squared.
reg_status_check = LinearRegression().fit(x_array_status_check, y_array)
reg_user_type = LinearRegression().fit(x_array_user_type, y_array)
reg_user_contr = LinearRegression().fit(x_array_user_contr, y_array)
reg_comment_count = LinearRegression().fit(x_array_comment_count, y_array)
reg_time_duration = LinearRegression().fit(x_array_time_duration, y_array)
print("PR Status Check R-Squared Score = ", reg_status_check.score(x_array_status_check, y_array))
print("PR User Type R-Squared Score = ", reg_user_type.score(x_array_user_type, y_array))
print("PR User Contribution R-Squared Score = ", reg_user_contr.score(x_array_user_contr, y_array))
print("PR Comment Count R-Squared Score = ", reg_comment_count.score(x_array_comment_count, y_array))
print("PR Time Duration R-Squared Score = ", reg_time_duration.score(x_array_time_duration, y_array))

# Plot each feature against the target in a 3x2 grid (last cell unused).
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(x_array_status_check, y_array)
axs[0, 0].set_title('Status Check R-Squared')
axs[0, 1].plot(x_array_user_type, y_array, 'tab:orange')
axs[0, 1].set_title('User Type R-Squared')
axs[1, 0].plot(x_array_user_contr, y_array, 'tab:green')
axs[1, 0].set_title('User Contribution R-Squared')
axs[1, 1].plot(x_array_comment_count, y_array, 'tab:red')
axs[1, 1].set_title('Comment Count R-Squared')
axs[2, 0].plot(x_array_time_duration, y_array, 'tab:purple')
axs[2, 0].set_title('Time Duration R-Squared')
plt.show()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.