| id | text | dataset_id |
|---|---|---|
3306757 | <gh_stars>0
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Import Voodoo camera",
"author": "<NAME>",
"version": (0, 8),
"blender": (2, 63, 0),
"location": "File > Import > Voodoo camera",
"description": "Imports a Blender (2.4x or 2.5x) Python script from the Voodoo (version 1.1 or 1.2) camera tracker software.",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Voodoo_camera",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Import-Export"}
"""
This script loads a Blender Python script from the Voodoo camera
tracker program into Blender 2.5x+.
It processes the script as a text file and not as a Python executable
because of the incompatible Python APIs of Blender 2.4x/2.5x/2.6x.
"""
import bpy
from bpy.props import *
import mathutils
import os
import string
import math
def voodoo_import(infile,ld_cam,ld_points):
checktype = os.path.splitext(infile)[1]
if checktype.upper() != '.PY':
print ("Selected file: ",infile)
raise IOError("The selected input file is not a *.py file")
print ("--------------------------------------------------")
print ("Importing Voodoo file: ", infile)
file = open(infile,'rU')
scene = bpy.context.scene
initfr = scene.frame_current
b24= True
voodoo_import.frwas= False
dummy = bpy.data.objects.new('voodoo_scene', None)
dummy.location = (0.0, 0.0, 0.0)
dummy.rotation_euler = ( -3.141593/2, 0.0, 0.0)
dummy.scale = (0.2, 0.2, 0.2)
scene.objects.link(dummy)
if ld_cam:
data = bpy.data.cameras.new('voodoo_render_cam')
data.lens_unit= 'DEGREES'
vcam = bpy.data.objects.new('voodoo_render_cam', data)
vcam.location = (0.0, 0.0, 0.0)
vcam.rotation_euler = (0.0, 0.0, 0.0)
vcam.scale = (1.0, 1.0, 1.0)
data.lens = 35.0
data.shift_x = 0.0
data.shift_y = 0.0
data.dof_distance = 0.0
data.clip_start = 0.1
data.clip_end = 1000.0
data.draw_size = 0.5
scene.objects.link(vcam)
vcam.parent = dummy
if ld_points:
data = bpy.data.meshes.new('voodoo_FP3D_cloud')
mesh = bpy.data.objects.new('voodoo_FP3D_cloud', data)
mesh.location = (0.0, 0.0, 0.0)
# before 6.3
# mesh.rotation_euler = (3.141593/2, 0.0, 0.0)
# mesh.scale = (5.0, 5.0, 5.0)
mesh.rotation_euler = (0.0, 0.0, 0.0)
mesh.scale = (1.0, 1.0, 1.0)
scene.objects.link(mesh)
mesh.parent = dummy
verts = []
def stri(s):
try:
ret= int(s,10)
except ValueError :
ret= -1
return ret
def process_line(line):
lineSplit = line.split()
if (len(lineSplit) < 1): return
if (line[0] == '#'): return
if b24:
# Blender 2.4x mode
# process camera commands
if ld_cam:
if (line[0] == 'c' and line[1] != 'r'):
pos= line.find('.lens')
if (pos != -1):
fr = stri(lineSplit[0][1:pos])
if (fr >= 0):
scene.frame_current = fr
vcam.data.lens= float(lineSplit[2])
vcam.data.keyframe_insert('lens')
return
if (line[0] == 'o'):
pos= line.find('.setMatrix')
if (pos != -1):
fr = stri(lineSplit[0][1:pos])
if (fr >= 0):
scene.frame_current = fr
# for up to 2.55
# vcam.matrix_world = eval('mathutils.' + line.rstrip()[pos+21:-1])
# for 2.56, from Michael (Meikel) Oetjen
# vcam.matrix_world = eval('mathutils.Matrix(' + line.rstrip()[pos+28:-2].replace('[','(',4).replace(']',')',4) + ')')
# for 2.57
# vcam.matrix_world = eval('mathutils.Matrix([' + line.rstrip()[pos+28:-2] + '])')
# for 2.63
vcam.matrix_world = eval('(' + line.rstrip()[pos+27:-1] + ')')
vcam.keyframe_insert('location')
vcam.keyframe_insert('scale')
vcam.keyframe_insert('rotation_euler')
return
# process mesh commands
if ld_points:
if (line[0] == 'v'):
pos= line.find('NMesh.Vert')
if (pos != -1):
verts.append(eval(line[pos+10:]))
return
# Blender 2.5x mode
# process camera commands
if ld_cam:
pos= line.find('set_frame')
if (pos == -1):
pos= line.find('frame_set')
if (pos == -1):
pos= lineSplit[0].find('frame_current')
if (pos != -1):
fr= stri(lineSplit[2])
if (fr >= 0):
scene.frame_current = fr
voodoo_import.frwas= True
return
if (pos != -1):
fr= stri(line[pos+10:-2])
if (fr >= 0):
scene.frame_current = fr
voodoo_import.frwas= True
return
if voodoo_import.frwas:
pos= line.find('data.lens')
if (pos != -1):
vcam.data.lens= float(lineSplit[2])
vcam.data.keyframe_insert('lens')
return
pos= line.find('.Matrix')
if (pos != -1):
# for up to 2.55
# vcam.matrix_world = eval('mathutils' + line[pos:])
# for 2.56
# if (line[pos+8] == '['):
# # from Michael (Meikel) Oetjen
# vcam.matrix_world = eval('mathutils.Matrix((' + line.rstrip()[pos+9:-1].replace('[','(',3).replace(']',')',4) + ')')
# else:
# vcam.matrix_world = eval('mathutils' + line[pos:])
# for 2.57
# vcam.matrix_world = eval('mathutils.Matrix([' + line.rstrip()[pos+8:-1] + '])')
# for 2.63
vcam.matrix_world = eval('(' + line.rstrip()[pos+7:] + ')')
return
pos= line.find('.matrix_world')
if (pos != -1):
vcam.matrix_world = eval(line.rstrip()[line.find('=')+1:])
return
pos= line.find('.location')
if (pos != -1):
vcam.location = eval(line[line.find('=')+1:])
return
pos= line.find('.rotation_euler')
if (pos != -1):
vcam.rotation_euler = eval(line[line.find('=')+1:])
return
pos= line.find('.data.keyframe_insert')
if (pos != -1):
vcam.data.keyframe_insert(eval(line[pos+22:-2]))
return
pos= line.find('.keyframe_insert')
if (pos != -1):
vcam.keyframe_insert(eval(line[pos+17:-2]))
return
# process mesh commands
if ld_points:
pos= line.find('.append')
if (pos != -1):
verts.append(eval(line[pos+8:-2]))
#read lines
for line in file.readlines():
if (b24 and (line.find('import') != -1) and (line.find('bpy') != -1)):
b24= False
process_line(line)
scene.frame_set(initfr)
if ld_points:
mesh.data.from_pydata(verts, [], [])
mesh.data.update()
# Operator
class ImportVoodooCamera(bpy.types.Operator):
""""""
bl_idname = "import.voodoo_camera"
bl_label = "Import Voodoo camera"
bl_description = "Load a Blender export script from the Voodoo motion tracker"
bl_options = {'REGISTER', 'UNDO'}
filepath = StringProperty(name="File Path",
description="Filepath used for processing the script",
maxlen= 1024,default= "")
# filter_python = BoolProperty(name="Filter python",
# description="",default=True,options={'HIDDEN'})
load_camera = BoolProperty(name="Load camera",
description="Load the camera",
default=True)
load_points = BoolProperty(name="Load points",
description="Load the FP3D point cloud",
default=True)
def execute(self, context):
voodoo_import(self.filepath,self.load_camera,self.load_points)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
# Registering / Unregister
def menu_func(self, context):
self.layout.operator(ImportVoodooCamera.bl_idname, text="Voodoo camera", icon='PLUGIN')
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func)
if __name__ == "__main__":
register()
| StarcoderdataPython |
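A minimal usage sketch for the importer above (assumes it runs inside Blender's Python environment; the file path is hypothetical):

# Load a Voodoo export directly, importing both the camera and the point cloud.
voodoo_import("/path/to/voodoo_export.py", ld_cam=True, ld_points=True)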
3267410 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import subprocess
import tempfile
#TODO: Change scrot backend to ImageMagick
# Edit the image
# https://wiki.archlinux.org/index.php/Taking_a_Screenshot
SCREENSHOT_SOUND = '/usr/share/sounds/scru_shot.wav'
class ScrotNotFound(Exception):
"""scrot must be installed"""
def grab(filename, select, sound, quality, delay, optipng):
"""Grab the screen as binary file"""
if not filename:
# Temporary file
f = tempfile.NamedTemporaryFile(suffix='.png',
prefix='screenshot_scrot_')
filename = f.name
grab_filename(filename, select, sound, quality, delay, optipng)
# Open the temp screenshot
return open(filename, 'rb')
def grab_filename(filename, select, sound, quality, delay, optipng):
"""Grab the screen as image file"""
# Wrap of scrot command
cmd = ['scrot', filename]
opng = ['optipng', '-preserve', '-quiet', filename]
#mogrify = ['mogrify', '-depth', '8', filename]
if select:
cmd.append('-s')
cmd.append('-b') #show window decoration (border)
if quality:
cmd.append('-q%d' % quality)
if delay:
# delay and show regresive count
cmd.append('-d%d' % delay)
cmd.append('-c')
try:
subprocess.call(cmd)
if sound:
play_screenshot_sound()
if optipng:
subprocess.Popen(opng)
except Exception, e:
print e
raise ScrotNotFound
def play_screenshot_sound():
""""Play a sound of a camera shot"""
try:
# Player for alsa
subprocess.call(['aplay', '-q', SCREENSHOT_SOUND])
except OSError:
# Player for oss
subprocess.call(['ossplay', '-q', SCREENSHOT_SOUND])
| StarcoderdataPython |
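A short usage sketch for the scrot wrapper above (assumes scrot is installed; passing filename=None makes grab() write to a temporary file and return an open binary handle):

# Full-screen shot after a 3-second countdown, no shutter sound, no optipng pass.
shot = grab(None, select=False, sound=False, quality=85, delay=3, optipng=False)
data = shot.read()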
165875 | <filename>sql_queries.py
# DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS f_songplays;"
user_table_drop = "DROP TABLE IF EXISTS d_users;"
song_table_drop = "DROP TABLE IF EXISTS d_songs;"
artist_table_drop = "DROP TABLE IF EXISTS d_artists;"
time_table_drop = "DROP TABLE IF EXISTS d_times;"
# CREATE TABLES
user_table_create = ("CREATE TABLE IF NOT EXISTS d_users \
(user_id int PRIMARY KEY, first_name varchar(80) NOT NULL, last_name varchar(100) NOT NULL, \
gender character NOT NULL, level varchar(10) NOT NULL);")
song_table_create = ("CREATE TABLE IF NOT EXISTS d_songs \
(song_id varchar(18) PRIMARY KEY, title varchar(200) NOT NULL, artist_id varchar(18) NOT NULL, \
year int, duration numeric);")
artist_table_create = ("CREATE TABLE IF NOT EXISTS d_artists \
(artist_id varchar(18) PRIMARY KEY, name varchar(200) NOT NULL, \
location varchar(200), latitude numeric, longitude numeric);")
time_table_create = ("CREATE TABLE IF NOT EXISTS d_times \
(start_time timestamp PRIMARY KEY, hour smallint, day smallint NOT NULL, \
week smallint NOT NULL, month smallint NOT NULL, year smallint NOT NULL, weekday smallint NOT NULL);")
songplay_table_create = ("CREATE TABLE IF NOT EXISTS f_songplays \
(songplay_id serial PRIMARY KEY, start_time timestamp REFERENCES d_times(start_time), \
user_id int REFERENCES d_users(user_id), level varchar(10), \
song_id varchar(18) REFERENCES d_songs(song_id), artist_id varchar(18) REFERENCES d_artists(artist_id), \
session_id int, location varchar(200), user_agent varchar(200));")
# INSERT RECORDS
# use DEFAULT value for songplay_id as it is a serial
songplay_table_insert = ("INSERT INTO f_songplays \
(start_time, user_id, level, song_id, artist_id, session_id, location, user_agent) \
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
# user data is time dependent: a user's level can change, so upsert to keep the latest value
# use named arguments (as dictionary) to deal with ON CONFLICT UPDATE
# https://www.psycopg.org/docs/usage.html
user_table_insert = ("INSERT INTO d_users \
(user_id, first_name, last_name, gender, level) \
VALUES(%(user_id)s, %(first_name)s, %(last_name)s, %(gender)s, %(level)s) \
ON CONFLICT (user_id) DO UPDATE \
SET level = EXCLUDED.level")
# no conflicts expected here; ignore duplicate song_ids just in case
song_table_insert = ("INSERT INTO d_songs \
(song_id, title, artist_id, year, duration) \
VALUES(%s, %s, %s, %s, %s) \
ON CONFLICT DO NOTHING")
# no conflicts expected here; ignore duplicate artist_ids just in case
artist_table_insert = ("INSERT INTO d_artists \
(artist_id, name, location, latitude, longitude) \
VALUES(%s, %s, %s, %s, %s) \
ON CONFLICT DO NOTHING")
# in case of conflict do nothing
time_table_insert = ("INSERT INTO d_times \
(start_time, hour, day, week, month, year, weekday) \
VALUES(%s, %s, %s, %s, %s, %s, %s) \
ON CONFLICT DO NOTHING")
# FIND SONGS
song_select = ("SELECT song_id, d_artists.artist_id FROM ( \
d_songs JOIN d_artists ON d_artists.artist_id=d_songs.artist_id ) \
WHERE title=%s AND name=%s AND duration=%s;")
# QUERY LISTS
create_table_queries = [user_table_create, song_table_create,
artist_table_create, time_table_create, songplay_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
| StarcoderdataPython |
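A minimal sketch of executing the dictionary-style upsert above with psycopg2 (connection parameters and values are hypothetical):

import psycopg2

conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
# Named %(key)s placeholders are filled from a dict, which keeps the
# ON CONFLICT ... DO UPDATE statement readable.
cur.execute(user_table_insert, {
    "user_id": 42,
    "first_name": "Jane",
    "last_name": "Doe",
    "gender": "F",
    "level": "paid",
})
conn.commit()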
104574 | <filename>neurokit2/complexity/entropy_differential.py
import numpy as np
import pandas as pd
import scipy.stats
def entropy_differential(signal, base=2, **kwargs):
"""**Differential entropy (DiffEn)**
Differential entropy (DiffEn; also referred to as continuous entropy) originated as
Shannon's attempt to extend Shannon entropy to continuous distributions. However,
differential entropy has issues of its own: for example, it can be negative even for
simple distributions (such as the uniform distribution).
This function can be called either via ``entropy_differential()`` or ``complexity_diffen()``.
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
base : float
The logarithmic base to use, defaults to ``2``, giving a unit in *bits*. Note that
``scipy.stats.entropy()`` uses Euler's number (``np.e``) as default (the natural
logarithm), giving a measure of information expressed in *nats*.
**kwargs : optional
Other arguments passed to ``scipy.stats.differential_entropy()``.
Returns
--------
diffen : float
The Differential entropy of the signal.
info : dict
A dictionary containing additional information regarding the parameters used
to compute Differential entropy.
See Also
--------
entropy_shannon, entropy_cumulativeresidual, entropy_kl
Examples
----------
.. ipython:: python
import neurokit2 as nk
# Simulate a Signal with Laplace Noise
signal = nk.signal_simulate(duration=2, frequency=5, noise=0.1)
# Compute Differential Entropy
diffen, info = nk.entropy_differential(signal)
diffen
References
-----------
* `scipy.stats.differential_entropy()
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.differential_entropy.html>`_
* https://en.wikipedia.org/wiki/Differential_entropy
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Convert the input to a list unless it is a string (e.g., 'ABBA' stays as-is)
if not isinstance(signal, str):
signal = list(signal)
if "method" in kwargs:
method = kwargs["method"]
kwargs.pop("method")
else:
method = "vasicek"
diffen = scipy.stats.differential_entropy(signal, method=method, base=base, **kwargs)
return diffen, {"Method": method, "Base": base}
| StarcoderdataPython |
47237 | import sqlite3
import datetime
import time
import logging
import os
from bot_constant import CQ_ROOT
CQ_IMAGE_ROOT = os.path.join(CQ_ROOT, r'data/image')
logger = logging.getLogger("CTB." + __name__)
class FileDB:
def __init__(self, db_name: str):
self.conn = sqlite3.connect(db_name, check_same_thread=False)
self.cursor = self.conn.cursor()
self.db_name = db_name
self.table_name = 'file_ids'
self.cursor.execute(f"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='{self.table_name}';")
result = self.cursor.fetchall()
if not result[0][0]:
"""
FileDB contains:
filename: str
file_type: str
file_md5: str
fileid_tg: str
size: str, bytes
last_usage_date: int, unix timestamp
usage_count: int
"""
self.cursor.execute(f"create table {self.table_name} (download_date int primary key,"
f"filename text, file_type text, file_md5 text, fileid_tg text, file_size int,"
f" last_usage_date int, usage_count int)")
self.cursor.execute(f"create unique index md5_index on {self.table_name}(file_md5);")
self.cursor.execute(f"create unique index fileid_index on {self.table_name}(fileid_tg);")
self.conn.commit()
def get_filename_by_fileid(self, fileid_tg: str):
"""
acquire filename by fileid
:param fileid_tg: telegram file id
:return: filename
"""
self.cursor.execute(f"select usage_count, filename from '{self.table_name}'"
f" where fileid_tg='{fileid_tg}'")
result = self.cursor.fetchall()
if result:
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
self.cursor.execute(
f"update '{self.table_name}' set last_usage_date=?, usage_count=? where fileid_tg=?;",
(timestamp, result[0][0]+1, fileid_tg))
self.conn.commit()
return result[0][1]
return False
def get_fileid_by_md5(self, file_md5):
"""
acquire fileid by md5
:param file_md5: md5
:return: fileid
"""
self.cursor.execute(f"select usage_count, fileid_tg, file_type from '{self.table_name}' where file_md5='{file_md5}'")
result = self.cursor.fetchall()
if result:
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
self.cursor.execute(
f"update '{self.table_name}' set last_usage_date=?, usage_count=? where file_md5=?;",
(timestamp, result[0][0]+1, file_md5))
self.conn.commit()
return {'file_id': result[0][1], 'file_type': result[0][2]}
return False
def qq_add_resource(self, filename: str, file_type: str, file_md5: str, file_size: int, fileid_tg):
"""
add resource received by qq
:param filename:
:param file_type:
:param file_md5:
:param file_size:
:param fileid_tg:
"""
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
self.cursor.execute(f"insert into '{self.table_name}' "
f"(download_date, filename, file_type, file_md5, fileid_tg, file_size,"
f" last_usage_date, usage_count)"
f"values (?, ?, ?, ?, ?, ?, ?, ?)",
(timestamp, filename, file_type, file_md5, fileid_tg, file_size, timestamp, 1))
self.conn.commit()
def tg_add_resource(self, fileid_tg: str, filename: str, file_type: str, file_md5: str, file_size: int):
"""
add resource acquired by telegram
:param fileid_tg:
:param filename:
:param file_type:
:param file_md5:
:param file_size:
"""
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
self.cursor.execute(f"insert into '{self.table_name}' "
f"(download_date, filename, file_type, file_md5, fileid_tg, file_size,"
f" last_usage_date, usage_count)"
f"values (?, ?, ?, ?, ?, ?, ?, ?)",
(timestamp, filename, file_type, file_md5, fileid_tg, file_size, timestamp, 1))
self.conn.commit()
@staticmethod
def calculate_real_size():
"""
calculate real size
real size is the size of the directory
:return: size in bytes
"""
real_size = 0
for root, dirs, files in os.walk(CQ_IMAGE_ROOT):
real_size += sum([os.path.getsize(os.path.join(root, name)) for name in files])
return real_size
def calculate_db_size(self):
"""
calculate db size
db size is the sum of size in db
:return: size in bytes
"""
self.cursor.execute(f"select sum(file_size) from {self.table_name}")
db_size = self.cursor.fetchall()[0][0]
return db_size
def purge_half(self):
"""
purge half of the cache using an LRU eviction policy (stub; see the sketch after this module)
:return:
"""
pass
def purge_all(self):
"""
remove old data and reconstruct db
:return:
"""
self.conn.close()
os.remove(self.db_name)
for i in os.listdir(CQ_IMAGE_ROOT):
path_file = os.path.join(CQ_IMAGE_ROOT, i)
if os.path.isfile(path_file):
os.remove(path_file)
self.conn = sqlite3.connect(self.db_name, check_same_thread=False)
self.cursor = self.conn.cursor()
self.cursor.execute(f"create table {self.table_name} (download_date int primary key,"
f"filename text, file_type text, file_md5 text, fileid_tg text, file_size int,"
f" last_usage_date int, usage_count int)")
self.cursor.execute(f"create unique index md5_index on {self.table_name}(file_md5);")
self.cursor.execute(f"create unique index fileid_index on {self.table_name}(fileid_tg);")
self.conn.commit()
def purge_one_time(self):
"""
purge one time usage file
:return: purged size
"""
purged_size = 0
self.cursor.execute(f"select download_date, filename, file_size from {self.table_name} where usage_count=1")
data = self.cursor.fetchall()
for entry in data:
self.cursor.execute(f"delete from {self.table_name} where download_date=?", (str(entry[0]),))
if os.path.exists(entry[1]):
os.remove(os.path.join(CQ_IMAGE_ROOT, entry[1]))
purged_size += entry[2]
self.conn.commit()
return purged_size
def sync_cache(self):
"""
sync cache status with db, this will remove file records from db
:return: size reduced
"""
size_reduced = 0
self.cursor.execute(f"select download_date, filename, file_size from {self.table_name}")
data = self.cursor.fetchall()
for entry in data:
if not os.path.exists(os.path.join(CQ_IMAGE_ROOT, entry[1])):
self.cursor.execute(f"delete from {self.table_name} where download_date=?", (str(entry[0]),))
size_reduced += entry[2]
self.conn.commit()
return size_reduced
def __del__(self):
self.conn.close()
| StarcoderdataPython |
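purge_half() above is left as a stub; here is a minimal LRU-style sketch, assuming "least recently used" means the oldest last_usage_date rows, and that it runs alongside the FileDB module so os and CQ_IMAGE_ROOT are in scope:

def purge_half_lru(db):
    """Evict the least recently used half of the cache (illustrative sketch, not the original code)."""
    db.cursor.execute(f"select count(*) from {db.table_name}")
    half = db.cursor.fetchall()[0][0] // 2
    db.cursor.execute(f"select download_date, filename from {db.table_name} "
                      f"order by last_usage_date asc limit ?", (half,))
    for download_date, filename in db.cursor.fetchall():
        db.cursor.execute(f"delete from {db.table_name} where download_date=?", (str(download_date),))
        path = os.path.join(CQ_IMAGE_ROOT, filename)
        if os.path.exists(path):
            os.remove(path)
    db.conn.commit()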
3366727 | #!/usr/bin/python
from datetime import datetime
from datetime import timedelta
#every = 4.9578 # ms
every = 120
# returns the elapsed milliseconds since the start of the program
def millis():
dt = datetime.now() - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
return ms
start_time = datetime.now()
pingTimer = 0
while True:
if millis() >= pingTimer:
pingTimer += every
print(pingTimer) | StarcoderdataPython |
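The pingTimer += every pattern above is drift-free: each deadline advances by a fixed step rather than being re-measured from "now", so late wake-ups do not accumulate. A Python 3 sketch of the same idea using time.monotonic:

import time

every = 0.120  # seconds between ticks
deadline = time.monotonic()
while True:
    if time.monotonic() >= deadline:
        deadline += every  # advance by a fixed step so lateness does not accumulate
        print(deadline)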
1763179 | <filename>pycuda/sparse/inner.py
from __future__ import division
from __future__ import absolute_import
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import atexit
STREAM_POOL = []
def get_stream():
if STREAM_POOL:
return STREAM_POOL.pop()
else:
return drv.Stream()
class AsyncInnerProduct:
def __init__(self, a, b, pagelocked_allocator):
self.gpu_result = gpuarray.dot(a, b)
self.gpu_finished_evt = drv.Event()
self.gpu_finished_evt.record()
self.gpu_finished = False
self.pagelocked_allocator = pagelocked_allocator
def get_host_result(self):
if not self.gpu_finished:
if self.gpu_finished_evt.query():
self.gpu_finished = True
self.copy_stream = get_stream()
self.host_dest = self.pagelocked_allocator(
self.gpu_result.shape, self.gpu_result.dtype, self.copy_stream
)
drv.memcpy_dtoh_async(
self.host_dest, self.gpu_result.gpudata, self.copy_stream
)
self.copy_finished_evt = drv.Event()
self.copy_finished_evt.record()
else:
if self.copy_finished_evt.query():
STREAM_POOL.append(self.copy_stream)
return self.host_dest
def _at_exit():
STREAM_POOL[:] = []
atexit.register(_at_exit)
| StarcoderdataPython |
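A usage sketch for AsyncInnerProduct above (assumes a CUDA device and pycuda.autoinit; the page-locked allocator shown is one plausible callback, not part of the original module):

import numpy as np
import pycuda.autoinit  # noqa: F401 -- initializes the CUDA context
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray

def pagelocked_allocator(shape, dtype, stream):
    # The stream argument is part of the expected callback signature but unused here.
    return drv.pagelocked_empty(shape, dtype)

a = gpuarray.to_gpu(np.random.rand(1024).astype(np.float32))
b = gpuarray.to_gpu(np.random.rand(1024).astype(np.float32))
ip = AsyncInnerProduct(a, b, pagelocked_allocator)

result = None
while result is None:  # poll until the GPU result has been copied back to the host
    result = ip.get_host_result()
print(result)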
1743658 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inline file contents in destination file.
Replaces special template syntax (e.g., `%filename.ext%`) in the destination file.
Usage: inline_file_content.py <dest-file> <src-file> [<src-file>...]
"""
import os
import re
import sys
def main():
dest_file_path = sys.argv[1]
src_paths = sys.argv[2:]
with open(dest_file_path, "r") as f:
dest_content = f.read()
template_name_to_content = {}
for src_path in src_paths:
with open(src_path, "r") as f:
content = f.read()
src_basename = os.path.basename(src_path)
if src_basename in template_name_to_content:
raise ValueError("Duplicate src basename: %s" % src_basename)
template_name_to_content[src_basename] = content
search_key = (
"%("
+ "|".join([re.escape(key) for key in template_name_to_content])
+ ")%"
)
def replace_key_with_content(match):
return template_name_to_content[match.group(1)]
inlined_content = re.sub(search_key, replace_key_with_content, dest_content)
sys.stdout.write(inlined_content)
if __name__ == "__main__":
main()
| StarcoderdataPython |
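A quick invocation sketch (file names are hypothetical): given a template.md containing the token %snippet.py%, the script prints template.md with that token replaced by the contents of snippet.py:

import subprocess
import sys

out = subprocess.run(
    [sys.executable, "inline_file_content.py", "template.md", "snippet.py"],
    capture_output=True, text=True, check=True,
)
print(out.stdout)  # template.md with %snippet.py% replaced inline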
1677310 | from django.urls import path
from django.contrib.auth.decorators import login_required
from .views import HomePageView, ProfileView, ProfileEditView
urlpatterns = [
path('', HomePageView.as_view(), name='home'),
path('profile', login_required(ProfileView.as_view()), name='profile'),
path('profile/edit', login_required(ProfileEditView.as_view()), name='profile_edit'),
] | StarcoderdataPython |
1669822 | <gh_stars>0
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from dft.core.dftnode import dftNode
from dft.generated.dft_pb2_grpc import AdminAPIServicer
class AdminAPIService(AdminAPIServicer):
# TODO: Separate the Service from the node model
def __init__(self, dftnode: dftNode):
self.dftnode = dftnode
| StarcoderdataPython |
3399751 | <filename>main/gui/Frames/HomeFrame.py
from tkinter import Button, Frame
import os
from main.gui.Utilities.Settings import Settings
import subprocess
def launchVisualizationTool(path):
os.system("python " + path)
'''
The welcoming frame of the app
Inherits from Frame
Shows three buttons: create model, load model, and help
create - launch CreateModelFrame, walks user through creating a model
load - launch LoadModelFrame, allows a user to test model
help - launch TutorialFrame, show user how to use the application
'''
class HomeFrame(Frame):
'''
Creates the frame with three buttons
:param GUI, the parent GUI (stage) object used to launch new frames
'''
def __init__(self, GUI, parent):
# Construct super class
Frame.__init__(self, master=parent, bg=Settings.BACKGROUND_COLOR.value)
# Lines things up
self.pack()
# The 'Create Model' button, onClick launches the CreateModelFrame
createModelButton = Button(self, text="Create Model", height=8, width=32, font=Settings.REGULAR_FONT.value, bg=Settings.GOOD_BUTTON_COLOR.value, command=lambda :GUI.newModelCreationFrame())
createModelButton.pack(padx=10, pady=10)
createAdvModelButton = Button(self, text="Create Advanced Model", height=8, width=32, font=Settings.REGULAR_FONT.value, bg=Settings.GOOD_BUTTON_COLOR.value, command=lambda :GUI.newAdvModelCreationFrame())
createAdvModelButton.pack(padx=10, pady=10)
# The 'Load Model' button onClick launches the LoadModelFrame
loadModelButton = Button(self, text="Load Model", height=8, width=32, font=Settings.REGULAR_FONT.value, bg=Settings.GOOD_BUTTON_COLOR.value, command=lambda :GUI.newLoadModelFrame())
loadModelButton.pack(padx=10, pady=10)
# The 'Visualization Tool' button onClick launches defined visualization software
visualizationButton = Button(self, text="Visualization Tool", height=8, width=32, font=Settings.REGULAR_FONT.value, bg=Settings.GOOD_BUTTON_COLOR.value, command= lambda : launchVisualizationTool(Settings.VISUALIZATION_TOOL.value))
visualizationButton.pack(padx=10, pady=10)
# The 'Help' button onClick launches the HelpFrame
helpButton = Button(self, text="Help", height=8, width=32, font=Settings.REGULAR_FONT.value, bg=Settings.GOOD_BUTTON_COLOR.value, command= lambda : subprocess.Popen(["../main/Resources/Help/Help.pdf"],shell=True))
helpButton.pack(padx=10, pady=10) | StarcoderdataPython |
3272712 | # -*- coding: utf-8 -*-
from requests import Response
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.response import HTTPResponse
from requests.packages.urllib3.poolmanager import PoolManager
class MiddlewareHTTPAdapter(HTTPAdapter):
"""An HTTPAdapter onto which :class:`BaseMiddleware <BaseMiddleware>`
can be registered. Middleware methods are called in the order of
registration. Note: middlewares that expose actions called during adapter
initialization must be passed to `__init__` rather than `register`, else
those actions will not take effect.
:param list middlewares: List of :class:`BaseMiddleware <BaseMiddleware>`
objects
"""
def __init__(self, middlewares=None, *args, **kwargs):
self.middlewares = middlewares or []
super(MiddlewareHTTPAdapter, self).__init__(*args, **kwargs)
def register(self, middleware):
"""Add a middleware to the middleware stack.
:param BaseMiddleware middleware: The middleware object
"""
self.middlewares.append(middleware)
def init_poolmanager(self, connections, maxsize, block=False):
"""Assemble keyword arguments to be passed to `PoolManager`.
Middlewares are called in reverse order, so if multiple middlewares
define conflicting arguments, the higher-priority middleware will take
precedence. Note: Arguments are passed directly to `PoolManager` and
not to the superclass `init_poolmanager` because the superclass method
does not currently accept **kwargs.
"""
kwargs = {}
for middleware in self.middlewares[::-1]:
value = middleware.before_init_poolmanager(
connections, maxsize, block
)
kwargs.update(value or {})
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(
num_pools=connections, maxsize=maxsize, block=block,
**kwargs
)
def send(self, request, *args, **kwargs):
"""Send the request. If any middleware in the stack returns a `Response`
or `HTTPResponse` value from its `before_send` method, short-circuit;
else delegate to `HTTPAdapter::send`.
:param request: The :class:`PreparedRequest <PreparedRequest>`
being sent.
:returns: The :class:`Response <Response>` object.
"""
for middleware in self.middlewares:
value = middleware.before_send(request, **kwargs)
if isinstance(value, Response):
return value
if isinstance(value, HTTPResponse):
return self.build_response(request, value)
if value:
raise ValueError('Middleware "before_send" methods must return '
'`Response`, `HTTPResponse`, or `None`')
return super(MiddlewareHTTPAdapter, self).send(
request, *args, **kwargs
)
def build_response(self, req, resp):
"""Build the response. Call `HTTPAdapter::build_response`, then pass
the response object to the `after_build_response` method of each
middleware in the stack, in reverse order.
:param req: The :class:`PreparedRequest <PreparedRequest>` used to
generate the response.
:param resp: The urllib3 response object.
:returns: The :class:`Response <Response>` object.
"""
for middleware in reversed(self.middlewares):
req, resp = middleware.before_build_response(req, resp)
response = super(MiddlewareHTTPAdapter, self).build_response(req, resp)
for middleware in reversed(self.middlewares):
response = middleware.after_build_response(req, resp, response)
return response
class BaseMiddleware(object):
def before_init_poolmanager(self, connections, maxsize, block=False):
"""Called before `HTTPAdapter::init_poolmanager`. Optionally return a
dictionary of keyword arguments to `PoolManager`.
:returns: `dict` of keyword arguments or `None`
"""
pass
def before_send(self, request, *args, **kwargs):
"""Called before `HTTPAdapter::send`. If a truthy value is returned,
:class:`MiddlewareHTTPAdapter <MiddlewareHTTPAdapter>` will short-
circuit the remaining middlewares and `HTTPAdapter::send`, using the
returned value instead.
:param request: The `PreparedRequest` used to generate the response.
:returns: The `Response` object or `None`.
"""
pass
def before_build_response(self, req, resp):
"""Called before `HTTPAdapter::build_response`. Optionally modify the
returned `PreparedRequest` and `HTTPResponse` objects.
:param req: The `PreparedRequest` used to generate the response.
:param resp: The urllib3 response object.
:returns: Tuple of potentially modified (req, resp)
"""
return req, resp
def after_build_response(self, req, resp, response):
"""Called after `HTTPAdapter::build_response`. Optionally modify the
returned `Response` object.
:param req: The `PreparedRequest` used to generate the response.
:param resp: The urllib3 response object.
:param response: The `Response` object.
:returns: The potentially modified `Response` object.
"""
return response
| StarcoderdataPython |
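A minimal sketch of wiring a middleware into a requests session (the header name is illustrative):

import requests

class HeaderMiddleware(BaseMiddleware):
    """Stamp every outgoing request with an extra header."""
    def before_send(self, request, *args, **kwargs):
        request.headers["X-Example"] = "1"
        # Returning None lets the send proceed through the remaining stack.

session = requests.Session()
adapter = MiddlewareHTTPAdapter(middlewares=[HeaderMiddleware()])
session.mount("https://", adapter)
response = session.get("https://example.com")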
3265110 | from .path_manager import PathManager
from .path_handler import PathHandler, NativePathHandler
from .http_path_handler import HTTPURLHandler
from .redirect_path_handler import RedirectPathHandler
from .utils import file_lock
__all__ = ["PathManager", "PathHandler", "NativePathHandler", "HTTPURLHandler", "RedirectPathHandler", "file_lock"]
PathManager.register(HTTPURLHandler()) | StarcoderdataPython |
3205916 | """Setup for the texoopy package."""
import setuptools
with open('README.md') as f:
README = f.read()
setuptools.setup(
author="",
author_email="",
name='texoopy',
license='',
description='TeXooPy (texoopy) is a Python module that tackles the handling of TeXoo style JSON data.',
version='v0.2.1',
long_description=README,
url='https://github.com/DATEXIS/TeXooPy',
packages=setuptools.find_packages(),
python_requires=">=3.5",
install_requires=[],
classifiers=[
# Trove classifiers
# (https://pypi.python.org/pypi?%3Aaction=list_classifiers)
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
],
) | StarcoderdataPython |
1693316 | <reponame>mome0320/EZ-Bot
import discord
from discord.ext import commands
class Userinfo(commands.Cog):
def __init__(self, client):
self.client = client
# Commands
@commands.command()
async def 유저정보(self, ctx):
if (ctx.message.mentions.__len__() > 0):
for user in ctx.message.mentions:
embed = discord.Embed(title="**" + user.name + "**님의 정보", description="",
color=0xffffff)
embed.add_field(name="**ID**",
value=user.id,
inline=True)
embed.add_field(name="**Nickname**",
value=user.display_name,
inline=True)
embed.add_field(name="**Status**",
value=user.status,
inline=True)
embed.add_field(name="**Mention**",
value="<@" + str(user.id) + ">",
inline=True)
embed.set_thumbnail(url=user.avatar_url)
embed.set_footer(text="Offered by NACL - Shio", icon_url="https://raw.githubusercontent.com/Shio7/EZ-Bot/master/images/Shio7.png")
await ctx.send(embed=embed)
else:
embed = discord.Embed(title=ctx.author.name + "님의 정보", description="",
color=0xffffff)
embed.add_field(name="**ID**",
value=ctx.author.id,
inline=True)
embed.add_field(name="**Nickname**",
value=ctx.author.display_name,
inline=True)
embed.add_field(name="**Status**",
value=ctx.author.status,
inline=True)
embed.add_field(name="**Mention**",
value="<@" + str(ctx.author.id) + ">",
inline=True)
embed.set_thumbnail(url=ctx.author.avatar_url)
embed.set_footer(text="Offered by NACL - Shio", icon_url="https://raw.githubusercontent.com/Shio7/EZ-Bot/master/images/Shio7.png")
await ctx.send(embed=embed)
def setup(client):
client.add_cog(Userinfo(client))
| StarcoderdataPython |
1644953 | <filename>brFinance/scraper/cvm/search.py
import re
import time
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Tuple, Any
import lxml.html as LH
import pandas as pd
from selenium import webdriver
from brFinance.utils.browser import Browser
class Search(ABC):
"""
Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx
"""
DELAY: int = 1
cvm_code_df: pd.DataFrame = None
driver: webdriver = None
def check_cvm_code_exists(self, cod_cvm: int) -> bool:
"""Check if CVM code exists
Parameters
----------
cod_cvm : int
CVM company code
Returns
-------
bool
True if cvm code exist, otherwise False
"""
cvm_codes_available = self.get_cvm_codes()
cvm_code_exists = str(cod_cvm) in [str(cod_cvm_aux) for cod_cvm_aux in cvm_codes_available['codCVM'].values]
return cvm_code_exists
def _instantiate_driver(self) -> webdriver:
"""Returns a driver object
Returns
-------
selenium.webdriver
webdriver created for searching
"""
if self.driver is None: return Browser.run_chromedriver()
return self.driver
def _fetch_data(self, cvm_code: int, category: int, initial_date: str, final_date: str) -> Tuple[pd.DataFrame, Any]:
"""Returns dataframe and html document from search
Parameters
----------
initial_date : str
Initial date for search
final_date : str
Final date for search
Returns
-------
pandas.Dataframe
Dataframe containing search results
lxml object
Object containing html data from search
"""
driver = self._instantiate_driver()
driver.get(f"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx?codigoCVM={cvm_code}")
# Wait until page is loaded and click Period button
while True:
try:
period_button_xpath = "//html/body/form[1]/div[3]/div/fieldset/div[4]/div[1]/label[4]"
#driver.find_element_by_xpath(period_button_xpath).click()
driver.find_element_by_id("rdPeriodo").click()
break
except Exception:
print("[LOG]: Waiting for period button")
time.sleep(1)
# Wait until page is loaded and send keys for initial date
while True:
try:
period_init_id = "txtDataIni"
driver.find_element_by_id(period_init_id).send_keys(initial_date)
break
except Exception:
print("[LOG]: Waiting for initial date input")
time.sleep(1)
# Wait until page is loaded and send keys for end date
while True:
try:
period_end_id = "txtDataFim"
driver.find_element_by_id(period_end_id).send_keys(final_date)
break
except Exception:
print("[LOG]: Waiting for final date input")
time.sleep(1)
# Wait until page is loaded and click Categories button
while True:
try:
category_button_id = 'cboCategorias_chosen'
driver.find_element_by_id(category_button_id).click()
break
except Exception:
print("[LOG]: Waiting for Categories button")
time.sleep(1)
# Wait until page is loaded and select category from user input
while True:
try:
category_selection_xpath = f"//html/body/form[1]/div[3]/div/fieldset/div[""5]/div[1]/div/div/ul/li[" \
f"@data-option-array-index='{category}']"
driver.find_element_by_xpath(category_selection_xpath).click()
break
except Exception:
print("[LOG]: Waiting for category dropdown menu")
time.sleep(1)
# Wait until page is loaded and click on Consult button
while True:
try:
consult_button_id = "btnConsulta"
driver.find_element_by_id(consult_button_id).click()
break
except Exception:
print("[LOG]: Waiting for consult button")
time.sleep(1)
# Wait html table load the results (grdDocumentos)
while True:
try:
table_html = str(driver.find_element_by_id('grdDocumentos').get_attribute("outerHTML"))
if ("DFP - Demonstrações Financeiras Padronizadas" in table_html) or \
("ITR - Informações Trimestrais" in table_html):
break
except Exception:
print("[LOG]: Waiting for results")
time.sleep(1)
table = LH.fromstring(table_html)
df = pd.read_html(table_html)[0]
if self.driver is None: driver.quit()
return df, table
def _clean_data(self, cvm_code: int, df_enet_search_result: pd.DataFrame, table: Any) -> pd.DataFrame:
"""
Perform data cleaning and add link to view or download reports documents
Parameters
----------
cvm_code : int
cvm_code
df_enet_search_result : DataFrame
ENET Search dataframe result
table : HTML string with ENET search table result containing links to download and view the reports.
Returns
-------
pandas.Dataframe
Dataframe containing search cleaned results
"""
# Cleaning data for CVM code and reference date
df_enet_search_result["Código CVM"] = cvm_code
df_enet_search_result['Data Referência'] = df_enet_search_result['Data Referência'].str.split(' ', 1).str[1]
df_enet_search_result['Data Referência'] = pd.to_datetime(df_enet_search_result["Data Referência"], format="%d/%m/%Y", errors="coerce")
# Creating a collumn for document visualization link
link_view = []
for expression in table.xpath("//tr/td/i[1]/@onclick"):
link_view.append("https://www.rad.cvm.gov.br/ENET/" + re.findall("(?<=\')(.*?)(?=\')", expression)[0])
df_enet_search_result["linkView"] = link_view
# Creating a collumn for document download link
link_download = []
for expression in table.xpath("//tr/td/i[2]/@onclick"):
try:
data = expression.split(",")
if "OpenDownloadDocumentos" in data:
sequencia, versao, protocolo, tipo = [re.findall("(?<=\')(.*?)(?=\')", d)[0] for d in data]
link_download.append(f"https://www.rad.cvm.gov.br/ENET/frmDownloadDocumento.aspx?Tela=ext&"
f"numSequencia={sequencia}&"
f"numVersao={versao}&"
f"numProtocolo={protocolo}&"
f"descTipo={tipo}&"
f"CodigoInstituicao=1")
else:
link_download.append(None)
except IndexError:
link_download.append(None)
df_enet_search_result["linkDownload"] = link_download
# Filtering for documents which Status is Active
df_enet_search_result = df_enet_search_result.drop(df_enet_search_result[df_enet_search_result["Status"] != "Ativo"].index)
# Deleting Actions column
del df_enet_search_result["Ações"]
return df_enet_search_result
def get_cvm_codes(self) -> pd.DataFrame:
"""Returns a dataframe of all CVM codes and Company names
Returns
-------
pandas.Dataframe
Dataframe of all CVM codes and company names
"""
if Search.cvm_code_df is not None: return Search.cvm_code_df
driver = self._instantiate_driver()
driver.get("https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx")
# Wait until page is loaded and get all companies data
while True:
try:
companies_result_id = "hdnEmpresas"
html_data = driver.find_element_by_id(companies_result_id).get_attribute("value")
if len(html_data) == 0:
continue
break
except Exception:
print("[LOG]: Waiting CVM codes")
time.sleep(1)
# Selecting company name and CVM code
list_cod_cvm = re.findall(r"(?<=_)(.*?)(?=\')", html_data)
list_nome_emp = re.findall(r"(?<=-)(.*?)(?=\')", html_data)
# Adding selected information to a Dataframe
df = pd.DataFrame(list(zip(list_cod_cvm, list_nome_emp)), columns=['codCVM', 'nome_empresa'])
df['codCVM'] = pd.to_numeric(df['codCVM'])
Search.cvm_code_df = df
if self.driver is None: driver.quit()
return Search.cvm_code_df
@abstractmethod
def search(self, cvm_code: int, initial_date: str, final_date: str) -> pd.DataFrame:
"""
Returns dataframe of search results including cod_cvm, report's url, etc.
Parameters
----------
cvm_code : int
CVM company code
initial_date: str
Ex: 01012010 for 01/01/2010
final_date: str
Ex 30072021 for 30/07/2021
Returns
-------
pandas.Dataframe
Dataframe of search results
"""
pass
class SearchDFP(Search):
"""
Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx for category
"Demonstração Financeira Padronizada"
"""
def __init__(self, driver: webdriver = None):
"""
Parameters
----------
driver : webdriver
Optional parameter for webdriver created by user
"""
self.driver = driver
self.category = 21
def search(self,
cvm_code: int,
initial_date: str = '01012010',
final_date: str = datetime.today().strftime('%d%m%Y')) -> pd.DataFrame:
assert self.check_cvm_code_exists(cvm_code), "CVM code not found"
df, table = self._fetch_data(cvm_code, self.category, initial_date, final_date)
df = self._clean_data(cvm_code, df, table)
return df
class SearchITR(Search):
"""
Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx for category
"Informações Trimestrais"
"""
def __init__(self, driver: webdriver = None):
"""
Parameters
----------
driver : webdriver
Optional parameter for webdriver created by user
"""
self.driver = driver
self.category = 39
def search(self,
cvm_code: int,
initial_date: str = '01012010',
final_date: str = datetime.today().strftime('%d%m%Y')) -> pd.DataFrame:
assert self.check_cvm_code_exists(cvm_code), "CVM code not found"
df, table = self._fetch_data(cvm_code, self.category, initial_date, final_date)
df = self._clean_data(cvm_code, df, table)
return df
| StarcoderdataPython |
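A usage sketch for the search classes above (assumes a working chromedriver on PATH; the CVM code and dates are illustrative):

searcher = SearchDFP()
reports = searcher.search(cvm_code=9512, initial_date="01012019", final_date="31122020")
# Each row carries the report metadata plus view/download links built by _clean_data().
print(reports[["Data Referência", "linkDownload"]].head())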
1713253 | """
Copyright 2013 <NAME>
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.affine.affine_atom import AffAtom
import cvxpy.utilities as u
import cvxpy.interface as intf
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
class reshape(AffAtom):
""" Reshapes the expression.
Vectorizes the expression then unvectorizes it into the new shape.
The entries are stored in column-major order.
"""
def __init__(self, expr, rows, cols):
self.rows = rows
self.cols = cols
super(reshape, self).__init__(expr)
@AffAtom.numpy_numeric
def numeric(self, values):
"""Reshape the value.
"""
return np.reshape(values[0], (self.rows, self.cols), "F")
def validate_arguments(self):
"""Checks that the new shape has the same number of entries as the old.
"""
old_len = self.args[0].size[0]*self.args[0].size[1]
new_len = self.rows*self.cols
if not old_len == new_len:
raise ValueError(
"Invalid reshape dimensions (%i, %i)." % (self.rows, self.cols)
)
def shape_from_args(self):
"""Returns the shape from the rows, cols arguments.
"""
return u.Shape(self.rows, self.cols)
@staticmethod
def graph_implementation(arg_objs, size, data=None):
"""Convolve two vectors.
Parameters
----------
arg_objs : list
LinExpr for each argument.
size : tuple
The size of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
return (lu.reshape(arg_objs[0], size), [])
| StarcoderdataPython |
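The column-major ("Fortran") ordering used by numeric() above is easy to see directly with NumPy:

import numpy as np

m = np.arange(6)                    # [0 1 2 3 4 5]
print(np.reshape(m, (2, 3), "F"))   # columns are filled first:
# [[0 2 4]
#  [1 3 5]]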
3334401 | # -*- coding: utf-8 -*-
import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, "Menu01 Demo")
wx.Panel(self)
menu_bar = wx.MenuBar() # create the menu bar; no arguments required
self.menu1 = wx.Menu() # create a menu
self.str1 = self.menu1.Append(-1, "aaa") # append a menu item to the menu
self.str2 = self.menu1.Append(-1, "bbb") # Append() returns a MenuItem instance
self.str3 = self.menu1.Append(id=-1, # the id uniquely identifies a menu item
item="Count",
helpString="Get menu item count")
self.Bind(wx.EVT_MENU, self.MenuItemCount1, self.str3) # bind an event to the menu item
menu_bar.Append(self.menu1, "String") # attach the menu to the menu bar
self.menu2 = wx.Menu()
items_count = self.menu2.Append(-1, "GetMenuItemCount")
self.Bind(wx.EVT_MENU, self.MenuItemCount2, items_count)
menu_bar.Append(self.menu2, "Test")
self.SetMenuBar(menu_bar) # set the frame's menu bar
def MenuItemCount1(self, event):
print("# The MenuItemCount is : ", self.menu1.GetMenuItemCount())
def MenuItemCount2(self, event):
print("# The MenuItemCount is : ", self.menu2.GetMenuItemCount())
if __name__ == "__main__":
app = wx.App()
frame = MyFrame()
frame.Show()
app.MainLoop()
# ### Menus
# Kinds: menu-bar menus (regular menus) and popup menus (right-click/context menus),
# which appear on a right mouse click;
# a menu contains menu items, and a menu item may itself contain a submenu;
# Three classes are involved:
# - wx.MenuBar : handles the menu bar
# - wx.Menu : manages a drop-down or popup menu
# - wx.MenuItem : manages an individual menu item
#
# ### Menu bar (wx.MenuBar class)
# https://docs.wxpython.org/wx.MenuBar.html
# Main methods:
# - Append(menu, title) : append a menu to the end of the menu bar
# - Insert(pos, Menu, title) : insert a menu at the given position (0-based index) in the menu bar
# - Remove(pos) : remove the menu at index pos
# - GetMenuCount() : return the number of menus currently in the menu bar
# - Replace(pos, Menu, title) : replace the menu at the given position with the given menu
# - GetMenu(pos) : return the menu object at the given position
# - GetLabelTop(pos) : return the label of the menu at the given position
# - SetLabelTop(pos, label) : set the label of the menu at the given position
# - FindMenu(title) : return the index of the menu with the given title in the menu bar
# - EnableTop(pos, enable) : enable or disable the menu at the given position
#
# ### Menu (wx.Menu class)
# https://docs.wxpython.org/wx.Menu.html
# Main methods:
# - Append() : append a menu item to the menu
# - Insert() : insert a menu item at the given position in the menu
# - AppendSeparator() : append a separator to the menu
# - InsertSeparator(pos) : insert a separator at the given position in the menu
# - Remove(id) : remove the menu item with the given ID
# - GetMenuItemCount() : return the number of items in the menu
# - GetMenuItems() : return a list of the menu's items
#
# ### Menu item (wx.MenuItem class)
# https://docs.wxpython.org/wx.MenuItem.html
#
# ### The Enable() and IsEnabled() methods
# Enable(): set whether a menu bar, menu, or menu item is enabled;
# - menu bar: Enable(self, id, enable)
# - menu: Enable(self, id, enable)
# - menu item: Enable(self, enable=True)
# IsEnabled(): check whether it is enabled;
# - menu bar: IsEnabled(id)
# - menu: IsEnabled(id)
# - menu item: IsEnabled()
# In addition, wx.MenuBar's EnableTop(pos, enable) method can enable or disable an entire top-level menu;
| StarcoderdataPython |
50358 | """Random Forest classification and computation of assessment metrics."""
import numpy as np
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from raster import is_raster
def transform_input(scene):
"""Transform input variables (here Landsat NDSV).
Parameters
----------
scene : landsat.Scene
Input Landsat scene.
Returns
-------
X : array
Transformed input data as an array of shape (n_samples, n_features).
"""
n_features = len(scene.ndsv_)
n_samples = scene.profile['width'] * scene.profile['height']
X = np.zeros(shape=(n_samples, n_features), dtype=np.float)
ndsv = scene.ndsv
for i in range(n_features):
X[:, i] = ndsv[i, :, :].ravel()
return X
def transform_test(true, pred):
"""Transform true and predicted raster data sets to
flat arrays.
Parameters
----------
true : array-like
Testing data set raster as a 2D NumPy array.
pred : array-like
Predicted values as a 2D NumPy array.
Returns
-------
y_true : array
1D array of true labels of shape (n_samples).
y_pred : array
1D array of predicted labels of shape (n_samples).
"""
y_pred = pred[true > 0].ravel()
y_true = true[true > 0].ravel()
return y_true, y_pred
def transform_training(scene, training):
"""Transform training data set.
Parameters
----------
scene : landsat.Scene
Input Landsat scene.
training : 2D numpy array
Training data raster as a 2D numpy array.
Returns
-------
X : array
Training samples as an array of shape (n_samples, n_features).
y : array
Training labels as an array of shape (n_samples).
"""
n_features = len(scene.ndsv_)
n_samples = np.count_nonzero(training)
X = np.zeros(shape=(n_samples, n_features), dtype=np.float)
ndsv = scene.ndsv
for i in range(n_features):
X[:, i] = ndsv[i, :, :][training > 0].ravel()
y = training[training > 0].ravel()
return X, y
def classify(
scene,
training,
oversampling=False,
undersampling=False,
water=None,
**kwargs):
"""Classify Landsat scene using Random Forest.
Parameters
----------
scene : landsat.Scene
Input Landsat scene.
training : 2D numpy array
Input training data set as a 2D numpy array.
oversampling : bool, optional
If set to `True`, random oversampling will be performed on the
minority class.
undersampling : bool, optional
If set to `True`, random undersampling will be performed on the
majority class.
water : 2D numpy array, optional
If provided, water pixels will be ignored and classified as
non-built.
kwargs : **kwargs
Additionnal arguments to the Random Forest classifier.
Returns
-------
classes : 2D numpy array
Binary output as a 2D numpy array.
probabilities : 2D numpy array
Probabilistic output as a 2D numpy array.
"""
X = transform_input(scene)
x_train, y_train = transform_training(scene, training)
random_state = kwargs.pop('random_state', None)
if oversampling:
ros = RandomOverSampler(random_state=random_state)
x_train, y_train = ros.fit_sample(x_train, y_train)
if undersampling:
ros = RandomUnderSampler(random_state=random_state)
x_train, y_train = ros.fit_sample(x_train, y_train)
rf = RandomForestClassifier(**kwargs)
rf.fit(x_train, y_train)
probabilities = rf.predict_proba(X)
probabilities = probabilities[:, 0].reshape(scene.red.shape)
if is_raster(water):
probabilities[water] = 0
return probabilities
def assess(probabilities, testing_dataset, threshold=0.75):
"""Compute validation metrics.
Parameters
----------
probabilities : 2D numpy array
Predicted probabilities of belonging to
the built-up class as a 2D NumPy array.
testing_dataset : 2D numpy array
Testing data set as as 2D NumPy array.
threshold : float
Threshold applied to the probabilistic output
to obtain a binary product (0-1).
Returns
-------
summary : dict
Assessment metrics in a dictionnary.
"""
summary = {}
# Binary product obtained by thresholding the probabilities
classes = np.zeros(shape=probabilities.shape, dtype=np.uint8)
classes[probabilities >= threshold] = 1
classes[probabilities < threshold] = 2
# 1. Binary classification metrics:
# Assign value 2 to all non-built land covers
true, pred = testing_dataset.copy(), classes.copy()
true[true >= 2] = 2
pred[pred >= 2] = 2
# Transform and binarize input data
y_true, y_pred = transform_test(true, pred)
y_true, y_pred = y_true == 1, y_pred == 1
summary['accuracy'] = metrics.accuracy_score(
y_true, y_pred
)
summary['balanced_accuracy'] = metrics.recall_score(
y_true, y_pred
)
summary['precision'] = metrics.precision_score(
y_true, y_pred
)
summary['recall'] = metrics.recall_score(
y_true, y_pred
)
summary['f1_score'] = metrics.f1_score(
y_true, y_pred
)
summary['confusion_matrix'] = metrics.confusion_matrix(
y_true, y_pred
)
# 2. Continuous metrics based on probabilities:
# Assign value 2 to all non-built land covers
true = testing_dataset.copy()
true[true >= 2] = 2
# Transform and binarize input data
y_true, y_pred = transform_test(true, probabilities)
y_true = y_true == 1
summary['pr_curve'] = metrics.precision_recall_curve(
y_true, y_pred
)
summary['avg_precision'] = metrics.average_precision_score(
y_true, y_pred, average='weighted'
)
# 3. Per land cover accuracies
land_covers = {
'builtup': 1,
'baresoil': 2,
'lowveg': 3,
'highveg': 4
}
for label, value in land_covers.items():
mask = testing_dataset == value
true = testing_dataset[mask]
pred = classes[mask]
total = np.count_nonzero(mask)
if label == 'builtup':
accuracy = np.count_nonzero(pred == 1) / total
else:
accuracy = np.count_nonzero(pred >= 2) / total
summary['{}_accuracy'.format(label)] = accuracy
return summary
| StarcoderdataPython |
1788026 | <gh_stars>0
#-*-coding: utf-8-*-
from pyautogui import screenshot
from os import chdir, mkdir
from datetime import date
from time import sleep
if __name__ == '__main__':
while True:
try:
chdir("C:\\{}".format(date.today()))
except:
mkdir("C:\\{}".format(date.today()))
else:
break
count = 1
while True:
ss = screenshot()
ss.save("{}-{}.png".format(date.today(),count))
count += 1
sleep(5)
| StarcoderdataPython |
3215578 | <reponame>srihari-nagaraj/anuvaad
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_exception
from anuvaad_auditor.loghandler import log_debug
from collections import namedtuple
from src.utilities.region_operations import collate_regions, get_polygon,sort_regions, remvoe_regions
from src.services.segment import horzontal_merging
import src.utilities.app_context as app_context
import copy
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
class MapKeys:
def __init__(self):
self.left = None
self.right = None
self.top = None
self.bottom = None
def get_left(self,box):
left = int(box['boundingBox']['vertices'][0]['x'])
return left
def get_right(self,box):
right = int(box['boundingBox']['vertices'][1]['x'])
return right
def get_top(self,box):
top = int(box['boundingBox']['vertices'][0]['y'])
return top
def get_bottom(self,box):
bottom = int(box['boundingBox']['vertices'][3]['y'])
return bottom
def get_height(self,box):
height = int(abs(self.get_top(box) - self.get_bottom(box)))
return height
def get_width(self,box):
width = int(abs(self.get_left(box) - self.get_right(box)))
return width
keys = MapKeys()
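# Example of the vertex convention assumed by MapKeys (hypothetical OCR box;
# vertices ordered top-left, top-right, bottom-right, bottom-left):
# box = {'boundingBox': {'vertices': [{'x': 10, 'y': 20}, {'x': 110, 'y': 20},
# {'x': 110, 'y': 60}, {'x': 10, 'y': 60}]}}
# keys.get_left(box) -> 10, keys.get_right(box) -> 110
# keys.get_width(box) -> 100, keys.get_height(box) -> 40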
class Page_Config:
def avg_region_info(self,page):
try:
total_region = 0; avg_hor_dist = 0
for idx, region in enumerate(page):
if idx+1<len(page):
for idx2, region2 in enumerate(page[idx+1:]):
if keys.get_right(region)<keys.get_left(region2):
hor_dis = abs(keys.get_right(region) - keys.get_left(region2))
avg_hor_dist = avg_hor_dist + hor_dis
total_region = total_region +1
if keys.get_right(region2)<keys.get_left(region):
hor_dis = abs(keys.get_right(region2) - keys.get_left(region))
avg_hor_dist = avg_hor_dist + hor_dis
total_region = total_region +1
avg_hor_dist = avg_hor_dist / total_region
except:
pass
return avg_hor_dist
def avg_line_info(self,page):
try:
avg_height = 0; total_line = 0
avg_ver_dist = 0; avg_width = 0
ver_dist_mes_count = 0
for region in page:
if region['children'] !=None:
total_line = total_line+len(region['children'])
for idx, line in enumerate(region['children']):
height = keys.get_height(line)
avg_height = avg_height + height
avg_width = avg_width+ keys.get_width(line)
current_line_top = keys.get_top(line)
if idx<len(region['children'])-1:
next_line_top = keys.get_top(region['children'][idx+1])
max_height = max( keys.get_height(region['children'][idx+1]) ,keys.get_height(region['children'][idx]))
ver_dis = abs(next_line_top-current_line_top)
if ver_dis > max_height * 0.5 :
avg_ver_dist = avg_ver_dist + ver_dis
ver_dist_mes_count +=1
avg_height = avg_height / total_line
avg_width = avg_width / total_line
if ver_dist_mes_count > 0 :
avg_ver_dist = avg_ver_dist / ver_dist_mes_count
else:
avg_ver_dist = avg_height
except:
pass
return avg_height, avg_ver_dist, avg_width
def avg_word_sep(self, page):
try:
avg_height = 0
total_words = 0
avg_spacing = 0
avg_width = 0
for line in page:
if line['children'] != None:
total_words = total_words + len(line['children'])
for idx, word in enumerate(line['children']):
if idx < len(line['children']) - 1:
#print(len(line['children']))
next_line_left = keys.get_left(line['children'][idx + 1])
current_line_right = keys.get_right(line['children'][idx])
spacing = abs(next_line_left - current_line_right)
avg_spacing = avg_spacing + spacing
avg_spacing = avg_spacing / (total_words - len(page))
        except Exception:  # e.g. ZeroDivisionError when there are no separable words
pass
return avg_spacing
class Region_Unifier:
def check_horizon_region(self,box1,box2):
if keys.get_right(box1)<keys.get_left(box2):
return True
if keys.get_right(box2)<keys.get_left(box1):
return True
else:
return False
def get_text_tabel_region(self,regions):
text_region = []
tabel_region = []
image_region = []
n_text_table_regions = []
for region in regions:
if region['class'] in ['TEXT', "HEADER",'FOOTER']:
text_region.append(region)
else:
if region['class']=='TABLE':
tabel_region.append(region)
else:
if region['class']=='IMAGE':
image_region.append(region)
else :
n_text_table_regions.append(region)
return text_region,n_text_table_regions,tabel_region,image_region
def check_double_column(self,boxes,avg_height):
total_regions = len(boxes)
count =0
regions = copy.deepcopy(boxes)
while len(regions)>2:
flag = False
reg1 = regions[0]
for idx,reg2 in enumerate(regions[1:]):
if self.check_horizon_region(reg1,reg2) and keys.get_height(reg1)>3*avg_height and keys.get_height(reg2)>3*avg_height :
flag = True
del regions[0]
break
if flag==True:
count=count+1
else:
del regions[0]
if count>0.3*total_regions:
return True
else:
return False
def merge_condition(self,reg1,reg2,avg_height, avg_ver_dist, avg_width,avg_word_sepc):
box1_top = keys.get_top(reg1); box1_bottom = keys.get_bottom(reg1)
box1_left = keys.get_left(reg1); box1_right = keys.get_right(reg1)
box2_top = keys.get_top(reg2); box2_bottom = keys.get_bottom(reg2)
box2_left = keys.get_left(reg2); box2_right = keys.get_right(reg2)
box1_lines = reg1["children"]; box2_lines = reg2["children"]
hor_diff_thresh = avg_word_sepc*2 ; line_width_diff = avg_width*0.1
#print(hor_diff_thresh,'')
#issue in order
if box1_left > box2_left :
sepration = abs(box1_left -box2_right)
else :
sepration = abs(box2_left -box1_right)
if box1_lines!= None and len(box1_lines)>0 and box2_lines!=None and len(box2_lines)>0:
box1_last_line = box1_lines[-1]; box2_first_line = box2_lines[0]
            # Merging lines which were detected as regions
# if (keys.get_height(reg1)<= avg_height*2 and keys.get_height(reg2)<= avg_height+2) \
# and sepration < hor_diff_thresh\
# and abs(box2_top-box1_bottom)< 3 * avg_ver_dist:
# return True
            # ########### conditions for merging two horizontally separated regions that are single lines and whose horizontal spacing is below the threshold
if self.check_horizon_region(reg1,reg2) \
and (keys.get_height(reg1)<= avg_height*2 and keys.get_height(reg2)<= avg_height*2) :
if (0<(keys.get_left(reg2)-keys.get_right(reg1))<hor_diff_thresh \
and abs(box2_top-box1_bottom)<avg_ver_dist) \
or (0<(keys.get_left(reg1)-keys.get_right(reg2))<hor_diff_thresh \
and abs(box2_top-box1_bottom)<avg_ver_dist):
return True
else:
return False
############
#based on box separation :
# if abs(keys.get_width(reg1)-keys.get_width(reg2))<line_width_diff\
# and abs(box2_top-box1_bottom)<avg_ver_dist *2 \
# and keys.get_right(box2_first_line)-keys.get_right(box1_last_line)< line_width_diff \
# and keys.get_left(box2_first_line)-keys.get_left(box1_last_line)< line_width_diff :
# return True
#IF a running paragraph is broken (1) :
if len(box1_lines) > 1 :
box_1_second_last_line = box1_lines[-2]
if (keys.get_left(box2_first_line)-keys.get_left(box1_last_line)< hor_diff_thresh)\
and (keys.get_right(box_1_second_last_line)-keys.get_right(box1_last_line)< hor_diff_thresh)\
and abs(box2_top-box1_bottom)<avg_ver_dist *2 :
return True
# IF a running paragraph is broken (2)
if keys.get_right(box2_first_line)-keys.get_right(box1_last_line) > hor_diff_thresh*0.5 :
return False
# based on box separation :
if abs(box2_top-box1_bottom)<avg_ver_dist *2 \
and keys.get_right(box2_first_line)-keys.get_right(box1_last_line)< hor_diff_thresh \
and keys.get_left(box2_first_line)-keys.get_left(box1_last_line)< hor_diff_thresh :
return True
# if abs(box2_top-box1_bottom)<avg_ver_dist and abs(box1_left-box2_left)<50 and abs(box1_right-box2_right)<50:
# return True
# if (abs(box1_bottom-box2_top)<avg_ver_dist*0.5 \
# and abs(box1_left-box2_left)<line_width_diff) \
# or (abs(box1_bottom-box2_top)<avg_ver_dist*0.5\
# and abs(box1_right-box2_right)<line_width_diff):
# return True
# else:
# return False
else:
return False
def check_region_unification(self,reg1,reg2,avg_height, avg_ver_dist, avg_width,avg_word_sepc):
box1_top = keys.get_top(reg1)
box2_top = keys.get_top(reg2)
if box1_top < box2_top:
return self.merge_condition(reg1,reg2,avg_height, avg_ver_dist, avg_width,avg_word_sepc)
if box1_top > box2_top:
return self.merge_condition(reg2,reg1,avg_height, avg_ver_dist, avg_width,avg_word_sepc)
def update_children(self,reg1,reg2):
page_config = Page_Config()
if reg1['children']!=None and len(reg1['children']) > 0 :
if reg2['children']!=None and len(reg2['children']) > 0 :
agg_children = reg1['children'] + reg2['children']
agg_children.sort(key=lambda x: x['boundingBox']['vertices'][0]['y'])
children = sort_regions(agg_children , [])
if len(children) > 1 :
avg__region_height, avg__region_ver_dist, avg__region_width = page_config.avg_line_info([{'children': children}])
avrage_region_ver_ratio = avg__region_ver_dist / max(1,avg__region_height)
return horzontal_merging(children, avrage_region_ver_ratio)
#v_list[idx] =v_block
else:
return children
else :
return reg1['children']
else :
if reg2['children']!=None and len(reg2['children']) > 0 :
return reg2['children']
else :
return []
def update_coord(self,reg1,reg2):
#try:
box1_top = keys.get_top(reg1); box1_bottom = keys.get_bottom(reg1)
box1_left = keys.get_left(reg1); box1_right = keys.get_right(reg1)
box2_top = keys.get_top(reg2); box2_bottom = keys.get_bottom(reg2)
box2_left = keys.get_left(reg2); box2_right = keys.get_right(reg2)
reg1['children'] = self.update_children(reg1, reg2)
reg1["boundingBox"]["vertices"][0]['x']= min(box1_left,box2_left)
reg1["boundingBox"]["vertices"][0]['y']= min(box1_top,box2_top)
reg1["boundingBox"]["vertices"][1]['x']= max(box1_right,box2_right)
reg1["boundingBox"]["vertices"][1]['y']= min(box1_top,box2_top)
reg1["boundingBox"]["vertices"][2]['x']= max(box1_right,box2_right)
reg1["boundingBox"]["vertices"][2]['y']= max(box1_bottom,box2_bottom)
reg1["boundingBox"]["vertices"][3]['x']= min(box1_left,box2_left)
reg1["boundingBox"]["vertices"][3]['y']= max(box1_bottom,box2_bottom)
#reg1['class'] = 'TEXT'
# except:
# pass
return reg1
def is_connected(self,region1, region2,avg_height, avg_ver_dist, avg_width,avg_word_sepc):
region_poly = get_polygon(region2['boundingBox'])
base_poly = get_polygon(region1['boundingBox'])
area=0
check=False
if region_poly and base_poly:
area = base_poly.intersection(region_poly).area
check = self.check_region_unification(region1,region2,avg_height, avg_ver_dist, avg_width,avg_word_sepc)
return area>0 or check
def merge_remove_overlap(self,text_regions,avg_height, avg_ver_dist, avg_width,avg_word_sepc):
region_updated = []
flag =False
while len(text_regions)>0:
check = False
region_temp= text_regions[1:]
for idx2,region2 in enumerate(region_temp):
if self.is_connected(text_regions[0], region2, avg_height, avg_ver_dist, avg_width,avg_word_sepc):
region1 = self.update_coord(text_regions[0],region2)
text_regions[0] = copy.deepcopy(region1)
check =True ; flag = True
del text_regions[idx2+1]
break
if check == False:
region_updated.append(copy.deepcopy(text_regions[0]))
del text_regions[0]
return region_updated, flag
    # Known test cases / open issues (internal sample IDs):
    # kan_1_0: middle box merging
    # kan_1_1: second point (2.) merging
    # kan_1_7: horizontal block merging
    # kan_1_9: b1 last line vs b2 first line right-edge comparison
    # kan_1_12
    # kan_1_16: line merging issue, merging above 7.1
    # kan_1_20 (check after h_merging is fixed)
    # kan_1_21
    # kan_1_22 (check with left/right diff): horizontal_diff threshold
    # kan_1_29: horizontal region merging if regions are very close (a separate condition)
    # kan_1_30
    # yolo_1_1
    # 20695_1_0 (reportable)
    # 20695_1_2 (merging of a list with numbering)
    # 20695_1_3: sorting issue
    # segmenter: kan_1_1
    # background removal (cheap fix)
    # background removal integration with PRIMA
    # segmenter: kan_1_23
    # 36066_2008_3_1502_15489_Judgement_01-Aug-2019_ORI pages [1, 6]: last line of paragraph
def region_unifier(self,page_words, page_lines,page_regions):
try:
text_region,n_text_table_regions,tabel_region,image_region = self.get_text_tabel_region(page_regions)
tabel_region = remvoe_regions(copy.deepcopy(image_region), copy.deepcopy(tabel_region))
filtered_words = remvoe_regions(copy.deepcopy(image_region), copy.deepcopy(page_words))
filtered_lines = remvoe_regions(copy.deepcopy(image_region), copy.deepcopy(page_lines))
for idx,table in enumerate(tabel_region):
filtered_words = remvoe_regions(copy.deepcopy(table['children']), copy.deepcopy(filtered_words))
filtered_lines = remvoe_regions(copy.deepcopy(table['children']), copy.deepcopy(filtered_lines))
tabel_region[idx]['children'] = collate_regions(copy.deepcopy( table['children']),copy.deepcopy(page_words),grand_children=False,region_flag = False)
page_words = filtered_words
page_lines = filtered_lines
text_region = remvoe_regions(copy.deepcopy(tabel_region) ,copy.deepcopy(text_region))
# filtered_words = remvoe_regions(copy.deepcopy(tabel_region), copy.deepcopy(page_words))
# filtered_lines = remvoe_regions(copy.deepcopy(tabel_region), copy.deepcopy(page_lines))
line_list = collate_regions(copy.deepcopy( filtered_lines), copy.deepcopy( filtered_words))
v_list = collate_regions( copy.deepcopy( text_region),copy.deepcopy( line_list ),grand_children=True )
#t_list = collate_regions(copy.deepcopy( tabel_region),copy.deepcopy(page_words),grand_children=True,region_flag = False)
t_list = tabel_region
i_list = collate_regions(copy.deepcopy( image_region),copy.deepcopy(page_words),grand_children=True,region_flag = False,skip_enpty_children=True)
for i in i_list :
                if 'children' in i.keys():  # fixed key typo ('chiildren') so image regions with words are kept
v_list.append(i)
# line_list = collate_regions(page_lines,page_words)
# v_list = collate_regions(page_regions,line_list,grand_children=True)
page_config = Page_Config()
# text_regions, n_text_regions = self.get_text_region(v_list)
avg_height, avg_ver_dist, avg_width = page_config.avg_line_info(v_list)
if avg_height == 0:
avg_height = 1
self.avg_ver_ratio = avg_ver_dist /avg_height
for idx,v_block in enumerate(v_list):
#if 'children' in v_block.keys()
if v_block['children'] != None and len(v_block['children']) > 1 :
#print(idx, 'region index')
#print('merging horrrrrrrrrrrrrrrrrrrr' , len(v_block['children']))
avg__region_height, avg__region_ver_dist, avg__region_width = page_config.avg_line_info([v_block])
v_block['avg_ver_dist'] = avg__region_ver_dist
avrage_region_ver_ratio= avg__region_ver_dist / max(1,avg__region_height)
#v_block['children'] = horzontal_merging(v_block['children'],avrage_region_ver_ratio)
v_list[idx] =v_block
for idx,t_block in enumerate(t_list):
if t_block['children'] != None and len(t_block['children']) > 1 :
#print(idx, 'region index')
#print('merging horrrrrrrrrrrrrrrrrrrr' , len(v_block['children']))
avg__region_height, avg__region_ver_dist, avg__region_width = page_config.avg_line_info([t_block])
t_block['avg_ver_dist'] = avg__region_ver_dist
avrage_region_ver_ratio= avg__region_ver_dist / max(1,avg__region_height)
#t_block['children'] = horzontal_merging(t_block['children'],avrage_region_ver_ratio)
t_list[idx] =t_block
################### page configs for region unifier
#avg_hor_dist = page_config.avg_region_info(text_regions)
avg_word_sepc = page_config.avg_word_sep(line_list)
# print("av height : ",avg_height)
# print("avg_ver_dist : ",avg_ver_dist)
# print("av avg_width : ",avg_width)
# print("avg_hor_dist", avg_hor_dist)
# print('avg word spacing', avg_word_sepc)
# print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
########################
n_text_table_regions.extend(t_list)
#n_text_table_regions.extend(image_region)
if self.check_double_column(v_list,avg_height):
print("this document is double columnssssssss")
return v_list, n_text_table_regions
# flag =True
flag = False
while flag==True:
v_list, flag = self.merge_remove_overlap(v_list,avg_height, avg_ver_dist, avg_width,avg_word_sepc)
except Exception as e:
log_exception("Error occured during block unifier", app_context.application_context, e)
return None ,None
return v_list, n_text_table_regions
| StarcoderdataPython |
3236514 | '''
Created on Mar 1, 2017
@author: PJ
'''
from Scouting2017.model.reusable_models import Competition, Team, Match
from Scouting2017.model.models2017 import ScoreResult
from django.core.urlresolvers import reverse
from django.http.response import HttpResponseRedirect
from django.views.generic.base import TemplateView
class BulkSubmitMatch(TemplateView):
    def post(self, request, **kwargs):
        post = request.POST
        regional_code = kwargs["regional_code"]
num_rows = int(post['rowCounter'])
for i in range(num_rows):
comp = Competition.objects.get(code=regional_code)
team = Team.objects.get(teamNumber=int(post['teamNumber-%s' % (i + 1)]))
match = Match.objects.get(competition=comp, matchNumber=int(post['matchNumber-%s' % (i + 1)]))
rope_climbed = 'ropeClimbed-%s' % (i + 1) in post
auto_baseline = 'autoBaseline-%s' % (i + 1) in post
yellow_card = 'yellowCard-%s' % (i + 1) in post
red_card = 'redCard-%s' % (i + 1) in post
score_result = ScoreResult.objects.create(
competition=comp,
team=team,
match=match,
# Auto
auto_fuel_high_score=int(post['autoFuelHighMade-%s' % (i + 1)]),
auto_fuel_high_shots=int(post['autoFuelHighShot-%s' % (i + 1)]),
auto_fuel_low_score=int(post['autoFuelLowMade-%s' % (i + 1)]),
auto_fuel_low_shots=int(post['autoFuelLowShot-%s' % (i + 1)]),
auto_gears=int(post['autoGears-%s' % (i + 1)]),
auto_baseline=auto_baseline,
# Tele-op
tele_gears=int(post['teleGears-%s' % (i + 1)]),
tele_fuel_high_shots=int(post['teleFuelHighShots-%s' % (i + 1)]),
tele_fuel_high_score=int(post['teleFuelHighScore-%s' % (i + 1)]),
tele_fuel_low_shots=int(post['teleFuelLowShots-%s' % (i + 1)]),
tele_fuel_low_score=int(post['teleFuelLowScore-%s' % (i + 1)]),
# Endgame
rope=rope_climbed,
# General
hoppers_dumped=int(post['hoppersDumped-%s' % (i + 1)]),
match_comments=post['comments-%s' % (i + 1)],
# Fouls
tech_foul=int(post['techFouls-%s' % (i + 1)]),
foul=int(post['fouls-%s' % (i + 1)]),
yellow_card=yellow_card,
red_card=red_card,
)
score_result.save()
return HttpResponseRedirect(reverse('Scouting2017:index', args=(regional_code,)))
| StarcoderdataPython |
197913 | <filename>favicons/_constants.py<gh_stars>1-10
"""Static values for one way import."""
SUPPORTED_FORMATS = (".svg", ".jpeg", ".jpg", ".png", ".tiff", ".tif")
HTML_LINK = '<link rel="{rel}" type="{type}" href="{href}" />'
ICON_TYPES = (
{"image_fmt": "ico", "rel": None, "dimensions": (64, 64), "prefix": "favicon"},
{"image_fmt": "png", "rel": "icon", "dimensions": (16, 16), "prefix": "favicon"},
{"image_fmt": "png", "rel": "icon", "dimensions": (32, 32), "prefix": "favicon"},
{"image_fmt": "png", "rel": "icon", "dimensions": (64, 64), "prefix": "favicon"},
{"image_fmt": "png", "rel": "icon", "dimensions": (96, 96), "prefix": "favicon"},
{"image_fmt": "png", "rel": "icon", "dimensions": (180, 180), "prefix": "favicon"},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (57, 57),
"prefix": "apple-touch-icon",
},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (60, 60),
"prefix": "apple-touch-icon",
},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (72, 72),
"prefix": "apple-touch-icon",
},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (76, 76),
"prefix": "apple-touch-icon",
},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (114, 114),
"prefix": "apple-touch-icon",
},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (120, 120),
"prefix": "apple-touch-icon",
},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (144, 144),
"prefix": "apple-touch-icon",
},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (152, 152),
"prefix": "apple-touch-icon",
},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (167, 167),
"prefix": "apple-touch-icon",
},
{
"image_fmt": "png",
"rel": "apple-touch-icon",
"dimensions": (180, 180),
"prefix": "apple-touch-icon",
},
{"image_fmt": "png", "rel": None, "dimensions": (70, 70), "prefix": "mstile"},
{"image_fmt": "png", "rel": None, "dimensions": (270, 270), "prefix": "mstile"},
{"image_fmt": "png", "rel": None, "dimensions": (310, 310), "prefix": "mstile"},
{"image_fmt": "png", "rel": None, "dimensions": (310, 150), "prefix": "mstile"},
{"image_fmt": "png", "rel": "shortcut icon", "dimensions": (196, 196), "prefix": "favicon"},
)
| StarcoderdataPython |
1735709 | <gh_stars>1000+
import os
import sys
from datetime import datetime
from unittest import TestCase
import pytest
from six.moves import cStringIO as StringIO
from pyinfra.operations import server
from pyinfra_cli.exceptions import CliError
from pyinfra_cli.util import get_operation_and_args, json_encode
class TestCliUtil(TestCase):
def test_json_encode_function(self):
assert json_encode(get_operation_and_args) == 'Function: get_operation_and_args'
def test_json_encode_datetime(self):
now = datetime.utcnow()
assert json_encode(now) == now.isoformat()
def test_json_encode_file(self):
file = StringIO()
assert json_encode(file) == 'In memory file: '
def test_json_encode_set(self):
assert json_encode({1, 2, 3}) == [1, 2, 3]
def test_setup_no_module(self):
with self.assertRaises(CliError) as context:
get_operation_and_args(('no.op',))
assert context.exception.message == 'No such module: no'
def test_setup_no_op(self):
with self.assertRaises(CliError) as context:
get_operation_and_args(('server.no',))
assert context.exception.message == 'No such operation: server.no'
def test_setup_op_and_args(self):
commands = ('server.user', 'one', 'two', 'hello=world')
assert get_operation_and_args(commands) == (
server.user,
(['one', 'two'], {'hello': 'world'}),
)
def test_setup_op_and_json_args(self):
commands = ('server.user', '[["one", "two"], {"hello": "world"}]')
assert get_operation_and_args(commands) == (
server.user,
(['one', 'two'], {'hello': 'world'}),
)
@pytest.fixture(scope='function')
def user_sys_path():
user_pkg = os.path.dirname(__file__) + '/user'
sys.path.append(user_pkg)
yield None
sys.path.pop()
to_rm = []
for k, v in sys.modules.items():
v = getattr(v, '__file__', '')
if isinstance(v, str) and v.startswith(user_pkg):
to_rm.append(k)
for k in to_rm:
del sys.modules[k]
# def test_no_user_op():
# commands = ('test_ops.dummy_op', 'arg1', 'arg2')
# with pytest.raises(CliError, match='^No such module: test_ops$'):
# get_operation_and_args(commands)
def test_user_op(user_sys_path):
commands = ('test_ops.dummy_op', 'arg1', 'arg2')
res = get_operation_and_args(commands)
import test_ops
assert res == (test_ops.dummy_op, (['arg1', 'arg2'], {}))
| StarcoderdataPython |
66352 | <reponame>Muntasir-Mahmud/Django-GraphQL-Test
# Generated by Django 3.1.3 on 2021-01-09 03:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('quiz', '0002_auto_20210109_0939'),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer_text', models.CharField(max_length=255, verbose_name='Answer Text')),
('is_right', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('technique', models.IntegerField(choices=[(0, 'Multiple Choice')], default=0, verbose_name='Type of Question')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('difficulty', models.IntegerField(choices=[(0, 'Fundamental'), (1, 'Beginner'), (2, 'Intermediate'), (3, 'Advanced'), (4, 'Expert')], default=0, verbose_name='Difficulty')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('is_active', models.BooleanField(default=False, verbose_name='Active Status')),
],
),
migrations.CreateModel(
name='Quizzes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='New Quiz', max_length=255)),
('date_created', models.DateTimeField(auto_now_add=True)),
('category', models.ForeignKey(default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='quiz.category')),
],
),
migrations.DeleteModel(
name='Books',
),
migrations.AddField(
model_name='question',
name='quiz',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='question', to='quiz.quizzes'),
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='answer', to='quiz.question'),
),
]
| StarcoderdataPython |
13800 | <reponame>JihoChoi/BOJ
"""
TAG: 0-1 Knapsack Problem, Dynamic Programming (DP), O(nW)
References:
- https://www.geeksforgeeks.org/0-1-knapsack-problem-dp-10/
weights and values of n items, capacity -> max value
"""
N, W = map(int, input().split()) # number of items, capacity
weights = []
values = []
for i in range(N):
w, v = map(int, input().split())
weights.append(w)
values.append(v)
def knapsack(W, weights, values, n):
dp = [[0 for x in range(W+1)] for x in range(n+1)]
for i in range(n+1):
for w in range(W+1):
if i == 0 or w == 0:
dp[i][w] = 0
elif weights[i-1] <= w:
dp[i][w] = max(values[i-1] + dp[i-1][w - weights[i-1]], dp[i-1][w])
else:
dp[i][w] = dp[i-1][w]
return dp[n][W]
print(knapsack(W, weights, values, N))
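# Worked example (illustrative): with N=2, W=10 and items (weight, value) =
# (5, 10) and (4, 7), knapsack(10, [5, 4], [10, 7], 2) returns 17, since both
# items fit (5 + 4 <= 10).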
# Naive
"""
def knapsack(W, weights, values, n):
if n == 0 or W == 0: # base
return 0
if (weights[n-1] > W):
return knapsack(W, weights, values, n-1)
else:
return max(
values[n-1] + knapsack(W - weights[n-1], weights, values, n-1),
knapsack(W, weights, values, n-1)
)
"""
| StarcoderdataPython |
194450 | <gh_stars>0
import json
import logging
from pathlib import Path
from random import shuffle
import discord
from discord.ext import commands
log = logging.getLogger(__name__)
game_recs = []
# Populate the list `game_recs` with resource files
for rec_path in Path("bot/resources/evergreen/game_recs").glob("*.json"):
with rec_path.open(encoding='utf-8') as file:
data = json.load(file)
game_recs.append(data)
shuffle(game_recs)
class RecommendGame(commands.Cog):
"""Commands related to recommending games."""
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
self.index = 0
@commands.command(name="recommendgame", aliases=['gamerec'])
async def recommend_game(self, ctx: commands.Context) -> None:
"""Sends an Embed of a random game recommendation."""
if self.index >= len(game_recs):
self.index = 0
shuffle(game_recs)
game = game_recs[self.index]
self.index += 1
author = self.bot.get_user(int(game['author']))
# Creating and formatting Embed
embed = discord.Embed(color=discord.Colour.blue())
if author is not None:
embed.set_author(name=author.name, icon_url=author.avatar_url)
embed.set_image(url=game['image'])
embed.add_field(name='Recommendation: ' + game['title'] + '\n' + game['link'], value=game['description'])
await ctx.send(embed=embed)
def setup(bot: commands.Bot) -> None:
"""Loads the RecommendGame cog."""
bot.add_cog(RecommendGame(bot))
| StarcoderdataPython |
151891 | <filename>memae-anomaly-detection/models/loss.py
import torch
def get_memory_loss(memory_att):
"""The memory attribute should be with size [batch_size, memory_dim, reduced_time_dim, f_h, f_w]
loss = \sum_{t=1}^{reduced_time_dim} (-mem) * (mem + 1e-12).log()
averaged on each pixel and each batch
2. average over batch_size * fh * fw
"""
s = memory_att.shape
memory_att = (-memory_att) * (memory_att + 1e-12).log() # [batch_size, memory_dim, time, fh, fw]
memory_att = memory_att.sum() / (s[0] * s[-2] * s[-1])
return memory_att
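# Quick sanity check (illustrative): a uniform attention map maximizes the
# entropy term, e.g.
#   att = torch.full((2, 100, 4, 8, 8), 1.0 / 100)
#   get_memory_loss(att)  # positive scalar; peakier (sparser) maps give smaller values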
def get_unnormalized_data(x_input, x_recons, mean, std):
x_input = x_input.mul(std).add(mean)
x_recons = x_recons.mul(std).add(mean)
return x_input, x_recons
def get_reconstruction_loss(x_input, x_recons, mean=0.5, std=0.5):
"""Calculates the reconstruction loss between x_input and x_recons
x_input: [batch_size, ch, time, imh, imw]
x_recons: [batch_size, ch, time, imh, imw]
"""
batch_size, ch, time_dimension, imh, imw = x_input.shape
    x_input, x_recons = get_unnormalized_data(x_input, x_recons, mean, std)
recons_loss = (x_input - x_recons) ** 2
recons_loss = recons_loss.sum().sqrt()/(batch_size * imh * imw)
return recons_loss | StarcoderdataPython |
4833497 | import datetime
import json
import logging
import stripe
from braces.views import LoginRequiredMixin
from channels import Channel
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
HttpResponseRedirect,
)
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import (
ListView,
FormView,
)
from gargoyle import gargoyle
from contacts.models import Book, BookOwner
import payments as payment_constants
from .forms import PaymentForm
from .models import (
StripeCustomer,
StripeSubscription,
)
logger = logging.getLogger("loggly_logs")
class PaymentView(LoginRequiredMixin, FormView):
template_name = "pay.html"
form_class = PaymentForm
def dispatch(self, request, *args, **kwargs):
# TODO: Make this work even when the user isn't logged in
if not gargoyle.is_active('enable_payments', request):
return HttpResponseRedirect('/pricing')
self.plan = request.GET.get('plan')
plan = payment_constants.PLANS[self.plan]
if not self.plan or not plan['is_active']:
messages.warning(self.request, "Please select a plan")
url = reverse("pricing")
return HttpResponseRedirect(url)
if not self.request.user.is_authenticated():
url = "{}?next=/pay/%3Fplan%3D{}".format(reverse("account_signup"), self.plan)
return HttpResponseRedirect(url)
try:
if self.request.user.is_authenticated():
self.book = self.request.current_book
except Book.DoesNotExist:
self.book = None
return super(PaymentView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
context = super(PaymentView, self).get_context_data(*args, **kwargs)
if not self.book or self.book.owner != self.request.user:
messages.info(
self.request,
"Sorry, only the contact book owner can add a subscription. Please contact them, or email <EMAIL>",
)
context['owns_book'] = self.book and self.book.owner == self.request.user
context['selected_book'] = self.book
context['plan'] = payment_constants.PLANS[self.plan]
context['stripe_public_key'] = settings.STRIPE_PUBLIC_KEY
return context
def form_valid(self, form):
books = Book.objects.filter(owner=self.request.user)
book_ids = books.values('id')
if self.request.POST.get('book') and self.request.POST.get('book') in book_ids:
# User has submitted a book to pay for
self.subscribe_book(
book=books.filter(id=self.request.POST.get('book'))[0],
plan=self.plan,
token=form.cleaned_data['stripeToken'],
email=form.cleaned_data['stripeEmail'],
)
elif self.book:
# Use the book from the URL
self.subscribe_book(
book=self.book,
plan=self.plan,
token=form.cleaned_data['stripeToken'],
email=form.cleaned_data['stripeEmail'],
)
else:
# Use the first book we find
# TODO: If greater than 1, don't assume
book = books[0]
self.subscribe_book(
book=book,
plan=self.plan,
token=form.cleaned_data['stripeToken'],
email=form.cleaned_data['stripeEmail'],
)
return super(PaymentView, self).form_valid(form)
def get_success_url(self):
return reverse('contacts-list', kwargs={
'book': self.request.current_book.id,
})
def subscribe_book(self, book, plan, token, email):
# TODO: Allow for multiple customers, updates, books
stripe.api_key = settings.STRIPE_SECRET_KEY
response = stripe.Customer.create(
source=token,
plan=payment_constants.PLANS[plan]['stripe_id'],
email=email,
)
paid_until = datetime.datetime.fromtimestamp(
response.subscriptions.data[0].current_period_end,
)
customer = StripeCustomer.objects.create(
email=response.email,
stripe_id=response.id,
default_source=response.default_source,
user=self.request.user,
)
logger.info("Stripe customer created", extra={
'stripe_customer': response.id,
'customer_id': customer.id,
})
subscription = StripeSubscription.objects.create(
customer=customer,
book=book,
stripe_id=response.subscriptions.data[0].id,
paid_until=paid_until,
plan=payment_constants.PLANS[plan]['stripe_id'],
)
logger.info("Stripe subscription created", extra={
'stripe_customer': response.id,
'book_id': book.id,
'customer_id': customer.id,
'subscription_id': subscription.id,
})
book.paid_until = paid_until
book.plan = plan
book.save()
messages.success(
self.request,
"Thanks for your payment. Your book is now active!",
)
@csrf_exempt
def stripe_webhook_view(request):
body_unicode = request.body.decode('utf-8')
event_json = json.loads(body_unicode)
message = {
'id': event_json['id'],
'event': event_json,
}
Channel('process-stripe-webhook').send(message)
return HttpResponse(status=200)
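# The webhook body is Stripe's standard event JSON, e.g.
# {"id": "evt_...", "type": "...", "data": {...}} (shape shown for illustration);
# only the event id plus the full payload are forwarded to the
# 'process-stripe-webhook' channel above.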
| StarcoderdataPython |
1757511 | <filename>funcionalidades/consulta.py<gh_stars>0
import requests
class Consulta:
def consultar(self):
self.resultado = ''
cep = self.tela_buscador.input_cep.text()
url = requests.get(f'https://viacep.com.br/ws/{cep}/json/')
try:
endereco = url.json()
if "erro" in endereco:
self.tela_buscador.resultado.setText("CEP INVÁLIDO")
else:
for k, v in endereco.items():
self.resultado += f'{k.upper()} : {v}\n'
self.tela_buscador.resultado.setText(self.resultado)
        except ValueError:  # response body was not valid JSON
self.tela_buscador.resultado.setText("CEP INVÁLIDO")
| StarcoderdataPython |
4839216 | <filename>examples/groups.py
from roonapi import RoonApi
appinfo = {
"extension_id": "python_roon_test",
"display_name": "Python library for Roon",
"display_version": "1.0.0",
"publisher": "gregd",
"email": "<EMAIL>",
}
# Can be None if you don't yet have a token
token = open("mytokenfile").read()
# token= None
# Take a look at examples/discovery if you want to use discovery.
server = "192.168.3.60"
roonapi = RoonApi(appinfo, token, server)
# get all zones (as dict)
zones = roonapi.zones
outputs = roonapi.outputs
for (k, v) in outputs.items():
zone_id = v["zone_id"]
output_id = k
display_name = v["display_name"]
is_group_main = roonapi.is_group_main(output_id)
is_grouped = roonapi.is_grouped(output_id)
grouped_zone_names = roonapi.grouped_zone_names(output_id)
print(
display_name,
"grouped?",
is_grouped,
"is_main?",
is_group_main,
"grouped_zone_names:",
grouped_zone_names,
)
# save the token for next time
with open("mytokenfile", "w") as f:
f.write(roonapi.token)
| StarcoderdataPython |
82752 | """Folder structure of the platform."""
class Folders:
"""Class containing the relevant folders of the platforms.
The members without underscore at the beginning are the exported (useful)
ones. The tests folders are omitted.
"""
_ROOT = "/opt/dike/"
_CODEBASE = _ROOT + "codebase/"
_DATA = _ROOT + "data/"
_DATA_USER_CONFIGURATION = _DATA + "configuration/"
_DATA_DATASET = _DATA + "dataset/"
_DATA_DATASET_FILES = _DATA_DATASET + "files/"
_DATA_DATASET_LABELS = _DATA_DATASET + "labels/"
_DATA_DATASET_OTHERS = _DATA_DATASET + "others/"
_DATA_KEYSTORE = _DATA + "keystore/"
_DATA_SUBORDINATE = _DATA + "subordinate/"
_DATA_SUBORDINATE_QILING = _DATA_SUBORDINATE + "qiling/"
_SCRIPTS = _CODEBASE + "scripts/"
BENIGN_FILES = _DATA_DATASET_FILES + "benign/"
MALICIOUS_FILES = _DATA_DATASET_FILES + "malware/"
COLLECTED_FILES = _DATA_DATASET_FILES + "collected/"
CUSTOM_DATASETS = _DATA_DATASET_LABELS + "custom/"
MODELS = _DATA + "models/"
MODEL_FMT = MODELS + "{}/"
MODEL_PREPROCESSORS_FMT = MODEL_FMT + "preprocessors/"
QILING_LOGS = _DATA_SUBORDINATE_QILING + "logs/"
QILING_ROOTS = _DATA_SUBORDINATE_QILING + "rootfs/"
GHIDRA = "/opt/ghidra/"
GHIDRA_PROJECT = _DATA_SUBORDINATE + "ghidra/"
class Files:
"""Class containing the relevant files of the platform.
The members without underscore at the beginning are the exported (useful)
ones. The tests files are omitted.
"""
# Access of the private members only in the scope of this class, that is in
# the same configuration module. pylint: disable=protected-access
API_CATEGORIZATION = Folders._DATA_USER_CONFIGURATION + "_apis.yaml"
USER_CONFIGURATION = Folders._DATA_USER_CONFIGURATION + "configuration.yaml"
SSL_CERTIFICATE = Folders._DATA_KEYSTORE + "certificate.pem"
SSL_PRIVATE_KEY = Folders._DATA_KEYSTORE + "key.pem"
MALWARE_LABELS = Folders._DATA_DATASET_LABELS + "malware.csv"
BENIGN_LABELS = Folders._DATA_DATASET_LABELS + "benign.csv"
MALWARE_HASHES = Folders._DATA_DATASET_OTHERS + "malware_hashes.txt"
VT_DATA_FILE = Folders._DATA_DATASET_OTHERS + "vt_data.csv"
MODEL_DATASET_FMT = Folders.MODEL_FMT + "dataset.csv"
MODEL_PREPROCESSED_FEATURES_FMT = (Folders.MODEL_FMT
+ "preprocessed_features.csv")
MODEL_REDUCED_FEATURES_FMT = Folders.MODEL_FMT + "reduced_features.csv"
MODEL_REDUCTION_MODEL_FMT = Folders.MODEL_FMT + "reduction.model"
MODEL_PREPROCESSOR_MODEL_FMT = Folders.MODEL_PREPROCESSORS_FMT + "{}.model"
MODEL_ML_MODEL_FMT = Folders.MODEL_FMT + "ml.model"
MODEL_TRAINING_CONFIGURATION_FMT = (Folders.MODEL_FMT
+ "training_configuration.yml")
MODEL_EVALUATION_FMT = Folders.MODEL_FMT + "evaluation.json"
MODEL_PREDICTION_CONFIGURATION_FMT = (Folders.MODEL_FMT
+ "prediction_configuration.json")
GHIDRA_HEADLESS_ANALYZER = Folders.GHIDRA + "support/analyzeHeadless"
GHIDRA_EXTRACTION_SCRIPT = Folders._SCRIPTS + "delegate_ghidra.py"
| StarcoderdataPython |
1795792 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from pytunegen.constants import *
import random
import time
class TuneGen:
"""Tune generator"""
def __init__(self, seed = None, music_length = 50,
scale = None, bpm = None, time_sig = None,
note_jump_limit = 2.2, silence_percent = 1,
non_repeat_percent = 65):
if seed:
self.randseed = seed
else:
self.randseed = random.randint(0, 2**31)
random.seed(self.randseed)
# times the frequency of last note
self.note_jump_limit = note_jump_limit
# ratio of silences to notes
self.silence_percent = silence_percent
# ratio of non-repeated bars to all bars
# (set to 100 or above to inhibit repeated bars)
self.non_repeat_percent = non_repeat_percent
# number of bars
self.music_length = music_length
# get bpm, time signature, scale, silence percent, length
if bpm:
self.bpm_current = int(bpm)
else:
self.bpm_current = bpms[random.randint(0, len(bpms) - 1)]
if time_sig:
if int(time_sig[2]) <= 0 or not (int(time_sig[2]) % 2) == 0:
print("pyTuneGen Error: Illegal time signature.")
print("Lower time signature must be a positive even integer.")
return -1
self.time_sig_upper = int(time_sig[0])
self.time_sig_lower = int(time_sig[2])
self.time_sig = int(time_sig[0])/int(time_sig[2])
else:
self.time_sig_upper = random.randint(1, 8)
self.time_sig_lower = 4 # this can be hard-coded for now
self.time_sig = self.time_sig_upper / self.time_sig_lower
self.time_sig_display = (str(self.time_sig_upper) + "/" +
str(self.time_sig_lower))
if scale:
self.scale_current_name = scale
self.scale_current = scales.get(self.scale_current_name)
else:
self.scale_current_name = random.choice(list(scales.keys()))
self.scale_current = scales.get(self.scale_current_name)
def generate(self):
durations_current = []
bar_num = 0
music = []
while True:
duration_selection_loop = 0
while durations_current == [] or sum(durations_current) < self.time_sig:
duration_selection_loop += 1
next_duration = random.choice(list(note_durations.keys()))
next_duration_value = note_durations.get(next_duration)
if (sum(durations_current) + next_duration_value <= self.time_sig and not
(duration_selection_loop <= 15 and next_duration_value <= 0.125)):
durations_current.append(next_duration_value)
notes_current = []
notes_current_names = []
note_last = None
note_current_name = None
note_current = None
for duration in durations_current:
silence = False
# sometimes put silence instead of a note
# because sometimes silence tells more than a C4
if random.randint(1, 100) < self.silence_percent:
silence = True
# make sure the upcoming note in the bar is not the same
# with the previous one, and also make sure we don't jump
# between high and low notes too aggressively
while note_last == None or (note_current == note_last or
note_current - note_last >= note_last * (self.note_jump_limit - 1) or
note_last - note_current >= note_last / (self.note_jump_limit - 1)):
note_current_name = random.choice(list(self.scale_current.keys()))
note_current = self.scale_current.get(note_current_name)
if note_last == None:
break
if not silence:
note_last = note_current
notes_current.append(note_current)
notes_current_names.append(note_current_name)
else:
notes_current.append("silence")
notes_current_names.append("silence")
if random.uniform(0, 100) > self.non_repeat_percent:
bar_repeat_current = random.choice(bar_repeats)
else:
bar_repeat_current = 1
bar = Bar(notes_current_names, durations_current, bar_repeat_current)
for bar_repeat in range(bar_repeat_current):
bar_num += 1
for i in range(len(durations_current)):
bar.notes.append(Note(notes_current[i], durations_current[i], notes_current[i] == "silence"))
music.append(bar)
durations_current = []
if bar_num >= self.music_length:
return music
class Bar:
"""Representation of a single bar"""
def __init__(self, note_names, durations, bar_repeat):
self.note_names = note_names
self.durations = durations
self.bar_repeat = bar_repeat
self.notes = []
class Note:
"""Representation of a single note"""
def __init__(self, pitch, duration, silence):
self.pitch = pitch
self.duration = duration
self.silence = silence
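# Illustrative usage (assumes the defaults shipped in pytunegen.constants):
#   gen = TuneGen(seed=42, music_length=8, time_sig="4/4")
#   for bar in gen.generate():
#       print(bar.note_names, bar.durations, bar.bar_repeat)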
| StarcoderdataPython |
1735936 | ##########################################################################
# MediPy - Copyright (C) Universite de Strasbourg, 2011
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
def update_meta(self, other):
self.__name__ = other.__name__
self.__doc__ = other.__doc__
self.__dict__.update(other.__dict__)
return self
class LateBindingProperty(property):
""" Late-binding property, allow easier usage of properties with derived
classes.
Regular properties require to redefine the property in derived classes,
while LateBindingProperty does not :
>>> class C(object):
...
... def getx(self):
... print 'C.getx'
... return self._x
...
... def setx(self, x):
... print 'C.setx'
... self._x = x
...
... x = LateBindingProperty(getx, setx)
>>> class D(C):
...
... def setx(self, x):
... print 'D.setx'
... super(D, self).setx(x)
>>> c = C()
>>> c.x = 1
C.setx
>>> c.x
C.getx
1
>>> d = D()
>>> d.x = 1
D.setx
C.setx
>>> d.x
C.getx
1
Source : http://code.activestate.com/recipes/408713/#c1
"""
def __new__(cls, fget=None, fset=None, fdel=None, doc=None):
if fget is not None:
def __get__(obj, objtype=None, name=fget.__name__):
fget = getattr(obj, name)
return fget()
fget = update_meta(__get__, fget)
if fset is not None:
def __set__(obj, value, name=fset.__name__):
fset = getattr(obj, name)
return fset(value)
fset = update_meta(__set__, fset)
if fdel is not None:
def __delete__(obj, name=fdel.__name__):
fdel = getattr(obj, name)
return fdel()
fdel = update_meta(__delete__, fdel)
return property(fget, fset, fdel, doc)
| StarcoderdataPython |
1761393 | from django import forms
from .models import FormModel
class Forms(forms.ModelForm):
    class Meta:
        # in hindsight should have copied this App... then made...now server busted
        # model/fields filled in so the ModelForm is actually usable;
        # "__all__" is an assumption, not from the original source
        model = FormModel
        fields = "__all__"
3280049 | <filename>convert.py
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
from yolov3_tf2.models import YoloV3, YoloV3Tiny
from yolov3_tf2.utils import load_darknet_weights
import tensorflow as tf
flags.DEFINE_string('weights', './data/yolov3.weights', 'path to weights file')
flags.DEFINE_string('output', './checkpoints/yolov3.tf', 'path to output')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
def main(_argv):
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# if len(physical_devices) > 0:
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
if FLAGS.tiny:
yolo = YoloV3Tiny(classes=FLAGS.num_classes)
else:
yolo = YoloV3(classes=FLAGS.num_classes)
yolo.summary()
logging.info('model created')
load_darknet_weights(yolo, FLAGS.weights, FLAGS.tiny)
logging.info('weights loaded')
img = np.random.random((1, 320, 320, 3)).astype(np.float32)
output = yolo(img)
logging.info('sanity check passed')
yolo.save_weights(FLAGS.output)
logging.info('weights saved')
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
| StarcoderdataPython |
3249510 | from Node import Node
import random as r
def accept(currentIteration, iterations):
return r.random() < (currentIteration / iterations)
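# accept() returns True with probability currentIteration/iterations, i.e. a
# linear annealing schedule: it fires rarely at the start of a run and almost
# always near the end.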
class Operation(object):
BACK_MUTATION = 0
DELETE_MUTATION = 1
SWITCH_NODES = 2
PRUNE_REGRAFT = 3
NUMBER = 4
def __init__(self, type, node_name_1 = None, node_name_2 = None, node_name_3 = None):
self.type = type
self.node_name_1 = node_name_1
self.node_name_2 = node_name_2
self.node_name_3 = node_name_3
@classmethod
def tree_operation(cls, helper, tree, operation):
if operation == cls.BACK_MUTATION:
# back-mutation
return cls.add_back_mutation(helper, tree)
elif operation == cls.DELETE_MUTATION:
# delete random mutation
return cls.mutation_delete(helper, tree)
elif operation == cls.SWITCH_NODES:
# switch random nodes
return cls.switch_nodes(helper, tree)
elif operation == cls.PRUNE_REGRAFT:
# prune-regraft two random nodes
return cls.prune_regraft(helper, tree)
else:
raise SystemError("Something has happened while chosing an operation")
@classmethod
def add_back_mutation(cls, helper, tree):
max_losses = helper.k
# gets a list of all the nodes from cache
cached_nodes = tree.phylogeny.get_cached_content()
keys = list(cached_nodes.keys())
        # select a random node; a node without both a parent and a grandparent
        # cannot host a back mutation, so signal failure (the caller may retry)
        node = r.choice(keys)
        if node.up == None or node.up.up == None:
            return 1
# if losses list has reached its maximum, then we can't procede
if (len(tree.losses_list) >= max_losses):
return 1
# select our candidates amongst the ancestors
candidates = [p for p in node.iter_ancestors() if (p.loss == False) and (p.mutation_id != -1)]
if len(candidates) == 0:
return 1
# selecting one random ancestor
candidate = r.choice(candidates)
# Ensuring we have no more than k mutations per mutation type
if (tree.k_losses_list[candidate.mutation_id] >= helper.k):
return 1
# If the mutation is already lost in the current tree, no way to remove it again
if (node.is_mutation_already_lost(candidate.mutation_id)):
return 1
        # wrap the candidate mutation in a deletion (loss) node
node_deletion = Node(candidate.name, None, candidate.mutation_id, True)
tree.losses_list.append(node_deletion)
tree.k_losses_list[node_deletion.mutation_id] += 1
# saving parent before detaching
par = node.up
current = node.detach()
par.add_child(node_deletion)
node_deletion.add_child(current)
current.fix_for_losses(helper, tree)
tree.operation = cls(cls.BACK_MUTATION, node_name_1=candidate.name, node_name_2=node_deletion.name)
return 0
@classmethod
def mutation_delete(cls, helper, tree):
if (len(tree.losses_list) == 0):
return 1
node_delete = r.choice(tree.losses_list)
tree.operation = cls(cls.DELETE_MUTATION, node_name_1=node_delete.name)
node_delete.delete_b(helper, tree)
return 0
@classmethod
def switch_nodes(cls, helper, tree):
cached_nodes = tree.phylogeny.get_cached_content()
keys = list(cached_nodes.keys())
u = None
while (u == None or u.up == None or u.loss):
u = r.choice(keys)
keys.remove(u)
v = None
keys = list(cached_nodes.keys())
while (v == None or v.up == None or v.loss or u.name == v.name):
v = r.choice(keys)
keys.remove(v)
tree.operation = cls(cls.SWITCH_NODES, node_name_1=u.name, node_name_2=v.name)
u.swap(v)
u.fix_for_losses(helper, tree)
v.fix_for_losses(helper, tree)
return 0
@classmethod
def prune_regraft(cls, helper, tree):
nodes_list = tree.phylogeny.get_cached_content()
prune_res = -1
while prune_res != 0:
keys = list(nodes_list.keys())
u = None
while (u == None or u.up == None or u.loss):
u = r.choice(keys)
keys.remove(u)
v = None
keys = list(nodes_list.keys())
while (v == None or v.up == None or v.loss):
v = r.choice(keys)
keys.remove(v)
prune_res = u.prune_and_reattach(v)
tree.operation = cls(cls.PRUNE_REGRAFT, node_name_1=u.name, node_name_2=v.name)
u.fix_for_losses(helper, tree)
return 0
@classmethod
def prob(cls, I, E, genotypes, helper, particle, data=None):
p = 0
if I == 0:
if E == 0:
# TODO: sigma
if data is not None:
data.true_negative += 1
p = 1 - helper.beta
elif E == 1:
if data is not None:
data.false_negatives += 1
p = helper.alpha
else:
raise SystemError("Unknown value for E: %d" % E)
elif I == 1:
if E == 0:
if data is not None:
data.false_positives += 1
p = helper.beta
elif E == 1:
if data is not None:
data.true_positive += 1
p = 1 - helper.alpha
else:
raise SystemError("Unknown value for E: %d" % E)
elif I == 2:
if data:
data.missing_values += 1
p = 1
else:
raise SystemError("Unknown value for I: %d" % I)
return p
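    # Example (illustrative): with helper.alpha = 0.1 (false-negative rate) and
    # helper.beta = 0.001 (false-positive rate), prob(1, 1, ...) returns
    # 1 - alpha = 0.9 and prob(0, 1, ...) returns alpha = 0.1; missing
    # observations (I == 2) always contribute probability 1.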
| StarcoderdataPython |
70532 | """Message types."""
from functools import lru_cache
from typing import Union, Optional, Type
from typing_extensions import get_args
from . import message_definitions as defs
from ..constants import MessageId
MessageDefinition = Union[
defs.HeartbeatRequest,
defs.HeartbeatResponse,
defs.DeviceInfoRequest,
defs.DeviceInfoResponse,
defs.StopRequest,
defs.GetStatusRequest,
defs.GetStatusResponse,
defs.EnableMotorRequest,
defs.DisableMotorRequest,
defs.MoveRequest,
defs.SetupRequest,
defs.WriteToEEPromRequest,
defs.ReadFromEEPromRequest,
defs.ReadFromEEPromResponse,
defs.AddLinearMoveRequest,
defs.GetMoveGroupRequest,
defs.GetMoveGroupResponse,
defs.ExecuteMoveGroupRequest,
defs.ClearAllMoveGroupsRequest,
defs.MoveCompleted,
defs.SetMotionConstraints,
defs.GetMotionConstraintsRequest,
defs.GetMotionConstraintsResponse,
defs.WriteMotorDriverRegister,
defs.ReadMotorDriverRequest,
defs.ReadMotorDriverResponse,
defs.ReadPresenceSensingVoltageRequest,
defs.ReadPresenceSensingVoltageResponse,
]
@lru_cache(maxsize=None)
def get_definition(message_id: MessageId) -> Optional[Type[MessageDefinition]]:
"""Get the message type for a message id.
Args:
message_id: A message id
Returns: The message definition for a type
"""
# Dumb linear search, but the result is memoized.
for i in get_args(MessageDefinition):
if i.message_id == message_id:
# get args returns Tuple[Any...]
return i # type: ignore[no-any-return]
return None
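# Example (illustrative): get_definition(defs.HeartbeatRequest.message_id)
# returns defs.HeartbeatRequest; thanks to lru_cache the linear scan only
# happens once per distinct MessageId.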
| StarcoderdataPython |
1745545 | <reponame>tvuong123/espnet<filename>espnet/__init__.py
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('espnet').version
except Exception:
__version__ = '(Not installed from setup.py)'
del pkg_resources
| StarcoderdataPython |
11924 | <filename>tardis/model/tests/test_csvy_model.py
import numpy as np
import numpy.testing as npt
import tardis
import os
from astropy import units as u
from tardis.io.config_reader import Configuration
from tardis.model import Radial1DModel
import pytest
DATA_PATH = os.path.join(tardis.__path__[0],'model','tests','data')
@pytest.fixture(scope="module", params=['config_csvy_full.yml',
'config_csvy_nocsv_branch85.yml',
'config_csvy_nocsv_uniform.yml',
'config_csvy_nocsv_powerlaw.yml',
'config_csvy_nocsv_exponential.yml',
'config_csvy_full_rad.yml'])
def full_filename(request):
return os.path.join(DATA_PATH, request.param)
def test_compare_models(full_filename):
tardis_config = Configuration.from_yaml(full_filename)
csvy_model = Radial1DModel.from_csvy(tardis_config)
config_model = Radial1DModel.from_config(tardis_config)
csvy_model_props = csvy_model.get_properties().keys()
config_model_props = config_model.get_properties().keys()
npt.assert_array_equal(csvy_model_props, config_model_props)
for prop in config_model_props:
csvy_model_val = csvy_model.get_properties()[prop]
config_model_val = config_model.get_properties()[prop]
if prop == 'homologous_density':
npt.assert_array_almost_equal(csvy_model_val.density_0.value, config_model_val.density_0.value)
npt.assert_array_almost_equal(csvy_model_val.time_0.value, config_model_val.time_0.value)
else:
if hasattr(config_model_val, 'value'):
config_model_val = config_model_val.value
csvy_model_val = csvy_model_val.value
npt.assert_array_almost_equal(csvy_model_val, config_model_val)
| StarcoderdataPython |
3221549 | import os
import time
import boto3
from botocore.exceptions import ClientError
from botocore.client import Config
from django.utils.crypto import get_random_string
from storages.utils import setting, lookup_env
def get_bucket_name():
return setting("AWS_STORAGE_BUCKET_NAME") or lookup_env(
["DJANGO_AWS_STORAGE_BUCKET_NAME"]
)
def get_access_key_id():
return setting("AWS_S3_ACCESS_KEY_ID", setting("AWS_ACCESS_KEY_ID")) or lookup_env(
["AWS_S3_ACCESS_KEY_ID", "AWS_ACCESS_KEY_ID"]
)
def get_secret_access_key():
return setting(
"AWS_S3_SECRET_ACCESS_KEY", setting("AWS_SECRET_ACCESS_KEY")
) or lookup_env(["AWS_S3_SECRET_ACCESS_KEY", "AWS_SECRET_ACCESS_KEY"])
def get_endpoint_url():
return setting("AWS_S3_ENDPOINT_URL") or lookup_env(
["AWS_S3_ENDPOINT_URL", "AWS_ENDPOINT_URL"]
)
def file_form_upload_dir():
return setting("FILE_FORM_UPLOAD_DIR", "temp_uploads")
def get_client():
signature_version = setting("AWS_S3_SIGNATURE_VERSION", None)
region_name = setting("AWS_S3_REGION_NAME", None)
while True:
try:
# https://github.com/boto/boto3/issues/801
return boto3.client(
"s3",
endpoint_url=get_endpoint_url(),
aws_access_key_id=get_access_key_id(),
aws_secret_access_key=get_secret_access_key(),
config=Config(
signature_version=signature_version, region_name=region_name
),
)
        except Exception:  # retry transient client-construction failures (see boto3 issue 801)
time.sleep(0.01)
def exists(client, bucket_name, name):
"""
Check if key already exists in bucket.
Code adapted from storage.backends.s3boto3
"""
try:
client.head_object(Bucket=bucket_name, Key=name)
return True
except ClientError:
return False
def get_alternative_name(file_root, file_ext):
"""
Return an alternative filename, by adding an underscore and a random 7
character alphanumeric string (before the file extension, if one
exists) to the filename.
Code adapted from django.storage.get_alternative_name
"""
return f"{file_root}_{get_random_string(7)}{file_ext}"
def get_available_name(client, bucket_name, name):
"""
Return a filename that's free on the target storage system and
available for new content to be written to.
Code adapted from django.storage.get_available_name
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, generate an alternative filename
# until it doesn't exist.
while exists(client, bucket_name, name):
# file_ext includes the dot.
name = os.path.join(dir_name, get_alternative_name(file_root, file_ext))
return name
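# Example (illustrative): if "uploads/photo.jpg" already exists in the bucket,
# get_available_name(client, bucket, "uploads/photo.jpg") keeps retrying with
# names like "uploads/photo_ab12cd3.jpg" until one is free.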
| StarcoderdataPython |
114182 | import string
class strprocess:
"""add all extra processing method """
def __init__(self):
self.data=""
self.tags=["</p>","</br>","<br/>","<br>","<p>","</P>"]
self.marks=["/","?","-","!","@","#","$","%","^","*","(",")",";","{","}","~"]
    def makehtml(self,data):
        # wrap the data in <html> tags
        self.data = "<html>" + data + "</html>"
def addplus(self,title):
self.title=string.replace(title,"-","+")
return self.title
def removeHTML(self,page):
self.page=page
for tag in self.tags:
self.page=string.replace(self.page,tag," ")
return self.page
def removemarks(self,title):
self.title=title
for mark in self.marks:
self.title=string.replace(self.title,mark,"")
return self.title
if __name__ =="__main__":
pro=strprocess()
print pro.removeHTML("<p>aniket<br>mukeshbhai</br>patel</P>")
print pro.removemarks("Meherbaan - Bang Bang - WapKing.Cc")
| StarcoderdataPython |
3304473 | <reponame>NaulaN/PyNoSoucisGame_Prototype
from time import time
from pygame.time import Clock
class Fps( object ):
""" Fps( ) -> Frames rate dependency. """
fps_foreground = 60
fps_background = 30
fps_limit = 60
__target_fps = 60
__s = 0
enable_fps_on_screen = False
benchmark = False
    def __init__( self ):
        self.prev_time = time( )  # must exist before the first get_delta_time( ) call
        self.deltaTime = self.get_delta_time( )
        self.__fps_get_list = [ ]
        self.__msg = { }
        # tick( ) -> Set a tick at your Game and limit the FPS
        self.tick = lambda: self.clock.tick( self.fps_limit )
        # get_fps( ) -> Append the current fps to the list
        self.get_fps = lambda: self.__fps_get_list.append( int( self.clock.get_fps( ) ) )
def __call__(self):
self.clock = Clock()
self.prev_time = time()
return self
def get_delta_time(self):
""" get_delta_time( ) -> Return the time between two frames. """
self.deltaTime = float( time( )-self.prev_time )*self.__target_fps
self.prev_time = time( )
return self.deltaTime
    def show_fps( self, surf, font, loc = ( 0, 0 ), antialiasing = True, color = ( 255, 255, 255 ) ):
        """ show_fps( ) -> Draw the FPS on the screen.
            :param surf: Specify the surface to draw on.
            :param font: Specify a font for drawing the FPS and other information.
            :param color: Specify a color for the font.
            :param antialiasing: Specify whether the text is rendered with antialiasing.
            :param loc: Specify the location of the text. """
x, y = loc
if int( self.__s )%15 == 0:
# If there are a "0" Fps per accident in the list
self.__fps_get_list.remove( 0 ) if self.__fps_get_list.count( 0 ) >= 1 else None
self.get_fps( )
index = len( self.__fps_get_list )-1
current_fps = self.__fps_get_list[ index ]
if self.benchmark:
min_fps = min( self.__fps_get_list )
max_fps = max( self.__fps_get_list )
avg_fps = sum( self.__fps_get_list ) // len( self.__fps_get_list )
self.__msg = { 'FPS': f'FPS: { current_fps }',
'MIN': f'Min: { min_fps }',
'MAX': f'Max: { max_fps }',
'AVG': f'Avg: { avg_fps }' }
else:
self.__msg = { 'FPS': f'{current_fps} Fps' }
        self.__s += 1*self.deltaTime  # was self.s, an attribute that never exists
surf.blit( font.render( self.__msg[ 'FPS' ], antialiasing, color ), ( x, y ) )
if self.benchmark:
surf.blit( font.render( self.__msg[ 'MIN' ], antialiasing, color ), ( x, y+17 ) )
surf.blit( font.render( self.__msg[ 'MAX' ], antialiasing, color ), ( x, y+33 ) )
surf.blit( font.render( self.__msg[ 'AVG' ], antialiasing, color ), ( x, y+49 ) )
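# Typical usage sketch (illustrative; assumes a pygame-style main loop):
#   fps = Fps()()            # __call__ creates the Clock and resets prev_time
#   while running:
#       dt = fps.get_delta_time()   # scale movement by dt
#       fps.tick()                  # cap the frame rate at fps_limit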
| StarcoderdataPython |
1782895 | # Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base import *
import json
import toml
from itertools import chain
class GoBGPContainer(BGPContainer):
SHARED_VOLUME = '/root/shared_volume'
QUAGGA_VOLUME = '/etc/quagga'
def __init__(self, name, asn, router_id, ctn_image_name='gobgp',
log_level='debug', zebra=False):
super(GoBGPContainer, self).__init__(name, asn, router_id,
ctn_image_name)
self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
self.log_level = log_level
self.prefix_set = None
self.neighbor_set = None
self.bgp_set = None
self.default_policy = None
self.zebra = zebra
def _start_gobgp(self):
zebra_op = ''
c = CmdBuffer()
c << '#!/bin/bash'
c << '/go/bin/gobgpd -f {0}/gobgpd.conf -l {1} -p {2} > ' \
'{0}/gobgpd.log 2>&1'.format(self.SHARED_VOLUME, self.log_level, zebra_op)
cmd = 'echo "{0:s}" > {1}/start.sh'.format(c, self.config_dir)
local(cmd, capture=True)
cmd = "chmod 755 {0}/start.sh".format(self.config_dir)
local(cmd, capture=True)
self.local("{0}/start.sh".format(self.SHARED_VOLUME), flag='-d')
def _start_zebra(self):
cmd = 'cp {0}/zebra.conf {1}/'.format(self.SHARED_VOLUME, self.QUAGGA_VOLUME)
self.local(cmd)
cmd = '/usr/lib/quagga/zebra -f {0}/zebra.conf'.format(self.QUAGGA_VOLUME)
self.local(cmd, flag='-d')
def run(self):
super(GoBGPContainer, self).run()
if self.zebra:
self._start_zebra()
self._start_gobgp()
return self.WAIT_FOR_BOOT
def _get_as_path(self, path):
asps = (p['as_paths'] for p in path['attrs'] if
p['type'] == BGP_ATTR_TYPE_AS_PATH and 'as_paths' in p
and p['as_paths'] != None)
asps = chain.from_iterable(asps)
asns = (asp['asns'] for asp in asps)
return list(chain.from_iterable(asns))
def _get_nexthop(self, path):
for p in path['attrs']:
if p['type'] == BGP_ATTR_TYPE_NEXT_HOP or p['type'] == BGP_ATTR_TYPE_MP_REACH_NLRI:
return p['nexthop']
def _trigger_peer_cmd(self, cmd, peer):
if peer not in self.peers:
            raise Exception('peer {0} not found'.format(peer.router_id))
peer_addr = self.peers[peer]['neigh_addr'].split('/')[0]
cmd = 'gobgp neighbor {0} {1}'.format(peer_addr, cmd)
self.local(cmd)
def disable_peer(self, peer):
self._trigger_peer_cmd('disable', peer)
def enable_peer(self, peer):
self._trigger_peer_cmd('enable', peer)
def reset(self, peer):
self._trigger_peer_cmd('reset', peer)
def softreset(self, peer, rf='ipv4', type='in'):
self._trigger_peer_cmd('softreset{0} -a {1}'.format(type, rf), peer)
def get_local_rib(self, peer, prefix='', rf='ipv4'):
if peer not in self.peers:
            raise Exception('peer {0} not found'.format(peer.router_id))
peer_addr = self.peers[peer]['neigh_addr'].split('/')[0]
cmd = 'gobgp -j neighbor {0} local {1} -a {2}'.format(peer_addr, prefix, rf)
output = self.local(cmd, capture=True)
ret = json.loads(output)
for d in ret:
for p in d["paths"]:
p["nexthop"] = self._get_nexthop(p)
p["as_path"] = self._get_as_path(p)
return ret
def get_global_rib(self, prefix='', rf='ipv4'):
cmd = 'gobgp -j global rib {0} -a {1}'.format(prefix, rf)
output = self.local(cmd, capture=True)
ret = json.loads(output)
for d in ret:
for p in d["paths"]:
p["nexthop"] = self._get_nexthop(p)
p["as_path"] = self._get_as_path(p)
return ret
def _get_adj_rib(self, adj_type, peer, prefix='', rf='ipv4'):
if peer not in self.peers:
            raise Exception('peer {0} not found'.format(peer.router_id))
peer_addr = self.peers[peer]['neigh_addr'].split('/')[0]
cmd = 'gobgp neighbor {0} adj-{1} {2} -a {3} -j'.format(peer_addr,
adj_type,
prefix, rf)
output = self.local(cmd, capture=True)
ret = [p["paths"][0] for p in json.loads(output)]
for p in ret:
p["nexthop"] = self._get_nexthop(p)
p["as_path"] = self._get_as_path(p)
return ret
def get_adj_rib_in(self, peer, prefix='', rf='ipv4'):
return self._get_adj_rib('in', peer, prefix, rf)
def get_adj_rib_out(self, peer, prefix='', rf='ipv4'):
return self._get_adj_rib('out', peer, prefix, rf)
def get_neighbor_state(self, peer):
if peer not in self.peers:
            raise Exception('peer {0} not found'.format(peer.router_id))
peer_addr = self.peers[peer]['neigh_addr'].split('/')[0]
cmd = 'gobgp -j neighbor {0}'.format(peer_addr)
output = self.local(cmd, capture=True)
return json.loads(output)['info']['bgp_state']
def clear_policy(self):
self.policies = {}
for info in self.peers.itervalues():
info['policies'] = {}
self.prefix_set = []
self.neighbor_set = []
self.statements = []
def set_prefix_set(self, ps):
self.prefix_set = ps
def set_neighbor_set(self, ns):
self.neighbor_set = ns
def set_bgp_defined_set(self, bs):
self.bgp_set = bs
def create_config(self):
self._create_config_bgp()
if self.zebra:
self._create_config_zebra()
def _create_config_bgp(self):
config = {'Global': {'GlobalConfig': {'As': self.asn, 'RouterId': self.router_id}}}
for peer, info in self.peers.iteritems():
afi_safi_list = []
version = netaddr.IPNetwork(info['neigh_addr']).version
if version == 4:
afi_safi_list.append({'AfiSafiName': 'ipv4-unicast'})
elif version == 6:
afi_safi_list.append({'AfiSafiName': 'ipv6-unicast'})
else:
                raise Exception('invalid ip address version. {0}'.format(version))
if info['evpn']:
afi_safi_list.append({'AfiSafiName': 'l2vpn-evpn'})
afi_safi_list.append({'AfiSafiName': 'encap'})
afi_safi_list.append({'AfiSafiName': 'rtc'})
if info['flowspec']:
afi_safi_list.append({'AfiSafiName': 'ipv4-flowspec'})
afi_safi_list.append({'AfiSafiName': 'l3vpn-ipv4-flowspec'})
afi_safi_list.append({'AfiSafiName': 'ipv6-flowspec'})
afi_safi_list.append({'AfiSafiName': 'l3vpn-ipv6-flowspec'})
n = {'NeighborConfig':
{'NeighborAddress': info['neigh_addr'].split('/')[0],
'PeerAs': peer.asn,
              'AuthPassword': info['passwd'],  # key name assumed; the original value was redacted
},
'AfiSafis': {'AfiSafiList': afi_safi_list}
}
if info['passive']:
n['Transport'] = {'TransportConfig': {'PassiveMode': True}}
if info['is_rs_client']:
n['RouteServer'] = {'RouteServerConfig': {'RouteServerClient': True}}
if info['is_rr_client']:
clusterId = self.router_id
if 'cluster_id' in info and info['cluster_id'] is not None:
clusterId = info['cluster_id']
n['RouteReflector'] = {'RouteReflectorConfig' : {'RouteReflectorClient': True,
'RouteReflectorClusterId': clusterId}}
f = lambda typ: [p for p in info['policies'].itervalues() if p['type'] == typ]
import_policies = f('import')
export_policies = f('export')
in_policies = f('in')
f = lambda typ: [p['default'] for p in info['policies'].itervalues() if p['type'] == typ and 'default' in p]
default_import_policy = f('import')
default_export_policy = f('export')
default_in_policy = f('in')
if len(import_policies) + len(export_policies) + len(in_policies) + len(default_import_policy) \
+ len(default_export_policy) + len(default_in_policy) > 0:
n['ApplyPolicy'] = {'ApplyPolicyConfig': {}}
if len(import_policies) > 0:
n['ApplyPolicy']['ApplyPolicyConfig']['ImportPolicy'] = [p['name'] for p in import_policies]
if len(export_policies) > 0:
n['ApplyPolicy']['ApplyPolicyConfig']['ExportPolicy'] = [p['name'] for p in export_policies]
if len(in_policies) > 0:
n['ApplyPolicy']['ApplyPolicyConfig']['InPolicy'] = [p['name'] for p in in_policies]
def f(v):
if v == 'reject':
return 1
elif v == 'accept':
return 0
raise Exception('invalid default policy type {0}'.format(v))
if len(default_import_policy) > 0:
n['ApplyPolicy']['ApplyPolicyConfig']['DefaultImportPolicy'] = f(default_import_policy[0])
if len(default_export_policy) > 0:
n['ApplyPolicy']['ApplyPolicyConfig']['DefaultExportPolicy'] = f(default_export_policy[0])
if len(default_in_policy) > 0:
n['ApplyPolicy']['ApplyPolicyConfig']['DefaultInPolicy'] = f(default_in_policy[0])
if 'Neighbors' not in config:
config['Neighbors'] = {'NeighborList': []}
config['Neighbors']['NeighborList'].append(n)
config['DefinedSets'] = {}
if self.prefix_set:
config['DefinedSets']['PrefixSets'] = {'PrefixSetList': [self.prefix_set]}
if self.neighbor_set:
config['DefinedSets']['NeighborSets'] = {'NeighborSetList': [self.neighbor_set]}
if self.bgp_set:
config['DefinedSets']['BgpDefinedSets'] = self.bgp_set
policy_list = []
for p in self.policies.itervalues():
policy = {'Name': p['name'],
'Statements':{'StatementList': p['statements']}}
policy_list.append(policy)
if len(policy_list) > 0:
config['PolicyDefinitions'] = {'PolicyDefinitionList': policy_list}
if self.zebra:
config['Global']['Zebra'] = {'Enabled': True,
'RedistributeRouteTypeList':[{'RouteType': 'connect'}],}
with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new config]'.format(self.name))
print colors.yellow(indent(toml.dumps(config)))
f.write(toml.dumps(config))
def _create_config_zebra(self):
c = CmdBuffer()
c << 'hostname zebra'
c << 'password <PASSWORD>'
c << 'log file {0}/zebra.log'.format(self.QUAGGA_VOLUME)
c << 'debug zebra packet'
c << 'debug zebra kernel'
c << 'debug zebra rib'
c << ''
with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new config]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
def reload_config(self):
daemon = []
daemon.append('gobgpd')
if self.zebra:
daemon.append('zebra')
for d in daemon:
cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
self.local(cmd)
for v in self.routes.itervalues():
if v['rf'] == 'ipv4' or v['rf'] == 'ipv6':
cmd = 'gobgp global '\
'rib add {0} -a {1}'.format(v['prefix'], v['rf'])
elif v['rf'] == 'ipv4-flowspec' or v['rf'] == 'ipv6-flowspec':
cmd = 'gobgp global '\
'rib add match {0} then {1} -a {2}'.format(' '.join(v['matchs']), ' '.join(v['thens']), v['rf'])
else:
                raise Exception('unsupported route family: {0}'.format(v['rf']))
self.local(cmd)
| StarcoderdataPython |
32775 | <filename>code/HHV2020_07/Adafruit_Trinket_Neopixel_Strip_Cycle/main.py
import board, time
import neopixel
# Define Neopixels
LED7_PIN = board.D0 # pin that the NeoPixel is connected to
# Most NeoPixels have a color order of GRB or GRBW; some use RGB
LED7_ORDER = neopixel.GRB # pixel color channel order
# Create NeoPixel object
LED6 = neopixel.NeoPixel(board.D1, 1, pixel_order=neopixel.RGB)
LED7 = neopixel.NeoPixel(LED7_PIN, 3, pixel_order=LED7_ORDER)
# Turn down brightness to 30%
LED7.brightness = 0.3
# Function to color cycle NeoPixels
def wheel(pos):
if (pos < 0) or (pos > 255):
return (0, 0, 0)
if (pos < 85):
return (int(pos * 3), int(255 - (pos*3)), 0)
elif (pos < 170):
pos -= 85
return (int(255 - pos*3), 0, int(pos*3))
else:
pos -= 170
return (0, int(pos*3), int(255 - pos*3))
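# A few sample values of wheel() for reference (computed from the branches above):
#   wheel(0)   -> (0, 255, 0)
#   wheel(85)  -> (255, 0, 0)
#   wheel(170) -> (0, 0, 255)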
# Iteration Var
i = 0
### MAIN LOOP ###
while True:
LED6[0] = (0, 0, 0) # turn off 8mm to focus on strip
LED7[0] = wheel(i & 255)
LED7[1] = wheel(i & 255)
LED7[2] = wheel(i & 255)
time.sleep(0.05)
i = (i+1) % 256 # run from 0 to 255
| StarcoderdataPython |
157601 | import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
from ahfhalotools.objects import Cluster
import ahfhalotools.filetools as ft
import ahfhalotools.analysis as analysis
## -----------Load Cluster Instances--------------##
#define base file name (these are for full files)
fileNameBaseGX = "GadgetX-NewMDCLUSTER_0001.snap_{snap:0=3d}.z{z:.3f}"
fileNameBaseGiz = "GIZMO-NewMDCLUSTER_0001.snap_{snap:0=3d}.z{z:.3f}"
fileNameBaseMus = "GadgetMUSIC-NewMDCLUSTER_0001.z{z:.3f}"
#define directory
gxdir = "GadgetX\\NewMDCLUSTER_0001\\"
gizdir = "gizmo\\"
musdir = "music\\"
#define snapshots to load
snapNosGX = np.arange(97,129)
snapNosGiz = np.arange(97,129)
# n.b. MUSIC doesn't have snapshot numbers
#get snap num to redshift map
zsMus = ft.getMusZs(musdir)
rsmapGiz = ft.getSnapNumToZMapGiz(gizdir)
rsmapGX = ft.getSnapNumToZMapGX(gxdir)
snapNosMus = np.arange(129-len(zsMus),129)
#get redshifts from snapNos
zsGX = np.array([rsmapGX[num] for num in snapNosGX])
zsGiz = np.array([rsmapGiz[num] for num in snapNosGiz])
#truncated file names
truncFileGX = "first10\\GadgetX\\GX_first10_snap{snap:0=3d}.z{z:.3f}"
truncFileGiz = "first10\\Gizmo\\giz_first10_snap{snap:0=3d}.z{z:.3f}"
truncFileMus = "first10\\Music\\mus_first10_snap.z{z:.3f}"
#locations of enclosed halo files
gxEncHaloFileBase = "gadgetXenchalos//GadgetX-NewMDCLUSTER_0001.halo{haloID}.BDP_enchalos"
gizEncHaloFileBase = "gizmoenchalos//GIZMO-NewMDCLUSTER_0001.halo{haloID}.BDP_enchalos"
musEncHaloFileBase = "gadgetMUSICenchalos//GadgetMUSIC-NewMDCLUSTER_0001.halo{haloID}.BDP_enchalos"
#CLUSTER OBJECTS
gxCluster = Cluster(truncFileGX, snapNosGX, zsGX)
gizCluster = Cluster(truncFileGiz, snapNosGiz, zsGiz)
musCluster = Cluster(truncFileMus, snapNosMus, zsMus)
clusters = [gxCluster,gizCluster,musCluster]
clusterNames = ["GadgetX","Gizmo","GadgetMUSIC"]
clusterColors = ["C0","C1","C2"]
#gxCluster.generateEnclosedHaloFilesFromChain(128000000000001,inputFiles,gxEncHaloFileBase)
#gizCluster.generateEnclosedHaloFilesFromChain(128000000000001,inputFilesGiz,gizEncHaloFileBase)
#musCluster.generateEnclosedHaloFilesFromChain(128000000000001,inputFilesMus,musEncHaloFileBase)
#LOAD ENCLOSED HALO FILES
gxCluster.loadEnclosedHaloFilesFromChain(128000000000001,gxEncHaloFileBase)
gizCluster.loadEnclosedHaloFilesFromChain(128000000000001,gizEncHaloFileBase)
musCluster.loadEnclosedHaloFilesFromChain(128000000000001,musEncHaloFileBase)
##----------PLOT-----------##
## comparing sims on deltaM/M over time for total mass, gas mass and Stellar
# mass
fig, axes = plt.subplots(1,3,figsize=(15,6),sharey='row')
haloID = 128000000000001
axisTitles = ["Total Mass", "Gas Mass", "Stellar Mass"]
quantities = ["Mvir", "M_gas", "M_star"]
#plot on each axis
for i in range(3):
ax = axes[i]
#loop through each simulation
for j in range(3):
color = clusterColors[j]
cluster = clusters[j]
## can get delta values in two ways:
#age, deltaM = cluster.funcOfAgeDeltaHaloData(haloID,quantities[i])
age, deltaM = cluster.funcOfAgeHaloData(haloID,"delta"+quantities[i])
_, M = cluster.funcOfAgeHaloData(haloID,quantities[i])
#M is going to be 1 element longer than deltaM, as we cannot get delta
# for a quantity with unknown previous value
# note that delta M should be divided by M of halo of *earlier* snapshot
M = M[:-1]
deltaMoverM = deltaM/M
#deltaMoverM = deltaM
#plot
ax.plot(age,deltaMoverM, label=clusterNames[j],c=color)
#add in line to represent time of largest merge
mergeZ, mergeSize = cluster.getLargestMergeZInRange(haloID,0,1,scheme="halodata",fractional=False)
mergeTime = analysis.tfromz(mergeZ)
ls = ["--","-.",":"][j]
ax.axvline(mergeTime,c=color,alpha=0.7,lw=(4-j),ls=ls)
#set labels
ax.set_title(axisTitles[i])
ax.set_xlabel("age, $t$ / Gyr")
ax.set_ylabel("$\\frac{\\Delta M}{M}$")
ax.legend()
#annotate merge lines
pad=15
topCoord = ax.get_ylim()[1]
toppad = 0.02*(topCoord-ax.get_ylim()[0])
ax.annotate('Largest merger events\nusing halo data',xy=(mergeTime,topCoord-toppad),xytext=(-pad,0),xycoords="data",
textcoords="offset points", ha="right", va="top", c = 'dimgrey')
#set figure title
fig.suptitle("$\\frac{\\Delta M}{M}$ as a function of age")
plt.tight_layout()
plt.show()
| StarcoderdataPython |
1705119 | from yarl import URL
class Urls:
def __init__(self):
self.protocol = "http://"
self.base_url = "ethosdistro.com/"
self.panel_id = "{panel_id}."
self.no_pool_base_url = self.protocol + self.base_url
self.json_query_param = {"json": "yes"}
# Panel only URLs
self.get_panel = self.protocol + self.panel_id + self.base_url
    def get_base_url(self) -> str:
        # renamed from base_url(): the instance attribute set in __init__
        # shadows a method of the same name, which made it uncallable
        return self.base_url
def get_panel_url(self, panel_id: str) -> URL:
return URL(self.get_panel.format(panel_id=panel_id)) % self.json_query_param
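# Example (a sketch): with yarl, "url % params" appends the query string, so
#   Urls().get_panel_url("abc123")
# evaluates to URL('http://abc123.ethosdistro.com/?json=yes').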
| StarcoderdataPython |
1634475 | <gh_stars>0
# -*- coding: utf-8 -*-
from sklearn.base import BaseEstimator, TransformerMixin # the TransformerMixin to ensure fit_transform()
import pandas as pd
import numpy as np
#we collect the data
Dataset = pd.read_csv('https://raw.githubusercontent.com/M-MSilva/Predict-NBA-player-Points-End-to-end-Project/master/Dataset/NBA_Athletes.csv',
on_bad_lines='skip',decimal='.', thousands=',',encoding='utf-8')
#unify duplicated position labels (e.g. 'C-F' vs 'F-C')
Dataset['POS'].replace({'C-F':'F-C','G-F':'F-G'},inplace=True)
#new dataset that will be used for prediction
NbaDataset = Dataset.drop("APG", axis=1)
#some data must become float
NbaDataset = NbaDataset.astype({'GP':'float','FTA':'float','2PA':'float','3PA':'float'})
#we delete all unnecessary data
NbaDataset = NbaDataset[['MIN%','MPG','TOPG','GP','USG%','FTA','2PA','3PA','BPG']].copy()
Nba_num = NbaDataset.select_dtypes(include=[np.number])#only the numerical data for later
#we must first get the column indices
names = "GP", "FTA", "2PA", "3PA","TOPG"
gp_ix, fta_ix, pa2_ix, pa3_ix, topg_ix = [Nba_num.columns.get_loc(i) for i in names ]
#create the transformer; TransformerMixin supplies fit_transform()
class CreateCombinedAttributes(BaseEstimator, TransformerMixin):
def __init__(self, add_PA2PG=True,add_FTAPG=True,add_PA3PG=True):
self.add_PA2PG = add_PA2PG
self.add_FTAPG = add_FTAPG
self.add_PA3PG = add_PA3PG
def fit(self, X, y=None):
return self #to implement the fit method
def transform(self, X):
TOT = X[:, topg_ix]*X[:, gp_ix]
if (self.add_PA2PG & self.add_FTAPG & self.add_PA3PG):
PA2PG = X[:, pa2_ix] / X[:, gp_ix]
FTAPG = X[:, fta_ix] / X[:, gp_ix]
PA3PG = X[:, pa3_ix] / X[:, gp_ix]
return np.c_[X, FTAPG, PA3PG,
PA2PG,TOT]
else:
return np.c_[X, TOT]
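# Example usage (a sketch; assumes the NbaDataset prepared above):
#   attr_adder = CreateCombinedAttributes()
#   Nba_extra = attr_adder.transform(NbaDataset.values)
# This appends per-game free-throw, 3-point and 2-point attempts plus
# total turnovers (TOPG * GP) as extra columns.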
| StarcoderdataPython |
3383625 | """
This is the basic training script for the baseline MRI or CT Model
It is used to train the source segmenter
"""
import os
import sys
import logging
import datetime
import argparse
import tensorflow as tf
from tensorflow.python import debug as tf_debug
import source_segmenter as drn
import numpy as np
from lib import _read_lists
logging.basicConfig(filename = "general_log", level = logging.DEBUG)
currtime = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def main():
train_fid = "./lists/mr_train_list"
val_fid = "./lists/mr_val_list"
output_path = "./tmp_exps/mr_baseline"
restore = True # set True if resume training from stored model
restored_path = output_path
lr_update_flag = False # Set True if want to use a new learning rate for fine-tuning
num_cls = 5
batch_size = 10
training_iters = 10
epochs = 5000
checkpoint_space = 1500
image_summeris = True
optimizer = 'adam'
cost_kwargs = {
"cross_flag": True, # use cross entropy loss
"miu_cross": 1.0,
"dice_flag": True, # use dice loss
"miu_dice": 1.0,
"regularizer": 1e-4
}
opt_kwargs = {
"learning_rate": 1e-3
}
    try:
        os.makedirs(output_path)
    except OSError:
        print("folder exists!")
net = drn.Full_DRN(channels = 3, batch_size = batch_size, n_class = num_cls, image_summeris = image_summeris, cost_kwargs = cost_kwargs)
print("Network has been built!")
train_list = _read_lists(train_fid)
val_list = _read_lists(val_fid)
trainer = drn.Trainer(net, train_list = train_list, val_list = val_list, num_cls = num_cls, \
batch_size = batch_size, opt_kwargs = opt_kwargs, checkpoint_space = checkpoint_space,\
optimizer = optimizer, lr_update_flag = lr_update_flag)
# start tensorboard before getting started
command1 = "tensorboard --logdir=" + output_path + " --port=6999 " + " &"
os.system(command1)
print("Now start training...")
if restore is True:
trainer.train(output_path = output_path, training_iters = training_iters, epochs = epochs, restore = True, restored_path = restored_path)
else:
trainer.train(output_path = output_path, training_iters = training_iters, epochs = epochs)
if __name__ == "__main__":
main() | StarcoderdataPython |
3275704 | <filename>codeforces/santaclaus-748c.py<gh_stars>1-10
n = int(input())
moves = input()
rev = { 'R':'L', 'U':'D', 'L':'R', 'D':'U' }
seen = set()
min_pts = 1
for move in moves:
if rev[move] in seen:
min_pts += 1
seen = set()
seen.add(move)
print (min_pts)
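# Worked trace (a sketch): for moves "RUL" -> 'R' and 'U' are added to seen;
# 'L' has rev['L'] == 'R' in seen, so min_pts becomes 2 and seen resets;
# the program prints 2.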
| StarcoderdataPython |
3314169 | <gh_stars>0
# Image Swipe 2
__all__ = [
"renderer",
"imguiImage"
] | StarcoderdataPython |
3283412 | from rest_framework import serializers
from .models import Role
class RoleSerializer(serializers.ModelSerializer):
class Meta:
model = Role
fields = '__all__'
| StarcoderdataPython |
196565 | <filename>neutron_tempest_plugin/scenario/test_dhcp.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from paramiko import ssh_exception as ssh_exc
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
CONF = config.CONF
LOG = log.getLogger(__name__)
class DHCPTest(base.BaseTempestTestCase):
credentials = ['primary', 'admin']
force_tenant_isolation = False
@classmethod
def resource_setup(cls):
super(DHCPTest, cls).resource_setup()
cls.rand_name = data_utils.rand_name(
cls.__name__.rsplit('.', 1)[-1])
cls.network = cls.create_network(name=cls.rand_name)
cls.subnet = cls.create_subnet(
network=cls.network, name=cls.rand_name)
cls.router = cls.create_router_by_client()
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.keypair = cls.create_keypair(name=cls.rand_name)
cls.security_group = cls.create_security_group(name=cls.rand_name)
cls.create_loginable_secgroup_rule(cls.security_group['id'])
@utils.requires_ext(extension='extra_dhcp_opt', service='network')
@decorators.idempotent_id('58f7c094-1980-4e03-b0d3-6c4dd27217b1')
def test_extra_dhcp_opts(self):
"""This test case tests DHCP extra options configured for Neutron port.
Test is checking just extra option "15" which is domain-name
according to the RFC 2132:
https://tools.ietf.org/html/rfc2132#section-5.3
To test that option, there is spawned VM connected to the port with
configured extra_dhcp_opts and test asserts that search domain name is
configured inside VM in /etc/resolv.conf file
"""
test_domain = "test.domain"
extra_dhcp_opts = [
{'opt_name': 'domain-name',
'opt_value': '"%s"' % test_domain}]
port = self.create_port(
network=self.network, name=self.rand_name,
security_groups=[self.security_group['id']],
extra_dhcp_opts=extra_dhcp_opts)
floating_ip = self.create_floatingip(port=port)
server = self.create_server(
flavor_ref=CONF.compute.flavor_ref,
image_ref=CONF.compute.image_ref,
key_name=self.keypair['name'],
networks=[{'port': port['id']}])
self.wait_for_server_active(server['server'])
self.wait_for_guest_os_ready(server['server'])
try:
ssh_client = ssh.Client(
floating_ip['floating_ip_address'],
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
vm_resolv_conf = ssh_client.exec_command(
"cat /etc/resolv.conf")
self.assertIn(test_domain, vm_resolv_conf)
except (lib_exc.SSHTimeout,
ssh_exc.AuthenticationException,
AssertionError) as error:
LOG.debug(error)
self._log_console_output([server])
self._log_local_network_status()
raise
| StarcoderdataPython |
1796280 | <gh_stars>1-10
import aioredis
from .settings import get_broker_settings
async def connect_redis() -> aioredis.Redis:
broker_settings = get_broker_settings()
return await aioredis.create_redis(f"redis://{broker_settings.host}:{broker_settings.port}/{broker_settings.db}")
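# Example (a sketch, to be awaited from async code):
#   redis = await connect_redis()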
| StarcoderdataPython |
3286386 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import copy
import time
import json
from functools import wraps
from django.contrib import admin
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.db.models import Sum
from django.template.response import TemplateResponse
from django.utils import timezone
from django.utils.encoding import force_text
from django.forms.models import model_to_dict
from idcops.lib.tasks import log_action
from idcops.lib.utils import (
diff_dict, shared_queryset,
get_content_type_for_model,
get_deleted_objects
)
from idcops.mixins import construct_menus, system_menus_key
from idcops.exports import make_to_excel
from idcops.models import Comment, Online, Client, Option
from idcops.lib.tasks import get_related_client_name
SOFT_DELETE = getattr(settings, 'SOFT_DELETE', False)
general = ['download', 'actived', 'reactive']
unit = ['download']
pdu = ['download']
device = ['download', ]
online = ['download', 'movedown']
offline = ['download', 'removeup', 'delete']
syslog = ['download', 'actived']
comment = ['download', 'actived', 'delete']
rack = ['download', 'release', 'distribution', 'delete']
configure = ['delete', ]
general_has_delete = ['download', 'actived', 'reactive', 'delete']
client = general_has_delete
jumpline = general_has_delete
option = general_has_delete
document = general_has_delete
goods = general_has_delete
testapply = general_has_delete
inventory = ['download', 'outbound', 'reoutbound', 'delete']
user = general_has_delete
idc = general_has_delete
def check_multiple_clients(func):
@wraps(func)
def wrapper(request, queryset):
model = queryset.model
opts = model._meta
if hasattr(model, 'client'):
verify = queryset.values('client').order_by('client').distinct()
if verify.count() > 1:
mesg = f"不允许操作多个不同客户的 {opts.verbose_name}"
return mesg
return func(request, queryset)
return wrapper
def construct_model_meta(request, model, title=None):
opts = model._meta
meta = {}
if title is None:
title = ''
meta['logo'] = request.user.onidc
meta['title'] = f"{title} {opts.verbose_name} {request.user.onidc.name}"
meta['icon'] = opts.icon
meta['model_name'] = opts.model_name
meta['verbose_name'] = opts.verbose_name
user_menus = cache.get_or_set(
system_menus_key + str(request.user.id) +
str(len(request.user.get_all_permissions())),
construct_menus(request.user),
180
)
return meta, user_menus
def construct_context(request, queryset, action, action_name):
meta, menus = construct_model_meta(request, queryset.model, action_name)
context = dict(
meta=meta,
menus=menus,
action=action,
action_name=action_name,
queryset=queryset,
)
return context
def download(request, queryset):
return make_to_excel(queryset)
download.description = "导出"
download.icon = 'fa fa-download'
download.required = 'exports'
@check_multiple_clients
def html_print(request, queryset):
model = queryset.model
opts = model._meta
action = sys._getframe().f_code.co_name
action_name = "打印"
verify = queryset.values('status').order_by('status').distinct()
if verify.count() > 1:
mesg = f"不允许打印多个不同状态的 {opts.verbose_name}"
return mesg
    # pad the printout with blank rows so the table shows at least 10 entries
    pad = 10 - queryset.count()
    extra_for = list(range(pad)) if pad > 0 else False
_extra = dict(
extra_for=extra_for,
ticket=int(time.time()),
)
context = construct_context(request, queryset, action, action_name)
context.update(_extra)
templates = ["%s/print.html" % (opts.model_name), "base/print.html"]
return TemplateResponse(request, templates, context)
html_print.description = "打印"
html_print.icon = 'fa fa-print'
html_print.required = 'view'
@check_multiple_clients
def removeup(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "取消下架"
exclude = queryset.filter(rack__actived=False)
if exclude.exists():
mesg = "有设备所在机柜未使用, 无法取消下架"
return mesg
if request.POST.get('post'):
for obj in queryset:
o = copy.deepcopy(obj)
obj.actived = True
obj.status = 'online'
obj.operator = request.user
lastunits = copy.deepcopy(obj.units.all())
lastpdus = copy.deepcopy(obj.pdus.all())
ucan_recovery = False not in [u.actived for u in lastunits]
pcan_recovery = False not in [p.actived for p in lastpdus]
if ucan_recovery:
obj.units.all().update(actived=False, operator=obj.operator)
else:
verb = f"无法恢复 {force_text(obj)} 的U位"
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk, action_flag="系统通知",
message=verb, content=verb
)
obj.units.clear()
if pcan_recovery:
obj.pdus.all().update(actived=False, operator=obj.operator)
else:
obj.pdus.clear()
obj.save()
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
message = json.dumps(list(diffs.keys()))
old_units = [force_text(u) for u in lastunits]
old_pdus = [force_text(p) for p in lastpdus]
diffs.update({'last_units': old_units, 'last_pdus': old_pdus})
content = json.dumps(diffs)
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk, action_flag=action_name,
message=message, content=content
)
return None
context = construct_context(request, queryset, action, action_name)
return TemplateResponse(request, 'base/base_confirmation.html', context)
removeup.description = "取消下架"
removeup.icon = 'fa fa-level-up'
@check_multiple_clients
def movedown(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "下架"
if request.POST.get('post'):
for obj in queryset:
o = copy.deepcopy(obj)
obj.actived = False
obj.status = 'offline'
obj.operator = request.user
obj.units.all().update(actived=True, operator=obj.operator)
obj.pdus.all().update(actived=True, operator=obj.operator)
obj.save()
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk, action_flag=action_name,
message=json.dumps(list(diffs.keys())),
content=json.dumps(diffs)
)
return None
context = construct_context(request, queryset, action, action_name)
return TemplateResponse(request, 'base/base_confirmation.html', context)
movedown.description = "下架"
movedown.icon = 'fa fa-level-down'
@check_multiple_clients
def actived(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "停用"
if request.POST.get('post'):
for obj in queryset:
o = copy.deepcopy(obj)
obj.actived = False
obj.save()
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk,
action_flag="停用",
message=json.dumps(list(diffs.keys())),
content=json.dumps(diffs)
)
return None
context = construct_context(request, queryset, action, action_name)
return TemplateResponse(request, 'base/base_confirmation.html', context)
actived.description = "停用"
actived.icon = 'fa fa-ban'
@check_multiple_clients
def reclaim(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "回收"
if request.POST.get('post'):
for obj in queryset:
o = copy.deepcopy(obj)
obj.actived = False
obj.save()
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk,
action_flag=action_name,
message=json.dumps(list(diffs.keys())),
content=json.dumps(diffs)
)
return None
context = construct_context(request, queryset, action, action_name)
return TemplateResponse(request, 'base/base_confirmation.html', context)
reclaim.description = "回收"
reclaim.icon = 'fa fa-ban'
@check_multiple_clients
def cancel_reclaim(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "取消回收"
if request.POST.get('post'):
for obj in queryset:
o = copy.deepcopy(obj)
obj.actived = True
obj.save()
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk,
action_flag=action_name,
message=json.dumps(list(diffs.keys())),
content=json.dumps(diffs)
)
return None
context = construct_context(request, queryset, action, action_name)
return TemplateResponse(request, 'base/base_confirmation.html', context)
cancel_reclaim.description = "取消回收"
cancel_reclaim.icon = 'fa fa-check-circle-o'
@check_multiple_clients
def reactive(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "启用"
if request.POST.get('post'):
for obj in queryset:
o = copy.deepcopy(obj)
obj.actived = True
obj.save()
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk,
action_flag=action_name,
message=json.dumps(list(diffs.keys())),
content=json.dumps(diffs)
)
return None
context = construct_context(request, queryset, action, action_name)
return TemplateResponse(request, 'base/base_confirmation.html', context)
reactive.description = "启用"
reactive.icon = 'fa fa-check-circle-o'
@check_multiple_clients
def outbound(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "出库"
queryset = queryset.filter(actived=True)
if not queryset.exists():
return "选择无结果"
total = queryset.aggregate(Sum('amount'))
if request.POST.get('post') and request.POST.getlist('items'):
def construct_item(index):
obj = queryset.get(pk=int(index))
out_amount = int(request.POST.get('count-' + str(index)))
out_serials = request.POST.getlist('sn-' + str(index))
copy_needed = True
if int(out_amount) == obj.amount:
copy_needed = False
comment = request.POST.get(('comment-' + index), None)
return obj, copy_needed, out_serials, out_amount, comment
for item in request.POST.getlist('items'):
obj, _copy, out_serials, out_amount, comment = construct_item(item)
o = copy.deepcopy(obj)
if _copy:
hold = [s for s in obj.serials.split(
',') if s not in out_serials]
obj.amount -= out_amount
obj.serials = ','.join(hold)
new_obj = copy.deepcopy(obj)
new_obj.pk = None
new_obj.amount = out_amount
new_obj.serials = ','.join(out_serials)
new_obj.actived = False
new_obj.creator = request.user
new_obj.created = timezone.datetime.now()
new_obj.operator = None
new_obj.parent = obj
new_obj.save()
comment_obj = new_obj
else:
obj.actived = False
obj.operator = request.user
comment_obj = obj
obj.save()
if comment:
Comment.objects.create(
object_repr=comment_obj, content=comment,
creator=request.user, onidc=obj.onidc)
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=comment_obj.pk,
action_flag=action_name,
message=json.dumps(list(diffs.keys())),
content=json.dumps(diffs)
)
return None
context = construct_context(request, queryset, action, action_name)
_extra = dict(total=total)
context.update(_extra)
return TemplateResponse(request, 'base/items_out.html', context)
outbound.description = "出库"
outbound.icon = 'fa fa-check'
@check_multiple_clients
def reoutbound(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "取消出库"
queryset = queryset.filter(actived=False)
if not queryset.exists():
return "查无结果"
if request.POST.get('post'):
for obj in queryset:
o = copy.deepcopy(obj)
obj.actived = True
obj.save()
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk,
action_flag=action_name,
message=json.dumps(list(diffs.keys())),
content=json.dumps(diffs)
)
return None
context = construct_context(request, queryset, action, action_name)
return TemplateResponse(request, 'base/base_confirmation.html', context)
reoutbound.description = "取消出库"
reoutbound.icon = 'fa fa-undo'
@check_multiple_clients
def release(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "释放机柜"
rack_ids = [id for id in queryset.values_list('id', flat=True)]
    # fix: materialize the ids into a list so a QuerySet value is not passed into the __in lookup
if Online.objects.filter(rack_id__in=rack_ids).exists():
mesg = "选择的机柜中仍有在线设备,无法释放"
return mesg
queryset = queryset.filter(actived=True)
if request.POST.get('post'):
for obj in queryset:
o = copy.deepcopy(obj)
if obj.client and obj.client.onlinenum() == 0:
verb = f"客户 {force_text(obj.client)} 没有在线设备, 是否终止"
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk, action_flag="系统通知",
message=verb, content=verb
)
obj.actived = False
obj.client = None
obj.cpower = 0
obj.style = None
obj.status = None
obj.operator = request.user
obj.tags.clear()
if obj.jnum() != 0:
verb = f"机柜 {force_text(obj)} 还有跳线存在, 请回收"
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk, action_flag="系统通知",
message=verb, content=verb
)
obj.save()
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk,
action_flag=action_name,
message=json.dumps(list(diffs.keys())),
content=json.dumps(diffs),
related_client=get_related_client_name(o)
)
return None
context = construct_context(request, queryset, action, action_name)
return TemplateResponse(request, 'base/base_confirmation.html', context)
release.description = "释放"
release.icon = 'fa fa-recycle'
@check_multiple_clients
def distribution(request, queryset):
action = sys._getframe().f_code.co_name
action_name = "分配机柜"
queryset = queryset.filter(actived=False)
onidc_id = request.user.onidc.id
options = Option.objects.filter(actived=True)
clients = shared_queryset(Client.objects.filter(actived=True), onidc_id)
status = shared_queryset(options.filter(flag='Rack-Status'), onidc_id)
styles = shared_queryset(options.filter(flag='Rack-Style'), onidc_id)
if request.POST.get('post') and request.POST.getlist('items'):
def construct_item(index):
obj = queryset.get(pk=int(index))
try:
client = int(request.POST.get('client-' + str(index)))
except BaseException:
client = 0
status = int(request.POST.get('status-' + str(index)))
style = int(request.POST.get('style-' + str(index)))
cpower = request.POST.get('cpower-' + str(index))
comment = request.POST.get(('comment-' + index), None)
return obj, client, status, style, cpower, comment
for item in request.POST.getlist('items'):
obj, client, status, style, cpower, _comment = construct_item(item)
o = copy.deepcopy(obj)
if client != 0:
obj.client_id = client
obj.status_id = status
obj.style_id = style
obj.cpower = cpower
obj.actived = True
obj.save()
diffs = diff_dict(model_to_dict(o), model_to_dict(obj))
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk,
action_flag=action_name,
message=json.dumps(list(diffs.keys())),
content=json.dumps(diffs)
)
return None
context = construct_context(request, queryset, action, action_name)
_extra = dict(clients=clients, status=status, styles=styles)
context.update(_extra)
return TemplateResponse(request, 'rack/distribution.html', context)
distribution.description = "分配"
distribution.icon = 'fa fa-puzzle-piece'
def delete(request, queryset):
model = queryset.model
opts = model._meta
action = sys._getframe().f_code.co_name
action_name = "删除"
modeladmin = admin.site._registry.get(model)
# queryset = queryset.filter(actived=False)
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
# using = router.db_for_write(modeladmin.model)
deletable_objects, model_count, perms_needed, protected = \
get_deleted_objects(queryset, request, modeladmin.admin_site)
if request.POST.get('post') and not protected:
if perms_needed:
raise PermissionDenied
if queryset.count():
for obj in queryset:
log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj, True).pk,
object_id=obj.pk,
action_flag="删除"
)
if not SOFT_DELETE:
queryset.delete()
else:
queryset.update(deleted=True, actived=False)
return None
if len(queryset) == 1:
objects_name = force_text(opts.verbose_name)
else:
objects_name = force_text(opts.verbose_name_plural)
meta, menus = construct_model_meta(request, model, action_name)
context = dict(
objects_name=objects_name,
deletable_objects=[deletable_objects],
model_count=dict(model_count).items(),
queryset=queryset,
perms_lacking=perms_needed,
protected=protected,
opts=opts,
meta=meta,
action=action,
action_name=action_name,
menus=menus,
)
request.current_app = modeladmin.admin_site.name
return TemplateResponse(request, 'base/delete_confirmation.html', context)
delete.description = "删除"
delete.icon = 'fa fa-trash'
delete.required = 'delete'
| StarcoderdataPython |
1677254 | import tensorflow as tf
import numpy as np
from tensorflow.keras import datasets
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tinyimagenet import *
def get_data(dataset,path_to_data=None):
aug_config={}
if dataset.lower()=='mnist':
(train_X,train_y),(test_X,test_y)=datasets.mnist.load_data()
aug_config['rotation_range']=4
mean,std=0.1307,0.3081
num_classes=10
elif dataset.lower()=='cifar10':
(train_X,train_y),(test_X,test_y)=datasets.cifar10.load_data()
aug_config['horizontal_flip']=True
aug_config['width_shift_range']=aug_config['height_shift_range']=4
mean,std=[0.4914,0.4822,0.4465],[0.2470,0.2435,0.2616]
num_classes=10
elif dataset.lower()=='cifar100':
aug_config['horizontal_flip']=True
aug_config['width_shift_range']=aug_config['height_shift_range']=4
(train_X,train_y),(test_X,test_y)=datasets.cifar100.load_data()
mean,std=[0.5071,0.4865,0.4409],[0.2673,0.2564,0.2762]
num_classes=100
elif dataset.lower()=='tinyimagenet':
aug_config['horizontal_flip']=True
aug_config['width_shift_range']=aug_config['height_shift_range']=4
train_X,train_y,test_X,test_y=load_images(path_to_data+'/tiny-imagenet-200',200)
train_X,test_X=np.float32(np.transpose(train_X,axes=(0,2,3,1))),np.float32(np.transpose(test_X,axes=(0,2,3,1)))
mean,std=[0.4802,0.4481,0.3975],[0.2770,0.2691,0.2821]
num_classes=200
train_y=tf.keras.utils.to_categorical(train_y,num_classes)
test_y=tf.keras.utils.to_categorical(test_y,num_classes)
train_X,test_X=np.divide(train_X,255),np.divide(test_X,255)
train_X,test_X=(train_X-mean)/std,(test_X-mean)/std
    train_X=np.expand_dims(train_X,axis=3) if len(train_X.shape)<4 else train_X # add a channel dimension when it is missing
test_X=np.expand_dims(test_X,axis=3) if len(test_X.shape)<4 else test_X
datagen=ImageDataGenerator(**aug_config)
datagen.fit(train_X)
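    # Example (a sketch): datagen, X_tr, y_tr, X_te, y_te = get_data('cifar10')
    # after which training can consume datagen.flow(X_tr, y_tr, batch_size=...)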
return datagen,train_X,train_y,test_X,test_y | StarcoderdataPython |
1715862 | dataset_type = 'WheatDataset'
data_root = 'global-wheat-challenge/gwhd_2021/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(type='Flip', p=0.5),
dict(
type='OneOf',
transforms=[
dict(type='GaussNoise', var_limit=50.0, p=0.5),
dict(type='ISONoise', intensity=(0.2, 0.8), p=0.5),
dict(type='MultiplicativeNoise', multiplier=(0.5, 1.1), p=0.5),
dict(type='NoOp')
],
p=0.5),
dict(
type='OneOf',
transforms=[
dict(type='GaussianBlur', p=0.5),
dict(type='Blur', p=0.5),
dict(type='NoOp')
],
p=0.5),
dict(
type='OneOf',
transforms=[
dict(type='ToGray', p=0.5),
dict(type='RGBShift', p=0.5),
dict(type='NoOp')
],
p=0.5),
dict(
type='HueSaturationValue',
hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20,
p=0.5),
dict(
type='OneOf',
transforms=[
dict(type='RandomGamma', p=0.5),
dict(type='CLAHE', p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.5),
],
p=0.5),
]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=False),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='Pad', size_divisor=32),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
min_area=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'img_norm_cfg',
'pad_shape', 'scale_factor'))
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/train.json',
img_prefix=data_root + 'images/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/test.json',
img_prefix=data_root + 'images/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/test.json',
img_prefix=data_root + 'images/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
| StarcoderdataPython |
1638856 | from .command import Command
from .scenario_data import ScenarioData
class Scenario(Command):
def __init__(self, result_collector):
super(Scenario, self).__init__(result_collector)
def parse(self, path, repetition_name=None):
self.result_collector.visit_by_scenario(ScenarioData(path[0], path[2:], repetition_name))
| StarcoderdataPython |
27496 | <filename>S2.Surface_Normal/regNormalNet/regNormalNet.py<gh_stars>100-1000
# coding: utf8
"""
@Author : <NAME>
"""
import os
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import torch
from basic.common import rdict
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product
from basic.common import add_path, env
this_dir = os.path.dirname(os.path.abspath(__file__))
add_path(this_dir+'/../lib/')
from helper import *
from model import VGG16_Trunk
from modelSE import VGG16_Trunk as VGG16SE_Trunk
# net_arch2Trunk = dict(
# vgg16 = VGG16_Trunk,
# vgg16se = VGG16SE_Trunk,
# )
net_arch2Trunk = dict(
vgg16=dict(
Sflat = VGG16_Trunk,
Sexp = VGG16SE_Trunk,
),
)
from pytorch_util.libtrain import copy_weights, init_weights_by_filling
from pytorch_util.torch_v4_feature import LocalResponseNorm # *
from pytorch_util.torch_3rd_layers import Maskout
from pytorch_util.torch_3rd_funcs import norm2unit, exp_Normalization
def cls_pred(output, topk=(1,), dim=1):
maxk = max(topk)
batch_size = output.size(0)
_, pred = output.topk(maxk, dim=dim, largest=True, sorted=True)
return pred
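# e.g. for logits of shape (B, C), cls_pred(logits, topk=(1,), dim=1) returns
# the index of the highest-scoring class per sample, with shape (B, 1).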
class _regNormalNet(nn.Module):
def __init__(self, method, net_arch='vgg16', init_weights=True):
super(_regNormalNet, self).__init__()
_Trunk = net_arch2Trunk[net_arch][method]
self.trunk = _Trunk(init_weights=init_weights)
    def forward(self, x, label):
raise NotImplementedError
#---------------------------------------------------------------------[regQuat]
class reg_Sflat_Net(_regNormalNet):
def __init__(self, net_arch='vgg16', init_weights=True):
_regNormalNet.__init__(self, 'Sflat', net_arch=net_arch, init_weights=init_weights)
# loss module
self.loss_handler = Cos_Proximity_Loss_Handler()
self.targets = ['norm']
def forward(self, x):
"""label shape (batchsize, ) """
x = self.trunk(x) # Forward Conv and Fc6,Fc7
#
batchsize = x.size(0) # x of shape (40, 3, 240, 320)
#-- Normalize coordinate to a unit
x_norm = norm2unit(x, dim=1)
Prob = edict(norm=x_norm.permute(0,2,3,1).double()) # transpose prediction from BxCxHxW to BxHxWxC order.
return Prob
def compute_loss(self, Prob, GT):
Loss, Errs = self.loss_handler.compute_loss(self.targets, Prob, GT)
_metric_ = edict(norm=Errs['norm'])
return Loss, _metric_
def compute_pred(self, Prob, encode_bit=8):
x_norm = Prob['norm']
# Get cpu data.
norm = x_norm.data.cpu().numpy().copy() # B,H,W,C
assert encode_bit in [8,16]
if encode_bit==8:
normImgs = ((norm+1)*(2**7)).astype(np.uint8) # map [-1,1] to [0,256)
else:
normImgs = ((norm+1)*(2**15)).astype(np.uint16) # map [-1,1] to [0,65535)
Pred = edict(norm=normImgs)
return Pred
#---------------------------------------------------------------------[reg_Sexp]
class reg_Sexp_Net(_regNormalNet): # Spherical exponential Problem + sign classification
def __init__(self, net_arch='vgg16', init_weights=True):
_regNormalNet.__init__(self, 'Sexp', net_arch=net_arch, init_weights=init_weights)
self.reg_n_D = 3
# Note: for a surface normal (x,z,y) (Watch out the order)
# z should always satisfy z<=0 (Surface normal should from visible surfaces)
# Thus only x,y need sign prediction.
dim_need_sign = 2
_signs = list( product(*( [(-1,1)]*dim_need_sign )) ) # [(-1, -1), (-1, 1), (1, -1), (1, 1)], with len=4
self.signs = [(x[0],-1,x[1]) for x in _signs] # y-z-x order: [(-1, -1, -1), (-1, -1, 1), (1, -1, -1), (1, -1, 1)], with len=4; z always -1
self.signs2label = odict(zip(self.signs, range(len(self.signs))))
self.label2signs = Variable( torch.DoubleTensor(self.signs) ).cuda() # make it as a Variable
self.softmax = nn.Softmax(dim=1).cuda()
# loss module
self.loss_handler_abs_norm = Cos_Proximity_Loss_Handler()
self.loss_handler_sgc_norm = Cross_Entropy_Loss_Handler()
self.targets = ['sgc_norm','abs_norm']
self.gt_targets = ['norm']
self.cost, self.sint = torch.tensor(np.cos(np.pi/4)).double().cuda(), torch.tensor(np.sin(np.pi/4)).double().cuda()
def forward(self, x):
"""label shape (batchsize, ) """
x_abs, x_sgc = self.trunk(x) # Forward Conv and Fc6,Fc7
#
batchsize = x_abs.size(0)
#-- Exp and Normalize coordinate to a unit
        x_sqr_norm = self.softmax(x_abs)
# sign category head (totally 4 category)
x_sgc_norm = x_sgc
Prob = edict(abs_norm=torch.sqrt(x_sqr_norm).permute(0,2,3,1).double(), # B,H,W,3
sgc_norm=x_sgc_norm.permute(0,2,3,1) ) # B,H,W,4
return Prob
def compute_loss(self, Prob, GT):
B,H,W,_3_ = GT.norm.size()
assert _3_==3, "Wrong dim: %s,%s,%s,%s" % (B,H,W,_3_)
# First get sign label from GT
#== Formulate squared value of quaternion
GT_abs_norm = torch.abs(GT.norm) # B,H,W,3
#== Formulate signs label of quaternion
GT_sign_norm = torch.sign(GT.norm) # B,H,W,3
#-------------------------------------
# hard coded: sign to label
#-------------------------------------
# y x label
# [-1 -1] --> 0
# [-1 1] --> 1
# [ 1 -1] --> 2
# [ 1 1] --> 3
# GT_sign_norm (B,H,W,3) in y-z-x order
GT_sign_norm[GT_sign_norm==0] = -1 # make sign of '0' as -1 (use -1 instead of 1 just because z<=0)
y_sign, x_sign = GT_sign_norm[:,:,:,0], GT_sign_norm[:,:,:,2]
y_sign += 1 # [y_sign==-1]
x_sign[x_sign==-1] = 0
GT_sgc_norm = (y_sign+x_sign).long() # data with shape with (B,H,W) index of [0,1,2,3]
# here just because compute_loss need a same key from Prob and GT,
# so we just give a fake name to GT.sqr_quat as '_GT.logsqr_norm'.
_GT = edict(abs_norm=GT_abs_norm, sgc_norm=GT_sgc_norm, mask=GT.mask) # abs_norm: (B,H,W,3) sgc_norm: (B,H,W)
Loss_abs_norm, abs_Errs = self.loss_handler_abs_norm.compute_loss(['abs_norm'], Prob, _GT)
Loss_sgc_norm = self.loss_handler_sgc_norm.compute_loss(['sgc_norm'], Prob, _GT)
# ----------------------------------------
# Compute the metric.
sign_ind = cls_pred(Prob['sgc_norm'], topk=(1,), dim=3).data.squeeze(dim=3) # B,H,W
pr_sign_norm = self.label2signs[sign_ind] # magic here: Indexing label2signs (4x3) by sign_ind (B,H,W) becomes (B,H,W,3) (10, 240, 320, 3)
pr_abs_norm = Prob['abs_norm']
_Prob = edict(norm=pr_abs_norm * pr_sign_norm) # current predicted final norm (applied sign prediction)
_Loss_norm, out_Errs = self.loss_handler_abs_norm.compute_loss(['norm'], _Prob, GT) # just borrow loss_handler_abs_norm, nothing more.
# Compute acc of classification: sign_ind vs GT_sgc_norm
mask = GT['mask']
acc = eval_cls(sign_ind[mask], GT_sgc_norm[mask])
_metric_ = edict(abs_norm = abs_Errs['abs_norm'],
norm = out_Errs['norm'] ,
sgc_norm_acc = acc ,)
# To add loss weights here.
Loss = edict( abs_norm=Loss_abs_norm['abs_norm']*10, # / 5.
sgc_norm=Loss_sgc_norm['sgc_norm'], )
return Loss, _metric_ # .update(abs_Errs)
def compute_pred(self, Prob, encode_bit=8):
x_abs_norm = Prob['abs_norm'] # B,H,W,3
x_sgc_norm = Prob['sgc_norm'] # B,H,W,4
batchsize = x_abs_norm.size(0)
#
sign_ind = cls_pred(x_sgc_norm, topk=(1,), dim=3).data.squeeze(dim=3) # .view(-1,) # B,H,W
x_sign_norm = self.label2signs[sign_ind] # magic here: Indexing label2signs (4x3) by sign_ind (B,H,W) becomes (B,H,W,3)
#
x_norm = x_abs_norm * x_sign_norm # B,H,W,3
# --------------Recover rot45 trick --------------
# Note: since we applied rot45 trick, here we recover it back
_x_norm = x_norm.detach().clone() # return a copy of x_norm without grad
_y,_z,_x = _x_norm[:,:,:,0],_x_norm[:,:,:,1],_x_norm[:,:,:,2]
y, z, x = x_norm[:,:,:,0],x_norm[:,:,:,1],x_norm[:,:,:,2]
x[:] = self.cost*_x - self.sint*_y
y[:] = self.sint*_x + self.cost*_y
# ------------------------------------------------
# Get cpu data.
norm = x_norm.data.cpu().numpy().copy() # B,H,W,C
assert encode_bit in [8,16]
if encode_bit==8:
normImgs = ((norm+1)*(2**7)).astype(np.uint8) # map [-1,1] to [0,256)
else:
normImgs = ((norm+1)*(2**15)).astype(np.uint16) # map [-1,1] to [0,65535)
Pred = edict(norm=normImgs)
return Pred
| StarcoderdataPython |
71128 | import pytest
from redis.exceptions import RedisError
from rq.exceptions import NoSuchJobError
from busy_beaver.models import Task, PostGitHubSummaryTask, PostTweetTask
MODULE_TO_TEST = "busy_beaver.models.task"
###########
# Base Task
###########
def test_create_task(session):
# Arrange
task = Task(
job_id="abcd",
name="task_created_for_test",
description="Task created for testing purposes",
)
# Act
session.add(task)
session.commit()
# Assert
assert task.job_id == "abcd"
assert task.complete is False
assert task.failed is False
def add(x, y):
return x + y
def test_run_async_task_update_progress(app, rq, session):
# Arrange
rq.job(add)
job = add.queue(5, 2)
job.meta["progress"] = 100
job.save_meta()
# Act
queued_task = Task(job_id=job.id, name="Add", description="Add task")
session.add(queued_task)
session.commit()
# Assert
assert queued_task.get_progress() == 100
def test_run_async_task_get_job_from_task(app, rq, session):
# Arrange
rq.job(add)
job = add.queue(5, 2)
queued_task = Task(job_id=job.id, name="Add", description="Add task")
session.add(queued_task)
session.commit()
# Act
retrieved_job = queued_task.get_rq_job()
# Assert
assert retrieved_job.id == job.id
@pytest.fixture
def patched_rq(patcher):
def _wrapper(replacement):
return patcher(MODULE_TO_TEST, namespace="Job", replacement=replacement)
return _wrapper
@pytest.mark.parametrize("raise_exc", [RedisError, NoSuchJobError])
def test_task_model_get_job_raises_exception(app, rq, session, patched_rq, raise_exc):
# Arrange
class FakeJob:
def __init__(self, error):
self.error = error
def fetch(self, *args, **kwargs):
raise self.error
patched_rq(FakeJob(raise_exc))
rq.job(add)
job = add.queue(5, 2)
queued_task = Task(job_id=job.id, name="Add", description="Add task")
session.add(queued_task)
session.commit()
# Act
retrieved_job = queued_task.get_rq_job()
# Assert
assert retrieved_job is None
#####################
# GitHub Summary Task
#####################
def test_post_github_summary_task(session):
# Arrange
channel_name = "test-channel"
task = PostGitHubSummaryTask(
job_id="abcd",
name="task_created_for_test",
description="Task created for testing purposes",
data={"channel_name": channel_name},
)
# Act
session.add(task)
session.commit()
# Assert
assert task.job_id == "abcd"
assert task.complete is False
assert task.failed is False
assert task.data["channel_name"] == channel_name
#################
# Post Tweet Task
#################
def test_post_tweet_task(session):
# Arrange
channel_name = "test-channel"
task = PostTweetTask(
job_id="abcd",
name="task_created_for_test",
description="Task created for testing purposes",
data={"channel_name": channel_name},
)
# Act
session.add(task)
session.commit()
# Assert
assert task.job_id == "abcd"
assert task.complete is False
assert task.failed is False
assert task.data["channel_name"] == channel_name
| StarcoderdataPython |
97492 | <gh_stars>0
import pathlib
from enum import Enum, auto
from typing import Any, Dict, List, Union
from data_to_model.type_detectors.types import SimpleType
CsvDataType = List[Dict[str, SimpleType]]
JsonDataType = Dict[str, Any]
Collection = Union[CsvDataType, Dict]
class SupportedDataTypes(Enum):
CSV = auto()
@classmethod
def from_path(cls, path: pathlib.Path) -> "SupportedDataTypes":
if path.suffix in {".csv", ".tsv", ".txt"}:
return cls.CSV
raise ValueError(f"Unsupported file type: {path.suffix}")
| StarcoderdataPython |
1668688 | <reponame>lumosan/deeplearning2018<gh_stars>0
# -*- coding: utf-8 -*-
################ Generic class ################
class Optimizer(object):
"""
Class for optimizers
"""
def __init__(self):
self.model = None
def step(self, *input):
raise NotImplementedError
def adaptive_lr(kappa=0.75, eta0=1e-5):
"""Adaptive learning rate. After creating the lr with the
values for kappa and eta0, it yields the value for the learning
rate of the next iteration. Used for (Stochastic) Gradient Descent
methods.
"""
t = 1
while True:
yield eta0 * t ** -kappa
t += 1
################ Implementations ################
class SGD(Optimizer):
"""Stochastic Gradient Descent with adaptive or fixed learning rate"""
def __init__(self, a_lr=None):
"""
INPUT
a_lr: Expects None or a list of two elements.
If not None, use the first parameter as kappa and the
second as eta0 of the adaptive learning rate
"""
if a_lr is not None:
self.a_lr = Optimizer.adaptive_lr(kappa=a_lr[0], eta0=a_lr[1])
else:
self.a_lr = None
def step(self, model, loss):
"""
Performs one optimizer step, updating the gradient
INPUT
model
loss: of last epoch
"""
# If using adaptive learning rate, get next one
if self.a_lr is not None:
next_a_lr = next(self.a_lr)
else:
next_a_lr = None
# Update gradients
model.update(lr=next_a_lr)
class Adam(Optimizer):
"""Adam optimizer with fixed learning rate"""
def __init__(self, alpha=1e-3, beta1=.9, beta2=.999, epsilon=1e-8):
"""
INPUT
alpha: learning rate
beta1: exponential decay for first moment
beta2: exponential decay for second moment
epsilon: small number to avoid division by zero
"""
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.t = 1
self.m_prev = None
self.v_prev = None
def restart(self):
"""Reset the parameters of the moments"""
self.t = 1
self.m_prev = None
self.v_prev = None
def compute_adam_moment_estimates(self, m_t_old, v_t_old, gradient):
"""
Compute the new Adam moments
INPUTS
m_t_old: previous first moment
v_t_old: previous second moment
gradient
"""
# compute bias-corrected first moment estimate
m_t = (self.beta1 * m_t_old + (1 - self.beta1) * gradient)
# compute bias-corrected second raw moment estimate
v_t = (self.beta2 * v_t_old + (1 - self.beta2) * gradient.pow(2))
out = (m_t / (1 - self.beta1 ** self.t) /
((v_t / (1 - self.beta2 ** self.t)).sqrt() + self.epsilon))
return out, m_t, v_t
def step(self, model, loss):
"""
Performs one optimizer step, updating the gradient
INPUT
model
loss: of last epoch
"""
if self.m_prev is None:
# 1st moment vector
self.m_prev = [x[1].clone().fill_(0) for x in model.param()]
if self.v_prev is None:
# 2nd moment vector
self.v_prev = [x[1].clone().fill_(0) for x in model.param()]
# Compute moment estimates
m_e = [self.compute_adam_moment_estimates(m_p, v_p, g[1]) for g, (m_p, v_p) in
zip(model.param(), zip(self.m_prev, self.v_prev))]
# Update optimizer with the computed values
self.m_prev = [p[1] for p in m_e]
self.v_prev = [p[2] for p in m_e]
# Update parameters with the computed values
out = [p[0] for p in m_e]
model.update(lr=self.alpha, values=out)
self.t += 1
| StarcoderdataPython |
3373471 | <gh_stars>1-10
#!/usr/bin/python
# coding: utf-8
"""Test the functionality of SASParser and GuinierParser"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "25/03/2021"
import unittest
import logging
import io
import contextlib
from pathlib import Path
from .. import dated_version as freesas_version
from ..sas_argparser import SASParser, GuinierParser
logger = logging.getLogger(__name__)
class TestSasArgParser(unittest.TestCase):
def test_minimal_guinier_parser_requires_file_argument(self):
"""
        Test that the Guinier parser reports an error if no file argument is provided.
"""
basic_parser = GuinierParser("program", "description", "epilog")
output_catcher = io.StringIO()
try:
with contextlib.redirect_stderr(output_catcher):
_ = basic_parser.parse_args()
except SystemExit:
pass
self.assertTrue(
basic_parser.usage in output_catcher.getvalue(),
msg="GuinierParser provides usage if no file provided",
)
self.assertTrue(
"the following arguments are required: FILE"
in output_catcher.getvalue(),
msg="GuinierParser states that the FILE argument is missing if no file provided",
)
def test_minimal_guinier_parser_parses_list_of_files(self):
"""
        Test that the Guinier parser parses a list of files.
"""
basic_parser = GuinierParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args(["afile", "bfile", "cfile"])
self.assertEqual(
set(parsed_arguments.file),
{"afile", "bfile", "cfile"},
msg="GuinierParser parses list of files",
)
def test_add_file_argument_enables_SASParser_to_recognize_file_lists(
self,
):
"""
Test that add_file_argument adds the ability to parse a file list to SASParser.
"""
basic_parser = SASParser("program", "description", "epilog")
# Before running add_file_argument a file argument is not recognized
output_catcher = io.StringIO()
try:
with contextlib.redirect_stderr(output_catcher):
_ = basic_parser.parse_args(["afile"])
except SystemExit:
pass
self.assertTrue(
"unrecognized arguments: afile" in output_catcher.getvalue(),
msg="Minimal SASParser does not recognize file argument",
)
basic_parser.add_file_argument(help_text="file help")
parsed_arguments = basic_parser.parse_args(["afile", "bfile", "cfile"])
self.assertEqual(
set(parsed_arguments.file),
{"afile", "bfile", "cfile"},
msg="GuinierParser parses list of files",
)
def test_minimal_parser_usage_includes_program_name(self):
"""
        Test that the minimal parser includes the provided program name in the usage string.
"""
basic_parser = SASParser("test❤️", "description", "epilog")
self.assertTrue(
"test❤️" in basic_parser.usage,
msg="SASParser usage includes program name",
)
def test_minimal_guinier_parser_usage_includes_program_name(self):
"""
        Test that the minimal Guinier parser includes the provided program name in the usage string.
"""
basic_parser = GuinierParser("test❤️", "description", "epilog")
self.assertTrue(
"test❤️" in basic_parser.usage,
msg="GuinierParser usage includes program name",
)
def test_minimal_guinier_parser_help_includes_program_description_epilog(
self,
):
"""
        Test that the minimal Guinier parser's help output includes
        the provided program name, description and epilog.
"""
basic_parser = GuinierParser("test❤️", "description📚", "epilog🎦")
output_catcher = io.StringIO()
try:
with contextlib.redirect_stdout(output_catcher):
_ = basic_parser.parse_args(["--help"])
except SystemExit:
pass
self.assertTrue(
"test❤️" in output_catcher.getvalue(),
msg="GuinierParser outputs program name in help",
)
self.assertTrue(
"description📚" in output_catcher.getvalue(),
msg="GuinierParser outputs description in help",
)
self.assertTrue(
"epilog🎦" in output_catcher.getvalue(),
msg="GuinierParser outputs eplilog name in help",
)
def test_minimal_parser_help_includes_program_description_epilog(self):
"""
        Test that the minimal parser's help output includes
        the provided program name, description and epilog.
"""
basic_parser = SASParser("test❤️", "description📚", "epilog🎦")
output_catcher = io.StringIO()
try:
with contextlib.redirect_stdout(output_catcher):
_ = basic_parser.parse_args(["--help"])
except SystemExit:
pass
self.assertTrue(
"test❤️" in output_catcher.getvalue(),
msg="SASParser outputs program name in help",
)
self.assertTrue(
"description📚" in output_catcher.getvalue(),
msg="SASParser outputs description in help",
)
self.assertTrue(
"epilog🎦" in output_catcher.getvalue(),
msg="SASParser outputs eplilog name in help",
)
def test_minimal_parser_default_verbosity_level_is_0(self):
"""
Test that the parser sets the verbosity to 0 if no args are provided
"""
basic_parser = SASParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args()
self.assertEqual(
parsed_arguments.verbose,
0,
msg="SASParser default verbosity is 0",
)
def test_minimal_guinier_parser_default_verbosity_level_is_0(self):
"""
        Test that the Guinier parser sets the verbosity to 0 if no verbosity flags are provided
"""
basic_parser = GuinierParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args(["afile"])
self.assertEqual(
parsed_arguments.verbose,
0,
msg="GuinierParser default verbosity is 0",
)
def test_minimal_parser_accumulates_verbosity_level(self):
"""
        Test that the parser increases the verbosity level to two
        if the -vv argument is provided.
"""
basic_parser = SASParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args(["-vv"])
self.assertEqual(
parsed_arguments.verbose,
2,
msg="SASParser verbosity increased to 2 by -vv",
)
def test_minimal_guinier_parser_accumulates_verbosity_level(self):
"""
        Test that the Guinier parser increases the verbosity level to two
        if the -vv argument is provided.
"""
basic_parser = GuinierParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args(["afile", "-vv"])
self.assertEqual(
parsed_arguments.verbose,
2,
msg="GuinierParser verbosity increased to 2 by -vv",
)
def test_minimal_parser_provides_correct_version(self):
"""
Test that parser provides the correct app version.
"""
basic_parser = SASParser("program", "description", "epilog")
output_catcher = io.StringIO()
try:
with contextlib.redirect_stdout(output_catcher):
_ = basic_parser.parse_args(["--version"])
except SystemExit:
pass
self.assertTrue(
freesas_version.version in output_catcher.getvalue(),
msg="SASParser outputs consistent version",
)
self.assertTrue(
freesas_version.date in output_catcher.getvalue(),
msg="SASParser outputs consistent date",
)
def test_minimal_guinier_parser_provides_correct_version(self):
"""
Test that parser provides the correct app version.
"""
basic_parser = GuinierParser("program", "description", "epilog")
output_catcher = io.StringIO()
try:
with contextlib.redirect_stdout(output_catcher):
_ = basic_parser.parse_args(["--version"])
except SystemExit:
pass
self.assertTrue(
freesas_version.version in output_catcher.getvalue(),
msg="GuinierParser outputs consistent version",
)
self.assertTrue(
freesas_version.date in output_catcher.getvalue(),
msg="GuinierParser outputs consistent date",
)
def test_minimal_guinier_parser_accepts_output_file_argument(self):
"""
Test that minimal Guinier parser accepts one output file argument.
"""
basic_parser = GuinierParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args(["afile", "-o", "out.file"])
self.assertEqual(
parsed_arguments.output,
Path("out.file"),
msg="Minimal GuinierParser accepts output file argument",
)
def test_add_output_filename_argument_adds_output_file_argument_to_SASParser(
self,
):
"""
        Test that add_output_filename_argument adds an output file argument to a SASParser.
"""
basic_parser = SASParser("program", "description", "epilog")
        # Before running add_output_filename_argument, -o is not recognized
output_catcher = io.StringIO()
try:
with contextlib.redirect_stderr(output_catcher):
_ = basic_parser.parse_args(["-o", "out.file"])
except SystemExit:
pass
self.assertTrue(
"unrecognized arguments: -o out.file" in output_catcher.getvalue(),
msg="Minimal SASParser does not recognize -o argument",
)
basic_parser.add_output_filename_argument()
parsed_arguments = basic_parser.parse_args(["-o", "out.file"])
self.assertEqual(
parsed_arguments.output,
Path("out.file"),
msg="SASParser accepts output file argument"
"after running add_output_filename_argument()",
)
def test_minimal_guinier_parser_accepts_output_format_argument(self):
"""
Test that minimal Guinier parser accepts one output data format argument.
"""
basic_parser = GuinierParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args(["afile", "-f", "aformat"])
self.assertEqual(
parsed_arguments.format,
"aformat",
msg="Minimal GuinierParser accepts output data format argument",
)
def test_add_output_data_format_adds_output_format_argument_to_SASParser(
self,
):
"""
        Test that add_output_data_format adds an output data format argument to a SASParser.
"""
basic_parser = SASParser("program", "description", "epilog")
        # Before running add_output_data_format, -f is not recognized
output_catcher = io.StringIO()
try:
with contextlib.redirect_stderr(output_catcher):
_ = basic_parser.parse_args(["-f", "aformat"])
except SystemExit:
pass
self.assertTrue(
"unrecognized arguments: -f aformat" in output_catcher.getvalue(),
msg="Minimal SASParser does not recognize -f argument",
)
basic_parser.add_output_data_format()
parsed_arguments = basic_parser.parse_args(["-f", "aformat"])
self.assertEqual(
parsed_arguments.format,
"aformat",
msg="SASParser accepts output data format argument"
"after running add_output_data_format()",
)
def test_minimal_guinier_parser_accepts_q_unit_argument(self):
"""
Test that minimal Guinier parser accepts a q unit argument.
"""
basic_parser = GuinierParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args(["afile", "-u", "nm"])
self.assertEqual(
parsed_arguments.unit,
"nm",
msg="Minimal GuinierParser accepts q unit argument",
)
def test_add_q_unit_argument_adds_add_q_unit_argument_to_SASParser(
self,
):
"""
        Test that add_q_unit_argument adds a q unit argument to a SASParser.
"""
basic_parser = SASParser("program", "description", "epilog")
        # Before running add_q_unit_argument, -u is not recognized
output_catcher = io.StringIO()
try:
with contextlib.redirect_stderr(output_catcher):
_ = basic_parser.parse_args(["-u", "nm"])
except SystemExit:
pass
self.assertTrue(
"unrecognized arguments: -u nm" in output_catcher.getvalue(),
msg="Minimal SASParser does not recognize -u argument",
)
basic_parser.add_q_unit_argument()
parsed_arguments = basic_parser.parse_args(["-u", "nm"])
self.assertEqual(
parsed_arguments.unit,
"nm",
msg="SASParser accepts q unit argument after running add_q_unit_argument()",
)
def test_SASParser_q_unit_argument_allows_predefined_units(
self,
):
"""
        Test that the q unit argument of a SASParser accepts "nm", "Å", "A".
"""
basic_parser = SASParser("program", "description", "epilog")
basic_parser.add_q_unit_argument()
parsed_arguments = basic_parser.parse_args(["-u", "nm"])
self.assertEqual(
parsed_arguments.unit,
"nm",
msg="SASParser accepts unit format nm",
)
parsed_arguments = basic_parser.parse_args(["-u", "A"])
self.assertEqual(
parsed_arguments.unit,
"Å",
msg="SASParser accepts unit format A",
)
parsed_arguments = basic_parser.parse_args(["-u", "Å"])
self.assertEqual(
parsed_arguments.unit,
"Å",
msg="SASParser accepts unit format A",
)
def test_SASParser_q_unit_argument_does_not_allow_not_predefined_units(
self,
):
"""
        Test that the q unit argument of a SASParser does not accept a
unit that is not "nm", "Å", "A".
"""
basic_parser = SASParser("program", "description", "epilog")
basic_parser.add_q_unit_argument()
output_catcher = io.StringIO()
try:
with contextlib.redirect_stderr(output_catcher):
_ = basic_parser.parse_args(["-u", "m"])
except SystemExit:
pass
self.assertTrue(
"argument -u/--unit: invalid choice: 'm' (choose from 'nm', 'Å', 'A')"
in output_catcher.getvalue(),
msg="SASParser does not accept '-u m' argument",
)
def test_SASParser_q_unit_A_gets_converted_to_Å(
self,
):
"""
        Test that a SASParser q unit input "A" gets converted to "Å".
"""
basic_parser = SASParser("program", "description", "epilog")
basic_parser.add_q_unit_argument()
parsed_arguments = basic_parser.parse_args(["-u", "A"])
self.assertEqual(
parsed_arguments.unit,
"Å",
msg="SASParser converts unit input 'A' to 'Å'",
)
def test_GuinierParser_q_unit_argument_allows_predefined_units(
self,
):
"""
        Test that the q unit argument of a GuinierParser accepts "nm", "Å", "A".
"""
basic_parser = GuinierParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args(["afile", "-u", "nm"])
self.assertEqual(
parsed_arguments.unit,
"nm",
msg="SASParser accepts unit format nm",
)
parsed_arguments = basic_parser.parse_args(["afile", "-u", "A"])
self.assertEqual(
parsed_arguments.unit,
"Å",
msg="SASParser accepts unit format A",
)
parsed_arguments = basic_parser.parse_args(["afile", "-u", "Å"])
self.assertEqual(
parsed_arguments.unit,
"Å",
msg="SASParser accepts unit format A",
)
def test_GuinierParser_q_unit_argument_does_not_allow_not_predefined_units(
self,
):
"""
        Test that the q unit argument of a GuinierParser does not accept a
unit that is not "nm", "Å", "A".
"""
basic_parser = GuinierParser("program", "description", "epilog")
output_catcher = io.StringIO()
try:
with contextlib.redirect_stderr(output_catcher):
_ = basic_parser.parse_args(["afile", "-u", "m"])
except SystemExit:
pass
self.assertTrue(
"argument -u/--unit: invalid choice: 'm' (choose from 'nm', 'Å', 'A')"
in output_catcher.getvalue(),
msg="SASParser does not accept '-u m' argument",
)
def test_GuinierParser_q_unit_A_gets_converted_to_Å(
self,
):
"""
        Test that a GuinierParser q unit input "A" gets converted to "Å".
"""
basic_parser = GuinierParser("program", "description", "epilog")
parsed_arguments = basic_parser.parse_args(["afile", "-u", "A"])
self.assertEqual(
parsed_arguments.unit,
"Å",
msg="SASParser converts unit input 'A' to 'Å'",
)
def test_add_argument_adds_an_argument_to_a_SASParser(
self,
):
"""
Test that new arguments can be added to SASParser.
"""
basic_parser = SASParser("program", "description", "epilog")
        # Before running add_argument, -c is not recognized
output_catcher = io.StringIO()
try:
with contextlib.redirect_stderr(output_catcher):
_ = basic_parser.parse_args(["-c"])
except SystemExit:
pass
self.assertTrue(
"unrecognized arguments: -c" in output_catcher.getvalue(),
msg="Minimal SASParser does not recognize -c argument",
)
basic_parser.add_argument(
"-c",
"--check",
action="store_true",
)
parsed_arguments = basic_parser.parse_args(["-c"])
self.assertEqual(
parsed_arguments.check,
True,
msg="-c argument added to SASParser",
)
def test_add_argument_adds_an_argument_to_a_GuinierParser(
self,
):
"""
Test that new arguments can be added to GuinierParser.
"""
basic_parser = GuinierParser("program", "description", "epilog")
        # Before running add_argument, -c is not recognized
output_catcher = io.StringIO()
try:
with contextlib.redirect_stderr(output_catcher):
_ = basic_parser.parse_args(["afile", "-c"])
except SystemExit:
pass
self.assertTrue(
"unrecognized arguments: -c" in output_catcher.getvalue(),
msg="Minimal GuinierParser does not recognize -c argument",
)
basic_parser.add_argument(
"-c",
"--check",
action="store_true",
)
parsed_arguments = basic_parser.parse_args(["afile", "-c"])
self.assertEqual(
parsed_arguments.check,
True,
msg="-c argument added to GuinierParser",
)
def suite():
"""Build a test suite from the TestSasArgParser class"""
test_suite = unittest.TestSuite()
for class_element in dir(TestSasArgParser):
if class_element.startswith("test"):
test_suite.addTest(TestSasArgParser(class_element))
return test_suite
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite())
| StarcoderdataPython |
30698 | <reponame>christi-john/codechef-practice
# REMISS
for i in range(int(input())):
A,B = map(int,input().split())
if A>B: print(str(A) + " " + str(A+B))
    else: print(str(B) + " " + str(A+B))
| StarcoderdataPython
1604161 | <reponame>ttx/storyboard
# Copyright 2013 <NAME> <<EMAIL>>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns
urlpatterns = patterns('storyboard.stories.views',
(r'^$', 'dashboard'),
(r'^(\d+)$', 'view'),
(r'^(\d+)/addtask$', 'add_task'),
(r'^new$', 'add_story'),
(r'^(\d+)/edit$', 'edit_story'),
(r'^(\d+)/comment$', 'comment'),
(r'^(\d+)/priority$', 'set_priority'),
(r'^task/(\d+)$', 'edit_task'),
(r'^task/(\d+)/delete$', 'delete_task'),
)
| StarcoderdataPython |
3268593 | """Setup script."""
import glob
import os
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
def del_prefix(path):
"""Delete prefix path."""
return os.path.relpath(path, "src")
packages = list(map(
del_prefix,
glob.glob("CardGames/resources/**/*", recursive=True))
)
locales = list(map(
del_prefix,
glob.glob("CardGames/localization/**/**/*", recursive=True))
)
setup(
name='CardGames',
version='0.1',
author="<NAME> and <NAME>",
author_email="<EMAIL>",
description="Bleckjack, Fool and Queen games",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/AndrewBabichev/CardGames",
classifiers=[
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3 :: Only',
],
packages=['CardGames', 'CardGames.code'],
install_requires=[
'playsound>=1.2.2',
'numpy>=1.19.5',
'Pillow>=8.2.0',
'websocket_client>=1.0.1',
'websockets==9.0.1',
'vext.gi',
'vext',
'sphinx',
],
entry_points={
"console_scripts":
[
"cards = CardGames.__main__:main"
]
},
include_package_data=True,
package_data={'': packages + locales}
)
| StarcoderdataPython |
3358093 | <gh_stars>0
import time
import datetime
import json
import redis
import threading
import sys
import RPi.GPIO as GPIO
from .worker import Worker
sys.path.append('..')
from logger.Logger import Logger, LOG_LEVEL
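# Example worker config sketch (these keys are consumed by RelayWorker
# below; the values here are hypothetical):
#   {"key": "relay_1", "pin": 17, "topic": "garden relay 1",
#    "normally_open": True, "restore_last_known_state": False}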
class RelayWorker(Worker):
def __init__(self, config, main_thread_running, system_ready, relay_available, relay_active):
super().__init__(config, main_thread_running, system_ready)
        self.config['pin'] = int(self.config['pin'])  # parse possible strings to avoid errors
# Events
self.relay_available = relay_available
self.relay_active = relay_active
# Dynamic Properties based on config
self.active = False
self.topic = self.config['topic'].replace(" ", "/").lower() if self.config['topic'] is not None else 'mudpi/relay/'
self.pin_state_off = GPIO.HIGH if self.config['normally_open'] is not None and self.config['normally_open'] else GPIO.LOW
self.pin_state_on = GPIO.LOW if self.config['normally_open'] is not None and self.config['normally_open'] else GPIO.HIGH
# Pubsub Listeners
self.pubsub = self.r.pubsub()
self.pubsub.subscribe(**{self.topic: self.handleMessage})
self.init()
return
def init(self):
GPIO.setup(self.config['pin'], GPIO.OUT)
#Close the relay by default, we use the pin state we determined based on the config at init
GPIO.output(self.config['pin'], self.pin_state_off)
time.sleep(0.1)
#Feature to restore relay state in case of crash or unexpected shutdown. This will check for last state stored in redis and set relay accordingly
        if self.config.get('restore_last_known_state', False):
if(self.r.get(self.config['key']+'_state')):
GPIO.output(self.config['pin'], self.pin_state_on)
Logger.log(LOG_LEVEL["info"], 'Restoring Relay \033[1;36m{0} On\033[0;0m'.format(self.config['key']))
Logger.log(LOG_LEVEL["info"], 'Relay Worker {key}...\t\t\t\033[1;32m Ready\033[0;0m'.format(**self.config))
return
def run(self):
Logger.log(LOG_LEVEL["info"], 'Relay Worker {key}...\t\t\t\033[1;32m Online\033[0;0m'.format(**self.config))
return super().run()
def handleMessage(self, message):
data = message['data']
if data is not None:
decoded_message = self.decodeMessageData(data)
try:
if decoded_message['event'] == 'Switch':
if decoded_message.get('data', None):
self.relay_active.set()
elif decoded_message.get('data', None) == 0:
self.relay_active.clear()
Logger.log(LOG_LEVEL["info"], 'Switch Relay \033[1;36m{0}\033[0;0m state to \033[1;36m{1}\033[0;0m'.format(self.config['key'], decoded_message['data']))
elif decoded_message['event'] == 'Toggle':
state = 'Off' if self.active else 'On'
if self.relay_active.is_set():
self.relay_active.clear()
else:
self.relay_active.set()
Logger.log(LOG_LEVEL["info"], 'Toggle Relay \033[1;36m{0} {1} \033[0;0m'.format(self.config['key'], state))
except:
Logger.log(LOG_LEVEL["error"], 'Error Decoding Message for Relay {0}'.format(self.config['key']))
def turnOn(self):
        # Turn on the relay if it's available
if self.relay_available.is_set():
if not self.active:
GPIO.output(self.config['pin'], self.pin_state_on)
message = {'event':'StateChanged', 'data':1}
self.r.set(self.config['key']+'_state', 1)
self.r.publish(self.topic, json.dumps(message))
self.active = True
#self.relay_active.set() This is handled by the redis listener now
self.resetElapsedTime()
def turnOff(self):
        # Turn off the relay if it's available
if self.relay_available.is_set():
if self.active:
GPIO.output(self.config['pin'], self.pin_state_off)
message = {'event':'StateChanged', 'data':0}
self.r.delete(self.config['key']+'_state')
self.r.publish(self.topic, json.dumps(message))
#self.relay_active.clear() This is handled by the redis listener now
self.active = False
self.resetElapsedTime()
def work(self):
self.resetElapsedTime()
while self.main_thread_running.is_set():
if self.system_ready.is_set():
try:
self.pubsub.get_message()
if self.relay_available.is_set():
if self.relay_active.is_set():
self.turnOn()
else:
self.turnOff()
else:
self.turnOff()
time.sleep(1)
except:
Logger.log(LOG_LEVEL["error"], "Relay Worker \033[1;36m{key}\033[0;0m \t\033[1;31m Unexpected Error\033[0;0m".format(**self.config))
else:
#System not ready relay should be off
self.turnOff()
time.sleep(1)
self.resetElapsedTime()
time.sleep(0.1)
#This is only ran after the main thread is shut down
#Close the pubsub connection
self.pubsub.close()
Logger.log(LOG_LEVEL["info"], "Relay Worker {key} Shutting Down...\t\033[1;32m Complete\033[0;0m".format(**self.config)) | StarcoderdataPython |
1747011 | <filename>src/python/pants/backend/graph_info/tasks/cloc.py
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.graph_info.subsystems.cloc_binary import ClocBinary
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.task.console_task import ConsoleTask
from pants.util.contextutil import temporary_dir
from pants.util.process_handler import subprocess
class CountLinesOfCode(ConsoleTask):
"""Print counts of lines of code."""
@classmethod
def subsystem_dependencies(cls):
return super(CountLinesOfCode, cls).subsystem_dependencies() + (ClocBinary,)
@classmethod
def register_options(cls, register):
super(CountLinesOfCode, cls).register_options(register)
register('--version', advanced=True, fingerprint=True, default='1.66',
removal_version='1.7.0.dev0', removal_hint='Use --version in scope cloc-binary',
help='Version of cloc.')
register('--transitive', type=bool, fingerprint=True, default=True,
help='Operate on the transitive dependencies of the specified targets. '
'Unset to operate only on the specified targets.')
register('--ignored', type=bool, fingerprint=True,
help='Show information about files ignored by cloc.')
def _get_cloc_script(self):
return ClocBinary.global_instance().select(self.context)
def console_output(self, targets):
if not self.get_options().transitive:
targets = self.context.target_roots
buildroot = get_buildroot()
with temporary_dir() as tmpdir:
# Write the paths of all files we want cloc to process to the so-called 'list file'.
# TODO: 1) list_file, report_file and ignored_file should be relative files within the
# execution "chroot", 2) list_file should be part of an input files Snapshot, and
# 3) report_file and ignored_file should be part of an output files Snapshot, when we have
# that capability.
list_file = os.path.join(tmpdir, 'list_file')
with open(list_file, 'w') as list_file_out:
for target in targets:
for source in target.sources_relative_to_buildroot():
list_file_out.write(os.path.join(buildroot, source))
                        list_file_out.write('\n')
report_file = os.path.join(tmpdir, 'report_file')
ignored_file = os.path.join(tmpdir, 'ignored')
# TODO: Look at how to make BinaryUtil support Snapshots - such as adding an instrinsic to do
# network fetch directly into a Snapshot.
# See http://cloc.sourceforge.net/#options for cloc cmd-line options.
cmd = (
self._get_cloc_script(),
'--skip-uniqueness',
'--ignored={}'.format(ignored_file),
'--list-file={}'.format(list_file),
'--report-file={}'.format(report_file)
)
with self.context.new_workunit(
name='cloc',
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(cmd)) as workunit:
exit_code = subprocess.call(
cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr')
)
if exit_code != 0:
raise TaskError('{} ... exited non-zero ({}).'.format(' '.join(cmd), exit_code))
with open(report_file, 'r') as report_file_in:
for line in report_file_in.read().split('\n'):
yield line
if self.get_options().ignored:
yield 'Ignored the following files:'
with open(ignored_file, 'r') as ignored_file_in:
for line in ignored_file_in.read().split('\n'):
yield line
| StarcoderdataPython |
3301383 | from unittest import TestCase
from musicscore.musicxml.types.complextypes.attributes import Divisions
class TestDivisions(TestCase):
def setUp(self):
self.divisions = Divisions(1)
def test_divisions(self):
result = '''<divisions>1</divisions>
'''
self.assertEqual(self.divisions.to_string(), result)
self.divisions.value = 2
result = '''<divisions>2</divisions>
'''
self.assertEqual(self.divisions.to_string(), result)
self.divisions.text = 3
result = '''<divisions>3</divisions>
'''
self.assertEqual(self.divisions.to_string(), result)
| StarcoderdataPython |
9264 | import cv2
import ezdxf
import numpy as np
def draw_hatch(img, entity, color, mask):
for poly_path in entity.paths.paths:
# print(poly_path.path_type_flags)
polygon = np.array([vertex[:-1] for vertex in poly_path.vertices]).astype(int)
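        # bit 0 of path_type_flags marks an external boundary path in a
        # DXF HATCH; other paths are treated as holes and painted white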
if poly_path.path_type_flags & 1 == 1:
cv2.fillPoly(img, [polygon], color)
cv2.fillPoly(mask, [polygon], (255, 255, 255))
else:
cv2.fillPoly(img, [polygon], (255, 255, 255))
return color
def draw_line(img, entity, color, mask):
p1 = entity.dxf.start[:-1]
p2 = entity.dxf.end[:-1]
cv2.line(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), color, 1)
cv2.line(mask, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (255, 255, 255), 2)
return color
def draw_lwpolyline(img, entity, color, mask):
polyline = []
a = np.array(entity.lwpoints.values).astype(int)
while len(a) > 0:
polyline.append((a[0], a[1]))
a = a[5:]
cv2.polylines(img, [np.array(polyline)], entity.closed, color, 1)
cv2.polylines(mask, [np.array(polyline)], entity.closed, (255, 255, 255), 2)
return color
def draw_arc(img, entity, color, mask):
s = entity.dxf.start_angle * np.pi / 180
e = entity.dxf.end_angle * np.pi / 180
if s > e:
s -= 2 * np.pi
d = (e - s) / (int((e - s) * 180 / np.pi) + 1)
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
angles = np.arange(s, e + d / 2, d)
x = cx + r * np.cos(angles)
y = cy + r * np.sin(angles)
points = np.column_stack((x, y)).astype(int)
cv2.polylines(img, [points], abs(s - e) < 1e-9, color, 1)
cv2.polylines(mask, [points], abs(s - e) < 1e-9, (255, 255, 255), 2)
return color
def draw_circle(img, entity, color, mask):
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), int(r), color, 1)
cv2.circle(mask, (int(cx), int(cy)), int(r), (255, 255, 255), -1)
return color
def draw_ellipse(img, entity, color, mask):
cx, cy = entity.dxf.center.xyz[:-1]
ma = entity.dxf.major_axis.magnitude
angle = entity.dxf.major_axis.angle_deg
mi = ma * entity.dxf.ratio
s = entity.dxf.start_param * 180 / np.pi
e = entity.dxf.end_param * 180 / np.pi
if entity.dxf.extrusion.z == -1:
s = 360 - s
e = 360 - e
cv2.ellipse(img, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, color, 1)
cv2.ellipse(mask, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, (255, 255, 255), 1)
return color
def draw_point(img, entity, color, mask):
cx, cy = entity.dxf.location.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), 0, color, 1)
cv2.circle(mask, (int(cx), int(cy)), 0, (255, 255, 255), -1)
return color
draw_map = {
'HATCH': draw_hatch,
'LINE': draw_line,
'LWPOLYLINE': draw_lwpolyline,
'ARC': draw_arc,
'CIRCLE': draw_circle,
'ELLIPSE': draw_ellipse,
'POINT': draw_point,
}
def paint(in_path, out_path, config):
doc = ezdxf.readfile(in_path)
extmax, extmin = doc.header['$EXTMAX'], doc.header['$EXTMIN']
xmin, ymin = np.floor(extmin[:-1]).astype(int)
xmax, ymax = np.ceil(extmax[:-1]).astype(int)
img = np.ones((ymax + ymin, xmax + xmin, 3), np.uint8) * 255
mask = np.zeros_like(img)
msp = doc.modelspace()
layers = config.get('layers', {})
colors = config.get('colors', {})
# print(doc.layers.entries.keys())
for layer_name, names in layers.items():
color = tuple(colors.get(layer_name, [0, 0, 0]))
for name in names:
if name not in doc.layers:
continue
entities = msp.query('*[layer=="%s"]' % name)
tmp = np.zeros((ymax + ymin, xmax + xmin), np.uint8)
for entity in entities:
if entity.DXFTYPE in draw_map:
draw_map[entity.DXFTYPE](img, entity, color, tmp)
else:
print("%s: %s" % (name, entity.DXFTYPE))
contours, hierarchy = cv2.findContours(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(mask, contours, -1, color, -1)
res, img_png = cv2.imencode('.png', cv2.flip(img, 0))
res, mask_png = cv2.imencode('.png', cv2.flip(mask, 0))
with open(out_path, 'wb') as f:
f.write(img_png.tobytes())
with open(out_path[:-4] + "_mask.png", 'wb') as f:
f.write(mask_png.tobytes())
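# A minimal config sketch for paint() (the layer and color names are
# hypothetical; colors are BGR lists as OpenCV expects):
#   config = {
#       'layers': {'walls': ['A-WALL'], 'doors': ['A-DOOR']},
#       'colors': {'walls': [0, 0, 255], 'doors': [0, 255, 0]},
#   }
#   paint('plan.dxf', 'plan.png', config)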
| StarcoderdataPython |
3343723 | from PIL import Image, ImageDraw, ImageFont
from math import ceil
# run `pip install yapf` to install the formatter
# to run format, go to root folder
# yapf -ir ./ --style ./yapf.conf
# specify how large the canvas needs to be
# size in pixels.
# horizontal size is fixed at 430,
# vertical size can vary based on how long the whole thing ends up being
multiplier = 5
canvas_size = (430 * multiplier, 7545)
canvas_background = (255, 255, 255)
dpi = 50 * multiplier
file_dpi = 200 * multiplier
# vertical spaces
space1rem = 16 * multiplier
space0p12rem = 0.25 * space1rem / 2
space0p25rem = 0.25 * space1rem
space0p5rem = 0.5 * space1rem
space1p5rem = 1.5 * space1rem
space2p5rem = 2.5 * space1rem
# Colours
charcoal = '#333333'
grey = '#666666'
stone = '#BBBBBB'
green = '#4CB564'
forest = '#276234'
honeydew = '#F7F9F8'
purple = '#673494'
lilac = '#FBF5FF'
# H1
H1_size = (430 * multiplier, 30 * multiplier)
H1_text_offset = (5 * multiplier, 8 * multiplier)
H1_text_gap = 10 * multiplier
H1_colour = forest
H1_font_bold = ImageFont.truetype('Arial Bold.ttf', 14 * multiplier)
H1_font_regular = ImageFont.truetype('Arial.ttf', 14 * multiplier)
H1_font_space = ImageFont.truetype('Arial.ttf', 6 * multiplier)
H1_background = honeydew
# H2
H2_colour = charcoal
H2_colour_green = green
H2_colour_purple = purple
H2_font_regular = ImageFont.truetype('Arial.ttf', 18 * multiplier)
H2_font_bold = ImageFont.truetype('Arial Bold.ttf', 18 * multiplier)
# Subtitle
Sub_colour = grey
Sub_font = ImageFont.truetype('Arial.ttf', 14 * multiplier)
# Project Timeline Chart
BM1_canvas_size = (430 * multiplier, 1590)
BM1_duration_bar_colour = purple
BM1_duration_plot_size = (42, 21)
BM1_legend_diamond_size = (12 * multiplier, 12 * multiplier)
# Pricing Criteria
BM2_canvas_size = (430 * multiplier, 1974)
plot_size1 = (400 * multiplier, 130 * multiplier)
plot_size2 = (268 * multiplier, 130 * multiplier)
pricing_weight_calculation_colour = green
word_cloud_calculation_colour = purple
shape_colour = stone
line_length = (430 / 3) * multiplier
space_adjust_caption = (plot_size2[1] - 50 * multiplier)
# Vendor Pipeline
BM3_rect_size = (72 * multiplier, 36 * multiplier)
BM3_dash_size = (42 * multiplier, 28 * multiplier)
BM3_rect_bg = lilac
BM3_chart_height = 4 * BM3_rect_size[
1] + 3 * (BM3_dash_size[1] - ceil(BM3_rect_size[1] / 2) + 1) + 1
BM3_chart_size = (430 * multiplier, BM3_chart_height * multiplier)
BM3_canvas_size = (430 * multiplier, 1595)
BM3_corner_radius = 8 * multiplier
BM3_dash_width = 6 * multiplier
BM3_dash_gap = 4 * multiplier
# Requested Information
BM4_square_size = (35 * multiplier, 35 * multiplier)
BM4_square_bg = lilac
BM4_canvas_size = (430 * multiplier, 1594)
BM4_corner_radius = 8 * multiplier
calculation_colour_purple = purple
calculation_font_bold = ImageFont.truetype('Arial Bold.ttf', 14 * multiplier)
calculation_font_bold_percent = ImageFont.truetype('Arial Bold.ttf', 10 * multiplier)
calculation_font_regular = ImageFont.truetype('Arial.ttf', 14 * multiplier)
space_adj_box = -0.7 * multiplier
space_adj_box_text_width = 15 * multiplier
space_adj_box_text_width_percent = 7 * multiplier
space_adj_box_text_height = 10 * multiplier
space_adj_text = 30 * multiplier
space_adj_text_percent = 45 * multiplier
space_adjust_dotted_line = space0p5rem
space_adjust_percent_vertical = 3 * multiplier
height_adj_after_bold = 1 * multiplier
# General text around charts
caption_colour = grey
body_colour = charcoal
body_font = ImageFont.truetype('Arial.ttf', 12 * multiplier)
body_font_bold = ImageFont.truetype('Arial Bold.ttf', 12 * multiplier)
caption_font_italic = ImageFont.truetype('Arial Italic.ttf', 12 * multiplier)
| StarcoderdataPython |
1769537 | <reponame>Nikolas010101/Projects<filename>APIs/APIs 8 - Hashing/app/database.py
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
#SQLALCHEMY_DATABASE_URL = 'postgresql://<username>:<password>@<ip-address/hostname>/<database_name>'
SQLALCHEMY_DATABASE_URL = 'postgresql://postgres:29480783@localhost/fastapi'
# the engine is what connects sqlalchemy to a postgres database
engine = create_engine(SQLALCHEMY_DATABASE_URL)
# for the actual communication we need to declare a session
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# table models will be defined based on the Base class
Base = declarative_base()
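# Typical usage sketch (an assumption, not part of this module: `app`,
# `models.Post`, fastapi's Depends and sqlalchemy.orm's Session would
# live in a route module that imports get_db):
#   @app.get("/posts")
#   def read_posts(db: Session = Depends(get_db)):
#       return db.query(models.Post).all()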
#dependency
def get_db():
db = SessionLocal()
try:
yield db
finally:
        db.close()
| StarcoderdataPython
1630430 | import sc2
from sc2 import run_game, maps, Race, Difficulty, position, Result
from sc2.player import Bot, Computer, Human
from sc2.constants import *
from sc2.ids.unit_typeid import UnitTypeId
from sc2.ids.ability_id import AbilityId
from sc2.unit import Unit
from sc2.units import Units
from sc2.position import Point2
class WorkerRushBot(sc2.BotAI):
async def on_step(self, iteration: int):
CCs: Units = self.townhalls(UnitTypeId.COMMANDCENTER)
cc: Unit = CCs.first
if self.supply_left < 6 and self.supply_used >= 14 and not self.already_pending(UnitTypeId.SUPPLYDEPOT):
if self.can_afford(UnitTypeId.SUPPLYDEPOT):
# This picks a near-random worker to build a depot at location
# 'from command center towards game center, distance 8'
await self.build(UnitTypeId.SUPPLYDEPOT, near=cc.position.towards(self.game_info.map_center, 8))
        # UnitTypeId.ABC does not appear to be a standard unit id and would
        # raise at run time; disabled like the experimental blocks below
        # if self.can_afford(UnitTypeId.ABC):
        #     await self.build(UnitTypeId.ABC, near=cc.position.towards(self.game_info.map_center, 8))
# if self.can_afford(UnitTypeId.ENGINEERINGBAY2):
# This picks a near-random worker to build a depot at location
# 'from command center towards game center, distance 8'
# await self.build(UnitTypeId.ENGINEERINGBAY2, near=cc.position.towards(self.game_info.map_center, 8))
# if self.can_afford(UnitTypeId.ARMORY2) and self.already_pending(UnitTypeId.SUPPLYDEPOT) < 2:
# This picks a near-random worker to build a depot at location
# 'from command center towards game center, distance 8'
# await self.build(UnitTypeId.ARMORY2, near=cc.position.towards(self.game_info.map_center, 8))
class WorkerRushBot1(sc2.BotAI):
async def on_step(self, iteration: int):
''' if iteration == 0:
for worker in self.workers:
worker.attack(self.enemy_start_locations[0])
'''
run_game(maps.get("AbyssalReefLE2"), [
#run_game(maps.get(" 沙漠风暴经典版work"), [
Bot(Race.Terran,WorkerRushBot()),
#Bot(Race.Protoss,WorkerRushBot()),
Bot(Race.Zerg,WorkerRushBot1()),
#Computer(Race.Terran, Difficulty.Medium)
], realtime=False)
| StarcoderdataPython
34627 | <filename>rastervision/new_version/learner/classification_learner.py
import warnings
warnings.filterwarnings('ignore') # noqa
from os.path import join, isfile, isdir
import zipfile
import torch
from torchvision import models
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Subset, ConcatDataset
from torchvision.transforms import (Compose, ToTensor, Resize, ColorJitter,
RandomVerticalFlip, RandomHorizontalFlip)
from rastervision.backend.torch_utils.chip_classification.folder import (
ImageFolder)
from rastervision.utils.files import (download_if_needed, list_paths,
get_local_path)
from rastervision.new_version.learner.learner import Learner
from rastervision.new_version.learner.metrics import (compute_conf_mat_metrics,
compute_conf_mat)
class ClassificationLearner(Learner):
def build_model(self):
model = getattr(models, self.cfg.model.backbone)(pretrained=True)
in_features = model.fc.in_features
num_labels = len(self.cfg.data.labels)
model.fc = nn.Linear(in_features, num_labels)
return model
def build_data(self):
cfg = self.cfg
batch_sz = cfg.solver.batch_sz
num_workers = cfg.data.num_workers
label_names = cfg.data.labels
# download and unzip data
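        # Expected zip layout (an assumption inferred from the directory
        # handling below): <zip>/train/<label>/... and <zip>/valid/<label>/...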
if cfg.data.data_format == 'image_folder':
if cfg.data.uri.startswith('s3://') or cfg.data.uri.startswith(
'/'):
data_uri = cfg.data.uri
else:
data_uri = join(cfg.base_uri, cfg.data.uri)
data_dirs = []
zip_uris = [data_uri] if data_uri.endswith('.zip') else list_paths(
data_uri, 'zip')
for zip_ind, zip_uri in enumerate(zip_uris):
zip_path = get_local_path(zip_uri, self.data_cache_dir)
if not isfile(zip_path):
zip_path = download_if_needed(zip_uri, self.data_cache_dir)
with zipfile.ZipFile(zip_path, 'r') as zipf:
data_dir = join(self.tmp_dir, 'data', str(zip_ind))
data_dirs.append(data_dir)
zipf.extractall(data_dir)
train_ds, valid_ds, test_ds = [], [], []
for data_dir in data_dirs:
train_dir = join(data_dir, 'train')
valid_dir = join(data_dir, 'valid')
# build datasets
transform = Compose(
[Resize((cfg.data.img_sz, cfg.data.img_sz)),
ToTensor()])
aug_transform = Compose([
RandomHorizontalFlip(),
RandomVerticalFlip(),
ColorJitter(0.1, 0.1, 0.1, 0.1),
Resize((cfg.data.img_sz, cfg.data.img_sz)),
ToTensor()
])
if isdir(train_dir):
if cfg.overfit_mode:
train_ds.append(
ImageFolder(
train_dir,
transform=transform,
classes=label_names))
else:
train_ds.append(
ImageFolder(
train_dir,
transform=aug_transform,
classes=label_names))
if isdir(valid_dir):
valid_ds.append(
ImageFolder(
valid_dir, transform=transform, classes=label_names))
test_ds.append(
ImageFolder(
valid_dir, transform=transform, classes=label_names))
train_ds, valid_ds, test_ds = \
ConcatDataset(train_ds), ConcatDataset(valid_ds), ConcatDataset(test_ds)
if cfg.overfit_mode:
train_ds = Subset(train_ds, range(batch_sz))
valid_ds = train_ds
test_ds = train_ds
elif cfg.test_mode:
train_ds = Subset(train_ds, range(batch_sz))
valid_ds = Subset(valid_ds, range(batch_sz))
test_ds = Subset(test_ds, range(batch_sz))
train_dl = DataLoader(
train_ds,
shuffle=True,
batch_size=batch_sz,
num_workers=num_workers,
pin_memory=True)
valid_dl = DataLoader(
valid_ds,
shuffle=True,
batch_size=batch_sz,
num_workers=num_workers,
pin_memory=True)
test_dl = DataLoader(
test_ds,
shuffle=True,
batch_size=batch_sz,
num_workers=num_workers,
pin_memory=True)
self.train_ds, self.valid_ds, self.test_ds = (train_ds, valid_ds,
test_ds)
self.train_dl, self.valid_dl, self.test_dl = (train_dl, valid_dl,
test_dl)
def train_step(self, batch, batch_nb):
x, y = batch
out = self.model(x)
return {'train_loss': F.cross_entropy(out, y, reduction='sum')}
def validate_step(self, batch, batch_nb):
x, y = batch
out = self.model(x)
val_loss = F.cross_entropy(out, y, reduction='sum')
num_labels = len(self.cfg.data.labels)
out = self.post_forward(out)
conf_mat = compute_conf_mat(out, y, num_labels)
return {'val_loss': val_loss, 'conf_mat': conf_mat}
def validate_end(self, outputs, num_samples):
conf_mat = sum([o['conf_mat'] for o in outputs])
val_loss = torch.stack([o['val_loss']
for o in outputs]).sum() / num_samples
conf_mat_metrics = compute_conf_mat_metrics(conf_mat,
self.cfg.data.labels)
metrics = {'val_loss': val_loss.item()}
metrics.update(conf_mat_metrics)
return metrics
def post_forward(self, x):
return x.argmax(-1)
def plot_xyz(self, ax, x, y, z=None):
x = x.permute(1, 2, 0)
if x.shape[2] == 1:
x = torch.cat([x for _ in range(3)], dim=2)
ax.imshow(x)
title = 'true: {}'.format(self.cfg.data.labels[y])
if z is not None:
title += ' / pred: {}'.format(self.cfg.data.labels[z])
ax.set_title(title, fontsize=8)
ax.axis('off')
| StarcoderdataPython |
188030 | COLORS = dict([
('CLEANUP', '\033[94m'),
('CREATE', '\033[92m'),
('INSTALL', '\033[92m'),
('SKIP', '\033[93m'),
('FAIL', '\033[31m'),
('DEFAULT', '\033[39m'),
('UNDEFINED', '\033[37m'),
('STATUS', '\033[36m')
])
def typed_message(message, message_type=None):
    # use .get() so unknown message types are not permanently added to COLORS
    color = COLORS.get(message_type, COLORS['UNDEFINED'])
tagged_type = "%s[%s] ..." % (color, message_type)
print("%26s %s %s" % (tagged_type, COLORS['DEFAULT'], message))
| StarcoderdataPython |
1637749 | # Generated by Django 3.0.4 on 2020-06-02 01:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0003_app_label'),
]
operations = [
migrations.AlterField(
model_name='action',
name='name',
field=models.CharField(max_length=50, unique=True, verbose_name='Modulo'),
),
]
| StarcoderdataPython |
49161 | #!/usr/bin/env python
from argparse import ArgumentParser
import os
import sys
if __name__ == '__main__':
arg_parser = ArgumentParser(description='list all files with given '
'extension in directory')
arg_parser.add_argument('--dir', default='.',
help='directory to search')
    arg_parser.add_argument('ext', help='extension to match, including the leading dot (e.g. .py)')
arg_parser.add_argument('--verbose', action='store_true',
help='show progress info')
options = arg_parser.parse_args()
for directory, _, files in os.walk(options.dir):
if options.verbose:
print("### checking directory '{}'".format(directory),
file=sys.stderr)
for file_name in files:
_, ext = os.path.splitext(file_name)
if ext == options.ext:
print(os.path.join(directory, file_name))
| StarcoderdataPython |
3229912 | """
validataclass
Copyright (c) 2021, binary butterfly GmbH and contributors
Use of this source code is governed by an MIT-style license that can be found in the LICENSE file.
"""
from validataclass.exceptions import ValidationError
__all__ = [
'RegexMatchError',
]
class RegexMatchError(ValidationError):
"""
Validation error raised by `RegexValidator` when the input string does not match the regular expression.
"""
code = 'invalid_string_format'
| StarcoderdataPython |
79192 | <reponame>fury-gl/helios
"""VTK/FURY Tools
This module implements a set o tools to enhance VTK given new functionalities.
"""
class Uniform:
"""This creates a uniform shader variable
It's responsible to store the value of a given uniform
variable and call the related vtk_program
"""
def __init__(self, name, uniform_type, value):
"""
Parameters
----------
name: str
name of the uniform variable
uniform_type: str
Uniform variable type which will be used inside the shader.
            Any of these are valid: 1fv, 1iv, 2f, 2fv, 2i, 3f, 3fv,
3uc, 4f, 4fv, 4uc, GroupUpdateTime, Matrix,
Matrix3x3, Matrix4x4, Matrix4x4v, f, i
        value: float or ndarray
            A value matching uniform_type which represents the shader
            uniform variable. For example, if uniform_type is 'f' then
            value should be a float; if uniform_type is '3f' then value
            should be a 1x3 array.
"""
self.name = name
self.value = value
self.uniform_type = uniform_type
self.valid_types = [
'1fv', '1iv', '2f', '2fv', '2i', '3f', '3fv',
'3uc', '4f', '4fv', '4uc', 'GroupUpdateTime', 'Matrix',
'Matrix3x3', 'Matrix4x4', 'Matrix4x4v', 'f', 'i']
if self.uniform_type not in self.valid_types:
raise ValueError(
f"""Uniform type {self.uniform_type} not valid.
Choose one of this values: {self.valid_types}""")
self.vtk_func_uniform = f'SetUniform{self.uniform_type}'
def execute_program(self, program):
""" Given a shader program, this method
will update the value with the associated uniform variable
in a draw call
Parameters
----------
program: vtkmodules.vtkRenderingOpenGL2.vtkShaderProgram
A shader program which will be used to update the uniform
"""
program.__getattribute__(self.vtk_func_uniform)(
self.name, self.value)
def __repr__(self):
return f'Uniform(name={self.name}, value={self.value})'
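# e.g. Uniform(name='edgeWidth', uniform_type='f', value=0.5) calls
# program.SetUniformf('edgeWidth', 0.5) on each execute_program(program).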
class Uniforms:
def __init__(self, uniforms):
"""Creates an object which store and execute an uniform variable.
Parameters
-----------
uniforms: list
List of Uniform objects.
Examples
--------
.. highlight:: python
.. code-block:: python
uniforms = [
Uniform(name='edgeWidth', uniform_type='f', value=edgeWidth)...
]
            CustomUniforms = Uniforms(uniforms)
add_shader_callback(
sq_actor, CustomUniforms)
sq_actor.CustomUniforms = CustomUniforms
sq_actor.CustomUniforms.edgeWidth = 0.5
"""
self.uniforms = uniforms
for obj in self.uniforms:
# if isinstance(obj, Uniform) is False:
# raise ValueError(f"""{obj} it's not an Uniform object""")
setattr(self, obj.name, obj)
def __call__(self, _caller, _event, calldata=None,):
"""This method should be used as a callback for a vtk Observer
Execute the shader program with the given uniform variables.
"""
program = calldata
if program is None:
return None
for uniform in self.uniforms:
uniform.execute_program(program)
def __repr__(self):
return f'Uniforms({[obj.name for obj in self.uniforms]})'
| StarcoderdataPython |
1767143 | <filename>blog/__init__.py<gh_stars>0
from .blog import app
| StarcoderdataPython |
3228788 | <gh_stars>0
from galleries.igallery import IGallery
from mnd_qtutils.qtutils import setup_widget_from_ui
import os
from pathlib import Path
from pyrulo_qt.ui_configurable_selector import ConfigurableSelector
from PySide2 import QtWidgets, QtGui, QtCore
import galleries_qt
class GalleryWizard(QtWidgets.QWidget):
def __init__(self, parent=None):
super(GalleryWizard, self).__init__(parent=parent)
self._dirty = False
ui_file_path = os.path.join(Path(__file__).parent, 'gallery_wizard.ui')
self._widget: QtWidgets.QWidget = setup_widget_from_ui(ui_file_path, self)
self._name_edit: QtWidgets.QLineEdit = self._widget.name_edit
self._name_edit.setValidator(QtGui.QRegExpValidator('[A-Za-z0-9_áéíóúÁÉÍÓÚ]*'))
self._name_edit.textEdited.connect(self._set_dirty)
self._gallery_container: QtWidgets.QWidget = self._widget.gallery_container
self._gallery_selector = ConfigurableSelector(base_class=IGallery)
self._gallery_selector.eventObjectSelected.connect(self._on_gallery_changed)
self._gallery_container.layout().addWidget(self._gallery_selector)
def is_dirty(self):
dirty = self._dirty
return dirty
def set_gallery(self, gallery_name: str, gallery: IGallery):
self._name_edit.setText(gallery_name)
self._set_gallery_ui_by_gallery(gallery)
self._dirty = False
def get_gallery(self) -> IGallery:
gallery_name = self._name_edit.text()
gallery: IGallery = self._gallery_selector.current_object()
gallery.set_name(gallery_name)
return gallery
def get_name(self) -> str:
return self._name_edit.text()
def clear(self):
        self._name_edit.clear()
        # the widget only has a gallery selector; assumes ConfigurableSelector
        # exposes set_current_index, as the original call did
        self._gallery_selector.set_current_index(0)
self._dirty = False
def _set_gallery_ui_by_gallery(self, gallery: IGallery):
gallery_class = type(gallery)
self._gallery_selector.add_class(gallery_class)
self._gallery_selector.set_object_for_class(gallery_class, gallery)
self._gallery_selector.select_class(gallery_class)
@QtCore.Slot()
def _on_gallery_changed(self, index):
self._set_dirty()
def _set_dirty(self):
self._dirty = True
if __name__ == '__main__':
import sys
from PySide2.QtWidgets import QApplication
from PySide2.QtWidgets import QWidget, QVBoxLayout
app = QApplication(sys.argv)
window = QWidget()
window.setMinimumSize(600, 500)
layout = QVBoxLayout()
window.setLayout(layout)
panel = GalleryWizard()
layout.addWidget(panel)
window.show()
sys.exit(app.exec_())
| StarcoderdataPython |
3299920 | import screening as m
import random
import pymysql
import csv
import toml
from collections import Counter, defaultdict
from pyomo.environ import *
from pyomo.opt import SolverFactory
year = 2020
interview_number = 4
max_faculty_interview = 7
interview_low_score = 10 # interviews scoring below this are flagged in the analysis
weights = { "core core": 10
, "core minor": 5
, "minor minor": 2
, "interest": 100
, "invite": 50 # Faculty wants to interview
, "rejected": -10 # Faculty said no during screening
, "force": 10000 # Forcing an interview (by hand only)
, "intern": -10000 # Candidate was an intern (by hand)
, "unavailable": -10000 # Faculty not available
, "timeslot" : 1 # max point for best timeslots
}
aws_students_sql = """
-- in selection_2019+
SELECT a.family_n, a.given_n,
a.user_id,
a.fields,
a.faculty_last1, a.faculty_first1,
a.faculty_last2, a.faculty_first2,
a.faculty_last3, a.faculty_first3,
e.comment -- comment from committee
FROM applicant a
JOIN eval_master e ON e.user_id=a.user_id
"""
matrix_students_sql = """
-- in matrix
SELECT embark_id
FROM student
"""
matrix_faculty_sql = """
SELECT faculty_id, name, email FROM faculty;
"""
faculty_unavail = """
SELECT faculty, timeslot
FROM matrix
WHERE student LIKE \"X%\"
"""
# Taken directly from database, sorted by desirability
sorted_timeslots = ["5", "9", "16", "7", "11", "18", "6", "10", "17", "8", "12", "19"]
wants_interview = """
-- in selection_2019+
SELECT e.user_id, userid
FROM eval_detail e
JOIN logon l on e.id_examiner = l.userid
WHERE interview = "yes";
"""
said_no = """
-- in selection_2019+
SELECT e.user_id, userid
FROM eval_detail e
JOIN logon l on e.id_examiner = l.userid
WHERE invite = 1 -- 1: No, 2: Maybe, 3: Invite;
"""
def defered_students(students, path):
stu = {}
with open(path) as csvfile:
reader = csv.reader(csvfile)
for last, first, name, fields, l1, f1, l2, f2, l3, f3, com in reader:
faculty = [[l1, f1], [l2, f2], [l3, f3]]
stu[name] = { "name" : last + " " + first
, "faculty" : faculty
, "core" : []
, "minor" : fields.split('/')[:-1]
, "match" : []
, "comment" : com
}
students.update(stu)
def show_comments(students):
print("Showing Comment:")
for stu in students:
comment = students[stu]["comment"]
if comment:
faculty = students[stu]["faculty"]
print("Student {} {}: {}".format(stu, faculty, comment))
print("End of comments\n")
def add_availability_and_check(db, faculty, students):
"""
Adds availability from faculty
Also check IDs are consistent
Removes students not in matrix (deferred or cancelled students)
"""
# First delete students from matrix to start fresh
with db.cursor() as cursor:
cursor.execute("DELETE FROM matrix WHERE student NOT LIKE \"X_%\";")
# Checking faculty IDs are consistent
print("\nFaculty ID checks:")
fac_matrix = {}
with db.cursor() as cursor:
cursor.execute(matrix_faculty_sql)
for id, name, email in cursor.fetchall():
fac_matrix[id] = (name, email.strip().lower())
# Check if IDs from matrix are all in selection
for id in fac_matrix:
name, email = fac_matrix[id]
if id in faculty:
if faculty[id]["email"] != email:
name2 = faculty[id]["name"]
email2 = faculty[id]["email"]
print(f"ID {id}: matrix faculty {name} {email} different from selection faculty {name2} {email2}")
else:
print(f"Faculty {name}, {email}, ID {id} in matrix not found in selection_{year}")
# Check if IDs from selection are in matrix
for id in faculty:
name2 = faculty[id]["name"]
email2 = faculty[id]["email"]
if id in fac_matrix:
name, email = fac_matrix[id]
if email2 != email:
print(f"ID {id}: selection faculty {name2} {email2} different from matrix faculty {name} {email}")
else:
print(f"Faculty {name2}, {email2}, ID {id} in selection_{year} not found in matrix")
print("End of facultyID checks\n")
# Adding availability data
for f in faculty:
faculty[f]['avail'] = [x for x in sorted_timeslots]
with db.cursor() as cursor:
cursor.execute(faculty_unavail)
for id, unavail in cursor.fetchall():
if id in faculty:
if unavail in faculty[id]["avail"]:
faculty[id]["avail"].remove(unavail)
else:
print("Matrix Faculty not found in selection faculty", id)
# Filtering to students in matrix
with db.cursor() as cursor:
cursor.execute(matrix_students_sql)
stu_avail = [s[0] for s in cursor.fetchall()]
stu2 = {s:students[s] for s in students if s in stu_avail}
fac2 = {f:faculty[f] for f in faculty if len(faculty[f]["avail"]) > 0 and f in fac_matrix}
return (fac2, stu2)
def force_interviews(students, forced):
"""
Manually forcing interviews
Mutates students
"""
for stu, fac in forced:
students[stu]["match"][fac] += weights["forced"]
def reject_interns(students, interns):
"""
Avoiding interviews with previous interns
Mutates students
"""
for stu, fac in interns:
students[stu]["match"][fac] += weights["intern"]
def requested_interviews(db, faculty, students):
"""
This is to prioritize interviews that were requested by faculty
Mutates students
"""
with db.cursor() as cursor:
cursor.execute(wants_interview)
for stu, fac_id in cursor.fetchall():
if stu in students:
matches = students[stu]["match"]
if fac_id in matches:
matches[fac_id] += weights["invite"]
def rejected_students(db, faculty, students):
"""
This is to avoid interviews with students who were rejected by specific faculty
Mutates students
"""
with db.cursor() as cursor:
cursor.execute(said_no)
for stu, fac_id in cursor.fetchall():
if stu in students:
matches = students[stu]["match"]
if fac_id in matches:
matches[fac_id] += weights["rejected"]
def make_matrix(faculty_all, students_all):
# Define parameters
times = sorted_timeslots
faculty = faculty_all.keys()
students = students_all.keys()
time_pref = { time: weights["timeslot"]*i/(len(sorted_timeslots)-1) \
for i, time in enumerate(reversed(sorted_timeslots))}
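    # e.g. with weights["timeslot"] == 1 and 12 slots, the most desirable
    # slot "5" scores 1.0 and the least desirable "19" scores 0.0,
    # linearly spaced in between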
# Initialize model
model = ConcreteModel()
# binary variables representing the time and session of each fac
model.grid = Var(((fac, stu, time) for fac in faculty for stu in students for time in times) ,
within=Binary, initialize=0)
# Define an objective function with model as input, to pass later
def obj_rule(m):
timing = sum(m.grid[fac, stu, time] * time_pref[time] for fac, stu, time in m.grid)
matching = sum(m.grid[fac, stu, time] * students_all[stu]["match"][fac] \
for fac, stu, time in m.grid)
unavailable = sum(m.grid[fac, stu, time] * (time not in faculty_all[fac]["avail"] ) \
for fac, stu, time in m.grid)
return timing + matching + weights["unavailable"]*unavailable
# add objective function to the model. rule (pass function) or expr (pass expression directly)
model.obj = Objective(rule=obj_rule, sense=maximize)
model.constraints = ConstraintList() # Create a set of constraints
# Constraint: N interviews per student
for stu in students:
model.constraints.add(
sum(model.grid[fac, stu, time] for fac in faculty for time in times) \
== interview_number
)
# Constraint: Maximum interviews per faculty
for fac in faculty:
model.constraints.add(
sum(model.grid[fac, stu, time] for stu in students for time in times) \
<= max_faculty_interview
)
# Constraint: Max one interview per time per faculty
for fac in faculty:
for time in times:
model.constraints.add(
sum( model.grid[fac, stu, time] for stu in students) <= 1
)
# Constraint: Max one interview per time per student
for stu in students:
for time in times:
model.constraints.add(
sum( model.grid[fac, stu, time] for fac in faculty) <= 1
)
# Constraint: each student/faculty pair interviews maximum once
for stu in students:
for fac in faculty:
model.constraints.add(
sum( model.grid[fac, stu, time] for time in times ) <= 1
)
model.preprocess()
# opt = SolverFactory('cbc', validate = False) # Select solver
# solver_manager = SolverManagerFactory('neos') # Solve in neos server
# results = solver_manager.solve(model, opt=opt)
opt = SolverFactory('cbc')
results = opt.solve(model) # Solve locally
print(results)
matrix = []
for fac, stu, time in model.grid:
if model.grid[fac, stu, time].value:
matrix.append((fac, stu, time))
return matrix
def matrix_analysis(matrix_original, faculty, students):
matrix = defaultdict(list)
for fac, stu, _ in matrix_original:
matrix[stu].append((students[stu]["match"][fac], fac))
print()
av = 0
for stu in matrix:
s = [score for score, _ in matrix[stu]]
if any([score < interview_low_score for score in s]):
intervs = [(sc, faculty[f]["name"]) for sc, f in matrix[stu]]
print("Student ", students[stu]["name"], " has some low interview scores ", intervs)
av += sum(s)
av /= interview_number * len(matrix)
print("Average score per candidate: {}".format(av))
print("Number of candidates: {}".format(len(matrix)))
fac = {}
for stu in matrix:
for score, f in matrix[stu]:
if f in fac:
fac[f].append((score, stu))
else:
fac[f] = [(score, stu)]
print()
dist = {}
av = 0
ints = 0
print("Faculty with less than 5 interviews:")
for f in sorted(faculty, key=lambda f: len(fac.get(f,[])), reverse = True):
if f not in fac:
print(faculty[f]["name"], "has no interviews")
else:
av += sum([score for score, _ in fac[f]])/len(fac[f])
ints += len(fac[f])
if len(fac[f]) < 5 :
print(faculty[f]["name"], "has", len(fac[f]), "interviews")
av /= len(fac)
ints /= len(fac)
print()
print("Average score per faculty: {}".format(av))
print("Average number of interviews per faculty: {}".format(ints))
c = Counter([len(fac[f]) for f in fac])
print("Distribution (number of ints, count): {}".format(sorted(list(c.items()))))
print("Number of faculty with interviews: {}".format(len(fac)))
def export_matrix(db, matrix):
"""
Rewrites the database matrix after manual confirm.
Input: database connection object, matrix
Returns: Nothing
"""
with db.cursor() as cursor:
query = "INSERT INTO matrix (student, faculty, timeslot) VALUES {};"
values = []
for fac, stu, t in matrix:
values.append(f"({stu}, {fac}, {t})")
cursor.execute(query.format(", ".join(values)))
db.commit()
"""
Checks that faculty and students IDs from selection and matrix match
"""
pass
if __name__ == "__main__":
login = toml.load("login.toml")
selection_db = m.connect(login, f"selection_{year}")
matrix_db = m.connect(login, "matrix")
# Faculty information
faculty = m.get_faculty(selection_db, "input/units.csv")
# Hard fixes
# del faculty["1"] # robertbaughman
# del faculty["23"] # mukhlessowwan
# del faculty["26"] # Stephens
# del faculty["29"] # Mikheyev
# del faculty["64"] # danielrokhsar
# del faculty["72"] # anastasiiatsvietkova
# del faculty["83"] # milindpurohit
# del faculty["76"] # Pauly
# faculty["101"] = faculty["833"] # Wrong id?
# del faculty["833"] # xiaodanzhou
# Fields information
fields = m.get_fields(selection_db, m.fields_sql)
# Student information
students = m.get_students(selection_db, aws_students_sql, fields)
    # Students deferred from last year
defered_students(students, "input/defered.csv")
# Add availabilities
(faculty, students) = add_availability_and_check(matrix_db, faculty, students)
# Data cleanup
m.fix_names(faculty, students)
# Show comments
show_comments(students)
# Manually adding faculty of interest
# students['75218934']["faculty"].append("keikokono")
# Compute matching scores
m.match(faculty, students, weights)
# Special case for Yanagida sensei => <NAME>
# forced = [("75183162", "42")]
forced = []
force_interviews(students, forced)
# Avoiding pairing intern students
print("\nRemember to remove past interns!")
# interns = [("75183162", "42")] :: [(student ID, faculty ID)]
interns = []
reject_interns(students, interns)
# Add interviews that faculty requested
requested_interviews(selection_db, faculty, students)
# Interviews to avoid
rejected_students(selection_db, faculty, students)
    # Make matrix
matrix = make_matrix(faculty, students)
# Analyze stats
matrix_analysis(matrix, faculty, students)
    # Export data
export_matrix(matrix_db, matrix)
selection_db.close()
matrix_db.close()
| StarcoderdataPython |
60540 | <reponame>TeamMacLean/stomatadetector<gh_stars>1-10
"""
Module for dealing with Multi-TIFF metadata in Perkin ELmer .flex files.
"""
import xmltodict
import tifffile as tf
#flex_file = '/Users/macleand/Desktop/stomata_detector/Test images and output/Ok/002002002/002002002.flex'
def count_planes_in_stack(flxml_arr):
return len(flxml_arr)
class FlexMetaData(object):
"""
Implements an `xmltodict` object that contains .flex metadata
:param: filename
:return: xmltodict object - nested structure with data
>>> flex_file = '/Users/user/test/002002002/002002002.flex'
>>> a = parse_flex_metadata(flex_file)
>>> metadata = a[0]
>>> print(metadata['Root']['Arrays']['Array'][0])
>>>
>>> OrderedDict([('@Type', 'Image'), ('@Name', 'Exp1Cam2'), ('@Width', '688'), ('@Height', '512'), ('@BitsPerPixel', '16'), ('@CompressionType', ''), ('@CompressionRate', ''), ('@Factor', '1.000000')])
"""
def __init__(self, flex_filepath):
self.metadata = self._parse_flex_metadata(flex_filepath)
self.planes_in_stack = count_planes_in_stack(self.metadata)
def _parse_flex_metadata(self, flex_filepath):
"""returns list of xmltodict objects of xml portion of a flex file.
Appends None if no xml found for a given image in the flex"""
with tf.TiffFile(flex_filepath) as flex:
xmls = []
for image in flex:
                try:
                    xmls.append(xmltodict.parse(image.tags.get('flex_xml').value))
                except Exception:
                    # flex_xml tag missing or XML malformed
                    xmls.append(None)
return xmls
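# Example usage (hypothetical path):
#   fm = FlexMetaData('/path/to/002002002.flex')
#   fm.planes_in_stack                    # number of pages with (attempted) metadata
#   fm.metadata[0]['Root']['Arrays']      # parsed XML dict for the first page, or None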
| StarcoderdataPython |
126415 | #!/usr/bin/python
#
# Compute MMANA geometry for vertical delta antenna.
# Copyright (C) 2005 <NAME> <<EMAIL>>
#
import math
# Parameters of antenna
MHz = 21.050 # Resonant frequency
R = 0.001 # Wire radius
# Wave length with experimental correction
wave = 300/MHz * 1.0707
# Apex height of an equilateral triangle with side length wave/3
H = wave/3 * math.sin(math.pi / 3)
print("Vertical delta", MHz, "MHz")
print("*")
print(MHz)
# Wires
print("*")
print("3")
def print_segment(x1, y1, x2, y2):
    print("%.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, -1" %
          (0, x1, y1, 0, x2, y2, R))
print_segment(0, 0, -wave/6, H)
print_segment(-wave/6, H, wave/6, H)
print_segment(wave/6, H, 0, 0)
# Source
print("*")
print("1, 1")
print("w1b, 0.0, 1.0")
# Load
print("*")
print("0, 1")
# Segmentation: DM1, DM2, SC, EC
print("*")
print("400, 40, 2.0, 1")
# Ground, Height, Material, R, Azimuth, Elevation, X
print("*")
print("%d, %.1f, %d, 50.0, 120, 60, 0" % (0, 0, 1))  # Free space, Cu
| StarcoderdataPython |
3210828 | <filename>tests/Unit/Evolution/Systems/NewtonianEuler/BoundaryConditions/DirichletAnalytic.py
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
import PointwiseFunctions.AnalyticSolutions.Hydro.SmoothFlow as hydro
import Evolution.Systems.NewtonianEuler.TimeDerivative as flux
def soln_error(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return None
_soln_pressure = 1.0
_soln_adiabatic_index = 5.0 / 3.0
_soln_perturbation_size = 0.2
def _soln_mean_velocity(dim):
mean_v = []
for i in range(0, dim):
mean_v.append(0.9 - i * 0.5)
return np.asarray(mean_v)
def _soln_wave_vector(dim):
wave_vector = []
for i in range(0, dim):
wave_vector.append(0.1 + i)
return np.asarray(wave_vector)
def soln_mass_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
return hydro.rest_mass_density(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
def soln_momentum_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
return hydro.rest_mass_density(
coords, time, _soln_mean_velocity(dim), _soln_wave_vector(dim),
_soln_pressure, _soln_adiabatic_index,
_soln_perturbation_size) * hydro.spatial_velocity(
coords, time, _soln_mean_velocity(dim), _soln_wave_vector(dim),
_soln_pressure, _soln_adiabatic_index, _soln_perturbation_size)
def soln_energy_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
velocity = hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
int_energy = hydro.specific_internal_energy(coords, time,
_soln_mean_velocity(dim),
_soln_wave_vector(dim),
_soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
return hydro.rest_mass_density(
coords, time, _soln_mean_velocity(dim), _soln_wave_vector(dim),
_soln_pressure, _soln_adiabatic_index, _soln_perturbation_size) * (
0.5 * np.dot(velocity, velocity) + int_energy)
def soln_flux_mass_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
return soln_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim)
def soln_flux_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
pressure = hydro.pressure(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index, _soln_perturbation_size)
return flux.momentum_density_flux_impl(
soln_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
soln_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
def soln_flux_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
pressure = hydro.pressure(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index, _soln_perturbation_size)
return flux.energy_density_flux_impl(
soln_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
soln_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
def soln_velocity(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
def soln_specific_internal_energy(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim):
return hydro.specific_internal_energy(coords, time,
_soln_mean_velocity(dim),
_soln_wave_vector(dim),
_soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
def data_error(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return None
_data_adiabatic_index = 1.4
_data_strip_bimedian_height = 0.5
_data_strip_thickness = 0.5
_data_strip_density = 2.0
_data_strip_velocity = 0.5
_data_background_density = 1.0
_data_background_velocity = -0.5
_data_pressure = 2.5
_data_perturb_amplitude = 0.1
_data_perturb_width = 0.03
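# The _data_* constants above describe a shear-flow setup: a dense strip
# (density 2.0) moving right at 0.5 inside a lighter background (density 1.0)
# moving left at 0.5, with a small sinusoidal transverse-velocity perturbation
# localized at the two strip boundaries.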
def data_mass_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
if np.abs(coords[-1] -
_data_strip_bimedian_height) < 0.5 * _data_strip_thickness:
return _data_strip_density
else:
return _data_background_density
def data_velocity(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
velocity = np.zeros([dim])
if np.abs(coords[-1] -
_data_strip_bimedian_height) < 0.5 * _data_strip_thickness:
velocity[0] = _data_strip_velocity
else:
velocity[0] = _data_background_velocity
one_over_two_sigma_squared = 0.5 / (_data_perturb_width)**2
strip_lower_bound = (_data_strip_bimedian_height -
0.5 * _data_strip_thickness)
strip_upper_bound = (_data_strip_bimedian_height +
0.5 * _data_strip_thickness)
velocity[-1] = (np.exp(-one_over_two_sigma_squared *
(coords[-1] - strip_lower_bound)**2) +
np.exp(-one_over_two_sigma_squared *
(coords[-1] - strip_upper_bound)**2))
velocity[-1] *= _data_perturb_amplitude * np.sin(4.0 * np.pi * coords[0])
return np.asarray(velocity)
def data_momentum_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
return data_mass_density(
face_mesh_velocity,
outward_directed_normal_covector, coords, time, dim) * data_velocity(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim)
def data_pressure(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return _data_pressure
def data_specific_internal_energy(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim):
return 1.0 / (_data_adiabatic_index - 1.0) * data_pressure(
face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim) / data_mass_density(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim)
def data_energy_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
velocity = data_velocity(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
int_energy = data_specific_internal_energy(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim)
return data_mass_density(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim) * (0.5 * np.dot(velocity, velocity) + int_energy)
def data_flux_mass_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
return data_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim)
def data_flux_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = data_velocity(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
pressure = data_pressure(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
return flux.momentum_density_flux_impl(
data_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
data_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
def data_flux_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = data_velocity(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
pressure = data_pressure(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
return flux.energy_density_flux_impl(
data_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
data_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
| StarcoderdataPython |
1650568 | import time
import torch
import random
import itertools
import numpy as np
from argparse import ArgumentParser
from torch.utils.data import DataLoader
from .learning_approach import Learning_Appr
class Appr(Learning_Appr):
""" Class implementing the Riemannian Walk approach described in
http://openaccess.thecvf.com/content_ECCV_2018/papers/Arslan_Chaudhry__Riemannian_Walk_ECCV_2018_paper.pdf """
def __init__(self, model, device, nepochs=100, lr=0.05, lr_min=1e-4, lr_factor=3, lr_patience=5, clipgrad=10000,
momentum=0, wd=0, multi_softmax=False, wu_nepochs=0, wu_lr_factor=1, logger=None, lamb=1, alpha=0.5,
damping=0.1, fim_sampling_type='max_pred', fim_num_samples=-1, num_exemplars=200,
exemplar_selection='herding'):
super(Appr, self).__init__(model, device, nepochs, lr, lr_min, lr_factor, lr_patience, clipgrad, momentum, wd,
multi_softmax, wu_nepochs, wu_lr_factor, logger)
self.lamb = lamb
self.alpha = alpha
self.damping = damping
self.sampling_type = fim_sampling_type
self.num_samples = fim_num_samples
self.num_exemplars = num_exemplars
self.exemplar_selection = exemplar_selection
# In all cases, we only keep importance weights for the model, but not for the heads.
feat_ext = self.model.model
# Page 7: "task-specific parameter importance over the entire training trajectory."
self.w = {n: torch.zeros(p.shape).to(self.device) for n, p in feat_ext.named_parameters() if p.requires_grad}
# Store current parameters as the initial parameters before first task starts
self.older_params = {n: p.clone().detach() for n, p in feat_ext.named_parameters() if p.requires_grad}
# Store scores and fisher information
self.scores = {n: torch.zeros(p.shape).to(self.device) for n, p in feat_ext.named_parameters() if p.requires_grad}
self.fisher = {n: torch.zeros(p.shape).to(self.device) for n, p in feat_ext.named_parameters() if p.requires_grad}
# Returns a parser containing the approach specific parameters
@staticmethod
def extra_parser(args):
parser = ArgumentParser()
parser.add_argument('--lamb', default=1, type=float, required=False, help='(default=%(default)s)')
parser.add_argument('--alpha', default=0.5, type=float, required=False, help='(default=%(default)s)') # in [0,1]
parser.add_argument('--damping', default=0.1, type=float, required=False, help='(default=%(default)s)')
parser.add_argument('--fim_num_samples', default=-1, type=int, required=False, help='(default=%(default)s)')
parser.add_argument('--fim_sampling_type', default='max_pred', type=str, required=False,
choices=['true', 'max_pred', 'multinomial'], help='(default=%(default)s)')
parser.add_argument('--num_exemplars', default=200, type=int, required=False, help='(default=%(default)s)')
# TODO: implemented random uniform and herding, they also propose two more sampling strategies
parser.add_argument('--exemplar_selection', default='random', type=str, choices=['herding', 'random'],
required=False, help='(default=%(default)s)')
return parser.parse_known_args(args)
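    # Example flags consumed by extra_parser (defaults shown above):
    #   --lamb 1 --alpha 0.5 --damping 0.1 --fim_sampling_type max_pred --num_exemplars 200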
# Returns the optimizer
def _get_optimizer(self):
if len(self.model.heads) > 1:
return torch.optim.SGD(list(self.model.model.parameters()) + list(self.model.heads[-1].parameters()),
lr=self.lr, weight_decay=self.wd, momentum=self.momentum)
else:
return torch.optim.SGD(self.model.parameters(),
lr=self.lr, weight_decay=self.wd, momentum=self.momentum)
def train(self, t, trn_loader, val_loader):
# number of classes and buffer samples per class
num_cls = sum(self.model.task_cls)
num_trn_ex_cls = int(np.ceil(self.num_exemplars / num_cls))
# add exemplars to train_loader
if self.num_exemplars > 0 and t > 0:
# if dataset is in memory or files type
if type(trn_loader.dataset.images) is np.ndarray:
trn_loader.dataset.images = np.vstack([trn_loader.dataset.images, np.vstack(self.x_train_exemplars)])
trn_loader.dataset.labels.extend(sum(self.y_train_exemplars, []))
else:
print('Adding exemplars in Base Dataset is not implemented yet.')
exit()
# RESUME DEFAULT TRAINING -- contains the epochs loop
super().train(t, trn_loader, val_loader)
# EXEMPLAR MANAGEMENT -- select training subset
if self.num_exemplars > 0:
print('Select training exemplars')
clock0 = time.time()
if self.exemplar_selection == 'random':
# iterate through all existing classes
self.x_train_exemplars = []
self.y_train_exemplars = []
for curr_cls in range(num_cls):
# get all indices from current class -- check if there are exemplars from previous task in loader
cls_ind = np.where(np.asarray(trn_loader.dataset.labels) == curr_cls)[0]
assert (len(cls_ind) > 0), "No samples to choose from for class {:d}".format(curr_cls)
assert (num_trn_ex_cls <= len(cls_ind)), "Not enough samples to store"
# select the exemplars randomly
selected = random.sample(list(cls_ind), num_trn_ex_cls)
# add the exemplars to the buffer
self.x_train_exemplars.append(trn_loader.dataset.images[selected])
self.y_train_exemplars.append([trn_loader.dataset.labels[idx] for idx in selected])
elif self.exemplar_selection == 'herding':
# change loader and fix to go sequentially (shuffle=False), keeps same order for later, eval transforms
ex_sel_loader = DataLoader(trn_loader.dataset, batch_size=trn_loader.batch_size, shuffle=False,
num_workers=trn_loader.num_workers, pin_memory=trn_loader.pin_memory)
ex_sel_loader.dataset.transform = val_loader.dataset.transform
# extract outputs from the model for all train samples
extracted_features = []
with torch.no_grad():
self.model.eval()
for images, targets in ex_sel_loader:
extracted_features.append(self.model(images.to(self.device))[0])
extracted_features = (torch.cat(extracted_features)).cpu()
# iterate through all existing classes
self.x_train_exemplars = []
self.y_train_exemplars = []
for curr_cls in range(num_cls):
# get all indices from current class -- check if there are exemplars from previous task in loader
cls_ind = np.where(np.asarray(trn_loader.dataset.labels) == curr_cls)[0]
assert (len(cls_ind) > 0), "No samples to choose from for class {:d}".format(curr_cls)
assert (num_trn_ex_cls <= len(cls_ind)), "Not enough samples to store"
# get all extracted features for current class
cls_feats = extracted_features[cls_ind]
# calculate the mean
cls_mu = cls_feats.mean(0)
# select the exemplars closer to the mean of each class
selected = []
selected_feat = []
for k in range(num_trn_ex_cls):
# fix this to the dimension of the model features
sum_others = torch.zeros(cls_feats.shape[1])
for j in selected_feat:
sum_others += j / (k + 1)
dist_min = np.inf
# choose the closest to the mean of the current class
for item in cls_ind:
if item not in selected:
feat = extracted_features[item]
dist = torch.norm(cls_mu - feat / (k + 1) - sum_others)
if dist < dist_min:
dist_min = dist
newone = item
newonefeat = feat
selected_feat.append(newonefeat)
selected.append(newone)
# add the exemplars to the buffer
self.x_train_exemplars.append(trn_loader.dataset.images[selected])
self.y_train_exemplars.append([trn_loader.dataset.labels[idx] for idx in selected])
# Log
clock1 = time.time()
print(' | Selected {:d} train exemplars, time={:5.1f}s'.format(
sum([len(elem) for elem in self.y_train_exemplars]), clock1 - clock0))
# Runs a single epoch
def train_epoch(self, t, trn_loader):
self.model.train()
for images, targets in trn_loader:
# store current model
curr_feat_ext = {n: p.clone().detach() for n, p in self.model.model.named_parameters() if p.requires_grad}
# Forward current model
outputs = self.model(images.to(self.device))
# cross-entropy loss on current task
loss = torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), targets.to(self.device))
self.optimizer.zero_grad()
loss.backward(retain_graph=True)
# store gradients without regularization term
unreg_grads = {n: p.grad.clone().detach() for n, p in self.model.model.named_parameters()
if p.grad is not None}
# apply loss with path integral regularization
loss = self.criterion(t, outputs, targets.to(self.device))
# Backward
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clipgrad)
self.optimizer.step()
# Page 7: "accumulate task-specific parameter importance over the entire training trajectory"
# "the parameter importance is defined as the ratio of the change in the loss function to the distance
# between the conditional likelihod distributions per step in the parameter space."
with torch.no_grad():
for n, p in self.model.model.named_parameters():
if n in unreg_grads.keys():
self.w[n] -= unreg_grads[n] * (p.detach() - curr_feat_ext[n])
def compute_fisher_matrix_diag(self, trn_loader):
# Store Fisher Information
fisher = {n: torch.zeros(p.shape).to(self.device) for n, p in self.model.model.named_parameters()
if p.requires_grad}
# Compute fisher information for specified number of samples -- rounded to the batch size
n_samples_batches = (self.num_samples // trn_loader.batch_size + 1) if self.num_samples > 0 \
else (len(trn_loader.dataset) // trn_loader.batch_size)
# Do forward and backward pass to compute the fisher information
self.model.train()
for images, targets in itertools.islice(trn_loader, n_samples_batches):
outputs = self.model.forward(images.to(self.device))
if self.sampling_type == 'true':
# Use the labels to compute the gradients based on the CE-loss with the ground truth
preds = targets.to(self.device)
elif self.sampling_type == 'max_pred':
# Not use labels and compute the gradients related to the prediction the model has learned
preds = torch.cat(outputs, dim=1).argmax(1).flatten()
elif self.sampling_type == 'multinomial':
# Use a multinomial sampling to compute the gradients
probs = torch.nn.functional.softmax(torch.cat(outputs, dim=1), dim=1)
preds = torch.multinomial(probs, len(targets)).flatten()
loss = torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), preds)
self.optimizer.zero_grad()
loss.backward()
# Page 6: "the Fisher component [...] is the expected square of the loss gradient w.r.t the i-th parameter."
for n, p in self.model.model.named_parameters():
if p.grad is not None:
fisher[n] += p.grad.pow(2) * len(targets)
# Apply mean across all samples
n_samples = n_samples_batches * trn_loader.batch_size
fisher = {n: (p / n_samples) for n, p in fisher.items()}
return fisher
# Runs after training all the epochs of the task (at the end of train function)
def post_train_process(self, t, trn_loader):
# Store current parameters for the next task
self.older_params = {n: p.clone().detach() for n, p in self.model.model.named_parameters() if p.requires_grad}
# calculate Fisher Information Matrix
curr_fisher = self.compute_fisher_matrix_diag(trn_loader)
# Eq. 10: efficiently update Fisher Information Matrix
for n in self.fisher.keys():
self.fisher[n] = self.alpha * curr_fisher[n] + (1 - self.alpha) * self.fisher[n]
# Page 7: Optimization Path-based Parameter Importance: importance scores computation
curr_score = {n: torch.zeros(p.shape).to(self.device) for n, p in self.model.model.named_parameters()
if p.requires_grad}
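        # Score per parameter: s_i = w_i / (F_i * (theta_i - theta_old_i)^2 + damping),
        # i.e. accumulated loss change normalized by the Fisher-weighted parameter
        # drift; the damping term avoids division by zero.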
with torch.no_grad():
curr_params = {n: p for n, p in self.model.model.named_parameters() if p.requires_grad}
for n, p in self.scores.items():
curr_score[n] = self.w[n] / (self.fisher[n] * ((curr_params[n] - self.older_params[n]) ** 2) + self.damping)
self.w[n].zero_()
# Page 7: "Since we care about positive influence of the parameters, negative scores are set to zero."
curr_score[n] = torch.nn.functional.relu(curr_score[n])
# Page 8: alleviating regularization getting increasingly rigid by averaging scores
for n, p in self.scores.items():
self.scores[n] = (self.scores[n] + curr_score[n]) / 2
# Returns the loss value
def criterion(self, t, outputs, targets):
loss_reg = 0
if t > 0:
# Eq. 9: final objective function
for n, p in self.model.model.named_parameters():
loss_reg += torch.sum((self.fisher[n] + self.scores[n]) * (p - self.older_params[n]).pow(2))
# since there are no exemplars, the CE loss is only applied to the current training head
return torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), targets) + self.lamb * loss_reg
| StarcoderdataPython |
3239938 | #-*- coding:utf-8 -*-
import requests
import json
import time
import sys
import imp
imp.reload(sys)
if __name__ == "__main__":
def TencentReader():
url = "https://kdy.unisyou.net/yunpan/activity/doSign"
head = {}
head['User-Agent'] = 'Mozilla/5.0 (iPad; CPU OS 11_0 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) Version/11.0 Mobile/15A5341f Safari/604.1'
head['Accept'] = "application/json, text/javascript, */*; q=0.01"
head["Content-Type"] = "application/json"
head["DNT"] = "1"
head['Origin'] = "https://kdy.unisyou.net"
        # Your own KouDaiYue (Pocket Reader) link (the URL obtained by scanning the WeChat QR code; starts with https)
head["Referer"] = "%Url%"
head["Sec-Fetch-Mode"] = "cors"
head["X-Requested-With"] = "XMLHttpRequest"
        # Your own request payload
        req = requests.post(url, data=json.dumps(
            {'userName': "your registered phone number", 'deviceNumber': "device number", 'activityId': '10000',
             'mobile': "iOSNaN --iPad"}), headers=head)
postmsg = req.content.decode("utf-8")
print(postmsg)
translate_results = json.loads(postmsg)
print(translate_results['resultData'])
return translate_results['resultData']
def SendWeChat():
localtime = u"---\n\n日期:" + time.strftime("%b %d, %Y", time.localtime()) + u"\n\n时间:" + time.strftime("%H:%M:%S", time.localtime()) + " (UTC)"
#localtimeasc = time.asctime(time.localtime())
#print(u"本地时间为 :", localtime)
        # Your own ServerChan (Server酱) push URL
api = "%Api%"
title = u"口袋阅签到提醒"
content =localtime + u"\n\n\n\n今天好像还没有签到哦!!!\n\n[**尝试手动签到**(可能会失效)](https://www.baidu.com)"
data = {
"text": title,
"desp": content
}
#print(data)
req = requests.post(api, data=data)
    if TencentReader() is None:
        print(u'Sign-in reminder sent')
SendWeChat()
| StarcoderdataPython |
3325101 | <filename>test/test_scrambling.py
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import unittest
import numpy as np
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
gpu_num = 0 # Number of the GPU to be used
try:
tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
print('Only GPU number', gpu_num, 'used.')
tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
    except RuntimeError as e:
print(e)
from sionna.fec.scrambling import Descrambler, Scrambler
from sionna.utils import BinarySource
class TestScrambler(unittest.TestCase):
def test_sequence_dimension(self):
"""Test against correct dimensions of the sequence"""
seq_lengths = [1, 100, 256, 1000, 1e4]
batch_sizes = [1, 100, 256, 1000, 1e4]
        # keep_state=True
for seq_length in seq_lengths:
# init new scrambler for new sequence size;
# only different batch_sizes are allowed in this mode
s = Scrambler(binary=False)
for batch_size in batch_sizes:
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
# build scrambler
x = s(llr).numpy()
self.assertTrue(np.array_equal(np.array(x.shape),
[int(batch_size), int(seq_length)]))
        # keep_state=False
s = Scrambler(binary=False, keep_state=False)
for seq_length in seq_lengths:
for batch_size in batch_sizes:
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
# build scrambler
x = s(llr).numpy()
self.assertTrue(np.array_equal(np.array(x.shape),
[int(batch_size), int(seq_length)]))
def test_sequence_offset(self):
"""Test that scrambling sequence has no offset, i.e., equal likely 0s
and 1s"""
seq_length = int(1e4)
batch_size = int(1e2)
for seed in (None, 1337, 1234, 1003): # test some initial seeds
for keep_state in (False, True):
s = Scrambler(seed=seed, keep_state=keep_state, binary=True)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
# build scrambler
s(llr)
# generate a random sequence
x = s(tf.zeros_like(llr))
self.assertAlmostEqual(np.mean(x),
0.5,
places=2)
def test_sequence_batch(self):
"""Test that scrambling sequence is random per batch sample iff
keep_batch_dims=True."""
seq_length = int(1e6)
batch_size = int(1e1)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
for keep_state in (False, True):
s = Scrambler(keep_batch_constant=False,
keep_state=keep_state,
binary=True)
# generate a random sequence
x = s(tf.zeros_like(llr))
for i in range(batch_size-1):
for j in range(i+1,batch_size):
# each batch sample must be different
self.assertAlmostEqual(np.mean(np.abs(x[i,:]-x[j,:])),
0.5,
places=2)
# test that the pattern is the same of option keep_batch_constant==True
for keep_state in (False, True):
s = Scrambler(keep_batch_constant=True,
keep_state=keep_state,
binary=True)
# generate a random sequence
x = s(tf.zeros_like(llr))
for i in range(batch_size-1):
for j in range(i+1,batch_size):
# each batch sample is the same
self.assertTrue(np.sum(np.abs(x[i,:]-x[j,:]))==0)
def test_sequence_realization(self):
"""Test that scrambling sequences are random for each new realization.
"""
seq_length = int(1e5)
batch_size = int(1e2)
s = Scrambler(keep_state=False, binary=True)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
# generate a random sequence
x1 = s(tf.zeros_like(llr))
x2 = s(tf.zeros_like(llr))
self.assertAlmostEqual(np.mean(np.abs(x1-x2)), 0.5, places=3)
def test_inverse(self):
"""Test that scrambling can be inverted/removed.
2x scrambling must result in the original sequence (for binary and
LLRs).
"""
seq_length = int(1e5)
batch_size = int(1e2)
#check binary scrambling
b = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)],
minval=0,
maxval=1)
for keep_batch in (False, True):
s = Scrambler(binary=True,
keep_batch_constant=keep_batch,
keep_state=True)
# only works if keep_state=True
b = tf.cast(tf.greater(0.5, b), dtype=tf.float32)
x = s(b)
x = s(x)
self.assertIsNone(np.testing.assert_array_equal(x.numpy(),
b.numpy()))
#check soft-value scrambling (flip sign)
s = Scrambler(binary=False,
keep_batch_constant=keep_batch,
keep_state=True)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
x = s(llr)
x = s(x)
self.assertIsNone(np.testing.assert_array_equal(x.numpy(),
llr.numpy()))
def test_llr(self):
"""Test that scrambling works for soft-values (sign flip)."""
s = Scrambler(binary=False, seed=12345)
b = tf.ones([100,200])
x = s(b)
s2 = Scrambler(binary=True, seed=12345)
res = -2. * s2(tf.zeros_like(x)) + 1
self.assertIsNone(np.testing.assert_array_equal(x.numpy(), res.numpy()))
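        # Binary scrambling of the all-zeros word recovers the scrambling sequence
        # s, and -2*s + 1 maps {0,1} to {+1,-1} -- exactly the sign pattern that
        # non-binary scrambling applies to the all-ones input above.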
def test_keep_state(self):
"""Test that keep_state works as expected.
Iff keep_state==True, the scrambled sequences must be constant."""
seq_length = int(1e5)
batch_size = int(1e2)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)],
minval=-100,
maxval=100)
S = Scrambler(binary=True, keep_state=True)
res1 = S(tf.zeros_like(llr))
res2 = S(tf.zeros_like(llr))
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
# also check that the sequence is unique with keep_state=False
S = Scrambler(binary=True, keep_state=False)
_ = S(llr)
res1 = S(tf.zeros_like(llr))
_ = S(llr)
res2 = S(tf.zeros_like(llr))
self.assertFalse(np.array_equal(res1.numpy(), res2.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
source = BinarySource()
inputs = tf.keras.Input(shape=(k), dtype=tf.float32)
x = Scrambler()(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
# test that output batch dim is none
self.assertTrue(model.output_shape[0] is None)
# test that model can be called
b = source([bs,k])
model(b)
# call twice to see that bs can change
b2 = source([bs+1,k])
model(b2)
model.summary()
def test_tf_fun(self):
"""Test that graph mode and XLA works as expected"""
@tf.function()
def run_graph(llr):
return s(llr)
@tf.function(jit_compile=True)
def run_graph_xla(llr):
return s(llr)
for keep_state in (False, True):
s = Scrambler(keep_state=keep_state)
b = tf.ones([100,200])
x1 = run_graph(b)
x2 = run_graph_xla(b)
# again with different batch_size
b = tf.ones([101,200])
x1 = run_graph(b)
x2 = run_graph_xla(b)
# and different sequence length
b = tf.ones([101,201])
x1 = run_graph(b)
x2 = run_graph_xla(b)
self.assertTrue(np.any(np.not_equal(x1.numpy(),b.numpy())))
self.assertTrue(np.any(np.not_equal(x2.numpy(),b.numpy())))
def test_seed(self):
"""Test that seed generates reproducible results."""
seq_length = int(1e5)
batch_size = int(1e2)
b = tf.zeros([batch_size, seq_length])
s1 = Scrambler(seed=1337, binary=True, keep_state=False)
res_s1_1 = s1(b)
res_s1_2 = s1(b)
# new realization per call
self.assertFalse(np.array_equal(res_s1_1.numpy(), res_s1_2.numpy()))
# if keep_state=True, the same seed should lead to the same sequence
s2 = Scrambler(seed=1337, binary=True, keep_state=True)
res_s2_1 = s2(b)
s3 = Scrambler(seed=1337)
res_s3_1 = s3(b)
# same seed lead to same sequence
self.assertTrue(np.array_equal(res_s2_1.numpy(), res_s3_1.numpy()))
# but with random seed it gives a new sequence for each init
s4 = Scrambler(seed=None, binary=True, keep_state=True)
res_s4_1 = s2(b)
s5 = Scrambler(seed=None)
res_s5_1 = s5(b)
# same seed lead to same sequence
self.assertFalse(np.array_equal(res_s4_1.numpy(), res_s5_1.numpy()))
# for keep_State=False, even the same seed leads to new results
s6 = Scrambler(seed=1337, binary=True, keep_state=False)
res_s6_1 = s6(b)
# different seed generates new sequence
self.assertFalse(np.array_equal(res_s6_1.numpy(), res_s2_1.numpy()))
# init with same seed as previous random seed
s7 = Scrambler(seed=None, binary=True, keep_state=True)
res_s7_1 = s7(b)
s8 = Scrambler(seed=s7.seed, binary=True, keep_state=True)
res_s8_1 = s8(b)
# same seed lead to same sequence
self.assertTrue(np.array_equal(res_s7_1.numpy(), res_s8_1.numpy()))
# test that seed can be also provided to call
seed = 987654
s9 = Scrambler(seed=45234, keep_state=False)
s10 = Scrambler(seed=76543, keep_state=True)
x1 = s9([b, seed]).numpy()
x2 = s9([b, seed+1]).numpy()
x3 = s9([b, seed]).numpy()
x4 = s10([b, seed]).numpy()
self.assertFalse(np.array_equal(x1, x2)) # different seed
self.assertTrue(np.array_equal(x1, x3)) # same seed
self.assertTrue(np.array_equal(x1, x4)) # same seed (keep_state=f)
# test that random seed allows inverse
x5 = s9([b, seed])
x6 = s9([b, seed]).numpy()
# same seed
self.assertTrue(np.array_equal(x5, x6)) # identity
# different seed
x7 = s9([b, seed+1])
self.assertFalse(np.array_equal(x5, x7)) # identity
# same seed again
x8 = s9([b, seed+1])
self.assertTrue(np.array_equal(x7, x8)) # identity
def test_dtype(self):
"""Test that variable dtypes are supported."""
seq_length = int(1e1)
batch_size = int(1e2)
dt_supported = [tf.float16, tf.float32, tf.float64]
for dt in dt_supported:
for dt_in in dt_supported:
for dt_out in dt_supported:
b = tf.zeros([batch_size, seq_length], dtype=dt_in)
s1 = Scrambler(dtype=dt)
s2 = Descrambler(s1, dtype=dt_out)
x = s1(b)
y = s2(x)
assert (x.dtype==dt)
assert (y.dtype==dt_out)
def test_descrambler(self):
""""Test that descrambler works as expected."""
seq_length = int(1e2)
batch_size = int(1e1)
b = tf.zeros([batch_size, seq_length])
s1 = Scrambler()
s2 = Descrambler(s1)
x = s1(b)
y = s2(x)
assert (np.array_equal(b.numpy(), y.numpy()))
x = s1([b, 1234])
y = s2(x)
assert (not np.array_equal(b.numpy(), y.numpy()))
# check if seed is correctly retrieved from scrambler
s3 = Scrambler(seed=12345)
s4 = Descrambler(s3)
x = s3(b)
y = s4(x)
assert (np.array_equal(b.numpy(), y.numpy()))
def test_descrambler_nonbin(self):
""""Test that descrambler works with non-binary."""
seq_length = int(1e2)
batch_size = int(1e1)
b = tf.zeros([batch_size, seq_length])
# scrambler binary, but descrambler non-binary
scrambler = Scrambler(seed=1235456, binary=True)
descrambler = Descrambler(scrambler, binary=False)
# with explicit seed
s = 8764
y = scrambler([b, s])
z = descrambler([2*y-1, s]) # bspk
z = 1 + z # remove bpsk
assert (np.array_equal(b.numpy(), z.numpy()))
#without explicit seed
y = scrambler(b)
z = descrambler(2*y-1) # bspk
z = 1 + z # remove bpsk
assert (np.array_equal(b.numpy(), z.numpy()))
# scrambler non-binary, but descrambler
scrambler = Scrambler(seed=1235456, binary=False)
descrambler = Descrambler(scrambler, binary=True)
s = 546342
y = scrambler([2*b-1, s]) # bspk
y = 0.5*(1 + y) # remove bpsk
z = descrambler([y, s])
assert (np.array_equal(b.numpy(), z.numpy()))
#without explicit seed
y = scrambler(2*b-1) # bspk
y = 0.5*(1 + y) # remove bpsk
z = descrambler(y)
y = 1 + y # remove bpsk
assert (np.array_equal(b.numpy(), z.numpy()))
def test_scrambler_binary(self):
"""test that binary flag can be used as input"""
seq_length = int(1e2)
batch_size = int(1e1)
b = tf.ones([batch_size, seq_length])
# scrambler binary, but descrambler non-binary
scrambler = Scrambler(seed=1245, binary=True)
s = 1234
x1 = scrambler(b) # binary scrambling
x2 = scrambler([b, s]) # binary scrambling different seed
x3 = scrambler([b, s, True]) # binary scrambling different seed
x4 = scrambler([b, s, False]) # non-binary scrambling different seed
assert (not np.array_equal(x1.numpy(), x2.numpy())) # different seed
assert (np.array_equal(x2.numpy(), x3.numpy())) # same seed
# same but "bpsk modulated"
assert (not np.array_equal(x1.numpy(), 0.5*(1+x4.numpy())))
| StarcoderdataPython |
1610373 | <filename>barcodes/dxfwrite/tests/test_drawing.py
#!/usr/bin/env python
#coding:utf-8
# Created: 27.04.2010
# Copyright (C) 2010, <NAME>
# License: MIT License
__author__ = "mozman <<EMAIL>>"
import os
import re
import unittest
from dxfwrite import DXFEngine as dxf
from dxfwrite.util import is_string
class TestDrawing(unittest.TestCase):
def test_drawing(self):
dwg = dxf.drawing()
res1 = dwg.__dxf__()
self.assertTrue(is_string(res1))
def test_properties(self):
dwg = dxf.drawing()
self.assertTrue(dwg.linetypes)
self.assertTrue(dwg.layers)
self.assertTrue(dwg.styles)
self.assertTrue(dwg.views)
self.assertTrue(dwg.viewports)
self.assertTrue(dwg.ucs)
def test_add(self):
dwg = dxf.drawing()
self.assertEqual(dwg.add("TEST"), "TEST")
def test_add_modelspace(self):
dwg = dxf.drawing()
txt = dwg.modelspace.add(dxf.text('TEST', paper_space=1))
self.assertEqual(0, txt['paper_space'])
def test_add_paperspace(self):
dwg = dxf.drawing()
txt = dwg.paperspace.add(dxf.text('TEST', paper_space=0))
self.assertEqual(1, txt['paper_space'])
def test_anonymous_blockname(self):
dwg = dxf.drawing()
        self.assertTrue(re.match(r"^\*U\d*$", dwg.anonymous_blockname('U')))
def test_add_anonymous_block(self):
dwg = dxf.drawing()
blockname = dwg.add_anonymous_block("TEST")
        self.assertTrue(re.match(r"^\*U\d*$", blockname))
block = dwg.blocks.find(blockname)
entity = block.get_data().pop()
self.assertEqual(entity, "TEST")
def test_writing(self):
filename = 'test.dxf'
try:
os.remove(filename)
except OSError:
pass
dwg = dxf.drawing()
dwg.saveas(filename)
try:
os.remove(filename)
except OSError:
self.assertTrue(False, "Drawing not saved!")
def test_add_layer(self):
dwg = dxf.drawing()
element = dwg.add_layer('TEST')
self.assertEqual(element['name'], 'TEST')
def test_add_style(self):
dwg = dxf.drawing()
element = dwg.add_style('TEST')
self.assertEqual(element['name'], 'TEST')
def test_add_linetype(self):
dwg = dxf.drawing()
element = dwg.add_linetype('TEST')
self.assertEqual(element['name'], 'TEST')
def test_add_view(self):
dwg = dxf.drawing()
element = dwg.add_view('TEST')
self.assertEqual(element['name'], 'TEST')
def test_add_viewport(self):
dwg = dxf.drawing()
element = dwg.add_vport('TEST')
self.assertEqual(element['name'], 'TEST')
def test_add_ucs(self):
dwg = dxf.drawing()
element = dwg.add_ucs('TEST')
self.assertEqual(element['name'], 'TEST')
if __name__=='__main__':
unittest.main()
| StarcoderdataPython |
1682764 | <gh_stars>1-10
#!/usr/bin/env python3
#
# TW_fix_Strongs.py
#
# Copyright (c) 2021 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# <NAME> <<EMAIL>>
#
# Written Aug 2021 by RJH
# Last modified: 2021-08-10 by RJH
#
"""
Quick script to fix Strongs numbers in TW markdown files.
Note that each run of this script rewrites existing markdown files if there are any changes.
"""
from typing import List
import os
from pathlib import Path
import re
# import logging
LOCAL_SOURCE_BASE_FOLDERPATH = Path('/mnt/Data/uW_dataRepos/')
LOCAL_SOURCE_FOLDERPATH = LOCAL_SOURCE_BASE_FOLDERPATH.joinpath('en_tw/')
def handle_line(line_number:int, line:str) -> str:
"""
Expand the Strongs numbers to the correct number of leading zeroes
and add the trailing zero for the UGL.
These regex's are a little more complex (with lookahead) to ensure that
even if we run the script multiple times, we don't get multiple zeroes appended.
"""
# print(f"handle_line({line_number} {line=})…")
def handle_H(match_object) -> str:
# print(f"handle_H({match_object=}) with {match_object.group(1)} for {line_number} {line=})…")
return f'H{match_object.group(1).zfill(4)}'
line = re.sub(r'H(\d{1,4})(?=[^\d]|$)', handle_H, line)
def handle_G(match_object) -> str:
# print(f"handle_G({match_object=}) with {match_object.group(1)} for {line_number} {line=})…")
return f'G{match_object.group(1).zfill(4)}0' # includes a suffix
line = re.sub(r'G(\d{1,4})(?=[^\d]|$)', handle_G, line)
return line
# end of handle_line function
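# For example, handle_line(1, "Strong's H123") returns "Strong's H0123" and
# handle_line(1, "Strong's G5") returns "Strong's G00050": Hebrew numbers are
# zero-padded to four digits, Greek numbers additionally get a trailing zero.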
def handle_file(folderpath:str, filename:str) -> int:
"""
Read a TW markdown file, and fix the Strongs numbers if necessary.
Returns the number of files (0 or 1) written.
"""
filepath = Path(folderpath).joinpath(filename)
# print(f" Getting source lines from {filepath}")
have_changes = False
output_lines:List[str] = []
with open(filepath, 'rt') as mdFile:
for line_number,line in enumerate(mdFile, start=1):
line = line.rstrip() # Remove trailing whitespace including nl char
# print(f" {line_number} {line=}")
if 'Strong' in line: # do some basic filtering
new_line = handle_line(line_number, line)
output_lines.append(new_line)
if new_line != line: have_changes = True
else:
output_lines.append(line)
if not have_changes:
return 0
print(f" Writing updated lines to {filepath}")
with open(filepath, 'wt') as mdFile:
mdFile.write('\n'.join(output_lines) + '\n') # We always write a final newLine character
return 1
# end of handle_file function
def main():
"""
"""
print("TW_fix_Strongs.py")
print(f" Source folderpath is {LOCAL_SOURCE_FOLDERPATH}/")
num_md_files = num_changed_md_files = 0
for root, dirs, files in os.walk(LOCAL_SOURCE_FOLDERPATH):
if '.git' in root: continue
for name in files:
# print(f"file: {root=} {name=} {os.path.join(root, name)=}")
if name.lower().endswith('.md'):
num_md_files += 1
num_changed_md_files += handle_file(root, name)
# for name in dirs:
# print(f"dir: {root=} {name=} {os.path.join(root, name)=}")
print(f" {num_md_files:,} total markdown files found and {num_changed_md_files:,} written in {LOCAL_SOURCE_FOLDERPATH}/")
# end of main function
if __name__ == '__main__':
main()
# end of TW_fix_Strongs.py
| StarcoderdataPython |
191087 | # https://projecteuler.net/problem=20
def reverse_str(s):
return s[::-1]
def sum_str(a, b):
if len(b) > len(a):
return sum_str(b, a)
reverseA = reverse_str(a)
reverseB = reverse_str(b)
i = 0
add = 0
result = []
while i < len(a):
o1 = int(reverseA[i])
o2 = 0
if i < len(b):
o2 = int(reverseB[i])
r = (o1+o2+add)%10
        add = (o1+o2+add)//10  # integer division for the carry
result.append(str(r))
i = i + 1
if add > 0:
result.append(str(add))
return reverse_str(''.join(result))
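# e.g. sum_str("99", "1") == "100" (digit-by-digit addition with carry)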
def str_x_n(s, n):
if n == 0:
return ""
if n > 10:
low = str_x_n(s, n%10)
        high = str_x_n(s, n//10)
return sum_str(high+"0", low)
reverse = reverse_str(s)
add = 0
result = []
for c in reverse:
m = int(c)*n + add
        add = m//10
r = m%10
result.append(str(r))
if add > 0:
result.append(str(add))
return reverse_str(''.join(result))
def sum_str_digits(s):
sum = 0
for c in s:
sum = sum + int(c)
return sum
xret = "1"
for i in range(2, 101):
xret = str_x_n(xret, i)
print(i, len(xret), xret)
print(xret, sum_str_digits(xret))
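# For reference, the digit sum of 100! is 648.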
| StarcoderdataPython |
3336071 | __all__ = ['bin', 'login']
| StarcoderdataPython |
68980 | journey_cost = float(input())
months = int(input())
saved_money = 0
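# Monthly rules: on odd months after the first, 16% of savings is spent;
# every fourth month earns 25% interest; a quarter of the journey cost is
# deposited each month.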
for i in range(1, months+1):
if i % 2 != 0 and i != 1:
saved_money = saved_money * 0.84
if i % 4 == 0:
saved_money = saved_money * 1.25
saved_money += journey_cost / 4
diff = abs(journey_cost - saved_money)
if saved_money >= journey_cost:
print(f"Bravo! You can go to Disneyland and you will have {diff:.2f}lv. for souvenirs.")
else:
print(f"Sorry. You need {diff:.2f}lv. more.") | StarcoderdataPython |
3392516 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/ExampleScenario) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .contactdetail import ContactDetail
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .identifier import Identifier
from .usagecontext import UsageContext
@dataclass
class ExampleScenarioProcessStepOperation(BackboneElement):
""" Each interaction or action.
"""
resource_type: ClassVar[str] = "ExampleScenarioProcessStepOperation"
number: str = None
type: Optional[str] = None
name: Optional[str] = None
initiator: Optional[str] = None
receiver: Optional[str] = None
description: Optional[str] = None
initiatorActive: Optional[bool] = None
receiverActive: Optional[bool] = None
request: Optional["ExampleScenarioInstanceContainedInstance"] = None
response: Optional["ExampleScenarioInstanceContainedInstance"] = None
@dataclass
class ExampleScenarioProcessStepAlternative(BackboneElement):
""" Alternate non-typical step action.
Indicates an alternative step that can be taken instead of the operations
on the base step in exceptional/atypical circumstances.
"""
resource_type: ClassVar[str] = "ExampleScenarioProcessStepAlternative"
title: str = None
description: Optional[str] = None
step: Optional[List["ExampleScenarioProcessStep"]] = None
@dataclass
class ExampleScenarioProcessStep(BackboneElement):
""" Each step of the process.
"""
resource_type: ClassVar[str] = "ExampleScenarioProcessStep"
process: Optional[List["ExampleScenarioProcess"]] = None
pause: Optional[bool] = None
operation: Optional[ExampleScenarioProcessStepOperation] = None
alternative: Optional[List[ExampleScenarioProcessStepAlternative]] = None
@dataclass
class ExampleScenarioInstanceVersion(BackboneElement):
""" A specific version of the resource.
"""
resource_type: ClassVar[str] = "ExampleScenarioInstanceVersion"
versionId: str = None
description: str = None
@dataclass
class ExampleScenarioInstanceContainedInstance(BackboneElement):
""" Resources contained in the instance.
Resources contained in the instance (e.g. the observations contained in a
bundle).
"""
resource_type: ClassVar[str] = "ExampleScenarioInstanceContainedInstance"
resourceId: str = None
versionId: Optional[str] = None
@dataclass
class ExampleScenarioActor(BackboneElement):
""" Actor participating in the resource.
"""
resource_type: ClassVar[str] = "ExampleScenarioActor"
actorId: str = None
type: str = None
name: Optional[str] = None
description: Optional[str] = None
@dataclass
class ExampleScenarioInstance(BackboneElement):
""" Each resource and each version that is present in the workflow.
"""
resource_type: ClassVar[str] = "ExampleScenarioInstance"
resourceId: str = None
resourceType: str = None
name: Optional[str] = None
description: Optional[str] = None
version: Optional[List[ExampleScenarioInstanceVersion]] = None
containedInstance: Optional[List[ExampleScenarioInstanceContainedInstance]] = None
@dataclass
class ExampleScenarioProcess(BackboneElement):
""" Each major process - a group of operations.
"""
resource_type: ClassVar[str] = "ExampleScenarioProcess"
title: str = None
description: Optional[str] = None
preConditions: Optional[str] = None
postConditions: Optional[str] = None
step: Optional[List[ExampleScenarioProcessStep]] = None
@dataclass
class ExampleScenario(DomainResource):
""" Example of workflow instance.
"""
resource_type: ClassVar[str] = "ExampleScenario"
url: Optional[str] = None
identifier: Optional[List[Identifier]] = None
version: Optional[str] = None
name: Optional[str] = None
status: str = None
experimental: Optional[bool] = None
date: Optional[FHIRDate] = None
publisher: Optional[str] = None
contact: Optional[List[ContactDetail]] = None
useContext: Optional[List[UsageContext]] = None
jurisdiction: Optional[List[CodeableConcept]] = None
copyright: Optional[str] = None
purpose: Optional[str] = None
actor: Optional[List[ExampleScenarioActor]] = None
instance: Optional[List[ExampleScenarioInstance]] = None
process: Optional[List[ExampleScenarioProcess]] = None
workflow: Optional[List[str]] = None | StarcoderdataPython |
183273 | import numpy as np
def load_glove(gloveFile):
'''
Requires packages: numpy
gloveFile: string
file path to txt file containing words and glove vectors
returns a dictionary of words as keys and their corresponding vectors as values
'''
    word_vector = {}
    # use a context manager so the file is closed after reading
    with open(gloveFile, 'r', encoding='utf8') as f:
        for line in f:
            splitLine = line.split(' ')
            word = splitLine[0]
            embedding = np.asarray(splitLine[1:], dtype='float32')
            word_vector[word] = embedding
return word_vector | StarcoderdataPython |
3228245 | from tkinter import *
from tkinter import ttk
master = Tk()
def on_write(*args):
num = var.get()
if len(num) > 0:
if not num[-1].isdigit():
var.set(num[:-1])
else:
var.set(num[:max_len])
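# Note: Tk also offers built-in validation via the Entry widget's
# 'validate'/'validatecommand' options; tracing a StringVar, as above, is a
# simpler, commonly used alternative.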
max_len = 5
var = StringVar()
var.trace('w', on_write)
entrada = Entry(master, textvariable=var)
entrada.pack()
master.mainloop() | StarcoderdataPython |
83340 | # Note: despite its name, this computes a**2 + b + c (no variable x involved);
# with no arguments all parameters default to 0, so it returns 0.
def calculated_quadratic_equation(a=0, b=0, c=0):
r = a ** 2 + b + c
return r
print(calculated_quadratic_equation())
| StarcoderdataPython |
199055 | def solve(input, days):
# Lanternfish with internal timer t are the number of lanternfish with timer t+1 after a day
for day in range(days):
aux = input[0]
input[0] = input[1]
input[1] = input[2]
input[2] = input[3]
input[3] = input[4]
input[4] = input[5]
input[5] = input[6]
# Lantern fish with interal timer 0 replicate, but they have to be added to those lanternfish that have
# a timer equal to 7
input[6] = input[7] + aux
input[7] = input[8]
input[8] = aux
return sum([input[key] for key in input])
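# Note: the nine explicit assignments above rotate the timer counts down by one
# each day; fish at timer 0 re-enter at timer 6 and also spawn new fish at
# timer 8.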
# Get input and transform to my chosen data structure
with open('Day6_input.txt','r') as inputfile:
input = inputfile.read().split(",")
input = [int(element) for element in input]
# Lanterfish dictionary where the key represents their internal timer, and the value how many lanternfish
# with that internal timer are alive right now
input = {
0: input.count(0),
1: input.count(1),
2: input.count(2),
3: input.count(3),
4: input.count(4),
5: input.count(5),
6: input.count(6),
7: input.count(7),
8: input.count(8)
}
part1_sol = solve(input,80)
print("Part 1 solution: ",part1_sol)
# 80 days have already been calculated, we take advantage of it
part2_sol = solve(input,256 - 80)
print("Part 2 solution: ",part2_sol) | StarcoderdataPython |
3304355 | <filename>scripts/input-demand-analysis.py
import pandas as pd
#from datetime import datetime
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.pyplot as plt
from scipy import interpolate
# load the cruise arrival data and keep only 2018 sailings
df = pd.read_excel(
"data/cruise-arrivals.xlsx",
dtype= {"YEAR":int, "ARRIVED DATE": str, "TIME": str, "HOURS": str})
df = df[df["Year"] == 2018]
df["ARRIVAL"] = pd.to_datetime(df["ARRIVED DATE"] + " " + df["TIME"] )
df["DEPATURE"] = df["ARRIVAL"] + pd.to_timedelta(df['HOURS'],"h")
# create function to interpolate based on relative time length stay of ship
demand = np.array([12, 12, 10, 6, 4, 4, 4, 4, 6, 10, 12, 12])
timerel = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])/11
f = interpolate.interp1d(timerel, demand)
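# e.g. f(0.5) == 4.0: relative demand halfway through a stay lies on the flat
# mid-stay plateau of the profile above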
# create a demand profile for every arrived ship based on the duration of its stay
stays = {}
for idx, row in df.iterrows():
    df_ = pd.DataFrame(index=pd.date_range(row["ARRIVAL"], row["DEPATURE"], freq="h"))
    df_["timerel"] = [i/len(df_) for i in range(1, len(df_)+1)]
    df_["demand"] = df_.apply(lambda x: f(x["timerel"]), axis=1)
    stays[idx] = df_
# concat, resample and aggregated values,
demand_agg = pd.concat(stays.values(), axis=0).sort_index()
demand_agg_sum = demand_agg.resample("H").sum()
# demand_agg_sum.sum()  # interactive sanity check of the total demand
# fix missing indices and fillna with 0
demand_agg_sum = demand_agg_sum.reindex(pd.date_range(start="2018", periods=8760, freq="H")).fillna(0)["demand"]
cruise_profile = demand_agg_sum / demand_agg_sum.sum()
cruise_profile.to_csv("data/cruise_ship_profile.csv")
# plot to see how it looks :-)
ax = demand_agg_sum.plot()
ax.set_ylabel("Aggregated Cruise Ship Demand in MW")
plt.savefig("visualization/figures/input-cruise-ship-demand.pdf")
# to see the structure of the arrivals and depatures
df["ARRIVAL TIME"] = pd.to_timedelta(df["TIME"]) / pd.offsets.Hour(1) #.astype("float")
df["HOURS OF STAY"] = pd.to_timedelta(df["HOURS"]) / pd.offsets.Hour(1) #.astype("float")
df.plot.scatter(x="ARRIVAL TIME", y="HOURS OF STAY")
plt.savefig("visualization/figures/input-arrival-and-stay.pdf")
load = pd.read_excel("scenarios/REF.xls", sheet_name="load", index_col=0)
df = pd.read_excel("scenarios/REF.xls", sheet_name="profiles", index_col=0, parse_dates=True)
profiles= df.iloc[:, 0:3]
amount = load["amount"].values
abs_profiles = profiles.multiply(amount)
abs_profiles["BB-Aggregated"] = abs_profiles.sum(axis=1)
ax = abs_profiles.iloc[4:168+4].plot(grid=True, color=["orange", "green", "skyblue", "darkred"])
#ax.set_ylim(0, 400)
ax.set_ylabel("Demand in MW")
ax.set_xlabel("Hour")
handles, labels = ax.get_legend_handles_labels()
lgd = dict(zip(handles, labels))
ax.grid(linestyle="--", lw=0.2)
lgd = ax.legend(
list(lgd.keys()),
["el-demand", "evcc-demand", "cruise-demand", "aggregated-demand"],
loc="lower left",
bbox_to_anchor=(0.1, -0.40),
ncol=2,
borderaxespad=0,
frameon=False,
)
inset = inset_axes(ax,
width="30%", # width = 30% of parent_bbox
height=1, # height : 1 inch
loc=1)
abs_profiles.iloc[:,2].plot(ax=inset, color="skyblue")
inset.set_title("Cruise Ships", backgroundcolor='w')
inset.set_ylabel("Demand in MW.", backgroundcolor='w')
inset.set_xlabel("Hour of year", backgroundcolor='w')
inset.set_xticklabels([""], backgroundcolor='w')
plt.savefig(
"visualization/figures/load-profiles-input.pdf",
#bbox_extra_artists=(lgd,),
bbox_inches="tight",
)
| StarcoderdataPython |
3255822 | #
# Copyright (c) 2020, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import MDSplus
import time
import numpy
class _ACQ2106_WRPG(MDSplus.Device):
"""
D-Tacq ACQ2106 with ACQ423 Digitizers (up to 6) real time streaming support.
DIO with 4 channels or 32
MDSplus.Device.debug - Controlled by environment variable DEBUG_DEVICES
MDSplus.Device.dprint(debuglevel, fmt, args)
- print if debuglevel >= MDSplus.Device.debug
"""
base_parts=[
{'path':':COMMENT', 'type':'text', 'options':('no_write_shot',)},
{'path':':NODE', 'type':'text', 'options':('no_write_shot',)},
{'path':':DIO_SITE', 'type':'numeric', 'value': int(4), 'options':('no_write_shot',)},
{'path':':TRIG_TIME', 'type':'numeric', 'options':('write_shot',)},
{'path':':RUNNING', 'type':'numeric', 'options':('no_write_model',)},
{'path':':LOG_OUTPUT', 'type':'text', 'options':('no_write_model', 'write_once', 'write_shot',)},
{'path':':INIT_ACTION', 'type':'action', 'valueExpr':"Action(Dispatch('CAMAC_SERVER','INIT',50,None),Method(None,'INIT',head))",'options':('no_write_shot',)},
{'path':':TRIG_ACTION', 'type':'action', 'valueExpr':"Action(Dispatch('CAMAC_SERVER','TRIG',50,None),Method(None,'TRIG',head))",'options':('no_write_shot',)},
{'path':':STOP_ACTION', 'type':'action', 'valueExpr':"Action(Dispatch('CAMAC_SERVER','STORE',50,None),Method(None,'STOP',head))",'options':('no_write_shot',)},
{'path':':STL_LISTS', 'type':'text', 'options':('write_shot',)},
{'path':':GPG_TRG_DX', 'type':'text', 'value': 'dx', 'options':('write_shot',)},
]
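    # Note (added for illustration): each dict in a Device `parts` list becomes
    # a node of the device subtree when the device is added to a tree, e.g.
    # {'path': ':COMMENT', 'type': 'text'} creates a text node DEVICE:COMMENT,
    # and 'valueExpr' strings are evaluated as MDSplus expressions.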
def init(self):
uut = self.getUUT()
slot = self.getSlot()
        # Setting the trigger in the PG/GPG module. These settings depend very much on the
        # configuration of the experiment. For example, when using one WRTT timing highway we can
        # use d0, which is also used by the digitizer module. Otherwise we can choose a different
        # one, such as d1, to stay on a timing highway independent of the digitizer.
slot.GPG_ENABLE = 1
slot.GPG_MODE = 'LOOP'
if self.isPG():
slot.TRG = 'enable'
slot.TRG_DX = str(self.gpg_trg_dx.data())
slot.TRG_SENSE = 'rising'
else:
slot.GPG_TRG = 'enable'
slot.GPG_TRG_DX = str(self.gpg_trg_dx.data())
slot.GPG_TRG_SENSE = 'rising'
        if self.debug >= 2:
            self.dprint(2, "Building STL: start")
        #Create the STL table from the series of transition times and states given in OUTPUT.
        # TIGA PG modules have 4 channels; non-TIGA PG modules have 32:
        tiga = '7B'
        nontiga = '6B'
site = self.dio_site.data()
if site == 0 or slot.MTYPE in nontiga:
nchans = 32
if self.debug >= 2:
self.dprint(2, 'DIO site and Number of Channels: {} {}'.format(self.dio_site.data(), nchans))
elif slot.MTYPE in tiga:
nchans = 4
if self.debug >= 2:
self.dprint(2, 'DIO site and Number of Channels: {} {}'.format(self.dio_site.data(), nchans))
# Create the STL table:
self.set_stl(nchans)
#Load the STL into the WRPG hardware: GPG
traces = False # True: shows debug information during loading
self.load_stl_data(traces)
if self.debug >= 1:
self.dprint(1,'WRPG has loaded the STL')
INIT=init
def stop(self):
slot = self.getSlot()
slot.GPG_ENABLE = 0
self.running.on = False
STOP=stop
def getUUT(self):
import acq400_hapi
uut = acq400_hapi.Acq2106_TIGA(self.node.data())
return uut
    def getSlot(self):
        uut = self.getUUT()
        site_number = int(self.dio_site.data())
        # Site 0 is the carrier itself (GPG-only systems); sites 1..6 are module slots.
        if site_number not in range(0, 7):
            raise ValueError('DIO_SITE must be between 0 and 6, got %d' % site_number)
        return getattr(uut, 's%d' % site_number)
    def isPG(self):
        slot = self.getSlot()
        site = int(self.dio_site.data())
        # Site 0 is a GPG carrier, not a PG module; otherwise probe the slot.
        try:
            is_pg = site != 0 and slot.GPG_ENABLE is not None
        except Exception:
            is_pg = False
        return is_pg
def load_stl_data(self,traces):
uut = self.getUUT()
        # Pairs of (transition time, 32-bit channel state):
        stl_pairs = self.stl_lists.data()
        # Convert the numpy array to a list of strings and join into a single string
        pairs = ''.join([str(item) for item in stl_pairs.tolist()])
        # Check whether the target is a GPG module (site 0) or a PG module (site 1..6)
if self.isPG():
uut.load_dio482pg(self.dio_site.data(), pairs, traces)
else:
uut.load_wrpg(pairs, traces)
def set_stl(self, nchan):
all_t_times = []
all_t_times_states = []
for i in range(nchan):
chan_t_times = self.__getattr__('OUTPUT_%3.3d' % (i+1))
# Pair of (transition time, state) for each channel:
chan_t_states = chan_t_times.data()
            # Flatten each channel's (time, state) pairs into one list; the
            # transition times end up at the even indices:
            for x in numpy.nditer(chan_t_states):
                all_t_times_states.append(x)  # appends one-element arrays
        # Keep only the transition times (every other element):
        all_t_times = all_t_times_states[0::2]
        # Unique transition times used in the experiment, sorted ascending:
        t_times = sorted(set(numpy.float64(i) for i in all_t_times))
# initialize the state matrix
rows, cols = (len(t_times), nchan)
state = [[0 for i in range(cols)] for j in range(rows)]
        # Building the state matrix. For each channel, traverse all the transition
        # times to find those that belong to that channel.
        # If the transition time is in the channel, copy its state into state[i][j].
        # If a transition time does not appear in that channel, keep the previous
        # state, i.e. the output does not change.
for j in range(nchan):
chan_t_states = self.__getattr__('OUTPUT_%3.3d' % (j+1))
for i in range(len(t_times)):
if i == 0:
state[i][j] = 0
else:
state[i][j] = state[i-1][j]
                # chan_t_states holds pairs of [time, state], e.g. [[0.0, 0], [1.0, 1], ...]:
                # chan_t_states[0] holds all the transition times,
                #   e.g. [[1D0], [2D0], [3D0], [4D0], ...]
                # chan_t_states[1] holds all the states, e.g. [[0], [1], ...]
for t in range(len(chan_t_states[0])):
#Check if the transition time is one of the times that belongs to this channel:
if t_times[i] == chan_t_states[0][t][0]:
state[i][j] = int(chan_t_states[1][t][0])
# Building the string of 1s and 0s for each transition time:
binrows = []
for row in state:
rowstr = [str(i) for i in numpy.flip(row)] # flipping the bits so that chan 1 is in the far right position
binrows.append(''.join(rowstr))
        # Convert the transition times from seconds to microseconds:
        times_usecs = []
        for elements in t_times:
            times_usecs.append(int(elements * 1E6))  # in microseconds
        # Pair each transition time with its binary state word:
stl_tuple = zip(times_usecs, binrows)
        # Record the STL lines into a tree node:
        stl_list = []
        # Each line is "<time in us>,<state word in HEX>":
for s in stl_tuple:
stl_list.append('%d,%08X\n' % (s[0], int(s[1], 2)))
# MDSplus wants a numpy array
self.stl_lists.putData(numpy.array(stl_list))
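# Worked example (added; hypothetical OUTPUT data): with nchan = 4, suppose
# OUTPUT_001 holds times [0.0, 1.0] with states [0, 1], and OUTPUT_002 holds
# times [0.0, 2.0] with states [0, 1]. The unique transition times are then
# 0.0, 1.0 and 2.0 s, and set_stl() would store the STL lines
#   0,00000000
#   1000000,00000001
#   2000000,00000003
# i.e. the time in microseconds followed by the channel states packed into a
# hex word with channel 1 in the least significant bit.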
OUTFMT3 = ':OUTPUT_%3.3d'
ACQ2106_CHANNEL_CHOICES = [4, 32]
def create_classes(base_class, root_name, parts, channel_choices):
my_classes = {}
for nchan in channel_choices:
class_name = "%s_%sCH" % (root_name, nchan)
my_parts = list(parts)
my_classes[class_name] = assemble(type(class_name, (base_class,), {"nchan": nchan, "parts": my_parts}))
my_classes[class_name].__module__ = base_class.__module__
return my_classes
def assemble(cls):
outfmt = OUTFMT3
for ch in range(1, cls.nchan+1):
        cls.parts.append({'path':outfmt%(ch,), 'type':'numeric', 'options':('no_write_shot',)})
return cls
class_ch_dict = create_classes(
_ACQ2106_WRPG, "ACQ2106_WRPG",
list(_ACQ2106_WRPG.base_parts),
ACQ2106_CHANNEL_CHOICES
)
globals().update(class_ch_dict)
del class_ch_dict
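# Usage sketch (added; hypothetical tree and node names): the generated
# classes behave like any other MDSplus device model, e.g.
#
#   from MDSplus import Tree
#   tree = Tree('my_tree', -1, 'EDIT')
#   tree.addDevice('WRPG', 'ACQ2106_WRPG_32CH')
#   tree.write()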
# public classes created in this module
# ACQ2106_WRPG_4CH
# ACQ2106_WRPG_32CH | StarcoderdataPython |