id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8121810 | <filename>campus_app/migrations/0005_auto_20190706_1912.py
# Generated by Django 2.2.2 on 2019-07-06 22:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Noticia.data_publicacao auto-populate.

    AlterField switches the field to ``auto_now_add=True`` so the publication
    timestamp is set once, automatically, when the row is created.
    """

    # Must run after the migration that deleted the Usuario model.
    dependencies = [
        ('campus_app', '0004_delete_usuario'),
    ]

    operations = [
        migrations.AlterField(
            model_name='noticia',
            name='data_publicacao',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| StarcoderdataPython |
1633041 | # -*- coding: utf-8 -*-
"""
@author: LiuXin
@contact: <EMAIL>
@Created on: DATE{TIME}
"""
from __future__ import print_function, division
import os
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
from mypath import Path
from torchvision import transforms
from dataloader.transforms_utils import custom_transforms as tr
from dataloader.transforms_utils import augment as au
from dataloader.transforms_utils import meta_transforms as meta_t
class SkmtDataSet(Dataset):
    """
    PascalVoc-style SKMT (skeletal muscle) segmentation dataset.

    Reads image/mask pairs listed in ``ImageSets/<split>.txt`` under
    ``base_dir`` and applies the train- or val-specific transform pipeline.
    """
    CLASSES = ('background', 'SAS', 'LHB', 'D',
               'HH', 'SUB', 'SUP', 'GL', 'GC',
               'SCB', 'INF', 'C', 'TM', 'SHB',
               'LHT', 'SAC', 'INS', 'BBLH', 'LHBT')
    # RGB colour associated with each class index (used by encode/decode_segmap).
    PALETTE = np.asarray([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
                          [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
                          [192, 0, 0], [64, 128, 0]])
    # Per-class pixel frequencies, usable as weights for class-balanced losses.
    CLASSES_PIXS_WEIGHTS = (0.7450, 0.0501, 0.0016, 0.0932, 0.0611,
                            0.0085, 0.0092, 0.0014, 0.0073, 0.0012, 0.0213)
    # TODO: drop the classes that never occur in the data
    # NUM_CLASSES = len(CLASSES)
    NUM_CLASSES = 11

    def __init__(self,
                 args,
                 base_dir=Path.db_root_dir('skmt'),
                 split='train',
                 ):
        """
        :param args: namespace providing ``image_size`` and ``crop_size``
        :param base_dir: path to VOC-style dataset directory
        :param split: 'train', 'val', or a list of such split names
        """
        super().__init__()
        self._base_dir = base_dir
        self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
        self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass')
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.args = args
        _splits_dir = os.path.join(self._base_dir, 'ImageSets')
        self.im_ids = []
        self.images = []
        self.categories = []
        for splt in self.split:
            # (fixed: the original wrapped this path in a redundant
            # single-argument os.path.join call)
            with open(os.path.join(_splits_dir, splt + '.txt'), "r") as f:
                lines = f.read().splitlines()
            for ii, line in enumerate(lines):
                _image = os.path.join(self._image_dir, line + ".jpg")
                _cat = os.path.join(self._cat_dir, line + ".png")
                # NOTE(review): asserts are stripped under ``python -O``;
                # consider raising FileNotFoundError instead.
                assert os.path.isfile(_image)
                assert os.path.isfile(_cat)
                self.im_ids.append(line)
                self.images.append(_image)
                self.categories.append(_cat)
        assert (len(self.images) == len(self.categories))
        # Display stats
        print('Number of images in {}: {:d}'.format(split, len(self.images)))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        """Return a dict with 'image', 'label' and 'section' keys, transformed
        according to which split this dataset was built for."""
        _img, _target = self._make_img_gt_point_pair(index)
        _section = self.get_section(index)
        sample = {'image': _img, 'label': _target, 'section': _section}
        for split in self.split:
            if split == "train":
                for key, value in self.transform_tr(sample).items():
                    sample[key] = value
                return sample
            elif split == 'val':
                for key, value in self.transform_val(sample).items():
                    sample[key] = value
                return sample
        # NOTE(review): implicitly returns None for unrecognised split names.

    def get_section(self, index):
        """Parse the section id encoded in the image file name.

        Bug fix: the original split the path on '/', which fails for
        Windows-style separators; ``os.path.basename`` handles both.
        """
        _name = os.path.basename(self.images[index])
        _section = _name.split('_')[0][-2]
        return int(_section)

    def _make_img_gt_point_pair(self, index):
        """Load the (image, mask) PIL pair for the given index."""
        _img = Image.open(self.images[index]).convert('RGB')
        _target = Image.open(self.categories[index])
        return _img, _target

    def transform_tr(self, sample):
        """Training pipeline: random flip/scale-crop/rotate, then normalize."""
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.image_size, crop_size=self.args.crop_size),
            # tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.RandomRotate(10),
            # tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()]
        )
        return composed_transforms(sample)

    def transform_val(self, sample):
        """Validation pipeline: deterministic resize, then normalize."""
        composed_transforms = transforms.Compose([
            tr.FixedResize(self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])
        return composed_transforms(sample)

    def count_section(self):
        """Count how many images belong to each section.

        :return: dict mapping section id -> number of images
        """
        table = {}
        for i in range(len(self.images)):
            _section = self.get_section(i)
            # dict.get avoids the separate membership check of the original.
            table[_section] = table.get(_section, 0) + 1
        return table

    @classmethod
    def encode_segmap(cls, mask):
        """Encode segmentation label images as class indices.

        Args:
            mask (np.ndarray): raw segmentation label image of dimension
                (M, N, 3), in which the classes are encoded as colours.
        Returns:
            (np.ndarray): class map with dimensions (M, N), where the value at
                a given location is the integer denoting the class index.
        """
        mask = mask.astype(int)
        label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
        for ii, label in enumerate(cls.PALETTE):
            # Boolean mask of pixels whose colour matches this palette entry.
            label_mask[np.all(mask == label, axis=-1)] = ii
        label_mask = label_mask.astype(int)
        return label_mask

    @classmethod
    def decode_segmap(cls, label_mask):
        """Decode segmentation class labels into a color image.

        Args:
            label_mask (np.ndarray): an (M, N) array of integer values denoting
                the class label at each spatial location.
        Returns:
            (np.ndarray): the resulting decoded color image, floats in [0, 1].
        """
        label_colours = cls.PALETTE
        n_classes = len(label_colours)
        r = label_mask.copy()
        g = label_mask.copy()
        b = label_mask.copy()
        for ll in range(0, n_classes):
            r[label_mask == ll] = label_colours[ll, 0]
            g[label_mask == ll] = label_colours[ll, 1]
            b[label_mask == ll] = label_colours[ll, 2]
        rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        return rgb

    def __str__(self):
        return 'skmt(split=' + str(self.split) + ')'
| StarcoderdataPython |
3354268 | <filename>temp.py
import numpy as np
import holoviews as hv
import pandas as pd

hv.extension('matplotlib')
# hv.notebook_extension('plotly')

# Load the voting data and add a log-transformed version of DiffAdd.
vdata = pd.read_csv("./votingdata.csv")
vdata['log_DiffAdd'] = np.log(vdata.DiffAdd)

from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
import random

# pyplot.style.use('ggplot') # pretty matplotlib plots

# 3-D scatter of GenEl2004 against log(DiffAdd) and its square.
# Bug fix: constructing Axes3D(fig) directly no longer attaches the axes to
# the figure on matplotlib >= 3.4; add_subplot(projection='3d') is the
# supported way to create 3-D axes.
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(vdata.log_DiffAdd, np.power(vdata.log_DiffAdd, 2), vdata.GenEl2004)
ax.set_xlabel("log")
ax.set_ylabel("log^2")
ax.set_zlabel("GenEl")
# Look straight down the z-axis onto the x/y plane.
ax.view_init(90, 0)
pyplot.show()
| StarcoderdataPython |
185554 | <gh_stars>10-100
import theano.tensor as T
from pylearn2.utils import wraps
from pylearn2.models.mlp import Layer
from pylearn2.models.mlp import PretrainedLayer
class PretrainedLayerWeight(PretrainedLayer):
    """PretrainedLayer that also contributes an L2 weight-decay penalty."""

    @wraps(Layer.get_weight_decay)
    def get_weight_decay(self, coeff):
        # Configuration files may supply the coefficient as a string.
        coeff = float(coeff) if isinstance(coeff, str) else coeff
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        weights = self.layer_content.get_weights()
        # L2 penalty: coefficient times the sum of squared weights.
        return coeff * T.sqr(weights).sum()
6696998 | import rubrik_cdm
# Connect to the Rubrik cluster (credentials are read from the environment
# by rubrik_cdm.Connect) and register the ESXi subnets Rubrik should scan.
rubrik = rubrik_cdm.Connect()
subnets = rubrik.set_esxi_subnets(["192.168.2.10/24","10.255.0.2/16"])
| StarcoderdataPython |
8057453 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Find the global maximum on ackley function
'''
from math import sin, cos, pi, pow, exp
import math
from gaft import GAEngine
from gaft.components import BinaryIndividual
from gaft.components import Population
from gaft.operators import TournamentSelection
from gaft.operators import UniformCrossover
from gaft.operators import FlipBitBigMutation
from time import time
# Built-in best fitness analysis.
from gaft.analysis.fitness_store import FitnessStore
from gaft.analysis.console_output import ConsoleOutput
from pypot.creatures import PoppyHumanoid
from pypot.primitive.move import MoveRecorder, MovePlayer
# Robot handle plus global bookkeeping shared by the fitness evaluation below.
poppy = PoppyHumanoid()
best = 0       # best absolute fitness value seen so far
count = 0      # number of fitness evaluations performed
x_values = []  # history of evaluated x parameters, one per evaluation
y_values = []  # history of evaluated y parameters, one per evaluation
def speed_calc(diff):
    """Convert an angular difference into a movement duration.

    The robot's goto_position speed argument is a duration in seconds; a
    larger angle difference is given proportionally more time, at a rate
    of 20 degrees per second.

    :param diff: difference between the prior and posterior angle (degrees)
    :return: duration of the move, in seconds
    """
    return diff / 20.0
def search_indication():
    """Indicate the slot machine the robot is searching with an arm gesture.

    Raises the right shoulder by 10 degrees and lowers it again, measuring
    the positioning error after each command. If the residual error exceeds
    2 degrees in either direction, a compensating move is applied before the
    shoulder is returned to its rest x-position.
    """
    present = poppy.r_shoulder_y.present_position
    movement = 10
    poppy.r_shoulder_y.goto_position(present + movement, 1, wait=0.01)
    # Error between the commanded offset and the position actually reached.
    error = abs(poppy.r_shoulder_y.present_position - present) - movement
    print('The error is {}'.format(error))
    present = poppy.r_shoulder_y.present_position
    poppy.r_shoulder_x.goto_position(poppy.r_shoulder_x.present_position - 5, 1, wait=0.01)
    # Move back down, compensating for the error of the upward move.
    poppy.r_shoulder_y.goto_position(present - (movement + error), 1, wait=0.01)
    error = abs(poppy.r_shoulder_y.present_position - present) - movement - error
    # (fixed: message previously read "The erroe after the movement")
    print('The error after the movement is {}'.format(error))
    if error > 2 or error < -2:
        print('Error of {} exists during the slot indication with a margin for the iteration {}'.format(error, count))
        # Apply a corrective move proportional to the residual error.
        poppy.r_shoulder_y.goto_position(poppy.r_shoulder_y.present_position + 2 * error, 1, wait=0.01)
    poppy.r_shoulder_x.goto_position(poppy.r_shoulder_x.present_position + 5, 1, wait=0.01)
def poppy_robot(next_position_x, next_position_y, position_difference_x,
                position_difference_y, final_movement=False):
    '''
    Drive the robot to the pose representing a two-parameter sample of the
    black-box function, then signal its search stage with arm gestures.

    The movement speed on each axis is derived from the distance travelled
    via speed_calc. To support more than two parameters, add the position
    arguments for the extra dimensions and mirror the motor calls below.

    :param next_position_x: target angle for the abs_z motor (x parameter)
    :param next_position_y: target angle for the r_shoulder_y motor (y parameter)
    :param position_difference_x: absolute angle change on the x axis
    :param position_difference_y: absolute angle change on the y axis
    :param final_movement: False while exploring (single gesture); True once
        the optimum is reached (double gesture)
    :return: None -- the effect is the robot's physical movement
    '''
    speed_to_move = speed_calc(position_difference_y)
    poppy.r_shoulder_y.goto_position(next_position_y, speed_to_move, wait=0.01)
    print('The difference along the y-axis is {} and the speed is {}'.format(position_difference_y, speed_to_move))
    speed_to_move = speed_calc(position_difference_x)
    poppy.abs_z.goto_position(next_position_x, speed_to_move, wait=0.01)
    print('The difference along the x-axis is {} and the speed is {}'.format(position_difference_x, speed_to_move))
    # Maintaining the robots horizontal position: counter-lean with abs_x
    # when the torso is rotated far to either side.
    if poppy.abs_z.present_position >= 40:
        poppy.abs_x.goto_position(-3, 0.5, wait=0.01)
    elif poppy.abs_z.present_position <= -40:
        poppy.abs_x.goto_position(3, 0.5, wait=0.01)
    else:
        poppy.abs_x.goto_position(0, 0.5, wait=0.01)
    if final_movement is False:
        # Exploration stage indicated with single motion
        search_indication()
    else:
        # Optimized stage indicated with double motion
        search_indication()
        search_indication()
def ackley_fn(x, y):
    """Evaluate the 2-D Ackley function at (x, y) while moving the robot to
    the pose that represents the sampled point.

    Globals ``count``, ``x_values``/``y_values`` and ``best`` track the
    evaluation history across calls.

    :param x: first search parameter (expected in [0, 1] by the pose mapping)
    :param y: second search parameter
    :return: the Ackley function value at (x, y)
    """
    global count
    # Recover the robot pose of the previous sample so the movement speed can
    # be derived from the distance travelled.
    if count == 0:  # bug fix: was "count is 0" (identity test on an int literal)
        prev_x_temp = 0.5
        prev_y_temp = 0
        prev_x_temp = prev_x_temp * 180 - 90
        prev_y_temp = -110 + prev_y_temp
    else:
        prev_x_temp = x_values[count-1]
        prev_y_temp = y_values[count-1]
        prev_x_temp = prev_x_temp * 180 - 90
        prev_y_temp = -(prev_y_temp * 70) - 110
    # Map the search-space point into motor-angle ranges.
    x_temp = x * 180 - 90
    y_temp = -(y*70) - 110
    x_values.insert(count, x)
    y_values.insert(count, y)
    position_difference_x = abs(x_temp - prev_x_temp)
    position_difference_y = abs(y_temp - prev_y_temp)
    final_flag = False
    # NOTE(review): PoppyHumanoid is not shown to expose a ``PoppyBO``
    # attribute anywhere in this file; this looks like it should call the
    # module-level ``poppy_robot`` helper defined above -- confirm before
    # relying on this code path.
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp, next_position_y=y_temp,
                              position_difference_x=position_difference_x,
                              position_difference_y=position_difference_y,
                              final_movement=final_flag)
    poppy_mov.multi_dimensional_poppy_robot()
    # Standard 2-D Ackley function terms.
    a = -20 * math.exp(-0.2 * (math.sqrt(0.5 * (math.pow(x, 2) + math.pow(y, 2)))))
    b = -math.exp(0.5 * (math.cos(2 * math.pi * x) + math.cos(2 * math.pi * y)))
    c = math.exp(1)
    d = 20
    result = a + b + c + d
    global best
    if best < abs(result):
        best = abs(result)
        print('The best outcome reward till now is {}'.format(best))
    count += 1
    return result
# Search the 2-D Ackley landscape over [-5, 5] x [-5, 5] with a binary-encoded GA.
search_domain = (-5, 5)
indv_template = BinaryIndividual(ranges=[search_domain, search_domain], eps=0.001)

# Define population
population = Population(indv_template=indv_template, size=50).init()

# Create genetic operators.
#selection = RouletteWheelSelection()
selection = TournamentSelection()
crossover = UniformCrossover(pc=0.8, pe=0.5)
mutation = FlipBitBigMutation(pm=0.1, pbm=0.55, alpha=0.6)

# Create genetic algorithm engine.
# Here we pass all built-in analysis to engine constructor.
engine = GAEngine(population=population, selection=selection,
                  crossover=crossover, mutation=mutation,
                  analysis=[ConsoleOutput, FitnessStore])

# Define fitness function.
@engine.fitness_register
def fitness(indv):
    """Fitness of an individual: the Ackley value at its decoded (x, y)."""
    x, y = indv.solution
    return ackley_fn(x, y)

if '__main__' == __name__:
    # Evolve the population for 10 generations.
    engine.run(ng=10)
| StarcoderdataPython |
11299118 | <reponame>RenolY2/mkdd-track-editor
import traceback
import os
from time import sleep
from timeit import default_timer
from io import StringIO
from math import sin, cos, atan2, radians, degrees, pi, tan
import json
from OpenGL.GL import *
from OpenGL.GLU import *
from PyQt5.QtGui import QMouseEvent, QWheelEvent, QPainter, QColor, QFont, QFontMetrics, QPolygon, QImage, QPixmap, QKeySequence
from PyQt5.QtWidgets import (QWidget, QListWidget, QListWidgetItem, QDialog, QMenu, QLineEdit,
QMdiSubWindow, QHBoxLayout, QVBoxLayout, QLabel, QPushButton, QTextEdit, QAction, QShortcut)
import PyQt5.QtWidgets as QtWidgets
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import QSize, pyqtSignal, QPoint, QRect
from PyQt5.QtCore import Qt
from helper_functions import calc_zoom_in_factor, calc_zoom_out_factor
from lib.libgen import GeneratorObject
from lib.collision import Collision
from widgets.editor_widgets import catch_exception, catch_exception_with_dialog
#from pikmingen import PikminObject
from libpiktxt import PikminTxt
from opengltext import draw_collision
from lib.vectors import Matrix4x4, Vector3, Line, Plane, Triangle
import pikmingen
from lib.model_rendering import TexturedPlane, Model, Grid, GenericObject, Material, Minimap
from gizmo import Gizmo
from lib.object_models import ObjectModels
from editor_controls import UserControl
from lib.libpath import Paths
from lib.libbol import BOL
import numpy
# Mouse interaction modes for the editor viewport.
MOUSE_MODE_NONE = 0
MOUSE_MODE_MOVEWP = 1
MOUSE_MODE_ADDWP = 2
MOUSE_MODE_CONNECTWP = 3

# Camera/view modes.
MODE_TOPDOWN = 0
MODE_3D = 1

#colors = [(1.0, 0.0, 0.0), (0.0, 0.5, 0.0), (0.0, 0.0, 1.0), (1.0, 1.0, 0.0)]
colors = [(0.0,191/255.0,255/255.0), (30/255.0,144/255.0,255/255.0), (0.0,0.0,255/255.0), (0.0,0.0,139/255.0)]

# Load user-configurable colour coding from disk at import time.
with open("lib/color_coding.json", "r") as f:
    colors_json = json.load(f)

colors_selection = colors_json["SelectionColor"]
colors_area = colors_json["Areas"]
class SelectionQueue(list):
    """FIFO queue of pending click-selection requests.

    Each entry is a tuple ``(x, y, width, height, shift_pressed, do_gizmo)``.
    ``do_gizmo`` marks a gizmo hit-test request; at most one such request is
    kept in the queue at any time.
    """

    def __init__(self):
        super().__init__()

    def queue_selection(self, x, y, width, height, shift_pressed, do_gizmo=False):
        """Append a selection request, dropping duplicate gizmo requests."""
        if do_gizmo:
            for entry in self:
                if entry[-1] is True:
                    return  # a gizmo request is already pending
        self.append((x, y, width, height, shift_pressed, do_gizmo))

    def clear(self):
        """Remove all pending gizmo requests from the queue.

        Bug fix: the original loop tested and removed ``tmp`` (the whole
        snapshot list) instead of the loop variable ``val``, so it never
        removed anything.
        """
        for val in list(self):
            if val[-1] is True:
                self.remove(val)

    def queue_pop(self):
        """Pop and return the oldest pending request, or None when empty."""
        if len(self) > 0:
            return self.pop(0)
        return None
class BolMapViewer(QtWidgets.QOpenGLWidget):
mouse_clicked = pyqtSignal(QMouseEvent)
entity_clicked = pyqtSignal(QMouseEvent, str)
mouse_dragged = pyqtSignal(QMouseEvent)
mouse_released = pyqtSignal(QMouseEvent)
mouse_wheel = pyqtSignal(QWheelEvent)
position_update = pyqtSignal(QMouseEvent, tuple)
height_update = pyqtSignal(float)
select_update = pyqtSignal()
move_points = pyqtSignal(float, float, float)
connect_update = pyqtSignal(int, int)
create_waypoint = pyqtSignal(float, float)
create_waypoint_3d = pyqtSignal(float, float, float)
rotate_current = pyqtSignal(Vector3)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._zoom_factor = 80
self.setFocusPolicy(Qt.ClickFocus)
self.SIZEX = 1024#768#1024
self.SIZEY = 1024#768#1024
self.canvas_width, self.canvas_height = self.width(), self.height()
self.resize(600, self.canvas_height)
#self.setMinimumSize(QSize(self.SIZEX, self.SIZEY))
#self.setMaximumSize(QSize(self.SIZEX, self.SIZEY))
self.setObjectName("bw_map_screen")
self.origin_x = self.SIZEX//2
self.origin_z = self.SIZEY//2
self.offset_x = 0
self.offset_z = 0
self.left_button_down = False
self.mid_button_down = False
self.right_button_down = False
self.drag_last_pos = None
self.selected = []
self.selected_positions = []
self.selected_rotations = []
#self.p = QPainter()
#self.p2 = QPainter()
# self.show_terrain_mode = SHOW_TERRAIN_REGULAR
self.selectionbox_start = None
self.selectionbox_end = None
self.visualize_cursor = None
self.click_mode = 0
self.level_image = None
self.collision = None
self.highlighttriangle = None
self.setMouseTracking(True)
self.level_file:BOL = None
self.waterboxes = []
self.mousemode = MOUSE_MODE_NONE
self.overlapping_wp_index = 0
self.editorconfig = None
self.visibility_menu = None
#self.setContextMenuPolicy(Qt.CustomContextMenu)
self.spawnpoint = None
self.alternative_mesh = None
self.highlight_colltype = None
self.shift_is_pressed = False
self.rotation_is_pressed = False
self.last_drag_update = 0
self.change_height_is_pressed = False
self.last_mouse_move = None
self.timer = QtCore.QTimer()
self.timer.setInterval(2)
self.timer.timeout.connect(self.render_loop)
self.timer.start()
self._lastrendertime = 0
self._lasttime = 0
self._frame_invalid = False
self.MOVE_UP = 0
self.MOVE_DOWN = 0
self.MOVE_LEFT = 0
self.MOVE_RIGHT = 0
self.MOVE_FORWARD = 0
self.MOVE_BACKWARD = 0
self.SPEEDUP = 0
self._wasdscrolling_speed = 1
self._wasdscrolling_speedupfactor = 3
self.main_model = None
self.buffered_deltas = []
# 3D Setup
self.mode = MODE_TOPDOWN
self.camera_horiz = pi*(1/2)
self.camera_vertical = -pi*(1/4)
self.camera_height = 1000
self.last_move = None
self.backgroundcolor = (255, 255, 255, 255)
#self.selection_queue = []
self.selectionqueue = SelectionQueue()
self.selectionbox_projected_start = None
self.selectionbox_projected_end = None
#self.selectionbox_projected_2d = None
self.selectionbox_projected_origin = None
self.selectionbox_projected_up = None
self.selectionbox_projected_right = None
self.selectionbox_projected_coords = None
self.last_position_update = 0
self.move_collision_plane = Plane(Vector3(0.0, 0.0, 0.0), Vector3(1.0, 0.0, 0.0), Vector3(0.0, 1.0, 0.0))
self.paths = Paths()
self.usercontrol = UserControl(self)
# Initialize some models
with open("resources/gizmo.obj", "r") as f:
self.gizmo = Gizmo.from_obj(f, rotate=True)
#self.generic_object = GenericObject()
self.models = ObjectModels()
self.grid = Grid(100000, 100000, 10000)
self.modelviewmatrix = None
self.projectionmatrix = None
self.arrow = None
self.minimap = Minimap(Vector3(-1000.0, 0.0, -1000.0), Vector3(1000.0, 0.0, 1000.0), 0,
None)
@catch_exception_with_dialog
def initializeGL(self):
self.rotation_visualizer = glGenLists(1)
glNewList(self.rotation_visualizer, GL_COMPILE)
glColor4f(0.0, 0.0, 1.0, 1.0)
glBegin(GL_LINES)
glVertex3f(0.0, 0.0, 0.0)
glVertex3f(0.0, 40.0, 0.0)
glEnd()
glEndList()
self.models.init_gl()
self.arrow = Material(texturepath="resources/arrow.png")
self.minimap = Minimap(Vector3(-1000.0, 0.0, -1000.0), Vector3(1000.0, 0.0, 1000.0), 0,
"resources/arrow.png")
def resizeGL(self, width, height):
# Called upon window resizing: reinitialize the viewport.
# update the window size
self.canvas_width, self.canvas_height = width, height
# paint within the whole window
glEnable(GL_DEPTH_TEST)
glViewport(0, 0, self.canvas_width, self.canvas_height)
@catch_exception
def set_editorconfig(self, config):
self.editorconfig = config
self._wasdscrolling_speed = config.getfloat("wasdscrolling_speed")
self._wasdscrolling_speedupfactor = config.getfloat("wasdscrolling_speedupfactor")
backgroundcolor = config["3d_background"].split(" ")
self.backgroundcolor = (int(backgroundcolor[0])/255.0,
int(backgroundcolor[1])/255.0,
int(backgroundcolor[2])/255.0,
1.0)
def change_from_topdown_to_3d(self):
if self.mode == MODE_3D:
return
else:
self.mode = MODE_3D
if self.mousemode == MOUSE_MODE_NONE:
self.setContextMenuPolicy(Qt.DefaultContextMenu)
# This is necessary so that the position of the 3d camera equals the middle of the topdown view
self.offset_x *= -1
self.do_redraw()
def change_from_3d_to_topdown(self):
if self.mode == MODE_TOPDOWN:
return
else:
self.mode = MODE_TOPDOWN
if self.mousemode == MOUSE_MODE_NONE:
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.offset_x *= -1
self.do_redraw()
def logic(self, delta, diff):
self.dolphin.logic(self, delta, diff)
@catch_exception
def render_loop(self):
now = default_timer()
diff = now-self._lastrendertime
timedelta = now-self._lasttime
if self.mode == MODE_TOPDOWN:
self.handle_arrowkey_scroll(timedelta)
else:
self.handle_arrowkey_scroll_3d(timedelta)
self.logic(timedelta, diff)
if diff > 1 / 60.0:
if self._frame_invalid:
self.update()
self._lastrendertime = now
self._frame_invalid = False
self._lasttime = now
def handle_arrowkey_scroll(self, timedelta):
if self.selectionbox_projected_coords is not None:
return
diff_x = diff_y = 0
#print(self.MOVE_UP, self.MOVE_DOWN, self.MOVE_LEFT, self.MOVE_RIGHT)
speedup = 1
if self.shift_is_pressed:
speedup = self._wasdscrolling_speedupfactor
if self.MOVE_FORWARD == 1 and self.MOVE_BACKWARD == 1:
diff_y = 0
elif self.MOVE_FORWARD == 1:
diff_y = 1*speedup*self._wasdscrolling_speed*timedelta
elif self.MOVE_BACKWARD == 1:
diff_y = -1*speedup*self._wasdscrolling_speed*timedelta
if self.MOVE_LEFT == 1 and self.MOVE_RIGHT == 1:
diff_x = 0
elif self.MOVE_LEFT == 1:
diff_x = 1*speedup*self._wasdscrolling_speed*timedelta
elif self.MOVE_RIGHT == 1:
diff_x = -1*speedup*self._wasdscrolling_speed*timedelta
if diff_x != 0 or diff_y != 0:
if self.zoom_factor > 1.0:
self.offset_x += diff_x * (1.0 + (self.zoom_factor - 1.0) / 2.0)
self.offset_z += diff_y * (1.0 + (self.zoom_factor - 1.0) / 2.0)
else:
self.offset_x += diff_x
self.offset_z += diff_y
# self.update()
self.do_redraw()
def handle_arrowkey_scroll_3d(self, timedelta):
if self.selectionbox_projected_coords is not None:
return
diff_x = diff_y = diff_height = 0
#print(self.MOVE_UP, self.MOVE_DOWN, self.MOVE_LEFT, self.MOVE_RIGHT)
speedup = 1
forward_vec = Vector3(cos(self.camera_horiz), sin(self.camera_horiz), 0)
sideways_vec = Vector3(sin(self.camera_horiz), -cos(self.camera_horiz), 0)
if self.shift_is_pressed:
speedup = self._wasdscrolling_speedupfactor
if self.MOVE_FORWARD == 1 and self.MOVE_BACKWARD == 1:
forward_move = forward_vec*0
elif self.MOVE_FORWARD == 1:
forward_move = forward_vec*(1*speedup*self._wasdscrolling_speed*timedelta)
elif self.MOVE_BACKWARD == 1:
forward_move = forward_vec*(-1*speedup*self._wasdscrolling_speed*timedelta)
else:
forward_move = forward_vec*0
if self.MOVE_LEFT == 1 and self.MOVE_RIGHT == 1:
sideways_move = sideways_vec*0
elif self.MOVE_LEFT == 1:
sideways_move = sideways_vec*(-1*speedup*self._wasdscrolling_speed*timedelta)
elif self.MOVE_RIGHT == 1:
sideways_move = sideways_vec*(1*speedup*self._wasdscrolling_speed*timedelta)
else:
sideways_move = sideways_vec*0
if self.MOVE_UP == 1 and self.MOVE_DOWN == 1:
diff_height = 0
elif self.MOVE_UP == 1:
diff_height = 1*speedup*self._wasdscrolling_speed*timedelta
elif self.MOVE_DOWN == 1:
diff_height = -1 * speedup * self._wasdscrolling_speed * timedelta
if not forward_move.is_zero() or not sideways_move.is_zero() or diff_height != 0:
#if self.zoom_factor > 1.0:
# self.offset_x += diff_x * (1.0 + (self.zoom_factor - 1.0) / 2.0)
# self.offset_z += diff_y * (1.0 + (self.zoom_factor - 1.0) / 2.0)
#else:
self.offset_x += (forward_move.x + sideways_move.x)
self.offset_z += (forward_move.y + sideways_move.y)
self.camera_height += diff_height
# self.update()
self.do_redraw()
def set_arrowkey_movement(self, up, down, left, right):
self.MOVE_UP = up
self.MOVE_DOWN = down
self.MOVE_LEFT = left
self.MOVE_RIGHT = right
def do_redraw(self, force=False):
self._frame_invalid = True
if force:
self._lastrendertime = 0
self.update()
def reset(self, keep_collision=False):
self.highlight_colltype = None
self.overlapping_wp_index = 0
self.shift_is_pressed = False
self.SIZEX = 1024
self.SIZEY = 1024
self.origin_x = self.SIZEX//2
self.origin_z = self.SIZEY//2
self.last_drag_update = 0
self.left_button_down = False
self.mid_button_down = False
self.right_button_down = False
self.drag_last_pos = None
self.selectionbox_start = None
self.selectionbox_end = None
self.selected = []
if not keep_collision:
# Potentially: Clear collision object too?
self.level_image = None
self.offset_x = 0
self.offset_z = 0
self._zoom_factor = 80
self.pikmin_generators = None
self.mousemode = MOUSE_MODE_NONE
self.spawnpoint = None
self.rotation_is_pressed = False
self._frame_invalid = False
self.MOVE_UP = 0
self.MOVE_DOWN = 0
self.MOVE_LEFT = 0
self.MOVE_RIGHT = 0
self.SPEEDUP = 0
def set_collision(self, verts, faces, alternative_mesh):
self.collision = Collision(verts, faces)
if self.main_model is None:
self.main_model = glGenLists(1)
self.alternative_mesh = alternative_mesh
glNewList(self.main_model, GL_COMPILE)
#glBegin(GL_TRIANGLES)
draw_collision(verts, faces)
#glEnd()
glEndList()
def set_mouse_mode(self, mode):
assert mode in (MOUSE_MODE_NONE, MOUSE_MODE_ADDWP, MOUSE_MODE_CONNECTWP, MOUSE_MODE_MOVEWP)
self.mousemode = mode
if self.mousemode == MOUSE_MODE_NONE and self.mode == MODE_TOPDOWN:
self.setContextMenuPolicy(Qt.CustomContextMenu)
else:
self.setContextMenuPolicy(Qt.DefaultContextMenu)
@property
def zoom_factor(self):
return self._zoom_factor/10.0
def zoom(self, fac):
if self._zoom_factor <= 60:
mult = 20.0
elif self._zoom_factor >= 600:
mult = 100.0
else:
mult = 40.0
if 10 < (self._zoom_factor + fac*mult) <= 1500:
self._zoom_factor += int(fac*mult)
#self.update()
self.do_redraw()
def mouse_coord_to_world_coord(self, mouse_x, mouse_y):
zf = self.zoom_factor
width, height = self.canvas_width, self.canvas_height
camera_width = width * zf
camera_height = height * zf
topleft_x = -camera_width / 2 - self.offset_x
topleft_y = camera_height / 2 + self.offset_z
relx = mouse_x / width
rely = mouse_y / height
res = (topleft_x + relx*camera_width, topleft_y - rely*camera_height)
return res
def mouse_coord_to_world_coord_transform(self, mouse_x, mouse_y):
mat4x4 = Matrix4x4.from_opengl_matrix(*glGetFloatv(GL_PROJECTION_MATRIX))
width, height = self.canvas_width, self.canvas_height
result = mat4x4.multiply_vec4(mouse_x-width/2, mouse_y-height/2, 0, 1)
return result
#@catch_exception_with_dialog
#@catch_exception
def paintGL(self):
start = default_timer()
offset_x = self.offset_x
offset_z = self.offset_z
#start = default_timer()
glClearColor(1.0, 1.0, 1.0, 0.0)
#glClearColor(*self.backgroundcolor)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
width, height = self.canvas_width, self.canvas_height
if self.mode == MODE_TOPDOWN:
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
zf = self.zoom_factor
#glOrtho(-6000.0, 6000.0, -6000.0, 6000.0, -3000.0, 2000.0)
camera_width = width*zf
camera_height = height*zf
glOrtho(-camera_width / 2 - offset_x, camera_width / 2 - offset_x,
-camera_height / 2 + offset_z, camera_height / 2 + offset_z, -120000.0, 80000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
else:
#glEnable(GL_CULL_FACE)
# set yellow color for subsequent drawing rendering calls
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(75, width / height, 256.0, 160000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
look_direction = Vector3(cos(self.camera_horiz), sin(self.camera_horiz), sin(self.camera_vertical))
# look_direction.unify()
fac = 1.01 - abs(look_direction.z)
# print(fac, look_direction.z, look_direction)
gluLookAt(self.offset_x, self.offset_z, self.camera_height,
self.offset_x + look_direction.x * fac, self.offset_z + look_direction.y * fac,
self.camera_height + look_direction.z,
0, 0, 1)
self.camera_direction = Vector3(look_direction.x * fac, look_direction.y * fac, look_direction.z)
#print(self.camera_direction)
self.modelviewmatrix = numpy.transpose(numpy.reshape(glGetFloatv(GL_MODELVIEW_MATRIX), (4,4)))
self.projectionmatrix = numpy.transpose(numpy.reshape(glGetFloatv(GL_PROJECTION_MATRIX), (4,4)))
self.mvp_mat = numpy.dot(self.projectionmatrix, self.modelviewmatrix)
self.modelviewmatrix_inv = numpy.linalg.inv(self.modelviewmatrix)
campos = Vector3(self.offset_x, self.camera_height, -self.offset_z)
self.campos = campos
if self.mode == MODE_TOPDOWN:
gizmo_scale = 3*zf
else:
gizmo_scale = (self.gizmo.position - campos).norm() / 130.0
self.gizmo_scale = gizmo_scale
#print(self.gizmo.position, campos)
vismenu: FilterViewMenu = self.visibility_menu
while len(self.selectionqueue) > 0:
glClearColor(1.0, 1.0, 1.0, 1.0)
#
click_x, click_y, clickwidth, clickheight, shiftpressed, do_gizmo = self.selectionqueue.queue_pop()
click_y = height - click_y
hit = 0xFF
#print("received request", do_gizmo)
if clickwidth == 1 and clickheight == 1:
self.gizmo.render_collision_check(gizmo_scale, is3d=self.mode == MODE_3D)
pixels = glReadPixels(click_x, click_y, clickwidth, clickheight, GL_RGB, GL_UNSIGNED_BYTE)
#print(pixels)
hit = pixels[2]
if do_gizmo and hit != 0xFF:
self.gizmo.run_callback(hit)
self.gizmo.was_hit_at_all = True
#if hit != 0xFF and do_:
glClearColor(1.0, 1.0, 1.0, 1.0)
if self.level_file is not None and hit == 0xFF and not do_gizmo:
#objects = self.pikmin_generators.generators
glDisable(GL_TEXTURE_2D)
#for i, pikminobject in enumerate(objects):
# self.models.render_object_coloredid(pikminobject, i)
id = 0x100000
objlist = []
offset = 0
if self.minimap is not None and vismenu.minimap.is_selectable() and self.minimap.is_available():
objlist.append((self.minimap, self.minimap.corner1, self.minimap.corner2, None))
self.models.render_generic_position_colored_id(self.minimap.corner1, id + (offset) * 4)
self.models.render_generic_position_colored_id(self.minimap.corner2, id + (offset) * 4 + 1)
offset = 1
"""
for ptr, pos in self.karts:
objlist.append((ptr, pos, None, None))
self.models.render_generic_position_colored_id(pos, id + (offset) * 4)
offset += 1"""
self.dolphin.render_collision(self, objlist)
offset = len(objlist)
if vismenu.enemyroute.is_selectable():
for i, obj in enumerate(self.level_file.enemypointgroups.points()):
objlist.append((obj, obj.position, None, None))
self.models.render_generic_position_colored_id(obj.position, id + (offset+i) * 4)
offset = len(objlist)
if vismenu.itemroutes.is_selectable():
i = 0
for route in self.level_file.routes:
for obj in route.points:
objlist.append((obj, obj.position, None, None))
self.models.render_generic_position_colored_id(obj.position, id + (offset+i) * 4)
i += 1
offset = len(objlist)
if vismenu.checkpoints.is_selectable():
for i, obj in enumerate(self.level_file.objects_with_2positions()):
objlist.append((obj, obj.start, obj.end, None))
self.models.render_generic_position_colored_id(obj.start, id+(offset+i)*4)
self.models.render_generic_position_colored_id(obj.end, id+(offset+i)*4 + 1)
for is_selectable, collection in (
(vismenu.objects.is_selectable(), self.level_file.objects.objects),
(vismenu.kartstartpoints.is_selectable(), self.level_file.kartpoints.positions),
(vismenu.areas.is_selectable(), self.level_file.areas.areas),
(vismenu.cameras.is_selectable(), self.level_file.cameras),
(vismenu.respawnpoints.is_selectable(), self.level_file.respawnpoints)
):
offset = len(objlist)
if not is_selectable:
continue
for i, obj in enumerate(collection):
objlist.append((obj, obj.position, None, obj.rotation))
self.models.render_generic_position_rotation_colored_id(obj.position, obj.rotation,
id + (offset + i) * 4 + 2)
assert len(objlist)*4 < id
print("We queued up", len(objlist))
pixels = glReadPixels(click_x, click_y, clickwidth, clickheight, GL_RGB, GL_UNSIGNED_BYTE)
#print(pixels, click_x, click_y, clickwidth, clickheight)
selected = {}
selected_positions = []
selected_rotations = []
#for i in range(0, clickwidth*clickheight, 4):
start = default_timer()
for i in range(0, clickwidth*clickheight, 13):
# | (pixels[i*3+0] << 16)
if pixels[i * 3] != 0xFF:
upper = pixels[i * 3] & 0x0F
index = (upper << 16)| (pixels[i*3 + 1] << 8) | pixels[i*3 + 2]
if index & 0b1:
# second position
entry = objlist[index//4]
if entry[0] not in selected:
selected[entry[0]] = 2
selected_positions.append(entry[2])
elif selected[entry[0]] == 1:
selected[entry[0]] = 3
selected_positions.append(entry[2])
else:
entry = objlist[index // 4]
if entry[0] not in selected:
selected[entry[0]] = 1
selected_positions.append(entry[1])
if index & 0b10:
print("found a rotation")
selected_rotations.append(entry[3])
elif selected[entry[0]] == 2:
selected[entry[0]] = 3
selected_positions.append(entry[1])
#print("select time taken", default_timer() - start)
#print("result:", selected)
selected = [x for x in selected.keys()]
if not shiftpressed:
self.selected = selected
self.selected_positions = selected_positions
self.selected_rotations = selected_rotations
self.select_update.emit()
else:
for obj in selected:
if obj not in self.selected:
self.selected.append(obj)
for pos in selected_positions:
if pos not in self.selected_positions:
self.selected_positions.append(pos)
for rot in selected_rotations:
if rot not in self.selected_rotations:
self.selected_rotations.append(rot)
self.select_update.emit()
self.gizmo.move_to_average(self.selected_positions)
if len(selected) == 0:
#print("Select did register")
self.gizmo.hidden = True
if self.mode == MODE_3D: # In case of 3D mode we need to update scale due to changed gizmo position
gizmo_scale = (self.gizmo.position - campos).norm() / 130.0
#print("total time taken", default_timer() - start)
#print("gizmo status", self.gizmo.was_hit_at_all)
#glClearColor(1.0, 1.0, 1.0, 0.0)
glClearColor(*self.backgroundcolor)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glEnable(GL_DEPTH_TEST)
glDisable(GL_TEXTURE_2D)
glColor4f(1.0, 1.0, 1.0, 1.0)
if self.main_model is not None:
if self.alternative_mesh is None:
glCallList(self.main_model)
else:
glPushMatrix()
glScalef(1.0, -1.0, 1.0)
self.alternative_mesh.render(selectedPart=self.highlight_colltype)
glPopMatrix()
glDisable(GL_TEXTURE_2D)
glColor4f(1.0, 1.0, 1.0, 1.0)
self.grid.render()
if self.mode == MODE_TOPDOWN:
glClear(GL_DEPTH_BUFFER_BIT)
if self.minimap is not None and vismenu.minimap.is_visible() and self.minimap.is_available():
self.minimap.render()
glClear(GL_DEPTH_BUFFER_BIT)
#else:
# if self.minimap is not None and vismenu.minimap.is_visible():
# self.minimap.render()
# glDisable(GL_DEPTH_TEST)
glEnable(GL_ALPHA_TEST)
glAlphaFunc(GL_GEQUAL, 0.5)
p = 0
self.dolphin.render_visual(self, self.selected)
"""for valid, kartpos in self.karts:
if valid:
self.models.render_player_position_colored(kartpos, valid in self.selected, p)
p += 1"""
if self.level_file is not None:
selected = self.selected
positions = self.selected_positions
select_optimize = {x:True for x in selected}
#objects = self.pikmin_generators.generators
#for pikminobject in objects:
# self.models.render_object(pikminobject, pikminobject in selected)
vismenu = self.visibility_menu
if vismenu.itemroutes.is_visible():
for route in self.level_file.routes:
for point in route.points:
self.models.render_generic_position_colored(point.position, point in select_optimize, "itempoint")
glBegin(GL_LINE_STRIP)
glColor3f(0.0, 0.0, 0.0)
for point in route.points:
pos = point.position
glVertex3f(pos.x, -pos.z, pos.y)
glEnd()
if vismenu.enemyroute.is_visible():
for group in self.level_file.enemypointgroups.groups:
for point in group.points:
if point in select_optimize:
glColor3f(0.3, 0.3, 0.3)
self.models.draw_sphere(point.position, point.scale)
self.models.render_generic_position_colored(point.position, point in select_optimize, "enemypoint")
glBegin(GL_LINE_STRIP)
glColor3f(0.0, 0.0, 0.0)
for point in group.points:
pos = point.position
glVertex3f(pos.x, -pos.z, pos.y)
glEnd()
groups = self.level_file.enemypointgroups.groups
links = {}
for group in groups:
for point in group.points:
if point.link != -1:
if point.link not in links:
links[point.link] = [point.position]
else:
links[point.link].append(point.position)
"""
and point.link in groups and len(groups[point.link].points) > 0:
if point != groups[point.link].points[0]:
pos1 = point.position
pos2 = groups[point.link].points[0].position
glVertex3f(pos1.x, -pos1.z, pos1.y)
glVertex3f(pos2.x, -pos2.z, pos2.y)"""
glColor3f(0.0, 0.0, 1.0)
for link, points in links.items():
glBegin(GL_LINE_LOOP)
for point in points:
glVertex3f(point.x, -point.z, point.y)
glEnd()
if vismenu.checkpoints.is_visible():
for i, group in enumerate(self.level_file.checkpoints.groups):
prev = None
for checkpoint in group.points:
self.models.render_generic_position_colored(checkpoint.start, checkpoint.start in positions, "checkpointleft")
self.models.render_generic_position_colored(checkpoint.end, checkpoint.end in positions, "checkpointright")
glColor3f(*colors[i % 4])
glBegin(GL_LINES)
for checkpoint in group.points:
pos1 = checkpoint.start
pos2 = checkpoint.end
#self.models.render_generic_position(pos1, False)
#self.models.render_generic_position(pos2, False)
glVertex3f(pos1.x, -pos1.z, pos1.y)
glVertex3f(pos2.x, -pos2.z, pos2.y)
#glColor3f(0.0, 0.0, 0.0)
if prev is not None:
pos3 = prev.start
pos4 = prev.end
glVertex3f(pos1.x, -pos1.z, pos1.y)
glVertex3f(pos3.x, -pos3.z, pos3.y)
glVertex3f(pos2.x, -pos2.z, pos2.y)
glVertex3f(pos4.x, -pos4.z, pos4.y)
prev = checkpoint
glEnd()
#glColor3f(1.0, 1.0, 1.0)
#glEnable(GL_TEXTURE_2D)
#glBindTexture(GL_TEXTURE_2D, self.arrow.tex)
glPushMatrix()
#lines = []
if vismenu.checkpoints.is_visible():
for group in self.level_file.checkpoints.groups:
prev = None
for checkpoint in group.points:
if prev is None:
prev = checkpoint
else:
#mid1 = prev.mid
#mid2 = checkpoint.mid
mid1 = (prev.start + prev.end) / 2.0
mid2 = (checkpoint.start + checkpoint.end) / 2.0
self.models.draw_arrow_head(mid1, mid2)
#lines.append((mid1, mid2))
prev = checkpoint
glPopMatrix()
glBegin(GL_LINES)
"""for linestart, lineend in lines:
glVertex3f(linestart.x, -linestart.z, linestart.y)
glVertex3f(lineend.x, -lineend.z, lineend.y)"""
if vismenu.checkpoints.is_visible():
for group in self.level_file.checkpoints.groups:
prev = None
for checkpoint in group.points:
if prev is None:
prev = checkpoint
else:
mid1 = (prev.start+prev.end)/2.0
mid2 = (checkpoint.start+checkpoint.end)/2.0
#mid1 = prev.mid
#mid2 = checkpoint.mid
glVertex3f(mid1.x, -mid1.z, mid1.y)
glVertex3f(mid2.x, -mid2.z, mid2.y)
prev = checkpoint
glEnd()
if vismenu.objects.is_visible():
for object in self.level_file.objects.objects:
self.models.render_generic_position_rotation_colored("objects",
object.position, object.rotation,
object in select_optimize)
if vismenu.kartstartpoints.is_visible():
for object in self.level_file.kartpoints.positions:
self.models.render_generic_position_rotation_colored("startpoints",
object.position, object.rotation,
object in select_optimize)
if vismenu.areas.is_visible():
for object in self.level_file.areas.areas:
self.models.render_generic_position_rotation_colored("areas",
object.position, object.rotation,
object in select_optimize)
if object in select_optimize:
glColor4f(*colors_selection)
else:
glColor4f(*colors_area)
self.models.draw_wireframe_cube(object.position, object.rotation, object.scale*100)
if vismenu.cameras.is_visible():
for object in self.level_file.cameras:
self.models.render_generic_position_rotation_colored("camera",
object.position, object.rotation,
object in select_optimize)
if vismenu.respawnpoints.is_visible():
for object in self.level_file.respawnpoints:
self.models.render_generic_position_rotation_colored("respawn",
object.position, object.rotation,
object in select_optimize)
if self.minimap is not None and self.minimap.is_available() and vismenu.minimap.is_visible():
self.models.render_generic_position(self.minimap.corner1, self.minimap.corner1 in positions)
self.models.render_generic_position(self.minimap.corner2, self.minimap.corner2 in positions)
#glDisable(GL_TEXTURE_2D)
glColor3f(0.0, 0.0, 0.0)
glDisable(GL_TEXTURE_2D)
glColor4f(0.0, 1.0, 0.0, 1.0)
rendered = {}
for p1i, p2i in self.paths.unique_paths:
p1 = self.paths.waypoints[p1i]
p2 = self.paths.waypoints[p2i]
glBegin(GL_LINES)
glVertex3f(p1.position.x, -p1.position.z, p1.position.y+5)
glVertex3f(p2.position.x, -p2.position.z, p2.position.y+5)
glEnd()
if p1i not in rendered:
self.models.draw_sphere(p1.position, p1.radius/2)
rendered[p1i] = True
if p2i not in rendered:
self.models.draw_sphere(p2.position, p2.radius/2)
rendered[p2i] = True
glColor4f(0.0, 1.0, 1.0, 1.0)
"""for points in self.paths.wide_paths:
glBegin(GL_LINE_LOOP)
for p in points:
glVertex3f(p.x, -p.z, p.y + 5)
glEnd()"""
self.gizmo.render_scaled(gizmo_scale, is3d=self.mode == MODE_3D)
glDisable(GL_DEPTH_TEST)
if self.selectionbox_start is not None and self.selectionbox_end is not None:
#print("drawing box")
startx, startz = self.selectionbox_start
endx, endz = self.selectionbox_end
glColor4f(1.0, 0.0, 0.0, 1.0)
glLineWidth(2.0)
glBegin(GL_LINE_LOOP)
glVertex3f(startx, startz, 0)
glVertex3f(startx, endz, 0)
glVertex3f(endx, endz, 0)
glVertex3f(endx, startz, 0)
glEnd()
if self.selectionbox_projected_origin is not None and self.selectionbox_projected_coords is not None:
#print("drawing box")
origin = self.selectionbox_projected_origin
point2, point3, point4 = self.selectionbox_projected_coords
glColor4f(1.0, 0.0, 0.0, 1.0)
glLineWidth(2.0)
point1 = origin
glBegin(GL_LINE_LOOP)
glVertex3f(point1.x, point1.y, point1.z)
glVertex3f(point2.x, point2.y, point2.z)
glVertex3f(point3.x, point3.y, point3.z)
glVertex3f(point4.x, point4.y, point4.z)
glEnd()
glEnable(GL_DEPTH_TEST)
glFinish()
now = default_timer() - start
#print("Frame time:", now, 1/now, "fps")
@catch_exception
def mousePressEvent(self, event):
    # Forward the Qt press event to the user-control state machine;
    # catch_exception surfaces tracebacks Qt would otherwise swallow.
    self.usercontrol.handle_press(event)
@catch_exception
def mouseMoveEvent(self, event):
    # Forward the Qt move event to the user-control state machine.
    self.usercontrol.handle_move(event)
@catch_exception
def mouseReleaseEvent(self, event):
    # Forward the Qt release event to the user-control state machine.
    self.usercontrol.handle_release(event)
def wheelEvent(self, event):
    """Zoom on mouse-wheel scroll, honoring the 'invertzoom' config flag."""
    delta = event.angleDelta().y()
    if self.editorconfig is not None and self.editorconfig.getboolean("invertzoom"):
        delta = -delta
    if delta > 0:
        self.zoom_in()
    elif delta < 0:
        self.zoom_out()
def zoom_in(self):
    # NOTE(review): this calls calc_zoom_out_factor while zoom_out calls
    # calc_zoom_in_factor. The cross-naming may be intentional given the
    # helpers' sign conventions -- confirm before "fixing" it.
    current = self.zoom_factor
    fac = calc_zoom_out_factor(current)
    self.zoom(fac)
def zoom_out(self):
    # NOTE(review): cross-named helper (see zoom_in) -- confirm the
    # calc_zoom_*_factor sign conventions before renaming.
    current = self.zoom_factor
    fac = calc_zoom_in_factor(current)
    self.zoom(fac)
def create_ray_from_mouseclick(self, mousex, mousey, yisup=False):
    """Build a world-space picking ray through the clicked canvas pixel.

    mousex/mousey are window coordinates (origin top-left, y growing
    down).  When yisup is True the origin and direction are converted
    from the editor's z-up convention into a y-up coordinate system.
    Returns a Line(origin, direction).
    """
    self.camera_direction.normalize()
    height = self.canvas_height
    width = self.canvas_width
    view = self.camera_direction.copy()
    # Camera basis: h points right, v points up relative to the view
    # direction (world up is +z here).
    h = view.cross(Vector3(0, 0, 1))
    v = h.cross(view)
    h.normalize()
    v.normalize()
    # 75-degree vertical field of view, image plane at distance 1.
    rad = 75 * pi / 180.0
    vLength = tan(rad / 2) * 1.0
    hLength = vLength * (width / height)
    v *= vLength
    h *= hLength
    # Map the pixel into normalized device coordinates in [-1, 1],
    # flipping y because window y grows downward.
    x = mousex - width / 2
    y = height - mousey- height / 2
    x /= (width / 2)
    y /= (height / 2)
    camerapos = Vector3(self.offset_x, self.offset_z, self.camera_height)
    # Point on the image plane for this pixel; the ray goes from the
    # camera through it.
    pos = camerapos + view * 1.0 + h * x + v * y
    dir = pos - camerapos
    if yisup:
        # Swap into y-up: (x, y, z) -> (x, -z, y).
        tmp = pos.y
        pos.y = -pos.z
        pos.z = tmp
        tmp = dir.y
        dir.y = -dir.z
        dir.z = tmp
    return Line(pos, dir)
class ObjectViewSelectionToggle(object):
    """A pair of checkable menu actions for one object category:
    a "visible" toggle and a "selectable" toggle kept consistent with
    each other (selectable implies visible; hiding also deselects).
    """

    def __init__(self, name, menuparent):
        self.name = name
        self.menuparent = menuparent
        self.action_view_toggle = QAction("{0} visible".format(name), menuparent)
        self.action_select_toggle = QAction("{0} selectable".format(name), menuparent)
        # Both toggles start enabled and checked.
        for action in (self.action_view_toggle, self.action_select_toggle):
            action.setCheckable(True)
            action.setChecked(True)
        self.action_view_toggle.triggered.connect(self.handle_view_toggle)
        self.action_select_toggle.triggered.connect(self.handle_select_toggle)
        menuparent.addAction(self.action_view_toggle)
        menuparent.addAction(self.action_select_toggle)

    def handle_view_toggle(self, val):
        # Selectability tracks visibility: hidden items cannot be selected,
        # re-shown items become selectable again.
        self.action_select_toggle.setChecked(val)

    def handle_select_toggle(self, val):
        # Making a category selectable forces it visible.
        if val:
            self.action_view_toggle.setChecked(True)

    def is_visible(self):
        return self.action_view_toggle.isChecked()

    def is_selectable(self):
        return self.action_select_toggle.isChecked()
class FilterViewMenu(QMenu):
    """'Filter View' menu: per-category visible/selectable toggles plus
    Show All / Hide All shortcuts.

    Emits ``filter_update`` whenever any toggle changes so the viewer can
    redraw with the new filter state.
    """

    filter_update = pyqtSignal()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setTitle("Filter View")

        self.show_all = QAction("Show All", self)
        self.show_all.triggered.connect(self.handle_show_all)
        self.addAction(self.show_all)

        self.hide_all = QAction("Hide All", self)
        self.hide_all.triggered.connect(self.handle_hide_all)
        self.addAction(self.hide_all)

        self.enemyroute = ObjectViewSelectionToggle("Enemy Routes", self)
        self.itemroutes = ObjectViewSelectionToggle("Object Routes", self)
        self.checkpoints = ObjectViewSelectionToggle("Checkpoints", self)
        self.objects = ObjectViewSelectionToggle("Objects", self)
        self.areas = ObjectViewSelectionToggle("Areas", self)
        self.cameras = ObjectViewSelectionToggle("Cameras", self)
        self.respawnpoints = ObjectViewSelectionToggle("Respawn Points", self)
        self.kartstartpoints = ObjectViewSelectionToggle("Kart Start Points", self)
        self.minimap = ObjectViewSelectionToggle("Minimap", self)

        for toggle in self._toggles():
            toggle.action_view_toggle.triggered.connect(self.emit_update)
            toggle.action_select_toggle.triggered.connect(self.emit_update)

    def _toggles(self):
        # Single source of truth for the per-category toggles; the original
        # repeated this tuple in three places, which invited drift when a
        # new category was added.
        return (self.enemyroute, self.itemroutes, self.checkpoints, self.objects,
                self.areas, self.cameras, self.respawnpoints, self.kartstartpoints,
                self.minimap)

    def handle_show_all(self):
        """Check every visible/selectable toggle and notify listeners."""
        for toggle in self._toggles():
            toggle.action_view_toggle.setChecked(True)
            toggle.action_select_toggle.setChecked(True)
        self.filter_update.emit()

    def handle_hide_all(self):
        """Uncheck every visible/selectable toggle and notify listeners."""
        for toggle in self._toggles():
            toggle.action_view_toggle.setChecked(False)
            toggle.action_select_toggle.setChecked(False)
        self.filter_update.emit()

    def emit_update(self, val):
        # Qt passes the checked state; we only care that something changed.
        self.filter_update.emit()

    def mouseReleaseEvent(self, e):
        # Trigger the clicked action manually so the menu stays open while
        # the user flips several toggles in a row.
        try:
            action = self.activeAction()
            if action and action.isEnabled():
                action.trigger()
            else:
                QMenu.mouseReleaseEvent(self, e)
        except Exception:
            # Was a bare `except:` -- narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            traceback.print_exc()
| StarcoderdataPython |
9653410 | import os
from rich.logging import RichHandler
def configure_logger() -> None:
    """
    Configure the loguru logger to emit through a Rich handler.

    Defaults LOGURU_LEVEL to INFO unless the caller's environment already
    sets it.  This must happen before loguru is imported, because loguru
    reads LOGURU_LEVEL at import time -- which is why the import is
    deferred into the function body.
    :return:
    """
    # setdefault is the idiomatic "set only if absent" form of the original
    # membership check + assignment.
    os.environ.setdefault("LOGURU_LEVEL", "INFO")

    from loguru import logger

    logger.configure(
        handlers=[{"sink": RichHandler(markup=True), "format": "{message}"}]
    )


# Run at import time so importing this module is enough to set up logging.
configure_logger()
| StarcoderdataPython |
4968902 | import json
from redata import settings
from redata.grafana.utils import load_json_data, update_home_panel_element
from grafana_api.grafana_face import GrafanaFace
from redata.grafana.panels.base import HomeLastDayTraffic, HomeLastModifiedTime, HomeAlerts
import math
def load_json_data(file_name):
    """Read *file_name* and return its parsed JSON content.

    NOTE(review): this local definition shadows the load_json_data
    imported from redata.grafana.utils at the top of the module --
    presumably intentional, but worth confirming.
    """
    with open(file_name) as handle:
        return json.load(handle)
def generate_overrides(dashboards):
    """Build one polystat override entry per table dashboard.

    Each entry is the HOME_OVERRIDES_LOCATION JSON template with its
    clickThrough/metricName/label fields pointed at that dashboard.
    Each dashboard needs its own independent dict (the fields are
    mutated per entry); the original re-read the template file from disk
    on every iteration to get fresh dicts -- loading it once and
    deep-copying avoids the repeated I/O with identical results.
    """
    import copy  # local import: keeps this fix self-contained

    template = load_json_data(settings.HOME_OVERRIDES_LOCATION)
    override_list = []
    for dashboard in dashboards:
        override = copy.deepcopy(template)
        override['clickThrough'] = dashboard['dashboard']['url']
        override['metricName'] = dashboard['table'].table_name
        override['label'] = dashboard['table'].table_name
        override_list.append(override)
    return override_list
def get_best_column_count(dashboards_num):
    """Column count for the polystat grid: the dashboard count itself up
    to 10, then roughly 2*sqrt(n) so the grid stays wider than tall."""
    if dashboards_num > 10:
        return int(math.sqrt(4 * dashboards_num))
    return dashboards_num
def create_home_dashboard(grafana_api, dashboards):
    """Create/overwrite the Grafana home dashboard from its JSON template.

    Fills the two polystat panels with one override per table dashboard
    and wires each named panel to its query element.  `grafana_api` is a
    GrafanaFace client; `dashboards` is a list of dicts carrying
    'dashboard' (Grafana API response with a 'url') and 'table' (object
    with a table_name) -- assumed shape, TODO confirm against callers.
    Returns the Grafana update_dashboard API response.
    """
    home_data = load_json_data(settings.HOME_DASHBOARD_LOCATION)
    panels = home_data['panels']
    for panel in panels:
        if panel['title'] in ['new_records_created (in last 24h)', 'time_since_last_record_created']:
            panel['savedOverrides'] = generate_overrides(dashboards)
            # native polystat logic for column/row auto scalling works strange
            panel['polystat']['columns'] = get_best_column_count(len(dashboards))
        if panel['title'] == 'new_records_created (in last 24h)':
            update_home_panel_element(panel, HomeLastDayTraffic)
        if panel['title'] == 'time_since_last_record_created':
            update_home_panel_element(panel, HomeLastModifiedTime)
        if panel['title'] == 'recent_alerts':
            update_home_panel_element(panel, HomeAlerts)
    response = grafana_api.dashboard.update_dashboard(
        dashboard={
            'dashboard': home_data,
            'folderID': 0,
            'overwrite': True
        }
    )
    print (f"Dashboard for home generated:", response)
    return response
| StarcoderdataPython |
6432867 | <filename>Python/leetcode.055.jump-game.py
class Solution(object):
    def canJump(self, nums):
        """Greedy reachability check for LeetCode 55 (Jump Game).

        Track the farthest index reachable so far; if the scan ever
        passes it, the end is unreachable.

        :type nums: List[int]
        :rtype: bool
        """
        if not nums:
            return False
        last = len(nums) - 1
        reach = 0
        for idx, jump in enumerate(nums):
            if idx > reach:
                break
            reach = max(reach, idx + jump)
            if reach >= last:
                return True
        return False
11344718 | <reponame>peppocola/Screening-COVID19
import os
import re
import argparse
import pandas as pd
import numpy as np
from tqdm import tqdm
from PIL import Image as pil
if __name__ == '__main__':
    # Groups COVIDx-CT slice images into fixed-length multi-page TIFF scans
    # plus a per-split CSV of (filename, class) labels.
    # Usage example:
    # python covidx/ct/seq_preprocessing.py /hdd/Datasets/covidx-ct/train_COVIDx_CT-2A.txt /hdd/Datasets/covidx-ct/val_COVIDx_CT-2A.txt \
    # /hdd/Datasets/covidx-ct/test_COVIDx_CT-2A.txt --src-path /hdd/Datasets/covidx-ct/2A_images --size 224 224 --ct-length 16 --dest-path datasets/covidx-seqct
    # Instantiate the command line arguments parser
    parser = argparse.ArgumentParser(description='CT Image dataset preprocessor')
    parser.add_argument(
        'train_labels', type=str, help='The train labels text filepath.'
    )
    parser.add_argument(
        'valid_labels', type=str, help='The validation labels text filepath.'
    )
    parser.add_argument(
        'test_labels', type=str, help='The test labels text filepath.'
    )
    parser.add_argument(
        '--src-path', type=str, default='.', help='The input dataset path.'
    )
    parser.add_argument(
        '--size', nargs=2, type=int, default=(224, 224), help='The size of the output images.'
    )
    parser.add_argument(
        '--ct-length', type=int, default=16, help='The fixed length of a CT scan.'
    )
    parser.add_argument(
        '--dest-path', type=str, default='.', help='The output dataset path.'
    )
    args = parser.parse_args()

    # Set the random seed (slice subsampling below uses np.random.choice)
    np.random.seed(42)

    # Create the images directories
    train_images_path = os.path.join(args.dest_path, 'train')
    if not os.path.isdir(train_images_path):
        os.makedirs(train_images_path)
    valid_images_path = os.path.join(args.dest_path, 'valid')
    if not os.path.isdir(valid_images_path):
        os.makedirs(valid_images_path)
    test_images_path = os.path.join(args.dest_path, 'test')
    if not os.path.isdir(test_images_path):
        os.makedirs(test_images_path)

    # Load the labels CSVs (space-separated: filename, class, bounding box)
    labels_column_names = ['filename', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    # NOTE(review): the three *_labels_filename values are computed but unused.
    train_labels_filename, _ = os.path.splitext(os.path.basename(args.train_labels))
    valid_labels_filename, _ = os.path.splitext(os.path.basename(args.valid_labels))
    test_labels_filename, _ = os.path.splitext(os.path.basename(args.test_labels))
    dataset_labels = {
        'train': pd.read_csv(args.train_labels, sep=' ', names=labels_column_names),
        'valid': pd.read_csv(args.valid_labels, sep=' ', names=labels_column_names),
        'test': pd.read_csv(args.test_labels, sep=' ', names=labels_column_names)
    }

    # Initialize the preprocessed output CSV
    out_labels_column_names = ['filename', 'class']
    out_dataset_labels = {
        'train': pd.DataFrame(columns=out_labels_column_names),
        'valid': pd.DataFrame(columns=out_labels_column_names),
        'test': pd.DataFrame(columns=out_labels_column_names)
    }

    # Process rows: consecutive rows sharing a CT id form one scan.
    for dataset in ['train', 'valid', 'test']:
        ct_infos = []
        prev_ct_id = None
        df = dataset_labels[dataset]
        tk = tqdm(total=len(df))
        idx = 0
        while idx < len(df):
            sample = df.iloc[idx]
            filename = sample['filename']
            target = sample['class']
            box = (sample['xmin'], sample['ymin'], sample['xmax'], sample['ymax'])
            # CT id = filename minus the trailing slice number; a second
            # pattern handles "...IM<number>.png" names.
            result = re.search('^(.+)[_-]([0-9]+).png$', filename)
            if result is None:
                result = re.search('^(.+)[_-]IM([0-9]+).png$', filename)
            assert result is not None, 'Regex mismatch - {}'.format(filename)
            ct_id = result.group(1)
            if prev_ct_id is None:
                prev_ct_id = ct_id
                ct_infos.clear()
            if prev_ct_id == ct_id:
                # Same scan as the previous row: accumulate and advance.
                filepath = os.path.join(args.src_path, filename)
                ct_infos.append((filepath, box, target))
                idx += 1
                tk.update()
                tk.refresh()
                continue
            # Different CT id: flush the accumulated scan. idx is NOT
            # advanced, so this row is re-read as the first slice of the
            # next group on the following iteration.
            # NOTE(review): the final group of each split is never flushed --
            # the while loop ends while its slices are still in ct_infos, so
            # the last CT scan per split is silently dropped. Confirm intent.
            ct_id = prev_ct_id
            prev_ct_id = None
            num_ct_images = len(ct_infos)
            if num_ct_images < args.ct_length:
                # Too few slices to subsample ct_length of them: skip scan.
                continue
            ct_slices = []
            for img_filepath, img_box, img_target in ct_infos:
                with pil.open(img_filepath) as img:
                    # Grayscale, crop to the annotated box, resize.
                    img = img.convert(mode='L').crop(img_box).resize(args.size, resample=pil.BICUBIC)
                    ct_slices.append((img, img_target))
            out_filename = '{}.tiff'.format(ct_id)
            out_filepath = os.path.join(args.dest_path, dataset, out_filename)
            images, targets = zip(*ct_slices)
            assert len(set(targets)) == 1, 'Targets mismatch - {} : {}'.format(ct_id, targets)
            ct_class = targets[0]
            # Evenly-strided candidate slice indices, then a random subset
            # of exactly ct_length of them, kept in scan order.
            step_size = num_ct_images // args.ct_length
            sample_indices = np.arange(0, num_ct_images, step=step_size)
            mask = np.random.choice(np.arange(len(sample_indices)), size=args.ct_length, replace=False)
            sample_indices = sample_indices[mask]
            sample_indices = list(sorted(sample_indices))
            filtered_images = []
            for i in sample_indices:
                filtered_images.append(images[i])
            # Multi-page TIFF: first slice plus the appended remaining ones.
            filtered_images[0].save(out_filepath, append_images=filtered_images[1:], save_all=True)
            out_dataset_labels[dataset] = out_dataset_labels[dataset].append({
                'filename': out_filename,
                'class': ct_class
            }, ignore_index=True)
        tk.close()
        out_df_filepath = os.path.join(args.dest_path, '{}.csv'.format(dataset))
        out_dataset_labels[dataset].to_csv(out_df_filepath, index=False)
| StarcoderdataPython |
12851459 | <reponame>mbodenhamer/yatr
# Package version ('b' suffix marks a beta pre-release).
__version__ = '0.0.11b'
# Re-export each submodule's public API at the package top level.
from .base import *
from .context import *
from .task import *
from .env import *
from .parse import *
| StarcoderdataPython |
5002450 | <reponame>lucashenrs/acelerapython-codenation<filename>semana-3/test_main.py
from main import get_temperature
import pytest
from unittest.mock import patch
# Parametrized cases: (API temperature in Fahrenheit, latitude, longitude,
# expected Celsius). 62F is ~16.67C; the expected 16 implies get_temperature
# truncates/rounds down -- assumption, confirm against main.get_temperature.
temp_imputs = [
    (62, -14.235004, -51.92528, 16),
]


# Patch requests.get as seen from `main` so no real HTTP request is made.
@patch('main.requests.get')
@pytest.mark.parametrize("temp_farenheit_input, lat, lng, \
celsius_expected_output", temp_imputs)
def test_get_temperature_by_lat_lng(mock_get, temp_farenheit_input,
                                    lat, lng, celsius_expected_output):
    # Stub the weather API payload down to the single field that
    # get_temperature reads: currently.temperature (Fahrenheit).
    mock_get.return_value.json.return_value = {
        "currently": {
            "temperature": temp_farenheit_input
        }
    }
    response = get_temperature(lat, lng)
    assert response == celsius_expected_output
| StarcoderdataPython |
4964035 | import functools
import heapq
__all__ = ['iterunion']


def iterunion(concepts, sortkey, next_concepts):
    """Yield every reachable concept exactly once, in ascending sortkey order.

    Expansion starts from ``concepts`` and follows ``next_concepts``.
    ``sortkey`` must be a topological sort of the lattice order in the
    direction of ``next_concepts``; a popped key not greater than the
    highest key yielded so far therefore marks a duplicate to skip.
    """
    heap = [(sortkey(c), c) for c in concepts]
    heapq.heapify(heap)
    highest = -1
    while heap:
        key, concept = heapq.heappop(heap)
        if key > highest:
            highest = key
            yield concept
            for successor in next_concepts(concept):
                heapq.heappush(heap, (sortkey(successor), successor))
| StarcoderdataPython |
3297842 | # Copyright (c) 2021 OpenKS Authors, DCD Research Lab, Zhejiang University.
# All Rights Reserved.
import logging
import torch
import torch.nn as nn
from ...model import TorchModel
logger = logging.getLogger(__name__)
@TorchModel.register("question-embedding", "PyTorch")
class QuestionEmbedding(TorchModel):
    """Question encoder for KBQA: embeds a tokenized question, runs a
    bidirectional LSTM/GRU over it, projects every step through an MLP,
    mean-pools the steps and L2-normalizes the result per question.

    Required config kwargs: label (output embedding size), words_num,
    words_dim, train_embed, qa_mode ('LSTM' or 'GRU'), hidden_size,
    num_layer, rnn_dropout, rnn_fc_dropout.
    """

    def __init__(self, **kwargs):
        super(QuestionEmbedding, self).__init__()
        self.config = kwargs
        target_size = self.config['label']
        self.embed = nn.Embedding(self.config['words_num'], self.config['words_dim'])
        if self.config['train_embed'] == False:
            # Frozen (e.g. pretrained) word vectors.
            self.embed.weight.requires_grad = False
        # Bug fix: the RNN consumes embedding vectors, so input_size must be
        # words_dim (the embedding dimension), not words_num (vocabulary
        # size) as before -- the old value crashed on the first forward pass
        # whenever words_num != words_dim.
        if self.config['qa_mode'] == 'LSTM':
            self.lstm = nn.LSTM(input_size=self.config['words_dim'],
                                hidden_size=self.config['hidden_size'],
                                num_layers=self.config['num_layer'],
                                dropout=self.config['rnn_dropout'],
                                bidirectional=True)
        elif self.config['qa_mode'] == 'GRU':
            self.gru = nn.GRU(input_size=self.config['words_dim'],
                              hidden_size=self.config['hidden_size'],
                              num_layers=self.config['num_layer'],
                              dropout=self.config['rnn_dropout'],
                              bidirectional=True)
        self.dropout = nn.Dropout(p=self.config['rnn_fc_dropout'])
        self.nonlinear = nn.Tanh()
        # Per-timestep projection from the bidirectional hidden state
        # (2 * hidden_size) down to the target embedding size.
        self.hidden2tag = nn.Sequential(
            nn.Linear(self.config['hidden_size'] * 2, self.config['hidden_size'] * 2),
            nn.BatchNorm1d(self.config['hidden_size'] * 2),
            self.nonlinear,
            self.dropout,
            nn.Linear(self.config['hidden_size'] * 2, target_size)
        )

    def loss(self, scores, batch):
        """Mean squared error between predicted and reference embeddings."""
        loss_func = nn.MSELoss()
        return loss_func(scores, batch)

    def forward(self, x):
        """Encode a batch of questions.

        ``x`` carries ``.text`` -- (num_word, batch) token ids -- and
        ``.embed``, the reference embedding tensor the scores are trained
        towards.  Returns ``(mse_loss, scores)`` with scores of shape
        (batch, label), each row L2-normalized.
        """
        text = x.text
        target_embed = x.embed
        # Bug fix: do not rebind `x` -- the original overwrote it with the
        # embedding tensor and then read `x.embed`, raising AttributeError.
        encoded = self.embed(text)
        num_word, batch_size, words_dim = encoded.size()
        # Bug fix: dispatch on 'qa_mode' (the key __init__ used to build the
        # RNN); the original read 'entity_detection_mode', a key belonging to
        # a different model, and broke whenever the two keys disagreed.
        if self.config['qa_mode'] == 'LSTM':
            outputs, (ht, ct) = self.lstm(encoded)
        elif self.config['qa_mode'] == 'GRU':
            outputs, ht = self.gru(encoded)
        else:
            # Raise instead of print + exit(1): callers can handle it and it
            # does not kill the host process.
            raise ValueError("Unsupported qa_mode: {!r}".format(self.config['qa_mode']))
        outputs = outputs.view(-1, outputs.size(2))
        tags = self.hidden2tag(outputs).view(num_word, batch_size, -1)
        scores = nn.functional.normalize(torch.mean(tags, dim=0), dim=1)
        return self.loss(scores, target_embed), scores
| StarcoderdataPython |
8158100 | <reponame>m-m-git/transparentwindow
from ctypes import POINTER
from ctypes import WINFUNCTYPE
from ctypes import c_int
from ctypes import c_int64
from ctypes import c_void_p
from ctypes import windll
from ctypes.wintypes import ATOM
from ctypes.wintypes import BOOL
from ctypes.wintypes import DWORD
from ctypes.wintypes import HANDLE
from ctypes.wintypes import HBITMAP
from ctypes.wintypes import HDC
from ctypes.wintypes import HGDIOBJ
from ctypes.wintypes import HINSTANCE
from ctypes.wintypes import HMENU
from ctypes.wintypes import HMODULE
from ctypes.wintypes import HWND
from ctypes.wintypes import LPARAM
from ctypes.wintypes import LPCWSTR
from ctypes.wintypes import LPPOINT
from ctypes.wintypes import LPRECT
from ctypes.wintypes import RECT
from ctypes.wintypes import UINT
from ctypes.wintypes import WPARAM
# Pointer-sized integer and the DLGPROC dialog-procedure callback signature:
# INT_PTR CALLBACK DlgProc(HWND, UINT, WPARAM, LPARAM).
# NOTE(review): c_int64 assumes a 64-bit process; on 32-bit Python INT_PTR
# would be 32 bits -- confirm the supported targets.
INT_PTR = c_int64
DLGPROC = WINFUNCTYPE(INT_PTR, HWND, UINT, WPARAM, LPARAM)
# Constant values below mirror the Windows SDK WinUser.h definitions.
# Window Styles
WS_OVERLAPPED = 0x00000000
WS_POPUP = 0x80000000
WS_CHILD = 0x40000000
WS_MINIMIZE = 0x20000000
WS_VISIBLE = 0x10000000
WS_DISABLED = 0x08000000
WS_CLIPSIBLINGS = 0x04000000
WS_CLIPCHILDREN = 0x02000000
WS_MAXIMIZE = 0x01000000
WS_CAPTION = 0x00C00000
WS_BORDER = 0x00800000
WS_DLGFRAME = 0x00400000
WS_VSCROLL = 0x00200000
WS_HSCROLL = 0x00100000
WS_SYSMENU = 0x00080000
WS_THICKFRAME = 0x00040000
WS_GROUP = 0x00020000
WS_TABSTOP = 0x00010000
WS_MINIMIZEBOX = 0x00020000
WS_MAXIMIZEBOX = 0x00010000
# Aliases defined by the SDK.
WS_TILED = WS_OVERLAPPED
WS_ICONIC = WS_MINIMIZE
WS_SIZEBOX = WS_THICKFRAME
# Extended Window Styles
WS_EX_DLGMODALFRAME = 0x00000001
WS_EX_NOPARENTNOTIFY = 0x00000004
WS_EX_TOPMOST = 0x00000008
WS_EX_ACCEPTFILES = 0x00000010
WS_EX_TRANSPARENT = 0x00000020
WS_EX_MDICHILD = 0x00000040
WS_EX_TOOLWINDOW = 0x00000080
WS_EX_WINDOWEDGE = 0x00000100
WS_EX_CLIENTEDGE = 0x00000200
WS_EX_CONTEXTHELP = 0x00000400
WS_EX_RIGHT = 0x00001000
WS_EX_LEFT = 0x00000000
WS_EX_RTLREADING = 0x00002000
WS_EX_LTRREADING = 0x00000000
WS_EX_LEFTSCROLLBAR = 0x00004000
WS_EX_RIGHTSCROLLBAR = 0x00000000
WS_EX_CONTROLPARENT = 0x00010000
WS_EX_STATICEDGE = 0x00020000
WS_EX_APPWINDOW = 0x00040000
WS_EX_LAYERED = 0x00080000
WS_EX_NOINHERITLAYOUT = 0x00100000
WS_EX_NOREDIRECTIONBITMAP = 0x00200000
WS_EX_LAYOUTRTL = 0x00400000
WS_EX_COMPOSITED = 0x02000000
WS_EX_NOACTIVATE = 0x08000000
WS_EX_PALETTEWINDOW = WS_EX_WINDOWEDGE | WS_EX_TOOLWINDOW | WS_EX_TOPMOST
# Window Messages
WM_CLOSE = 0x0010
WM_CONTEXTMENU = 0x007B
WM_KEYDOWN = 0x0100
WM_INITDIALOG = 0x0110
WM_COMMAND = 0x0111
# Dialog Styles
DS_SETFONT = 0x0040
DS_MODALFRAME = 0x0080
DS_CENTER = 0x0800
DS_CENTERMOUSE = 0x1000
# Dialog Box Command IDs
IDOK = 1
IDCANCEL = 2
IDABORT = 3
IDRETRY = 4
IDIGNORE = 5
IDYES = 6
IDNO = 7
IDCLOSE = 8
IDHELP = 9
IDTRYAGAIN = 10
IDCONTINUE = 11
# Static Control Constants
SS_LEFT = 0x00000000
SS_CENTER = 0x00000001
SS_RIGHT = 0x00000002
SS_ICON = 0x00000003
SS_BLACKRECT = 0x00000004
SS_GRAYRECT = 0x00000005
SS_WHITERECT = 0x00000006
SS_BLACKFRAME = 0x00000007
SS_GRAYFRAME = 0x00000008
SS_WHITEFRAME = 0x00000009
SS_USERITEM = 0x0000000A
SS_SIMPLE = 0x0000000B
SS_LEFTNOWORDWRAP = 0x0000000C
SS_OWNERDRAW = 0x0000000D
SS_BITMAP = 0x0000000E
SS_ENHMETAFILE = 0x0000000F
SS_ETCHEDHORZ = 0x00000010
SS_ETCHEDVERT = 0x00000011
SS_ETCHEDFRAME = 0x00000012
SS_TYPEMASK = 0x0000001F
SS_REALSIZECONTROL = 0x00000040
SS_NOPREFIX = 0x00000080
SS_NOTIFY = 0x00000100
SS_CENTERIMAGE = 0x00000200
SS_RIGHTJUST = 0x00000400
SS_REALSIZEIMAGE = 0x00000800
SS_SUNKEN = 0x00001000
SS_EDITCONTROL = 0x00002000
SS_ENDELLIPSIS = 0x00004000
SS_PATHELLIPSIS = 0x00008000
SS_WORDELLIPSIS = 0x0000C000
SS_ELLIPSISMASK = 0x0000C000
# Edit Control Styles (WinUser.h ES_* values)
ES_LEFT = 0x0000
ES_CENTER = 0x0001
ES_RIGHT = 0x0002
ES_MULTILINE = 0x0004
ES_UPPERCASE = 0x0008
ES_LOWERCASE = 0x0010
# Restored: the value had been replaced by a redaction placeholder
# ("<PASSWORD>"), which is a syntax error; WinUser.h defines it as 0x0020.
ES_PASSWORD = 0x0020
ES_AUTOVSCROLL = 0x0040
ES_AUTOHSCROLL = 0x0080
ES_NOHIDESEL = 0x0100
ES_OEMCONVERT = 0x0400
ES_READONLY = 0x0800
ES_WANTRETURN = 0x1000
ES_NUMBER = 0x2000
# Menu flags
MF_POPUP = 0x00000010
MF_HILITE = 0x00000080
MF_SEPARATOR = 0x00000800
# SetWindowPos Flags
SWP_NOSIZE = 0x0001
SWP_NOMOVE = 0x0002
SWP_NOZORDER = 0x0004
SWP_NOREDRAW = 0x0008
SWP_NOACTIVATE = 0x0010
SWP_FRAMECHANGED = 0x0020
SWP_SHOWWINDOW = 0x0040
SWP_HIDEWINDOW = 0x0080
SWP_NOCOPYBITS = 0x0100
SWP_NOOWNERZORDER = 0x0200
SWP_NOSENDCHANGING = 0x0400
SWP_DEFERERASE = 0x2000
SWP_ASYNCWINDOWPOS = 0x4000
# DPI_AWARENESS_CONTEXT handle
# (negative pseudo-handle values, passed to the API as c_void_p)
DPI_AWARENESS_CONTEXT_UNAWARE = -1
DPI_AWARENESS_CONTEXT_SYSTEM_AWARE = -2
DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE = -3
DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2 = -4
DPI_AWARENESS_CONTEXT_UNAWARE_GDISCALED = -5
# ctypes prototypes for user32/gdi32/kernel32 entry points. Declaring
# argtypes/restype up front gives argument checking at call time and correct
# handling of pointer-sized values on 64-bit Python. The leading underscore
# keeps the raw bindings module-private.
_AdjustWindowRectExForDpi = windll.user32.AdjustWindowRectExForDpi
_AdjustWindowRectExForDpi.argtypes = (LPRECT, DWORD, BOOL, DWORD, UINT)
_AdjustWindowRectExForDpi.restype = BOOL
_BitBlt = windll.gdi32.BitBlt
_BitBlt.argtypes = (HDC, c_int, c_int, c_int, c_int, HDC, c_int, c_int, DWORD)
_BitBlt.restype = BOOL
_ClientToScreen = windll.user32.ClientToScreen
_ClientToScreen.argtypes = (HWND, LPPOINT)
_ClientToScreen.restype = BOOL
_CreateCompatibleDC = windll.gdi32.CreateCompatibleDC
_CreateCompatibleDC.argtypes = (HDC,)
_CreateCompatibleDC.restype = HDC
_CreateDIBSection = windll.gdi32.CreateDIBSection
_CreateDIBSection.argtypes = (HDC, c_void_p, UINT, c_void_p, HANDLE, DWORD)
_CreateDIBSection.restype = HBITMAP
_DeleteDC = windll.gdi32.DeleteDC
_DeleteDC.argtypes = (HDC,)
_DeleteDC.restype = BOOL
_DeleteObject = windll.gdi32.DeleteObject
_DeleteObject.argtypes = (HGDIOBJ,)
_DeleteObject.restype = BOOL
_DestroyMenu = windll.user32.DestroyMenu
_DestroyMenu.argtypes = (HMENU,)
_DestroyMenu.restype = BOOL
_DialogBoxIndirectParamW = windll.user32.DialogBoxIndirectParamW
_DialogBoxIndirectParamW.argtypes = (HINSTANCE, c_void_p, HWND, c_void_p, LPARAM)
_DialogBoxIndirectParamW.restype = INT_PTR
_EndDialog = windll.user32.EndDialog
_EndDialog.argtypes = (HWND, INT_PTR)
_EndDialog.restype = BOOL
_FindWindowW = windll.user32.FindWindowW
_FindWindowW.argtypes = (LPCWSTR, LPCWSTR)
_FindWindowW.restype = HWND
_GetClassInfoExW = windll.user32.GetClassInfoExW
_GetClassInfoExW.argtypes = (HINSTANCE, LPCWSTR, c_void_p)
_GetClassInfoExW.restype = BOOL
_GetClientRect = windll.user32.GetClientRect
_GetClientRect.argtypes = (HWND, LPRECT)
_GetClientRect.restype = BOOL
_GetDC = windll.user32.GetDC
_GetDC.argtypes = (HWND,)
_GetDC.restype = HDC
# GetDesktopWindow takes no arguments, hence no argtypes declaration.
_GetDesktopWindow = windll.user32.GetDesktopWindow
_GetDesktopWindow.restype = HWND
_GetDlgItem = windll.user32.GetDlgItem
_GetDlgItem.argtypes = (HWND, c_int)
_GetDlgItem.restype = HWND
_GetDlgItemInt = windll.user32.GetDlgItemInt
_GetDlgItemInt.argtypes = (HWND, c_int, POINTER(BOOL), BOOL)
_GetDlgItemInt.restype = UINT
_GetDpiForWindow = windll.user32.GetDpiForWindow
_GetDpiForWindow.argtypes = (HWND,)
_GetDpiForWindow.restype = UINT
_GetModuleHandleW = windll.kernel32.GetModuleHandleW
_GetModuleHandleW.argtypes = (LPCWSTR,)
_GetModuleHandleW.restype = HMODULE
_GetParent = windll.user32.GetParent
_GetParent.argtypes = (HWND,)
_GetParent.restype = HWND
_GetSubMenu = windll.user32.GetSubMenu
_GetSubMenu.argtypes = (HMENU, c_int)
_GetSubMenu.restype = HMENU
_GetWindowDC = windll.user32.GetWindowDC
_GetWindowDC.argtypes = (HWND,)
_GetWindowDC.restype = HDC
_GetWindowRect = windll.user32.GetWindowRect
_GetWindowRect.argtypes = (HWND, LPRECT)
_GetWindowRect.restype = BOOL
_LoadMenuIndirectW = windll.user32.LoadMenuIndirectW
_LoadMenuIndirectW.argtypes = (c_void_p,)
_LoadMenuIndirectW.restype = HMENU
_RegisterClassExW = windll.user32.RegisterClassExW
_RegisterClassExW.argtypes = (c_void_p,)
_RegisterClassExW.restype = ATOM
_ReleaseDC = windll.user32.ReleaseDC
_ReleaseDC.argtypes = (HWND, HDC)
_ReleaseDC.restype = c_int
_ScreenToClient = windll.user32.ScreenToClient
_ScreenToClient.argtypes = (HWND, LPPOINT)
_ScreenToClient.restype = BOOL
_SelectObject = windll.gdi32.SelectObject
_SelectObject.argtypes = (HDC, HGDIOBJ)
_SelectObject.restype = HGDIOBJ
_SetDlgItemInt = windll.user32.SetDlgItemInt
_SetDlgItemInt.argtypes = (HWND, c_int, UINT, BOOL)
_SetDlgItemInt.restype = BOOL
_SetFocus = windll.user32.SetFocus
_SetFocus.argtypes = (HWND,)
_SetFocus.restype = HWND
_SetThreadDpiAwarenessContext = windll.user32.SetThreadDpiAwarenessContext
_SetThreadDpiAwarenessContext.argtypes = (c_void_p,)
_SetThreadDpiAwarenessContext.restype = c_void_p
_SetWindowPos = windll.user32.SetWindowPos
_SetWindowPos.argtypes = (HWND, HWND, c_int, c_int, c_int, c_int, UINT)
_SetWindowPos.restype = BOOL
_SetWindowTextW = windll.user32.SetWindowTextW
_SetWindowTextW.argtypes = (HWND, LPCWSTR)
_SetWindowTextW.restype = BOOL
_TrackPopupMenuEx = windll.user32.TrackPopupMenuEx
_TrackPopupMenuEx.argtypes = (HMENU, UINT, c_int, c_int, HWND, POINTER(RECT))
_TrackPopupMenuEx.restype = BOOL
| StarcoderdataPython |
WHITESPACE_STR = ' \t\n\r'


def parse_array(s, _w=WHITESPACE_STR, _sep=","):
    """Parse a string of nested integer arrays, e.g. '[1, [2, 3], 4]'.

    Arbitrary whitespace is allowed between tokens.  Raises ValueError
    for any malformed input (unbalanced brackets, missing separators,
    whitespace inside a number, trailing garbage, ...).  Returns the
    parsed, possibly nested, list of ints.
    """
    array = None             # outermost list, set when the first '[' is seen
    stack = []               # append() bound-methods of the currently open lists
    accumulator = ""         # characters of the number currently being read
    closed_flag = False      # last significant char closed a list with ']'
    whitespace_flag = False  # whitespace seen since the last significant char
    started_flag = False     # the first '[' has been consumed
    for ch in s:
        if ch in _w:
            whitespace_flag = True
            continue
        if ch == "[":
            # A new list may not start after the outermost one has closed,
            # nor directly after a ']' or a number.
            if started_flag and not stack:
                raise ValueError("Wrong string.")
            if closed_flag or accumulator:
                raise ValueError
            in_array = []
            if stack:
                stack[-1](in_array)
            else:
                array = in_array
            started_flag = True
            stack.append(in_array.append)
        elif not started_flag:
            # Anything before the opening '[' is invalid.
            raise ValueError("Wrong string.")
        elif ch == "]":
            if not stack:
                raise ValueError("Wrong string.")
            if accumulator:
                # Flush the pending number into the list being closed.
                stack[-1](int(accumulator))
                accumulator = ""
            stack.pop()
            closed_flag = True
            whitespace_flag = False
        elif ch in _sep:
            if accumulator:
                stack[-1](int(accumulator))
                accumulator = ""
            elif closed_flag:
                pass  # separator directly after ']' is fine
            else:
                raise ValueError("Wrong string.")
            closed_flag = False
            whitespace_flag = False
        else:
            # Number character: numbers may not be split by whitespace and
            # nothing may directly follow a closing bracket.
            if whitespace_flag and accumulator or closed_flag:
                raise ValueError
            accumulator += ch
            whitespace_flag = False
    if not closed_flag:
        raise ValueError("Wrong string")
    if stack:
        raise ValueError("Wrong string")
    if array is not None:
        return array
    raise ValueError("Wrong string")
6551255 | <reponame>dkuspawono/cloudmarker<filename>cloudmarker/test/test_main.py
"""Tests for package execution."""
import unittest
from unittest import mock
class MainTest(unittest.TestCase):
    """Tests for package execution."""

    @mock.patch('sys.argv', ['cloudmarker', '-c', '-n'])
    def test_main(self):
        # Run cloudmarker package with only the default base
        # configuration and ensure that it runs without issues.
        # The import itself triggers package execution, so it must happen
        # here, after sys.argv has been patched by the decorator.
        import cloudmarker.__main__
        self.assertEqual(type(cloudmarker.__main__).__name__, 'module')
| StarcoderdataPython |
3413981 | <reponame>jerenner/stempy
import sys
from pathlib import Path
import click
import stempy.io as stio
import stempy.image as stim
@click.command()
@click.option('-i', '--input-path', help='HDF5 file containing the electron counts',
type=click.Path(exists=True, dir_okay=False), default=None, show_default=True)
@click.argument('scan-num', required=False)
def main(input_path, scan_num):
if input_path is None:
if scan_num is None:
raise click.ClickException('Please provide scan number')
input_path = Path(f'/mnt/hdd1/data_scan{scan_num}_th4_electrons.h5')
# Load the electron counted data
ee = stio.load_electron_counts(str(input_path))
# Create STEM images with inner and outer radii
ims = stim.create_stem_images(ee, (0, 0, 110, 220),
(110, 220, 240, 288),
center=(307, 282))
# Calculate summed diffraction pattern
dp = stim.calculate_sum_sparse(ee.data, ee.frame_dimensions)
if __name__ == '__main__':
main() | StarcoderdataPython |
6691246 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class UserManager(BaseUserManager):
    """Manager that knows how to create regular users and superusers."""

    def create_user(self, username, password):
        """Create, persist and return a user with the given credentials."""
        if not username:
            raise ValueError('User must have a username')
        user = self.model(username=username)
        user.set_password(password)  # stores a salted hash, never plaintext
        user.save(using=self._db)
        return user

    def create_superuser(self, username, password):
        """Create a regular user, then promote it to superuser."""
        user = self.create_user(username=username,
                                password=password)
        user.is_superuser = True
        # NOTE(review): this saves the row a second time (create_user already
        # saved it); harmless for a management command.
        user.save(using=self._db)
        return user
class User(AbstractBaseUser):
    """Minimal custom user model identified by a unique username."""

    username = models.CharField(max_length=20, unique=True)
    # NOTE(review): AbstractBaseUser already declares a ``password`` field
    # (CharField, max_length=128); overriding it as TextField changes the
    # column type -- confirm this is intentional.
    password = models.TextField(null=False)
    is_superuser = models.BooleanField(default=False)

    USERNAME_FIELD = 'username'
    # Per the Django docs, REQUIRED_FIELDS lists the *extra* fields that
    # ``createsuperuser`` prompts for; it must not include USERNAME_FIELD
    # or the password, which are always prompted for.  The previous value
    # (['password']) violated that contract.
    REQUIRED_FIELDS = []

    objects = UserManager()
| StarcoderdataPython |
6425222 | <filename>my_games/snake/lib/model/snake.py<gh_stars>0
from lib.model.game_model_list import GameModelList
# game snake for holding block game objects
# - manages block color draw
# - manages block collidable property
# - manages additions and removals of game blocks (simulate snake movement)
class Snake(GameModelList):
    """Snake game object: block models stored as a doubly linked list.

    Responsibilities:
      - draws its blocks in the configured block color
      - exposes head/tail access
      - simulates movement by appending a head block and, unless the snake
        was recently fed, popping the tail block
    """

    def __init__(
        self,
        game,
        game_models=None,
        collidable=False,
        block_color=None
    ):
        # ``None`` default instead of ``[]`` avoids the shared
        # mutable-default-argument pitfall; behavior is otherwise unchanged.
        game_models = [] if game_models is None else game_models
        super().__init__(
            game=game,
            game_models=game_models,
            collidable=collidable,
            block_color=block_color
        )
        # Remaining moves during which the snake keeps growing after eating.
        self.fed_countdown = self.game.config.SNAKE_FED_DURATION
        for block in self.game_models:
            block.set_color(self.block_color)
            block.draw()
        # Build the doubly linked list; each node is a dict holding the
        # block model plus prev/next links.  The first model is the head.
        self.head = {
            'model': game_models[0],
            'next': None,
            'prev': None
        }
        last = self.head
        for game_model in game_models[1:]:
            node = {
                'model': game_model,
                'next': None,
                'prev': last
            }
            last['next'] = node
            last = node
        # For a single-block snake the head is also the tail (the original
        # code left ``self.tail`` unset in that case).
        self.tail = last

    def get_head(self):
        """Return the block model at the snake's head."""
        return self.head['model']

    def get_tail(self):
        """Return the block model at the snake's tail."""
        return self.tail['model']

    def append_head(self, head_model):
        """Prepend *head_model* as the new head, then draw and update it."""
        new_head = {
            'model': head_model,
            'next': self.head,
            'prev': None
        }
        self.head['prev'] = new_head
        self.head = new_head
        self.add_game_model(game_model=head_model)
        head_model.set_color(self.block_color)
        head_model.draw()
        head_model.update()

    def pop_tail(self):
        """Detach the tail node and return its block model."""
        new_tail = self.tail['prev']
        old_tail = self.tail
        old_tail['prev'] = None
        if new_tail is not None:
            # Keep the list consistent: the new tail must not keep a
            # dangling link to the removed node.
            new_tail['next'] = None
        self.tail = new_tail
        self.remove_game_model(old_tail['model'])
        return old_tail['model']

    def move_snake(self, new_head):
        """Advance by one step: add *new_head*; pop and return the old tail
        block, or return None while the snake is still growing."""
        self.append_head(new_head)
        if self.fed_countdown > 0:
            self.fed_countdown -= 1
            return None
        return self.pop_tail()

    def feed(self):
        """Restart the growth countdown after the snake eats."""
        self.fed_countdown = self.game.config.SNAKE_FED_DURATION
| StarcoderdataPython |
1977161 | <filename>madelon/main.py<gh_stars>10-100
# python spearmint_sync.py --method=GPEIOptChooser madelon_sofia_vw_rbf
import sys, subprocess, re, os
from math import exp
def get_validation_loss( data ):
    """Extract the validation loss from output of the form 'error: <float>'.

    Raises ValueError (instead of an opaque AttributeError on the None
    match object) when the pattern is absent from *data*.
    """
    pattern = 'error: ([0-9.]+)'
    matches = re.search( pattern, data )
    if matches is None:
        raise ValueError( "no validation loss found in output" )
    validation_loss = float( matches.group( 1 ))
    return validation_loss
def get_validation_acc( data ):
    """Extract the accuracy from output of the form 'accuracy: <float>'.

    Raises ValueError (instead of an opaque AttributeError on the None
    match object) when the pattern is absent from *data*.
    """
    pattern = 'accuracy: ([0-9.]+)'
    matches = re.search( pattern, data )
    if matches is None:
        raise ValueError( "no accuracy found in output" )
    acc = float( matches.group( 1 ))
    return acc
def run_test( params ):
    """Run one hyper-parameter trial (Python 2 code, driven by spearmint).

    Pipeline: sofia-kmeans clustering -> RBF cluster mapping -> libsvm-to-vw
    conversion -> Vowpal Wabbit logistic training -> AUC-based error on the
    validation set.  Returns the validation loss to be minimized.

    NOTE(review): parameter values are interpolated straight into shell
    commands; acceptable only because spearmint supplies numeric values.
    """
    #debug_o = open( 'debug', 'wb' )
    #print >> debug_o, params
    centers = params["centers"][0]
    rbf_param = params["rbf_param"][0]
    #print >> debug_o, parameters
    # find centers
    # dimensionality w/label (+1)
    cmd = "sofia-kmeans --k %s --init_type optimized_kmeans_pp --opt_type mini_batch_kmeans --mini_batch_size 100 --iterations 500 --objective_after_init --objective_after_training --training_file data/all.libsvm.txt --model_out data/model_sofia --dimensionality 13" % ( centers )
    os.system( cmd )
    # map train
    cmd = "sofia-kmeans --model_in data/model_sofia --test_file data/train.libsvm.txt --cluster_mapping_out data/mapped_train.libsvm.txt --cluster_mapping_type rbf_kernel --cluster_mapping_param %s" % ( rbf_param )
    os.system( cmd )
    # map validation
    cmd = "sofia-kmeans --model_in data/model_sofia --test_file data/validation.libsvm.txt --cluster_mapping_out data/mapped_validation.libsvm.txt --cluster_mapping_type rbf_kernel --cluster_mapping_param %s" % ( rbf_param )
    os.system( cmd )
    # map test
    #cmd = "sofia-kmeans --model_in data/model_sofia --test_file data/test.libsvm.txt --cluster_mapping_out data/mapped_test.libsvm.txt"
    #os.system( cmd )
    ###
    # train 2 vw
    cmd = "python libsvm2vw.py data/mapped_train.libsvm.txt data/mapped_train.vw"
    os.system( cmd )
    # validation 2 vw
    cmd = "python libsvm2vw.py data/mapped_validation.libsvm.txt data/mapped_validation.vw"
    os.system( cmd )
    ###
    # train vw
    cmd = "vw -d data/mapped_train.vw -f data/model_vw -c -k --passes 100 --loss_function logistic"
    os.system( cmd )
    # predict vw
    cmd = "vw -t -d data/mapped_validation.vw -i data/model_vw -p data/p.txt --loss_function logistic"
    os.system( cmd )
    # python rmse.py data/sparse_validation.vw data/p.txt
    data = subprocess.check_output( ['python', 'auc.py', 'data/mapped_validation.vw', 'data/p.txt' ] )
    validation_loss = get_validation_loss( data )
    #data = subprocess.check_output( ['python', 'acc.py', 'data/mapped_validation.vw', 'data/p.txt' ] )
    #validation_acc = get_validation_acc( data )
    print 'error: ', validation_loss
    #print 'acc: ', validation_acc
    print
    return validation_loss
def main( job_id, params ):
    """Spearmint entry point: log the trial parameters and return its loss."""
    print 'Job id:', str( job_id )
    print "centers: %s" % ( params['centers'][0] )
    print "rbf param: %s" % ( params['rbf_param'][0] )
    return run_test( params )
| StarcoderdataPython |
3373279 | import sys
from pysam import VariantFile
def main():
    """Filter a VCF: print the header plus only the records that are fully
    genotyped (no missing alleles) in both samples HG00733 and NA19240.

    Usage: script.py <path-to-vcf>
    """
    vcfpath = sys.argv[1]
    vcf = VariantFile(vcfpath)
    # join(split(...)) reconstructs str(header); the trailing [:-1] drops
    # its last character -- presumably the trailing newline, which print
    # then adds back.  TODO confirm header always ends with '\n'.
    print('\n'.join(str(vcf.header).split('\n'))[:-1])
    # Histograms: number of records with 0/1/2 missing alleles per sample.
    hgnones = {0: 0, 1: 0, 2: 0}
    nanones = {0: 0, 1: 0, 2: 0}
    for record in vcf.fetch():
        hg1, hg2 = record.samples["HG00733"]["GT"]
        na1, na2 = record.samples["NA19240"]["GT"]
        hg = (hg1 is None) + (hg2 is None)
        na = (na1 is None) + (na2 is None)
        hgnones[hg] += 1
        nanones[na] += 1
        # Drop records where either sample has at least one missing allele.
        if hg1 is None or hg2 is None or na1 is None or na2 is None:
            continue
        print(record, end='')
    # Statistics go to stderr so stdout remains a valid VCF stream.
    print(hgnones, file=sys.stderr)
    print(nanones, file=sys.stderr)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1683383 | <reponame>CharleeSF/brian2
"""
<NAME> (2013). Sharpness of spike initiation in neurons explained by compartmentalization.
PLoS Comp Biol, doi: 10.1371/journal.pcbi.1003338.
Fig. 3C-F. Kink with Nav1.6 and Nav1.2
"""
from brian2 import *
from params import *

defaultclock.dt = 0.01*ms

# Morphology
morpho = Soma(50*um) # chosen for a target Rm
morpho.axon = Cylinder(diameter=1*um, length=300*um, n=300)
location16 = 40*um # where Nav1.6 channels are placed
location12 = 15*um # where Nav1.2 channels are placed
va2 = va + 15*mV # depolarized Nav1.2

# Channels: simplified (instantaneous-activation-style) Na channels; the
# second population (m2/gNa2) models Nav1.2 with a depolarized activation
# curve (va2 above).
duration = 100*ms
eqs='''
Im = gL * (EL - v) + gNa*m*(ENa - v) + gNa2*m2*(ENa - v) : amp/meter**2
dm/dt = (minf - m) / taum : 1 # simplified Na channel
minf = 1 / (1 + exp((va - v) / ka)) : 1
dm2/dt = (minf2 - m2) / taum : 1 # simplified Na channel, Nav1.2
minf2 = 1/(1 + exp((va2 - v) / ka)) : 1
gNa : siemens/meter**2
gNa2 : siemens/meter**2 # Nav1.2
Iin : amp (point current)
'''
neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri,
                       method="exponential_euler")
compartment16 = morpho.axon[location16]
compartment12 = morpho.axon[location12]
neuron.v = EL
# Conductances are densities, hence the division by compartment area.
neuron.gNa[compartment16] = gNa_0/neuron.area[compartment16]
neuron.gNa2[compartment12] = 20*gNa_0/neuron.area[compartment12]

# Monitors
M = StateMonitor(neuron, ['v', 'm', 'm2'], record=True)

# 20 ms settling time, then a constant somatic current injection for 80 ms.
run(20*ms, report='text')
neuron.Iin[0] = gL * 20*mV * neuron.area[0]
run(80*ms, report='text')

# Panel C: somatic and axonal voltage traces (dashed: open-channel fraction).
subplot(221)
plot(M.t/ms, M[0].v/mV, 'r')
plot(M.t/ms, M[compartment16].v/mV, 'k')
plot(M.t/ms, M[compartment16].m*(80+60)-80, 'k--') # open channels
ylim(-80, 60)
xlabel('Time (ms)')
ylabel('V (mV)')
title('Voltage traces')
# Panel D: measured activation vs. steady-state activation curves.
subplot(222)
plot(M[0].v/mV, M[compartment16].m,'k')
plot(M[0].v/mV, 1 / (1 + exp((va - M[0].v) / ka)), 'k--')
plot(M[0].v/mV, M[compartment12].m2, 'r')
plot(M[0].v/mV, 1 / (1 + exp((va2 - M[0].v) / ka)), 'r--')
xlim(-70, 0)
xlabel('V (mV)')
ylabel('m')
title('Activation curves')
# Panel E: phase plot dV/dt vs. V at soma (red) and axon (black).
subplot(223)
dm = diff(M[0].v) / defaultclock.dt
dm40 = diff(M[compartment16].v) / defaultclock.dt
plot((M[0].v/mV)[1:], dm/(volt/second), 'r')
plot((M[compartment16].v/mV)[1:], dm40/(volt/second), 'k')
xlim(-80, 40)
xlabel('V (mV)')
ylabel('dV/dt (V/s)')
title('Phase plot')
# Panel F: zoom near threshold, with a 10 V/s reference line (dashed).
subplot(224)
plot((M[0].v/mV)[1:], dm/(volt/second), 'r')
plot((M[compartment16].v/mV)[1:], dm40/(volt/second), 'k')
plot((M[0].v/mV)[1:], 10 + 0*dm/(volt/second), 'k--')
xlim(-70, -40)
ylim(0, 20)
xlabel('V (mV)')
ylabel('dV/dt (V/s)')
title('Phase plot(zoom)')
show()
| StarcoderdataPython |
226436 | <filename>Aeneas/aeneas/aeneas/ttswrappers/espeakngttswrapper.py
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, <NAME> (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, <NAME> (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the following classes:
* :class:`~aeneas.ttswrappers.espeakngttswrapper.ESPEAKNGTTSWrapper`,
a wrapper for the ``eSpeak-ng`` TTS engine.
Please refer to
https://github.com/espeak-ng/espeak-ng/
for further details.
"""
from __future__ import absolute_import
from __future__ import print_function
from aeneas.exacttiming import TimeValue
from aeneas.language import Language
from aeneas.runtimeconfiguration import RuntimeConfiguration
from aeneas.ttswrappers.basettswrapper import BaseTTSWrapper
import aeneas.globalfunctions as gf
class ESPEAKNGTTSWrapper(BaseTTSWrapper):
"""
A wrapper for the ``eSpeak-ng`` TTS engine.
This wrapper supports calling the TTS engine
via ``subprocess``.
Future support for calling via Python C extension
is planned.
In abstract terms, it performs one or more calls like ::
$ espeak-ng -v voice_code -w /tmp/output_file.wav < text
To use this TTS engine, specify ::
"tts=espeak-ng"
in the ``RuntimeConfiguration`` object.
To execute from a non-default location: ::
"tts=espeak-ng|tts_path=/path/to/espeak-ng"
See :class:`~aeneas.ttswrappers.basettswrapper.BaseTTSWrapper`
for the available functions.
Below are listed the languages supported by this wrapper.
:param rconf: a runtime configuration
:type rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
:param logger: the logger object
:type logger: :class:`~aeneas.logger.Logger`
"""
AFR = Language.AFR
""" Afrikaans """
AMH = Language.AMH
""" Amharic (not tested) """
ARG = Language.ARG
""" Aragonese (not tested) """
ASM = Language.ASM
""" Assamese (not tested) """
AZE = Language.AZE
""" Azerbaijani (not tested) """
BEN = Language.BEN
""" Bengali (not tested) """
BOS = Language.BOS
""" Bosnian (not tested) """
BUL = Language.BUL
""" Bulgarian """
CAT = Language.CAT
""" Catalan """
CES = Language.CES
""" Czech """
CMN = Language.CMN
""" Mandarin Chinese (not tested) """
CYM = Language.CYM
""" Welsh """
DAN = Language.DAN
""" Danish """
DEU = Language.DEU
""" German """
ELL = Language.ELL
""" Greek (Modern) """
ENG = Language.ENG
""" English """
EPO = Language.EPO
""" Esperanto (not tested) """
EST = Language.EST
""" Estonian """
EUS = "eus"
""" Basque (not tested) """
FAS = Language.FAS
""" Persian """
FIN = Language.FIN
""" Finnish """
FRA = Language.FRA
""" French """
GLA = Language.GLA
""" Scottish Gaelic (not tested) """
GLE = Language.GLE
""" Irish """
GRC = Language.GRC
""" Greek (Ancient) """
GRN = Language.GRN
""" Guarani (not tested) """
GUJ = Language.GUJ
""" Gujarati (not tested) """
HIN = Language.HIN
""" Hindi (not tested) """
HRV = Language.HRV
""" Croatian """
HUN = Language.HUN
""" Hungarian """
HYE = Language.HYE
""" Armenian (not tested) """
INA = Language.INA
""" Interlingua (not tested) """
IND = Language.IND
""" Indonesian (not tested) """
ISL = Language.ISL
""" Icelandic """
ITA = Language.ITA
""" Italian """
JBO = Language.JBO
""" Lojban (not tested) """
KAL = Language.KAL
""" Greenlandic (not tested) """
KAN = Language.KAN
""" Kannada (not tested) """
KAT = Language.KAT
""" Georgian (not tested) """
KIR = Language.KIR
""" Kirghiz (not tested) """
KUR = Language.KUR
""" Kurdish (not tested) """
LAT = Language.LAT
""" Latin """
LAV = Language.LAV
""" Latvian """
LFN = Language.LFN
""" Lingua Franca Nova (not tested) """
LIT = Language.LIT
""" Lithuanian """
MAL = Language.MAL
""" Malayalam (not tested) """
MAR = Language.MAR
""" Marathi (not tested) """
MKD = Language.MKD
""" Macedonian (not tested) """
MLT = Language.MLT
""" Maltese (not tested) """
MSA = Language.MSA
""" Malay (not tested) """
MYA = Language.MYA
""" Burmese (not tested) """
NAH = Language.NAH
""" Nahuatl (not tested) """
NEP = Language.NEP
""" Nepali (not tested) """
NLD = Language.NLD
""" Dutch """
NOR = Language.NOR
""" Norwegian """
ORI = Language.ORI
""" Oriya (not tested) """
ORM = Language.ORM
""" Oromo (not tested) """
PAN = Language.PAN
""" Panjabi (not tested) """
PAP = Language.PAP
""" Papiamento (not tested) """
POL = Language.POL
""" Polish """
POR = Language.POR
""" Portuguese """
RON = Language.RON
""" Romanian """
RUS = Language.RUS
""" Russian """
SIN = Language.SIN
""" Sinhala (not tested) """
SLK = Language.SLK
""" Slovak """
SLV = Language.SLV
""" Slovenian (not tested) """
SPA = Language.SPA
""" Spanish """
SQI = Language.SQI
""" Albanian (not tested) """
SRP = Language.SRP
""" Serbian """
SWA = Language.SWA
""" Swahili """
SWE = Language.SWE
""" Swedish """
TAM = Language.TAM
""" Tamil (not tested) """
TAT = Language.TAT
""" Tatar (not tested) """
TEL = Language.TEL
""" Telugu (not tested) """
TSN = Language.TSN
""" Tswana (not tested) """
TUR = Language.TUR
""" Turkish """
UKR = Language.UKR
""" Ukrainian """
URD = Language.URD
""" Urdu (not tested) """
VIE = Language.VIE
""" Vietnamese (not tested) """
YUE = Language.YUE
""" Yue Chinese (not tested) """
ZHO = Language.ZHO
""" Chinese (not tested) """
ENG_GBR = "eng-GBR"
""" English (GB) """
ENG_SCT = "eng-SCT"
""" English (Scotland) (not tested) """
ENG_USA = "eng-USA"
""" English (USA) """
SPA_ESP = "spa-ESP"
""" Spanish (Castillan) """
FRA_BEL = "fra-BEL"
""" French (Belgium) (not tested) """
FRA_FRA = "fra-FRA"
""" French (France) """
POR_BRA = "por-bra"
""" Portuguese (Brazil) (not tested) """
POR_PRT = "por-prt"
""" Portuguese (Portugal) """
AF = "af"
""" Afrikaans """
AN = "an"
""" Aragonese (not tested) """
AM = "am"
""" Amharic (not tested) """
AS = "as"
""" Assamese (not tested) """
AZ = "az"
""" Azerbaijani (not tested) """
BG = "bg"
""" Bulgarian """
BN = "bn"
""" Bengali (not tested) """
BS = "bs"
""" Bosnian (not tested) """
CA = "ca"
""" Catalan """
CS = "cs"
""" Czech """
CY = "cy"
""" Welsh """
DA = "da"
""" Danish """
DE = "de"
""" German """
EL = "el"
""" Greek (Modern) """
EN = "en"
""" English """
EN_GB = "en-gb"
""" English (GB) """
EN_GB_SCOTLAND = "en-gb-scotland"
""" English (Scotland) (not tested) """
EN_GB_X_GBCLAN = "en-gb-x-gbclan"
""" English (Northern) (not tested) """
EN_GB_X_GBCWMD = "en-gb-x-gbcwmd"
""" English (Midlands) (not tested) """
EN_GB_X_RP = "en-gb-x-rp"
""" English (Received Pronunciation) (not tested) """
EN_US = "en-us"
""" English (USA) """
EN_029 = "en-029"
""" English (West Indies) (not tested) """
EO = "eo"
""" Esperanto (not tested) """
ES = "es"
""" Spanish (Castillan) """
ES_419 = "es-419"
""" Spanish (Latin America) (not tested) """
ET = "et"
""" Estonian """
EU = "eu"
""" Basque (not tested) """
FA = "fa"
""" Persian """
FA_LATN = "fa-Latn"
""" Persian (Pinglish) """
FI = "fi"
""" Finnish """
FR = "fr"
""" French """
FR_BE = "fr-be"
""" French (Belgium) (not tested) """
FR_FR = "fr-fr"
""" French (France) """
GA = "ga"
""" Irish """
GD = "gd"
""" Scottish Gaelic (not tested) """
GN = "gn"
""" Guarani (not tested) """
# NOTE already defined
# COMMENTED GRC = "grc"
# COMMENTED """ Greek (Ancient) """
GU = "gu"
""" Gujarati (not tested) """
HI = "hi"
""" Hindi (not tested) """
HR = "hr"
""" Croatian """
HU = "hu"
""" Hungarian """
HY = "hy"
""" Armenian (not tested) """
HY_AREVMDA = "hy-arevmda"
""" Armenian (West) (not tested) """
IA = "ia"
""" Interlingua (not tested) """
ID = "id"
""" Indonesian (not tested) """
IS = "is"
""" Icelandic """
IT = "it"
""" Italian """
# NOTE already defined
# COMMENTED JBO = "jbo"
# COMMENTED """ Lojban (not tested) """
KA = "ka"
""" Georgian (not tested) """
KL = "kl"
""" Greenlandic (not tested) """
KN = "kn"
""" Kannada (not tested) """
KU = "ku"
""" Kurdish (not tested) """
KY = "ky"
""" Kirghiz (not tested) """
LA = "la"
""" Latin """
# NOTE already defined
# COMMENTED LFN = "lfn"
# COMMENTED """ <NAME> (not tested) """
LT = "lt"
""" Lithuanian """
LV = "lv"
""" Latvian """
MK = "mk"
""" Macedonian (not tested) """
ML = "ml"
""" Malayalam (not tested) """
MR = "mr"
""" Marathi (not tested) """
MS = "ms"
""" Malay (not tested) """
MT = "mt"
""" Maltese (not tested) """
MY = "my"
""" Burmese (not tested) """
NCI = "nci"
""" Nahuatl (not tested) """
NE = "ne"
""" Nepali (not tested) """
NL = "nl"
""" Dutch """
NO = "no"
""" Norwegian """
OM = "om"
""" Oromo (not tested) """
OR = "or"
""" Oriya (not tested) """
PA = "pa"
""" Panjabi (not tested) """
PL = "pl"
""" Polish """
PT = "pt"
""" Portuguese """
PT_BR = "pt-br"
""" Portuguese (Brazil) (not tested) """
PT_PT = "pt-pt"
""" Portuguese (Portugal) """
RO = "ro"
""" Romanian """
RU = "ru"
""" Russian """
SI = "si"
""" Sinhala (not tested) """
SK = "sk"
""" Slovak """
SL = "sl"
""" Slovenian (not tested) """
SQ = "sq"
""" Albanian (not tested) """
SR = "sr"
""" Serbian """
SV = "sv"
""" Swedish """
SW = "sw"
""" Swahili """
TA = "ta"
""" Tamil (not tested) """
TE = "te"
""" Telugu (not tested) """
TN = "tn"
""" Tswana (not tested) """
TR = "tr"
""" Turkish """
TT = "tt"
""" Tatar (not tested) """
UK = "uk"
""" Ukrainian """
UR = "ur"
""" Urdu (not tested) """
VI = "vi"
""" Vietnamese (not tested) """
VI_VN_X_CENTRAL = "vi-vn-x-central"
""" Vietnamese (hue) (not tested) """
VI_VN_X_SOUTH = "vi-vn-x-south"
""" Vietnamese (sgn) (not tested) """
ZH = "zh"
""" Mandarin Chinese (not tested) """
ZH_YUE = "zh-yue"
""" Yue Chinese (not tested) """
CODE_TO_HUMAN = {
AFR: u"Afrikaans",
AMH: u"Amharic (not tested)",
ARG: u"Aragonese (not tested)",
ASM: u"Assamese (not tested)",
AZE: u"Azerbaijani (not tested)",
BEN: u"Bengali (not tested)",
BOS: u"Bosnian (not tested)",
BUL: u"Bulgarian",
CAT: u"Catalan",
CES: u"Czech",
CMN: u"Mandarin Chinese (not tested)",
CYM: u"Welsh",
DAN: u"Danish",
DEU: u"German",
ELL: u"Greek (Modern)",
ENG: u"English",
EPO: u"Esperanto (not tested)",
EST: u"Estonian",
EUS: u"Basque (not tested)",
FAS: u"Persian",
FIN: u"Finnish",
FRA: u"French",
GLA: u"Scottish Gaelic (not tested)",
GLE: u"Irish",
GRC: u"Greek (Ancient)",
GRN: u"Guarani (not tested)",
GUJ: u"Gujarati (not tested)",
HIN: u"Hindi (not tested)",
HRV: u"Croatian",
HUN: u"Hungarian",
HYE: u"Armenian (not tested)",
INA: u"Interlingua (not tested)",
IND: u"Indonesian (not tested)",
ISL: u"Icelandic",
ITA: u"Italian",
JBO: u"Lojban (not tested)",
KAL: u"Greenlandic (not tested)",
KAN: u"Kannada (not tested)",
KAT: u"Georgian (not tested)",
KIR: u"Kirghiz (not tested)",
KUR: u"Kurdish (not tested)",
LAT: u"Latin",
LAV: u"Latvian",
LFN: u"Lingua Franca Nova (not tested)",
LIT: u"Lithuanian",
MAL: u"Malayalam (not tested)",
MAR: u"Marathi (not tested)",
MKD: u"Macedonian (not tested)",
MLT: u"Maltese (not tested)",
MSA: u"Malay (not tested)",
MYA: u"Burmese (not tested)",
NAH: u"Nahuatl (not tested)",
NEP: u"Nepali (not tested)",
NLD: u"Dutch",
NOR: u"Norwegian",
ORI: u"Oriya (not tested)",
ORM: u"Oromo (not tested)",
PAN: u"Panjabi (not tested)",
PAP: u"Papiamento (not tested)",
POL: u"Polish",
POR: u"Portuguese",
RON: u"Romanian",
RUS: u"Russian",
SIN: u"Sinhala (not tested)",
SLK: u"Slovak",
SLV: u"Slovenian (not tested)",
SPA: u"Spanish",
SQI: u"Albanian (not tested)",
SRP: u"Serbian",
SWA: u"Swahili",
SWE: u"Swedish",
TAM: u"Tamil (not tested)",
TAT: u"Tatar (not tested)",
TEL: u"Telugu (not tested)",
TSN: u"Tswana (not tested)",
TUR: u"Turkish",
UKR: u"Ukrainian",
URD: u"Urdu (not tested)",
VIE: u"Vietnamese (not tested)",
YUE: u"Yue Chinese (not tested)",
ZHO: u"Chinese (not tested)",
ENG_GBR: u"English (GB)",
ENG_SCT: u"English (Scotland) (not tested)",
ENG_USA: u"English (USA)",
SPA_ESP: u"Spanish (Castillan)",
FRA_BEL: u"French (Belgium) (not tested)",
FRA_FRA: u"French (France)",
POR_BRA: u"Portuguese (Brazil) (not tested)",
POR_PRT: u"Portuguese (Portugal)",
AF: u"Afrikaans",
AN: u"Aragonese (not tested)",
AM: u"Amharic (not tested)",
AS: u"Assamese (not tested)",
AZ: u"Azerbaijani (not tested)",
BG: u"Bulgarian",
BN: u"Bengali (not tested)",
BS: u"Bosnian (not tested)",
CA: u"Catalan",
CS: u"Czech",
CY: u"Welsh",
DA: u"Danish",
DE: u"German",
EL: u"Greek (Modern)",
EN: u"English",
EN_GB: u"English (GB)",
EN_GB_SCOTLAND: u"English (Scotland) (not tested)",
EN_GB_X_GBCLAN: u"English (Northern) (not tested)",
EN_GB_X_GBCWMD: u"English (Midlands) (not tested)",
EN_GB_X_RP: u"English (Received Pronunciation) (not tested)",
EN_US: u"English (USA)",
EN_029: u"English (West Indies) (not tested)",
EO: u"Esperanto (not tested)",
ES: u"Spanish (Castillan)",
ES_419: u"Spanish (Latin America) (not tested)",
ET: u"Estonian",
EU: u"Basque (not tested)",
FA: u"Persian",
FA_LATN: u"Persian (Pinglish)",
FI: u"Finnish",
FR: u"French",
FR_BE: u"French (Belgium) (not tested)",
FR_FR: u"French (France)",
GA: u"Irish",
GD: u"Scottish Gaelic (not tested)",
GN: u"Guarani (not tested)",
GU: u"Gujarati (not tested)",
HI: u"Hindi (not tested)",
HR: u"Croatian",
HU: u"Hungarian",
HY: u"Armenian (not tested)",
HY_AREVMDA: u"Armenian (West) (not tested)",
IA: u"Interlingua (not tested)",
ID: u"Indonesian (not tested)",
IS: u"Icelandic",
IT: u"Italian",
KA: u"Georgian (not tested)",
KL: u"Greenlandic (not tested)",
KN: u"Kannada (not tested)",
KU: u"Kurdish (not tested)",
KY: u"Kirghiz (not tested)",
LA: u"Latin",
LT: u"Lithuanian",
LV: u"Latvian",
MK: u"Macedonian (not tested)",
ML: u"Malayalam (not tested)",
MR: u"Marathi (not tested)",
MS: u"Malay (not tested)",
MT: u"Maltese (not tested)",
MY: u"Burmese (not tested)",
NCI: u"Nahuatl (not tested)",
NE: u"Nepali (not tested)",
NL: u"Dutch",
NO: u"Norwegian",
OM: u"Oromo (not tested)",
OR: u"Oriya (not tested)",
PA: u"Panjabi (not tested)",
PL: u"Polish",
PT: u"Portuguese",
PT_BR: u"Portuguese (Brazil) (not tested)",
PT_PT: u"Portuguese (Portugal)",
RO: u"Romanian",
RU: u"Russian",
SI: u"Sinhala (not tested)",
SK: u"Slovak",
SL: u"Slovenian (not tested)",
SQ: u"Albanian (not tested)",
SR: u"Serbian",
SV: u"Swedish",
SW: u"Swahili",
TA: u"Tamil (not tested)",
TE: u"Telugu (not tested)",
TN: u"Tswana (not tested)",
TR: u"Turkish",
TT: u"Tatar (not tested)",
UK: u"Ukrainian",
UR: u"Urdu (not tested)",
VI: u"Vietnamese (not tested)",
VI_VN_X_CENTRAL: u"Vietnamese (hue) (not tested)",
VI_VN_X_SOUTH: u"Vietnamese (sgn) (not tested)",
ZH: u"Mandarin Chinese (not tested)",
ZH_YUE: u"Yue Chinese (not tested)",
}
CODE_TO_HUMAN_LIST = sorted([u"%s\t%s" % (k, v) for k, v in CODE_TO_HUMAN.items()])
LANGUAGE_TO_VOICE_CODE = {
AF: "af",
AM: "am",
AN: "an",
AS: "as",
AZ: "az",
BG: "bg",
BN: "bn",
BS: "bs",
CA: "ca",
CS: "cs",
CY: "cy",
DA: "da",
DE: "de",
EL: "el",
EN: "en",
EN_029: "en-029",
EN_GB: "en-gb",
EN_GB_SCOTLAND: "en-gb-scotland",
EN_GB_X_GBCLAN: "en-gb-x-gbclan",
EN_GB_X_GBCWMD: "en-gb-x-gbcwmd",
EN_GB_X_RP: "en-gb-x-rp",
EN_US: "en-us",
EO: "eo",
ES: "es",
ES_419: "es-419",
ET: "et",
EU: "eu",
FA: "fa",
FA_LATN: "fa-Latn",
FI: "fi",
FR: "fr",
FR_BE: "fr-be",
FR_FR: "fr-fr",
GA: "ga",
GD: "gd",
# COMMENTED GRC: "grc",
GN: "gn",
GU: "gu",
HI: "hi",
HR: "hr",
HU: "hu",
HY: "hy",
HY_AREVMDA: "hy-arevmda",
IA: "ia",
ID: "id",
IS: "is",
IT: "it",
# COMMENTED JBO: "jbo",
KA: "ka",
KL: "kl",
KN: "kn",
KU: "ku",
KY: "ky",
LA: "la",
# COMMENTED LFN: "lfn",
LT: "lt",
LV: "lv",
MK: "mk",
ML: "ml",
MR: "mr",
MS: "ms",
MT: "mt",
MY: "my",
NCI: "nci",
NE: "ne",
NL: "nl",
NO: "no",
OM: "om",
OR: "or",
PA: "pa",
# COMMENTED PAP: "pap",
PL: "pl",
PT: "pt",
PT_BR: "pt-br",
PT_PT: "pt-pt",
RO: "ro",
RU: "ru",
SI: "si",
SK: "sk",
SL: "sl",
SQ: "sq",
SR: "sr",
SV: "sv",
SW: "sw",
TA: "ta",
TE: "te",
TN: "tn",
TR: "tr",
TT: "tt",
UK: "ru", # NOTE mocking support for Ukrainian with Russian voice
UR: "ur",
VI: "vi",
VI_VN_X_CENTRAL: "vi-vn-x-central",
VI_VN_X_SOUTH: "vi-vn-x-south",
ZH: "zh",
ZH_YUE: "zh-yue",
AFR: "af",
AMH: "am",
ARG: "an",
ASM: "as",
AZE: "az",
BEN: "bn",
BOS: "bs",
BUL: "bg",
CAT: "ca",
CES: "cs",
CMN: "zh",
CYM: "cy",
DAN: "da",
DEU: "de",
ELL: "el",
ENG: "en",
EPO: "eo",
EST: "et",
FAS: "fa",
FIN: "fi",
FRA: "fr",
GLA: "gd",
GLE: "ga",
GRC: "grc",
GRN: "gn",
GUJ: "gu",
HIN: "hi",
HRV: "hr",
HUN: "hu",
HYE: "hy",
INA: "ia",
IND: "id",
ISL: "is",
ITA: "it",
JBO: "jbo",
KAL: "kl",
KAN: "kn",
KAT: "ka",
KIR: "ky",
KUR: "ku",
LAT: "la",
LAV: "lv",
LFN: "lfn",
LIT: "lt",
MAL: "ml",
MAR: "mr",
MKD: "mk",
MLT: "mt",
MSA: "ms",
MYA: "my",
NAH: "nci",
NEP: "ne",
NLD: "nl",
NOR: "no",
ORI: "or",
ORM: "om",
PAN: "pa",
PAP: "pap",
POL: "pl",
POR: "pt",
RON: "ro",
RUS: "ru",
SIN: "si",
SLK: "sk",
SLV: "sl",
SPA: "es",
SQI: "sq",
SRP: "sr",
SWA: "sw",
SWE: "sv",
TAM: "ta",
TAT: "tt",
TEL: "te",
TSN: "tn",
TUR: "tr",
UKR: "ru", # NOTE mocking support for Ukrainian with Russian voice
URD: "ur",
VIE: "vi",
YUE: "zh-yue",
ZHO: "zh",
ENG_GBR: "en-gb",
ENG_SCT: "en-gb-scotland",
ENG_USA: "en-us",
SPA_ESP: "es-es",
FRA_BEL: "fr-be",
FRA_FRA: "fr-fr",
POR_BRA: "pt-br",
POR_PRT: "pt-pt"
}
DEFAULT_LANGUAGE = ENG
DEFAULT_TTS_PATH = "espeak-ng"
OUTPUT_AUDIO_FORMAT = ("pcm_s16le", 1, 22050)
HAS_SUBPROCESS_CALL = True
TAG = u"ESPEAKNGTTSWrapper"
    def __init__(self, rconf=None, logger=None):
        """Build the wrapper and set up the ``espeak-ng`` command template."""
        super(ESPEAKNGTTSWrapper, self).__init__(rconf=rconf, logger=logger)
        # Command-line template; the CLI_PARAMETER_* placeholders are
        # substituted by the base class with the actual voice code and
        # output wave path, and the text is fed via stdin.
        self.set_subprocess_arguments([
            self.tts_path,
            u"-v",
            self.CLI_PARAMETER_VOICE_CODE_STRING,
            u"-w",
            self.CLI_PARAMETER_WAVE_PATH,
            self.CLI_PARAMETER_TEXT_STDIN
        ])
| StarcoderdataPython |
1985324 | #!/usr/bin/env python3
import numpy as np
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, GlobalAveragePooling2D, Flatten, Dropout
from keras.layers.merge import concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, TensorBoard
# import tensorflow as tf
# from keras.backend.tensorflow_backend import set_session
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.45
# set_session(tf.Session(config = config))
from utils import DataLoader
def branch1(input, n_1x1):
    # Inception branch: plain 1x1 convolution.
    return Conv2D(n_1x1, kernel_size=(1, 1), padding='same', activation='selu')(input)
def branch2(input, n_3x3r, n_3x3):
    # Inception branch: 1x1 "reduce" convolution, then a 3x3 convolution.
    net = Conv2D(n_3x3r, kernel_size=(1, 1), padding='same', activation='selu')(input)
    return Conv2D(n_3x3, kernel_size=(3, 3), padding='same', activation='selu')(net)
def branch3(input, n_5x5r, n_5x5):
    # Inception branch: 1x1 "reduce" convolution, then a 5x5 convolution.
    net = Conv2D(n_5x5r, kernel_size=(1, 1), padding='same', activation='selu')(input)
    return Conv2D(n_5x5, kernel_size=(5, 5), padding='same', activation='selu')(net)
def branch4(input, n_pool):
    # Inception branch: 3x3 max-pool (stride 1), then a 1x1 projection.
    net = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same')(input)
    return Conv2D(n_pool, kernel_size=(1, 1), padding='same', activation='selu')(net)
def inception_block(input, n_1x1, n_3x3r, n_3x3, n_5x5r, n_5x5, n_pool):
    """Inception module that also concatenates the block's input itself
    along the channel axis (a densely-connected variant; the classic
    GoogLeNet form without the input is left commented out below)."""
    br1 = branch1(input, n_1x1)
    br2 = branch2(input, n_3x3r, n_3x3)
    br3 = branch3(input, n_5x5r, n_5x5)
    br4 = branch4(input, n_pool)
    # channel last
    # return concatenate([br1, br2, br3, br4], axis=-1)
    return concatenate([input, br1, br2, br3, br4], axis=-1)
def build_model():
    """Assemble a GoogLeNet/Inception-style CNN for 256x256 single-channel
    input with a 2-way softmax head; returns the (uncompiled) Keras model."""
    inputs = Input(shape=(256, 256, 1))
    # Stem: conv + pool layers before the inception stacks.
    model = Conv2D(64, kernel_size=(5, 5), padding='same', activation='selu')(inputs)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    model = Conv2D(64, kernel_size=(1, 1), padding='same', activation='selu')(model)
    model = Conv2D(192, kernel_size=(3, 3), padding='same', activation='selu')(model)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    # Stacked inception modules with progressively wider branch widths.
    model = inception_block(model, 64, 96, 128, 16, 32, 32)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    model = inception_block(model, 196, 96, 208, 16, 48, 64)
    model = inception_block(model, 160, 112, 224, 24, 64, 64)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    model = inception_block(model, 128, 128, 256, 24, 64, 64)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    model = inception_block(model, 112, 144, 288, 32, 64, 64)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    model = inception_block(model, 256, 160, 320, 32, 96, 96)
    model = GlobalAveragePooling2D()(model)
    # model = Dropout(0.4)(model)
    model = Dense(2, activation='softmax')(model)
    model = Model(inputs, model)
    model.summary()
    return model
if __name__ == '__main__':
    # Sample counts of the pre-extracted .npy feature shards.
    train_size, valid_size = 20411, 6804
    batch_size = 24
    epochs = 100
    train_loader = DataLoader(file_glob_pattern = 'feature/train_*.npy', batch_size = batch_size)
    valid_loader = DataLoader(file_glob_pattern = 'feature/valid_*.npy', batch_size = batch_size)
    # Persist only the best model (by validation loss) and log to TensorBoard.
    model_ckpt = ModelCheckpoint('./models/lang_classify.h5', verbose = 1, save_best_only = True)
    tensorboard = TensorBoard(log_dir='./logs/lang_classify', histogram_freq=0, write_graph=True, write_images=False)
    model = build_model()
    model.compile(loss = 'binary_crossentropy', optimizer = Adam(lr = 1e-4), metrics = ['accuracy'])
    model.fit_generator(train_loader, steps_per_epoch = train_size // batch_size,\
                        validation_data = valid_loader, validation_steps = valid_size // batch_size,\
                        epochs = epochs, callbacks = [model_ckpt, tensorboard])
| StarcoderdataPython |
3364112 | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Alfons
@contact: <EMAIL>
@file: 07-04-singledispatch.py
@time: 18-2-28 下午8:40
@version: v1.0
"""
from functools import singledispatch
import html
# Single-dispatch generic function: emulates function overloading by picking
# an implementation based on the type of the first argument.
@singledispatch
def htmlize(obj):
    """Fallback: render any object as its HTML-escaped repr() inside <pre>."""
    content = html.escape(repr(obj))
    return "<pre>%s</pre>" % content


# BUGFIX/idiom: the registered specializations were all named ``func1``, each
# rebinding (shadowing) the previous one at module level.  They now carry
# distinct private names; dispatch still goes through ``htmlize``.
@htmlize.register(str)
def _htmlize_str(text):
    """Escape the text and preserve line breaks inside a <p> block."""
    content = html.escape(text).replace("\n", "<br>\n")
    return "<p>%s</p>" % content


@htmlize.register(int)
def _htmlize_int(num):
    """Show integers in both decimal and hexadecimal."""
    return "<pre>{0} (0x{0:x})</pre>".format(num)


@htmlize.register(list)
def _htmlize_list(lst):
    """Render each element recursively as an <li> of an unordered list."""
    content = "</li>\n<li>".join(htmlize(enum) for enum in lst)
    return "<ul>\n<li>%s</li>\n</ul>" % content
# Demo: the dispatcher picks the right specialization per argument type --
# sets fall back to the generic repr() handler, strings and ints get their
# registered handlers, and lists recurse through htmlize element by element.
print("htmlize({1, 2, 3}:".rjust(30), htmlize({1, 2, 3}))
print("""htmlize("helllo"):""".rjust(30), htmlize("helllo"))
print(" htmlize(['jels', 54, {1, 2, 3}]):\n", htmlize(['jels', 54, {1, 2, 3}]))
9741660 | # firmware to play back TASes at high speed by just streaming in latches.
# includes APU frequency adjustments too.
# notational notes:
# boneless is a word-based architecture, it has no concept of the 8 bit byte.
# but we do, so we have to define what a word means.
# * a "word" is a 16 bit unsigned integer, transmitted and stored in
# little-endian byte order.
# * an "address" selects one "word".
# we define "CRC" as CRC-16/KERMIT (as defined by anycrc). it's computed in
# little-endian order, such that the CRC of words [0x0102, 0x0304] equals the
# CRC of bytes [0x2, 0x1, 0x3, 0x4]
# HOW IT WORKS
# The TAS is composed of a long sequence of "latches". To us, these are C word
# quantities (C is the number of controllers, up to six) that need to make it
# out to the hardware in time.
# Including 4 controllers and 1 APU frequency controller at 288 latches per
# frame (the fastest known TAS), we need to be able to stream around 1.4Mbits/s
# peak into the firmware. With a 1.6Mbits/s maximum rate, this will be a bit
# tight. Complicating things is the fact that the FTDI device imposes a 16ms
# device -> host latency.
# To handle this, we have a very large ring buffer to hold latches. We keep
# track of how much space remains in the buffer, plus the "stream position", a
# quantity that starts at 0 and increments every latch (and wraps around once it
# overflows a word). "Large" being a relative term; we can hold about 170ms of
# latches.
# Every 25ms, the firmware sends out a status packet, which tells the host the
# current stream position and how much space there is in the buffer. Because of
# the latency, the information is outdated as soon as it is sent. However, the
# host knows its own stream position, so it can calculate how much data has
# arrived and reduce the buffer space correspondingly. It then sends enough data
# to fill up the device's buffer again.
# If there is an error, then the firmware will immediately send a status packet
# with the corresponding error code. In response, the host will resume
# transmission at the stream position sent in the error packet.
# playback command packet format:
# first word: header (always 0x7A5A)
# second word: command
# bits 7-0: number of parameters, always 3 for the playback system's commands
# bits 15-8: command number, defined later
# third word: parameter 1 (command specific)
# fourth word: parameter 2 (command specific)
# fifth word: parameter 3 (command specific)
# sixth word: CRC of previous words (except first)
# Note: also accepts the bootloader hello command (command number 1 with 2
# unused parameters)
# playback status packet format
# first word: header (always 0x7A5A)
# second word: result
# bits 7-0: number of parameters, always 3 for the playback system's responses
# bits 15-8: result code, always 0x10 for the playback system's responses
# third word: last error:
# REGULAR ERRORS
# 0x00=no error, 0x01=invalid command, 0x02=bad CRC, 0x03=RX error,
# 0x04=RX timeout
# FATAL ERRORS (playback must be restarted)
# 0x40=buffer underrun, 0x41=missed latch
# fourth word: stream position
# fifth word: buffer space remaining
# sixth word: CRC of previous words (except first)
# commands
# command 0x10: send latches
# parameter 1: stream position
# parameter 2: number of latches
# parameter 3: unused
# purpose: send latch data.
#
# there is no response once the command packet is received and
# validated. the firmware expects "number of latches"*C words to
# follow, and a CRC of them all. if there is a problem, an error
# status packet will be sent as described above.
# command 0x11: request status
# parameter 1: unused
# parameter 2: unused
# parameter 3: unused
# purpose: request a status packet be immediately sent.
import random
from enum import IntEnum
from boneless.arch.opcode import Instr
from boneless.arch.opcode import *
from .bonetools import *
from ..gateware.periph_map import p_map
__all__ = ["make_firmware", "ErrorCode"]
class ErrorCode(IntEnum):
    """Error codes carried in the third word of a status packet.

    Codes below FATAL_ERROR_START are recoverable: the host resumes the
    stream at the position echoed in the error packet.  Codes at or above
    FATAL_ERROR_START mean playback must be restarted.
    """
    NONE = 0x00
    INVALID_COMMAND = 0x01
    BAD_CRC = 0x02
    RX_ERROR = 0x03
    RX_TIMEOUT = 0x04
    BAD_STREAM_POS = 0x05
    # this code and after are fatal errors
    FATAL_ERROR_START = 0x40  # alias of BUFFER_UNDERRUN (threshold marker)
    BUFFER_UNDERRUN = 0x40
    MISSED_LATCH = 0x41
# register number of each controller name
controller_name_to_addr = {
    # controller data registers (two data lines per port)
    "p1d0": p_map.snes.w_p1d0,
    "p1d1": p_map.snes.w_p1d1,
    "p2d0": p_map.snes.w_p2d0,
    "p2d1": p_map.snes.w_p2d1,
    # pseudo-controllers: words written here retune the APU clock generator
    "apu_freq_basic": p_map.snes.w_apu_freq_basic,
    "apu_freq_advanced": p_map.snes.w_apu_freq_advanced,
}
# MEMORY MAP
# We have a 32K word RAM into which we have to fit all the code, buffers, and
# register windows. We need as large a buffer as possible. We don't bother with
# write protection since the system can just be reset and the application can be
# redownloaded in the event of any corruption.
# Address | Size | Purpose
# ------------+-------+--------------------------------
# 0x0000-01BF | 448 | Code and variables
# 0x01C0-01FF | 64 | Register windows (8x)
# 0x0200-7FFF | 32256 | Latch buffer
LATCH_BUF_START = 0x200  # first word of the latch ring buffer
LATCH_BUF_END = 0x8000   # one past the last word of the buffer
LATCH_BUF_WORDS = LATCH_BUF_END-LATCH_BUF_START
# determine how many latches can fit in the above buffer given the number of
# controllers (i.e. words per latch). note that, since this is a ring buffer,
# it's full at buf_size-1 latches. but also there is 1 latch in the interface,
# so this cancels out.
def calc_buf_size(num_controllers: int) -> int:
    """Return how many latches fit in the latch buffer (one word per
    controller per latch)."""
    return LATCH_BUF_WORDS // num_controllers
FW_MAX_LENGTH = 0x1C0            # code + vars must fit below the reg windows
INITIAL_REGISTER_WINDOW = 0x1F8  # last 8-register window before the buffer
# variable number in the "vars" array. we don't bother giving variables
# individual labels because loading a variable from a label requires a register
# equal to zero, and the non-EXTI immediate size is smaller. so if we load the
# base of all the variables into that register, we can load any number of
# variables without having to keep a register zeroed and without having to use
# EXTIs to address anything.
class Vars(IntEnum):
    """Offsets of firmware variables relative to the "vars" label."""
    # the buffer is a ring buffer. head == tail is empty, head-1 == tail is full
    # (mod size). note that these are in units of latches, not words.
    buf_tail = 0    # next latch to feed to the SNES interface
    buf_head = 1    # next free slot for latches arriving from the host
    stream_pos = 2  # stream position: increments per latch, wraps at 2**16
    last_error = 3  # most recent ErrorCode reported to the host
# return instructions that calculate the address of the latch from the buffer
# index (multiply by number of controllers and add base)
def i_calc_latch_addr(dest, src, num_controllers):
    """Emit instructions computing the buffer address of a latch index.

    Converts the latch index in ``src`` into a word address in ``dest``:
    dest = src * num_controllers + LATCH_BUF_START.  For 1-3 controllers the
    multiply is done purely with adds; for 4-6 a shift-by-2 plus repeated
    adds is shorter.  Returns a single instruction for one controller and a
    list otherwise, matching how callers splice the result into their code.
    """
    if num_controllers not in (1, 2, 3, 4, 5, 6):
        raise ValueError("'{}' controllers is not 1-6".format(num_controllers))
    if num_controllers == 1:
        # dest = src + base, nothing to multiply
        return ADDI(dest, src, LATCH_BUF_START)
    if num_controllers <= 3:
        # dest = (src + base) + src * (n-1)
        seq = [ADDI(dest, src, LATCH_BUF_START)]
        seq.extend(ADD(dest, dest, src) for _ in range(num_controllers - 1))
        return seq
    # dest = src*4 + src*(n-4) + base
    seq = [SLLI(dest, src, 2)]
    seq.extend(ADD(dest, dest, src) for _ in range(num_controllers - 4))
    seq.append(ADDI(dest, dest, LATCH_BUF_START))
    return seq
# queue an error packet for transmission and return to main loop
# on entry (in caller window)
# R5: error code
def f_handle_error():
    """Generate the asm for the shared "handle_error" routine.

    Entered with J("handle_error"); the error code must already be in R5 of
    the current (caller) window.  Regular errors are stored in
    Vars.last_error and reported once (repeats are suppressed); fatal errors
    disable latching and are always reported.  Never returns to the caller:
    control ends up back in the main loop, possibly via send_status_packet.
    """
    lp = "_{}_".format(random.randrange(2**32))
    r = RegisterManager("R5:error_code R4:last_error R3:temp R0:vars")
    fw = [
        # error code is already in R5. since we don't return, we don't have to
        # set up our own register frame
        # BUGFIX: load the variable-block pointer up front. it used to be
        # loaded only on the "regular" path, so the fatal path reached the
        # store at "transmit" with whatever the caller happened to have in
        # R0 and wrote last_error through a garbage pointer.
        MOVR(r.vars, "vars"),
        # is the current error a fatal error?
        CMPI(r.error_code, ErrorCode.FATAL_ERROR_START),
        BLTU(lp+"regular"), # no, handle it normally
        # yes. disable latching so the console can no longer see pressed
        # buttons. if the error was a missed latch, then the console probably
        # saw garbage, but now it can't anymore.
        MOVI(r.temp, 0),
        STXA(r.temp, p_map.snes.w_enable_latch),
        J(lp+"transmit"), # fatal error codes are always sent
        L(lp+"regular"),
        # do we already have an error code stored?
        LD(r.last_error, r.vars, Vars.last_error),
        CMPI(r.last_error, ErrorCode.NONE),
        # yes, the host already knows that there was an error, and one error is
        # likely to lead to more. just go back to the main loop and wait.
        BNE("main_loop"),
        # no stored error, so fall through and send the current one out
        L(lp+"transmit"),
        # store the error
        ST(r.error_code, r.vars, Vars.last_error),
        # then send a status packet containing that error
        J("send_status_packet")
    ]
    return fw
# put a new latch into the SNES interface if necessary
# on entry (in caller window)
# R7: return address
def f_update_interface(controller_addrs, buf_size):
    """Generate the asm for "update_interface".

    Called with JAL through R7 (R7 lives in the caller's window).  If the
    word set currently in the SNES interface has been latched by the
    console, the next buffered latch is copied out and the tail pointer is
    advanced.  Jumps to handle_error with BUFFER_UNDERRUN or MISSED_LATCH
    (both fatal) when the buffer is empty or another latch fired while we
    were copying.  Allocates its own 8-register frame via ADJW.
    """
    num_controllers = len(controller_addrs)
    lp = "_{}_".format(random.randrange(2**32))
    r = RegisterManager("R6:last_error R5:buf_head R4:buf_tail R3:buf_addr "
        "R2:status R1:latch_data R0:vars")
    fw = [
        # set up register frame
        ADJW(-8),
        # did the data in the interface get latched?
        LDXA(r.status, p_map.snes.r_did_latch),
        AND(r.status, r.status, r.status),
        BZ(lp+"ret"), # if zero, then no; we don't need to put anything new in
        # it did, so we have to update it. load the buffer pointers
        MOVR(r.vars, "vars"),
        # first check if we logged a fatal error
        LD(r.last_error, r.vars, Vars.last_error),
        CMPI(r.last_error, ErrorCode.FATAL_ERROR_START),
        # if we did, just return. this makes sure send_status_packet will
        # continue sending the packet instead of us re-entering it through
        # handle_error and sending another packet in the middle of the first
        BGEU(lp+"ret"),
        LD(r.buf_head, r.vars, Vars.buf_head),
        LD(r.buf_tail, r.vars, Vars.buf_tail),
        # is there anything in there?
        CMP(r.buf_head, r.buf_tail),
        BEQ(lp+"empty"), # the pointers are equal, so nope
        # ah, good. there is. convert the buffer tail index into the address
        i_calc_latch_addr(r.buf_addr, r.buf_tail, num_controllers),
    ]
    # then transfer that data to the interface
    for controller_i, controller_addr in enumerate(controller_addrs):
        fw.append([
            LD(r.latch_data, r.buf_addr, controller_i),
            STXA(r.latch_data, controller_addr),
        ])
    fw.append([
        # did we miss a latch? if another latch happened while we were
        # transferring data (or before we started), the console would get junk.
        # this read also clears the did latch and missed latch flags.
        LDXA(r.status, p_map.snes.r_missed_latch_and_ack),
        AND(r.status, r.status, r.status),
        BNZ(lp+"missed"), # ah crap, the flag is set.
        # otherwise, we've done our job. advance the buffer pointer.
        ADDI(r.buf_tail, r.buf_tail, 1),
        CMPI(r.buf_tail, buf_size),
        BNE(lp+"advanced"),
        MOVI(r.buf_tail, 0),  # wrap the ring buffer index
        L(lp+"advanced"),
        ST(r.buf_tail, r.vars, Vars.buf_tail),
        L(lp+"ret"),
        ADJW(8),
        JR(R7, 0), # R7 in caller's window
    ])
    r -= "buf_head"
    r += "R5:error_code"
    fw.append([
        L(lp+"empty"), # the buffer is empty so we are screwed
        ADJW(8),
        # note: the MOVI below runs after ADJW, so error_code lands in the
        # caller's R5, which is where handle_error expects it
        MOVI(r.error_code, ErrorCode.BUFFER_UNDERRUN),
        J("handle_error"),
        L(lp+"missed"), # we missed a latch so we are screwed
        ADJW(8),
        MOVI(r.error_code, ErrorCode.MISSED_LATCH),
        J("handle_error"),
    ])
    return fw
# jumps right back to main loop
def send_status_packet(buf_size):
    """Generate the asm for "send_status_packet".

    Builds and transmits the 6-word status packet (header, result code,
    last error, stream position, buffer space, CRC).  While waiting for
    UART TX buffer space it repeatedly branches into update_interface so
    the console keeps getting latches.  Also rearms the 25ms status timer.
    Entered with J("send_status_packet"); exits by jumping to "main_loop".
    """
    lp = "_{}_".format(random.randrange(2**32))
    r = RegisterManager(
        "R7:lr R6:comm_word R5:txlr R4:temp "
        "R3:space_remaining R2:buf_head R1:buf_tail R0:vars")
    fw = [
        L("send_status_packet"),
        # calculate status variables
        MOVR(r.vars, "vars"),
        LD(r.buf_tail, r.vars, Vars.buf_tail),
        LD(r.buf_head, r.vars, Vars.buf_head),
        # space = (tail - head - 1) mod buf_size; add buf_size first when
        # tail <= head so the subtraction can't go negative
        CMP(r.buf_tail, r.buf_head),
        BGTU(lp+"not_wrapped"),
        ADDI(r.buf_tail, r.buf_tail, buf_size),
        L(lp+"not_wrapped"),
        SUB(r.space_remaining, r.buf_tail, r.buf_head),
        SUBI(r.space_remaining, r.space_remaining, 1), # one is always empty
    ]
    r -= "buf_head buf_tail"
    r += "R2:stream_pos R1:last_error"
    fw.append([
        LD(r.stream_pos, r.vars, Vars.stream_pos),
        LD(r.last_error, r.vars, Vars.last_error),
        # send the header first
        MOVI(r.comm_word, 0x7A5A),
        JAL(r.txlr, lp+"tx_comm_word"),
        # then reset the UART CRC (the header is excluded from the CRC)
        STXA(r.temp, p_map.uart.w_crc_reset), # we can write anything
        # result word: code 0x10, 3 parameters
        MOVI(r.comm_word, 0x1003),
        JAL(r.txlr, lp+"tx_comm_word"),
        MOV(r.comm_word, r.last_error),
        JAL(r.txlr, lp+"tx_comm_word"),
        MOV(r.comm_word, r.stream_pos),
        JAL(r.txlr, lp+"tx_comm_word"),
        MOV(r.comm_word, r.space_remaining),
        JAL(r.txlr, lp+"tx_comm_word"),
        # CRC is still being calculated, prepare for return
        MOVR(r.txlr, "main_loop"), # return destination
        # reset the timer to send another status packet in another 25ms
        MOVI(r.temp, int((12e6*(25/1000))/256)),
        STXA(r.temp, p_map.timer.timer[0].w_value),
        # now we can send it
        LDXA(r.comm_word, p_map.uart.r_crc_value),
        # fall through
        L(lp+"tx_comm_word"),
        # set return address to first loop so we can branch to update_interface
        # and have it return correctly
        MOVR(r.lr, lp+"tx_lo"),
        L(lp+"tx_lo"),
        # wait for buffer space
        LDXA(r.temp, p_map.uart.r_tx_status),
        ANDI(r.temp, r.temp, 1),
        # none yet, go update the interface while we wait
        BZ0("update_interface"),
        # then send the low byte
        STXA(r.comm_word, p_map.uart.w_tx_lo),
        # and repeat for the high byte
        MOVR(r.lr, lp+"tx_hi"),
        L(lp+"tx_hi"),
        LDXA(r.temp, p_map.uart.r_tx_status),
        ANDI(r.temp, r.temp, 1),
        BZ0("update_interface"),
        STXA(r.comm_word, p_map.uart.w_tx_hi),
        # and we're done
        JR(r.txlr, 0),
        # WARNING! the CRC is still being calculated, so reading it immediately
        # after this function returns will return garbage
    ])
    return fw
# jumps right back to main loop.
# on entry (in caller window)
# R3: param3
# R2: param2
# R1: param1
# needs to be really fast. we have less than 30 instructions per word!
def cmd_send_latches(controller_addrs, buf_size):
    """Generate the asm for command 0x10 (send latches).

    On entry (caller window) R1-R3 hold the command parameters: stream
    position, latch count, unused.  Receives count*C words plus a CRC;
    the buffer head and stream position are committed only after the CRC
    validates, so a failed transfer leaves the buffer untouched.  Jumps
    back to "main_loop" on success, "handle_error" otherwise.

    NOTE(review): the buf_size parameter is unused here -- the latch
    capacity is recomputed locally via calc_buf_size.
    """
    num_controllers = len(controller_addrs)
    latch_buf_size = calc_buf_size(num_controllers)
    lp = "_{}_".format(random.randrange(2**32))
    r = RegisterManager(
        "R7:lr R6:comm_word R5:error_code R4:stream_pos "
        "R3:buf_head R2:length R1:input_stream_pos R0:vars")
    fw = [
        L("cmd_send_latches"),
        MOVR(r.vars, "vars"),
        LD(r.buf_head, r.vars, Vars.buf_head),
        # the host needs to send us latches that fit at the end of our buffer.
        # if there is a mismatch in stream position, this won't work.
        LD(r.stream_pos, r.vars, Vars.stream_pos),
        CMP(r.stream_pos, r.input_stream_pos),
        BEQ(lp+"right_pos"),
        MOVI(r.error_code, ErrorCode.BAD_STREAM_POS),
        J("handle_error"),
    ]
    r -= "vars stream_pos error_code"
    r += "R0:buf_addr R4:temp R5:rxlr"
    fw.append([
        L(lp+"right_pos"),
        # figure out the address where we'll be sticking the latches
        i_calc_latch_addr(r.buf_addr, r.buf_head, num_controllers),
        # if everything goes well, we'll have received all of them. if it
        # doesn't, we won't store these calculated values and so the buffer head
        # and stream position won't actually be advanced.
        ADD(r.input_stream_pos, r.input_stream_pos, r.length),
        ADD(r.buf_head, r.buf_head, r.length),
        CMPI(r.buf_head, latch_buf_size),
        BLTU(lp+"loop"),
        SUBI(r.buf_head, r.buf_head, latch_buf_size),
        L(lp+"loop"),
    ])
    # receive all the words in this latch
    for controller_i in range(num_controllers):
        fw.append([
            JAL(r.rxlr, "rx_comm_word"),
            ST(r.comm_word, r.buf_addr, controller_i),
        ])
    fw.append([
        # keep the interface full
        JAL(r.lr, "update_interface"),
        # advance to the next buffer position
        ADDI(r.buf_addr, r.buf_addr, num_controllers),
        CMPI(r.buf_addr, LATCH_BUF_START+num_controllers*latch_buf_size),
        BNE(lp+"not_wrapped"),
        MOVI(r.buf_addr, LATCH_BUF_START),
        L(lp+"not_wrapped"),
        # do we have any latches remaining?
        SUBI(r.length, r.length, 1),
        BNZ(lp+"loop"), # yup, go take care of them
        # receive and validate the CRC
        JAL(r.rxlr, "rx_comm_word"),
    ])
    r -= "rxlr buf_addr"
    r += "R5:error_code R0:vars"
    fw.append([
        # assume there was a CRC error
        MOVI(r.error_code, ErrorCode.BAD_CRC),
        # receiving the sender's CRC folds it into our running CRC, which
        # ends at zero iff everything matched
        LDXA(r.temp, p_map.uart.r_crc_value),
        AND(r.temp, r.temp, r.temp),
        # oh no, we were right. go handle it.
        BZ0("handle_error"),
        # if the CRC validated, then all the data is good and we can update the
        # head pointer in order to actually save the latches
        MOVR(r.vars, "vars"),
        ST(r.buf_head, r.vars, Vars.buf_head),
        # and stream position
        ST(r.input_stream_pos, r.vars, Vars.stream_pos),
        # and now, we are done
        J("main_loop"),
    ])
    return fw
# this is a weird pseudo-function (and two subfunctions) to handle receiving
# data from the UART. it doesn't set up its own register frame.
def rx_comm_word():
    """Generate "rx_comm_word": receive one little-endian word from the UART.

    Pseudo-function: runs in the caller's register window and returns
    through R5 (rxlr) with the received word in R6.  While waiting for
    bytes it keeps branching into update_interface so the console never
    starves.  UART errors divert to "rcw_error", which raises RX_TIMEOUT
    or RX_ERROR via handle_error.
    """
    lp = "_{}_".format(random.randrange(2**32))
    r = RegisterManager("R7:lr R6:comm_word R5:rxlr R4:temp")
    fw = []
    fw.append([
        L("rx_comm_word"),
        # set return address to first loop so we can branch to update_interface
        # and have it return correctly
        MOVR(r.lr, lp+"rcw_lo"),
        L(lp+"rcw_lo"),
        # check for UART errors (timeouts, overflows, etc.)
        LDXA(r.temp, p_map.uart.r_error),
        AND(r.temp, r.temp, r.temp), # set flags
        BZ0("rcw_error"),
        # check if we have a new byte
        LDXA(r.comm_word, p_map.uart.r_rx_lo),
        # rotate the "empty" flag (bit 15) into the sign position to test it
        ROLI(r.comm_word, r.comm_word, 1),
        # nope, go keep the interface up to date (it will return to rcw_lo)
        BS1("update_interface"),
        # we have the low byte in comm_word
        L("rx_comm_byte_hi"),
        MOVR(r.lr, lp+"rcw_hi"),
        L(lp+"rcw_hi"),
        # check again for UART errors
        LDXA(r.temp, p_map.uart.r_error),
        AND(r.temp, r.temp, r.temp),
        BZ0("rcw_error"),
        # and see if we have the high byte yet
        LDXA(r.temp, p_map.uart.r_rx_hi),
        ADD(r.temp, r.temp, r.temp),
        # nope, go keep the interface up to date (it will return to rcw_hi)
        BC1("update_interface"),
        # put the bytes together
        OR(r.comm_word, r.comm_word, r.temp),
        # and we are done
        JR(r.rxlr, 0),
    ])
    r -= "rxlr"
    r += "R5:error_code"
    fw.append([
        L("rcw_error"),
        # assume it was a timeout error
        MOVI(r.error_code, ErrorCode.RX_TIMEOUT),
        # was it a timeout error?
        ANDI(r.comm_word, r.temp, 2),
        BZ0("handle_error"), # it was. go deal with it
        # otherwise, it must have been a framing error
        MOVI(r.error_code, ErrorCode.RX_ERROR),
        # go deal with it
        J("handle_error"),
    ])
    return fw
def rx_header():
    """Generate "rx_header": wait for the 0x7A5A packet header.

    Normal path: expect the 0x5A then 0x7A bytes, treating a timeout on the
    first byte as "no packet yet" (back to main_loop) and anything
    unexpected as INVALID_COMMAND.  Error path: scan the possibly
    misaligned byte stream for the header to resynchronize, ignoring UART
    errors, while still servicing the interface and the status timer.
    Falls through to "main_loop_after_header" once the header is seen.
    """
    lp = "_{}_".format(random.randrange(2**32))
    r = RegisterManager(
        "R7:lr R6:comm_word R5:error_code R4:temp "
        "R0:vars")
    fw = [
        L("rx_header"),
        # is there an active error?
        MOVR(r.vars, "vars"),
        LD(r.error_code, r.vars, Vars.last_error),
        CMPI(r.error_code, ErrorCode.NONE),
        BNE(lp+"in_error"), # yup, handle that separately
    ]
    fw.append([
        # here there isn't, so we are careful to check for errors and raise them
        # appropriately. we also are sure to keep the interface up to date. the
        # only error we generate here is invalid command if one of the bytes is
        # wrong.
        MOVI(r.error_code, ErrorCode.INVALID_COMMAND),
        MOVR(r.lr, lp+"rx_header_lo"), # return here from update_interface
        L(lp+"rx_header_lo"), # the 0x5A
        LDXA(r.temp, p_map.uart.r_error),
        CMPI(r.temp, 2), # is there only a timeout error?
        BEQ("main_loop"), # yup. rerun the main loop to reset everything
        AND(r.temp, r.temp, r.temp),
        BZ0("rcw_error"), # if there's some other error, raise the alarm
        # receive the byte and complain if it doesn't match
        LDXA(r.comm_word, p_map.uart.r_rx_hi),
        ADD(r.comm_word, r.comm_word, r.comm_word),
        BC1("update_interface"), # update the interface if nothing's come yet
        CMPI(r.comm_word, 0x5A << 8),
        BNE("handle_error"), # the error was already loaded
        MOVR(r.lr, lp+"rx_header_hi"), # return here from update_interface
        L(lp+"rx_header_hi"), # the 0x7A this time
        LDXA(r.temp, p_map.uart.r_error),
        # timeouts aren't accepted on the second byte.
        AND(r.temp, r.temp, r.temp),
        BZ0("rcw_error"), # if there's an error, raise the alarm.
        # receive the byte and complain if it doesn't match
        LDXA(r.comm_word, p_map.uart.r_rx_hi),
        ADD(r.comm_word, r.comm_word, r.comm_word),
        BC1("update_interface"), # update the interface if nothing's come yet
        CMPI(r.comm_word, 0x5A << 8), # if we got the low byte instead
        BEQ(lp+"rx_header_hi"), # wait for the high byte again
        CMPI(r.comm_word, 0x7A << 8), # complain if it wasn't right
        BNE("handle_error"),
        L(lp+"got_it"),
        # the header was received and it's good. get back to the action (after
        # tending to the interface again)
        MOVR(r.lr, "main_loop_after_header"),
        J("update_interface"),
    ])
    r -= "vars"
    r += "R3:update_time R2:header_curr R1:header_hi R0:header_lo"
    fw.append([
        L(lp+"in_error"),
        # in an error state, we could be off by a byte and thus the RX buffer is
        # filled with junk. we only have at most 15 instructions per byte, and
        # we're already dealing with an error, so we don't raise errors here.
        # load the header parts into registers to save EXTIs
        MOVI(r.header_hi, 0x7A << 8),
        MOVI(r.header_lo, 0x5A << 8),
        # we receive the low byte first
        MOV(r.header_curr, r.header_lo),
        L(lp+"rx_start"),
        # poll the receiver up to 12 times before checking on the interface
        MOVI(r.update_time, 12),
        L(lp+"rx_something"),
        SUBI(r.update_time, r.update_time, 1),
        BZ(lp+"update"), # it's time to check
        # we don't care about errors at all, just receiving the right thing
        LDXA(r.comm_word, p_map.uart.r_rx_hi),
        ADD(r.comm_word, r.comm_word, r.comm_word),
        BC1(lp+"rx_something"), # nothing yet
        CMP(r.header_curr, r.header_hi),
        BEQ(lp+"wait_hi"), # go handle waiting for the high byte separately
        # if we are waiting for the low byte
        CMP(r.comm_word, r.header_lo), # did we get the low byte?
        BNE(lp+"rx_something"), # nope, try again
        # we did, so start waiting for the high byte
        MOV(r.header_curr, r.header_hi),
        J(lp+"rx_something"),
        L(lp+"wait_hi"),
        # did we actually get a low byte?
        CMP(r.comm_word, r.header_lo),
        BEQ(lp+"rx_something"), # go back waiting for the high byte
        # did we then get the high byte we wanted?
        CMP(r.comm_word, r.header_hi),
        BEQ(lp+"got_hi"), # yes, header received!
        # nope. wait for low again
        MOV(r.header_curr, r.header_lo),
        J(lp+"rx_something"),
        L(lp+"got_hi"),
        # clear out any error in the UART
        MOVI(r.temp, 0xFFFF),
        STXA(r.temp, p_map.uart.w_error_clear),
        J(lp+"got_it"),
        L(lp+"update"),
        JAL(r.lr, "update_interface"),
        # is it time to send a status packet? sending a status packet is kind of
        # lame because it will go back to the main loop and so reset to the low
        # byte. but it gets sent relatively rarely so we accept that problem.
        LDXA(r.temp, p_map.timer.timer[0].r_ended),
        AND(r.temp, r.temp, r.temp),
        BZ1(lp+"rx_start"), # the timer is still going, so no
        J("send_status_packet"),
    ])
    return fw
def main_loop_body():
    """Generate the main loop.

    Services the interface, sends the periodic status packet when the 25ms
    timer expires, then receives and dispatches one command packet.
    Commands 0x10 (send latches) and 0x11 (request status) are dispatched
    after CRC validation; the bootloader hello (0x0102, one word shorter)
    resets into the bootloader.  A valid packet clears any non-fatal
    stored error.
    """
    lp = "_{}_".format(random.randrange(2**32))
    r = RegisterManager(
        "R7:lr R6:comm_word R5:rxlr R4:temp "
        "R3:param3 R2:param2 R1:param1 R0:command")
    fw = [
        L("main_loop"),
        # make sure the interface is kept up to date
        JAL(r.lr, "update_interface"),
        # is it time to send the status packet?
        LDXA(r.temp, p_map.timer.timer[0].r_ended),
        AND(r.temp, r.temp, r.temp),
        BZ0("send_status_packet"), # the timer ended
        # clear any UART errors and reset the receive timeout
        MOVI(r.temp, 0xFFFF),
        STXA(r.temp, p_map.uart.w_error_clear),
        # receive the header. this handles all the header-related problems too
        # (and trashes all the registers incidentally)
        J("rx_header"),
        # and it returns here
        L("main_loop_after_header"),
        # who knows what the CRC is now after receiving whatever header data
        STXA(r.temp, p_map.uart.w_crc_reset), # write something to reset it
        # receive the command packet
        JAL(r.rxlr, "rx_comm_word"),
        MOV(r.command, r.comm_word),
        JAL(r.rxlr, "rx_comm_word"),
        MOV(r.param1, r.comm_word),
        JAL(r.rxlr, "rx_comm_word"),
        MOV(r.param2, r.comm_word),
        JAL(r.rxlr, "rx_comm_word"),
        # if the host sent the hello command, we need to check for it now
        # because it's a word shorter than all the others
        CMPI(r.command, 0x0102),
        BEQ(lp+"handle_hello"),
        MOV(r.param3, r.comm_word),
        JAL(r.rxlr, "rx_comm_word"),
        # if the current CRC is x, then CRC(x) = 0, always. we use that to
        # reset the CRC to 0 when we are done sending or receiving.
        # so, we've received all the data words and the CRC. if everything went
        # okay, the CRC should now be 0.
        LDXA(r.temp, p_map.uart.r_crc_value),
        AND(r.temp, r.temp, r.temp),
        BZ(lp+"crc_ok"),
    ]
    r -= "rxlr"
    r += "R5:error_code"
    fw.append([
        L(lp+"crc_bad"),
        # aw heck, it didn't go okay. send the appropriate error.
        MOVI(r.error_code, ErrorCode.BAD_CRC),
        J("handle_error"),
        L(lp+"crc_ok"),
        # we've got a valid packet, so reset the error state (if the error was
        # not fatal)
        # rudely borrow LR
        MOVR(r.lr, "vars"),
        LD(r.error_code, r.lr, Vars.last_error),
        CMPI(r.error_code, ErrorCode.FATAL_ERROR_START),
        BGEU(lp+"error_done"),
        MOVI(r.error_code, ErrorCode.NONE),
        ST(r.error_code, r.lr, Vars.last_error),
        L(lp+"error_done"),
        # make sure the interface is kept up to date
        JAL(r.lr, "update_interface"),
        # now we need to figure out the command. the low 8 bits are the length,
        # which is always 3. the high 8 bits are the command number, and the
        # first command number is 0x10. each command is separated by 0x100.
        SUBI(r.command, r.command, 0x1003),
        BEQ("cmd_send_latches"),
        SUBI(r.command, r.command, 0x100),
        BEQ("send_status_packet"),
        # oh no, we don't know the command
        MOVI(r.error_code, ErrorCode.INVALID_COMMAND),
        J("handle_error"),
        L(lp+"handle_hello"),
        # validate the CRC (the word was already received for us)
        LDXA(r.temp, p_map.uart.r_crc_value),
        AND(r.temp, r.temp, r.temp),
        BNZ(lp+"crc_bad"),
        # we got a valid hello. reset into the bootloader.
        MOVI(R0, 0xFADE),
        MOVI(R1, 0xDEAD),
        STXA(R0, p_map.reset_req.w_enable_key_fade),
        STXA(R1, p_map.reset_req.w_perform_key_dead),
        J(-1), # hang until it happens
    ])
    return fw
# we accept some priming latches to download with the code. this way there is
# some stuff in the buffer before communication gets reestablished. really we
# only need one latch that we can put in the interface at the very start. just
# sticking it in the buffer to begin with avoids special-casing that latch, and
# the extra is nice to jumpstart the buffer.
def make_firmware(controllers, priming_latches,
        apu_freq_basic=None,
        apu_freq_advanced=None):
    """Assemble the TAS playback firmware image.

    controllers: list of controller register names (keys of
        controller_name_to_addr); the order defines the word order of each
        latch in the stream and in the buffer.
    priming_latches: flat list of words (len(controllers) words per latch)
        preloaded so playback can start before communication is
        reestablished. The first latch goes directly into the interface.
    apu_freq_basic / apu_freq_advanced: optional initial APU clock register
        values; advanced may only be given together with basic.

    Returns the assembled firmware as a list of words: code, padding up to
    the latch buffer, then the remaining priming latches.
    Raises ValueError on unknown controller names, bad priming data, or
    code that overflows FW_MAX_LENGTH.
    """
    num_controllers = len(controllers)
    buf_size = calc_buf_size(num_controllers)
    # convert controllers from list of names to list of absolute register
    # addresses because that's what the system writes to
    controller_addrs = []
    for controller in controllers:
        try:
            addr = controller_name_to_addr[controller]
        # BUGFIX: a dict lookup raises KeyError, not IndexError; the old
        # clause could never catch, so unknown names escaped uncontrolled.
        except KeyError:
            raise ValueError("unknown controller name '{}'".format(
                controller)) from None
        controller_addrs.append(addr)
    if apu_freq_basic is None and apu_freq_advanced is not None:
        raise ValueError("must set apu basic before advanced")
    num_priming_latches = len(priming_latches)//num_controllers
    if len(priming_latches) % num_controllers != 0:
        raise ValueError("priming latches must have {} words per latch".format(
            num_controllers))
    if num_priming_latches == 0:
        raise ValueError("must have at least one priming latch")
    if num_priming_latches > buf_size:
        raise ValueError("too many priming latches: got {}, max is {}".format(
            num_priming_latches, buf_size))
    fw = [
        # start from "reset" (i.e. download is finished)
        # set up initial register window. we get a free one from the bootloader
        # (so that we can load a register with the window address), but we can't
        # keep using it.
        MOVI(R0, INITIAL_REGISTER_WINDOW),
        STW(R0),
        # set UART receive timeout to about 2ms. we can't afford to be waiting!
        MOVI(R0, int((12e6*(2/1000))/256)),
        STXA(R0, p_map.uart.w_rt_timer),
        # same timeout for the status timer, just cause it's already in the
        # register. once it expires, the correct value will be loaded.
        STXA(R0, p_map.timer.timer[0].w_value),
    ]
    # out of reset, the button registers are all zero, the APU frequency is
    # 24.607104MHz, and latching is disabled. as long as latching remains
    # disabled, the frequency won't change and the console will see no buttons
    # no matter how much it latches.
    # set the initial APU frequency values
    if apu_freq_basic is not None:
        fw.append([
            MOVI(R2, int(apu_freq_basic) & 0xFFFF),
            STXA(R2, p_map.snes.w_apu_freq_basic),
        ])
    if apu_freq_advanced is not None:
        fw.append([
            MOVI(R2, int(apu_freq_advanced) & 0xFFFF),
            STXA(R2, p_map.snes.w_apu_freq_advanced),
        ])
    # force a latch so the APU clock generator gets updated
    fw.append(STXA(R2, p_map.snes.w_force_latch))
    # load the initial buttons into the registers
    for controller_i, controller_addr in enumerate(controller_addrs):
        fw.append([
            MOVI(R2, priming_latches[controller_i]),
            STXA(R2, controller_addr),
        ])
    # now that the registers are loaded, we can turn latching back on. this
    # setup guarantees the console will transition directly from seeing no
    # buttons to seeing the first set of buttons once it latches. there can't be
    # any intermediate states.
    fw.append([
        MOVI(R2, 1),
        STXA(R2, p_map.snes.w_enable_latch),
    ])
    # initialization is done. let's get the party started!
    fw.append(J("main_loop"))
    fw.append(send_status_packet(buf_size))
    fw.append(main_loop_body())
    fw.append(rx_comm_word())
    fw.append(cmd_send_latches(controller_addrs, buf_size))
    # define all the variables
    defs = [0]*len(Vars)
    # the buffer is primed with some latches so that we can start before
    # communication gets reestablished. but we put one in the interface at the
    # beginning
    defs[Vars.buf_head] = num_priming_latches-1
    defs[Vars.stream_pos] = num_priming_latches
    fw.append([
        L("vars"),
        defs
    ])
    # include all the functions
    fw.append([
        L("handle_error"),
        f_handle_error(),
        L("update_interface"),
        f_update_interface(controller_addrs, buf_size),
    ])
    # header reception is called once so we stick it far away
    fw.append(rx_header())
    # assemble just the code region
    assembled_fw = Instr.assemble(fw)
    fw_len = len(assembled_fw)
    if len(assembled_fw) > FW_MAX_LENGTH:
        raise ValueError(
            "firmware length {} is over max of {} by {} words".format(
                fw_len, FW_MAX_LENGTH, fw_len-FW_MAX_LENGTH))
    elif False:  # debug toggle: report how much code space remains
        print("firmware length {} is under max of {} by {} words".format(
            fw_len, FW_MAX_LENGTH, FW_MAX_LENGTH-fw_len))
    # pad it out until the latch buffer starts
    assembled_fw.extend([0]*(LATCH_BUF_START-len(assembled_fw)))
    # then fill it with the priming latches (skipping the one we stuck in the
    # interface at the beginning)
    assembled_fw.extend(priming_latches[num_controllers:])
    return assembled_fw
| StarcoderdataPython |
4881385 | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Meta information about the service.
Currently this only provides API versioning information
"""
from flask import current_app
from colin_api.exceptions import GenericException
from colin_api.resources.db import DB
class EntityName:
    """Entity (corporation) name, backed by the COLIN CORP_NAME table."""

    legal_name = None  # CORP_NME value for the corporation
    event_id = None    # START_EVENT_ID that made this name effective

    def __init__(self):
        """Initialize with all values None."""

    def as_dict(self):
        """Return dict camel case version of self."""
        return {
            'legalName': self.legal_name,
            'eventId': self.event_id
        }

    @classmethod
    def _create_name_obj(cls, cursor, identifier: str = None):
        """Build an EntityName from the next row of *cursor*.

        Expects a row with columns START_EVENT_ID and CORP_NME.  Raises
        GenericException(404) when the cursor yields no row; logs when more
        than one row comes back (only the first is used).
        """
        corp_name_info = cursor.fetchone()
        if not corp_name_info:
            raise GenericException(error=f'{identifier} name not found', status_code=404)
        test_second_name = cursor.fetchone()
        if test_second_name:
            current_app.logger.error(f'Got more than 1 current name for {identifier}')
        corp_name_info = dict(zip([x[0].lower() for x in cursor.description], corp_name_info))
        name_obj = EntityName()
        name_obj.legal_name = corp_name_info['corp_nme']
        name_obj.event_id = corp_name_info['start_event_id']
        return name_obj

    @classmethod
    def get_current(cls, cursor, identifier: str = None):
        """Get the current (un-ended) entity name for *identifier*."""
        if not identifier:
            return None
        # BUGFIX: the column is CORP_NME (as selected by get_by_event and read
        # by _create_name_obj); the query previously selected "corp_name",
        # which is the table name, not the column.
        querystring = ("""
            select start_event_id, corp_nme
            from corp_name
            where corp_num=:identifier and end_event_id is null
            """)
        try:
            if not cursor:
                cursor = DB.connection.cursor()
            cursor.execute(querystring, identifier=identifier)
            return cls._create_name_obj(cursor=cursor, identifier=identifier)
        except Exception as err:
            current_app.logger.error('error getting entity name for corp: {}'.format(identifier))
            raise err

    @classmethod
    def get_by_event(cls, cursor, identifier: str = None, event_id: str = None):
        """Get the entity name that became effective at *event_id*."""
        if not identifier or not event_id:
            return None
        querystring = ("""
            select start_event_id, corp_nme
            from corp_name
            where corp_num=:identifier and start_event_id=:event_id
            """)
        try:
            if not cursor:
                cursor = DB.connection.cursor()
            cursor.execute(querystring, identifier=identifier, event_id=event_id)
            return cls._create_name_obj(cursor=cursor, identifier=identifier)
        except Exception as err:
            current_app.logger.error('error getting entity name for corp: {}'.format(identifier))
            raise err
| StarcoderdataPython |
1884439 |
# Loop Through a List
# You can loop through the list items by using a for loop:
# Print all items in the list, one by one:
thislist = ["apple", "banana", "cherry"]
for x in thislist:
    print(x)

thislist = [1, 2, 3]
for x in thislist:
    print(x)
# Learn more about for loops in our Python For Loops Chapter.

# Loop Through the Index Numbers
# You can also loop through the list items by referring to their index number.
# Use the range() and len() functions to create a suitable iterable.
# Print all items by referring to their index number:
thislist = ["apple", "banana", "cherry", 'jbj']
for i in range(len(thislist)):
    print(thislist[i])

thislist = [1, 2, 3]
for i in range(len(thislist)):
    print(thislist[i])
# The iterable created for the 3-item example above is [0, 1, 2].

# Using a While Loop
# You can loop through the list items by using a while loop.
# Use the len() function to determine the length of the list, then start at 0
# and loop your way through the list items by referring to their indexes.
# Remember to increase the index by 1 after each iteration.
# Print all items, using a while loop to go through all the index numbers
thislist = ["apple", "banana", "cherry"]
i = 0
while i < len(thislist):
    print(thislist[i])
    i = i + 1

thislist = [1, 2, 3]
# BUG FIX: the index started at 1, which skipped the first element even
# though the instructions above say to start at 0.
i = 0
while i < len(thislist):
    print(thislist[i])
    i = i + 1
# Learn more about while loops in our Python While Loops Chapter.

# Looping Using List Comprehension
# List Comprehension offers the shortest syntax for looping through lists:
# A short hand for loop that will print all items in a list:
thislist = ["apple", "banana", "cherry"]
[print(x) for x in thislist]

thislist = ["none", "none", "none"]
[print(x) for x in thislist]
| StarcoderdataPython |
1832825 | import torch
import torch.nn as nn
import torch.nn.parallel
from miscc.config import cfg
from torch.autograd import Variable
def conv3_1d(in_planes, out_planes, stride=1, padding=False):
    """Kernel-size-3 1-D convolution; pads by 1 only when *padding* is set."""
    pad = int(padding)  # False -> 0, True -> 1
    return nn.Conv1d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=pad, bias=True)
def conv3_1dxn(in_planes, out_planes, n_layers, stride=1, padding=False):
    """Stack *n_layers* (conv3_1d -> BatchNorm1d -> LeakyReLU) blocks, then Dropout(0.4)."""
    channel_ins = [in_planes] + [out_planes] * (n_layers - 1)
    blocks = []
    for c_in in channel_ins:
        blocks.append(nn.Sequential(
            conv3_1d(c_in, out_planes, stride, padding),
            nn.BatchNorm1d(out_planes),
            nn.LeakyReLU(0.2, inplace=True),
        ))
    blocks.append(nn.Sequential(nn.Dropout(p=0.4)))
    return nn.Sequential(*blocks)
def conv3x3_2d(in_planes, out_planes, stride=1):
    """3x3 2-D convolution, padding 1, no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
def conv3x3_3d(in_vol, out_vol, stride=1):
    """3x3x3 3-D convolution, padding 1, no bias."""
    return nn.Conv3d(in_vol, out_vol, kernel_size=3,
                     stride=stride, padding=1, bias=False)
# Upscale the spatial size by a factor of 2
def upBlock_2d(in_planes, out_planes, scale_factor=2):
    """Nearest-neighbour upsample by *scale_factor*, then conv3x3 + BN + ReLU."""
    return nn.Sequential(
        nn.Upsample(scale_factor=scale_factor, mode='nearest'),
        conv3x3_2d(in_planes, out_planes),
        nn.BatchNorm2d(out_planes),
        nn.ReLU(True),
    )
# Upscale the spatial size by a factor of 2
def upBlock_3d(in_vol, out_vol, scale_factor=(2, 2, 2)):
    """Trilinear upsample by *scale_factor*, then conv3x3x3 + BN + ReLU."""
    return nn.Sequential(
        nn.Upsample(scale_factor=scale_factor, mode='trilinear'),
        conv3x3_3d(in_vol, out_vol),
        nn.BatchNorm3d(out_vol),
        nn.ReLU(True),
    )
class Squeeze(nn.Module):
    """Drop every dimension of size 1 from the input tensor.

    NOTE(review): torch.squeeze removes *all* singleton dims, including the
    batch dimension when batch size is 1 — verify callers never run with a
    batch of 1, or squeeze an explicit dim instead.
    """

    def forward(self, x):
        return torch.squeeze(x)  # "flatten" the C * H * W values into a single vector per image
class ResBlock1d(nn.Module):
    """1-D residual block: conv-BN-ReLU-conv-BN plus a projected shortcut.

    When in_dim == feat_dim and stride == 1 the identity shortcut is used;
    otherwise the input goes through a 1x1 conv + BN + average pooling so the
    two branches match in channels and length.
    """

    def __init__(self, in_dim, feat_dim, stride=2, dropout=0.4):
        super(ResBlock1d, self).__init__()
        self.in_dim = in_dim
        self.feat_dim = feat_dim
        self.stride = stride
        self.dropout = dropout
        self.define_module()

    def define_module(self):
        # main branch
        self.cell = nn.Sequential(
            conv3_1d(self.in_dim, self.feat_dim, stride=self.stride, padding=True),
            nn.BatchNorm1d(self.feat_dim),
            nn.ReLU(True),
            conv3_1d(self.feat_dim, self.feat_dim, stride=1, padding=True),
            nn.BatchNorm1d(self.feat_dim),
        )
        # projection shortcut (only used when shapes differ; see forward)
        self.residual = nn.Sequential(
            nn.Conv1d(self.in_dim, self.feat_dim, kernel_size=1, stride=1),
            nn.BatchNorm1d(self.feat_dim),
            nn.AvgPool1d(self.stride),
        )

    def forward(self, x):
        if (self.in_dim == self.feat_dim) and (self.stride == 1):
            out = torch.nn.functional.relu((self.cell(x) + x), True)
        else:
            out = torch.nn.functional.relu((self.cell(x) + self.residual(x)), True)
        # NOTE(review): functional dropout here is applied regardless of
        # self.training — confirm eval-time dropout is intended.
        if self.dropout > 0:
            out = torch.nn.functional.dropout(out, p=self.dropout)
        return out
class EmbeddingNet(nn.Module):
    """1-D CNN that embeds an (N, feat_dim, T) sequence into an (N, out_dim) vector."""

    def __init__(self, feat_dim, out_dim):
        super(EmbeddingNet, self).__init__()
        self.feat_dim = feat_dim
        self.out_dim = out_dim
        self.define_module()

    def define_module(self):
        # Right-hand comments track the approximate temporal length after each
        # stage (assumes an input length around 430 — TODO confirm against the
        # data pipeline).
        self.embedding_net_1 = nn.Sequential(
            conv3_1dxn(self.feat_dim, 256, 5),  # 420
            nn.MaxPool1d(2),                    # 210
            conv3_1dxn(256, 512, 5),            # 200
            nn.MaxPool1d(2),                    # 100
            ResBlock1d(512, 512, 1),            # 100
            nn.MaxPool1d(2),                    # 50
            ResBlock1d(512, 512, 1),            # 50
            nn.MaxPool1d(2),                    # 25
            ResBlock1d(512, 1024, 1),           # 25
            nn.MaxPool1d(2),                    # 12
            ResBlock1d(1024, self.out_dim),     # 12
            nn.MaxPool1d(6),                    # 2
            Squeeze(),                          # shape (N, 2048)
        )

    def forward(self, features):
        output = self.embedding_net_1(features)
        return output
class EmbeddingNetLSTM(nn.Module):
    """CNN front-end + LSTM + linear head; embeds (N, feat_dim, T) to (N, out_dim)."""

    def __init__(self, feat_dim, out_dim):
        super(EmbeddingNetLSTM, self).__init__()
        self.feat_dim = feat_dim
        self.out_dim = out_dim
        self.define_module()

    def define_module(self):
        self.embedding_net_1 = nn.Sequential(
            conv3_1dxn(self.feat_dim, 256, 5),
            nn.MaxPool1d(2),
            conv3_1dxn(256, 512, 5),
            nn.MaxPool1d(2),
            conv3_1d(512, 1024, stride=2, padding=True),
            nn.BatchNorm1d(1024),
            nn.ReLU(True),
            conv3_1d(1024, 1024, stride=2, padding=True),  # 25
            nn.BatchNorm1d(1024),
            nn.ReLU(True),
            nn.Dropout(p=0.4),
        )
        self.lstm = nn.LSTM(1024, 512)
        self.embedding_net_2 = nn.Sequential(
            nn.MaxPool1d(5),
            Flatten(),  # (N, 512 * 5)
            nn.Linear(512 * 5, self.out_dim),
        )

    def forward(self, features):
        output = self.embedding_net_1(features)
        # (N, C, T) -> (T, N, C) for the (non-batch-first) LSTM, then back
        output = torch.transpose(output, 1, 2)
        output = torch.transpose(output, 0, 1)
        output, (h, c) = self.lstm(output)
        output = torch.transpose(output, 0, 1)
        output = torch.transpose(output, 1, 2)
        return self.embedding_net_2(output)
class Flatten(nn.Module):
    """Collapse the non-batch dimensions of a 3-D (N, F, T) tensor into one."""

    def forward(self, x):
        batch, _feat, _time = x.size()  # unpacking enforces a 3-D input, as before
        return x.view(batch, -1)
class CA_NET(nn.Module):
    """Conditioning augmentation: maps an audio embedding to (c_code, mu, logvar).

    some code is modified from vae examples
    (https://github.com/pytorch/examples/blob/master/vae/main.py)
    """

    def __init__(self):
        super(CA_NET, self).__init__()
        self.t_dim = cfg.AUDIO.DIMENSION
        self.c_dim = cfg.GAN.CONDITION_DIM
        self.fc = nn.Linear(self.t_dim, self.c_dim * 2, bias=True)
        self.relu = nn.ReLU()

    def encode(self, audio_embedding):  # audio encoding
        # project, then split the 2*c_dim output into mean / log-variance halves
        x = self.relu(self.fc(audio_embedding))
        mu = x[:, :self.c_dim]
        logvar = x[:, self.c_dim:]
        return mu, logvar

    def reparametrize(self, mu, logvar):
        # sample c ~ N(mu, exp(logvar)) via the reparameterization trick
        std = logvar.mul(0.5).exp_()
        if cfg.CUDA:
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        return eps.mul(std).add_(mu)

    def forward(self, audio_embedding):
        mu, logvar = self.encode(audio_embedding)
        c_code = self.reparametrize(mu, logvar)
        return c_code, mu, logvar
class D_GET_LOGITS(nn.Module):
    """Discriminator head: sigmoid score from an (ndf*8) x 4 x 4 feature map,
    optionally conditioned on a broadcast condition code."""

    def __init__(self, ndf, nef, nout, bcondition=True):
        super(D_GET_LOGITS, self).__init__()
        self.df_dim = ndf
        self.ef_dim = nef
        self.out_dim = nout + 1 if nout > 1 else 1
        self.bcondition = bcondition
        if bcondition:
            self.outlogits = nn.Sequential(
                conv3x3_2d(ndf * 8 + nef, ndf * 8),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
                nn.Sigmoid()
            )
        else:
            self.outlogits = nn.Sequential(
                nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
                nn.Sigmoid()
            )

    def forward(self, h_code, c_code=None):
        # conditioning output
        if self.bcondition and c_code is not None:
            # broadcast the condition code over the 4x4 spatial map
            c_code = c_code.view(-1, self.ef_dim, 1, 1)
            c_code = c_code.repeat(1, 1, 4, 4)
            # state size (ngf+egf) x 4 x 4
            h_c_code = torch.cat((h_code, c_code), 1)
        else:
            h_c_code = h_code
        output = self.outlogits(h_c_code)
        return output.view(-1)
class STAGE1_G(nn.Module):
    """Stage-I generator: audio embedding + noise -> 64x64 fake image."""

    def __init__(self):
        super(STAGE1_G, self).__init__()
        self.gf_dim = cfg.GAN.GF_DIM * 8
        self.ef_dim = cfg.GAN.CONDITION_DIM
        self.z_dim = cfg.Z_DIM
        self.define_module()

    def define_module(self):
        ninput = self.z_dim + self.ef_dim
        ngf = self.gf_dim
        # TEXT.DIMENSION -> GAN.CONDITION_DIM
        self.ca_net = CA_NET()

        # -> ngf x 4 x 4
        self.fc = nn.Sequential(
            nn.Linear(ninput, ngf * 4 * 4, bias=False),
            nn.BatchNorm1d(ngf * 4 * 4),
            nn.ReLU(True))

        # ngf x 4 x 4 -> ngf/2 x 8 x 8
        self.upsample1 = upBlock_2d(ngf, ngf // 2)
        # -> ngf/4 x 16 x 16
        self.upsample2 = upBlock_2d(ngf // 2, ngf // 4)
        # -> ngf/8 x 32 x 32
        self.upsample3 = upBlock_2d(ngf // 4, ngf // 8)
        # -> ngf/16 x 64 x 64
        self.upsample4 = upBlock_2d(ngf // 8, ngf // 16)
        # -> 3 x 64 x 64
        self.img = nn.Sequential(
            conv3x3_2d(ngf // 16, 3),
            nn.Tanh())

    def forward(self, text_embedding, noise):
        # Returns (None, fake_img, mu, logvar); the leading None keeps the
        # return shape aligned with the Stage-II generator.
        c_code, mu, logvar = self.ca_net(text_embedding)
        z_c_code = torch.cat((noise, c_code), 1)
        h_code = self.fc(z_c_code)

        h_code = h_code.view(-1, self.gf_dim, 4, 4)
        h_code = self.upsample1(h_code)
        h_code = self.upsample2(h_code)
        h_code = self.upsample3(h_code)
        h_code = self.upsample4(h_code)
        # state size 3 x 64 x 64
        fake_img = self.img(h_code)
        return None, fake_img, mu, logvar
class STAGE1_D(nn.Module):
    """Stage-I discriminator: encodes a 64x64 image down to (ndf*8) x 4 x 4."""

    def __init__(self):
        super(STAGE1_D, self).__init__()
        self.df_dim = cfg.GAN.DF_DIM
        self.ef_dim = cfg.GAN.CONDITION_DIM
        self.out_dim = cfg.GAN.N_OUTPUT
        self.define_module()

    def define_module(self):
        ndf, nef, nout = self.df_dim, self.ef_dim, self.out_dim
        self.encode_img = nn.Sequential(
            nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            # state size (ndf * 8) x 4 x 4)
            nn.LeakyReLU(0.2, inplace=True)
        )
        # only the conditional head is used in stage I
        self.get_cond_logits = D_GET_LOGITS(ndf, nef, nout)
        self.get_uncond_logits = None

    def forward(self, image):
        img_embedding = self.encode_img(image)
        return img_embedding
class ResBlock(nn.Module):
    """2-D residual block: relu(x + conv-BN-ReLU-conv-BN(x))."""

    def __init__(self, channel_num):
        super(ResBlock, self).__init__()
        # attribute names ("block", "relu") are kept so saved state dicts still load
        self.block = nn.Sequential(
            conv3x3_2d(channel_num, channel_num),
            nn.BatchNorm2d(channel_num),
            nn.ReLU(True),
            conv3x3_2d(channel_num, channel_num),
            nn.BatchNorm2d(channel_num))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(x + self.block(x))
class STAGE2_D(nn.Module):
    """Stage-II discriminator: encodes a 3 x 4 x 256 x 256 clip down to an
    (ndf*8) x 4 x 4 feature map for the logit heads."""

    def __init__(self):
        super(STAGE2_D, self).__init__()
        self.df_dim = cfg.GAN.DF_DIM
        self.ef_dim = cfg.GAN.CONDITION_DIM
        self.out_dim = cfg.GAN.N_OUTPUT
        self.define_module()

    def define_module(self):
        ndf, nef, nout = self.df_dim, self.ef_dim, self.out_dim
        # img size = 4 * 256 * 256
        self.encode_img = nn.Sequential(
            nn.Conv3d(3, ndf, 4, stride=2, padding=1, bias=False),  # 2 * 128 * 128 * ndf
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d(ndf, ndf * 2, 4, stride=2, padding=1, bias=False),  # 1 * 64 * 64 * ndf
            nn.BatchNorm3d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),  # 64 * 64 * ndf * 2
            # drop the now-singleton depth dimension to switch to 2-D convs
            Squeeze(),  # 64 * 64 * ndf * 2
            nn.Conv2d(ndf * 2, ndf * 4, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),  # 32 * 32 * ndf * 4
            nn.Conv2d(ndf * 4, ndf * 8, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),  # 16 * 16 * ndf * 8
            nn.Conv2d(ndf * 8, ndf * 16, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 16),
            nn.LeakyReLU(0.2, inplace=True),  # 8 * 8 * ndf * 16
            nn.Conv2d(ndf * 16, ndf * 32, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 32),
            nn.LeakyReLU(0.2, inplace=True),  # 4 * 4 * ndf * 32
            conv3x3_2d(ndf * 32, ndf * 16),
            nn.BatchNorm2d(ndf * 16),
            nn.LeakyReLU(0.2, inplace=True),  # 4 * 4 * ndf * 16
            conv3x3_2d(ndf * 16, ndf * 8),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True)  # 4 * 4 * ndf * 8
        )

        self.get_cond_logits = D_GET_LOGITS(ndf, nef, nout, bcondition=True)
        self.get_uncond_logits = D_GET_LOGITS(ndf, nef, nout, bcondition=False)

    def forward(self, image):
        img_embedding = self.encode_img(image)
        return img_embedding
class STAGE2_G_twostream(nn.Module):
    """Stage-II generator producing a short clip: a static background stream
    (2-D) and a 3-D foreground stream, blended by a learned mask."""

    def __init__(self, STAGE1_G):
        super(STAGE2_G_twostream, self).__init__()
        self.gf_dim = cfg.GAN.GF_DIM
        self.ef_dim = cfg.GAN.CONDITION_DIM  # c_dim
        self.z_dim = cfg.Z_DIM
        self.STAGE1_G = STAGE1_G
        # fix parameters of stageI GAN
        for param in self.STAGE1_G.parameters():
            param.requires_grad = False
        self.define_module()

    def _make_layer(self, block, channel_num):
        # stack cfg.GAN.R_NUM residual blocks of the given type
        layers = []
        for i in range(cfg.GAN.R_NUM):
            layers.append(block(channel_num))
        return nn.Sequential(*layers)

    def define_module(self):
        ngf = self.gf_dim
        # TEXT.DIMENSION -> GAN.CONDITION_DIM
        self.ca_net = CA_NET()  # c_dim
        # --> 4ngf x 16 x 16
        self.encoder = nn.Sequential(
            conv3x3_2d(3, ngf),
            nn.ReLU(True),
            nn.Conv2d(ngf, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.Conv2d(ngf * 2, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True))
        self.hr_joint = nn.Sequential(
            conv3x3_2d(self.ef_dim + ngf * 4, ngf * 4),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True))
        self.residual = self._make_layer(ResBlock, ngf * 4)
        # upsample background
        self.upsample1 = upBlock_2d(ngf * 4, ngf * 2)
        # --> ngf x 64 x 64
        self.upsample2 = upBlock_2d(ngf * 2, ngf)
        # --> ngf // 2 x 128 x 128
        self.upsample3 = upBlock_2d(ngf, ngf // 2)
        # --> ngf // 4 x 256 x 256
        self.upsample4 = upBlock_2d(ngf // 2, ngf // 4)
        # --> 3 x 256 x 256
        self.background = nn.Sequential(
            conv3x3_2d(ngf // 4, 3),
            nn.Tanh())

        # upsample foreground
        self.upsample1_3d = upBlock_3d(ngf * 4, ngf * 2, scale_factor=(1, 2, 2))
        # --> ngf x 1 x 64 x 64
        self.upsample2_3d = upBlock_3d(ngf * 2, ngf, scale_factor=(1, 2, 2))
        # --> ngf // 2 x 1 x 128 x 128
        self.upsample3_3d = upBlock_3d(ngf, ngf // 2)
        # --> ngf // 4 x 2 x 256 x 256
        self.upsample4_3d = upBlock_3d(ngf // 2, ngf // 4)
        # --> 3 x 4 x 256 x 256
        self.foreground = nn.Sequential(
            conv3x3_3d(ngf // 4, 3),
            nn.Tanh())

        # foreground/background blend mask
        self.foreground_mask = nn.Sequential(
            conv3x3_3d(ngf // 4, 1),
            nn.Tanh())

    def forward(self, audio_embedding, noise):
        # Stage-I image is generated frozen (detached) and re-encoded.
        _, stage1_img, _, _ = self.STAGE1_G(audio_embedding, noise)
        stage1_img = stage1_img.detach()
        encoded_img = self.encoder(stage1_img)

        c_code, mu, logvar = self.ca_net(audio_embedding)
        c_code = c_code.view(-1, self.ef_dim, 1, 1)
        c_code = c_code.repeat(1, 1, 16, 16)
        i_c_code = torch.cat([encoded_img, c_code], 1)
        h_code = self.hr_joint(i_c_code)
        h_code = self.residual(h_code)

        # 2-D background stream
        h_code_2d = self.upsample1(h_code)
        h_code_2d = self.upsample2(h_code_2d)
        h_code_2d = self.upsample3(h_code_2d)
        h_code_2d = self.upsample4(h_code_2d)
        background = self.background(h_code_2d)

        # 3-D foreground stream (add a depth dim, then upsample)
        h_code_3d = torch.unsqueeze(h_code, 2)
        h_code_3d = self.upsample1_3d(h_code_3d)
        h_code_3d = self.upsample2_3d(h_code_3d)
        h_code_3d = self.upsample3_3d(h_code_3d)
        h_code_3d = self.upsample4_3d(h_code_3d)
        foreground = self.foreground(h_code_3d)
        mask = self.foreground_mask(h_code_3d)

        # replicate the static background across the foreground's depth
        expanded_background = torch.unsqueeze(background, 2)
        foreground_D = mask.shape[2]
        expanded_background = expanded_background.repeat(1, 1, foreground_D, 1, 1)
        # NOTE(review): mask comes from Tanh, so it spans [-1, 1] rather than
        # [0, 1]; the blend below can over/undershoot — confirm this is
        # intended (Sigmoid is the usual choice for a blend mask).
        fake_img = mask * expanded_background + (1 - mask) * foreground
        return stage1_img, fake_img, mu, logvar
class BASELINE_GIF_G(nn.Module):
def __init__(self, BASELINE_GIF_G):
super(BASELINE_GIF_G, self).__init__() | StarcoderdataPython |
6509776 | # Custom Bot Message by @Hageru-Ray
from discord.ext import commands
import discord
class Say(commands.Cog):
    """Cog exposing a single `say` command that echoes a message in an embed."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    @commands.cooldown(1, 30, commands.BucketType.user)  # once per 30s per user
    async def say(self, ctx, *, message):
        """Echo *message* back as an embed, credited to the invoking user."""
        embed = discord.Embed(title=f':loudspeaker: {message}')
        embed.set_footer(text="Command invoked by {}".format(ctx.message.author.name))
        await ctx.send(embed=embed)
def setup(client):
    """Register the Say cog on the given bot client (discord.py extension hook)."""
    cog = Say(client)
    client.add_cog(cog)
# Hageru-Ray
| StarcoderdataPython |
3231325 | <reponame>JohnKarima/hood<filename>hoodapp/migrations/0008_auto_20201031_1609.py
# Generated by Django 3.1.2 on 2020-10-31 16:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: move the profile<->neighbourhood link from
    Neighbourhood.prof_ref to a Profile.hood_ref foreign key."""

    dependencies = [
        ('hoodapp', '0007_auto_20201031_1459'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='neighbourhood',
            name='prof_ref',
        ),
        migrations.AddField(
            model_name='profile',
            name='hood_ref',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profiles', to='hoodapp.neighbourhood'),
        ),
    ]
| StarcoderdataPython |
9624029 | <filename>solution/input_module.py
'''
InputModule
Contains the necessary code to process the user
input
Author: <NAME>
'''
valid_characters = [
    '*',  # Space
    '#'   # Wall
]


class InputModule:
    """Validates and parses the textual grid supplied by the user."""

    def is_valid(self, cells):
        """Return True when every cell is an allowed character ('*' or '#')."""
        return all(cell in valid_characters for cell in cells)

    def separate_data(self, line, separation_token=" "):
        """Split *line* on *separation_token*, e.g. "1 2 3" -> ["1", "2", "3"]."""
        return line.split(separation_token)

    def generate_grid(self, string_grid, number_columns):
        """Turn the string representation of the grid into a 2-D list.

        Raises ValueError(2) for a row with the wrong column count and
        ValueError(1) for an invalid character (codes kept for callers).
        """
        grid = []
        for row in self.separate_data(string_grid, '\n'):
            cells = self.separate_data(row)
            if len(cells) != number_columns:
                raise ValueError(2)
            if not self.is_valid(cells):
                raise ValueError(1)
            grid.append(cells)
        return grid
| StarcoderdataPython |
6541428 | <reponame>rohitsinha54/Learning-Python
#!/usr/bin/env python
"""selectionsort.py: Program to implement selection sort"""
__author__ = '<NAME>'
def selection_sort(alist):
    """Sort *alist* in place (ascending) using selection sort.

    On every pass, the largest remaining element is swapped into the last
    unsorted position.
    """
    for boundary in range(len(alist) - 1, 0, -1):
        largest = max(range(boundary + 1), key=alist.__getitem__)
        alist[largest], alist[boundary] = alist[boundary], alist[largest]
if __name__ == '__main__':
    # demo: sort a sample list and show the result
    sample = [84, 69, 76, 86, 94, 91]
    selection_sort(sample)
    print(sample)
3439810 | <reponame>google/vizier
"""Tests for vizier.pyvizier.oss.metadata_util."""
from vizier._src.pyvizier.oss import metadata_util
from vizier.service import study_pb2
from absl.testing import absltest
class MetadataUtilTest(absltest.TestCase):
    """Tests for metadata_util assign/get round-trips on Trial protos."""

    def test_get(self):
        """Round-trips proto and string metadata across namespaces, plus misses."""
        meta_trial = study_pb2.Trial(id='meta_trial')
        trial = study_pb2.Trial(id='trial')
        metadata_util.assign(trial, key='any', ns='', value=meta_trial)
        metadata_util.assign(trial, key='text', ns='x', value='x-value')
        metadata_util.assign(trial, key='text', ns='', value='value')
        metadata_util.assign(trial, key='text', ns='y', value='y-value')
        # proto-typed metadata is returned only when asked for the right cls
        self.assertEqual(
            metadata_util.get_proto(trial, key='any', ns='', cls=study_pb2.Trial),
            meta_trial)
        self.assertEqual(metadata_util.get(trial, key='text', ns=''), 'value')
        self.assertEqual(metadata_util.get(trial, key='text', ns='x'), 'x-value')
        # wrong cls, unknown key -> None
        self.assertIsNone(
            metadata_util.get_proto(trial, key='any', ns='', cls=study_pb2.Study))
        self.assertIsNone(metadata_util.get(trial, key='TYPO', ns=''))
        self.assertIsNone(
            metadata_util.get_proto(trial, key='TYPO', ns='', cls=study_pb2.Trial))


if __name__ == '__main__':
    absltest.main()
| StarcoderdataPython |
4895350 | # Copyright 2015, Aiven, https://aiven.io/
#
# This file is under the Apache License, Version 2.0.
# See the file `LICENSE` for details.
from . import journalpump
# Console entry point: delegate straight to JournalPump.run_exit(), which
# runs the pump and exits the process when it finishes.
journalpump.JournalPump.run_exit()
| StarcoderdataPython |
160324 | <gh_stars>1-10
import datetime
class ParamCheck():
    """Parameter-validation helpers.

    Each ``check_*`` method returns a (Japanese) error-message string on
    failure and ``None`` when the value passes.
    """

    def __init__(self):
        """No state is kept; present for explicitness."""
        pass

    def check_required(self, columns, column_name):
        """Required-value check.

        Parameters
        ----------
        columns : obj
            Value to check for presence.
        column_name : str
            Field name used in the error message.

        Returns
        -------
        str
            Error message, or None when the value is present.
        """
        # stringify and drop spaces so whitespace-only input counts as empty
        columns_replaced = str(columns).replace(' ', '')
        if columns is None or not columns_replaced:
            return '必須入力エラー:' + column_name

    def check_length(self, columns, column_name, min, max):
        """Character-length check; a falsy *min*/*max* skips that bound.

        Parameters
        ----------
        columns : obj
            Value to check (ints are stringified first).
        column_name : str
            Field name used in the error message.
        min : int
            Minimum length (or falsy to skip).
        max : int
            Maximum length (or falsy to skip).

        Returns
        -------
        str
            Error message, or None on success.
        """
        if type(columns) is int:
            columns = str(columns)

        if min and int(min) > len(columns):
            return f'文字数エラー(最小文字数[{min}]未満):{column_name}'

        if max and int(max) < len(columns):
            return f'文字数エラー(最大文字数[{max}]超過):{column_name}'

    def check_int(self, columns, column_name):
        """Integer check: accepts int instances or numeric strings.

        NOTE(review): ``str.isnumeric()`` rejects negative values such as
        '-1' — confirm that is the intended behavior for string inputs.

        Returns
        -------
        str
            Error message, or None on success.
        """
        if isinstance(columns, int):
            columns_replaced = True
        else:
            columns_replaced = columns.isnumeric()
        if columns is None or not columns_replaced:
            return 'int型チェックエラー:' + column_name

    def check_year_month(self, columns, column_name):
        """Year-month (YYYYMM) format check.

        Returns
        -------
        str
            Error message, or None on success.
        """
        # accept hyphen- and slash-separated dates
        columns_replaced = columns.replace('-', '').replace('/', '')
        try:
            datetime.datetime.strptime(columns_replaced, "%Y%m")
        except ValueError:
            return f'年月形式エラー : {column_name}({columns})'

    def check_year_month_day(self, columns, column_name):
        """Year-month-day (YYYYMMDD) format check.

        Returns
        -------
        str
            Error message, or None on success.
        """
        # accept hyphen- and slash-separated dates
        columns_replaced = columns.replace('-', '').replace('/', '')
        try:
            datetime.datetime.strptime(columns_replaced, "%Y%m%d")
        except ValueError:
            return f'年月日形式エラー : {column_name}({columns})'

    def check_time_format(self, columns, column_name, time_format):
        """Time-format check against *time_format* after stripping ':' separators.

        Parameters
        ----------
        columns : obj
            Value to check.
        column_name : str
            Field name used in the error message.
        time_format : str
            strptime format the stripped value must satisfy.

        Returns
        -------
        str
            Error message, or None on success.
        """
        # accept colon-separated times
        columns_replaced = columns.replace(':', '')
        try:
            datetime.datetime.strptime(columns_replaced, time_format)
        except ValueError:
            return f'時間形式エラー : {column_name}({columns})'
| StarcoderdataPython |
8080088 | """
In this Bite we'd like you to loop over the characters in the large
block of text (the most important text for any Python programmer:
The Zen of Python!)
Within this loop you'll perform the following actions:
Replace all vowels (aeiou) with stars (*), do this case insensitively.
Count the number of replacements you do (= vowels in the text).
Return the new block of text post replacements and the count of vowels
you replaced.
Hint: Try converting the block of text to a list first to make working
with the characters simpler.
Tip: If you're struggling, work on one step at a time and expand on
your code slowly. Don't try and tackle every requirement right away.
Bonus: if you already have some Python under your belt, try to use re
and try to solve it without a for loop :)
"""
import re
# Sample corpus: The Zen of Python (PEP 20), consumed by strip_vowels below.
text = """
The Zen of Python, by <NAME>
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
"""
# NOTE(review): strip_vowels uses a literal [aeiou] pattern; this constant
# is not referenced by it.
vowels = "aeiou"
def strip_vowels(text: str) -> (str, int):
    """Replace every vowel in *text* with a star, case-insensitively.

    Returns a (replaced_text, number_of_vowels_replaced) tuple, e.g.
    strip_vowels('hello world') -> ('h*ll* w*rld', 3).
    """
    vowel_pattern = re.compile(r"[aeiou]", re.IGNORECASE)
    return vowel_pattern.subn("*", text)
| StarcoderdataPython |
5199669 | <filename>modulestf/render.py
import contextlib
import glob
import json
import os
import re
import shutil
from pprint import pformat, pprint

from cookiecutter.exceptions import NonTemplatedInputDirException
from cookiecutter.main import cookiecutter

from modulestf.const import *
from modulestf.logger import setup_logging
from modulestf.modules import *
logger = setup_logging()
def mkdir_safely(dir):
    """Recreate *dir* as an empty directory, best-effort.

    Any existing tree is removed first (a missing directory is fine), then
    the directory is (re)created; creation errors are deliberately ignored
    to keep the render pipeline going, matching the original behavior.
    """
    with contextlib.suppress(FileNotFoundError):
        shutil.rmtree(dir)
    with contextlib.suppress(OSError):
        os.mkdir(dir)
def prepare_render_dirs():
    """Create a clean working tree under tmp_dir/OUTPUT_DIR and chdir into it.

    NOTE: relies on os.chdir side effects — subsequent render_* helpers
    assume the process CWD is WORK_DIR after this call. The *_DIR constants
    and tmp_dir presumably come from modulestf.const (star import) — verify.
    """
    output_dir = os.path.join(tmp_dir, OUTPUT_DIR)
    mkdir_safely(output_dir)
    os.chdir(output_dir)

    mkdir_safely(WORK_DIR)
    os.chdir(WORK_DIR)

    mkdir_safely(FINAL_DIR)
def render_single_layer(resource, region):
    """Render one Terraform layer directory for *resource* via cookiecutter.

    NOTE: mutates the caller's *resource* dict in place (dict.update below)
    before handing it to cookiecutter as extra context.
    """
    dir_name = resource.get("dir_name")
    full_dir_name = ("single_layer/%s/%s" % (region, dir_name)).lower()

    single_layer = {
        "dir_name": full_dir_name,
        "region": region,
        # module source/variables come from the MODULES registry (modulestf.modules)
        "module_source": MODULES[resource["type"]]["source"],
        "module_variables": MODULES[resource["type"]]["variables"],
    }

    # dict.update returns None, so `or resource` yields the mutated resource dict
    extra_context = resource.update(single_layer) or resource

    cookiecutter(os.path.join(COOKIECUTTER_TEMPLATES_DIR, COOKIECUTTER_TEMPLATES_PREFIX + "-single-layer"),
                 config_file=os.path.join(COOKIECUTTER_TEMPLATES_DIR, "config_aws_lambda.yaml"),
                 no_input=True,
                 extra_context=extra_context)
def render_common_layer(region):
    """Render the shared "common layer" directory for *region*.

    NonTemplatedInputDirException is swallowed: the common-layer template may
    legitimately contain no templated input directory.
    """
    common_layer = {
        "dir_name": "common_layer",
        "region": region,
    }

    try:
        cookiecutter(os.path.join(COOKIECUTTER_TEMPLATES_DIR, COOKIECUTTER_TEMPLATES_PREFIX + "-common-layer"),
                     config_file=os.path.join(COOKIECUTTER_TEMPLATES_DIR, "config_aws_lambda.yaml"),
                     no_input=True,
                     extra_context=common_layer)
    except NonTemplatedInputDirException:
        pass
def render_root_dir(source, region, dirs):
    """Render the root directory that wires together all generated layer dirs."""
    root_dir = {
        "dir_name": "root_dir",
        "source_name": source["name"],
        "region": region,
        "dirs": dirs,
    }

    cookiecutter(os.path.join(COOKIECUTTER_TEMPLATES_DIR, "root"),
                 config_file=os.path.join(COOKIECUTTER_TEMPLATES_DIR, "config_aws_lambda.yaml"),
                 no_input=True,
                 extra_context=root_dir)
def get_types_text(resources):
    """Count the unique combinations of resource "type" and "text".

    The count per combination is later used to decide whether a unique
    suffix must be appended to the resource's directory name. When "text"
    (or "type") is missing, the key falls back to the bare type.
    """
    counts = {}
    for resource in resources:
        try:
            key = resource.get("type") + resource.get("text")
        except TypeError:
            key = resource.get("type")
        counts[key] = counts.get(key, 0) + 1
    return counts
def make_dir_name(type, text, appendix=""):
    """Build a filesystem-safe directory name from resource type/text.

    The human label *text* wins over *type* when non-empty; *appendix*
    disambiguates duplicates. Spaces become underscores, anything outside
    [a-zA-Z0-9-_] is stripped, and runs of underscores are collapsed.
    """
    if text is not None and len(text):
        parts = [text]
    else:
        parts = [type]
    if appendix:
        parts.append(appendix)

    name = "_".join(parts)
    name = name.strip().replace(" ", "_")
    name = re.sub('[^a-zA-Z0-9-_]', '', name)
    name = re.sub('_+', '_', name)
    return name
def render_from_modulestf_config(config, source, regions):
    """Render the full Terraform tree from a modules.tf JSON *config*.

    Steps: parse the resource list, compute unique directory names, resolve
    dependency/dynamic-param references to those names, render each single
    layer plus the common layer and root dir, then move everything into
    FINAL_DIR. Assumes the CWD is WORK_DIR (see prepare_render_dirs).
    """
    resources = json.loads(config)
    # pprint(resources)

    types_text = get_types_text(resources)

    # fall back to a default region when none supplied
    try:
        region = regions[0]
    except Exception:
        region = "eu-west-1"

    dirs = {}
    also_append = []

    # 1. Get list of all resources and define correct dir names for all resources
    # 2. Update dynamic params and dependencies for each resource
    for resource in resources:
        try:
            t = resource.get("type") + resource.get("text")
        except TypeError:
            t = resource.get("type")

        # append a numeric suffix when several resources share type+text
        if types_text[t] > 1 or (types_text[t] == 1 and t in also_append):
            appendix = str(types_text[t])
            new_appendix = types_text[t] - 1
            also_append.append(t)
        else:
            appendix = ""
            new_appendix = 0

        ref_id = resource.get("ref_id")
        if ref_id:
            dirs.update({ref_id: make_dir_name(type=resource.get("type"), text=resource.get("text"), appendix=appendix)})

        types_text[t] = new_appendix

    # render single layers in a loop
    for resource in resources:
        # Update dependencies with correct dir name
        deps = []
        if resource.get("dependencies"):
            for d in resource.get("dependencies"):
                this_dir = dirs.get(d)
                if this_dir:
                    deps.append(this_dir)
        # cookiecutter does not support list values, so we join it to string here and split in template
        resource.update({"dependencies": ",".join(deps)})

        # Update dynamic parameters with correct dir name
        dynamic_params = resource.get("dynamic_params")
        if dynamic_params:
            for k in dynamic_params:
                try:
                    v = dynamic_params[k].split(".")
                    # replace second element with real directory name
                    dynamic_params.update({k: v[0] + "." + dirs[v[1]] + "." + ".".join(v[2:])})
                except KeyError:
                    pass

        # Set correct dir name
        resource.update({"dir_name": dirs.get(resource.get("ref_id"))})

        # Render the layer
        logger.info("Rendering single layer resource id: %s" % resource.get("ref_id"))
        render_single_layer(resource, region)

    logger.info("Rendering common layer")
    render_common_layer(region)

    logger.info("Rendering root dir")
    render_root_dir(source, region, dirs)

    # collect everything rendered (including dotfiles) and move it to FINAL_DIR
    files = glob.glob("single_layer/*") + \
        glob.glob("single_layer/.*") + \
        glob.glob("common_layer/*") + \
        glob.glob("common_layer/.*") + \
        glob.glob("root_dir/*") + \
        glob.glob("root_dir/.*")

    logger.info("Moving files into final dir: %s" % FINAL_DIR)
    for file in files:
        shutil.move(file, FINAL_DIR)

    logger.info("Complete!")
| StarcoderdataPython |
321655 | # Requires python3 to work since, python 3< does not implement %z.
import sys
import os
import json
from datetime import datetime
import pytz
from subprocess import Popen, PIPE, STDOUT
class Version(object):
    """Semantic version parsed from a git tag string such as ``v1.2.3``.

    Accepts an optional leading ``v`` and tolerates trailing text after a
    newline (as produced by ``git tag -l`` output).  The original code only
    stripped the newline when the tag started with ``v``, so a bare
    ``"1.2.3\\n"`` crashed in ``int()``; the newline is now always stripped.
    """

    def __init__(self, version):
        v = version
        if v.startswith('v'):
            v = v[1:]
        # always drop anything after the first newline, 'v'-prefixed or not
        v = v.split('\n')[0]
        fix = v.split('.')
        self.major = int(fix[0])
        self.minor = int(fix[1])
        self.patch = int(fix[2])
        self.build = None   # reserved; never assigned by this script
        self.tip = False    # set True when this version is the repo HEAD

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "v" + str(self.major) + "." + str(self.minor) + "." + str(self.patch)
class Changelog(object):
    """Markdown changelog section for one version.

    Built from the ``(version, previous_version, raw_git_log)`` triple
    produced by ``logs_between``.
    """

    def __init__(self, info):
        a, b, log = info
        self.log = log
        self.a = a   # the version this section describes
        self.b = b   # the previous version (may be None)

    def log_in_between_versions(self):
        """Return the markdown section for ``self.a``; empty for v0.0.0."""
        if zeroversion(self.a):
            return ""
        date_time = git_log([str(self.a), "-1", '--format="%ad"']).split('\n')[0]
        # was `date_time is not ''` — identity compare on a string literal;
        # use equality so the check is reliable on any interpreter
        if date_time != '':
            dt = datetime.strptime(date_time, '%a %b %d %H:%M:%S %Y %z')
        else:
            dt = datetime.now()
        dt = dt.strftime('%a, %d %b %Y %H:%M:%S')
        log = str(self.a) + " - " + dt + " UTC\n"
        # underline the heading with dashes of matching length
        log = log + ("-" * (len(log) - 1)) + "\n\n"
        actual_log = self.log.splitlines()
        if len(actual_log) == 1:
            entries = "-\n\n"
        else:
            entries = "\n".join(map(url_entry, actual_log[1:])) + "\n\n"
        log = log + entries
        return log

    def __str__(self):
        # was `self.__repr__(self)`, which passed self twice and raised TypeError
        return self.__repr__()

    def __repr__(self):
        return "Changelog: " + self.log
def url_entry(entry):
    """Turn one ``<hash> <subject>`` log line into a markdown commit link."""
    commit_hash, _, message = entry.partition(' ')
    return "- [%s](../../commit/%s) %s" % (commit_hash, commit_hash, message)
def zeroversion(v):
    """Return True when *v* is the null version v0.0.0 (no earlier tag exists)."""
    return (v.major, v.minor, v.patch) == (0, 0, 0)
class compareversions(object):
    """Sort-key wrapper ordering Version objects by (major, minor, patch).

    The previous ``__lt__`` returned True whenever *any* single component
    compared lower, so e.g. v2.0.0 sorted below v1.5.0 (because 0 < 5 on
    the minor field).  Compare the version tuples lexicographically instead.
    """

    def __init__(self, obj, *args):
        self.obj = obj

    def __lt__(self, other):
        return (self.obj.major, self.obj.minor, self.obj.patch) < \
               (other.obj.major, other.obj.minor, other.obj.patch)
def git_exec(args):
    """Run ``git`` with *args* through the shell and return decoded stdout.

    stderr is captured and discarded; the command string is built by joining
    the argument list, so callers must pre-quote anything with spaces.
    """
    command = " ".join(["git"] + args)
    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    stdout_data, _stderr_data = process.communicate()
    return stdout_data.decode('utf-8')
def git_log(args):
    """Run ``git log`` with the extra arguments in *args* and return its output."""
    log_args = ["log"] + args
    return git_exec(log_args)
def adjacents(ls, f, res):
    """Append f(item, next_item) to *res* for each element of *ls*.

    The last element is paired with None.  *res* is mutated in place and
    also returned, matching the original recursive implementation.
    """
    total = len(ls)
    for index, current in enumerate(ls):
        following = ls[index + 1] if index + 1 < total else None
        res.append(f(current, following))
    return res
def logs_between(a, b):
    """Collect the one-line git log between versions *b* (older) and *a* (newer).

    Returns the triple ``(a, b, raw_log)`` consumed by Changelog.  When *b*
    is None the whole history up to *a* is used; when *a* is the repository
    tip, HEAD is used instead of the tag name.
    """
    version = a
    if b is None:
        rev_range = str(a)
    else:
        newer_ref = "HEAD" if a.tip else str(a)
        rev_range = str(b) + ".." + newer_ref
    return (version, b, git_log(["--format='%h %s'", rev_range]))
def changelog(with_versions):
    """Build markdown changelog sections, newest version first.

    with_versions: list of tag strings to restrict generation to; when empty,
    every tag reported by ``git tag -l`` is processed.
    Returns a list of section strings (one per version).
    """
    process = with_versions
    versions = []
    generate_all = len(with_versions) == 0
    if generate_all:
        lines = git_exec(["tag", "-l"])
        process = lines.splitlines()
    for item in process:
        versions.append(Version(item))
    # compareversions supplies the ordering; reverse=True puts the newest first
    versions = sorted(versions, key=compareversions, reverse=True)
    if generate_all:
        # pair each version with its predecessor (last one pairs with None)
        vs = map(Changelog, adjacents(versions, logs_between, []))
    else:
        # NOTE(review): assumes at least two versions were passed —
        # versions[1] raises IndexError for a single tag argument
        versions[0].tip = True
        vs = map(Changelog, [logs_between(versions[0], versions[1])])
    return [v.log_in_between_versions() for v in vs]
if __name__ == "__main__":
args = sys.argv[1:]
for_version=[]
if len(args) > 0:
for_version = list(args)
print("\n".join(changelog(for_version)))
| StarcoderdataPython |
5133782 | <reponame>jsevo/taxumap
# Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# License: MIT
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="taxumap",
version="0.1",
description="UMAP visualization for microbiota compositions with taxonomic structure.",
url="http://github.com/jsevo/taxumap",
author="<NAME>",
author_email="<EMAIL>",
license="MIT License",
packages=["taxumap"],
install_requires=[
"matplotlib",
"pandas",
"seaborn",
"numpy",
"scipy",
"numba",
"umap-learn",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
zip_safe=False,
)
| StarcoderdataPython |
5098934 | <reponame>LiYuzhu12138/3D-DART-server
#!/usr/bin/env python2.7
USAGE = """
==========================================================================================
Author: <NAME>, Department of NMR spectroscopy, Bijvoet Center
for Biomolecular Research, Utrecht university, The Netherlands
Copyright (C): 2007 (DART project)
DART version: 1.2 (25-11-2008)
DART plugin: PDBeditor.py
Input: PDB data file and any of the allowed options, any order and
combined.
Output: A new PDB file or XML representation.
Plugin excecution: Either command line driven (use -h/--help for the option) or as
part of a DART batch sequence.
Plugin function: A suite of functions to modify PDB files. Features include:
change nucleic acid nomenclature from a 1-letter to a 3-letter
code and vice versa; support for the new wwwPDB nucleic-acid abreviation
scheme; set chain-ID; renumber residues and/or atom
numbering; check if PDB is valid for HADDOCK (TER statement,
END statement, CNS nomenclature); place chain ID to location of
seg ID; split ensemble files or concate PDB files to an ensemble;
convert PDB to an XML representation.
Examples: PDBeditor.py -f test.pdb -kn test_fixed.pdb
PDBeditor.py -f test.pdb -r 1 -c B -adg
Dependencies: Standard python2.3 or higher. DART package (XMLwriter,
and Constants modules)
==========================================================================================
"""
"""Import modules"""
import os, sys, re, glob
"""Setting pythonpath variables if run from the command line"""
base, dirs = os.path.split(os.path.dirname(os.path.join(os.getcwd(), __file__)))
if base in sys.path:
pass
else:
sys.path.append(base)
from system.XMLwriter import Node
from system.Constants import *
def PluginXML():
    """Return the DART plugin-description XML (metadata + default parameters)."""
    return """
<metadata>
 <name>PDB formating options</name>
 <input type="Filetype">.pdb</input>
 <output type="Filetype">.pdb</output>
</metadata>
<parameters>
 <option type="useplugin" form="hidden" text="None">True</option>
 <option type="inputfrom" form="hidden" text="None">1</option>
 <option type="NA1to3" form="checkbox" text="Convert nucleic acid 1 letter to 3 letter notation">False</option>
 <option type="NA3to1" form="checkbox" text="Convert nucleic acid 3 letter to 1 letter notation">False</option>
 <option type="setchainID" form="text" text="Set PDB chain id"></option>
 <option type="IUPACtoCNS" form="checkbox" text="Convert IUPAC to CNS notation">False</option>
 <option type="reres" form="text" text="Renumber residues starting from"></option>
 <option type="reatom" form="text" text="Renumber atoms starting from"></option>
 <option type="xsegchain" form="checkbox" text="Move chain id to segment id">False</option>
 <option type="noheader" form="checkbox" text="PDB without header lines">False</option>
 <option type="nohetatm" form="checkbox" text="PDB without HETATM records">False</option>
 <option type="nofooter" form="checkbox" text="PDB without footer lines">False</option>
 <option type="pdb2haddock" form="checkbox" text="Make PDB HADDOCK ready">True</option>
 <option type="joinpdb" form="checkbox" text="Join PDB files to one">False</option>
 <option type="splitpdb" form="text" text="Split PDB files based on TER or MODEL statement"></option>
 <option type="name" form="text" text="Give your structure a name"></option>
 <option type="pdb2xml" form="checkbox" text="Convert PDB to DART XML representation">False</option>
</parameters>"""
def PluginCore(paramdict, inputlist):
    """Apply the PDB edits selected in *paramdict* to every file in *inputlist*.

    paramdict: option dictionary (keys as produced by CommandlineOptionParser
    or the plugin XML); inputlist: list of PDB file paths.  Writes new PDB,
    joined PDB, split PDBs or an XML representation depending on the options.
    """
    print "--> Starting PDBeditor"

    """Split ensemble of PDB files in separate PDB files"""
    if not paramdict['splitpdb'] == None:
        pdb = PDBeditor()
        for files in inputlist:
            print(" * Spliting ensemble PDB in individual PDB files on %s statement" % paramdict['splitpdb'])
            pdb.SplitPDB(ensemble=files, mode=paramdict['splitpdb'])
        sys.exit(0)

    filecount = 1  # to keep track of processed files in the PDB joining process
    for files in inputlist:
        pdb = PDBeditor()
        pdb.ReadPDB(files)

        """Perform fixes to the pdb file to make it suitable for HADDOCK"""
        # pdb2haddock is a shorthand that forces the individual options below
        if paramdict['pdb2haddock']:
            paramdict['NA1to3'] = True
            paramdict['NA3to1'] = False
            paramdict['IUPACtoCNS'] = True
            paramdict['reatom'] = int(1)
            paramdict['noheader'] = True
            paramdict['nohetatm'] = True

        """Convert Nucleic-Acids residue one-letter-code to three-letter-code or vice versa"""
        if paramdict['NA1to3']:
            print " * Convert Nucleic-Acids one-letter-code to three-letter-code"
            pdb.NAresid1to3()
        if paramdict['NA3to1']:
            print " * Convert Nucleic-Acids three-letter-code to one-letter-code (wwwPDB notation)"
            pdb.NAresid3to1()

        """Convert IUPAC atom naming to CNS"""
        if paramdict['IUPACtoCNS']:
            print " * Convert IUPAC atom notation to CNS atom notation"
            pdb.IUPACtoCNS()

        """Place HADDOCK chain ID in propper place"""
        if paramdict['xsegchain']:
            print " * Set seg ID to position of chain ID"
            pdb.XsegChain()

        """Set the chain ID"""
        # input is either "OLD,NEW" (rename one chain) or "NEW" (set all chains);
        # the bare except catches the IndexError of the single-value form
        if paramdict['setchainID'] is not None:
            chainID = paramdict['setchainID'].split(',')
            try:
                old = chainID[0].upper()
                new = chainID[1].upper()
                print " * Converting chain ID:", old, "to chain ID:", new
                pdb.SetchainID(old=old, new=new)
            except:
                new = chainID[0].upper()
                print " * Converting all to chain ID:", new
                pdb.SetchainID(new=new)

        """Renumbering residues"""
        if paramdict['reres'] is not None:
            print " * Renumber residues starting from:", paramdict['reres']
            pdb.Reres(paramdict['reres'])

        """Renumber atoms"""
        if paramdict['reatom'] is not None:
            print " * Renumber atoms starting from:", paramdict['reatom']
            pdb.Reatom(paramdict['reatom'])
            # keep CONECT records consistent with the new atom numbers
            pdb.CorrectConect(paramdict['reatom'])

        """Make XML representation of pdb"""
        if paramdict['pdb2xml']:
            xml = pdb.PDB2XML()
            root = os.path.basename(files)
            basename, extension = os.path.splitext(root)
            outfile = basename + ".xml"
            print " * Generating DART XML representation of the PDB as:", outfile
            out = file(outfile, 'w')
            out.write(xml.xml())
            # NOTE(review): `out.close` lacks parentheses so the file is never
            # explicitly closed — relies on garbage collection to flush
            out.close

        """Write new PDB file"""
        if paramdict['pdb2xml'] == False and paramdict['joinpdb'] == False and paramdict['splitpdb'] == None:
            if paramdict['name'] == None:
                root = os.path.basename(files)
                basename, extension = os.path.splitext(root)
                outfile = basename + "_fixed" + extension
            else:
                # user-supplied name: avoid clobbering an existing .pdb by
                # appending "-<n>" until a free name is found
                files = glob.glob('*.pdb')
                basename, extension = os.path.splitext(paramdict['name'])
                if paramdict['name'] in files:
                    count = 1
                    while count < len(files):
                        newname = basename + '-' + str(count) + extension
                        if newname in files:
                            pass
                        else:
                            outfile = newname
                            break
                        count = count + 1
                else:
                    outfile = paramdict['name']
            print " * Printing fixed pdb file as:", outfile
            pdb.WritePDB(file_out=outfile, join=False, modelnr=0, noheader=paramdict['noheader'],
                         nofooter=paramdict['nofooter'], nohetatm=paramdict['nohetatm'])
        elif paramdict['pdb2xml'] == False and paramdict['joinpdb'] == True and paramdict['splitpdb'] == None:
            if paramdict['name'] == None:
                outfile = 'joined.pdb'
            else:
                outfile = paramdict['name']
            print " * Append", os.path.basename(files), "to concatenated file:", outfile
            pdb.WritePDB(file_out=outfile, join=True, modelnr=filecount, noheader=True, nofooter=True,
                         nohetatm=paramdict['nohetatm'])
            filecount = filecount + 1
# ================================================================================================================================#
# PLUGIN SPECIFIC DEFINITIONS BELOW THIS LINE #
# ================================================================================================================================#
class CommandlineOptionParser:
    """Parses command line arguments using optparse"""

    def __init__(self):
        # option_dict maps option name -> parsed value; filled by the parser
        self.option_dict = {}
        self.option_dict = self.CommandlineOptionParser()

    def CommandlineOptionParser(self):
        """Parsing command line arguments.

        Returns the filled option dictionary.  Input files are collected via
        the varargs callback (optparse has no native variable-length option
        support) and expanded to absolute paths.
        """

        usage = "usage: %prog" + USAGE
        parser = OptionParser(usage)

        parser.add_option("-f", "--file", action="callback", callback=self.varargs, dest="inputfile", type="string",
                          help="Supply pdb inputfile(s). Standard UNIX selection syntax accepted")
        parser.add_option("-a", "--na1to3", action="store_true", dest="NA1to3", default=False,
                          help="Convert nucleic-acid residues from one-letter to three-letter code")
        parser.add_option("-b", "--na3to1", action="store_true", dest="NA3to1", default=False,
                          help="Convert nucleic-acid residues from three-letter to one-letter code")
        parser.add_option("-c", "--setchainid", action="store", dest="setchainID", type="string",
                          help="Convert chain ID. Input as A,B (old,new) or A (all to A)")
        parser.add_option("-i", "--iupactocns", action="store_true", dest="IUPACtoCNS", default=False,
                          help="Convert IUPAC NA atom naming to CNS atom naming")
        parser.add_option("-r", "--reres", action="store", dest="reres", type="int",
                          help="Renumber residues. Options: starting from (number)")
        parser.add_option("-p", "--reatom", action="store", dest="reatom", type="int",
                          help="Renumber atoms. Options: starting from (number)")
        parser.add_option("-s", "--xsegchain", action="store_true", dest="xsegchain", default=False,
                          help="Places chain ID in propper place")
        parser.add_option("-d", "--noheader", action="store_true", dest="noheader", default=False,
                          help="Write pdb file without header lines")
        parser.add_option("-e", "--nohetatm", action="store_true", dest="nohetatm", default=False,
                          help="Write pdb file without hetatm lines")
        parser.add_option("-g", "--nofooter", action="store_true", dest="nofooter", default=False,
                          help="Write pdb file without footer lines (CONECT)")
        parser.add_option("-k", "--pdb2haddock", action="store_true", dest="pdb2haddock", default=False,
                          help="Perform general pdb fixes for HADDOCK (-aipd)")
        parser.add_option("-l", "--joinpdb", action="store_true", dest="joinpdb", default=False,
                          help="Concatenate PDB files")
        parser.add_option("-m", "--splitpdb", action="store", dest="splitpdb", type="string",
                          help="Split ensemble PDB files on MODEL or TER statemend")
        parser.add_option("-n", "--name", action="store", dest="name", type="string", help="name for the new PDB file")
        parser.add_option("-x", "--pdb2xml", action="store_true", dest="pdb2xml", default=False,
                          help="Make DART XML representation of pdb")

        (options, args) = parser.parse_args()

        # copy every parsed option into the plain dictionary the plugin expects
        self.option_dict['input'] = options.inputfile
        self.option_dict['NA1to3'] = options.NA1to3
        self.option_dict['NA3to1'] = options.NA3to1
        self.option_dict['setchainID'] = options.setchainID
        self.option_dict['IUPACtoCNS'] = options.IUPACtoCNS
        self.option_dict['reres'] = options.reres
        self.option_dict['reatom'] = options.reatom
        self.option_dict['xsegchain'] = options.xsegchain
        self.option_dict['noheader'] = options.noheader
        self.option_dict['nohetatm'] = options.nohetatm
        self.option_dict['nofooter'] = options.nofooter
        self.option_dict['pdb2haddock'] = options.pdb2haddock
        self.option_dict['joinpdb'] = options.joinpdb
        self.option_dict['splitpdb'] = options.splitpdb
        self.option_dict['name'] = options.name
        self.option_dict['pdb2xml'] = options.pdb2xml

        # recover the first -f argument that the varargs callback drops
        if not self.option_dict['input'] == None:
            parser.remove_option('-f')
            arg = self.GetFirstArgument(parser, shorta='-f', longa='--file')
            self.option_dict['input'].append(arg)
            fullpath = self.GetFullPath(self.option_dict['input'])
            self.option_dict['input'] = fullpath

        if parser.has_option('-f'):
            pass
        else:
            parser.add_option("-f", "--file", action="store", dest="dummy2",
                              type="string")  # only needs to be here to complete the argument list, not used!

        return self.option_dict

    def GetFullPath(self, inputfiles):
        """Expand every input file name to an absolute path under the CWD."""
        currdir = os.getcwd()
        filelist = []
        for files in inputfiles:
            path = os.path.join(currdir, files)
            filelist.append(path)
        return filelist

    def GetFirstArgument(self, parser, shorta, longa):
        """HACK, optparse has difficulties in variable argument lists. The varargs definition solves this but never reports the first
           argument of the list. This definition hacks this issue"""
        parser.add_option(shorta, longa, action="store", dest="temp", type="string",
                          help="Execute custom workflow assembled on the command line. You can execute a single plugin by typing '-p pluginname' or a sequence of plugins by typing '-p plugin1,plugin2...'")
        (options, args) = parser.parse_args()
        first_arg = options.temp
        parser.remove_option(shorta)
        return first_arg

    def varargs(self, option, opt_str, value, parser):
        """Deals with variable list of command line arguments"""
        value = []
        rargs = parser.rargs
        # consume tokens until the next option flag is reached
        while rargs:
            arg = rargs[0]
            if ((arg[:2] == "--" and len(arg) > 2) or
                    (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")):
                break
            else:
                value.append(arg)
                del rargs[0]
        setattr(parser.values, option.dest, value)
class PDBeditor:
    """In-memory PDB model with column-wise editing helpers.

    Each ATOM record is stored across parallel lists (label, atnum, atname,
    resname, chain, resnum, coord, ...); all edit methods rewrite one of
    those lists in place.  Python 2 code (print statements, file(), xrange).
    """
    def __init__(self, inputfile=None):
        # parallel per-record lists; one entry per ATOM/TER record read
        self.title = []
        self.atcounter = 0
        self.allatom_line = []
        self.header = []
        self.footer = []
        self.end = []
        self.model = []
        self.label = []
        self.atnum = []
        self.elem = []
        self.atname = []
        self.atalt = []
        self.resname = []
        self.chain = []
        self.resnum = []
        self.resext = []
        self.coord = []
        self.occ = []
        self.b = []
        self.hdoc_chain = []
        self.sequence = {}
        # original number of the first atom; needed by CorrectConect
        self.firstatnr = 1

    def ReadPDB(self, inputfile, debug=0):
        """Read a PDB file (path or open file object) into this instance."""
        # check if passed filename string or a file descriptor
        if type(inputfile) == type(sys.stdin):
            readfile = inputfile
        else:
            readfile = file(inputfile, 'r')
        lines = readfile.readlines()
        self.ReadPDBlines(lines, debug)

    def ReadPDBlines(self, lines, debug=0):
        """
        Reads a list of PDB-format file lines in to the Protein class object.
        Thus can be called by another routine that already has the lines in a list.
        Returns the number of atoms read in.
        """
        i = 0
        atom_hetatm = re.compile('(ATOM |TER |HETATM)')
        head = re.compile('^(HEADER|COMPND|SOURCE|JRNL|HELIX|REMARK|SEQRES|CRYST1|SCALE|ORIG)')
        title = re.compile('^TITLE')
        foot = re.compile('(CONECT|MASTER)')
        end = re.compile('(END)')
        model = re.compile('(MODEL)')
        element = re.compile('[A-Za-z ][A-Za-z]')
        for line in lines:
            if atom_hetatm.match(line):
                line = line[:-1]
                # Add TER statement if change in chainid
                if len(self.chain) and not self.chain[-1] == line[21]:
                    self.label.append('TER ')
                    # pad every parallel list so indices stay aligned with label
                    for b in (
                            self.allatom_line, self.atnum, self.atname, self.resnum, self.resname, self.chain, self.atalt,
                            self.resext, self.occ, self.b, self.hdoc_chain, self.elem):
                        b.append(blank)
                    self.coord.append((0.000, 0.000, 0.000))
                if not line.startswith("TER"):
                    # slice the fixed PDB columns into the parallel lists
                    self.allatom_line.append(line)
                    self.label.append(line[0:6])  # atom label
                    self.atnum.append(int(line[6:12]))  # atom number
                    self.atname.append(line[12:16])  # atom type
                    self.atalt.append(line[16:17])
                    self.resname.append(line[17:21])  # residu name
                    self.chain.append(line[21])  # chain
                    self.resnum.append(int(line[22:26]))  # residu number
                    try:
                        self.resext.append(line[27])
                    except:
                        self.resext.append(blank)
                    try:
                        self.coord.append((float(line[30:38]), float(line[38:46]), float(line[46:54])))  # X,Y,Z coordinates
                    except:
                        if not line[0:3] == 'TER' or line[0:5] == 'MODEL':
                            print " * ERROR: coordinate error in line:"
                            print "  ", line
                    try:
                        self.occ.append(float(line[54:60]))
                    except:
                        # missing occupancy column: default the whole list to 1.00
                        self.occ = ([1.00] * len(self.atnum))
                    try:
                        self.b.append(float(line[60:66]))  # B factor
                    except:
                        self.b = ([0.00] * len(self.atnum))
                    try:
                        self.hdoc_chain.append(line[72])  # SEGID
                    except:
                        self.hdoc_chain = ([blank] * len(self.atnum))
                    if element.match(line[76:78]):  # Get first element in elementlist
                        self.elem.append(line[76:78])
                    else:
                        # fall back to the atom-name columns for the element
                        self.elem.append(line[12:14])
                    self.atcounter += 1
                    i += 1
            elif head.match(line):
                self.header.append(line[:-1])
            elif foot.match(line):
                self.footer.append(line[:-1])
            elif end.match(line):
                self.end.append(line[:-1])
            elif model.match(line[:-1]):
                self.model.append(line)
            elif title.match(line):
                self.title.append(line[:-1])

        self.firstatnr = self.atnum[
            0]  # Need to know original number of first atom for possible CONECT statement correction when renumbering atoms

        if debug:
            return len(self.atnum), self.atcounter

    def WritePDB(self, file_out, join=False, modelnr=0, noheader=False, nofooter=False, nohetatm=False):
        """
        Saves the Protein class object to a PDB-format file
        if noheader = True, no header (REMARK etc.) or footer lines are written
        if nohetatm = True, no hetero atoms are written
        """
        if join == True:
            # append mode so successive calls build up an ensemble file
            out = open(file_out, 'a')
        else:
            out = file(file_out, 'w')
        if noheader == False:
            for i in range(len(self.title)):
                out.write('%s\n' % self.title[i])
            for i in range(len(self.header)):
                out.write("%s\n" % self.header[i])
        if join == True:
            out.write('MODEL ' + str(modelnr) + '\n')
        for i in xrange(len(self.resnum)):
            if self.label[i] == 'ATOM  ':
                self.WritePDBline(out, i)
            elif self.label[i] == 'TER   ':
                out.write('TER \n')
            elif self.label[i] == 'HETATM' and not nohetatm:
                out.write("%s\n" % self.allatom_line[i])
        if nofooter == False:
            for i in range(len(self.footer)):
                out.write("%s\n" % self.footer[i])
        if join == False:
            if len(self.end) == 3:
                for i in range(len(self.end)):
                    out.write("%s\n" % self.end[i])
            else:
                self.end = ['END']
                for i in range(len(self.end)):
                    out.write("%s\n" % self.end[i])
        else:
            # inside an ensemble each model is terminated with ENDMDL
            self.end = ['ENDMDL']
            for i in range(len(self.end)):
                out.write("%s\n" % self.end[i])
        out.close()

    def WritePDBline(self, FD, i):
        """
        Writes a single line of data in the PDB-format
        called by writePDB
        """
        FD.write('%-6s%5i %-4s%1s%-4s%1s%4i%1s   %8.3f%8.3f%8.3f%6.2f%6.2f%10s%2s\n' %
                 (self.label[i], self.atnum[i], self.atname[i], self.atalt[i],
                  self.resname[i], self.chain[i], self.resnum[i], self.resext[i],
                  self.coord[i][0], self.coord[i][1], self.coord[i][2], self.occ[i], self.b[i], blank, self.elem[i]))

    def SplitPDB(self, ensemble=None, mode=None):
        """
        Split ensemble PDB files in seperate PDB files based on MODEL or TER tag
        """
        # check if passed filename string or a file descriptor
        if type(ensemble) == type(sys.stdin):
            readfile = ensemble
        else:
            readfile = file(ensemble, 'r')
        lines = readfile.readlines()

        mode = mode.upper()
        atom_hetatm = re.compile('(ATOM |HETATM)')
        model = re.compile('(' + mode + ')')
        modelcount = 1
        # models maps model number -> list of line indices belonging to it
        models = {}
        models[modelcount] = []
        linecount = len(lines)
        linenr = 0
        while linenr < linecount:
            line = lines[linenr].strip()
            if model.match(line):
                if not len(models[modelcount]) == 0:
                    modelcount += 1
                if models.has_key(modelcount) == False:
                    models[modelcount] = []
                linenr += 1
            if atom_hetatm.match(line):
                models[modelcount].append(linenr)
            linenr = linenr + 1

        if len(models) == 1:
            print " * No splitting occured, splitting statement not found"
        else:
            for model in models.keys():
                outfile = os.path.splitext(ensemble)[0] + '_' + str(model) + '.pdb'
                out = file(outfile, 'w')
                print " * Writing model %s as %s" % (model, outfile)
                for line in models[model]:
                    out.write(lines[line])
                out.write('END')
                out.close()

    def NAresid1to3(self):
        """
        Convert list of 1-letter nucleic-acid code sequence to 3-letter code and update resname
        """
        seq3 = []
        for resid1 in self.resname:
            try:
                resid3 = NAres3[
                    NAres1.index(resid1.upper())]  # If NAresid is one-letter code, convert to three-letter code
                seq3.append(resid3)
            except ValueError, err:
                if resid1.upper() in AAres3:  # If resid is amino-acid three letter code, just append
                    seq3.append(resid1.upper())  # Amino-acid one letter code in PDB not accepted(expected)
                elif resid1.upper() == 'HOH ':  # Waters are neglected, just append.
                    seq3.append(resid1.upper())
                elif resid1.upper() in NAres3:  # If NAresid allready in three letter code, just append
                    seq3.append(resid1.upper())
                else:
                    print "    - WARNING: no match for residue: %s" % (
                        resid1)  # If not of the above, raise exception.
                    seq3.append(resid1.upper())
        if len(seq3) == len(self.resname):
            self.resname = seq3
        else:
            pass

    def NAresid3to1(self):
        """
        Convert list of 3-letter nucleic-acid code sequence to 1-letter code and update resname. The 1-letter code is the new
        (2006) wwwPDB notation. This is DA,DT,DC,DG for DNA and RA,RU,RG,RC for RNA.
        """
        print "    - WARNING: The conversion of nucleic-acid three-letter code to two-letter code does not check for ribose or"
        print "      deoxy-ribose. If Uracil is found the structure is regarded as RNA otherwise as DNA. Please check"
        print "      your structure in case of mixed conformations."

        seq1 = []
        THREELETTER = ['--- ', 'CYT ', 'THY ', 'GUA ', 'ADE ', 'URI ']
        DNA1LETTER = [' - ', ' DC ', ' DT ', ' DG ', ' DA ', ' RU ']
        RNA1LETTER = [' - ', ' RC ', ' DT ', ' RG ', ' RA ', ' RU ']
        # crude RNA/DNA detection: a single URI residue flags the whole
        # structure as RNA (see warning above)
        if 'URI ' in self.resname:
            RNA = True
            DNA = False
        else:
            DNA = True
            RNA = False
        for resid3 in self.resname:
            try:
                if RNA == True:
                    resid1 = RNA1LETTER[THREELETTER.index(resid3.upper())]
                    seq1.append(resid1)
                elif DNA == True:
                    resid1 = DNA1LETTER[THREELETTER.index(resid3.upper())]
                    seq1.append(resid1)
            except ValueError, err:
                print "    - WARNING: no match for residue:", resid3
                seq1.append(resid3.upper())
        if len(seq1) == len(self.resname):
            self.resname = seq1
        else:
            pass

    def SetchainID(self, old=None, new=None):
        """
        Convert the chain ID from the old ID to the user supplied new ID if None do nothing.
        Option examples: (A) all to A, (A,B) all A to B. Lower case is converted to upper case.
        """
        newchainseq = []
        if not old and new == None:
            # selective rename: only entries matching `old` are replaced
            for chainid in self.chain:
                if chainid == old:
                    newchainseq.append(new)
                else:
                    newchainseq.append(chainid)
        else:
            # blanket rename: every record gets the new chain ID
            for chainid in self.chain:
                newchainseq.append(new)
        if len(newchainseq) == len(self.chain):  # in case of any errors that yield non-equal arrays
            self.chain = newchainseq
        else:
            pass

    def IUPACtoCNS(self):
        """
        Convert IUPAC atom type notation to CNS atom type notation. Get info from IUPAC and CNS lists from Constants.py.
        Currently only conversion of nucleic-acid atom types.
        """
        newatomseq = []
        for atom in self.atname:
            try:
                newatom = CNS[IUPAC.index(atom)]
                newatomseq.append(newatom)
            except ValueError:
                # atom name not in the IUPAC table: keep it unchanged
                newatomseq.append(atom)
        if len(newatomseq) == len(self.atname):
            self.atname = newatomseq
        else:
            pass

    def PDB2XML(self):
        """
        Makes a XML representation of the PDB. Needs system.XMLwriter
        """
        main = Node("DART_pdbx")
        acount = 0
        lastchain = ' '
        lastresnum = ' '
        # walk the atom lists once, opening a new <chain>/<resid> node
        # whenever the chain ID or residue number changes
        for i in xrange(len(self.atnum)):
            if i == 0:
                lastchain = self.chain[i]
                lastresnum = self.resnum[i]
                chain = Node("chain", ID=lastchain)
                resid = Node("resid", ID=self.resname[i], nr=str(lastresnum))
                atom = Node("atom", ID=self.atname[i], nr=str(self.atnum[i]), corx=str(self.coord[i][0]),
                            cory=str(self.coord[i][1]), corz=str(self.coord[i][2]), occ=str(self.occ[i]),
                            b=str(self.b[i]))
                resid += atom
                chain += resid
                main += chain
            else:
                if self.chain[i] == lastchain:
                    lastchain = self.chain[i]
                    if self.resnum[i] == lastresnum:
                        atom = Node("atom", ID=self.atname[i], nr=str(self.atnum[i]), corx=str(self.coord[i][0]),
                                    cory=str(self.coord[i][1]), corz=str(self.coord[i][2]), occ=str(self.occ[i]),
                                    b=str(self.b[i]))
                        lastresnum = self.resnum[i]
                        resid += atom
                    else:
                        lastresnum = self.resnum[i]
                        resid = Node("resid", ID=self.resname[i], nr=str(lastresnum))
                        atom = Node("atom", ID=self.atname[i], nr=str(self.atnum[i]), corx=str(self.coord[i][0]),
                                    cory=str(self.coord[i][1]), corz=str(self.coord[i][2]), occ=str(self.occ[i]),
                                    b=str(self.b[i]))
                        resid += atom
                        chain += resid
                else:
                    lastchain = self.chain[i]
                    lastresnum = self.resnum[i]
                    chain = Node("chain", ID=lastchain)
                    resid = Node("resid", ID=self.resname[i], nr=str(lastresnum))
                    atom = Node("atom", ID=self.atname[i], nr=str(self.atnum[i]), corx=str(self.coord[i][0]),
                                cory=str(self.coord[i][1]), corz=str(self.coord[i][2]), occ=str(self.occ[i]),
                                b=str(self.b[i]))
                    resid += atom
                    chain += resid
                    main += chain
        return main

    def Reres(self, start):
        """
        Renumber residues. Option example: (4) renumber starting from 4.
        """
        start = int(start)
        lastresnum = -9999
        lastchain = ' '
        lastresname = ' '
        lastext = ' '
        idres = start - 1
        icount = 0
        # a new residue starts whenever chain, number, name or insertion
        # code changes relative to the previous record
        for i in xrange(len(self.resnum)):
            if i == 0:
                icount += 1
                lastchain = self.chain[i]
                lastresname = self.resname[i]
                lastresnum = self.resnum[i]
                lastext = self.resext[i]
                self.resnum[i] = start
                idres += 1
            else:
                if (self.chain[i] != lastchain or lastresnum != self.resnum[i] or
                        lastresname != self.resname[i] or lastext != self.resext[i]):
                    icount += 1
                    idres += 1
                    lastchain = self.chain[i]
                    lastresname = self.resname[i]
                    lastresnum = self.resnum[i]
                    self.resnum[i] = idres
                else:
                    self.resnum[i] = idres

    def Reatom(self, start):
        """
        Renumber atoms. Option example: (4) renumber complete list starting from 4.
        """
        start = int(start)
        newatomnum = range(start, (len(self.atnum) + start))
        if len(newatomnum) == len(self.atnum):
            self.atnum = newatomnum
        else:
            pass

    def CorrectConect(self, number):
        """
        Correct the CONECT statement when renumbering atoms.
        """
        # shift every atom reference by the same offset applied in Reatom
        diff = number - self.firstatnr
        correctconect = []
        for line in self.footer:
            p = line.split()
            if p[0] == 'CONECT':
                for atomnr in xrange(1, len(p)):
                    p[atomnr] = int(p[atomnr]) + diff
                correct = "CONECT"
                for n in p[1:]:
                    correct = correct + ("%5i" % n)
                correctconect.append(correct)
            else:
                correctconect.append(line)
        self.footer = correctconect

    def XsegChain(self):
        """
        Copy SEGID to CHAIN location.
        """
        hdoc_chain = []
        for chainid in self.hdoc_chain:
            # NOTE(review): strip() returns a new string which is discarded
            # here; only truly blank (single space) SEGIDs are filtered out
            chainid.strip()
            if chainid == ' ':
                pass
            else:
                hdoc_chain.append(chainid)
        if len(hdoc_chain) > 0:
            self.chain = self.hdoc_chain
        else:
            pass
if __name__ == '__main__':
    # Command-line entry point: parse options, require at least one -f input
    # file, then run the plugin core over the file list.
    """Running from the command line"""
    from optparse import *

    """Parse command line arguments"""
    option_dict = CommandlineOptionParser().option_dict

    """Check for input"""
    if option_dict['input'] == None:
        print "    * Please supply pdb file using option -f or use option -h/--help for usage"
        sys.exit(0)
    else:
        inputlist = option_dict['input']

    """Envoce main functions"""
    PluginCore(option_dict, inputlist)
    sys.exit(0)
| StarcoderdataPython |
1893148 | """
Abstract Class for using datatables package with server side processing option in Django project.
"""
from .utils.data_type_ensure import ensure
from .utils.enum import TripleEnum
from collections import OrderedDict, defaultdict
from typing import (
Tuple, Any, Dict
)
from rest_framework.serializers import ModelSerializer
from .forms import AbstractFooterForm
class DataTablesMeta(type):
    """
    Simple meta class to check if a subclass of DataTables defines a nested Meta
    class and inside the Meta class the variables:
        'serializer', 'frame', 'mapping' (perhaps 'form')
    are defined as desired data type.
    """

    def __new__(mcs, name: str, bases: Tuple[type, ...],
                namespace: Dict[str, Any]) -> type:
        """
        Validate the nested ``Meta`` class of every ``DataTables`` subclass at
        class-creation time, enforcing the following rules:
            1. serializer: must be a subclass of ModelSerializer
            2. frame: must be a non-empty list of dicts describing the table
               columns; each dict must contain the keys:
                   "header": header text to display,
                   "searchable": bool, column is searchable,
                   "orderable": bool, column is orderable,
                   "footer_type": type of the footer search bar (input/select),
                   "id": id of the footer search bar (combined with a prefix),
                   "serializer_key": key used to extract the value from the
                   serialized data set
            3. mapping: must be a subclass of TripleEnum, holding
               (frontend column number, model field name, filter key) triples —
               the key to get the correct data from the DB
            4. form: optional; must be a subclass of AbstractFooterForm and is
               required as soon as any footer_type is not 'input'/None
        :return: class instance
        """
        cls = super().__new__(mcs, name, bases, namespace)
        # the class 'DataTables' doesn't need any further checks
        if not bases:
            return cls
        # each subclass of class 'DataTables' must define the Meta class
        if "Meta" not in namespace:
            raise AttributeError("Class Meta isn't defined.")
        _meta = getattr(cls, "Meta")
        # make sure the Meta is a nested class
        if not isinstance(_meta, type):
            raise AttributeError("Meta isn't defined as a nested class.")
        # checks the Meta class contains the definitions of variables:
        # serializer, frame, mapping
        meta_attrs = {"serializer", "frame", "mapping"}
        missing_attrs = meta_attrs.difference(_meta.__dict__.keys())
        if missing_attrs:
            raise AttributeError("Variable(s) %r must be defined in Meta class."
                                 % list(missing_attrs))
        # checks the variables are not None
        for attr_name in meta_attrs:
            if getattr(_meta, attr_name) is None:
                raise AttributeError(
                    "Variable '%s' can not be None." % attr_name)
        # serializer should be a subclass of ModelSerializer from rest_framework
        serializer = getattr(_meta, "serializer")
        if not issubclass(serializer, ModelSerializer):
            raise TypeError(
                "Variable 'serializer' must be a subclass of ModelSerializer.")
        # frame should be a list of dictionaries and in each dictionary some
        # keys must be contained.
        frame = getattr(_meta, "frame")
        must_have_keys = {"id", "serializer_key", "header", "searchable",
                          "orderable", "footer_type"}
        if not isinstance(frame, list):
            raise ValueError("Variable 'frame' must be a list of dictionaries.")
        elif not frame:
            raise ValueError("Variable 'frame' must not be empty.")
        for item in frame:
            if not isinstance(item, dict):
                raise ValueError("Variable 'frame' must be a list of "
                                 "dictionaries.")
            missing_keys = must_have_keys.difference(item.keys())
            if missing_keys:
                raise ValueError("Keys %r are missing" % list(missing_keys))
        # mapping must be a subclass of TripleEnum class
        mapping = getattr(_meta, "mapping")
        if not issubclass(mapping, TripleEnum):
            raise ValueError("Variable 'mapping' must inherit from class "
                             "TripleEnum.")
        # form can be None, if the user doesn't user footer or uses input field
        # as footer. Otherwise, it must be defined as a subclass of
        # AbstractFooterForm
        if not hasattr(_meta, "form"):
            _meta.form = None
        if not _meta.form:
            for item in frame:
                if item.get("footer_type") not in {"input", None}:
                    raise ValueError(
                        "If you don't use 'input' as your table's footer type, "
                        "please do not leave the form as None.")
        elif not issubclass(_meta.form, AbstractFooterForm):
            raise ValueError(
                "Variable 'form' must be defined as a subclass of "
                "AbstractFooterForm or None.")
        cls._meta = _meta
        return cls
class DataTables(metaclass=DataTablesMeta):
    """
    This class aims to simplify the process of using datatables's serverside option
    in a django project. It defines a similar structure like django ModelForm, using Meta
    class. Inside the Meta class, the user has to define a couple of things:
        * serializer: a ModelSerializer class (using rest_framework package)
        * form: a normal django form, which defines the choice fields for the footer.
          An abstract dynamical form is provided in forms.py file.
        * structure: list of dict, defines the table structure in frontend
        * structure_for_superuser: same as above, but for superuser
        * mapping: TripleEnum class, which holds the mapping between column number
          in frontend, corresponding field name in model class and corresponding key
          for filtering in DB
    Besides the Meta class, the functions, 'get_query_dict', 'query_by_args', can be
    customized by the user according to some specific use cases. The other functions are
    not necessary to be overridden.
    """

    # NOTE(review): these properties read the raw inner ``Meta`` class directly,
    # while the metaclass stores its validated copy on ``cls._meta`` -- confirm
    # the asymmetry is intentional.
    serializer = property(lambda self: self.Meta.serializer)
    """Wrapper to render the serializer in Meta class, it provides a way to use
    one DataTables class with different serializers."""

    form = property(lambda self: self.Meta.form)
    """Wrapper to render the form in Meta class, it provides a way to use one
    DataTables class with different forms"""

    frame = property(lambda self: self.Meta.frame)
    """wrapper to render the structure in Meta class"""

    mapping = property(lambda self: self.Meta.mapping)
    """Wrapper to render the mapping in Meta class, it provides a way
    to use one DataTables class with different mappings."""

    def footer_form(self, *args, **kwargs):
        """
        wrapper to render an instance of the footer form, which is the form in
        Meta class
        :param args: list: args for the footer form initialization
        :param kwargs: dict: args for the footer form initialization
        :return: form instance
        """
        return self.form(*args, **kwargs)

    def get_table_frame(self, prefix="", table_id="sspdtable", *args, **kwargs):
        """
        render the structure (or structure_for_superuser) and an instance of the
        footer form
        :param prefix: str: used for unifying the rendered parameter's name,
        such
        that the template of serverside datatables can be used in one page
        multiple times
        :param table_id: str: DOM id for the rendered table; must be non-empty
        :param args: list: args for the footer form initialization
        :param kwargs: dict: args for the footer form initialization
        :return: dict
        """
        if not table_id:
            raise ValueError("table_id parameter can not be an empty string.")
        # NOTE(review): the context key is always prefix + "sspdtable";
        # ``table_id`` only ends up inside the payload, not in the key.
        table_key = prefix + "sspdtable"
        context = {
            table_key: {
                "id": table_id,
                "frame": self.frame
            }
        }
        # The footer form is only rendered when the Meta class defines one.
        if self.form:
            context[table_key]['footer_form'] = self.footer_form(*args, **kwargs)
        return context

    def get_query_dict(self, **kwargs):
        """
        function to generate a filter dictionary, in which the key is the
        keyword used in django filter function in string form, and the value is
        the searched value.
        :param kwargs:dict: query dict sent by data tables package
        :return: dict: filtering dictionary
        """
        total_cols = ensure(int, kwargs.get('total_cols', [0])[0], 0)
        mapping = self.mapping
        # Maps queryset method name ('filter', ...) -> {filter kwarg: value}.
        filter_dict = defaultdict(dict)
        # set up the starter, since sometimes we start the enumeration from '1'
        starter = mapping.keys()[0]
        for i in range(starter, total_cols):
            # DataTables serializes per-column parameters as columns[i][...].
            key = 'columns[{index}]'.format(index=i)
            if kwargs.get(key + '[searchable]', [0])[0] != 'true':
                continue
            search_value = kwargs.get(key + '[search][value]', [''])[0].strip()
            if not search_value:
                continue
            enum_item = mapping.from_key(i)
            filter_obj = enum_item.extra
            # ``extra`` is either (queryset method, filter kwarg) or just the
            # filter kwarg string (implying the plain 'filter' method).
            if type(filter_obj) is tuple and len(filter_obj) == 2:
                filter_func, filter_key = filter_obj
                filter_dict[filter_func][filter_key] = search_value
            elif type(filter_obj) is str:
                filter_dict['filter'][filter_obj] = search_value
            else:
                raise ValueError("Invalid filter key.")
        return filter_dict

    def get_order_key(self, **kwargs):
        """
        function to get the order key to apply it in the filtered queryset
        :param kwargs: dict: query dict sent by data tables package
        :return: str: order key, which can be used directly in queryset's
        order_by function
        """
        # get the mapping enumeration class from Meta class
        mapping = self.mapping
        # use the first element in the enumeration as default order column
        order_column = kwargs.get('order[0][column]',
                                  [mapping.keys()[0]])[0]
        order_column = ensure(int, order_column, mapping.keys()[0])
        order = kwargs.get('order[0][dir]', ['asc'])[0]
        order_key = mapping.from_key(order_column).label
        # django orm '-' -> desc
        if order == 'desc':
            order_key = '-' + order_key
        return order_key

    @staticmethod
    def filtering(queryset, query_dict):
        """
        function to apply the pre search condition to the queryset to narrow
        down the queryset's size
        :param queryset: Django Queryset: queryset of all objects
        :param query_dict: dict: contains selected_related, filter and other
        customized filter functions
        :return: queryset: result after applying the pre search condition dict
        """
        # apply pre_search_condition
        for key, value in query_dict.items():
            # ``key`` must name a queryset method (filter, select_related, ...).
            assert hasattr(queryset, key), "Parameter 'query_dict' contains"\
                " non-existent attribute."
            # Lists are splatted as positional args, dicts as keyword args.
            if isinstance(value, list):
                queryset = getattr(queryset, key)(*value)
            elif isinstance(value, dict):
                queryset = getattr(queryset, key)(**value)
            else:
                queryset = getattr(queryset, key)(value)
        return queryset

    @staticmethod
    def slicing(queryset, **kwargs):
        """
        function to slice the queryset according to the display length
        :param queryset: Django Queryset: filtered and ordered queryset result
        :param kwargs: dict: query dict sent by data tables package
        :return: queryset: result after slicing
        """
        # if the length is -1, we need to display all the records
        # otherwise, just slicing the queryset
        length = ensure(int, kwargs.get('length', [0])[0], 0)
        start = ensure(int, kwargs.get('start', [0])[0], 0)
        if length >= 0:
            queryset = queryset[start:start + length]
        return queryset

    def query_by_args(self, pre_search_condition=None, **kwargs):
        """
        intends to process the queries sent by data tables package in frontend.
        The model_cls indicates the model class, get_query_dict is a function
        implemented by you, such that it can
        return a query dictionary, in which the key is the query keyword in str
        form and the value is the queried value
        :param pre_search_condition: None/OrderedDict: dictionary contains
        filter conditions which should be processed before applying the filter
        dictionary from user. None, if no pre_search_condition provided.
        :param kwargs: QueryDict: contains query parameters
        :return: dict: contains total records number, queryset of the filtered
        instances, size of this queryset
        """
        if pre_search_condition and not isinstance(pre_search_condition, OrderedDict):
            raise TypeError(
                "Parameter 'pre_search_condition' must be an OrderedDict.")
        # extract requisite parameters from kwargs
        draw = ensure(int, kwargs.get('draw', [0])[0], 0)
        # just implement the get_query_dict function
        query_dict = self.get_query_dict(**kwargs)
        order_key = self.get_order_key(**kwargs)
        # get the model from the serializer parameter
        model_class = self.serializer.Meta.model
        # get the objects
        queryset = model_class.objects
        # apply the pre search condition if it exists
        if pre_search_condition:
            queryset = self.filtering(queryset, pre_search_condition)
        else:
            queryset = queryset.all()
        # number of the total records
        total = queryset.count()
        # if the query dict not empty, then apply the query dict
        if query_dict:
            queryset = self.filtering(queryset, query_dict)
        # number of the records after applying the query
        count = queryset.count()
        # order the queryset
        queryset = queryset.order_by(order_key)
        # slice the queryset
        queryset = self.slicing(queryset, **kwargs)
        return {'items': queryset, 'count': count, 'total': total, 'draw': draw}

    def process(self, pre_search_condition=None, **kwargs):
        """
        function to be called outside to get the footer search condition,
        apply the search in DB and render the serialized result.
        :param pre_search_condition: None/OrderedDict: pre search condition to
        be applied before applying the one getting from footer
        :param kwargs: dict: search parameters got from footer
        :return: dict: contains the filtered data, total number of records,
        number of filtered records and drawing number.
        """
        records = self.query_by_args(pre_search_condition=pre_search_condition,
                                     **kwargs)
        serializer = self.serializer(records['items'], many=True)
        # Key names follow the DataTables server-side response protocol.
        result = {
            'data': serializer.data,
            'draw': records['draw'],
            'recordsTotal': records['total'],
            'recordsFiltered': records['count'],
        }
        return result
| StarcoderdataPython |
1989910 | <gh_stars>0
import numpy as np
class TrainTestSplitter:
    """Split a ratings table into train/test parts, one yield per call.

    For every user with more than ``user_ratings_count_threshold`` ratings,
    roughly ``percent`` of that user's rating rows (sampled with replacement,
    so duplicates are possible) are held out as the test set; everything else
    stays in the training set.  ``books`` and ``users`` are passed through
    untouched.
    """

    def __init__(self, data_dict, user_ratings_count_threshold, percent):
        """Store the three data frames and the split parameters."""
        self.books = data_dict['books']
        self.users = data_dict['users']
        self.ratings = data_dict['ratings']
        self.user_ratings_count_threshold = user_ratings_count_threshold
        self.percent = percent

    def __iter__(self):
        """Yield a single ``(train, test)`` pair of data dictionaries."""
        per_user_counts = self.ratings.groupby('User-ID').agg(len)
        eligible_users = per_user_counts[per_user_counts.ISBN > self.user_ratings_count_threshold].index.values

        # Row labels of the ratings rows held out for testing.
        held_out = []
        for user_id in eligible_users:
            row_labels = np.array(self.ratings[self.ratings['User-ID'] == user_id].index.values)
            sample_size = int(len(row_labels) * self.percent)
            # Sampling with replacement, matching the historical behaviour.
            picks = np.random.randint(len(row_labels), size=sample_size)
            held_out.extend(row_labels[picks].tolist())

        in_test = self.ratings.index.isin(held_out)
        train = {
            'books': self.books, 'users': self.users, 'ratings': self.ratings[~in_test]
        }
        test = {
            'books': self.books, 'users': self.users, 'ratings': self.ratings[in_test]
        }
        yield train, test
| StarcoderdataPython |
3210380 | import numpy as np
import pandas as pd
from datetime import date
import os
# Load the cleaned source data and make sure the macro output directory exists.
print("Reading clean data")
# dtype=str keeps every column textual so later string operations are safe.
data = pd.read_csv('../clean/all_clean_data.csv', encoding='utf-8', dtype=str)
# exist_ok avoids the check-then-create race of os.path.exists + os.mkdir.
os.makedirs('../clean/macro', exist_ok=True)
def update_visa_class(x):
    """Return 'H-1B' when *x* parses to a date before 2007, else NaN.

    Used to back-fill VISA_CLASS for old records: before 2007 the dataset
    only contained H-1B cases, so a missing class can be inferred.
    """
    decision_year = pd.to_datetime(x).year
    return 'H-1B' if decision_year < 2007 else np.nan
print("Basic fixes to VISA_CLASS")
# Fix remaining VISA_CLASS issues: strip the "AUSTRALIAN" prefix used by some
# records, upper-case everything, and trim whitespace.
data['VISA_CLASS']=data.VISA_CLASS.str.upper().str.replace("AUSTRALIAN","").str.strip()
print("Updating CASE_STATUS")
# Fix inconsistent CASE_STATUSes: append "-WITHDRAWN" where the separate
# WITHDRAWN flag column says "Y" but the status string does not reflect it.
data["CASE_STATUS"]=data.apply(lambda x: x.CASE_STATUS+"-WITHDRAWN" if x.WITHDRAWN=="Y" else x.CASE_STATUS, axis=1)
print("Updating CASE_SUBMITTED")
# Back-fill a missing CASE_SUBMITTED date with DECISION_DATE when available.
data["CASE_SUBMITTED"]=data.apply(lambda x: x.DECISION_DATE if pd.isnull(x.CASE_SUBMITTED) and not pd.isnull(x.DECISION_DATE) else x.CASE_SUBMITTED, axis=1)
print("Updating VISA_CLASS")
# All pre-2007 records are H-1B, so a missing VISA_CLASS can be inferred
# from the decision date (see update_visa_class above).
data["VISA_CLASS"]=data.apply(lambda x: update_visa_class(x.DECISION_DATE) if pd.isnull(x.VISA_CLASS) else x.VISA_CLASS, axis=1)
print("Filtering and De-Duping Data")
# Keep only certified cases.
certified_data = data.query('CASE_STATUS=="CERTIFIED"')
# Sort ascending by submission then decision date so drop_duplicates below
# keeps the chronologically latest record per key.
certified_sorted_data = certified_data.sort_values(["CASE_SUBMITTED","DECISION_DATE"],ascending=True)
# Dedupe, keeping only the latest date for each (case, employer, ...) key.
certified_sorted_deduped_data = certified_sorted_data.drop_duplicates(["CASE_NUMBER","CASE_STATUS","EMPLOYER_NAME","TOTAL_WORKERS","WAGE_RATE_OF_PAY"], keep='last')
print("Writing deduped, filtered, clean data!")
# Write the final macro-level dataset to disk.
certified_sorted_deduped_data.to_csv("../clean/macro/cleaned_data_fixed_visa_dates_status.csv",index=False, encoding='utf-8')
| StarcoderdataPython |
5167690 | <filename>Adobe_CSV.py
import sys
import argparse
import csv
#TODO
#@V1.1
#@Date 2/13/2020
#@Author mlizbeth
#sort/diff switches?
# Column headers of Adobe's user-sync CSV, written as the first output row.
file_headers = ["Identity Type", "Username", "Domain", "Email", "First Name", "Last Name", "Country Code", "Product Configurations", "Admin Roles", "Product Configurations Administered",
                "User Groups", "User Groups Administered", "Products Administered", "Developer Access", "Auto Assigned Products"]
# Product configurations: index 0 is used for faculty/staff (mode 1),
# index 1 for students (mode 2) -- see write_document().
entitlements = ["Default All Apps plan - 1 TB configuration", "Default Spark with Premium Features for Higher-Ed - 2 GB configuration"]
# Known user groups (currently informational only; not written to the CSV).
groups = ["Students", "Faculty/Staff", "Class Exception", "Student Workers", "Campus Publications", "Service/Shared Accounts"]
# NOTE(review): parse_args() runs at import time, so importing this module
# without CLI arguments will exit -- fine for a script, hostile to imports.
parser = argparse.ArgumentParser(description="Adobe Entitlement Manager")
parser.add_argument("-i","--input", type=str, help="File to read from", required=True)
parser.add_argument("-o","--output", type=str, help="File to write to", required=True)
parser.add_argument("-m","--mode", type=int, help="Operating Mode.\n\t 1 for FacStaff, 2 for Students", required=True)
args = parser.parse_args()
class Entity:
    """One row of an Adobe Admin Console user-sync CSV.

    Class attributes hold tenant-wide constants and empty defaults for every
    CSV column; ``__init__`` fills in the per-user values.
    """

    # Per-user columns (empty defaults, overridden per instance).
    first_name = ""
    last_name = ""
    username = ""
    email = ""
    product_configurations = ""
    # Tenant-wide constants shared by every account.
    identity_type = "Federated ID"
    domain = "trinity.edu"
    country_code = "US"
    # Optional columns, normally left blank.
    admin_roles = ""
    user_groups = ""
    product_configurations_administered = ""
    user_groups_administered = ""
    products_administered = ""
    developer_access = ""
    auto_assigned_products = ""

    def __init__(self, first_name, last_name, username, email, product_configurations, admin_roles=None, product_configurations_administered=None, user_groups=None, user_groups_administered=None, products_administered=None, developer_access=None, auto_assigned_products=None):
        """Build one CSV row; optional arguments default to empty columns.

        BUG FIX: the optional keyword arguments used to be accepted and then
        silently discarded; they now populate the matching attributes.
        ``None`` still maps to the empty string, so output for existing
        callers (which pass only the first five arguments) is unchanged.
        """
        self.first_name = first_name
        self.last_name = last_name
        self.username = username
        self.email = email
        self.product_configurations = product_configurations
        self.admin_roles = admin_roles if admin_roles is not None else ""
        self.product_configurations_administered = (
            product_configurations_administered
            if product_configurations_administered is not None else "")
        self.user_groups = user_groups if user_groups is not None else ""
        self.user_groups_administered = (
            user_groups_administered if user_groups_administered is not None else "")
        self.products_administered = (
            products_administered if products_administered is not None else "")
        self.developer_access = (
            developer_access if developer_access is not None else "")
        self.auto_assigned_products = (
            auto_assigned_products if auto_assigned_products is not None else "")
def write_document():
    """Translate the input roster CSV into Adobe's user-sync CSV format.

    Reads ``args.input`` (skipping its first, junk, line), builds one Entity
    per row, and writes ``args.output`` with the Adobe column layout.
    Mode 1 assigns the faculty/staff entitlement, mode 2 the student one;
    any other mode prints an error and stops after the header row.
    """
    # newline='' is required so the csv module controls line endings itself
    # (otherwise Windows output gets blank rows between records).
    with open(args.output, mode='w', newline='') as out_file:
        with open(args.input, mode='r', errors='ignore') as in_file:
            csv_reader = csv.reader(in_file, delimiter=',')
            csv_writer = csv.writer(out_file, delimiter=',')
            next(in_file)  # skip the first line of in_file: it contains junk data
            csv_writer.writerow(file_headers)
            for row in csv_reader:
                # BUG FIX: was ``args.mode is 1`` / ``is 2`` -- identity
                # comparison against int literals; use value equality.
                if args.mode == 1:
                    e = Entity(row[0], row[1], row[2], row[2] + "@trinity.edu", entitlements[0])
                elif args.mode == 2:
                    e = Entity(row[0], row[1], row[2], row[2] + "@trinity.edu", entitlements[1])
                else:
                    print("Invalid syntax, please refer to documentation")
                    break
                csv_writer.writerow([e.identity_type, e.username, e.domain, e.email, e.first_name, e.last_name, e.country_code, e.product_configurations, e.admin_roles, e.product_configurations_administered, e.user_groups, e.user_groups_administered, e.products_administered, e.developer_access, e.auto_assigned_products])
if __name__ == "__main__":
    # BUG FIX: the original used ``and``, so the error only fired when EVERY
    # argument was missing; any single missing/zero argument should abort.
    # (argparse already enforces required=True, so this is a second guard.)
    if not (args.input and args.output and args.mode):
        print("You broke something!")
    else:
        write_document()
6519152 | """
Implement an iterator over a binary search tree (BST). Your iterator will be initialized with the root node of a BST.
Calling next() will return the next smallest number in the BST.
Note: next() and hasNext() should run in average O(1) time and uses O(h) memory, where h is the height of the tree.
"""
# Definition for a binary tree node
# Definition for a binary tree node
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        # Children start empty; in a BST, left holds smaller values,
        # right holds larger ones.
        self.val, self.left, self.right = x, None, None
class BSTIterator:
    """In-order iterator over a BST.

    next()/hasNext() run in amortized O(1) time and use O(h) memory, where
    h is the tree height: only the current root-to-leaf path is stacked.
    """

    # @param root, a binary search tree's root node
    def __init__(self, root):
        self._node = root   # next subtree still to be unwound
        self._path = []     # ancestors whose left side has been consumed

    # @return a boolean, whether we have a next smallest number
    def hasNext(self):
        # Truthy while either an unvisited subtree or a stacked ancestor remains.
        return self._node or self._path

    # @return an integer, the next smallest number
    def next(self):
        # Descend along left children, remembering each node on the stack.
        while self._node is not None:
            self._path.append(self._node)
            self._node = self._node.left
        # The deepest stacked node is the next smallest; continue in its
        # right subtree afterwards.
        smallest = self._path.pop()
        self._node = smallest.right
        return smallest.val
# Demo: build the BST
#       3
#      / \
#     2   4
#    /     \
#   1       5
# and walk it in order with the iterator.
a = TreeNode(1)
b = TreeNode(2)
c = TreeNode(3)
d = TreeNode(4)
e = TreeNode(5)
c.left = b
c.right = d
b.left = a
d.right = e

# Your BSTIterator will be called like this:
i, v = BSTIterator(c), []
while i.hasNext(): v.append(i.next())
# BUG FIX: ``print v`` is Python-2-only syntax and a SyntaxError on
# Python 3; the function form works on both.
print(v)
| StarcoderdataPython |
11244810 | <gh_stars>100-1000
import torch
import torch.utils.data as data
import numpy as np
import os
import sys
import subprocess
import shlex
import pickle
from .data_utils import grid_subsampling
# Absolute directory of this module, resolved once at import time.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Parent of the package directory; used as the default data root.
ROOT_DIR = os.path.dirname(BASE_DIR)
def pc_normalize(pc):
# Center and rescale point for 1m radius
pmin = np.min(pc, axis=0)
pmax = np.max(pc, axis=0)
pc -= (pmin + pmax) / 2
scale = np.max(np.linalg.norm(pc, axis=1))
pc *= 1.0 / scale
return pc
def get_cls_features(input_features_dim, pc, normal=None):
    """Build the per-point input feature tensor for classification.

    Args:
        input_features_dim: which feature layout to use:
            3 -> xyz; 4 -> [1, xyz]; 6 -> [xyz, normal]; 7 -> [1, xyz, normal].
        pc: (N, 3) float tensor of point coordinates.
        normal: (N, 3) float tensor of normals; required for dims 6 and 7.

    Returns:
        A contiguous (input_features_dim, N) tensor.

    Raises:
        NotImplementedError: for any other ``input_features_dim``.
    """
    if input_features_dim == 3:
        features = pc
    elif input_features_dim == 4:
        ones = torch.ones(size=(pc.shape[0], 1), dtype=torch.float32)
        features = torch.cat([ones, pc], -1)
    elif input_features_dim == 6:
        # BUG FIX: concatenate along the feature axis (-1).  The original
        # ``torch.cat([pc, normal])`` stacked along dim 0 and produced a
        # (2N, 3) tensor instead of the intended (N, 6) (cf. the dim-7 case).
        features = torch.cat([pc, normal], -1)
    elif input_features_dim == 7:
        ones = torch.ones(size=(pc.shape[0], 1), dtype=torch.float32)
        features = torch.cat([ones, pc, normal], -1)
    else:
        raise NotImplementedError(
            "Unsupported input_features_dim: {} (expected 3, 4, 6 or 7)".format(
                input_features_dim))
    # (N, C) -> (C, N), contiguous for downstream conv-style consumers.
    return features.transpose(0, 1).contiguous()
class ModelNet40Cls(data.Dataset):
    """ModelNet40 shape-classification dataset with optional grid subsampling.

    On first use each split is parsed from the raw ``*.txt`` clouds,
    optionally grid-subsampled, and cached to a pickle; later constructions
    load the cache directly.
    """

    def __init__(self, input_features_dim, num_points,
                 data_root=None, transforms=None, split='train',
                 subsampling_parameter=0.02, download=False):
        """ModelNet40 dataset for shape classification.

        Args:
            input_features_dim: input features dimensions, used to choose input feature type
            num_points: max number of points for the input point cloud.
            data_root: root path for data.
            transforms: data transformations.
            split: dataset split name ('train' or 'test').
            subsampling_parameter: grid length for pre-subsampling point clouds.
            download: whether to download when the dataset doesn't exist.
        """
        super().__init__()
        self.num_points = num_points
        self.input_features_dim = input_features_dim
        self.transforms = transforms
        # Normals are only needed for the 6- and 7-dim feature layouts.
        self.use_normal = (input_features_dim >= 6)
        self.subsampling_parameter = subsampling_parameter

        # The 40 ModelNet category names; label i maps to class_names[i].
        class_names = ['airplane', 'bathtub', 'bed', 'bench', 'bookshelf',
                       'bottle', 'bowl', 'car', 'chair', 'cone', 'cup',
                       'curtain', 'desk', 'door', 'dresser', 'flower_pot',
                       'glass_box', 'guitar', 'keyboard', 'lamp', 'laptop',
                       'mantel', 'monitor', 'night_stand', 'person', 'piano',
                       'plant', 'radio', 'range_hood', 'sink', 'sofa',
                       'stairs', 'stool', 'table', 'tent', 'toilet',
                       'tv_stand', 'vase', 'wardrobe', 'xbox']
        self.label_to_names = dict(enumerate(class_names))
        self.name_to_label = {v: k for k, v in self.label_to_names.items()}

        if data_root is None:
            self.data_root = os.path.join(ROOT_DIR, 'data')
        else:
            self.data_root = data_root
        if not os.path.exists(self.data_root):
            os.makedirs(self.data_root)
        self.folder = 'ModelNet40'
        self.data_dir = os.path.join(self.data_root, self.folder, 'modelnet40_normal_resampled')
        self.url = "https://shapenet.cs.stanford.edu/media/modelnet40_normal_resampled.zip"
        # Fetch and unpack the archive via curl/unzip if requested.
        if download and not os.path.exists(self.data_dir):
            zipfile = os.path.join(self.data_root, os.path.basename(self.url))
            subprocess.check_call(
                shlex.split("curl {} -o {}".format(self.url, zipfile))
            )
            subprocess.check_call(
                shlex.split("unzip {} -d {}".format(zipfile, os.path.join(self.data_root, self.folder)))
            )
            subprocess.check_call(shlex.split("rm {}".format(zipfile)))

        # Collect cloud file names for the requested split.
        # BUG FIX: dtype was ``np.str`` -- that alias was removed in NumPy
        # 1.24 and now raises AttributeError; the builtin ``str`` is what it
        # aliased.
        if split == 'train':
            names = np.loadtxt(os.path.join(self.data_dir, 'modelnet40_train.txt'), dtype=str)
        elif split == 'test':
            names = np.loadtxt(os.path.join(self.data_dir, 'modelnet40_test.txt'), dtype=str)
        else:
            raise KeyError(f"ModelNet40 has't split: {split}")

        # Cache file keyed by split and subsampling grid size.
        filename = os.path.join(self.data_root, self.folder, '{}_{:.3f}_data.pkl'.format(split, subsampling_parameter))
        if not os.path.exists(filename):
            print(f"Preparing ModelNet40 data with subsampling_parameter={subsampling_parameter}")
            point_list, normal_list, label_list = [], [], []
            # Parse every raw cloud; each row is x,y,z,nx,ny,nz.
            for i, cloud_name in enumerate(names):
                # The class name is the file name minus its trailing index.
                class_folder = '_'.join(cloud_name.split('_')[:-1])
                txt_file = os.path.join(self.data_dir, class_folder, cloud_name) + '.txt'
                data = np.loadtxt(txt_file, delimiter=',', dtype=np.float32)
                pc = data[:, :3]
                pc = pc_normalize(pc)
                normal = data[:, 3:]
                label = np.array(self.name_to_label[class_folder])
                # Grid-subsample points (and normals as features) if enabled.
                if subsampling_parameter > 0:
                    pc, normal = grid_subsampling(pc, features=normal, sampleDl=subsampling_parameter)
                point_list.append(pc)
                normal_list.append(normal)
                label_list.append(label)
            self.points = point_list
            self.normals = normal_list
            self.labels = label_list
            with open(filename, 'wb') as f:
                pickle.dump((self.points, self.normals, self.labels), f)
            # FIX: restore the file name in the log message (the placeholder
            # had been lost, leaving a literal "(unknown)").
            print(f"{filename} saved successfully")
        else:
            with open(filename, 'rb') as f:
                self.points, self.normals, self.labels = pickle.load(f)
            print(f"{filename} loaded successfully")
        print(f"{split} dataset has {len(self.points)} data with {num_points} points")

    def __getitem__(self, idx):
        """
        Returns:
            pc: (N, 3), a point cloud.
            mask: (N, ), 0/1 mask to distinguish padding points.
            features: (input_features_dim, N), input points features.
            label: (1), class label.
        """
        current_points = self.points[idx]
        cur_num_points = current_points.shape[0]
        if cur_num_points >= self.num_points:
            # Enough points: random subsample (with replacement) to num_points.
            choice = np.random.choice(cur_num_points, self.num_points)
            current_points = current_points[choice, :]
            if self.use_normal:
                current_normals = self.normals[idx]
                current_normals = current_normals[choice, :]
            mask = torch.ones(self.num_points).type(torch.int32)
        else:
            # Too few points: shuffle all, then pad by re-sampling existing
            # points; the mask marks the padded tail with zeros.
            padding_num = self.num_points - cur_num_points
            shuffle_choice = np.random.permutation(np.arange(cur_num_points))
            padding_choice = np.random.choice(cur_num_points, padding_num)
            choice = np.hstack([shuffle_choice, padding_choice])
            current_points = current_points[choice, :]
            if self.use_normal:
                current_normals = self.normals[idx]
                current_normals = current_normals[choice, :]
            mask = torch.cat([torch.ones(cur_num_points), torch.zeros(padding_num)]).type(torch.int32)

        label = torch.from_numpy(self.labels[idx]).type(torch.int64)
        # Stack normals next to coordinates so transforms see both.
        if self.use_normal:
            current_points = np.hstack([current_points, current_normals])
        if self.transforms is not None:
            current_points = self.transforms(current_points)
        pc = current_points[:, :3]
        normal = current_points[:, 3:]
        features = get_cls_features(self.input_features_dim, pc, normal)
        return pc, mask, features, label

    def __len__(self):
        """Number of point clouds in this split."""
        return len(self.points)
if __name__ == "__main__":
    # Smoke test: build train/test datasets and print one sample's pieces.
    # NOTE(review): these absolute imports expect the script to be executed
    # from inside the package directory (the module header imports
    # ``.data_utils`` relatively); run as a module otherwise.
    import data_utils as d_utils
    from torchvision import transforms
    transforms = transforms.Compose(
        [
            d_utils.PointcloudToTensor(),
        ]
    )
    dset = ModelNet40Cls(3, 10000, split='train', transforms=transforms)
    dset_test = ModelNet40Cls(3, 10000, split='test', transforms=transforms)
    # Print pc, mask, features and label of the first training sample.
    print(dset[0][0])
    print(dset[0][1])
    print(dset[0][2])
    print(dset[0][3])
    print(len(dset))
| StarcoderdataPython |
class Solution:
    def topKFrequent(self, words: "List[str]", k: int) -> "List[str]":
        """Return the k most frequent words, most frequent first.

        Ties are broken by ascending lexicographic order.  Annotations are
        strings because ``List`` is never imported in this file (the bare
        names raised NameError when the class body executed).
        """
        from collections import Counter  # local: the file has no import block
        # BUG FIX (performance): the original called words.count() once per
        # unique word -- O(n*m); Counter tallies everything in one pass.
        counts = Counter(words)
        # Sort by descending count, then ascending word for deterministic ties.
        return sorted(counts, key=lambda w: (-counts[w], w))[:k]
| StarcoderdataPython |
4994270 | from crawler.crawler import get_recursive_urls
from data.collect import collect_data
import argparse
from urllib.parse import urlparse
import json
from subprocess import Popen
from utils.utils import eprint, add_auth, dsum, dstd
import os
def get_domain(url):
    """Return the scheme://netloc prefix of *url* (no path, query, fragment)."""
    parts = urlparse(url)
    return "{0}://{1}".format(parts.scheme, parts.netloc)
def get_path(url):
    """Return the path of *url*, with its query string and fragment attached.

    A URL with an empty path yields '/'.  The result, prefixed by
    get_domain(url), reconstructs the original request target.
    """
    parsed = urlparse(url)
    path = parsed.path or '/'
    if parsed.query:
        # BUG FIX: was ``path += '/?' + query`` which rewrote e.g. '/p?q=1'
        # into '/p/?q=1' -- a different resource on most servers.
        path += '?' + parsed.query
    if parsed.fragment:
        path += '#' + parsed.fragment
    return path
def work(baseline_url, updated_url, max_depth, max_urls, prefix, auth_baseline_username, auth_baseline_password,
         auth_updated_username, auth_updated_password):
    # Crawl both deployments, screenshot every discovered path, run the
    # prediction worker as a subprocess, then aggregate the per-page scores
    # into a single <prefix>_report.json.  Exits the process on failure.
    # NOTE(review): the ``<PASSWORD>`` tokens below are redaction artifacts
    # (not valid Python); they should read auth_baseline_password and
    # auth_updated_password respectively.
    baseline_url = add_auth(url=baseline_url, username=auth_baseline_username, password=<PASSWORD>_baseline_password)
    updated_url = add_auth(url=updated_url, username=auth_updated_username, password=<PASSWORD>)
    crawled_baseline = get_recursive_urls(baseline_url, max_depth, max_urls)[:max_urls]
    crawled_upgraded = get_recursive_urls(updated_url, max_depth, max_urls)[:max_urls]
    baseline_domain = get_domain(baseline_url)
    updated_domain = get_domain(updated_url)
    # Work on paths only so the same endpoint can be compared on both hosts.
    crawled_baseline_paths = [get_path(path) for path in crawled_baseline]
    crawled_updated_paths = [get_path(path) for path in crawled_upgraded]
    all_paths = list(set(crawled_baseline_paths) | set(crawled_updated_paths))
    ss_report = {}
    # Screenshot every endpoint on both deployments; report keys are 1-based.
    for i, path in enumerate(all_paths):
        eprint('[LOG] Taking screenshots for {} - {}'.format(prefix, path))
        collect_data(baseline_domain + path, prefix + '_baseline', '{}.png'.format(i + 1))
        collect_data(updated_domain + path, prefix + '_updated', '{}.png'.format(i + 1))
        ss_report[i + 1] = {'baseline': baseline_domain + path, 'updated': updated_domain + path, 'endpoint': path,
                            'baseline_assets': 'tmp/' + prefix + "_baseline/",
                            'updated_assets': 'tmp/' + prefix + "_updated/"}
    eprint('[LOG] Finished taking screenshots for {}'.format(prefix))
    with open(os.path.join('./tmp', prefix + '_ss_report.json'), 'w') as f:
        json.dump(ss_report, f, indent=2)
    # Launch the inference worker and block until it finishes.
    p = Popen(['python3', 'worker_predict.py', '--baseline-dir', prefix + '_baseline', '--updated-dir',
               prefix + '_updated', '--prefix', prefix])
    if p.poll() is not None and p.poll() > 0:
        eprint('[ERR] Failed to launch inference process')
        exit(3)
    eprint('[LOG] Waiting for {}'.format(prefix))
    p.wait()
    if p.poll() != 0:
        eprint('[ERR] Prediction script failed for {}'.format(prefix))
        exit(p.poll())
    eprint('[LOG] Finished prediction for {}'.format(prefix))
    # Aggregate per-page scores into whole-site totals.
    ui_risk_scores = []
    network_risk_scores = []
    js_stats_total = []
    net_stats_total = []
    pixelwise_div_total = []
    mask_div_total = []
    with open(os.path.join('./tmp', prefix + '_report.json'), 'w') as f:
        scores_report = json.load(open(os.path.join('./tmp', prefix + '_scores.json')))
        screenshots_report = json.load(open(os.path.join('./tmp', prefix + '_ss_report.json')))
        page_report = {}
        for i in range(1, len(all_paths) + 1):
            page_report[i] = scores_report[str(i)]
            js_stats_total.append(scores_report[str(i)]["js_stats"])
            net_stats_total.append(scores_report[str(i)]["network_stats"])
            page_report[i]['links'] = screenshots_report[str(i)]
            ui_risk_scores.append(page_report[i]["ui_stats"]["risk_score"])
            network_risk_scores.append(page_report[i]["risk_score"])
            pixelwise_div_total.append(page_report[i]['ui_stats']['pixelwise_div'])
            mask_div_total.append(page_report[i]['ui_stats']['mask_div'])
        # Overall risk is the worst page's worst score (UI or network).
        page_report['risk_score'] = max(max(ui_risk_scores), max(network_risk_scores))
        page_report['js_stats'] = dsum(js_stats_total)
        page_report['ui_stats'] = {'pixelwise_div_mean': dsum(pixelwise_div_total, True),
                                   'mask_div_mean': dsum(mask_div_total, True),
                                   'pixelwise_div_std': dstd(pixelwise_div_total),
                                   'mask_div_std': dstd(mask_div_total)}
        page_report['network_stats'] = dsum(net_stats_total)
        json.dump(page_report, f, indent=4)
    eprint('[LOG] Saved {} report to {}'.format(prefix, prefix + '_report.json'))
    # Clean up the intermediate score/screenshot reports.
    os.remove(os.path.join('./tmp', prefix + '_scores.json'))
    os.remove(os.path.join('./tmp', prefix + '_ss_report.json'))
    exit(0)
if __name__ == '__main__':
    # CLI entry point: forwards all crawl/auth options straight to work().
    parser = argparse.ArgumentParser()
    parser.add_argument('--baseline-url', type=str)
    parser.add_argument('--updated-url', type=str)
    parser.add_argument('--max-depth', type=int)
    parser.add_argument('--max-urls', type=int)
    parser.add_argument('--prefix', type=str)
    # Optional HTTP basic-auth credentials for either deployment.
    parser.add_argument('--auth-baseline-username', type=str, default='')
    parser.add_argument('--auth-baseline-password', type=str, default='')
    parser.add_argument('--auth-updated-username', type=str, default='')
    parser.add_argument('--auth-updated-password', type=str, default='')
    args = parser.parse_args()
    work(args.baseline_url, args.updated_url, args.max_depth, args.max_urls, args.prefix, args.auth_baseline_username,
         args.auth_baseline_password, args.auth_updated_username, args.auth_updated_password)
| StarcoderdataPython |
3500702 | """Utility stuff that fitted nowhere else."""
from .dict2arr import (
dict2arr,
dict2arrlabels,
io_dict2arr,
)
from .read_sample import (
read_sample,
)
from .event_ixs import (
EventIxs,
)
from .par_trafo import (
ParTrafoBase,
ParTrafo,
)
from .log import (
log_samples,
)
| StarcoderdataPython |
3549693 |
from jk_hwriter import HWriter
from .htmlgeneral import *
class HTMLRawText(object):
    """Raw (unescaped) text node(s) for the HTML builder DSL.

    ``texts1`` holds the leading texts (each serialized on its own line);
    ``texts2`` holds trailing texts appended via indexing, serialized inline.
    """

    def __init__(self, textOrTextList):
        # Accept either a single string or an iterable of strings.
        if isinstance(textOrTextList, str):
            self.texts1 = [ textOrTextList ]
        else:
            self.texts1 = list(textOrTextList)
        self.texts2 = []
    #

    def __call__(self, **attrs):
        # NOTE(review): this joins the keyword argument *names* and discards
        # their values; that looks suspicious but is preserved as-is --
        # confirm the intended semantics upstream.
        self.texts1.append("".join(attrs))
        return self
    #

    def __getitem__(self, textOrTexts):
        # BUG FIX: strings are iterable, so the old hasattr(__iter__) test
        # exploded a plain string into single characters.  Treat a string as
        # one text item, mirroring __init__'s handling.
        if isinstance(textOrTexts, str):
            self.texts2.append(textOrTexts)
        elif hasattr(type(textOrTexts), "__iter__"):
            self.texts2.extend(textOrTexts)
        else:
            self.texts2.append(textOrTexts)
        return self
    #

    def _serialize(self, w:"HWriter"):
        # Leading texts each get their own line; trailing texts are inline.
        # (Annotation quoted so the class is importable without jk_hwriter.)
        if self.texts1:
            for text in self.texts1:
                w.lineBreak()
                w.write(text)
            w.lineBreak()
        for text in self.texts2:
            w.write(text)
    #

#
| StarcoderdataPython |
6640492 | <reponame>Harald-R/aw_nas
import torch
import torch.nn.functional as F
from aw_nas.objective.detection_utils.base import Losses
from aw_nas.utils import box_utils, getLogger
__all__ = ["MultiBoxLoss", "FocalLoss"]
class MultiBoxLoss(Losses):
    NAME = "multibox_loss"
    """SSD Weighted Loss Function
    Objective Loss:
        L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
        weighted by α which is set to 1 by cross val.
        Args:
            c: class confidences,
            l: predicted boxes,
            g: ground truth boxes
            N: number of matched default boxes
        See: https://arxiv.org/pdf/1512.02325.pdf for more details.
    """

    def __init__(self, num_classes, neg_pos=3, loc_coef=1.0, schedule_cfg=None):
        # num_classes excludes background; neg_pos is the hard-negative-mining
        # negative:positive ratio; loc_coef weights the localization term.
        super(MultiBoxLoss, self).__init__(schedule_cfg)
        self.num_classes = num_classes
        self.negpos_ratio = neg_pos
        self.loc_coef = loc_coef

    def filter_samples(self, predictions, targets):
        """Select which anchors contribute to the losses.

        Positives are anchors with a non-background target; negatives are the
        hardest (highest-loss) background anchors, capped at
        negpos_ratio * num_pos per image (SSD hard negative mining).
        Returns ((cls_idx, pos_idx), (N, N)) where both normalizers are the
        total positive count.
        """
        _, conf_data, loc_data = predictions
        conf_t, _ = targets
        batch_size = loc_data.size(0)
        pos_idx = conf_t > 0
        num_pos = pos_idx.sum(dim=1, keepdim=True)
        # Localization Loss (Smooth L1)
        # Shape: [batch,num_anchors,4]
        batch_conf = conf_data.view(-1, self.num_classes + 1)
        # Per-anchor softmax CE surrogate used only to rank negatives.
        loss_c = box_utils.log_sum_exp(batch_conf) - batch_conf.gather(
            1, conf_t.view(-1, 1)
        )
        # Hard Negative Mining
        tmp = pos_idx.reshape(loss_c.shape)
        loss_c[tmp] = 0  # filter out pos boxes for now
        loss_c = loss_c.view(batch_size, -1)
        # Double argsort: idx_rank[i] is the rank of anchor i when anchors are
        # sorted by descending loss, so "rank < num_neg" keeps the hardest.
        _, loss_idx = loss_c.sort(1, descending=True)
        _, idx_rank = loss_idx.sort(1)
        num_neg = torch.clamp(
            self.negpos_ratio * num_pos.to(torch.long), max=pos_idx.size(1) - 1
        )
        neg_idx = idx_rank < num_neg.expand_as(idx_rank)
        # Confidence Loss Including Positive and Negative Examples
        cls_idx = (pos_idx + neg_idx).gt(0)
        N = num_pos.data.sum()
        return (cls_idx, pos_idx), (N, N)

    def forward(self, predictions, targets, indices, normalizer):
        """Multibox Loss
        Args:
            predictions (tuple): A tuple containing loc preds, conf preds,
            and prior boxes from SSD net.
                conf shape: torch.size(batch_size,num_anchors,num_classes)
                loc shape: torch.size(batch_size,num_anchors,4)
                anchors shape: torch.size(num_anchors,4)
            targets (tensor): Ground truth boxes and labels for a batch,
            shape: [batch_size,num_objs,5] (last idx is the label).
            indices/normalizer: output of filter_samples().
        Returns a dict with 'cls_loss' and 'reg_loss'.
        """
        _, conf_data, loc_data = predictions
        conf_t, loc_t = targets
        loc_t = torch.autograd.Variable(loc_t, requires_grad=False)
        conf_t = torch.autograd.Variable(conf_t, requires_grad=False)
        cls_indices, pos_indices = indices
        cls_normalizer, pos_normalizer = normalizer
        # Classification over positives + mined negatives; regression over
        # positives only.
        conf_data = conf_data[cls_indices]
        conf_t = conf_t[cls_indices]
        loc_p = loc_data[pos_indices].view(-1, 4)
        loc_t = loc_t[pos_indices].view(-1, 4)
        loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction="sum")
        loss_c = F.cross_entropy(conf_data, conf_t, reduction="sum")
        # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        # NOTE(review): loss_l is divided by cls_normalizer and loss_c by
        # pos_normalizer -- this looks swapped, but both are the same value N
        # as returned by filter_samples, so the result is unaffected here.
        loss_l /= cls_normalizer
        loss_c /= pos_normalizer
        return {"cls_loss": loss_c, "reg_loss": loss_l * self.loc_coef}
class FocalLoss(Losses):
    NAME = "focal_loss"

    def __init__(
        self,
        num_classes,
        alpha,
        gamma,
        background_label=0,
        loc_coef=1.0,
        schedule_cfg=None,
    ):
        # alpha/gamma are the focal-loss balancing and focusing parameters;
        # background_label is the value written into the one-hot background
        # column; loc_coef weights the regression term.
        super().__init__(schedule_cfg)
        self.num_classes = num_classes
        self.alpha = alpha
        self.gamma = gamma
        self.background_label = background_label
        self.loc_coef = loc_coef

    def filter_samples(self, predictions, targets):
        """Select anchors and per-anchor normalizers for the focal loss.

        Anchors with conf_t >= 0 are kept for classification, conf_t > 0 for
        regression; images with no positive anchor are dropped entirely.
        Normalizers are each image's positive count, broadcast per anchor.
        """
        conf_t, _ = targets
        reserved_indices = conf_t >= 0
        positive_indices = conf_t > 0
        num_pos_per_batch = positive_indices.sum(-1).to(torch.float)
        has_pos_batch = num_pos_per_batch > 0
        # Exclude images without any positive anchor from both losses.
        reserved_indices[~has_pos_batch] = False
        positive_indices[~has_pos_batch] = False
        normalizer = torch.unsqueeze(num_pos_per_batch, -1)
        normalizer = normalizer.repeat(1, positive_indices.shape[-1])
        cls_normalizer = normalizer[reserved_indices]
        reg_normalizer = normalizer[positive_indices]
        return (reserved_indices, positive_indices), (cls_normalizer, reg_normalizer)

    def forward(self, predicts, targets, indices, normalizer):
        """Compute focal classification loss + smooth-L1 regression loss.

        predicts: (_, logits, regressions); targets: (conf_t, loc_t);
        indices/normalizer: output of filter_samples().
        Returns a dict with 'cls_loss' and 'reg_loss'.
        """
        _, logits, regressions = predicts
        conf_t, loc_t = targets
        confidences = logits.sigmoid()
        # Clamp away from 0/1 so the logs below stay finite.
        confidences = torch.clamp(confidences, 1e-4, 1.0 - 1e-4)
        batch_size = confidences.shape[0]
        device = confidences.device
        reserved_indices, positive_indices = indices
        cls_normalizer, reg_normalizer = normalizer
        confidences = confidences[reserved_indices]
        regressions = regressions[positive_indices]
        # convert label to one-hot encoding.
        conf_t = conf_t[reserved_indices]
        assert 0 < conf_t.max() < confidences.shape[-1], (
            "The number of classes exceeds the number of predicted classes, "
            "please ensure the correction of configuration."
        )
        conf_t = (
            torch.zeros_like(confidences)
            .to(device)
            .scatter_(1, conf_t.reshape(-1, 1), 1)
            .to(torch.float)
        )
        # NOTE(review): column 0 of the one-hot targets is overwritten with
        # background_label for every anchor -- confirm this is the intended
        # background handling.
        conf_t[:, 0] = self.background_label
        loc_t = loc_t[positive_indices]
        # Focal weighting: alpha for positives, (1-alpha) for negatives,
        # scaled by (1 - p_t)^gamma to down-weight easy examples.
        alpha_factor = torch.ones_like(confidences) * self.alpha
        alpha_factor = torch.where(
            torch.eq(conf_t, 1.0), alpha_factor, 1.0 - alpha_factor
        )
        focal_weight = torch.where(
            torch.eq(conf_t, 1.0), 1.0 - confidences, confidences
        )
        focal_weight = alpha_factor * torch.pow(focal_weight, self.gamma)
        # Elementwise binary cross-entropy on the clamped probabilities.
        bce = -(
            conf_t * torch.log(confidences)
            + (1.0 - conf_t) * torch.log(1.0 - confidences)
        )
        classification_losses = focal_weight * bce
        classfication_loss = (
            classification_losses / torch.unsqueeze(cls_normalizer, 1)
        ).sum() / batch_size
        # Smooth-L1 with beta = 1/9 on the positive anchors' box offsets.
        regression_diff = torch.abs(loc_t - regressions)
        regression_losses = torch.where(
            torch.le(regression_diff, 1.0 / 9.0),
            0.5 * 9.0 * torch.pow(regression_diff, 2),
            regression_diff - 0.5 / 9.0,
        )
        regression_loss = (
            regression_losses.mean(-1) / reg_normalizer
        ).sum() / batch_size
        return {
            "cls_loss": classfication_loss,
            "reg_loss": regression_loss * self.loc_coef,
        }
def sigmoid_cross_entropy_with_logits(logits, labels):
    """Numerically stable element-wise sigmoid cross-entropy.

    Implements max(x, 0) - x * z + log(1 + exp(-|x|)), the standard stable
    form of -[z*log(sigmoid(x)) + (1-z)*log(1-sigmoid(x))].
    """
    positive_part = torch.where(logits > 0, logits, torch.zeros_like(logits))
    linear_term = logits * labels
    log_term = torch.log(1 + torch.exp(-torch.abs(logits)))
    return positive_part - linear_term + log_term
class AdaptiveDistillationLoss(Losses):
    """
    ref: [Learning Efficient Detector with Semi-supervised Adaptive Distillation]
    (https://arxiv.org/abs/1901.00366)

    Distills a teacher's classification logits into the student, weighting
    each element by an adaptive factor that grows with the teacher/student
    KL divergence and the teacher's own uncertainty.
    """

    NAME = "adaptive_distillation_loss"
    # loss_coef can be changed over training by the schedule machinery.
    SCHEDULABLE_ATTRS = ["loss_coef"]

    def __init__(self, beta, gamma, temperature, loss_coef=1.0, schedule_cfg=None):
        """
        Args:
            beta: weight of the teacher-entropy term inside the adaptive factor.
            gamma: exponent applied to the adaptive weight.
            temperature: softening temperature applied to both logit sets.
            loss_coef: overall multiplier (schedulable); 0 disables the loss.
            schedule_cfg: forwarded to the Losses base class.
        """
        super().__init__(schedule_cfg)
        self.beta = beta
        self.gamma = gamma
        self.temperature = temperature
        self.loss_coef = loss_coef

    def forward(self, predictions, targets, indices, normalizer):
        """Compute the adaptive distillation loss on positive anchors only.

        Args:
            predictions: student outputs; predictions[1] are the class logits.
            targets: teacher outputs; targets[1] are the teacher class logits.
            indices: (_, positive_indices) anchor masks.
            normalizer: (_, pos_normalizer) per-positive-anchor divisors.
        """
        if self.loss_coef == 0:
            # Short-circuit: keep the return contract without any computation.
            return {"adaptive_soft_loss": torch.tensor(0.0).to(predictions[-1].device)}
        _, positive_indices = indices
        _, pos_normalizer = normalizer
        logit_s = predictions[1]
        logit_t = targets[1]
        # Temperature-soften both student and teacher logits.
        logit_s = logit_s[positive_indices] / self.temperature
        logit_t = logit_t[positive_indices] / self.temperature
        conf_t = logit_t.sigmoid()
        soft_cross_entropy = sigmoid_cross_entropy_with_logits(logit_s, conf_t)
        teacher_entropy = sigmoid_cross_entropy_with_logits(logit_t, conf_t)
        # KL(teacher || student) = H(t, s) - H(t).
        kullback_leiber_dist = -teacher_entropy + soft_cross_entropy
        # Adaptive weight: large when the student disagrees or the teacher
        # is confident (low entropy); see Eq. (4) of the paper.
        adaptive_weight = 1 - torch.exp(
            -kullback_leiber_dist - self.beta * teacher_entropy
        )
        if self.gamma != 1.0:
            adaptive_weight = torch.pow(adaptive_weight, self.gamma)
        adaptive_soft_loss = adaptive_weight * kullback_leiber_dist
        return {
            "adaptive_soft_loss": (adaptive_soft_loss.sum(-1) / pos_normalizer).sum()
            * self.loss_coef
        }
| StarcoderdataPython |
8017213 | import os, random, json
import numpy as np
from scipy import stats
import util
# --- Generation hyper-parameters ---
SEQ_SIZE = 8           # length of the sliding window of past samples fed to the model
NUM_TO_GEN = 20        # NOTE(review): not referenced anywhere in this script — confirm before removing
MODEL_DIR = 'trained_all/'   # directory containing the trained model.h5
PARSED_DIR = 'parsed_all/'   # directory containing the parsed vocabularies/sentences
MAKE_STATEFUL = False  # if True, rebuild the model below as batch-size-1 stateful LSTMs
IS_REVERSE = True      # if True, the newest sample is written at index 0 of the window
#Load titles
title_words, title_word_to_ix = util.load_title_dict(PARSED_DIR)
title_dict_size = len(title_words)
title_sentences = util.load_title_sentences(PARSED_DIR)
#Load comments
comment_words, comment_word_to_ix = util.load_comment_dict(PARSED_DIR)
comment_dict_size = len(comment_words)
comment_sentences = util.load_comment_sentences(PARSED_DIR)
# Titles and comments must be aligned pairwise.
assert(len(title_sentences) == len(comment_sentences))
def word_ixs_to_str(word_ixs, is_title):
    """Join word indices into readable text.

    Punctuation attaches to the previous word without a space, and
    contraction tails ('s, 're, ...) attach directly after an apostrophe.
    The first character of the result is upper-cased.
    """
    vocab = title_words if is_title else comment_words
    no_space_before = ['.', ',', "'", '!', '?', ':', ';', '...']
    contraction_tails = ['s', 're', 't', 'll', 've', 'd']
    text = ""
    for ix in word_ixs:
        word = vocab[ix]
        if not text or word in no_space_before:
            text += word
        elif text[-1] == "'" and word in contraction_tails:
            text += word
        else:
            text += ' ' + word
    if text:
        text = text[:1].upper() + text[1:]
    return text
def probs_to_word_ix(pk, is_first):
    """Sample a word index from the probability vector pk.

    pk is modified in place: for the first word, index 0 (end-of-sequence)
    is zeroed out; otherwise the distribution is sharpened by squaring.
    Either way it is renormalized before sampling.
    """
    if is_first:
        # Never emit the end-of-sequence token as the very first word.
        pk[0] = 0.0
    else:
        # Square to sharpen the distribution toward high-probability words.
        pk *= pk
    pk /= np.sum(pk)
    indices = np.arange(pk.shape[0], dtype=np.int32)
    sampler = stats.rv_discrete(name='custm', values=(indices, pk))
    return sampler.rvs()
def pred_text(model, context, max_len=64):
    """Autoregressively sample up to max_len word indices from the model.

    Args:
        model: Keras model taking [context, past_sample_window] inputs and
            returning per-step class probabilities.
        context: 1-D context vector (expanded to a batch of one here).
        max_len: maximum number of tokens to generate.
    Returns:
        list of sampled word indices; stops early when token 0
        (end-of-sequence) is drawn.
    """
    output = []
    context = np.expand_dims(context, axis=0)
    if MAKE_STATEFUL:
        # Stateful model consumes one past sample per step.
        past_sample = np.zeros((1,), dtype=np.int32)
    else:
        # Stateless model sees the whole SEQ_SIZE history window each step.
        past_sample = np.zeros((SEQ_SIZE,), dtype=np.int32)
    while len(output) < max_len:
        pk = model.predict([context, np.expand_dims(past_sample, axis=0)], batch_size=1)[-1]
        if MAKE_STATEFUL:
            pk = pk[0]
        else:
            # Shift the window; IS_REVERSE puts the newest sample at index 0.
            past_sample = np.roll(past_sample, 1 if IS_REVERSE else -1)
        new_sample = probs_to_word_ix(pk, len(output) == 0)
        past_sample[0 if IS_REVERSE else -1] = new_sample
        if new_sample == 0:
            # End-of-sequence token.
            break
        output.append(new_sample)
    model.reset_states()
    return output
#Load Keras and Theano
print("Loading Keras...")
import os, math
os.environ['KERAS_BACKEND'] = "tensorflow"
import tensorflow as tf
print("Tensorflow Version: " + tf.__version__)
import keras
print("Keras Version: " + keras.__version__)
from keras.layers import Input, Dense, Activation, Dropout, Flatten, Reshape, RepeatVector, TimeDistributed, concatenate
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D, Convolution1D
from keras.layers.embeddings import Embedding
from keras.layers.local import LocallyConnected2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.noise import GaussianNoise
from keras.layers.normalization import BatchNormalization
from keras.layers.recurrent import LSTM, SimpleRNN, GRU
from keras.models import Model, Sequential, load_model, model_from_json
from keras.optimizers import Adam, RMSprop, SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l1
from keras.utils import plot_model, to_categorical
from keras import backend as K
K.set_image_data_format('channels_first')
#Fix bug with sparse_categorical_accuracy
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import array_ops
def new_sparse_categorical_accuracy(y_true, y_pred):
    """Workaround metric for a Keras sparse_categorical_accuracy shape bug.

    Mirrors the stock metric but only squeezes y_true when its rank actually
    matches y_pred's, avoiding the erroneous squeeze in the affected
    TF/Keras release.
    """
    y_pred_rank = ops.convert_to_tensor(y_pred).get_shape().ndims
    y_true_rank = ops.convert_to_tensor(y_true).get_shape().ndims
    # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
    if (y_true_rank is not None) and (y_pred_rank is not None) and (len(K.int_shape(y_true)) == len(K.int_shape(y_pred))):
        y_true = array_ops.squeeze(y_true, [-1])
    y_pred = math_ops.argmax(y_pred, axis=-1)
    # If the predicted output and actual output types don't match, force cast them
    # to match.
    if K.dtype(y_pred) != K.dtype(y_true):
        y_pred = math_ops.cast(y_pred, K.dtype(y_true))
    # 1.0 where the arg-max prediction equals the true label, else 0.0.
    return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
#Load the model
print("Loading Model...")
model = load_model(MODEL_DIR + 'model.h5', custom_objects={'new_sparse_categorical_accuracy':new_sparse_categorical_accuracy})
if MAKE_STATEFUL:
    # Rebuild the trained model as a batch-size-1, one-step-per-call stateful
    # model by editing its JSON config, then restore the trained weights.
    weights = model.get_weights()
    model_json = json.loads(model.to_json())
    layers = model_json['config']['layers']
    for layer in layers:
        if 'batch_input_shape' in layer['config']:
            # Force batch size 1; collapse the SEQ_SIZE time axis to 1 step.
            layer['config']['batch_input_shape'][0] = 1
            if layer['config']['batch_input_shape'][1] == SEQ_SIZE:
                layer['config']['batch_input_shape'][1] = 1
        if layer['class_name'] == 'Embedding':
            layer['config']['input_length'] = 1
        if layer['class_name'] == 'RepeatVector':
            layer['config']['n'] = 1
        if layer['class_name'] == 'LSTM':
            # The saved model must be stateless; we flip it to stateful here.
            assert(layer['config']['stateful'] == False)
            layer['config']['stateful'] = True
    print(json.dumps(model_json, indent=4, sort_keys=True))
    model = model_from_json(json.dumps(model_json))
    model.set_weights(weights)
#plot_model(model, to_file='temp.png', show_shapes=True)
def generate_titles(my_title):
    """Print ten sampled comments for the given title.

    Words of the title that exist in the title vocabulary are echoed in
    upper case so the user can see which words the model actually used.
    """
    my_title = util.clean_text(my_title)
    my_words = my_title.split(' ')
    print(' '.join((w.upper() if w in title_word_to_ix else w) for w in my_words) + '\n')
    # Drop out-of-vocabulary words and encode the rest as a bag of words.
    my_title_ixs = [title_word_to_ix[w] for w in my_words if w in title_word_to_ix]
    my_title_sample = util.bag_of_words(my_title_ixs, title_dict_size)
    for i in range(10):
        print(' ' + word_ixs_to_str(pred_text(model, my_title_sample), False))
    print('')
# Interactive loop: read a title from stdin and generate comments until killed.
while True:
    my_title = input('Enter Title:\n')
    generate_titles(my_title)
| StarcoderdataPython |
11212754 | <reponame>KayanOkagawa/Cursoemvideo-Python3-Exercicios
def interface(conect):
    """Main menu loop for the user registry.

    Args:
        conect: an open read/write file handle (from load_arquivo) holding
            'name;age' records, one per line.
    Loops until the user picks option 3 (exit).
    """
    from time import sleep
    while True:
        print('-=' * 20)
        print('MENU PRINCIPAL')
        print('-=' * 20)
        print("""
1 - Lista Usuarios;
2 - Cadastra Usuarios;
3 - Sair
""")
        print('-=' * 20)
        opcao = verificador('Sua Opção')
        sleep(1)
        print('-=' * 20)
        if opcao == 1:
            listar(conect)
        elif opcao == 2:
            cadastrar(conect)
        elif opcao == 3:
            print('Desligando o programa...')
            break
        else:
            print('Erro! Digite um opção valida.')
def verificador(msg):
    """Prompt with msg until the user types a valid integer; return it.

    NOTE(review): on Ctrl-C (KeyboardInterrupt) this breaks out of the loop
    and implicitly returns None — callers comparing the result with == still
    work, but confirm this fall-through is intended.
    """
    while True:
        try:
            resp = int(input(f'{msg}: '))
        except ValueError:
            print('\033[01;31mErro! Digite uma opção valida.\033[m')
        except KeyboardInterrupt:
            print('\033[01;31mErro! Usuario desligou o programa.\033[m')
            break
        else:
            return resp
def load_arquivo(caminho='utilidade115/dados.txt'):
    """Open the user-data file for reading and writing, creating it if absent.

    Args:
        caminho: path of the data file (defaults to the original location,
            so existing callers are unaffected).
    Returns:
        an open file handle positioned at the start, readable and writable.
    """
    try:
        arquivo = open(caminho, 'r+')
    except FileNotFoundError:
        # 'w+' (not 'w') so the freshly created file can also be READ —
        # listar() calls readlines() on this handle, which would raise
        # io.UnsupportedOperation on a write-only 'w' handle.
        arquivo = open(caminho, 'w+')
    return arquivo
def listar(conect):
    """Print every 'name;age' record from the open file handle."""
    for indice, linha in enumerate(conect.readlines(), start=1):
        separador = linha.find(";")
        print(f'{indice}°PESSSOA')
        print(f'NOME: {linha[:separador]}')
        print(f'IDADE: {linha[separador+1:]}')
def cadastrar(conect):
    """Prompt for name and age and append the record unless it already exists.

    Args:
        conect: an open read/write file handle of 'name;age' lines; the
            read pass leaves the cursor at EOF, so write() appends.
    """
    inf_v = False
    name = str(input('Nome: ')).strip().lower()
    age = str(input('Idade: ')).strip().lower()
    inf = name + ';' + age
    for p in conect.readlines():
        # BUG FIX: stored lines keep their trailing '\n', so comparing the
        # raw line against the newline-free `inf` never matched and
        # duplicates were always re-registered. Strip the newline first.
        if p.rstrip('\n') == inf:
            inf_v = True
            break
    if inf_v:
        print('\033[01;31mUsuario já existe no sistema.\033[m')
    else:
        conect.write(f'\n{inf}')
        print('\033[01;32mUsuario registrado no sistema.\033[m')
| StarcoderdataPython |
11206506 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 22:12:24 2021
@author: leonl42
print useful statistics about the data
"""
import numpy as np
import matplotlib as plt
from scripts.util import get_freq_dist, PANDAS_DTYPE,COLUMN_LABEL,COLUMN_HASHTAGS, COLUMN_TIMEDELTAS,SUFFIX_POST,COLUMN_TWEET, COLUMN_LIKES,COLUMN_RETWEETS
import pandas as pd
import csv
from nltk import ngrams
from ast import literal_eval
# load data
df = pd.read_csv("data/preprocessing/preprocessed.csv", quoting = csv.QUOTE_NONNUMERIC, lineterminator = "\n", dtype = PANDAS_DTYPE)
################################################
#####plot a pair of frequency distributions#####
################################################
def plot_freq_dist(freq_dist_pos, freq_dist_neg, num, c1, c2):
    """Bar-plot the `num` most common elements of freq_dist_pos next to the
    frequency those SAME elements have in freq_dist_neg.

    NOTE(review): both series (and the x labels) are keyed by
    freq_dist_pos.most_common(num) — the negative distribution is only
    looked up at the positive distribution's top elements. That reads as
    intentional (comparing the same items across the two classes), but
    confirm it is not a copy-paste slip.

    Args:
        freq_dist_pos / freq_dist_neg: nltk FreqDist-like objects with
            .most_common(n) and .freq(element).
        num: number of elements to plot.
        c1, c2: matplotlib colors for the two bar series.
    """
    fig = plt.pyplot.figure()
    ax = fig.add_axes([0,0,1,1])
    X = np.arange(num)
    # ectract relative frequency of the 'num' most common elements and store them in the data vector
    data = [[freq_dist_pos.freq(element[0])for element in freq_dist_pos.most_common(num)],
            [freq_dist_neg.freq(element[0]) for element in freq_dist_pos.most_common(num)]]
    # extract the most common elements and store their labels
    labels = [element[0] for element in freq_dist_pos.most_common(num)]
    ax.set_xticklabels(labels, rotation=90)
    ax.xaxis.set_ticks([i for i in range(num)])
    ax.bar(X + 0.00, data[0], color = c1, width = 0.25)
    ax.bar(X + 0.25, data[1], color = c2, width = 0.25)
    plt.pyplot.show()
###########################################################
#####Split dataframe into positive and negative labels#####
###########################################################
grouped = df.groupby(COLUMN_LABEL)
df_true = grouped.get_group(True)
df_false= grouped.get_group(False)
#################################################
#####Plot most common hashtags per dataframe#####
#################################################
def get_most_common_hashtags(df):
    """Return the frequency distribution of the hashtag column of df.

    Thin wrapper over util.get_freq_dist applied to COLUMN_HASHTAGS.
    """
    freq_dist = get_freq_dist(df[COLUMN_HASHTAGS])
    return freq_dist
freq_dist_hashtags_pos = get_most_common_hashtags(df_true)
freq_dist_hashtags_neg = get_most_common_hashtags(df_false)
plot_freq_dist(freq_dist_hashtags_pos,freq_dist_hashtags_neg,50,'g','r')
plot_freq_dist(freq_dist_hashtags_neg,freq_dist_hashtags_pos,50,'r','g')
#################################
#####Plot average time delta#####
#################################
def statistics_time_deltas(df):
    """Compute (mean, std) of the three time-delta components per tweet.

    Each entry of df[COLUMN_TIMEDELTAS] is the string repr of a sequence
    whose first three items are (year, date, time) deltas.

    Returns:
        [(mean_year, std_year), (mean_date, std_date), (mean_time, std_time)]
    """
    # Accumulate into Python lists and convert once at the end:
    # np.append() reallocates the whole array on every call, which made the
    # original loop O(n^2).
    years, dates, times = [], [], []
    for entry in df[COLUMN_TIMEDELTAS]:
        parsed = literal_eval(str(entry))
        years.append(parsed[0])
        dates.append(parsed[1])
        times.append(parsed[2])
    years = np.asarray(years)
    dates = np.asarray(dates)
    times = np.asarray(times)
    return [(np.mean(years), np.std(years)),
            (np.mean(dates), np.std(dates)),
            (np.mean(times), np.std(times))]
print(statistics_time_deltas(df_true))
print(statistics_time_deltas(df_false))
############################
#####Plot average ngram#####
############################
def average_ngram(df,n):
    """Build the frequency distribution of word n-grams over post tweets.

    Each row of df[COLUMN_TWEET + SUFFIX_POST] is the string repr of a list
    of (word, tag) pairs; only the words are used.

    Args:
        df: dataframe with the preprocessed tweet column.
        n: n-gram order (e.g. 2 for bigrams).
    Returns:
        an nltk FreqDist over space-joined n-gram strings.
    """
    # Recover the plain word sequence for every tweet.
    formatted_column = []
    for post_tweet in df[COLUMN_TWEET+SUFFIX_POST]:
        words = [str(word_and_tag[0]) for word_and_tag in literal_eval(str(post_tweet))]
        formatted_column.append(words)
    ngrams_list = []
    for row in formatted_column:
        ngrams_zipped = (ngrams(row, n))
        for unziped in ngrams_zipped:
            #convert the n-tuple representing a ngram into a string and append this
            #string as a list to our ngrams
            ngrams_list.append([' '.join(list(unziped))])
    #compute the frequency distribution of our ngrams
    #and select the most common
    freq_dist = get_freq_dist(ngrams_list)
    return freq_dist
    #ngrams_list = [element for element in freq_dist.most_common(num_ngrams)]
    #return ngrams_list
freq_dist_ngram_pos = average_ngram(df_true,2)
freq_dist_ngram_neg = average_ngram(df_false,2)
print(freq_dist_ngram_pos.most_common(1))
print(freq_dist_ngram_neg.most_common(1))
plot_freq_dist(freq_dist_ngram_pos,freq_dist_ngram_neg,20,'g','r')
plot_freq_dist(freq_dist_ngram_neg,freq_dist_ngram_pos,20,'r','g')
################################
#####Plot label distribution####
################################
def get_label_dist_is_viral(df, threshold):
    """Relabel df in place and return the number of 'viral' tweets.

    A tweet is viral when likes + retweets exceed `threshold`. The label
    column of df is overwritten (in-place mutation preserved from the
    original, since callers loop over thresholds on the same frame).

    Args:
        df: tweet dataframe with likes/retweets columns.
        threshold: engagement cutoff.
    Returns:
        int: count of rows labelled True.
    """
    df[COLUMN_LABEL] = (df[COLUMN_LIKES] + df[COLUMN_RETWEETS]) > threshold
    # Count the True labels directly. The original
    # groupby(...).get_group(True) raised KeyError whenever NO tweet
    # exceeded the threshold (no True group exists); summing the boolean
    # column returns 0 in that case instead.
    return int(df[COLUMN_LABEL].sum())
num_pos_list = []
thresholds = []
iterations = 500
for i in range(iterations):
num_pos_list.append(get_label_dist_is_viral(df, i))
thresholds.append(i)
plt.pyplot.scatter(thresholds,num_pos_list)
###########################
#####character lenght######
###########################
def mean_std_tweet_length(tweets):
    """Return (mean, std) of the character length of each tweet."""
    lengths = np.array([len(str(tweet)) for tweet in tweets])
    return (np.mean(lengths), np.std(lengths))
print(mean_std_tweet_length(df_true[COLUMN_TWEET]))
print(mean_std_tweet_length(df_false[COLUMN_TWEET]))
######################################################################
#####plot how many viral tweets use a certain number of hashtags######
######################################################################
# Histogram: how often each value of len(hashtag) occurs among viral tweets.
# NOTE(review): `num = len(hashtag)` measures the LENGTH of each individual
# hashtag entry, while the surrounding comments talk about "number of
# hashtags used" — confirm whether each entry is itself a list (then this is
# correct) or a string (then this counts characters).
num_hashtags = {}
for hashtag_list in df_true[COLUMN_HASHTAGS]:
    for hashtag in literal_eval(str(hashtag_list)):
        #lenght of the hashtag is the number of hashtags that were used
        num = len(hashtag)
        if num in num_hashtags:
            num_hashtags[num] +=1
        else:
            num_hashtags[num] =1
| StarcoderdataPython |
5073301 | <filename>NumpyTutorial/Questions on Numpy Indexing.py
import numpy as np
#Questions on NumPy Indexing
#Replace NumPy array elements that doesn’t satisfy the given condition
#where Return elements chosen from x or y depending on condition.
x = np.array([[45.42436315, 52.48558583, 10.32924763],
[5.7439979, 50.58220701, 25.38213418]])
x[x >30] = 10
x = np.array([[45.42436315, 52.48558583, 10.32924763],
[5.7439979, 50.58220701, 25.38213418]])
np.where(x < 5, x, x*10)
#Return the indices of elements where the given condition is satisfied
x = np.array([[45.42436315, 52.48558583, 10.32924763],
[5.7439979, 50.58220701, 25.38213418]])
indieces = np.where(x < 50)
#Replace NaN values with average of columns
#nanmean Compute the arithmetic mean along the specified axis, ignoring NaNs.
x = np.array([[1.3, 2.5, 3.6, np.nan],
[2.6, 3.3, np.nan, 5.5],
[2.1, 3.2, 5.4, 6.5]])
x[np.isnan(x)] = np.nanmean(x)
x
#Replace negative value with zero in numpy array
x = np.array([[45.42436315, 52.48558583, -10.32924763],
[5.7439979, 50.58220701, -25.38213418]])
x[x < 0] = 0
x
#How to get values of an NumPy array at certain index positions?
#put Replaces specified elements of an array with given values.
a1 = np.array([11, 10, 22, 30, 33])
a2 = np.array([1, 15, 60])
a1.put([0, 4], a2)
a1
#Find indices of elements equal to zero in a NumPy array
x = np.array([[1.3, 2.5, 3.6, 0],
[2.6, 3.3, 0, 5.5],
[2.1, 3.2, 5.4, 6.5]])
np.where(x == 0)
#How to Remove columns in Numpy array that contains non-numeric values?
x = np.array([[1.3, 2.5, 3.6, np.nan],
[2.6, 3.3, np.nan, 5.5],
[2.1, 3.2, 5.4, 6.5]])
x[:, ~np.isnan(x).any(axis=0)]
#How to access different rows of a multidimensional NumPy array?
x = np.array([[1.3, 2.5, 3.6, np.nan],
[2.6, 3.3, np.nan, 5.5],
[2.1, 3.2, 5.4, 6.5]])
x[[0, 1]]
#Get row numbers of NumPy array having element larger than X
x = np.array([[1.3, 2.5, 3.6, 0],
[2.6, 3.3, 0, 5.5],
[2.1, 3.2, 5.4, 6.5]])
np.where(np.any(x > 6, axis = 1))
#Get filled the diagonals of NumPy array
#fill_diagonal Return the filled value in the diagonal of an array.
x = np.array([[1.3, 2.5, 3.6, 0],
[2.6, 3.3, 0, 5.5],
[2.1, 3.2, 5.4, 6.5]])
np.fill_diagonal(x, 10)
x
#Check elements present in the NumPy array
#isin Return boolean array having same size as of target_array.
x = np.array([[1.3, 2.5, 3.6, 0],
[2.6, 3.3, 0, 5.5],
[2.1, 3.2, 5.4, 6.5]])
np.isin(x, [1.3, 2.5, 5.4, 3.2])
#Combined array index by index
#dstack Return combined array index by index.
np.dstack((np.array([1, 2, 3]), np.array([4, 5, 6])))
| StarcoderdataPython |
6480949 | from flask import Blueprint
from .views import ComputeSimilarityAPI, IndexContributionAPI, SetupSimilarityAPI
# Flask blueprint wiring for the similarity service:
#   GET /similar/<contribution_id>/        -> compute similar contributions
#   GET /internal/init                     -> one-off index setup
#   GET /internal/index/<contribution_id>/ -> (re)index one contribution
similarity_blueprint = Blueprint("similarity", __name__)
similarity_blueprint.add_url_rule('/similar/<contribution_id>/',
                                  view_func=ComputeSimilarityAPI.as_view('compute_similarity'), methods=['GET'])
similarity_blueprint.add_url_rule('/internal/init',
                                  view_func=SetupSimilarityAPI.as_view('setup_similarity'), methods=['GET'])
similarity_blueprint.add_url_rule('/internal/index/<contribution_id>/',
                                  view_func=IndexContributionAPI.as_view('index_contribution'), methods=['GET'])
| StarcoderdataPython |
33715 | import dingus.codec
import utils
import transaction
class BlockHeader(object):
    """Placeholder for a blockchain block header; not implemented yet."""

    # Serialization/schema version of the header format.
    VERSION = 0

    def __init__(
        self,
    ) -> None:
        # Stub: constructing a BlockHeader is not supported yet.
        raise NotImplementedError
class Block(object):
    """Placeholder for a blockchain block; not implemented yet.

    Args (all unused until implemented):
        header: the block header.
        payload: the transactions carried by the block.
        fee: total fee of the block.
        asset_schema: protobuf message type describing the block asset.
        asset_params: parameters matching asset_schema.
        strict: whether to validate strictly.
    """

    # Serialization/schema version of the block format.
    VERSION = 0

    def __init__(
        self,
        header: "BlockHeader",
        payload: "list[transaction.Transaction]",
        fee: int,
        # Annotations are quoted (deferred) because GeneratedProtocolMessageType
        # is never imported in this module — an unquoted annotation is
        # evaluated at class-creation time and raised NameError on import.
        asset_schema: "GeneratedProtocolMessageType",
        asset_params: dict,
        strict: bool,
    ) -> None:
        # Stub: constructing a Block is not supported yet.
        raise NotImplementedError
| StarcoderdataPython |
4843673 | import heapq
import os
from typing import List, TypeVar
import numpy as np
_GridItem = TypeVar('_GridItem')


def tile(
    h: int,
    w: int,
    sub_grids: List[List[List[_GridItem]]]
) -> List[List[_GridItem]]:
    """Assemble an h-by-w mosaic of equally sized sub-grids into one grid.

    sub_grids are consumed in row-major order: the first w grids form the
    top band, the next w the second band, and so on.
    """
    tiled_grid: List[List[_GridItem]] = []
    for band_index in range(h):
        band = sub_grids[band_index * w:(band_index + 1) * w]
        for row in range(len(band[0])):
            # Concatenate row `row` of every grid in this band.
            tiled_grid.append([item for grid in band for item in grid[row]])
    return tiled_grid
class Cell:
    """Grid cell for Dijkstra's search over the risk map.

    All state is class-level and shared: `numbers` holds the risk grid and
    `cells` a registry of every created cell, so Cell.reset() must be called
    between independent searches.
    """

    numbers = None   # 2-D risk grid, set by the caller before constructing cells
    cells = {}       # registry of created cells keyed by (x, y)

    def __init__(self, x, y, cost=0):
        """Create a cell at (x, y); `cost` is the path cost to reach it
        (the cell's own risk value is added here)."""
        self.x = x
        self.y = y
        self.value = self.numbers[self.y][self.x]
        self.cost = self.value + cost
        self.visited = False
        # Register so later neighbors reuse this cell instead of re-creating it.
        self.cells[(self.x, self.y)] = self

    @classmethod
    def reset(cls):
        """Clear the shared grid and cell registry for a fresh search."""
        cls.numbers = None
        cls.cells = {}

    @classmethod
    def get_adj(cls, cell):
        """Return the in-bounds, not-yet-visited 4-neighbors of `cell`,
        creating new cells (with cost accumulated from `cell`) as needed."""
        adj = []
        dx = [-1, 0, 1, 0]
        dy = [0, 1, 0, -1]
        for _x, _y in zip(dx, dy):
            if (x := cell.x + _x) in range(0, len(cls.numbers[0])) and (y := cell.y + _y) in range(0, len(cls.numbers)):
                if (x, y) in cls.cells:
                    if not (c := cls.cells[(x, y)]).visited:
                        adj.append(c)
                else:
                    adj.append(cls(x, y, cell.cost))
        return adj

    def __lt__(self, other):
        # Ordering by path cost so cells can live in a heapq min-heap.
        return self.cost < other.cost
def increment(item, inc):
    """Risk-map wrap-around: add inc to item with values cycling in 1..9
    (9 + 1 wraps to 1, and a multiple of 9 maps to 9, never 0)."""
    wrapped = (item + inc) % 9
    return 9 if wrapped == 0 else wrapped
def main():
    """Solve both puzzle parts: read the risk grid from input.data, build the
    5x5 tiled grid with wrapped increments for part 2, and run Dijkstra on
    each. Returns (answer_1, answer_2)."""
    # Read contents of input (as a file) with a context manager
    file_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'input.data')
    )
    numbers = []
    with open(file_path, "r") as input_file:
        for line in input_file:
            line = list(map(int, line.strip()))
            numbers.append(line)
    # Pre-compute the grid incremented by 0..8 (values wrap 9 -> 1).
    all_numbers = dict()
    v_increment = np.vectorize(increment)
    for i in range(9):
        all_numbers[i] = v_increment(numbers, i)
    # Tile (i, j) of the 5x5 mosaic uses the grid incremented by i + j.
    sub_grids = []
    for i in range(5):
        for j in range(5):
            sub_grids.append(all_numbers[i+j])
    tiled_numbers = tile(5, 5, sub_grids)
    answer_1 = dijkstras(numbers)
    answer_2 = dijkstras(tiled_numbers)
    print(f"Puzzle 1 -> {answer_1}")
    print(f"Puzzle 2 -> {answer_2}")
    return answer_1, answer_2
def dijkstras(numbers):
    """Dijkstra shortest-path over the risk grid from (0, 0) to the
    bottom-right corner; returns the minimum total risk (the start cell's
    own value is excluded via the negative initial cost).

    Relies on the shared Cell class state, which is reset here first.
    """
    Cell.reset()
    Cell.numbers = numbers
    start = Cell(0, 0, -numbers[0][0])
    # end = Cell(len(numbers[0]) - 1, len(numbers) - 1)
    heap = []
    mins = {(0, 0): 0}   # best known cost per coordinate
    finished = False
    while not finished:
        # NOTE(review): this re-pushes `start` any time the heap is empty,
        # not just on the first iteration — harmless on a connected grid
        # (the heap never drains before the goal is reached), but confirm.
        if not heap:
            heapq.heappush(heap, start)
        c = heapq.heappop(heap)
        c.visited = True
        adj = Cell.get_adj(c)
        for ad in adj:
            # Push only if this path improves on the best known cost.
            if (ad.x, ad.y) not in mins or ad.cost < mins[(ad.x, ad.y)]:
                heapq.heappush(heap, ad)
                mins[(ad.x, ad.y)] = ad.cost
        # Stop when the cheapest frontier cell is the bottom-right corner.
        if heap[0].x == len(numbers[0]) - 1 and heap[0].y == len(numbers) - 1:
            finished = True
    return heap[0].cost
# Run the solver only when executed directly, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6524312 | from rest_framework.permissions import BasePermission
from .models import Itemlist
class IsOwner(BasePermission):
    """Custom permission class to allow itemlist owners to edit them."""

    def has_object_permission(self, request, view, obj):
        """Return True if permission is granted to the itemlist owner.

        The original isinstance(obj, Itemlist) branch was dead code: both
        branches returned the identical expression, so the type test is
        removed without any behavior change.
        """
        return obj.owner == request.user
| StarcoderdataPython |
1981738 | <reponame>terminalkitten/pdfmate<gh_stars>1-10
# -*- coding: utf-8 -*-
import asyncio
import io
import re
from tempfile import NamedTemporaryFile
from typing import BinaryIO, Dict, NoReturn, Union
from PyPDF2 import PdfFileMerger
from pyppeteer import errors, launch
from .configuration import DEFAULT_CONFIG
from .errors import InvalidSourceError
from .utils import is_iterable
class PDFMate(object):
    """
    Main class that does all generation routine.
    :param url_or_file: str - either a URL, a path to a file or a string containing HTML
    :param type_: str - either 'url', 'file' or 'string'
    :param options: dict (optional) with pyppeteer options
    """

    def __init__(self, sources, options=None):
        # Normalize to a list so single and multiple sources share one code path.
        self.sources = sources if is_iterable(sources) else [sources]
        self.configuration = DEFAULT_CONFIG
        self.options = self.configuration.options
        self.pyppeteer = self.configuration.pyppeteer
        self.environ = self.configuration.environ
        self.browser = None  # pyppeteer Browser, launched lazily in to_pdf()
        if options is not None:
            self.options.update(options)

    async def print_pyppeteer(self, source, output_path) -> Union[str, BinaryIO]:
        """Render one source to PDF in a fresh browser page.

        Returns the PDF bytes when output_path is falsy or '-', otherwise
        writes to output_path and returns that path.

        NOTE(review): if browser.newPage() itself raises, `page` is unbound
        and the finally clause raises NameError — confirm whether that case
        needs guarding.
        """
        is_stdout = (not output_path) or (output_path == '-')
        try:
            page = await self.browser.newPage()
            settings = self.pyppeteer
            # Apply optional page-level pyppeteer settings from configuration.
            emulateMediaType = settings.get('emulateMedia', None)
            if emulateMediaType:
                await page.emulateMedia(emulateMediaType)
            bypassCSPFlag = settings.get('setBypassCSP', None)
            if bypassCSPFlag:
                await page.setBypassCSP(bypassCSPFlag)
            requestInterceptionFlag = settings.get('setRequestInterception', None)
            if requestInterceptionFlag:
                await page.setRequestInterception(requestInterceptionFlag)
            cookies = settings.get('setCookie', None)
            if cookies:
                await page.setCookie(cookies)
            # Load the content: raw HTML string, open file object, or URL/path.
            if source.isString():
                await page.setContent(source.to_s())
            elif source.isFileObj():
                await page.setContent(source.source.read())
            else:
                path = source.urlPath()
                await page.goto(path, self.configuration.page_options)
            options = self.options
            # Options embedded in <meta> tags of the page override defaults.
            options.update(self._find_options_in_meta(await page.content()))
            if not is_stdout:
                options['path'] = output_path
            stdout = await page.pdf(options)
            return stdout if is_stdout else output_path
        finally:
            await page.close()

    async def merge_pdfs(self, input_pdfs, output_path=None) -> Union[str, BinaryIO]:
        """Merge several PDFs into one; return bytes for stdout mode,
        otherwise write to output_path and return the path."""
        is_stdout = (not output_path) or (output_path == '-')
        merger = PdfFileMerger()
        for pdf in input_pdfs:
            merger.append(pdf)
        if is_stdout:
            output = io.BytesIO()
        else:
            output = output_path
        merger.write(output)
        return output.getvalue() if is_stdout else output_path

    async def to_pdf(self, path=None) -> Union[str, BinaryIO, NoReturn]:
        """Render all sources, merging multiple results into a single PDF.

        Raises InvalidSourceError when pyppeteer fails with a NetworkError.
        NOTE(review): headless defaults to `not options.get('debug', True)`,
        i.e. headless is False unless 'debug' is explicitly set — confirm
        this default is intended.
        """
        result = None
        self.browser = await launch(
            args=["--no-sandbox --allow-insecure-localhost"]
            + self.configuration.browser_args,
            env=self.environ,
            headless=not self.options.get('debug', True),
            dumpio=self.options.get('debug', False),
            # Signal handlers disabled so embedding apps keep control.
            handleSIGINT=False,
            handleSIGTERM=False,
            handleSIGHUP=False,
        )
        try:
            count = len(self.sources)
            # Render all sources concurrently, each into its own output path.
            result = await asyncio.gather(
                *(
                    self.print_pyppeteer(source, self._get_output_path(path, i, count))
                    for i, source in enumerate(self.sources)
                )
            )
        except errors.NetworkError as e:
            raise InvalidSourceError(e)
        finally:
            await self.browser.close()
        if count > 1:
            result = await self.merge_pdfs(result, path)
        else:
            if is_iterable(result):
                result = result[0]
        return result

    def _get_output_path(self, path, i, count) -> str:
        """Per-source output path: a temp file when merging multiple
        sources, otherwise the caller's path unchanged."""
        if count > 1:
            return NamedTemporaryFile(
                prefix=f'{path}-{i}-', suffix='.pdf', delete=False
            ).name
        else:
            return path

    def _find_options_in_meta(self, content) -> Dict:
        """Reads 'content' and extracts options encoded in HTML meta tags
        :param content: str or file-like object - contains HTML to parse
        returns:
            dict: {config option: value}

        NOTE(review): meta_tag_prefix is interpolated into regex patterns
        unescaped — fine for plain prefixes, but regex metacharacters in the
        prefix would change the match.
        """
        if (
            isinstance(content, io.IOBase)
            or content.__class__.__name__ == 'StreamReaderWriter'
        ):
            content = content.read()
        found = {}
        for x in re.findall('<meta [^>]*>', content):
            if re.search('name=["\']%s' % self.configuration.meta_tag_prefix, x):
                name = re.findall(
                    'name=["\']%s([^"\']*)' % self.configuration.meta_tag_prefix, x
                )[0]
                found[name] = re.findall('content=["\']([^"\']*)', x)[0]
        return found
| StarcoderdataPython |
374019 | <gh_stars>0
# coding=utf-8
import json
import requests
from globalvars import GlobalVars
import threading
# noinspection PyPackageRequirements
import websocket
from collections import Iterable
from datetime import datetime, timedelta
from glob import glob
import sys
import traceback
import time
import os
import datahandling
import parsing
import apigetpost
import spamhandling
import classes
import chatcommunicate
from helpers import log, only_blacklists_changed
from gitmanager import GitManager
from blacklists import load_blacklists
# noinspection PyClassHasNoInit,PyBroadException,PyUnresolvedReferences,PyProtectedMember
class Metasmoke:
@staticmethod
def init_websocket():
    """Connect to the metasmoke ActionCable websocket and pump messages.

    Outer loop: (re)establish the connection and subscribe to
    SmokeDetectorChannel. Inner loop: receive frames forever, stamping
    metasmoke_last_ping_time and dispatching to handle_websocket_data.
    On a per-message failure the socket is rebuilt in place (without the
    key — presumably a degraded resubscribe; confirm). If the very first
    connection attempt fails the method gives up; after at least one
    success it retries every 10 seconds.
    """
    has_succeeded = False
    while True:
        try:
            GlobalVars.metasmoke_ws = websocket.create_connection(GlobalVars.metasmoke_ws_host,
                                                                  origin=GlobalVars.metasmoke_host)
            payload = json.dumps({"command": "subscribe",
                                  "identifier": "{\"channel\":\"SmokeDetectorChannel\","
                                                "\"key\":\"" + GlobalVars.metasmoke_key + "\"}"})
            GlobalVars.metasmoke_ws.send(payload)
            GlobalVars.metasmoke_ws.settimeout(10)
            has_succeeded = True
            while True:
                a = GlobalVars.metasmoke_ws.recv()
                try:
                    data = json.loads(a)
                    # Any successfully parsed frame counts as a liveness ping.
                    GlobalVars.metasmoke_last_ping_time = datetime.now()
                    Metasmoke.handle_websocket_data(data)
                except Exception as e:
                    # Rebuild the socket and resubscribe, then log the failure.
                    GlobalVars.metasmoke_ws = websocket.create_connection(GlobalVars.metasmoke_ws_host,
                                                                          origin=GlobalVars.metasmoke_host)
                    payload = json.dumps({"command": "subscribe",
                                          "identifier": "{\"channel\":\"SmokeDetectorChannel\"}"})
                    GlobalVars.metasmoke_ws.send(payload)
                    log('error', e)
                    try:
                        exc_info = sys.exc_info()
                        traceback.print_exception(*exc_info)
                    except:
                        log('error', "Can't fetch full exception details")
        except:
            log('error', "Couldn't bind to MS websocket")
            if not has_succeeded:
                break
            else:
                time.sleep(10)
@staticmethod
def check_last_pingtime():
    """Watchdog: force a process restart when metasmoke pings stop arriving.

    Exits with code 10 (triggering the supervisor's restart) when no ping
    has ever been received; logs a warning when the last ping is older
    than 120 s (the restart there is currently commented out). The error
    log is now opened with a context manager and only when there is
    something to write — the original opened it unconditionally on every
    check and never closed it, leaking a file handle each call.

    NOTE(review): `now` (utcnow) is only used in the log text, while the
    staleness comparison uses datetime.now(), matching how
    metasmoke_last_ping_time is stamped — confirm the mixed clocks are
    intentional.
    """
    now = datetime.utcnow()
    if GlobalVars.metasmoke_last_ping_time is None:
        with open('errorLogs.txt', 'a', encoding="utf-8") as errlog:
            errlog.write("\nINFO/WARNING: SmokeDetector has not received a ping yet, forcing SmokeDetector restart "
                         "to try and reset the connection states.\n%s UTC\n" % now)
        os._exit(10)
    elif GlobalVars.metasmoke_last_ping_time < (datetime.now() - timedelta(seconds=120)):
        with open('errorLogs.txt', 'a', encoding="utf-8") as errlog:
            errlog.write("\nWARNING: Last metasmoke ping with a response was over 120 seconds ago, "
                         "forcing SmokeDetector restart to reset all sockets.\n%s UTC\n" % now)
        # os._exit(10)
    else:
        pass  # Healthy: a ping arrived recently enough.
@staticmethod
def handle_websocket_data(data):
    """Dispatch one metasmoke websocket message by its payload key.

    Recognized keys: message (relay to chat), exit, blacklist, naa, fp,
    report (manual report -> spam pipeline), deploy_updated (autopull of
    blacklist-only changes after validation), commit_status (CI result,
    possibly triggering a restart to pull), everything_is_broken.
    """
    if "message" not in data:
        return
    message = data['message']
    if isinstance(message, Iterable):
        if "message" in message:
            chatcommunicate.tell_rooms_with("debug", message['message'])
        elif "exit" in message:
            os._exit(message["exit"])
        elif "blacklist" in message:
            datahandling.add_blacklisted_user((message['blacklist']['uid'], message['blacklist']['site']),
                                              "metasmoke", message['blacklist']['post'])
        elif "naa" in message:
            # Not-an-answer feedback: stop re-reporting this post.
            post_site_id = parsing.fetch_post_id_and_site_from_url(message["naa"]["post_link"])
            datahandling.add_ignored_post(post_site_id[0:2])
        elif "fp" in message:
            # False-positive feedback.
            post_site_id = parsing.fetch_post_id_and_site_from_url(message["fp"]["post_link"])
            datahandling.add_false_positive(post_site_id[0:2])
        elif "report" in message:
            # Manual report from a metasmoke user: fetch the post via the API
            # and push it through the normal spam-handling pipeline.
            post_data = apigetpost.api_get_post(message["report"]["post_link"])
            if post_data is None or post_data is False:
                return
            # Skip if already reported, unless it was marked false-positive.
            if datahandling.has_already_been_posted(post_data.site, post_data.post_id, post_data.title) \
                    and not datahandling.is_false_positive((post_data.post_id, post_data.site)):
                return
            user = parsing.get_user_from_url(post_data.owner_url)
            if user is not None:
                datahandling.add_blacklisted_user(user, "metasmoke", post_data.post_url)
            why = u"Post manually reported by user *{}* from metasmoke.\n".format(message["report"]["user"])
            postobj = classes.Post(api_response={'title': post_data.title, 'body': post_data.body,
                                                 'owner': {'display_name': post_data.owner_name,
                                                           'reputation': post_data.owner_rep,
                                                           'link': post_data.owner_url},
                                                 'site': post_data.site,
                                                 'is_answer': (post_data.post_type == "answer"),
                                                 'score': post_data.score, 'link': post_data.post_url,
                                                 'question_id': post_data.post_id,
                                                 'up_vote_count': post_data.up_vote_count,
                                                 'down_vote_count': post_data.down_vote_count})
            spamhandling.handle_spam(post=postobj,
                                     reasons=["Manually reported " + post_data.post_type],
                                     why=why)
        elif "deploy_updated" in message:
            # GitHub push notification. Only auto-pull when the commit touches
            # nothing but blacklist files and those files pass validation.
            sha = message["deploy_updated"]["head_commit"]["id"]
            if sha != os.popen('git log --pretty=format:"%H" -n 1').read():
                if "autopull" in message["deploy_updated"]["head_commit"]["message"]:
                    if only_blacklists_changed(GitManager.get_remote_diff()):
                        commit_md = "[`{0}`](https://github.com/Charcoal-SE/SmokeDetector/commit/{0})" \
                                    .format(sha[:7])
                        i = []  # Currently no issues with backlists
                        for bl_file in glob('bad_*.txt') + glob('blacklisted_*.txt'):  # Check blacklists for issues
                            with open(bl_file, 'r') as lines:
                                seen = dict()
                                for lineno, line in enumerate(lines, 1):
                                    # Validate line endings, blank lines and duplicates.
                                    if line.endswith('\r\n'):
                                        i.append("DOS line ending at `{0}:{1}` in {2}".format(bl_file, lineno,
                                                                                              commit_md))
                                    if not line.endswith('\n'):
                                        i.append("No newline at end of `{0}` in {1}".format(bl_file, commit_md))
                                    if line == '\n':
                                        i.append("Blank line at `{0}:{1}` in {2}".format(bl_file, lineno,
                                                                                         commit_md))
                                    if line in seen:
                                        i.append("Duplicate entry of {0} at lines {1} and {2} of {3} in {4}"
                                                 .format(line.rstrip('\n'), seen[line], lineno, bl_file, commit_md))
                                    seen[line] = lineno
                        if i == []:  # No issues
                            GitManager.pull_remote()
                            load_blacklists()
                            chatcommunicate.tell_rooms_with("debug", "No code modified in {0}, only blacklists"
                                                            " reloaded.".format(commit_md))
                        else:
                            i.append("please fix before pulling.")
                            chatcommunicate.tell_rooms_with("debug", ", ".join(i))
        elif "commit_status" in message:
            # CI result for a commit we don't have yet.
            c = message["commit_status"]
            sha = c["commit_sha"][:7]
            if c["commit_sha"] != os.popen('git log --pretty=format:"%H" -n 1').read():
                if c["status"] == "success":
                    if "autopull" in c["commit_message"]:
                        s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/Charcoal-SE/SmokeDetector/" \
                            "commit/{commit_sha})"\
                            " succeeded. Message contains 'autopull', pulling...".format(ci_link=c["ci_url"],
                                                                                        commit_sha=sha)
                        chatcommunicate.tell_rooms_with("debug", s)
                        time.sleep(2)
                        # Exit code 3: supervisor restarts us on the new code.
                        os._exit(3)
                    else:
                        s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/Charcoal-SE/SmokeDetector/" \
                            "commit/{commit_sha}) succeeded.".format(ci_link=c["ci_url"], commit_sha=sha)
                        chatcommunicate.tell_rooms_with("debug", s)
                elif c["status"] == "failure":
                    s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/Charcoal-SE/SmokeDetector/" \
                        "commit/{commit_sha}) failed.".format(ci_link=c["ci_url"], commit_sha=sha)
                    chatcommunicate.tell_rooms_with("debug", s)
        elif "everything_is_broken" in message:
            if message["everything_is_broken"] is True:
                os._exit(6)
@staticmethod
def send_stats_on_post(title, link, reasons, body, username, user_link, why, owner_rep,
post_score, up_vote_count, down_vote_count):
if GlobalVars.metasmoke_host is None:
log('info', "Metasmoke location not defined, not reporting")
return
metasmoke_key = GlobalVars.metasmoke_key
try:
if len(why) > 1024:
why = why[:512] + '...' + why[-512:]
post = {'title': title, 'link': link, 'reasons': reasons,
'body': body, 'username': username, 'user_link': user_link,
'why': why, 'user_reputation': owner_rep, 'score': post_score,
'upvote_count': up_vote_count, 'downvote_count': down_vote_count}
# Remove None values (if they somehow manage to get through)
post = dict((k, v) for k, v in post.items() if v)
payload = {'post': post, 'key': metasmoke_key}
headers = {'Content-type': 'application/json'}
requests.post(GlobalVars.metasmoke_host + "/posts.json", data=json.dumps(payload), headers=headers)
except Exception as e:
log('error', e)
@staticmethod
def send_feedback_for_post(post_link, feedback_type, user_name, user_id, chat_host):
if GlobalVars.metasmoke_host is None:
log('info', "Metasmoke location not defined; not reporting")
return
metasmoke_key = GlobalVars.metasmoke_key
try:
payload = {
'feedback': {
'user_name': user_name,
'chat_user_id': user_id,
'chat_host': chat_host,
'feedback_type': feedback_type,
'post_link': post_link
},
'key': metasmoke_key
}
headers = {'Content-type': 'application/json'}
requests.post(GlobalVars.metasmoke_host + "/feedbacks.json", data=json.dumps(payload), headers=headers)
except Exception as e:
log('error', e)
@staticmethod
def send_deletion_stats_for_post(post_link, is_deleted):
if GlobalVars.metasmoke_host is None:
log('info', "Metasmoke location not defined; not reporting deletion stats")
return
metasmoke_key = GlobalVars.metasmoke_key
try:
payload = {
'deletion_log': {
'is_deleted': is_deleted,
'post_link': post_link
},
'key': metasmoke_key
}
headers = {'Content-type': 'application/json'}
requests.post(GlobalVars.metasmoke_host + "/deletion_logs.json", data=json.dumps(payload), headers=headers)
except Exception as e:
log('error', e)
@staticmethod
def send_status_ping():
if GlobalVars.metasmoke_host is None:
log('info', "Metasmoke location not defined; not sending status ping")
return
metasmoke_key = GlobalVars.metasmoke_key
try:
payload = {
'location': GlobalVars.location,
'key': metasmoke_key,
'standby': GlobalVars.standby_mode
}
headers = {'content-type': 'application/json'}
response = requests.post(GlobalVars.metasmoke_host + "/status-update.json",
data=json.dumps(payload), headers=headers)
try:
response = response.json()
if 'failover' in response and GlobalVars.standby_mode:
if response['failover']:
GlobalVars.standby_mode = False
GlobalVars.metasmoke_last_ping_time = datetime.now() # Otherwise the ping watcher will exit(10)
chatcommunicate.tell_rooms_with("debug", GlobalVars.location + " received failover signal.")
if response['standby']:
chatcommunicate.tell_rooms_with("debug",
GlobalVars.location + " entering metasmoke-forced standby.")
time.sleep(2)
os._exit(7)
if 'shutdown' in response:
if response['shutdown']:
os._exit(6)
except Exception:
pass
except Exception as e:
log('error', e)
@staticmethod
def update_code_privileged_users_list():
payload = {'key': GlobalVars.metasmoke_key}
headers = {'Content-type': 'application/json'}
response = requests.get(GlobalVars.metasmoke_host + "/api/users/code_privileged",
data=json.dumps(payload), headers=headers).json()['items']
GlobalVars.code_privileged_users = set()
for id in response["stackexchange_chat_ids"]:
GlobalVars.code_privileged_users.add(("stackexchange.com", id))
for id in response["meta_stackexchange_chat_ids"]:
GlobalVars.code_privileged_users.add(("meta.stackexchange.com", id))
for id in response["stackoverflow_chat_ids"]:
GlobalVars.code_privileged_users.add(("stackoverflow.com", id))
@staticmethod
def determine_if_autoflagged(post_url):
"""
Given the URL for a post, determine whether or not it has been autoflagged.
"""
payload = {
'key': GlobalVars.metasmoke_key,
'filter': 'GKNJKLILHNFMJLFKINGJJHJOLGFHJF', # id and autoflagged
'urls': post_url
}
response = requests.get(GlobalVars.metasmoke_host + "/api/v2.0/posts/urls", params=payload).json()
if len(response["items"]) > 0 and response["items"][0]["autoflagged"]:
# get flagger names
id = str(response["items"][0]["id"])
payload = {'key': GlobalVars.metasmoke_key}
flags = requests.get(GlobalVars.metasmoke_host + "/api/v2.0/posts/" + id + "/flags", params=payload).json()
if len(flags["items"]) > 0:
return True, [user["username"] for user in flags["items"][0]["autoflagged"]["users"]]
return False, []
@staticmethod
def stop_autoflagging():
payload = {'key': GlobalVars.metasmoke_key}
headers = {'Content-type': 'application/json'}
requests.post(GlobalVars.metasmoke_host + "/flagging/smokey_disable",
data=json.dumps(payload), headers=headers)
@staticmethod
def send_statistics():
GlobalVars.posts_scan_stats_lock.acquire()
if GlobalVars.post_scan_time != 0:
posts_per_second = GlobalVars.num_posts_scanned / GlobalVars.post_scan_time
payload = {'key': GlobalVars.metasmoke_key,
'statistic': {'posts_scanned': GlobalVars.num_posts_scanned, 'api_quota': GlobalVars.apiquota,
'post_scan_rate': posts_per_second}}
else:
payload = {'key': GlobalVars.metasmoke_key,
'statistic': {'posts_scanned': GlobalVars.num_posts_scanned, 'api_quota': GlobalVars.apiquota}}
GlobalVars.post_scan_time = 0
GlobalVars.num_posts_scanned = 0
GlobalVars.posts_scan_stats_lock.release()
headers = {'Content-type': 'application/json'}
if GlobalVars.metasmoke_host is not None:
requests.post(GlobalVars.metasmoke_host + "/statistics.json",
data=json.dumps(payload), headers=headers)
| StarcoderdataPython |
1759485 | # -*- coding: utf-8 -*-
import sqlite3
import argparse
from urllib.request import Request, urlopen
import csv
import re
import xml.etree.ElementTree as ET
from typing import List, Dict
import collections
import json
# Get the BLAST result text file name from the command line.
# Invoke the script as: $ python m2o.py <file path>, passing a BLAST
# format-6 output file as the argument.
parser = argparse.ArgumentParser()
parser.add_argument('arg1', type=argparse.FileType('r'))
args = parser.parse_args()

# EFetch URL template for NCBI Nucleotide (one sequence id, FASTA as XML).
ncbi_nucleotide_base = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&id={}&rettype=fasta&retmode=xml'
# Local SQLite database caching sequence-id -> taxonomy/organism lookups.
target_db = "data/orgname_2021_4"
# Cache table name: sequence id, taxonomy id, organism name.
tbl = "sid_tax_orgname"
# Output path for the final JSON population summary.
file_out = "data/population.json"
def main():
    """
    Take a BLAST result (format-6 text file) as input, convert each hit's
    sequence id to an organism name, and tally the hits at species level.

    :return: [[taxonomy, organism name, count, ratio], ...]
    """
    # Run this once when first creating the database; since re-running would
    # require dropping the table, it is left commented out for reuse.
    # create_table()

    # Read the file and collect the sequence id from every line.
    sids = get_sequence_id()

    # Query NCBI Nucleotide with each sequence id, obtain taxonomy and
    # organism name, and cache them.  Safe to comment out once the database
    # is fully populated.
    for i in sids:
        # Fetch from NCBI only when the sequence id is not yet in the db.
        if is_new_id(i):
            store_orgname(get_genbank_data(i))

    # Count sequence ids across the BLAST best hits (top similarity per
    # read), then re-aggregate the counts by organism/taxonomy.
    count_tax = count_orgname(sids)
    # Append each taxonomy's share of all reads.
    count_tax_p = add_percentage_value(len(sids), count_tax)
    return count_tax_p
def get_sequence_id():
    """
    Extract the sequence id from every BLAST hit (each row of the format-6
    output).

    :return: list of sequence ids, one per input row.
    """
    f = args.arg1
    ids = []
    with open(f.name, 'r') as f:
        reader = csv.reader(f, delimiter='\t')
        for row in reader:
            # The subject field looks like "<id>_<N>|<genus>...", so split on
            # '_' and '|' and keep the leading id.
            s = re.split('[_|]', row[1])[0]
            ids.append(s)
    return ids
    # c = collections.Counter(ids)
    # Write [(id, count), ...] out in descending order
    # top_n = c.most_common(20)
def create_table():
    """
    Create the sequence-id -> (orgname, taxonomy) cache table if it is absent.

    The original existence check ran ``SELECT count(*)`` against the table
    itself (which raises OperationalError when the table does not exist) and
    then called ``len()`` on the integer count (a TypeError).  Both are fixed
    by consulting the sqlite_master schema catalogue instead.
    """
    sql_orgname = """
    create table {}(
       sid text PRIMARY KEY,
       orgname text,
       taxonomy text
    );
    """
    conn = sqlite3.connect(target_db)
    cur = conn.cursor()
    # Ask the schema catalogue whether the table already exists.
    cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (tbl,))
    if cur.fetchone() is None:
        cur.execute(sql_orgname.format(tbl))
        conn.commit()
    conn.close()
def store_orgname(n: tuple):
    """
    Persist one (sequence id, taxonomy id, organism name) row to the SQLite
    cache, keyed by sequence id, so NCBI is queried at most once per id.

    :param n: (sid, taxonomy, orgname) tuple as returned by get_genbank_data().
    """
    conn = sqlite3.connect(target_db)
    cur = conn.cursor()
    # Use bound placeholders: the original interpolated the raw tuple into
    # the SQL text, which breaks on quotes/special characters in names.
    cur.execute('INSERT INTO {} VALUES (?, ?, ?)'.format(tbl), n)
    conn.commit()
    conn.close()
def is_new_id(i):
    """
    Return True when sequence id *i* is not yet cached in the database.

    :param i: NCBI sequence id.
    """
    conn = sqlite3.connect(target_db)
    cur = conn.cursor()
    # Bind the id as a parameter rather than formatting it into the SQL text.
    cur.execute('SELECT * from {} where sid=?'.format(tbl), (i,))
    is_new = len(cur.fetchall()) == 0
    conn.close()  # the original leaked this connection
    return is_new
def get_genbank_data(sid) -> tuple:
    """
    Fetch the GenBank record for a sequence id via NCBI EFetch and extract
    the taxonomy id and organism name.

    :param sid: NCBI sequence id to look up.
    :return: (sid, taxonomy: str, organism name: str)
    """
    url = ncbi_nucleotide_base.format(sid)
    req = Request(url)
    with urlopen(req) as res:
        xml_data = res.read()
    root = ET.fromstring(xml_data)
    # Each record is expected to carry exactly one of each tag; the loops
    # keep the last occurrence.
    for v in root.iter('TSeq_taxid'):
        taxonomy = v.text
    for v in root.iter('TSeq_orgname'):
        orgname = v.text
    # NOTE(review): if the record lacks these tags, taxonomy/orgname are
    # never bound and the return raises NameError -- confirm all inputs
    # contain them.
    return sid, taxonomy, orgname
def count_orgname(sids) -> List[list]:
    """
    Map the list of sequence ids to taxonomies and count hits per taxonomy.

    :param sids: sequence ids from the BLAST best hits.
    :return: [[taxonomy, orgname, count], ...] in descending count order.
    """
    sid_taxonomy = map(lambda x: get_taxonomy(x), sids)
    # c = collections.Counter(sid_taxonomy)
    # Each element is a (sid, taxonomy, orgname) row; count by taxonomy only.
    t = [x[1] for x in sid_taxonomy]
    c = collections.Counter(t)
    tax_count = c.most_common()
    # Enrich each (taxonomy, count) pair with its organism name.
    tax_count = [get_orgname(x) for x in tax_count]
    return tax_count
def get_taxonomy(sid: str) -> tuple:
    """
    Look up the cached (sid, taxonomy, organism name) row for a sequence id.

    :param sid: NCBI sequence id.
    :return: the matching (sid, taxonomy, orgname) row, or None when absent.
    """
    conn = sqlite3.connect(target_db)
    cur = conn.cursor()
    # Bind the id as a parameter rather than formatting it into the SQL text.
    cur.execute('SELECT * from {} where sid=?'.format(tbl), (sid,))
    sid_tax_org = cur.fetchone()
    conn.close()
    return sid_tax_org
def get_orgname(tax_c) -> list:
    """
    Insert the organism name into a (taxonomy, count) entry.

    The return annotation is corrected from ``tuple`` to ``list``: the
    function has always returned a list.

    :param tax_c: (taxonomy, count) tuple from Counter.most_common().
    :return: [taxonomy, orgname, count] list.
    """
    conn = sqlite3.connect(target_db)
    cur = conn.cursor()
    t = tax_c[0]
    # Bind the taxonomy as a parameter rather than formatting it into the SQL.
    cur.execute('SELECT orgname from {} where taxonomy=?'.format(tbl), (t,))
    n = cur.fetchone()
    tax_c = list(tax_c)
    tax_c.insert(1, n[0])
    conn.close()
    return tax_c
def add_percentage_value(l: int, to: List[list]) -> List[list]:
    """
    Append to every [taxonomy, orgname, count] row its share of all reads.

    :param l: total number of reads (length of the sequence-id list).
    :param to: rows produced by count_orgname(); mutated in place.
    :return: the same list, with count / l appended to each row.
    """
    for row in to:
        row.append(row[2] / l)
    return to
def test_count_taxonomy():
    """Ad-hoc smoke test: run the counting pipeline and print the result."""
    sids = get_sequence_id()
    res = count_orgname(sids)
    res = add_percentage_value(len(sids), res)
    print(res)
def test_get_taxonomy(sid):
    """Ad-hoc helper: exercise get_taxonomy for a single sequence id."""
    get_taxonomy(sid)
if __name__ == "__main__":
    # Run the pipeline and persist the population summary as JSON.
    population = main()
    with open(file_out, 'w') as f:
        json.dump(population, f)
| StarcoderdataPython |
7222 | <gh_stars>1-10
from seeker.models import Building, Classroom, Time
import json
import os
os.chdir('../data')
fileList = os.listdir()

# Loop through each JSON schedule file (one per building).
for jsonfile in fileList:
    # Open the json file and load the data; `with` guarantees the handle is
    # closed (the original left it open).
    with open(jsonfile, 'r') as f:
        jsondata = json.load(f)
    building_name = os.path.splitext(jsonfile)[0]  # file name minus extension
    # Create the building.
    building = Building(BuildingName=building_name)
    building.save()
    for day in jsondata:
        for room in jsondata[day].keys():
            # Create each classroom, adding one only if it doesn't exist.
            classroom = Classroom.objects.get_or_create(
                building=Building.objects.get(BuildingName=building_name),
                ClassroomName=building_name + ' - ' + room)
            for time_value in jsondata[day][room]:
                # Create each time slot; renamed so the Time instance no
                # longer shadows the loop variable.
                time_slot = Time(building=Building.objects.get(BuildingName=building_name),
                                 classroom=Classroom.objects.get(ClassroomName=building_name + ' - ' + room),
                                 DayofWeek=day, TimeValue=time_value)
                time_slot.save()
#IMPORTANT!!!!!!!
# This program must be run inside a python manage.py shell for it to work, in the future a fix may be found,
# but for the time being, follow these steps:
# 1. open powershell and navigate to the folder that contains this file
# 2. type in "python manage.py shell"
# 3. copy and paste the code into the shell and press enter
# 4. wait time is around 5 minutes
| StarcoderdataPython |
6415826 | <gh_stars>10-100
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.test import TestCase
from comment.models import Comment
from blog.models import Blog
# Create your tests here.
class CommentTest(TestCase):
    """Tests for the generic-relation Comment model (threaded comments on Blog)."""

    def setUp(self) -> None:
        """Create one user, one blog post, a root comment and a reply to it."""
        self.user = get_user_model().objects.create_user(
            phone="98912888888",
        )
        self.blog = Blog.objects.create(
            author=self.user,
            title='title-test-1',
            body='title-test-1',
            summary="summary-test-1",
            special=True,
            status='p',
            visits=1,
        )
        self.comment1 = Comment.objects.create(
            user=self.user,
            name="test-name-1",
            content_type=ContentType.objects.get_for_model(self.blog),
            object_id=self.blog.pk,
            parent=None,
            body="test-body-1",
        )
        self.comment2 = Comment.objects.create(
            user=self.user,
            name="test-name-2",
            content_type=ContentType.objects.get_for_model(self.blog),
            object_id=self.blog.pk,
            parent=self.comment1,
            body="test-body-2",
        )

    def test_str_method(self):
        # str(comment) is expected to be the commenting user's phone number.
        # assertEquals/assertNotEquals are deprecated aliases; use the
        # canonical assertEqual/assertNotEqual spellings.
        self.assertEqual(str(self.comment1), self.user.phone)
        self.assertEqual(str(self.comment2), self.user.phone)
        self.assertNotEqual(str(self.comment1), self.user.first_name)

    def test_comment1_instance(self):
        self.assertEqual(self.comment1.body, "test-body-1")
        self.assertEqual(self.comment1.user, self.user)
        self.assertIsNone(self.comment1.parent)

    def test_comment2_instance(self):
        self.assertEqual(self.comment2.body, "test-body-2")
        self.assertEqual(self.comment2.user, self.user)
        self.assertIsNotNone(self.comment2.parent)

    def test_model_manager(self):
        # filter_by_instance should return this blog's comments, newest first.
        query = Comment.objects.filter_by_instance(self.blog)
        self.assertQuerysetEqual(query, [self.comment2, self.comment1])
| StarcoderdataPython |
9765447 | <reponame>K-Mike/ml_package
# -*- coding: utf-8 -*-
from __future__ import print_function
from ._version import get_versions
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# Resolve the package version from versioneer's generated _version module.
__version__ = get_versions()['version']
del get_versions  # keep the module namespace clean

# Project from http://confusable-homoglyphs.readthedocs.io/en/latest/apidocumentation.html
# Fix some issues, but don't remember where
# TODO: write about errors on GitHub
6567205 | <reponame>nickerso/opencmiss-libcellml<gh_stars>1-10
import os
from omniidl import idlvisitor, output, idlast
import jnutils
import string
class NativeStubVisitor (idlvisitor.AstVisitor):
    """omniidl AST visitor emitting the Java->C++ ("pcm") JNI bridge.

    For each IDL file it writes three outputs: j2p<module>.hxx (declarations),
    j2p<module>Mod.cpp (the JNI native-method stubs), and j2p<module>Sup.cpp
    (the wrap_* support factories that turn C++ objects into Java proxies).
    """

    def __init__(self):
        # Components of the JNI-mangled native function name being built
        # ("Java_pjm2pcm_..." once joined).
        self.mangleParts = ['Java', 'pjm2pcm']
        # IDL scope (module/interface) components of the current node.
        self.inameParts = []
        # Most recently computed full JNI function name (see calculateMangled).
        self.mangled = ''

    def visitAST(self, node):
        """Open the three output streams, emit per-file boilerplate (include
        guard, includes, import/export macro blocks), then visit every
        declaration defined in the main IDL file."""
        directory, basename = os.path.split(node.file())
        self._included = ['xpcom.idl']
        if string.lower(basename[-4:]) == '.idl':
            basename = basename[:-4]
        self.hxx = output.Stream(open('j2p' + basename + '.hxx', 'w'))
        self.cppMod = output.Stream(open('j2p' + basename + 'Mod.cpp', 'w'))
        self.cppSup = output.Stream(open('j2p' + basename + 'Sup.cpp', 'w'))
        self.hxx.out(
            "// This output is automatically generated. Do not edit.")
        self.modname = basename
        self.defname='J2P__' + self.modname
        guardname=self.defname + '__INCLUDED'
        self.hxx.out('#ifndef ' + guardname)
        self.hxx.out("#define " + guardname)
        self.hxx.out('#include "pick-jni.h"')
        self.hxx.out('#include "j2psupport.hxx"')
        self.hxx.out('#include "Iface' + basename + '.hxx"')
        self.cppMod.out('#include <exception>')
        self.cppMod.out('#include "cda_compiler_support.h"')
        self.cppMod.out('#include "j2p' + basename + '.hxx"')
        self.cppMod.out('#include "p2j' + basename + '.hxx"')
        self.cppMod.out('#include <Utilities.hxx>')
        self.cppSup.out('#include <exception>')
        self.cppSup.out('#include "cda_compiler_support.h"')
        self.cppSup.out('#include "j2p' + basename + '.hxx"')
        self.cppSup.out('#include "p2j' + basename + '.hxx"')
        self.cppSup.out('#include <Utilities.hxx>')
        # Emit a j2p include for every other IDL file this one pulls in
        # (skipping the main file and xpcom.idl).
        for n in node.declarations():
            if n.mainFile():
                pass
            else:
                filename = n.file()
                pos = string.rfind(filename, '/')
                if pos != -1:
                    filename = filename[pos + 1:]
                if not filename in self._included:
                    self._included.append(filename)
                    if filename[-4:] == ".idl":
                        filename = filename[0:-4] + ".hxx"
                    if filename != "xpcom.idl":
                        self.hxx.out('#include "j2p@filename@"',
                                     filename=filename)
        # Define the DLL import/export macros: exporting when building this
        # module/library, importing when consuming it.
        self.hxx.out('#undef PUBLIC_JAVAMOD_PRE')
        self.hxx.out('#undef PUBLIC_JAVAMOD_POST')
        self.hxx.out('#ifdef IN_MODULE_%s' % self.defname)
        self.hxx.out('#define PUBLIC_JAVAMOD_PRE CDA_EXPORT_PRE')
        self.hxx.out('#define PUBLIC_JAVAMOD_POST CDA_EXPORT_POST')
        self.hxx.out('#else')
        self.hxx.out('#define PUBLIC_JAVAMOD_PRE CDA_IMPORT_PRE')
        self.hxx.out('#define PUBLIC_JAVAMOD_POST CDA_IMPORT_POST')
        self.hxx.out('#endif')
        self.hxx.out('#undef PUBLIC_JAVALIB_PRE')
        self.hxx.out('#undef PUBLIC_JAVALIB_POST')
        self.hxx.out('#ifdef IN_LIBRARY_%s' % self.defname)
        self.hxx.out('#define PUBLIC_JAVALIB_PRE CDA_EXPORT_PRE')
        self.hxx.out('#define PUBLIC_JAVALIB_POST CDA_EXPORT_POST')
        self.hxx.out('#else')
        self.hxx.out('#define PUBLIC_JAVALIB_PRE CDA_IMPORT_PRE')
        self.hxx.out('#define PUBLIC_JAVALIB_POST CDA_IMPORT_POST')
        self.hxx.out('#endif')
        for n in node.declarations():
            if n.mainFile():
                n.accept(self)
        self.hxx.out('#endif // not ' + guardname)

    def pushManglePart(self, name):
        """Append one name component, JNI-escaped ('_'->'_1', '/'->'_')."""
        self.mangleParts.append(string.replace(string.replace(name, '_', '_1'), '/', '_'))

    def popManglePart(self):
        self.mangleParts.pop()

    def calculateMangled(self):
        """Join the accumulated parts into the full JNI native function name."""
        self.mangled = string.join(self.mangleParts, '_')

    def visitModule(self, node):
        """Recurse into an IDL module, tracking the scope for naming/mangling."""
        self.inameParts.append(node.identifier());
        self.pushManglePart(jnutils.JavaName(node))
        for defn in node.definitions():
            defn.accept(self)
        self.popManglePart()
        self.inameParts.pop()

    def findAllInherits(self, node):
        """Return the transitive list of interfaces *node* inherits from."""
        inherits = []
        for n in node.inherits():
            inherits.append(n)
            inherits = inherits + self.findAllInherits(n)
        return inherits

    def visitInterface(self, node):
        """Emit, for one interface: the wrap_* factory converting a C++
        object to a Java proxy, native finalize / nqueryInterface / hashCode
        stubs, and then stubs for all (including inherited) members."""
        self.pushManglePart(jnutils.JavaName(node))
        self.inameParts.append(node.identifier())
        constructor = 'wrap_' + string.join(self.inameParts, '_')
        classsig = string.join(self.inameParts, '/')
        scopedn = jnutils.ScopedCppName(node)
        cxxclass = 'iface::' + scopedn
        self.cxxclass = cxxclass
        self.hxx.out('PUBLIC_JAVALIB_PRE jobject ' + constructor + '(JNIEnv* env, ' + cxxclass + '* obj) PUBLIC_JAVALIB_POST;')
        self.cppSup.out('jobject ' + constructor + '(JNIEnv* env, ' + cxxclass + '* obj)')
        self.cppSup.out('{')
        self.cppSup.inc_indent()
        self.cppSup.out("if (obj == NULL)")
        self.cppSup.inc_indent()
        self.cppSup.out('return NULL;')
        self.cppSup.dec_indent()
        # "user-callback" pragma marks interfaces that may be implemented in
        # Java (p2j wrappers), which must be unwrapped rather than re-wrapped.
        hasCallback = 0
        for p in node.pragmas(): hasCallback = hasCallback or (p.text() == "user-callback")
        if hasCallback != 0:
            # If it is a p2j object, unwrap it...
            self.cppSup.out('p2j::' + scopedn + ' * wrap = dynamic_cast<p2j::' + scopedn + '*>(obj);')
            self.cppSup.out('if (wrap != NULL)')
            self.cppSup.inc_indent()
            self.cppSup.out('return env->NewLocalRef(wrap->unwrap());')
            self.cppSup.dec_indent()
        # It is a non-Java C++ object, so make a Java wrapper for it...
        self.cppSup.out('jclass clazz = env->FindClass("pjm2pcm/' + classsig + '");')
        self.cppSup.out('jmethodID constr = env->GetMethodID(clazz, "<init>", "()V");')
        self.cppSup.out('jobject wrapper = env->NewObject(clazz, constr);')
        self.cppSup.out('jfieldID fid = env->GetFieldID(clazz, "nativePtr", "J");')
        self.cppSup.out('obj->add_ref();')
        self.cppSup.out('jlong field = reinterpret_cast<int64_t>(obj);')
        self.cppSup.out('env->SetLongField(wrapper, fid, field);')
        self.cppSup.out('fid = env->GetFieldID(clazz, "nativePtr_xpcom_iobject", "J");')
        self.cppSup.out('field = reinterpret_cast<int64_t>(static_cast<iface::XPCOM::IObject*>(obj));')
        self.cppSup.out('env->SetLongField(wrapper, fid, field);')
        self.recurseBuildInheritedFieldSetup(node)
        self.cppSup.out('return wrapper;')
        self.cppSup.dec_indent()
        self.cppSup.out('}')
        # Write a finalizer...
        self.pushManglePart('finalize')
        self.calculateMangled()
        self.hxx.out('extern "C" { PUBLIC_JAVAMOD_PRE void ' + self.mangled +
                     '(JNIEnv* env, jobject thisptr) PUBLIC_JAVAMOD_POST; }')
        self.cppMod.out('void ' + self.mangled + '(JNIEnv* env, jobject thisptr)')
        self.cppMod.out('{')
        self.cppMod.inc_indent()
        self.cppMod.out('jclass thisclazz = env->GetObjectClass(thisptr);')
        self.cppMod.out('jfieldID fid = env->GetFieldID(thisclazz, "nativePtr", "J");')
        self.cppMod.out('reinterpret_cast<' + self.cxxclass +
                        '*>(env->GetLongField(thisptr, fid))->release_ref();')
        self.cppMod.dec_indent()
        self.cppMod.out('}')
        self.popManglePart()
        # Write the nqueryInterface stub: QI the raw pointer to this
        # interface and wrap the result for Java (NULL if unsupported).
        self.pushManglePart('nqueryInterface')
        self.calculateMangled()
        self.hxx.out('extern "C" { PUBLIC_JAVAMOD_PRE jobject ' + self.mangled +
                     '(JNIEnv* env, jclass* clazz, jlong fromptr) PUBLIC_JAVAMOD_POST; }')
        self.cppMod.out('jobject ' + self.mangled + '(JNIEnv* env, jclass* clazz, jlong fromptr)')
        self.cppMod.out('{')
        self.cppMod.inc_indent()
        self.cppMod.out('iface::XPCOM::IObject * obj = reinterpret_cast<iface::XPCOM::IObject*>' +
                        '(fromptr);')
        self.cppMod.out('if (obj == NULL) { env->ExceptionClear(); return NULL; }')
        self.cppMod.out('DECLARE_QUERY_INTERFACE_OBJREF(qiobj, obj, ' + scopedn + ');')
        # If qiobj is null, it doesn't implement the requested interface...
        self.cppMod.out('if (qiobj == NULL) return NULL;')
        self.cppMod.out('jobject objj = ' + constructor + '(env, qiobj);')
        self.cppMod.out('return objj;')
        self.cppMod.dec_indent()
        self.cppMod.out('}')
        self.popManglePart()
        # Write the hashCode stub: fold the object id string into a jint
        # with a position-dependent rotate-and-xor.
        self.pushManglePart('hashCode')
        self.calculateMangled()
        self.hxx.out('extern "C" { PUBLIC_JAVAMOD_PRE jint ' + self.mangled +
                     '(JNIEnv* env, jobject thisptr) PUBLIC_JAVAMOD_POST; }')
        self.cppMod.out('jint ' + self.mangled + '(JNIEnv* env, jobject thisptr)')
        self.cppMod.out('{')
        self.cppMod.inc_indent()
        self.cppMod.out('jclass thisclazz = env->GetObjectClass(thisptr);')
        self.cppMod.out('jfieldID fid = env->GetFieldID(thisclazz, "nativePtr", "J");')
        self.cppMod.out('std::string oid1 = reinterpret_cast<' + self.cxxclass +
                        '*>(env->GetLongField(thisptr, fid))->objid();')
        self.cppMod.out('jint hash = 0;')
        self.cppMod.out('size_t l = oid1.size();');
        self.cppMod.out('for (uint32_t i = 0; i < l; i++)')
        self.cppMod.out('{')
        self.cppMod.inc_indent()
        self.cppMod.out('uint32_t n = (i * 13) % 32;')
        self.cppMod.out('hash ^= (oid1[i] << n) | (oid1[i] >> (32 - n));')
        self.cppMod.dec_indent()
        self.cppMod.out('}')
        self.cppMod.out('return hash;')
        self.cppMod.dec_indent()
        self.cppMod.out('}')
        self.popManglePart()
        self.recurseAcceptInheritedContents(node)
        self.inameParts.pop()
        self.popManglePart()

    def recurseBuildInheritedFieldSetup(self, node):
        """Emit code setting the wrapper's per-ancestor nativePtr_* fields so
        each inherited interface carries a correctly-cast C++ pointer."""
        self.cppSup.out('{')
        self.cppSup.inc_indent()
        # Resolve typedef'd interfaces to the underlying declaration.
        if isinstance(node, idlast.Declarator) and node.alias():
            node = node.alias().aliasType().unalias().decl()
        fieldName = 'nativePtr_' + string.join(node.scopedName(), '_')
        self.cppSup.out('jfieldID fid = env->GetFieldID(clazz, "' + fieldName + '", "J");')
        className = string.join(node.scopedName(), '::')
        self.cppSup.out('field = reinterpret_cast<int64_t>' +\
                        '(static_cast<iface::' + className + '*>(obj));')
        self.cppSup.out('env->SetLongField(wrapper, fid, field);')
        self.cppSup.dec_indent()
        self.cppSup.out('}')
        for i in node.inherits():
            if i.scopedName() != ['XPCOM', 'IObject']:
                self.recurseBuildInheritedFieldSetup(i)

    def recurseAcceptInheritedContents(self, node):
        """Visit this interface's members and, recursively, every inherited
        interface's members (XPCOM::IObject excluded)."""
        if isinstance(node, idlast.Declarator) and node.alias():
            node = node.alias().aliasType().unalias().decl()
        for i in node.contents():
            i.accept(self)
        for i in node.inherits():
            if i.scopedName() != ['XPCOM', 'IObject']:
                self.recurseAcceptInheritedContents(i)

    def visitOperation(self, node):
        """Generate the native JNI stub for one IDL operation (method)."""
        rti = jnutils.GetTypeInformation(node.returnType().unalias())
        params = []
        for p in node.parameters():
            # Map omniidl's direction code (0/1/2) to IN/OUT/INOUT.
            dirn = [jnutils.Type.IN, jnutils.Type.OUT, jnutils.Type.INOUT][p.direction()]
            pti = jnutils.GetTypeInformation(p.paramType().unalias())
            params.append([p.identifier(), pti, dirn])
        self.pushManglePart(node.identifier())
        self.calculateMangled()
        self.writeMethod(self.mangled, jnutils.CppName(node.identifier()), rti, params, node.raises())
        self.popManglePart()

    def visitAttribute(self, node):
        """Generate getter (and, unless readonly, setter) stubs for each
        attribute declarator."""
        ti = jnutils.GetTypeInformation(node.attrType().unalias())
        for n in node.declarators():
            self.pushManglePart(jnutils.AccessorName(n, 0))
            self.calculateMangled()
            self.writeMethod(self.mangled, jnutils.CppName(n.identifier()),
                             ti, [], [])
            self.popManglePart()
        if not node.readonly():
            # NOTE(review): itype is computed but never used.
            itype = ti.jniType(jnutils.Type.IN)
            for n in node.declarators():
                self.pushManglePart(jnutils.AccessorName(n, 1))
                self.calculateMangled()
                self.writeMethod(self.mangled, jnutils.CppName(n.identifier()),
                                 None, [['param', ti, jnutils.Type.IN]], [])
                self.popManglePart()

    def writeMethod(self, name, pcmName, rtype, params, excepts):
        """Emit one JNI stub: declaration in the header, definition in the
        module .cpp.  The generated body converts JNI arguments to C++
        ("pcm") types, invokes the C++ method via the wrapper's nativePtr,
        maps declared IDL exceptions to Java exceptions (anything else
        becomes java.lang.RuntimeException), converts out-params and the
        return value back to JNI types, and frees intermediates.

        name    -- fully mangled JNI function name
        pcmName -- C++ method name to invoke
        rtype   -- type info for the return value, or None for void
        params  -- [identifier, type info, direction] triples
        excepts -- IDL exceptions the operation may raise
        """
        if rtype == None:
            rtypeName = 'void'
        else:
            rtypeName = rtype.jniType(jnutils.Type.RETURN)
        paramString = 'JNIEnv* env, jobject thisptr'
        for (pname, ti, dirn) in params:
            tiName = ti.jniType(dirn)
            paramString = paramString + ', ' + tiName + ' ' + pname
        self.hxx.out('extern "C" { PUBLIC_JAVAMOD_PRE ' + rtypeName + ' ' + name +
                     '(' + paramString + ') PUBLIC_JAVAMOD_POST; };')
        self.cppMod.out(rtypeName + ' ' + name + '(' + paramString + ')')
        self.cppMod.out('{')
        self.cppMod.inc_indent()
        if (rtype != None and rtypeName != 'void'):
            needRet = 1
            self.cppMod.out(rtype.pcmType(jnutils.Type.DERIVE) + ' _pcm_ret;')
        else:
            needRet = 0
        pcmParams = ''
        for (pname, ti, dirn) in params:
            # We need to convert in parameters to the CXX type...
            indirect = ''
            if dirn != jnutils.Type.IN:
                indirect = ti.cref
            if pcmParams != '':
                pcmParams = pcmParams + ', '
            self.cppMod.out(ti.pcmType(jnutils.Type.DERIVE) + ' _pcm_' + pname + ';')
            pcmParams = pcmParams + indirect + '_pcm_' + pname
            if dirn == jnutils.Type.OUT:
                continue
            self.cppMod.out(ti.convertToPCM(pname, '_pcm_' + pname, dirn != jnutils.Type.IN))
        # Next, we need to extract the 'this' pointer...
        self.cppMod.out(self.cxxclass + '* pcm_this;')
        self.cppMod.out('{')
        self.cppMod.inc_indent()
        self.cppMod.out('jclass thisclazz = env->GetObjectClass(thisptr);')
        self.cppMod.out('jfieldID fid = env->GetFieldID(thisclazz, "nativePtr", "J");')
        self.cppMod.out('pcm_this = reinterpret_cast<' + self.cxxclass + '*>(env->GetLongField(thisptr, fid));')
        self.cppMod.dec_indent()
        self.cppMod.out('}')
        # Make the call to the PCM interface...
        if needRet:
            retsave = '_pcm_ret = '
        else:
            retsave = ''
        self.cppMod.out('try')
        self.cppMod.out('{')
        self.cppMod.inc_indent()
        self.cppMod.out(retsave + 'pcm_this->' + pcmName + '(' + pcmParams + ');')
        self.cppMod.dec_indent()
        self.cppMod.out('}')
        # One catch clause per declared IDL exception: convert the exception
        # members to JNI values and throw the matching Java exception class.
        for e in excepts:
            self.cppMod.out('catch (%s& _except)' % ('iface::' + jnutils.ScopedCppName(e)))
            self.cppMod.out('{')
            self.cppMod.inc_indent()
            # Clean up parameters...
            for (pname, ti, dirn) in params:
                self.cppMod.out(ti.pcmDestroy('_pcm_' + pname))
            sigArgs = ''
            invokeArgs = ''
            for mem in e.members():
                ti = jnutils.GetTypeInformation(mem.memberType())
                for d in mem.declarators():
                    jniName = '_ejni_' + d.identifier()
                    self.cppMod.out('%s %s;' % (ti.jniType(jnutils.Type.IN), jniName));
                    self.cppMod.out(ti.convertToJNI(jniName, '_except.%s' % (jnutils.CppName(d.identifier()))))
                    sigArgs = sigArgs + ti.javaSig(jnutils.Type.IN)
                    invokeArgs = invokeArgs + ',' + jniName
            self.cppMod.out('jclass eclazz = env->FindClass("%s");' % string.join(e.scopedName(), '/'))
            self.cppMod.out('jmethodID meth = env->GetMethodID(eclazz, "<init>", "(%s)V");' % sigArgs)
            self.cppMod.out('jobject eobj = env->NewObject(eclazz, meth%s);' % invokeArgs)
            self.cppMod.out('env->Throw((jthrowable)eobj);')
            if needRet:
                self.cppMod.out('return ' + rtype.failure_return + ';')
            else:
                self.cppMod.out('return;')
            self.cppMod.dec_indent()
            self.cppMod.out('}')
        # Catch-all: any other C++ exception becomes a RuntimeException.
        self.cppMod.out ('catch (...)')
        self.cppMod.out('{')
        self.cppMod.inc_indent()
        # Clean up parameters...
        for (pname, ti, dirn) in params:
            self.cppMod.out(ti.pcmDestroy('_pcm_' + pname))
        # Raise an exception...
        self.cppMod.out('jclass eclazz = env->FindClass("java/lang/RuntimeException");')
        self.cppMod.out('env->ThrowNew(eclazz, "Native code threw exception");')
        if needRet:
            self.cppMod.out('return ' + rtype.failure_return + ';')
        else:
            self.cppMod.out('return;')
        self.cppMod.dec_indent()
        self.cppMod.out('}')
        # Convert out / inout parameters to JNI...
        for (pname, ti, dirn) in params:
            if dirn == jnutils.Type.IN:
                continue
            self.cppMod.out(ti.convertToJNI(pname, '_pcm_' + pname, indirectOut = 1))
        if needRet:
            self.cppMod.out(rtypeName + ' _jni_ret;')
            self.cppMod.out(rtype.convertToJNI('_jni_ret', '_pcm_ret'))
        # Clean up parameters...
        for (pname, ti, dirn) in params:
            self.cppMod.out(ti.pcmDestroy('_pcm_' + pname))
        if needRet:
            self.cppMod.out(rtype.pcmDestroy('_pcm_ret'))
            self.cppMod.out('return _jni_ret;')
        self.cppMod.dec_indent()
        self.cppMod.out('}')
def run(tree):
    """omniidl backend entry point: walk the AST with the stub generator."""
    iv = NativeStubVisitor()
    tree.accept(iv)
| StarcoderdataPython |
1983755 | from collections import deque
# Cost charged per shot and capacity of the revolver's barrel.
price_per_bullet = int(input())
barrel_size = int(input())

# Bullets are supplied last-to-first, so reverse to fire in the given order.
bull = list(map(int, input().split()))
bullets = deque(bull[::-1])
locks = deque(list(map(int, input().split())))
intelligence_value = int(input())

total_money = 0
local_barrel = int(barrel_size)  # shots remaining before a reload

while bullets and locks:
    cur_bullet = bullets.popleft()
    cur_lock = locks.popleft()
    local_barrel -= 1
    total_money -= price_per_bullet
    # A bullet opens a lock when its size does not exceed the lock's size.
    if cur_bullet <= cur_lock:
        print("Bang!")
    else:
        print("Ping!")
        locks.appendleft(cur_lock)  # the lock survives a miss
    if local_barrel == 0:
        # Reload only announced when there are still bullets to load.
        if bullets:
            print("Reloading!")
        local_barrel = int(barrel_size)

if locks and not bullets:
    print(f"Couldn't get through. Locks left: {len(locks)}")
print(f"{len(bullets)} bullets left. Earned ${total_money}") | StarcoderdataPython |
3363362 | #############
## SETUP ##
#############
import itertools
def setup(file):
with open(file) as f:
data = [int(val) for val in f.read().splitlines()]
return data
class RollingSum:
    """Maintains every pairwise sum over a fixed-size rolling window.

    The sums live in an upper-triangular table: row i holds
    values[i] + values[j] for each j > i, and None elsewhere.
    """

    def __init__(self, start):
        window = len(start)
        self._values = start
        self._sums = [
            [start[i] + start[j] if j > i else None for j in range(window)]
            for i in range(window - 1)
        ]

    def replace(self, index, value):
        """Overwrite the element at index (mod window size) and patch every
        pairwise sum involving it by the delta.  Returns self for chaining."""
        size = len(self._values)
        index %= size
        delta = value - self._values[index]
        self._values[index] = value
        for row in range(index + 1):
            if row == index:
                # This row pairs the replaced element with every later one.
                for col in range(row + 1, size):
                    self._sums[row][col] += delta
            else:
                # Earlier rows pair an earlier element with the replaced one.
                self._sums[row][index] += delta
        return self

    def values(self):
        """Return the set of all current pairwise sums."""
        collected = {entry for row in self._sums for entry in row}
        collected.remove(None)
        return collected
# Length of the rolling window: the puzzle's 25-number preamble.
preamble = 25
##############
## PART 1 ##
##############
def part1(data):
    """Return the first value after the preamble that is not the sum of any
    two of the preceding `preamble` values (the XMAS "invalid" number)."""
    sums = RollingSum(data[:preamble])
    for i in range(preamble, len(data)):
        num = data[i]
        # Idiomatic `not in` (the original wrote `not num in ...`).
        if num not in sums.values():
            return num
        # Slide the window: overwrite the oldest entry (index i mod preamble).
        sums.replace(i, num)
##############
## PART 2 ##
##############
def part2(data, weak):
    """Find a contiguous run of at least two values summing to `weak` and
    return max(run) + min(run); None when no run is found.

    The accumulator is named `running` (the original shadowed the builtin
    `sum`), and the implicit None return is made explicit.
    """
    for i in range(len(data) - 1):
        running = data[i]
        # NOTE(review): this abandons the whole search as soon as one start
        # value reaches the target, mirroring the original's behavior.
        if running >= weak:
            break
        for j in range(i + 1, len(data)):
            running += data[j]
            if running == weak:
                window = data[i:j + 1]
                return max(window) + min(window)
            if running > weak:
                break
    return None
if __name__ == "__main__":
    # Solve both parts; part 2 needs part 1's invalid number as its target.
    data = setup("../input/09.txt")

    print("\nPart 1:")
    p1 = part1(data)
    print(p1)

    print("\nPart 2:")
    print(part2(data, p1))
| StarcoderdataPython |
3545005 | <reponame>elifesciences/elife-bot
import unittest
from mock import mock, patch
import activity.activity_ScheduleDownstream as activity_module
from activity.activity_ScheduleDownstream import (
activity_ScheduleDownstream as activity_object,
)
from provider import article, lax_provider
import tests.activity.settings_mock as settings_mock
from tests.activity.classes_mock import FakeLogger, FakeStorageContext
import tests.activity.test_activity_data as activity_test_data
class TestScheduleDownstream(unittest.TestCase):
    """Tests for the ScheduleDownstream activity: do_activity outcomes and
    the choose_outboxes() routing logic for poa/vor articles."""
    def setUp(self):
        # Fresh activity per test; the trailing None args stand in for the
        # conn/token/activity_task parameters that are unused here.
        fake_logger = FakeLogger()
        self.activity = activity_object(settings_mock, fake_logger, None, None, None)
    # NOTE: patch decorators apply bottom-up, so the last decorator listed
    # maps to the first mock parameter of the test method.
    @patch("provider.lax_provider.article_first_by_status")
    @patch.object(article, "storage_context")
    @patch.object(activity_module, "storage_context")
    def test_do_activity(
        self, fake_activity_storage_context, fake_storage_context, fake_first
    ):
        # Happy path: all storage interactions faked, activity returns True.
        expected_result = True
        fake_storage_context.return_value = FakeStorageContext()
        fake_activity_storage_context.return_value = FakeStorageContext()
        fake_first.return_value = True
        self.activity.emit_monitor_event = mock.MagicMock()
        # do the activity
        result = self.activity.do_activity(
            activity_test_data.data_example_before_publish
        )
        # check assertions
        self.assertEqual(result, expected_result)
    @patch.object(lax_provider, "get_xml_file_name")
    @patch.object(lax_provider, "article_first_by_status")
    def test_do_activity_exception(self, fake_first, fake_get_xml_file_name):
        # An exception inside the activity is caught and surfaces as False.
        expected_result = False
        fake_get_xml_file_name.side_effect = Exception("Something went wrong!")
        fake_first.return_value = True
        self.activity.emit_monitor_event = mock.MagicMock()
        # do the activity
        result = self.activity.do_activity(
            activity_test_data.data_example_before_publish
        )
        # check assertions
        self.assertEqual(result, expected_result)
    def test_choose_outboxes_poa_first(self):
        """first poa version"""
        outbox_list = activity_module.choose_outboxes("poa", True)
        self.assertTrue("pubmed/outbox/" in outbox_list)
        self.assertTrue("publication_email/outbox/" in outbox_list)
        self.assertFalse("pmc/outbox/" in outbox_list)
    def test_choose_outboxes_poa_not_first(self):
        """poa but not the first poa"""
        outbox_list = activity_module.choose_outboxes("poa", False)
        self.assertTrue("pubmed/outbox/" in outbox_list)
        # do not send to pmc
        self.assertFalse("pmc/outbox/" in outbox_list)
        # do not send publication_email
        self.assertFalse("publication_email/outbox/" in outbox_list)
    def test_choose_outboxes_vor_first(self):
        """first vor version"""
        outbox_list = activity_module.choose_outboxes("vor", True)
        self.assertTrue("pmc/outbox/" in outbox_list)
        self.assertTrue("pubmed/outbox/" in outbox_list)
        self.assertTrue("publication_email/outbox/" in outbox_list)
        self.assertTrue("pub_router/outbox/" in outbox_list)
    def test_choose_outboxes_vor_not_first(self):
        """vor but not the first vor"""
        outbox_list = activity_module.choose_outboxes("vor", False)
        self.assertTrue("pmc/outbox/" in outbox_list)
        self.assertTrue("pubmed/outbox/" in outbox_list)
        self.assertTrue("pub_router/outbox/" in outbox_list)
        # do not send publication_email
        self.assertFalse("publication_email/outbox/" in outbox_list)
    def test_choose_outboxes_vor_silent_first(self):
        # silent-correction run type suppresses email and pubmed routing
        outbox_list = activity_module.choose_outboxes("vor", True, "silent-correction")
        self.assertTrue("pmc/outbox/" in outbox_list)
        # do not send publication_email
        self.assertFalse("publication_email/outbox/" in outbox_list)
        # do not send to pubmed
        self.assertFalse("pubmed/outbox/" in outbox_list)
# Allow running this test module directly (python test_file.py).
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
4835260 | <reponame>Rafilek/backend-app-api-one<filename>app/app/calc.py
def add(x, y):
    """Return ``x + y`` — works for any types supporting the ``+`` operator."""
    result = x + y
    return result
| StarcoderdataPython |
4981640 | from .create_package import CreatePackage | StarcoderdataPython |
6490765 | <filename>Opt_DM_GUI5.py
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 14:29:46 2019
@author: 028375
"""
from __future__ import unicode_literals, division
try:
import tkinter as tk
import tkinter.messagebox as msbox
except ImportError:
import Tkinter as tk
import tkMessageBox as msbox
import os, copy
import pandas as pd
import numpy as np
import Opt_DM_GUI3
# Resolve the working directory with a trailing backslash (Windows paths).
# The except branch is Python 2 fallback: str.decode('gb2312') handles a
# non-ASCII cwd; under Python 3 getcwd() already returns str and the decode
# call would fail, so only the try branch runs.
try:
    #path0=os.path.dirname(os.path.realpath(__file__))+'\\'
    path0=os.getcwd()+'\\'
except:
    #path0=os.path.dirname((os.path.realpath(__file__)).decode('gb2312'))+'\\'
    path0=(os.getcwd()).decode('gb2312')+'\\'
# Relative paths of the input workbooks and the generated report workbook.
path5='Opt_DM\Report_Opt_Book.xlsx'
path6='Opt_DM\TheAssess.xlsx'
path7='Opt_DM\Report_Opt_Book2.xlsx'
def GenBook():
    """Build the per-underlying option ledger workbook.

    Reads the option ledger (path5) and the assessment report (path6),
    aggregates both by option underlying, merges them, and writes a
    multi-sheet result workbook to path7.  All user feedback goes through
    tkinter message boxes; returns 0 on any failure.
    """
    # Load sheet 3 of the option ledger workbook.
    try:
        Outputs2=pd.read_excel(path0+path5,3,encoding='gbk',keep_default_na=False)
    except BaseException:
        msbox.showinfo(title='Notice',message='找不到文件'+path0+path5)
        return 0
    # Load sheet 0 of the assessment workbook.
    try:
        TheAssess=pd.read_excel(path0+path6,0,encoding='gbk',keep_default_na=False)
    except BaseException:
        msbox.showinfo(title='Notice',message='找不到文件'+path0+path6)
        return 0
    # Option side: coalesce the underlying / underlying-type columns from the
    # opening, closing and cash sheets (first non-empty wins), then aggregate
    # realized P&L and fair-value P&L by (underlying, type).
    try:
        Outputs2['期权标的']=Outputs2['期初表期权标的'].copy()
        Outputs2.loc[Outputs2['期权标的']=='',['期权标的']]=Outputs2[Outputs2['期权标的']=='']['期末表期权标的']
        Outputs2.loc[Outputs2['期权标的']=='',['期权标的']]=Outputs2[Outputs2['期权标的']=='']['资金表期权标的']
        Outputs2['期权标的']=Outputs2['期权标的'].str.rstrip(';,,,;,.')
        Outputs2['标的类型']=Outputs2['期初表标的类型'].copy()
        Outputs2.loc[Outputs2['标的类型']=='',['标的类型']]=Outputs2[Outputs2['标的类型']=='']['期末表标的类型']
        Outputs2.loc[Outputs2['标的类型']=='',['标的类型']]=Outputs2[Outputs2['标的类型']=='']['资金表标的类型']
        Outputs3=Outputs2.groupby(['期权标的','标的类型'])['实盈','公允价值变动损益'].sum().reset_index()
    except BaseException:
        msbox.showinfo(title='Notice',message='处理期权端数据时发生错误!')
        return 0
    # Spot/hedge side: filter to the equity-derivatives OTC-option rows,
    # derive P&L columns, aggregate, then rebuild exchange-suffixed codes.
    try:
        TheAssess=TheAssess[(TheAssess['部门2']=='股权衍生品业务线') & (TheAssess['业务类型']=='场外期权')]
        TheAssess['投资收益']=TheAssess['卖出利润']-TheAssess['交易费用']
        TheAssess['分红']=TheAssess['本日计提利息']
        TheAssess['浮动盈亏2']=pd.to_numeric(TheAssess['计提公允价值损益'])
        TheAssess2=TheAssess[['代码','名称', '市场','金融分类', '投资收益','分红','浮动盈亏2']].reset_index(drop=True)
        TheAssess2.loc[TheAssess2['市场']=='X_SZSC',['市场']]='X_SHSC' # some Stock Connect rows carry a crossed market code; normalize to X_SHSC
        TheAssess2=TheAssess2.groupby(['代码','名称','市场','金融分类'])['投资收益','分红','浮动盈亏2'].sum().reset_index()
        TheAssess2['修正后的代码']=TheAssess2['代码'].copy()
        # Map each market to its Wind-style code suffix; commodity futures are
        # dispatched by product name to their exchange (SHF/INE/DCE/CZC).
        for i in list(range(len(TheAssess2))):
            try:
                if TheAssess2['市场'][i]=='XSHE':
                    TheAssess2['修正后的代码'][i]='{0:0>6}'.format(TheAssess2['代码'][i])+'.SZ'
                elif TheAssess2['市场'][i]=='XSHG':
                    TheAssess2['修正后的代码'][i]='{0:0>6}'.format(TheAssess2['代码'][i])+'.SH'
                elif (TheAssess2['市场'][i]=='X_SZSC')|(TheAssess2['市场'][i]=='X_SHSC'):
                    TheAssess2['修正后的代码'][i]='{0:0>4}'.format(TheAssess2['代码'][i])+'.HK'
                elif TheAssess2['市场'][i]=='X_CNFFEX':
                    if TheAssess2['名称'][i] in (['螺纹钢','热轧卷板','锌','铝','黄金','白银','镍','锡','阴极铜','纸浆','石油沥青','天然橡胶']):
                        TheAssess2['修正后的代码'][i]=(TheAssess2['代码'][i]).upper()+'.SHF'
                    elif TheAssess2['名称'][i] in (['原油']):
                        TheAssess2['修正后的代码'][i]=(TheAssess2['代码'][i]).upper()+'.INE'
                    elif TheAssess2['名称'][i] in (['聚氯乙烯','聚丙烯','大豆原油','铁矿石','冶金焦炭','豆粕','棕榈油','玉米','线型低密度聚乙烯','冶金焦炭']):
                        TheAssess2['修正后的代码'][i]=(TheAssess2['代码'][i]).upper()+'.DCE'
                    elif TheAssess2['名称'][i] in (['动力煤','鲜苹果','一号棉花','精对苯二甲酸(PTA)','甲醇','白砂糖']):
                        TheAssess2['修正后的代码'][i]=(TheAssess2['代码'][i]).upper()+'.CZC'
                except: 0  # NOTE(review): bare except silently skips a malformed row — consider logging
    except BaseException:
        msbox.showinfo(title='Notice',message='处理现货端数据时发生错误!')
        return 0
    # Outer-merge the two aggregates on the underlying code and write all
    # intermediate and final frames as separate sheets of one workbook.
    try:
        TheAssess3=TheAssess2.rename(columns={'修正后的代码':'期权标的'})
        Outputs3=Outputs3[Outputs3['期权标的']!='']
        TheAssess3=TheAssess3[TheAssess3['期权标的']!='']
        Outputs5=pd.merge(Outputs3,TheAssess3,how='outer',on='期权标的')
        wbw3=pd.ExcelWriter(path0+path7)
        Outputs2.to_excel(wbw3,'期权台账',index=False)
        TheAssess.to_excel(wbw3,'考核报表',index=False)
        Outputs3.to_excel(wbw3,'期权台账_按标的汇总',index=False)
        TheAssess2.to_excel(wbw3,'考核报表_按标的汇总',index=False)
        Outputs5.to_excel(wbw3,'对冲结果',index=False)
        wbw3.save()
        msbox.showinfo(title='Notice',message='生成期权台账(按标的),输出到:'+path0+path7)
    except:
        msbox.showinfo(title='Notice',message='生成期权台账(按标的)发生错误!')
        return 0
class APP_class(Opt_DM_GUI3.APP_class):
    """GUI application extending the base tool with a per-underlying
    ledger-generation button wired to GenBook()."""
    def __init__(self, root):
        self.root = root
        self.setupUI2()
    def setupUI2(self):
        """Build the inherited UI, then add the book-generation button."""
        self.setupUI1()
        book_button = tk.Button(self.root, text="生成台账(按标的)", command=GenBook, width=18)
        book_button.grid(row=1, column=1, sticky="e")
# Launch the Tk main window when run as a script.
if __name__=="__main__":
    root=tk.Tk()
    root.title('iAccountingTool.0.1.1')
    APP1=APP_class(root)
    root.mainloop()
| StarcoderdataPython |
5061676 | <filename>src/client/pytriloquist/gui/app.py
import appuifw as ui
from pytriloquist import Const
from pytriloquist.btclient import BluetoothError
from pytriloquist.gui import Dialog
class ApplicationsDialog(Dialog):
    """
    Dialog used to manage applications.

    Presents a Listbox of applications loaded from the DB and menu actions
    to open, create, delete and rename them.  Targets the Symbian appuifw
    UI toolkit (Python 2 era).
    """
    def __init__(self, app, parent):
        Dialog.__init__(self, app, parent)
    def get_title(self):
        """Returns the dialog title.
        """
        return self.parent.get_title()
    def init_ui(self):
        """Initializes the user interface.
        """
        # Load the list of applications as (id, name) tuples from the DB view.
        self.apps = []
        view = self.app.dbm.query(Const.DB_APPLICATIONS_SELECT)
        for i in range(view.count_line()):
            view.get_line()
            self.apps.append((view.col(1), view.col(2)))
            view.next_line()
        # Menu
        self.menu = [
            (_(u"Open")  , self.app_list_observe),
            (_(u"New")   , self.new_app),
            (_(u"Delete"), self.delete_app),
            (_(u"Rename"), self.rename_app),
        ]
        self.menu.extend(self.parent.menu)
        # Cannot display an empty Listbox: insert a sentinel entry whose id
        # of -1 is treated as "create a new application" on selection.
        if not self.apps:
            self.apps.append((-1, _("New")))
        self.app_list = ui.Listbox([app[1] for app in self.apps], self.app_list_observe)
    def display(self):
        """Displays the dialog on the device.
        """
        ui.app.body = self.app_list
        ui.app.menu = self.menu
    def app_list_observe(self):
        """Function called when an application is selected from the list.
        """
        selected = self.apps[self.app_list.current()]
        if selected[0] == -1:
            self.new_app()
        else:
            CommandsDialog(selected, self.app, self).execute()
    def new_app(self):
        """Adds a new application.
        """
        name = ui.query(_(u"Application name"), "text")
        if name:
            # NOTE(review): SQL is built via %-interpolation of user input —
            # confirm DB_APPLICATIONS_INSERT escapes/quotes safely.
            self.app.dbm.execute(Const.DB_APPLICATIONS_INSERT % name)
            self.execute(force=True)
    def delete_app(self):
        """Removes the selected application.
        """
        index = self.app_list.current()
        if index >= 0:
            selected = self.apps[index]
            if selected[0] >= 0:
                if ui.query(_(u"Delete \"%s\"?") % selected[1], "query"):
                    # Delete the application and its commands atomically.
                    self.app.dbm.execute_atomic([
                        Const.DB_APPLICATIONS_DELETE % selected[0],
                        Const.DB_COMMANDS_APP_DELETE % selected[0]])
                    self.execute(force=True)
            else:
                # id -1 is the "New" sentinel, not a deletable row
                ui.note(_(u"Cannot remove an action."), "error")
    def rename_app(self):
        """Renames the selected application.
        """
        index = self.app_list.current()
        if index >= 0:
            selected = self.apps[index]
            if selected[0] >= 0:
                name = ui.query(_(u"Application name"), "text", self.apps[index][1])
                if name:
                    self.app.dbm.execute(
                        Const.DB_APPLICATIONS_UPDATE % (name, selected[0]))
                    self.execute(force=True)
            else:
                ui.note(_(u"Cannot edit an action."), "error")
class CommandsDialog(Dialog):
    """
    Dialog used to manage the commands of an application.

    app_data is the (id, name) tuple of the owning application; selecting a
    command sends it to the remote host over Bluetooth.
    """
    def __init__(self, app_data, app, parent):
        Dialog.__init__(self, app, parent)
        self.app_data = app_data
    def get_title(self):
        """Returns the dialog title.
        """
        return self.app_data[1]
    def init_ui(self):
        """Initializes the user interface.
        """
        # Load the list of commands as (id, app_id, name, command) tuples.
        self.cmds = []
        view = self.app.dbm.query(Const.DB_COMMANDS_SELECT % self.app_data[0])
        for i in range(view.count_line()):
            view.get_line()
            self.cmds.append((view.col(1), view.col(2), view.col(3), view.col(4)))
            view.next_line()
        # Menu
        self.menu = [
            (_(u"Open")  , self.cmd_list_observe),
            (_(u"New")   , self.new_cmd),
            (_(u"Delete"), self.delete_cmd),
            (_(u"Edit")  , self.edit_cmd),
            (_(u"Back")  , self.back),
        ]
        # Cannot display an empty Listbox: sentinel entry with id -1 acts as
        # a "create new command" shortcut.
        if not self.cmds:
            self.cmds.append((-1, -1, _("New"), ""))
        self.cmd_list = ui.Listbox([cmd[2] for cmd in self.cmds], self.cmd_list_observe)
    def display(self):
        """Displays the dialog on the device.
        """
        ui.app.body = self.cmd_list
        ui.app.menu = self.menu
    def cmd_list_observe(self):
        """Function called when a command is selected from the list.
        """
        selected = self.cmds[self.cmd_list.current()]
        if selected[0] == -1:
            self.new_cmd()
        else:
            # Send the command line to the paired host (Python 2 except syntax).
            try:
                self.app.btclient.send_command(0, selected[3])
            except BluetoothError, e:
                ui.note(_(e.msg), "error")
    def new_cmd(self):
        """Adds a new command.
        """
        EditCommandDialog(self.app_data, None, self.app, self).execute()
    def delete_cmd(self):
        """Removes the selected command.
        """
        index = self.cmd_list.current()
        if index >= 0:
            selected = self.cmds[index]
            if selected[0] >= 0:
                if ui.query(_(u"Delete \"%s\"?") % selected[2], "query"):
                    self.app.dbm.execute(Const.DB_COMMANDS_DELETE % selected[0])
                    self.execute(force=True)
            else:
                ui.note(_(u"Cannot remove an action."), "error")
    def edit_cmd(self):
        """Renames the selected command.
        """
        index = self.cmd_list.current()
        if index >= 0:
            selected = self.cmds[index]
            if selected[0] >= 0:
                EditCommandDialog(self.app_data, selected, self.app, self).execute()
            else:
                ui.note(_(u"Cannot edit an action."), "error")
class EditCommandDialog(Dialog):
    """
    Dialog used to add and edit application commands.

    command_data is None when creating a new command, otherwise the
    (id, app_id, name, command) tuple being edited.
    """
    def __init__(self, app_data, command_data, app, parent):
        Dialog.__init__(self, app, parent)
        self.form_saved = False
        self.app_data = app_data
        self.command_data = command_data
    def get_title(self):
        """Returns the dialog title.
        """
        return self.app_data[1]
    def init_ui(self):
        """Initializes the user interface.
        """
        name = u""
        command = u""
        if self.command_data:
            name = self.command_data[2]
            command = self.command_data[3]
        # Form fields
        self.fields = [
            (_(u"Command name"), "text", name),
            (_(u"Command line"), "text", command),
        ]
        # Adjust flags
        self.initialized = True
        # Set up form; save_hook fires when the user saves the form.
        self.form = ui.Form(self.fields, flags=ui.FFormEditModeOnly | ui.FFormDoubleSpaced)
        self.form.save_hook = self.save
    def display(self):
        """Displays the dialog on the device.
        """
        self.form_saved = False
        self.form.execute()
        # Refresh the parent's command list only if the form was saved.
        self.parent.execute(force=self.form_saved)
    def get_name(self):
        """Gets the command name from the saved form.

        Returns None if the form has not been saved yet.
        """
        if self.form_saved:
            return self.saved_data[0][2]
    def get_command(self):
        """Gets the command line from the saved form.

        Returns None if the form has not been saved yet.
        """
        if self.form_saved:
            return self.saved_data[1][2]
    def save(self, data):
        """Adds or edits the command.

        Invoked by the form as its save_hook; INSERTs when creating,
        UPDATEs when editing.  Returns True to accept the save.
        """
        self.form_saved, self.saved_data = True, data
        if not self.command_data:
            self.app.dbm.execute(Const.DB_COMMANDS_INSERT % (
                self.app_data[0],
                self.get_name(),
                self.get_command()
            ))
        else:
            self.app.dbm.execute(Const.DB_COMMANDS_UPDATE % (
                self.get_name(),
                self.get_command(),
                self.command_data[0]
            ))
        return True
| StarcoderdataPython |
53396 | <filename>transformer/__init__.py
import transformer.Beam
import transformer.Constants
import transformer.Layers
import transformer.Models
import transformer.Modules
import transformer.Optim
import transformer.SubLayers
import transformer.Translator

# ``__all__`` must be a list of *strings* naming the public attributes.
# Listing the module objects themselves (as before) makes
# ``from transformer import *`` raise TypeError.
__all__ = [
    "Constants", "Modules", "Layers",
    "SubLayers", "Models", "Optim",
    "Translator", "Beam",
]
| StarcoderdataPython |
11270552 | <reponame>cherry-wb/googleads-python-lib
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover Client."""
__author__ = '<EMAIL> (<NAME>)'
import os
import pickle
import sys
import tempfile
import unittest
sys.path.insert(0, os.path.join('..', '..', '..'))
from adspygoogle.common.Client import _DEFAULT_CONFIG
from adspygoogle.common.Client import Client
from adspygoogle.common.Errors import ValidationError
class ClientTest(unittest.TestCase):
  """Tests for the adspygoogle.common.Client module.

  Exercises credential pickling (_LoadAuthCredentials /
  _WriteUpdatedAuthValue), config pickling (_LoadConfigValues) and default
  config resolution (_SetMissingDefaultConfigValues).  Python 2 codebase
  (uses ``except Exc, e`` syntax).
  """

  def setUp(self):
    """Initialize a Client to test with."""
    self.client = Client()

  def testLoadAuthCredentials_ClientLogin(self):
    """Tests the _LoadAuthCredentials function."""
    # Pickle fake ClientLogin credentials to a temp file and point the
    # class-level auth_pkl attribute at it.
    _, filename = tempfile.mkstemp()
    auth_credentials = {
        'username': 'Joseph',
        'password': '<PASSWORD>'
    }
    with open(filename, 'w') as handle:
      pickle.dump(auth_credentials, handle)

    try:
      Client.auth_pkl = filename
      self.assertEqual(self.client._LoadAuthCredentials(), auth_credentials)
    finally:
      # Always reset the class attribute so other tests are unaffected.
      Client.auth_pkl = ''

  def testLoadAuthCredentials_OAuth2(self):
    """Tests the _LoadAuthCredentials function."""
    _, filename = tempfile.mkstemp()
    # NOTE(review): the trailing commas make client_id / client_secret
    # 1-tuples — confirm this is intended by the original test.
    client_id = 'id1234id',
    client_secret = 'shhh,itsasecret',
    refresh_token = '1/<PASSWORD>_a_refresh_token'
    auth_credentials = {
        'clientId': client_id,
        'clientSecret': client_secret,
        'refreshToken': refresh_token
    }
    with open(filename, 'w') as handle:
      pickle.dump(auth_credentials, handle)

    try:
      Client.auth_pkl = filename
      read_credentials = self.client._LoadAuthCredentials()
      # The raw keys should be folded into an oauth2credentials object.
      self.assertEqual(read_credentials['oauth2credentials'].refresh_token,
                       refresh_token)
      self.assertEqual(read_credentials['oauth2credentials'].client_secret,
                       client_secret)
      self.assertEqual(read_credentials['oauth2credentials'].client_id,
                       client_id)
      self.assertTrue('clientId' not in read_credentials)
      self.assertTrue('clientSecret' not in read_credentials)
      self.assertTrue('refreshToken' not in read_credentials)
    finally:
      Client.auth_pkl = ''

  def testLoadAuthCredentials_noPickle(self):
    """Tests the _LoadAuthCredentials function."""
    # With no auth_pkl configured, loading must raise ValidationError.
    try:
      self.client._LoadAuthCredentials()
      self.fail('Exception should have been thrown.')
    except ValidationError, e:
      self.assertEqual(str(e), 'Authentication data is missing.')

  def testWriteUpdatedAuthValue(self):
    """Tests the _WriteUpdatedAuthValue function."""
    _, filename = tempfile.mkstemp()
    auth_credentials = {
        'username': 'Joseph',
        'password': '<PASSWORD>'
    }
    with open(filename, 'w') as handle:
      pickle.dump(auth_credentials, handle)

    try:
      Client.auth_pkl = filename
      self.client._WriteUpdatedAuthValue('password', '<PASSWORD>')
      # The pickle on disk should now reflect the updated value.
      with open(filename, 'r') as handle:
        self.assertEqual(pickle.load(handle),
                         {'username': 'Joseph', 'password': '<PASSWORD>'})
    finally:
      Client.auth_pkl = ''

  def testLoadConfigValues(self):
    """Tests the _LoadConfigValues function."""
    _, filename = tempfile.mkstemp()
    config_values = {
        'debug': 'yes plz',
        'compress': 'crunch'
    }
    with open(filename, 'w') as handle:
      pickle.dump(config_values, handle)

    try:
      Client.config_pkl = filename
      self.assertEqual(self.client._LoadConfigValues(), config_values)
    finally:
      Client.config_pkl = ''

  def testLoadConfigValues_noPickle(self):
    """Tests the _LoadConfigValues function."""
    # Missing config pickle should yield an empty dict, not an error.
    self.assertEqual(self.client._LoadConfigValues(), {})

  def testSetMissingDefaultConfigValues(self):
    """Tests the _SetMissingDefaultConfigValues function."""
    self.assertEqual(self.client._SetMissingDefaultConfigValues(),
                     _DEFAULT_CONFIG)
    self.assertEqual(self.client._SetMissingDefaultConfigValues({}),
                     _DEFAULT_CONFIG)
    # Ensure it doesn't overwrite values which exist already
    # NOTE(review): the auth_token_epoch value below looks corrupted by
    # source anonymization — confirm against the original test.
    partial_config = {
        'xml_parser': '2',
        'debug': 'y',
        'xml_log': 'y',
        'request_log': 'y',
        'auth_token_epoch': <PASSWORD>0,
        'auth_type': 'value',
        'pretty_xml': 'n',
    }
    expected_config = _DEFAULT_CONFIG.copy()
    for key in partial_config:
      expected_config[key] = partial_config[key]
    self.assertEqual(self.client._SetMissingDefaultConfigValues(partial_config),
                     expected_config)
# Allow running this test module directly (python test_file.py).
if __name__ == '__main__':
  unittest.main()
| StarcoderdataPython |
3440154 | <reponame>neelkarma/xkcd-cli
from .cli.main import main

# Package entry point: importing this module (e.g. via ``python -m``)
# immediately launches the CLI.
main()
| StarcoderdataPython |
5050313 | <filename>config.py
import os
import sys
class Env:
    """Environment-variable reader that collects errors for missing
    required variables instead of raising immediately."""

    # Accumulated messages for required variables that were absent.
    _errors = []

    @staticmethod
    def get_errors():
        """Return the accumulated missing-variable messages."""
        return Env._errors

    @staticmethod
    def _get_default(env, default, required):
        """Record an error for a required variable; otherwise return *default*.

        Returns None (implicitly) in the required case.
        """
        if not required:
            return default
        Env._errors.append("'$" + env + "' is not defined in the environment variables!")

    @staticmethod
    def get_env(env, default="", required=False):
        """Look up *env* in os.environ, applying the default/required policy."""
        value = os.environ.get(env)
        if value is not None:
            return value
        return Env._get_default(env, default, required)
# Cloudflare API credentials — both required; absence is recorded in
# Env._errors and reported below.
cloudflare = {
    "username": Env.get_env("CLOUDFLARE_USERNAME", required=True),
    "token": Env.get_env("CLOUDFLARE_TOKEN", required=True)
}

# Optional settings (empty string when unset).
settings = {
    "server_prefix": Env.get_env("LSCF_SUFFIX")
}

# Fail fast at import time: print every missing required variable to
# stderr and exit non-zero.
if len(Env.get_errors()) > 0:
    sys.stderr.write('\n'.join(str(x) for x in Env.get_errors()) )
    sys.exit(1)
| StarcoderdataPython |
4871976 | import json
import os
import re
from typing import Any, Dict, Optional
import pandas as pd
from pandas import DataFrame
import geopandas as gpd
from network_wrangler import ProjectCard
from network_wrangler import RoadwayNetwork
from .transit import CubeTransit, StandardTransit
from .logger import WranglerLogger
from .parameters import Parameters
from .roadway import ModelRoadwayNetwork
class Project(object):
    """A single or set of changes to the roadway or transit system.

    Compares a base and a build transit network or a base and build
    highway network and produces project cards.

    .. highlight:: python
    Typical usage example:
    ::
        test_project = Project.create_project(
            base_transit_source=os.path.join(CUBE_DIR, "transit.LIN"),
            build_transit_source=os.path.join(CUBE_DIR, "transit_route_shape_change"),
        )
        test_project.evaluate_changes()
        test_project.write_project_card(
            os.path.join(SCRATCH_DIR, "t_transit_shape_test.yml")
        )

    Attributes:
        DEFAULT_PROJECT_NAME: a class-level constant that defines what
            the project name will be if none is set.
        STATIC_VALUES: a class-level constant which defines values that
            are not evaluated when assessing changes.
        card_data (dict): {"project": <project_name>, "changes": <list of change dicts>}
        roadway_changes (DataFrame): pandas dataframe of CUBE roadway changes.
        transit_changes (CubeTransit):
        base_roadway_network (RoadwayNetwork):
        base_transit_network (CubeTransit):
        build_transit_network (CubeTransit):
        project_name (str): name of the project, set to DEFAULT_PROJECT_NAME if not provided
    """
    # Placeholder name written into project cards when none is supplied.
    DEFAULT_PROJECT_NAME = "USER TO define"
    # Link properties excluded from change evaluation (identity/classification
    # fields rather than editable attributes).
    STATIC_VALUES = [
        "model_link_id",
        "area_type",
        "county",
        #"assign_group",
        "centroidconnect",
    ]
    def __init__(
        self,
        roadway_changes: Optional[DataFrame] = None,
        transit_changes: Optional[CubeTransit] = None,
        base_roadway_network: Optional[RoadwayNetwork] = None,
        base_transit_network: Optional[CubeTransit] = None,
        build_transit_network: Optional[CubeTransit] = None,
        project_name: Optional[str] = "",
        evaluate: bool = False,
        parameters={},
    ):
        """Constructor.

        Args:
            roadway_changes: pandas DataFrame of CUBE roadway changes.
            transit_changes: build transit changes.
            base_roadway_network: base roadway network object.
            base_transit_network: base transit network object.
            build_transit_network: build transit network object.
            project_name: project name; falls back to DEFAULT_PROJECT_NAME.
            evaluate: if True, evaluate changes immediately.
            parameters: keyword dict forwarded to Parameters().
                NOTE(review): mutable default argument — safe only because
                it is never mutated here; confirm.
        """
        self.card_data = Dict[str, Dict[str, Any]]
        self.roadway_changes = roadway_changes
        self.base_roadway_network = base_roadway_network
        self.base_transit_network = base_transit_network
        self.build_transit_network = build_transit_network
        self.transit_changes = transit_changes
        self.project_name = (
            project_name if project_name else Project.DEFAULT_PROJECT_NAME
        )
        self.parameters = Parameters(**parameters)
        # Compatibility check only makes sense when a base network exists.
        if base_roadway_network != None:
            self.determine_roadway_network_changes_compatability()
        if evaluate:
            self.evaluate_changes()
def write_project_card(self, filename):
"""
Writes project cards.
Args:
filename (str): File path to output .yml
Returns:
None
"""
ProjectCard(self.card_data).write(filename)
WranglerLogger.info("Wrote project card to: {}".format(filename))
    @staticmethod
    def create_project(
        roadway_log_file: Optional[str] = None,
        roadway_shp_file: Optional[str] = None,
        roadway_csv_file: Optional[str] = None,
        base_roadway_dir: Optional[str] = None,
        base_transit_source: Optional[str] = None,
        build_transit_source: Optional[str] = None,
        roadway_changes: Optional[DataFrame] = None,
        transit_changes: Optional[CubeTransit] = None,
        base_roadway_network: Optional[RoadwayNetwork] = None,
        base_transit_network: Optional[CubeTransit] = None,
        build_transit_network: Optional[CubeTransit] = None,
        project_name=None,
        parameters={},
    ):
        """
        Constructor for a Project instance.

        Exactly one source may be given per input (e.g. a log file OR a
        pre-built roadway_changes frame); conflicting pairs raise ValueError.

        Args:
            roadway_log_file (str): File path to consuming logfile.
            roadway_shp_file (str): File path to consuming shape file for roadway changes.
            roadway_csv_file (str): File path to consuming csv file for roadway changes.
            base_roadway_dir (str): Folder path to base roadway network.
            base_transit_source (str): Folder/file path to base transit network.
            build_transit_source (str): Folder/file path to build transit network.
            roadway_changes (DataFrame): pandas dataframe of CUBE roadway changes.
            transit_changes (CubeTransit): build transit changes.
            base_roadway_network (RoadwayNetwork): Base roadway network object.
            base_transit_network (CubeTransit): Base transit network object.
            build_transit_network (CubeTransit): Build transit network object.
            project_name (str): optional project name.
            parameters (dict): forwarded to the Project constructor.
                NOTE(review): mutable default argument — confirm never mutated.

        Returns:
            A Project instance.
        """
        # --- base transit network -----------------------------------------
        if base_transit_source:
            base_transit_network = CubeTransit.create_from_cube(base_transit_source)
            WranglerLogger.debug(
                "Base network has {} lines".format(len(base_transit_network.lines))
            )
            if len(base_transit_network.lines) <= 10:
                WranglerLogger.debug(
                    "Base network lines: {}".format(
                        "\n - ".join(base_transit_network.lines)
                    )
                )
        else:
            msg = "No base transit network."
            WranglerLogger.info(msg)
            base_transit_network = None

        # --- build transit network ----------------------------------------
        if build_transit_source and transit_changes:
            msg = "Method takes only one of 'build_transit_source' and 'transit_changes' but both given"
            WranglerLogger.error(msg)
            raise ValueError(msg)
        if build_transit_source:
            WranglerLogger.debug("build")
            build_transit_network = CubeTransit.create_from_cube(build_transit_source)
            WranglerLogger.debug(
                "Build network has {} lines".format(len(build_transit_network.lines))
            )
            if len(build_transit_network.lines) <= 10:
                WranglerLogger.debug(
                    "Build network lines: {}".format(
                        "\n - ".join(build_transit_network.lines)
                    )
                )
        else:
            msg = "No transit changes given or processed."
            WranglerLogger.info(msg)
            transit_changes = None

        # --- roadway change source validation (mutually exclusive) --------
        if roadway_log_file and roadway_changes:
            msg = "Method takes only one of 'roadway_log_file' and 'roadway_changes' but both given"
            WranglerLogger.error(msg)
            raise ValueError(msg)
        if roadway_shp_file and roadway_changes:
            msg = "Method takes only one of 'roadway_shp_file' and 'roadway_changes' but both given"
            WranglerLogger.error(msg)
            raise ValueError(msg)
        if roadway_csv_file and roadway_changes:
            msg = "Method takes only one of 'roadway_csv_file' and 'roadway_changes' but both given"
            WranglerLogger.error(msg)
            raise ValueError(msg)
        if roadway_log_file and roadway_csv_file:
            msg = "Method takes only one of 'roadway_log_file' and 'roadway_csv_file' but both given"
            WranglerLogger.error(msg)
            raise ValueError(msg)
        if roadway_shp_file and roadway_csv_file:
            msg = "Method takes only one of 'roadway_shp_file' and 'roadway_csv_file' but both given"
            WranglerLogger.error(msg)
            raise ValueError(msg)
        if roadway_log_file and roadway_shp_file:
            msg = "Method takes only one of 'roadway_log_file' and 'roadway_shp_file' but both given"
            WranglerLogger.error(msg)
            raise ValueError(msg)

        # --- load roadway changes from whichever source was given ---------
        if roadway_log_file:
            roadway_changes = Project.read_logfile(roadway_log_file)
        elif roadway_shp_file:
            roadway_changes = gpd.read_file(roadway_shp_file)
            roadway_changes = DataFrame(roadway_changes.drop("geometry", axis = 1))
            # shapefile/csv sources carry no node ids; placeholder column
            roadway_changes["model_node_id"] = 0
        elif roadway_csv_file:
            roadway_changes = pd.read_csv(roadway_csv_file)
            roadway_changes["model_node_id"] = 0
        else:
            msg = "No roadway changes given or processed."
            WranglerLogger.info(msg)
            roadway_changes = pd.DataFrame({})

        # --- base roadway network -----------------------------------------
        if base_roadway_network and base_roadway_dir:
            msg = "Method takes only one of 'base_roadway_network' and 'base_roadway_dir' but both given"
            WranglerLogger.error(msg)
            raise ValueError(msg)
        if base_roadway_dir:
            base_roadway_network = ModelRoadwayNetwork.read(
                os.path.join(base_roadway_dir, "link.json"),
                os.path.join(base_roadway_dir, "node.geojson"),
                os.path.join(base_roadway_dir, "shape.geojson"),
                True,
            )
            # Derive model variables so comparisons see the same columns the
            # change sources reference.
            base_roadway_network.create_calculated_variables()
            base_roadway_network.calculate_distance(overwrite = True)
            base_roadway_network.fill_na()
            base_roadway_network.convert_int()
            base_roadway_network.split_properties_by_time_period_and_category()
        else:
            msg = "No base roadway network."
            WranglerLogger.info(msg)
            base_roadway_network = None

        project = Project(
            roadway_changes=roadway_changes,
            transit_changes=transit_changes,
            base_roadway_network=base_roadway_network,
            base_transit_network=base_transit_network,
            build_transit_network=build_transit_network,
            evaluate=True,
            project_name=project_name,
            parameters=parameters,
        )

        return project
@staticmethod
def read_logfile(logfilename: str) -> DataFrame:
"""
Reads a Cube log file and returns a dataframe of roadway_changes
Args:
logfilename (str): File path to CUBE logfile.
Returns:
A DataFrame reprsentation of the log file.
"""
WranglerLogger.info("Reading logfile: {}".format(logfilename))
with open(logfilename) as f:
content = f.readlines()
# (content[0].startswith("HighwayLayerLogX")):
if not content[0].startswith("HighwayLayerLogX"):
WranglerLogger.info("Returning an empty dataframe")
return DataFrame()
NodeLines = [x.strip() for x in content if x.startswith("N")]
LinkLines = [x.strip() for x in content if x.startswith("L")]
linkcol_names = ["OBJECT", "OPERATION", "GROUP"] + LinkLines[0].split(",")[1:]
nodecol_names = ["OBJECT", "OPERATION", "GROUP"] + NodeLines[0].split(",")[1:]
link_df = DataFrame(
data=[re.split(",|;", x) for x in LinkLines[1:]], columns=linkcol_names
)
node_df = DataFrame(
data=[re.split(",|;", x) for x in NodeLines[1:]], columns=nodecol_names
)
log_df = pd.concat([link_df, node_df], ignore_index=True, sort=False)
WranglerLogger.info(
"Processed {} Node lines and {} Link lines".format(
link_df.shape[0], node_df.shape[0]
)
)
return log_df
    def determine_roadway_network_changes_compatability(self):
        """
        Checks to see that any links or nodes that change exist in base roadway network.

        Raises:
            ValueError: if a changed A-B link pair or a changed node id is
                absent from the base network.
        """
        WranglerLogger.info(
            "Evaluating compatibility between roadway network changes and base network. Not evaluating deletions."
        )

        # CUBE log file saves all variable names in upper case; rename them to
        # the network's column names using the two crosswalk CSVs.
        log_to_net_df = pd.read_csv(self.parameters.log_to_net_crosswalk)
        log_to_net_dict = dict(zip(log_to_net_df["log"], log_to_net_df["net"]))

        dbf_to_net_df = pd.read_csv(self.parameters.net_to_dbf_crosswalk)
        dbf_to_net_dict = dict(zip(dbf_to_net_df["dbf"], dbf_to_net_df["net"]))

        for c in self.roadway_changes.columns:
            if c in list(log_to_net_dict.keys()):
                self.roadway_changes.rename(columns = {c : log_to_net_dict[c]},
                inplace = True)
            if c in list(dbf_to_net_dict.keys()):
                self.roadway_changes.rename(columns = {c : dbf_to_net_dict[c]},
                inplace = True)
            else:
                continue

        # Only "C" (change) records are validated; additions/deletions are
        # handled elsewhere.
        link_changes_df = self.roadway_changes[
            (self.roadway_changes.OBJECT == "L")
            & (self.roadway_changes.OPERATION == "C")
        ]
        link_merge_df = pd.merge(
            link_changes_df[["A", "B"]].astype(str),
            self.base_roadway_network.links_df[["A", "B", "model_link_id"]].astype(str),
            how="left",
            on=["A", "B"],
        )
        # Rows with no model_link_id after the left merge are missing links.
        missing_links = link_merge_df.loc[link_merge_df["model_link_id"].isna()]
        if missing_links.shape[0]:
            msg = "Network missing the following AB links:\n{}".format(missing_links)
            WranglerLogger.error(msg)
            raise ValueError(msg)

        node_changes_df = self.roadway_changes[
            (self.roadway_changes.OBJECT == "N")
            & (self.roadway_changes.OPERATION == "C")
        ]
        node_merge_df = pd.merge(
            node_changes_df[["model_node_id"]],
            self.base_roadway_network.nodes_df[["model_node_id", "geometry"]],
            how="left",
            on=["model_node_id"],
        )
        missing_nodes = node_merge_df.loc[node_merge_df["geometry"].isna()]
        if missing_nodes.shape[0]:
            msg = "Network missing the following nodes:\n{}".format(missing_nodes)
            WranglerLogger.error(msg)
            raise ValueError(msg)
    def evaluate_changes(self):
        """
        Determines which changes should be evaluated, initiates
        self.card_data to be an aggregation of transit and highway changes.
        """
        highway_change_list = []
        transit_change_list = []

        WranglerLogger.info("Evaluating project changes.")

        if not self.roadway_changes.empty:
            highway_change_list = self.add_highway_changes()

        # Transit changes need either pre-computed changes or both a base and
        # a build network to diff.
        if (self.transit_changes is not None) or (
            self.base_transit_network is not None
            and self.build_transit_network is not None
        ):
            transit_change_list = self.add_transit_changes()

        self.card_data = {
            "project": self.project_name,
            "changes": transit_change_list + highway_change_list,
        }
def add_transit_changes(self):
"""
Evaluates changes between base and build transit objects and
adds entries into the self.card_data dictionary.
"""
transit_change_list = self.build_transit_network.evaluate_differences(
self.base_transit_network
)
return transit_change_list
def add_highway_changes(self, limit_variables_to_existing_network=False):
"""
Evaluates changes from the log file based on the base highway object and
adds entries into the self.card_data dictionary.
Args:
limit_variables_to_existing_network (bool): True if no ad-hoc variables. Default to False.
"""
## if worth it, could also add some functionality to network wrangler itself.
node_changes_df = self.roadway_changes[
self.roadway_changes.OBJECT == "N"
].copy()
link_changes_df = self.roadway_changes[
self.roadway_changes.OBJECT == "L"
].copy()
def _final_op(x):
if x.OPERATION_history[-1] == "D":
if "A" in x.OPERATION_history[:-1]:
return "N"
else:
return "D"
elif x.OPERATION_history[-1] == "A":
if "D" in x.OPERATION_history[:-1]:
return "C"
else:
return "A"
else:
if "A" in x.OPERATION_history[:-1]:
return "A"
else:
return "C"
def _consolidate_actions(log, base, key_list):
log_df = log.copy()
# will be changed if to allow new variables being added/changed that are not in base network
changeable_col = [x for x in log_df.columns if x in base.columns]
for x in changeable_col:
log_df[x] = log_df[x].astype(base[x].dtype)
action_history_df = (
log_df.groupby(key_list)["OPERATION"]
.agg(lambda x: x.tolist())
.rename("OPERATION_history")
.reset_index()
)
log_df = pd.merge(log_df, action_history_df, on=key_list, how="left")
log_df.drop_duplicates(subset=key_list, keep="last", inplace=True)
log_df["OPERATION_final"] = log_df.apply(lambda x: _final_op(x), axis=1)
return log_df[changeable_col + ["OPERATION_final"]]
if len(link_changes_df) != 0:
link_changes_df = _consolidate_actions(
link_changes_df, self.base_roadway_network.links_df, ["A", "B"]
)
if len(node_changes_df) != 0:
node_changes_df = _consolidate_actions(
node_changes_df, self.base_roadway_network.nodes_df, ["model_node_id"]
)
# print error message for node change and node deletion
if (
len(node_changes_df[node_changes_df.OPERATION_final.isin(["C", "D"])])
> 0
):
msg = "NODE changes and deletions are not allowed!"
WranglerLogger.error(msg)
raise ValueError(msg)
node_add_df = node_changes_df[node_changes_df.OPERATION_final == "A"]
else:
node_add_df = pd.DataFrame()
# process deletions
WranglerLogger.debug("Processing link deletions")
cube_delete_df = link_changes_df[link_changes_df.OPERATION_final == "D"]
if cube_delete_df.shape[1] > 0:
links_to_delete = cube_delete_df["model_link_id"].tolist()
delete_link_dict = {
"category": "Roadway Deletion",
"links": {"model_link_id": links_to_delete},
}
WranglerLogger.debug("{} Links Deleted.".format(len(links_to_delete)))
else:
delete_link_dict = None
WranglerLogger.debug("No link deletions processed")
# process additions
WranglerLogger.debug("Processing link additions")
cube_add_df = link_changes_df[link_changes_df.OPERATION_final == "A"]
if cube_add_df.shape[1] > 0:
if limit_variables_to_existing_network:
add_col = [
c
for c in cube_add_df.columns
if c in self.base_roadway_network.links_df.columns
]
else:
add_col = cube_add_df.columns
# can leave out "OPERATION_final" from writing out, is there a reason to write it out?
add_link_properties = cube_add_df[add_col].to_dict("records")
# WranglerLogger.debug("Add Link Properties: {}".format(add_link_properties))
WranglerLogger.debug("{} Links Added".format(len(add_link_properties)))
add_link_dict = {"category": "New Roadway", "links": add_link_properties}
else:
WranglerLogger.debug("No link additions processed")
add_link_dict = {}
if len(node_add_df):
add_nodes_dict_list = node_add_df.drop(["OPERATION_final"], axis=1).to_dict(
"records"
)
WranglerLogger.debug("{} Nodes Added".format(len(add_nodes_dict_list)))
add_link_dict["nodes"] = add_nodes_dict_list
else:
WranglerLogger.debug("No Nodes Added")
node_dict_list = None
# process changes
WranglerLogger.debug("Processing changes")
changeable_col = [
x
for x in link_changes_df.columns
if x in self.base_roadway_network.links_df.columns
]
change_link_dict_df = pd.DataFrame()
if len(link_changes_df[
link_changes_df.OPERATION_final == "C"
]) > 0:
for index, change_row in link_changes_df[
link_changes_df.OPERATION_final == "C"
].iterrows():
base_df = self.base_roadway_network.links_df[
(self.base_roadway_network.links_df["A"] == change_row.A)
& (self.base_roadway_network.links_df["B"] == change_row.B)
]
if not base_df.shape[0]:
msg = "No match found in network for AB combination: ({},{}). Incompatible base network.".format(
change_row.A, change_row.B
)
WranglerLogger.error(msg)
raise ValueError(msg)
if base_df.shape[0] > 1:
WranglerLogger.warning(
"Found more than one match in base network for AB combination: ({},{}). Selecting first one to operate on but AB should be unique to network.".format(
row.A, row.B
)
)
base_row = base_df.iloc[0]
out_col = []
for col in changeable_col:
# if it is the same as before, or a static value, don't process as a change
if (str(change_row[col]) == base_row[col].astype(str)) | (
col in Project.STATIC_VALUES
):
continue
if ((col == "roadway_class") & (change_row[col] == 0)):
continue
# only look at distance if it has significantly changed
if col == "distance":
if (
abs(
(change_row[col] - base_row[col].astype(float))
/ base_row[col].astype(float)
)
> 0.01
):
out_col.append(col)
else:
continue
else:
out_col.append(col)
if len(out_col) > 0:
property_dict_list = []
for c in out_col:
if (c[:-3] in list(self.parameters.properties_to_split.keys())) | (
c.split("_")[0] == "price"
) :
split_existing = 0
for property in property_dict_list:
if (property["property"] == c.split("_")[0]) & (property["existing"] == base_row[c]):
if (c.split("_")[0] == "price") :
property["timeofday"] = property["timeofday"] + [{
"time" : list(self.parameters.time_period_to_time[c.split("_")[-1]]),
"category" : c.split("_")[-2],
"set" : change_row[c]
}]
else:
property["timeofday"] = property["timeofday"] + [{
"time" : list(self.parameters.time_period_to_time[c.split("_")[-1]]),
"set" : change_row[c]
}]
split_existing = 1
else:
continue
if split_existing == 0:
property_dict = {}
if c.split("_")[0] == "ML":
property_dict["property"] = c[:-3]
else:
property_dict["property"] = c.split("_")[0]
property_dict["existing"] = base_row[c]
if (c.split("_")[0] == "price") :
property_dict["timeofday"] = [{
"time" : list(self.parameters.time_period_to_time[c.split("_")[-1]]),
"category" : c.split("_")[-2],
"set" : change_row[c]
}]
else:
property_dict["timeofday"] = [{
"time" : list(self.parameters.time_period_to_time[c.split("_")[-1]]),
"set" : change_row[c]
}]
property_dict_list.append(property_dict)
else:
property_dict = {}
property_dict["property"] = c
property_dict["existing"] = base_row[c]
property_dict["set"] = change_row[c]
property_dict_list.append(property_dict)
# WranglerLogger.debug("property_dict_list: {}".format(property_dict_list))
# WranglerLogger.debug("base_df.model_link_id: {}".format(base_row['model_link_id']))
card_df = pd.DataFrame(
{
"properties": pd.Series([property_dict_list]),
"model_link_id": pd.Series(base_row["model_link_id"]),
}
)
else:
card_df = pd.DataFrame()
# WranglerLogger.debug('card_df: {}'.format(card_df))
change_link_dict_df = pd.concat(
[change_link_dict_df, card_df], ignore_index=True, sort=False
)
change_link_dict_df["properties"] = change_link_dict_df["properties"].astype(
str
)
# WranglerLogger.debug('change_link_dict_df 1: {}'.format(change_link_dict_df))
change_link_dict_df = (
change_link_dict_df.groupby("properties")[["model_link_id"]]
.agg(lambda x: list(x))
.reset_index()
)
# WranglerLogger.debug('change_link_dict_df 2: {}'.format(change_link_dict_df))
change_link_dict_df["facility"] = change_link_dict_df.apply(
lambda x: {"link": {"model_link_id": x.model_link_id}}, axis=1
)
# WranglerLogger.debug('change_link_dict_df 3: {}'.format(change_link_dict_df))
change_link_dict_df["properties"] = change_link_dict_df["properties"].apply(
lambda x: json.loads(x.replace("'", '"'))
)
change_link_dict_list = change_link_dict_df[["facility", "properties"]].to_dict(
"record"
)
for change in change_link_dict_list:
change["category"] = "Roadway Attribute Change"
WranglerLogger.debug("{} Changes Processed".format(len(change_link_dict_list)))
else:
WranglerLogger.debug("No link changes processed")
change_link_dict_list = []
highway_change_list = list(
filter(None, [delete_link_dict] + [add_link_dict] + change_link_dict_list)
)
return highway_change_list
| StarcoderdataPython |
1736480 | <reponame>Mehi335/testreport.py
# Imports all (*) classes,
# atributes, and methods of tkinter into the
# current workspace
import tkinter as tk
#import tkinter.messagebox as messagebox
def il_get_value():
    """Prompt the user for an IL limit in a small Tk window and return it.

    Blocks until the user enters a valid decimal number and clicks "Apply";
    invalid input pops an error dialog and keeps the window open.

    Returns:
        float: the value typed into the entry box.
    """
    # BUGFIX: the module-level messagebox import was commented out, so the
    # error popup raised NameError; import it locally instead.
    from tkinter import messagebox

    il_get_window = tk.Tk()
    il_get_window.title('TestGraph')
    mystring = tk.StringVar()

    def getvalue():
        # Close the window only when the text parses as a float.
        try:
            float(mystring.get())
            il_get_window.destroy()
        except ValueError:
            messagebox.showerror("Not a number!", "Decimal number format should be 0.00")

    tk.Label(il_get_window, text="IL limit for 1 connector (0.08 by default) : ").grid(row=0, sticky=tk.W)  # label
    tk.Entry(il_get_window, textvariable=mystring).grid(row=0, column=1, sticky=tk.E)  # entry textbox
    tk.Button(il_get_window, text="Apply", command=getvalue).grid(row=0, column=4, sticky=tk.W)  # button
    il_get_window.mainloop()
    return float(mystring.get())


if __name__ == "__main__":
    # Guarded so importing this module does not open a GUI window.
    print(il_get_value())
| StarcoderdataPython |
212132 | from enum import Enum
from typing import Optional, Union, Dict, Any, List
try:
from typing_extensions import TypedDict
except ImportError:
from typing import TypedDict
class HttpCredentials(TypedDict):
    """Plain username/password pair used for HTTP authentication."""
    username: str
    password: str
class _GeoCoordinated(TypedDict):
    """Required longitude/latitude core; extended by `GeoLocation`."""
    longitude: float
    latitude: float
class GeoLocation(_GeoCoordinated, total=False):
    """Defines the geolocation.
    - ``latitude`` Latitude between -90 and 90.
    - ``longitude`` Longitude between -180 and 180.
    - ``accuracy`` *Optional* Non-negative accuracy value. Defaults to 0.
    Example usage: ``{'latitude': 59.95, 'longitude': 30.31667}``"""
    # Optional (total=False) -- latitude/longitude stay required via the base.
    accuracy: float
class Small(Enum):
    """This is the Documentation.
    This was defined within the class definition."""
    # Demo enum; used as the keyword-argument type in DataTypesLibrary.__init__.
    one = 1
    two = 2
    three = 3
    four = 4
# Functional Enum definition: several names ("equal" and "==") alias the same
# operator symbol, which the class-based syntax cannot express directly.
AssertionOperator = Enum(
    "AssertionOperator",
    {
        "equal": "==",
        "==": "==",
        "<": "<",
        ">": ">",
        "<=": "<=",
        ">=": ">="
    },
)
# Functional enums get no docstring of their own, so assign one explicitly.
AssertionOperator.__doc__ = """This is some Doc
This was defined by assigning to __doc__."""
class DataTypesLibrary:
    """This Library has Data Types.
    It has some in ``__init__`` and others in the `Keywords`.
    The DataTypes are the following that should be linked.
    `HttpCredentials` , `GeoLocation` , `Small` and `AssertionOperator`.
    """

    def __init__(self, credentials: Small = Small.one):
        """This is the init Docs.
        It links to `Set Location` keyword and to `GeoLocation` data type."""
        print(type(credentials))

    def set_location(self, location: GeoLocation):
        # Stub keyword: exists so `GeoLocation` appears in generated docs.
        pass

    def assert_something(self, value, operator: Optional[AssertionOperator] = None, exp: str = 'something?'):
        """This links to `AssertionOperator` .
        This is the next Line that links to `Set Location` ."""
        pass

    def funny_unions(self,
                     funny: Union[
                         bool,
                         Union[
                             int,
                             float,
                             bool,
                             str,
                             AssertionOperator,
                             Small,
                             GeoLocation,
                             None]] = AssertionOperator.equal):
        # Stub keyword exercising a nested Union annotation in the docs.
        pass

    def typing_types(self, list_of_str: List[str], dict_str_int: Dict[str, int], Whatever: Any, *args: List[Any]):
        # Stub keyword exercising typing-module annotations in the docs.
        pass
| StarcoderdataPython |
3511827 | <reponame>playbase/DDOS-Attack<filename>web-app/app.py
import pandas as pd
from flask import Flask, request, jsonify, render_template
import joblib
app = Flask(__name__)
# Load the pre-trained model once at import time so every request reuses it.
# joblib.load accepts a path directly, which avoids leaving the previously
# used open('model.pkl', 'rb') file handle unclosed.
model = joblib.load('model.pkl')
@app.route('/')
def home():
    """Render the landing page containing the input form."""
    return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
    '''
    For rendering results on HTML GUI
    '''
    # Collect the submitted form fields in order.
    # NOTE(review): assumes the form posts exactly these six numeric fields
    # in this order -- verify against the template's input names.
    int_features = [x for x in request.form.values()]
    # One-row frame: the transpose turns the column of values into a row.
    final_features = pd.DataFrame(int_features).T
    final_features.columns = ['average_dur','stddev_dur','min_dur','max_dur','srate','drate']
    print('df',final_features)  # debug output
    prediction = model.predict(final_features)
    output = prediction[0]
    return render_template('index.html', prediction_text='Category is {}'.format(output))

if __name__ == "__main__":
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
100762 | <gh_stars>1-10
from Callable import Callable
from Environment import Environment
class FunctionCallable(Callable):
    """Runtime wrapper that makes a parsed function declaration callable."""

    def __init__(self, declaration):
        # AST node carrying the function's name, parameter tokens and body.
        self.declaration = declaration

    def arity(self):
        """Number of parameters the declared function expects."""
        return len(self.declaration.params)

    def call(self, interpreter, arguments):
        """Execute the function body with *arguments* bound to its parameters."""
        env = Environment(enclosing=interpreter.globals)
        # Bind each parameter name to the corresponding argument value.
        for token, value in zip(self.declaration.params, arguments):
            env.define(token.lexeme, value)
        interpreter.executeBlock(self.declaration.body, env)
        return None

    def __str__(self):
        return f"<Function '{self.declaration.name.lexeme}'>"
| StarcoderdataPython |
4874829 | from __future__ import annotations
class ExperimentConflictError(Exception):
    """experiment conflict error"""


class ExperimentNotFoundError(Exception):
    """experiment not found error"""


class WorkerNotFoundError(Exception):
    """worker not found error"""


# Git-related errors share a common base so callers can catch them together.
class GitError(Exception):
    """git error (base class for the git exceptions below)"""


class GitCommandNotFoundError(GitError):
    """git command not found error"""


class GitRepositoryNotFoundError(GitError):
    """git repository not found error"""
| StarcoderdataPython |
305342 | <reponame>LaudateCorpus1/labmanager-unit-vsphere<gh_stars>1-10
from web.settings import Settings as settings
import asyncio
import logging
import sys
import datetime
import time
import base64
import hashlib
from sanic.response import json as sanicjson
import ldap
logger = logging.getLogger()
def get_ldap_connection(username, password):
    """Bind to the configured LDAP server and return the bound connection.

    Tries a service-account style bind with the raw ``username`` first, then
    falls back to a user-style bind as ``username@domain``.

    Returns:
        The bound ldap connection object, or ``None`` if both binds fail.
    """
    if not settings.app['service']['ldap'].get('cert_check', True):
        ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
    l_obj = ldap.initialize(settings.app['service']['ldap']['url'])
    l_obj.set_option(ldap.OPT_TIMEOUT, 8)
    l_obj.protocol_version = ldap.VERSION3
    try:
        l_obj.simple_bind_s(username, password)
        return l_obj
    except Exception:
        logger.warning("Svc-like auth attempt failed, trying user-like attempt...")
        try:
            # BUGFIX: this expression was corrupted (a redacted "<EMAIL>"
            # placeholder left invalid syntax); rebuilt as the user@domain
            # principal, matching the configured domain_name setting.
            l_obj.simple_bind_s(
                "{}@{}".format(username, settings.app['service']['ldap']['domain_name']),
                password
            )
            return l_obj
        except Exception as iex:
            logger.error(
                "An exception occured when connecting to ldap: {}".format(iex)
            )
            return None
def terminate_ldap_connection(conn):
    """Synchronously unbind and release an ldap connection."""
    conn.unbind_s()
async def parse_auth(request):
    """Decode the request's HTTP Basic ``Authorization`` header.

    Returns:
        dict with ``username`` and ``password`` keys, or ``None`` when the
        header does not carry Basic credentials.
    """
    auth_string = request.headers['authorization']
    if not auth_string.startswith("Basic "):
        return None
    encoded = auth_string[len("Basic "):]
    decoded = base64.b64decode(encoded.encode('ascii')).decode('ascii')
    # Split only on the first colon: passwords may contain colons themselves.
    parts = decoded.split(':', 1)
    return {'username': parts[0], 'password': parts[1]}
async def anonymize_password(password):
    """Return the hex MD5 digest of *password* (used only for log output)."""
    digest = hashlib.md5(password.encode('ascii'))
    return digest.hexdigest()
def get_user_dn(conn, username):
    """Look up the distinguished name for *username*.

    Returns the DN of the first matching user entry, or ``None`` when the
    search fails or returns nothing.
    """
    query = "(&(objectClass=user)(sAMAccountName={}))".format(username)
    try:
        hits = conn.search_s(
            settings.app['service']['ldap']['base_dn'],
            ldap.SCOPE_SUBTREE,
            attrlist=[],
            filterstr=query,
        )
        return hits[0][0]
    except Exception:
        # Empty result raises IndexError above; any failure maps to None.
        return None
def check_group(conn, group_dn, user_dn):
    """Return True if *user_dn* is a direct member of the group at *group_dn*.

    Any LDAP failure (including a nonexistent group) is logged and treated
    as non-membership.
    """
    try:
        result = conn.search_s(
            group_dn,
            ldap.SCOPE_BASE,
            filterstr="(&(objectClass=group)(member={}))".format(user_dn),
            attrlist=["name"],
        )
        # The base-scope search only returns the entry when the member filter
        # matched; confirm the DN for good measure.
        is_a_member = result[0][0] == group_dn
        return is_a_member
    except Exception as ex:
        # An empty result raises IndexError above; every failure counts as
        # "not a member".
        logger.warning("Failed check in group: {}".format(group_dn))
        return False
async def auth(request):
    """Authenticate the request's HTTP Basic credentials against LDAP.

    On success, annotates ``request.headers`` with the login, the user's DN
    and an AUTHORISED_AS level ('user' or 'admin') and returns ``None``.
    On failure, returns a sanic JSON error response (401 or 403).
    """
    if 'authorization' not in request.headers:
        return sanicjson(
            {"error": "you cannot be authenticated to access the service, no credentials provided"},
            401
        )
    loop = asyncio.get_event_loop()
    try:
        auth_struct = await parse_auth(request)
        # passwords are only ever logged as an MD5 digest
        anonymized_passwd = await anonymize_password(auth_struct['password'])
    except Exception as ex:
        logger.warning("Not parsable auth header received: {}".format(ex))
        return sanicjson(
            {"error": "wrong authorization header received, you cannot be authenticated to access the service"},
            401
        )
    logger.debug("An attempt to auth: {}:{}".format(auth_struct['username'], anonymized_passwd))
    # blocking python-ldap calls run in the default executor to keep the loop free
    conn = await loop.run_in_executor(None, get_ldap_connection, auth_struct['username'], auth_struct['password'])
    if conn is None:
        logger.warning("wrong credentials for: {}".format(auth_struct['username']))
        return sanicjson({"error": "you cannot be authenticated to access the service"}, 401)
    try:
        user_dn = await loop.run_in_executor(None, get_user_dn, conn, auth_struct['username'])
        # membership in the 'user' group is checked first, then 'admin'
        user_group_dn = settings.app['service']['ldap']['ugroup']
        is_auth_as_user = await loop.run_in_executor(None, check_group, conn, user_group_dn, user_dn)
        if is_auth_as_user:
            # NOTE(review): mutating the incoming request headers is how the
            # auth result reaches downstream handlers -- confirm they read
            # exactly these keys.
            request.headers['LDAP_AUTHORISED_LOGIN'] = auth_struct['username']
            request.headers['LDAP_AUTHORISED_DN'] = user_dn
            request.headers['AUTHORISED_AS'] = 'user'
            return
        admin_group_dn = settings.app['service']['ldap']['agroup']
        is_auth_as_admin = await loop.run_in_executor(None, check_group, conn, admin_group_dn, user_dn)
        if is_auth_as_admin:
            request.headers['LDAP_AUTHORISED_LOGIN'] = auth_struct['username']
            request.headers['LDAP_AUTHORISED_DN'] = user_dn
            request.headers['AUTHORISED_AS'] = 'admin'
            return
        return sanicjson({"error": "you are not authorized to see the content"}, 403)
    finally:
        # always release the LDAP connection, success or failure
        await loop.run_in_executor(None, terminate_ldap_connection, conn)
| StarcoderdataPython |
6443765 | # -*- coding: utf-8 -*-
import unittest
from lunar_python import SolarSeason
class SolarSeasonTest(unittest.TestCase):
    """Checks SolarSeason string formatting and quarter arithmetic."""

    def test(self):
        # 2019-05 falls in the second quarter of 2019.
        season = SolarSeason.fromYm(2019, 5)
        self.assertEqual("2019.2", season.toString())
        self.assertEqual("2019年2季度", season.toFullString())
        # next(1) advances by one quarter.
        self.assertEqual("2019.3", season.next(1).toString())
        self.assertEqual("2019年3季度", season.next(1).toFullString())
| StarcoderdataPython |
4999849 | <filename>pvlib/tools.py
"""
Collection of functions used in pvlib_python
"""
import datetime as dt
import numpy as np
import pandas as pd
import pytz
import warnings
def cosd(angle):
    """
    Cosine with angle input in degrees

    Parameters
    ----------
    angle : float or array-like
        Angle in degrees

    Returns
    -------
    result : float or array-like
        Cosine of the angle
    """
    return np.cos(np.radians(angle))
def sind(angle):
    """
    Sine with angle input in degrees

    Parameters
    ----------
    angle : float
        Angle in degrees

    Returns
    -------
    result : float
        Sin of the angle
    """
    return np.sin(np.radians(angle))
def tand(angle):
    """
    Tan with angle input in degrees

    Parameters
    ----------
    angle : float
        Angle in degrees

    Returns
    -------
    result : float
        Tan of the angle
    """
    return np.tan(np.radians(angle))
def asind(number):
    """
    Inverse Sine returning an angle in degrees

    Parameters
    ----------
    number : float
        Input number

    Returns
    -------
    result : float
        arcsin result
    """
    return np.degrees(np.arcsin(number))
def localize_to_utc(time, location):
    """
    Converts or localizes a time series to UTC.

    Parameters
    ----------
    time : datetime.datetime, pandas.DatetimeIndex,
           or pandas.Series/DataFrame with a DatetimeIndex.
    location : pvlib.Location object
        Supplies the timezone (``location.tz``) assumed for naive input.

    Returns
    -------
    pandas object localized to UTC.
    """
    if isinstance(time, dt.datetime):
        # Naive datetimes are first interpreted in the location's timezone.
        if time.tzinfo is None:
            time = pytz.timezone(location.tz).localize(time)
        time_utc = time.astimezone(pytz.utc)
    else:
        # EAFP: tz_convert works on tz-aware pandas objects; naive ones raise
        # TypeError and are localized to the location's timezone first.
        try:
            time_utc = time.tz_convert('UTC')
        except TypeError:
            time_utc = time.tz_localize(location.tz).tz_convert('UTC')
    return time_utc
def datetime_to_djd(time):
    """
    Converts a datetime to the Dublin Julian Day

    Parameters
    ----------
    time : datetime.datetime
        time to convert

    Returns
    -------
    float
        fractional days since 12/31/1899+0000
    """
    # Naive input is assumed to already be in UTC.
    if time.tzinfo is None:
        time_utc = pytz.utc.localize(time)
    else:
        time_utc = time.astimezone(pytz.utc)
    # DJD epoch: noon on 1899-12-31 UTC.
    djd_start = pytz.utc.localize(dt.datetime(1899, 12, 31, 12))
    djd = (time_utc - djd_start).total_seconds() * 1.0/(60 * 60 * 24)
    return djd
def djd_to_datetime(djd, tz='UTC'):
    """
    Converts a Dublin Julian Day float to a datetime.datetime object

    Parameters
    ----------
    djd : float
        fractional days since 12/31/1899+0000
    tz : str, default 'UTC'
        timezone to localize the result to

    Returns
    -------
    datetime.datetime
        The resultant datetime localized to tz
    """
    # DJD epoch: noon on 1899-12-31 UTC (inverse of datetime_to_djd).
    djd_start = pytz.utc.localize(dt.datetime(1899, 12, 31, 12))
    utc_time = djd_start + dt.timedelta(days=djd)
    return utc_time.astimezone(pytz.timezone(tz))
def _pandas_to_doy(pd_object):
"""
Finds the day of year for a pandas datetime-like object.
Useful for delayed evaluation of the dayofyear attribute.
Parameters
----------
pd_object : DatetimeIndex or Timestamp
Returns
-------
dayofyear
"""
return pd_object.dayofyear
def _doy_to_datetimeindex(doy, epoch_year=2014):
"""
Convert a day of year scalar or array to a pd.DatetimeIndex.
Parameters
----------
doy : numeric
Contains days of the year
Returns
-------
pd.DatetimeIndex
"""
doy = np.atleast_1d(doy).astype('float')
epoch = pd.Timestamp('{}-12-31'.format(epoch_year - 1))
timestamps = [epoch + dt.timedelta(days=adoy) for adoy in doy]
return pd.DatetimeIndex(timestamps)
def _datetimelike_scalar_to_doy(time):
    # Wrap the scalar in a one-element DatetimeIndex so .dayofyear works
    # uniformly for any datetime-like input.
    return pd.DatetimeIndex([pd.Timestamp(time)]).dayofyear


def _datetimelike_scalar_to_datetimeindex(time):
    # Promote a single datetime-like value to a one-element DatetimeIndex.
    return pd.DatetimeIndex([pd.Timestamp(time)])
def _scalar_out(arg):
if np.isscalar(arg):
output = arg
else: #
# works if it's a 1 length array and
# will throw a ValueError otherwise
output = np.asarray(arg).item()
return output
def _array_out(arg):
if isinstance(arg, pd.Series):
output = arg.values
else:
output = arg
return output
def _build_kwargs(keys, input_dict):
"""
Parameters
----------
keys : iterable
Typically a list of strings.
input_dict : dict-like
A dictionary from which to attempt to pull each key.
Returns
-------
kwargs : dict
A dictionary with only the keys that were in input_dict
"""
kwargs = {}
for key in keys:
try:
kwargs[key] = input_dict[key]
except KeyError:
pass
return kwargs
def _build_args(keys, input_dict, dict_name):
"""
Parameters
----------
keys : iterable
Typically a list of strings.
input_dict : dict-like
A dictionary from which to pull each key.
dict_name : str
A variable name to include in an error message for missing keys
Returns
-------
kwargs : list
A list with values corresponding to keys
"""
try:
args = [input_dict[key] for key in keys]
except KeyError as e:
missing_key = e.args[0]
msg = (f"Missing required parameter '{missing_key}'. Found "
f"{input_dict} in {dict_name}.")
raise KeyError(msg)
return args
# Created April,2014
# Author: <NAME>, <NAME>
# Modified: November, 2020 by <NAME>, to add atol and change exit
# criteria
def _golden_sect_DataFrame(params, lower, upper, func, atol=1e-8):
    """
    Vectorized golden section search for finding maximum of a function of a
    single variable.

    Parameters
    ----------
    params : dict of numeric
        Parameters to be passed to `func`. Each entry must be of the same
        length.
    lower: numeric
        Lower bound for the optimization. Must be the same length as each
        entry of params.
    upper: numeric
        Upper bound for the optimization. Must be the same length as each
        entry of params.
    func: function
        Function to be optimized. Must be in the form
        result = f(dict or DataFrame, str), where result is a dict or DataFrame
        that also contains the function output, and str is the key
        corresponding to the function's input variable.

    Returns
    -------
    numeric
        function evaluated at the optimal points
    numeric
        optimal points

    Notes
    -----
    This function will find the points where the function is maximized.
    Returns nan where lower or upper is nan, or where func evaluates to nan.

    See also
    --------
    pvlib.singlediode._pwr_optfcn
    """
    # 1/golden-ratio (~0.618): the interval shrink factor per iteration.
    phim1 = (np.sqrt(5) - 1) / 2

    df = params
    df['VH'] = upper
    df['VL'] = lower

    converged = False
    iterations = 0

    # handle all NaN case gracefully
    with warnings.catch_warnings():
        warnings.filterwarnings(action='ignore',
                                message='All-NaN slice encountered')
        # Number of shrink steps needed for the widest interval to reach atol.
        iterlimit = 1 + np.nanmax(
            np.trunc(np.log(atol / (df['VH'] - df['VL'])) / np.log(phim1)))

    while not converged and (iterations <= iterlimit):

        # Two interior test points at the golden sections of [VL, VH].
        phi = phim1 * (df['VH'] - df['VL'])
        df['V1'] = df['VL'] + phi
        df['V2'] = df['VH'] - phi

        df['f1'] = func(df, 'V1')
        df['f2'] = func(df, 'V2')
        # Keep, elementwise, the half-interval containing the larger value.
        df['SW_Flag'] = df['f1'] > df['f2']

        df['VL'] = df['V2']*df['SW_Flag'] + df['VL']*(~df['SW_Flag'])
        df['VH'] = df['V1']*~df['SW_Flag'] + df['VH']*(df['SW_Flag'])

        err = abs(df['V2'] - df['V1'])

        # works with single value because err is np.float64
        converged = (err[~np.isnan(err)] < atol).all()
        # err will be less than atol before iterations hit the limit
        # but just to be safe
        iterations += 1

    if iterations > iterlimit:
        raise Exception("Iterations exceeded maximum. Check that func",
                        " is not NaN in (lower, upper)")  # pragma: no cover

    try:
        func_result = func(df, 'V1')
        # Propagate NaN where the function itself evaluated to NaN.
        x = np.where(np.isnan(func_result), np.nan, df['V1'])
    except KeyError:
        func_result = np.full_like(upper, np.nan)
        x = func_result.copy()

    return func_result, x
def _get_sample_intervals(times, win_length):
    """ Calculates time interval and samples per window for Reno-style clear
    sky detection functions

    Parameters
    ----------
    times : pandas.DatetimeIndex
    win_length : numeric
        Window length in minutes (divided by the sample interval below).

    Returns
    -------
    sample_interval : numeric
        Spacing of *times*, in minutes.
    samples_per_window : int
    """
    # np.timedelta64(1, '60s') is one 60-second unit, so deltas are in minutes.
    deltas = np.diff(times.values) / np.timedelta64(1, '60s')
    # determine if we can proceed
    # requires an inferable frequency and perfectly regular spacing
    if times.inferred_freq and len(np.unique(deltas)) == 1:
        sample_interval = times[1] - times[0]
        sample_interval = sample_interval.seconds / 60  # in minutes
        samples_per_window = int(win_length / sample_interval)
        return sample_interval, samples_per_window
    else:
        raise NotImplementedError('algorithm does not yet support unequal '
                                  'times. consider resampling your data.')
| StarcoderdataPython |
4993945 | import sys, os,pathlib
current_file_path = pathlib.Path(__file__).parent.absolute()
sys.path.insert(1,'/Users/matin/Downloads/testProjs/RTvisalize')
from realtime import monitor
# One entry per live chart: the CSV file to watch, how to draw it, and sizing.
settings = {
    "Cell count": {
        "graph_dir" : "cell_count.csv",
        "graph_type" : 'lines',
        "graph_size" : 800,
        'x-axis-moves': False
    },
    "ECM": {
        "graph_dir" : "ECM.csv",
        "graph_type" : 'scatter',
        "graph_size" : 800
    },
    "Cells": {
        "graph_dir" : "cells.csv",
        "graph_type" : 'scatter',
        "graph_size" : 800
    },
}

# Start the realtime monitor loop over the configured graphs (blocks).
monitor.watch(settings).run()
1727220 | <reponame>pedrodaniel10/ALC
#!/usr/bin/env python3
# File: chk.py
# Author: mikolas
# Created on: Fri Oct 11 14:18:32 WEST 2019
# Copyright (C) 2019, <NAME>
import sys
def err(msg):
    """Print an error message and terminate the program with exit status 1."""
    print("ERROR:", msg)
    # sys.exit instead of the site-provided `exit` helper, which may be
    # absent when running with -S or in frozen executables.
    sys.exit(1)
def parse_samples(f):
    """Parse a sample file (any iterable of lines).

    The first non-blank line is the header of integers; every following
    non-blank line is one sample, a list of integers.

    Returns:
        tuple: (header, samples).
    """
    header = None
    samples = []
    for line in f:
        tokens = line.rstrip().split()
        if not tokens:
            continue
        values = [int(tok) for tok in tokens]
        if header is None:
            header = values
        else:
            samples.append(values)
    return (header, samples)
def chk(nfts, nns, ls, samples):
    """Validate a decision-tree description against the expected samples.

    Args:
        nfts: declared number of features (currently unused by the checks).
        nns: declared number of nodes.
        ls: iterable of description lines -- 'l'/'r' edges, 'c' node class,
            'a' node feature assignment, or the single word 'UNSAT'.
        samples: list of int vectors; the last entry of each is the
            expected class (0/1).

    Calls err() (which exits the process) on any malformed tree or on the
    first misclassified sample; returns True when every sample matches, or
    None for the UNSAT case.
    """
    all_ns = set()
    lns = dict() # left children
    rns = dict() # right children
    a = dict() # assigned features
    tl = set() # leaf true
    fl = set() # leaf false
    # Parse the description lines into the maps/sets above.
    for l in ls:
        l = l.rstrip()
        if not l: continue
        if l == 'UNSAT':
            print('OK (UNSAT)')
            return
        spl = l.split()
        if spl[0] == 'l' or spl[0] == 'r':
            # edge line: "<parent> <child>"
            vs = [ int(s) for s in spl[1:] ]
            assert(len(vs)==2)
            all_ns.add(vs[0])
            all_ns.add(vs[1])
            if spl[0] == 'l':
                if vs[0] in lns: err("{} already has left child".format(vs[0]))
                lns[vs[0]] = vs[1]
            else:
                if vs[0] in rns: err("{} already has right child".format(vs[0]))
                rns[vs[0]] = vs[1]
        if spl[0] == 'c':
            # class line: "<node> <0|1>"
            vs = [ int(s) for s in spl[1:] ]
            assert(len(vs)==2)
            all_ns.add(vs[0])
            if vs[1] == 0:
                fl.add(vs[0])
            elif vs[1] == 1:
                tl.add(vs[0])
            else:
                assert(False)
        if spl[0] == 'a':
            # assignment line: "<feature> <node>"
            vs = [ int(s) for s in spl[1:] ]
            assert(len(vs)==2)
            if vs[1] in a: err("{} already has assigned feature".format(vs[1]))
            a[vs[1]] = vs[0]
            all_ns.add(vs[1])
    if len(all_ns) != nns:
        err("wrong number of nodes")

    def check_structure(nd, visited):
        # DFS from the root verifying: acyclic, each node has 0 or 2 children,
        # leaves carry a class but no feature, internal nodes the opposite.
        if nd in visited: err("there is a cycle on node {}".format(nd))
        visited.add(nd)
        hl = nd in lns
        hr = nd in rns
        if hl != hr: err("{} can only have zero or two children".format(nd))
        if not hl:
            if nd in a: err("{} is a leaf and therefore it cannot have have a feature assigned".format(nd))
            if nd not in fl and nd not in tl: err("{} is a leaf and therefore it has to have a class assigned".format(nd))
            return
        if nd not in a: err("{} is internal and therefore it has to have a feature assigned".format(nd))
        if nd in fl or nd in tl: err("{} is internal and therefore it cannot have have a class assigned".format(nd))
        check_structure(lns[nd], visited)
        check_structure(rns[nd], visited)
    # Node 1 is the root by convention.
    check_structure(1, set())

    def get_val(nd, sample):
        # Evaluate the tree on a sample: 0/1 at leaves, otherwise branch left
        # on 0 / right on 1 of the node's feature (features are 1-indexed).
        if nd in fl: return 0
        if nd in tl: return 1
        ftr = a[nd]
        nxt = lns[nd] if sample[ftr - 1] == 0 else rns[nd]
        return get_val(nxt, sample)
    for sample in samples:
        if sample[-1] != get_val(1, sample):
            err('FAIL on sample {} '.format(sample))
            return False
        else:
            print('OK on {} '.format(sample))
    return True
if __name__ == "__main__":
    # Usage: chk.py <sample-file>; the tree description is read from stdin.
    if len(sys.argv) != 2:
        print('USAGE: {} <sample-file>'.format(sys.argv[0]))
        exit(1)
    with open(sys.argv[1]) as sf:
        nms, samples = parse_samples(sf)
    # nms[0] = number of features, nms[1] = number of nodes.
    if chk(nms[0], nms[1], sys.stdin, samples):
        print('OK')
| StarcoderdataPython |
8169738 | from __future__ import division
import os
import cv2
import numpy as np
import pickle
import time
from kentaroy47.config import Config as config
from keras import backend as K
from keras.layers import Input
from keras.models import Model
import kentaroy47.roi_helpers as roi_helpers
import argparse
import os
import kentaroy47.resnet as nn
from kentaroy47.visualize import draw_boxes_and_label_on_image_cv2
from kentaroy47.predict import predict_single_image, parse_args
def predict(args_):
    """Run Faster R-CNN inference on a single image or a directory of images.

    Loads the pickled training configuration and the ResNet-based RPN and
    classifier weights, then delegates to ``predict_single_image`` for each
    input image.

    Args:
        args_: path to an image file or to a directory of images.
    """
    # from: https://github.com/kentaroy47/frcnn-from-scratch-with-keras
    path = args_
    with open('config.pickle', 'rb') as f_in:
        cfg = pickle.load(f_in)
    # inference uses no data augmentation
    cfg.use_horizontal_flips = False
    cfg.use_vertical_flips = False
    cfg.rot_90 = False
    class_mapping = cfg.class_mapping
    if 'bg' not in class_mapping:
        class_mapping['bg'] = len(class_mapping)
    # invert name->index into index->name for labeling predictions
    class_mapping = {v: k for k, v in class_mapping.items()}
    input_shape_img = (None, None, 3)
    input_shape_features = (None, None, 1024)
    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(cfg.num_rois, 4))
    feature_map_input = Input(shape=input_shape_features)
    # shared ResNet backbone feeding both the RPN and the classifier head
    shared_layers = nn.nn_base(img_input, trainable=True)
    # define the RPN, built on the base layers
    num_anchors = len(cfg.anchor_box_scales) * len(cfg.anchor_box_ratios)
    rpn_layers = nn.rpn(shared_layers, num_anchors)
    classifier = nn.classifier(feature_map_input, roi_input, cfg.num_rois, nb_classes=len(class_mapping),
                               trainable=True)
    model_rpn = Model(img_input, rpn_layers)
    model_classifier_only = Model([feature_map_input, roi_input], classifier)
    model_classifier = Model([feature_map_input, roi_input], classifier)
    print('Loading weights from {}'.format(cfg.model_path))
    # model_classifier shares layers with model_classifier_only, so loading
    # weights into it populates both
    model_rpn.load_weights(cfg.model_path, by_name=True)
    model_classifier.load_weights(cfg.model_path, by_name=True)
    # compile is only required to make the models runnable; the optimizer
    # and loss are unused at inference time
    model_rpn.compile(optimizer='sgd', loss='mse')
    model_classifier.compile(optimizer='sgd', loss='mse')
    if os.path.isdir(path):
        # predict every recognized image file in the directory, sorted by name
        for idx, img_name in enumerate(sorted(os.listdir(path))):
            if not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
                continue
            print(img_name)
            predict_single_image(os.path.join(path, img_name), model_rpn,
                                 model_classifier_only, cfg, class_mapping)
    elif os.path.isfile(path):
        print('predict image from {}'.format(path))
        predict_single_image(path, model_rpn, model_classifier_only, cfg, class_mapping)
| StarcoderdataPython |
1627851 | <reponame>ShinyShoes12/1_ImgClassfication
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
"""This script is for users having colab pro ver.
If you are colab free user, execute untill NOTICE HERE ~ index and go to other script."""
# # 1) **Preparing Datas**
#
#
#
# ## Check out : Gpu/Ram
# In[ ]:
## 할당된 GPU 확인
gpu_info = get_ipython().getoutput('nvidia-smi')
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ')
print('and then re-execute this cell.')
else:
print(gpu_info)
# In[ ]:
## 할당된 Ram 확인
from psutil import virtual_memory
ram_gb = virtual_memory().total / 1e9
print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb))
if ram_gb < 20:
print('To enable a high-RAM runtime, select the Runtime > "Change runtime type"')
print('menu, and then select High-RAM in the Runtime shape dropdown. Then, ')
print('re-execute this cell.')
else:
print('You are using a high-RAM runtime!')
# ## Mount Google Drive
# In[ ]:
from google.colab import drive
drive.mount('/content/drive')
# ## Unzip picture_data
# (for train&test)
# In[ ]:
## Check out details of current dir
get_ipython().system("ls '/content/drive/MyDrive/VGGnet'")
# In[ ]:
## Copy 'VGGnet' directory to content directory in Colab
get_ipython().system("cp -r '/content/drive/MyDrive/VGGnet/' '/content/'")
# In[ ]:
## Check details
get_ipython().system('ls ./VGGnet')
# In[ ]:
## Current directory
get_ipython().run_line_magic('pwd', '')
# In[ ]:
## Unzip 'DATASETS.zip' file
get_ipython().system("unzip '/content/VGGnet/DATASETS.zip' -d '/content/VGGnet'")
# In[ ]:
## Delete zipfile
get_ipython().system("rm '/content/VGGnet/DATASETS.zip'")
# In[ ]:
## Check Train_Dataset
get_ipython().run_line_magic('cd', "'/content/VGGnet/Train_Dataset'")
get_ipython().system("ls '/content/VGGnet/Train_Dataset'")
print('')
print('cnt of pictures')
get_ipython().system('ls -l | grep ^-.*\\.jpg$ | wc -l')
# In[ ]:
## Check Test_Dataset
get_ipython().run_line_magic('cd', "'/content/VGGnet/Test_Dataset'")
get_ipython().system("ls '/content/VGGnet/Test_Dataset'")
print('')
print('cnt of pictures')
get_ipython().system('ls -l | grep ^-.*\\.jpg$ | wc -l')
# In[ ]:
## Check out details of 'VGGnet' dir
get_ipython().run_line_magic('cd', '/content/VGGnet/')
get_ipython().system('ls ')
# # **2) Load datasets**
# ## Import Modules
# In[ ]:
## Modules Required
import Func_1 #Custom Functions
import os
import re
import cv2
import csv
import numpy as np
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from google.colab.patches import cv2_imshow
# ## Load Train_Dataset
# (to Colab environment from google-drive)
# In[ ]:
## Check out current working directory
get_ipython().run_line_magic('pwd', '')
# In[ ]:
## Check out cv2.__version
cv2.__version__ #4.1.2
# In[ ]:
## Check out state of imgs
#Train_Dataset
img = cv2.imread('/content/VGGnet/Train_Dataset/1.jpg')
cv2_imshow(img)
#Test_Dataset
img2 = cv2.imread('/content/VGGnet/Test_Dataset/1.jpg')
cv2_imshow(img2)
# In[ ]:
## Load X : Features
path_train = "/content/VGGnet/Train_Dataset"
resize = Func_1.img_load_c(path_train)
# In[ ]:
## Check out first img(resized) & shape
img = resize[0, : ]
plt.figure()
plt.imshow(img)
print(resize.shape)
# ## Scailing : Trainset(X)
# In[ ]:
## Make X (feature)
X = resize
X.shape
# In[ ]:
## Sampling
X = X.astype('float')
X = X/255
X.shape
# ## Load Label
# In[ ]:
path_label = "/content/VGGnet/label.csv"
y = Func_1.label_load(path_label,label_cnt=5) #label_cnt = len(신발종류)
y.shape
# ## Check out
# In[ ]:
## Confirm X, y
print(X.shape)
print(y.shape, end='\n\n\n')
# print("#####Check out : X#####")
# print(X, end='\n\n\n')
# print("#####Check out : y#####")
# print(y)
# In[ ]:
## Check out Train_Dataset imgs
index = [1,2,1801,1802,3601,3602,5401,5402,7201,7202]
plt.figure(figsize=(10, 10))
for j, i in enumerate(index):
ax = plt.subplot(3, 4, j+1)
img = X[i, ]
plt.imshow(img)
plt.title(np.argmax(y[i]))
plt.axis("off")
## Index
# 0 - addidas
# 1 - converse
# 2 - new balance
# 3 - nike
# 4 - vans
# # **3) Training Models : VGGnet**
# ## Import Modules
# In[ ]:
## module import
import tensorflow as tf # tensorflow 2.0
from tensorflow.keras.models import Sequential, save_model
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Conv2D, Flatten, MaxPooling2D, Activation
from tensorflow.keras.layers import Dropout, BatchNormalization, Dense
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
## Transfer learning (전이학습)
from tensorflow.keras.applications.vgg16 import VGG16
#from tensorflow.keras.applications.vgg19 import VGG19
# from tensorflow.keras.applications.inception_v3 import InceptionV3
# from tensorflow.keras.applications.resnet import ResNet50
# In[ ]:
## Check out Tensor version
import tensorflow
print(tensorflow.__version__) #2.4.1
# ## Set Validation
# In[ ]:
# 훈련/테스트 데이터를 0.7/0.3의 비율로 분리합니다.
x_train, x_val, y_train, y_val = train_test_split(X, y,
test_size = 0.3,
random_state = 777)
# Checkout
print("x_train.shape :", x_train.shape)
print("y_train.shape :", y_train.shape)
print("x_val.shape :", x_val.shape)
print("y_val.shape :", y_val.shape)
# ## Create Architecture
# (VGG16)
# In[ ]:
## Set VGG16 options
vgg16 = VGG16(weights = 'imagenet', include_top = False, input_shape = (128, 128, 3))
# In[ ]:
## Check out initial Architecture
vgg16.summary()
# In[ ]:
## Set trainable / non trainable layers
# 가중치 초기값 : imagenet
# layer.trainable=True : 동결 해제 (default)
# layer.trainable=False : 동결 (option)
for layer in vgg16.layers[:-4]:
layer.trainable = False
# In[ ]:
## Check out Architecture
vgg16.summary()
# In[ ]:
## Make Dense layer for classificaion
# 신경망 객체 생성
model = Sequential()
# stacking vgg16
model.add(vgg16)
# Reshape : Flatten
model.add(Flatten())
# 완전연결계층1
model.add(Dense(256, kernel_regularizer=tf.keras.regularizers.l2(0.2), activation='relu' ) ) #ㅣ2 default=0.01
model.add(BatchNormalization())
model.add(Dropout(0.5))
# 완전연결계층2
model.add(Dense(256, kernel_regularizer=tf.keras.regularizers.l2(0.2), activation='relu' ) ) #ㅣ2 default=0.01
model.add(BatchNormalization())
model.add(Dropout(0.5))
# 출력층(softmax)
model.add(Dense(5, activation='softmax')) # class : 5
# Check out model
model.summary()
# ## Compile model
# In[ ]:
## Compile
model.compile(loss='categorical_crossentropy', #multi class over 2
optimizer=Adam(lr = 0.0001), #default 0.001
metrics=[tf.keras.metrics.CategoricalAccuracy()])
## Compile options
"""
# Compile option index
https://www.tensorflow.org/api_docs/python/tf/keras/Model
## Optimizer options
https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
## Loss options
https://www.tensorflow.org/api_docs/python/tf/keras/losses
## Metrics options
https://www.tensorflow.org/api_docs/python/tf/keras/metrics
"""
#
# ## Fitting model
# (training)
# In[ ]:
## Parameters
epochs = 10
batch_size = 16
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=1) #Early stopping
#https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping
## Fitting
hist = model.fit(x_train, y_train,
validation_data=(x_val, y_val),
epochs=epochs,
batch_size=batch_size,
callbacks = [callback])
# # **4) Visualize the results**
# ## Get Logs of the model
# In[ ]:
# list all data in history
print(hist.history.keys())
# In[ ]:
## log : Accuracy
from pandas import DataFrame
Sample = [hist.history['categorical_accuracy'],hist.history['val_categorical_accuracy']]
df = DataFrame(Sample).transpose()
df.columns = ['train_Acc', 'val_Acc']
df
# In[ ]:
## log : Loss
Sample2 = [hist.history['loss'],hist.history['val_loss']]
df2 = DataFrame(Sample2).transpose()
df2.columns = ['train_Loss', 'val_loss']
df2
# ## Print graphs
# In[ ]:
## Visualization : Accuracy
plt.plot(hist.history['categorical_accuracy'], color='blue')
plt.plot(hist.history['val_categorical_accuracy'], color='red')
plt.title('Vgg16 : Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
print('')
## Visualization : Loss
plt.plot(hist.history['loss'], color='blue')
plt.plot(hist.history['val_loss'], color='red')
plt.title('Vgg16 : Loss')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.legend(['train', 'val'], loc='upper right')
plt.show()
# ## *@@@ NOTICE HERE BEFORE EXECUTE CODES BELOW!!!*
# ### *Non - Colab pro User*
#
#
#
# In[ ]:
""" If you are not colab user, Save the model and move to script "For_Colab_Free.ipynb" file.
If you are ignore and keep going, your resources provided go empty and session will be die. """
## Save model : .h5(Hdf5 type file)
save_path = "/content/VGGnet"
model.save('VGG16_Model_v1.h5', save_path, save_format="h5")
# In[ ]:
## Copy model colab to google drive
get_ipython().system("cp '/content/VGGnet/VGG16_Model_v1.h5' /content/drive/MyDrive/VGGnet")
# ### *Colab pro User*
# In[ ]:
""" If you are Colab pro user, just keep going.
Don't worry about saving model code. It is located end of this page."""
# ## Get accuracy & loss
# (Train_Dataset)
# In[ ]:
# Check results of train set
scores = model.evaluate(x_train, y_train, batch_size=16, verbose=1)
print("Vgg16 train Error : %.2f%%" % (100-scores[1]*100))
print("Vgg16 train Loss : %.2f" % (scores[0]))
# In[ ]:
# Check results of validation set
scores2 = model.evaluate(x_val, y_val, batch_size=16, verbose=1)
print("Vgg16 val Error : %.2f%%" % (100-scores2[1]*100))
print("Vgg16 val Loss : %.2f" % (scores2[0]))
# # **5) Test with Test_Dataset**
# ## Load Test_Dataset
# In[ ]:
## Check out current working directory
get_ipython().run_line_magic('pwd', '')
# In[ ]:
## Load X : Features
path_test = "/content/VGGnet/Test_Dataset"
resize_t = Func_1.img_load_c(path_test)
# In[ ]:
## Check out first img(resized) & shape
img = resize_t[0, : ]
plt.figure()
plt.imshow(img)
print(resize_t.shape)
# ## Scailing : Testset(X_t)
# In[ ]:
## Make X_t (feature)
X_t = resize_t
X_t.shape
# In[ ]:
X_t = X_t.astype('float')
X_t = X_t/255
X_t.shape
# ## Load Label
# In[ ]:
path_label2 = "/content//VGGnet/label_2.csv"
y_t = Func_1.label_load(path_label2,label_cnt=5) #label_cnt = len(신발종류)
y_t.shape
# ## Check out
# In[ ]:
## Confirm X, y
print(X_t.shape)
print(y_t.shape, end='\n\n\n')
# print("#####Check out : X#####")
# print(X, end='\n\n\n')
# print("#####Check out : y#####")
# print(y)
# In[ ]:
## Check out Test_Dataset imgs
index = [1,2,11,12,21,22,31,32,41,42]
plt.figure(figsize=(10, 10))
for j, i in enumerate(index):
ax = plt.subplot(3, 4, j+1)
img = X_t[i, ]
plt.imshow(img)
plt.title(np.argmax(y_t[i]))
plt.axis("off")
## Index
# 0 - addidas
# 1 - converse
# 2 - new balance
# 3 - nike
# 4 - vans
# ## Get accuracy & loss >>> Colab pro ver.
# (Test_Dataset)
# In[ ]:
## Check out accuracy (with test data) #모델을 실측데이터로 정확도 측정한 결과 확인
scores3 = model.evaluate(X_t, y_t, batch_size=16, verbose=1)
print("Vgg16 ind_dataset Error : %.2f%%" % (100-scores3[1]*100))
print("Vgg16 ind_dataset loss : %.2f" % (scores3[0]))
# # **6) Extract trained model**
# (to google drive)
# In[ ]:
## Save model : .h5(Hdf5 type file)
save_path = "/content/VGGnet"
model.save('VGG16_Model_v1.h5', save_path, save_format="h5")
# In[ ]:
# ## Save model : .data
# model.save('VGG16_Model_v2.tf', save_path, save_format="tf")
# In[ ]:
## Download extracted model
"""If this code takes too much time, Copy model to google drive and download to local in google drive Gui envirnment.
In my case it takes 10-15 minutes
Help code is located below.
I recommand copy model to google drive and download in g-drive."""
from google.colab import files
files.download('/content/VGGnet/VGG16_Model_v1.h5')
# In[ ]:
## Copy model colab to google drive
get_ipython().system("cp '/content/VGGnet/VGG16_Model_v1.h5' /content/drive/MyDrive/VGGnet")
| StarcoderdataPython |
6638554 | <filename>src/export_excel.py
# -*- coding: utf-8 -*-
import json
import os
import re
import sys
from datetime import datetime, date
from decimal import Decimal
from getopt import getopt, GetoptError
from openpyxl import Workbook, load_workbook
from openpyxl.styles import PatternFill, Font
from openpyxl.utils import get_column_letter
from common import _json_load
def export_nach_excel(documents, export_profil):
    """Export the given DMS documents to an Excel file as configured by export_profil.

    Builds rows/columns from the documents according to the profile (field
    selection, value mapping, sorting, computed columns, conditional formats),
    then either creates a new workbook (optionally from a template) or updates
    an existing one in place (matching rows via the configured id field).
    """
    # determine the target file name (optionally with a strftime postfix)
    ziel_dateiname = export_profil["dateiname"]
    postfix = export_profil.get("dateiname_postfix")
    if postfix:
        if "%" in postfix:
            postfix = datetime.now().strftime(postfix)
        splitext = os.path.splitext(os.path.basename(export_profil["dateiname"]))
        ziel_dateiname = os.path.join(
            os.path.dirname(export_profil["dateiname"]),
            splitext[0] + postfix + splitext[1]
        )
    # determine the last running number (persisted in a sidecar text file)
    fortlaufendes_feld = export_profil.get("fortlaufendes_feld")
    letzte_fortlaufende_nummer = -1
    filename_fortlaufendes_feld = None
    if fortlaufendes_feld:
        filename_fortlaufendes_feld = os.path.join(
            os.path.dirname(export_profil["dateiname"]),
            os.path.splitext(os.path.basename(export_profil["dateiname"]))[0] + "_" +
            "fortlaufendes_feld.txt"
        )
        if os.path.exists(filename_fortlaufendes_feld):
            with open(filename_fortlaufendes_feld, 'r', encoding='utf-8') as outfile:
                value = outfile.read()
                if value:
                    letzte_fortlaufende_nummer = int(value)
    # per-file mapping caches (file name -> id/name dict)
    datei_mappings = dict()
    # derive rows and columns from the documents according to the export profile
    rows = list()
    for document in documents["documents"]:
        columns = list()
        rows.append(columns)
        for spalte in export_profil["spalten"]:
            column = dict()
            columns.append(column)
            feld_name = spalte["feld"]
            if spalte.get("alias"):
                column["feld_name"] = spalte["alias"]
            else:
                column["feld_name"] = spalte["feld"]
            mapped_value = ""
            if feld_name:
                # read the column value (top-level field or classify attribute) and map it
                if feld_name in document:
                    value = document[feld_name]
                elif feld_name in document["classifyAttributes"]:
                    value = document["classifyAttributes"][feld_name]
                else:
                    raise RuntimeError(
                        f"Die Spalte '{feld_name}' existiert nicht im Dokument. Bitte Export-Profil überprüfen.")
                # mapping
                mapping_def = spalte.get("mapping")
                if mapping_def is not None:
                    # apply the configured mapping
                    if mapping_def["typ"] == "re":
                        # mapping via a regex method
                        # - always convert to string first, because the regex method works on strings
                        mapped_value = map_value(value, "string")
                        re_operation = getattr(re, mapping_def["methode"])
                        argumente = mapping_def["argumente"]
                        if len(argumente) == 2:
                            mapped_value = re_operation(argumente[0], argumente[1], mapped_value)
                        else:
                            raise RuntimeError(
                                f"Fehler beim Mapping zum Feld '{feld_name}'. "
                                f"Es werden nur 2 Argument unterstützt.")
                        mapped_value = map_value(mapped_value, spalte.get("type"))
                    elif mapping_def["typ"] == "datei":
                        # read the mapping from a file
                        # - always convert to string first; ids are normalized as strings
                        mapped_value = map_value(value, "string")
                        # - initialize the file mapping cache
                        if datei_mappings.get(mapping_def["dateiname"]) is None:
                            datei_mappings[mapping_def["dateiname"]] = _init_mapping_data(mapping_def)
                        # map from id to name
                        mapping_data = datei_mappings[mapping_def["dateiname"]]
                        mapped_value = mapping_data[mapped_value]
                    else:
                        # BUG(review): mapping_def has the key 'typ', not 'type' —
                        # this f-string raises KeyError instead of the intended message.
                        raise RuntimeError(f"Unbekannter Mapping Typ: {mapping_def['type']}")
                else:
                    mapped_value = map_value(value, spalte.get("type"))
            else:
                # no field name, so the column stays empty
                pass
            column["value"] = mapped_value
            if spalte.get("number_format"):
                column["number_format"] = spalte["number_format"]
            else:
                # default Excel number formats for date/datetime values
                if isinstance(mapped_value, date):
                    column["number_format"] = 'DD.MM.YYYY'
                if isinstance(mapped_value, datetime):
                    column["number_format"] = 'DD.MM.YYYY HH:MM:SS'
            if spalte.get("computed"):
                column["computed"] = spalte["computed"]
    # sort (stable multi-key sort: apply keys in reverse priority order)
    if export_profil.get("sortierung"):
        for sort_def in reversed(export_profil["sortierung"]["felder"]):
            if sort_def["wie"] == "absteigend":
                reverse = True
            elif sort_def["wie"] == "aufsteigend":
                reverse = False
            else:
                raise RuntimeError(
                    f"Unbekannte Sortierung zum 'feld'='{sort_def['feld']}' mit 'wie'='{sort_def['wie']}' "
                    f", erlaubt sind nur 'aufsteigend' oder 'absteigend'.")
            rows.sort(
                key=lambda r: list(filter(lambda c: c["feld_name"] == sort_def["feld"], r))[0]["value"],
                reverse=reverse
            )
    # evaluate computed columns and determine cell formats
    for row in rows:
        for column in row:
            # evaluate the computed value
            if column.get("computed"):
                computed = column.get("computed")
                # substitute the known helper function
                computed = computed \
                    .replace("nicht_fortlaufend()",
                             "pruefe_is_nicht_fortlaufend(row, fortlaufendes_feld, letzte_fortlaufende_nummer)")
                # NOTE(review): eval() of a profile-supplied expression — only
                # run trusted export profiles.
                column["value"] = eval(computed)
            # determine the format
            if export_profil.get("formate"):
                for format_candidate in export_profil["formate"]:
                    if re.match(format_candidate["match"], str(column["value"])):
                        if "PatternFill" == format_candidate["format"]["format"]:
                            column["fill"] = PatternFill(start_color=format_candidate["format"]["start_color"],
                                                         end_color=format_candidate["format"]["end_color"],
                                                         fill_type=format_candidate["format"]["fill_type"])
        for column in row:
            # remember the highest running-number value seen so far
            if fortlaufendes_feld and column["feld_name"] == fortlaufendes_feld:
                letzte_fortlaufende_nummer = column["value"]
                if not letzte_fortlaufende_nummer:
                    # NOTE(review): a legitimate running number of 0 is falsy
                    # and would also trigger this error.
                    raise RuntimeError("Die fortlaufende Nummer konnte nicht ermittelt werden")
    # save as Excel
    if not os.path.exists(ziel_dateiname):
        # new Excel file
        if not export_profil.get("vorlage_dateiname"):
            # new workbook
            wb = Workbook()
            ws = wb.active
        else:
            # from a template
            wb = load_workbook(filename=export_profil["vorlage_dateiname"])
            if not export_profil.get("vorlage_sheet_name"):
                ws = wb.active
            else:
                ws = wb[export_profil["vorlage_sheet_name"]]
        row_idx = 1
        # with column headers
        if export_profil["spaltenueberschrift"].lower() == "ja":
            column_header_format = export_profil.get("spaltenueberschrift_format")
            if column_header_format is not None:
                if "PatternFill" == column_header_format["format"]:
                    column_header = PatternFill(start_color=column_header_format["start_color"],
                                                end_color=column_header_format["end_color"],
                                                fill_type=column_header_format["fill_type"])
                else:
                    raise RuntimeError(
                        f"Unbekanntes Format {column_header_format['format']} in 'spaltenueberschrift_format/format'. "
                        f"Möglich ist nur 'PatternFill'")
            else:
                # default header format
                column_header = PatternFill(start_color='AAAAAA',
                                            end_color='AAAAAA',
                                            fill_type='solid')
            column_idx = 1
            for spalte in export_profil["spalten"]:
                ws.cell(column=column_idx, row=row_idx, value=spalte["ueberschrift"])
                col = ws["{}{}".format(get_column_letter(column_idx), row_idx)]
                col.font = Font(bold=True)
                col.fill = column_header
                column_idx += 1
            row_idx += 1
        # write rows and columns into the Excel document
        append_rows(row_idx, rows, ws)
    else:
        # update an existing Excel file in place
        wb = load_workbook(filename=ziel_dateiname)
        if not export_profil.get("vorlage_sheet_name"):
            ws = wb.active
        else:
            ws = wb[export_profil["vorlage_sheet_name"]]
        id_feld = export_profil["id_feld"]
        id_feld_idx = -1
        for idx, spalte in enumerate(export_profil["spalten"]):
            if spalte["feld"] == id_feld:
                id_feld_idx = idx
        if id_feld_idx == -1:
            raise RuntimeError(
                f"Fehler das id_feld '{id_feld}' existiert nicht als Spalte in der Export Konfiguration.")
        # update rows
        empties = 0
        last_row = 0
        for row_idx, row in enumerate(ws.iter_rows()):
            cell = row[id_feld_idx]
            if cell.value:
                empties = 0
                update_row(cell.value, id_feld_idx, rows, row)
                rows = remove_row(cell.value, id_feld_idx, rows)
            else:
                empties += 1
                for cell in row:
                    # possibly an empty row — only if all other columns are empty too
                    if cell.value:
                        empties = 0
                        break
            if empties == 0:
                last_row = row_idx + 1
            if empties > 100:
                # done — only empty id columns remain
                break
        # append the remaining (new) rows
        row_idx = last_row + 1
        append_rows(row_idx, rows, ws)
    wb.save(filename=ziel_dateiname)
    print(f"Die Excel-Datei wurde geschrieben: '{ziel_dateiname}'")
    # persist the last running number to the sidecar file
    if fortlaufendes_feld:
        with open(filename_fortlaufendes_feld, 'w', encoding='utf-8') as outfile:
            outfile.write(str(letzte_fortlaufende_nummer))
def append_rows(row_idx, rows, ws):
    """Append *rows* to worksheet *ws*, starting at row number *row_idx*.

    Each column dict must carry "value" and may carry "number_format"
    and "fill" for cell styling.
    """
    for row_offset, row in enumerate(rows):
        for col_offset, column in enumerate(row):
            cell = ws.cell(column=col_offset + 1,
                           row=row_idx + row_offset,
                           value=column["value"])
            number_format = column.get("number_format")
            if number_format:
                cell.number_format = number_format
            fill = column.get("fill")
            if fill:
                cell.fill = fill
def update_row(id_value, id_feld_idx, rows, row):
    """Update the worksheet *row* in place from the matching entry in *rows*.

    A new row matches when its id column value equals *id_value*. If no
    new row matches, the worksheet row is left unchanged (it only exists
    in the Excel document). More than one match is an error.
    """
    matches = [r for r in rows if r[id_feld_idx]["value"] == id_value]
    if len(matches) > 1:
        raise RuntimeError(f"Zeile mit Id '{id_value}' ist mehrfach vorhanden. Anzahl: {len(matches)}")
    if not matches:
        # row only present in the Excel document -> keep as-is
        return
    for column_idx, column in enumerate(matches[0]):
        cell = row[column_idx]
        cell.value = column["value"]
        if column.get("number_format"):
            cell.number_format = column["number_format"]
        if column.get("fill"):
            cell.fill = column["fill"]
def remove_row(id_value, id_feld_idx, rows):
    """Return *rows* without any row whose id column equals *id_value*."""
    def keep(row):
        return row[id_feld_idx]["value"] != id_value

    return list(filter(keep, rows))
def map_value(value, mapping_type=None):
    """Convert a raw field value according to the column's declared type.

    "string" forces str(); "int" parses an int and falls back to the
    sentinel -1 when the text is not a valid integer; any other/absent
    type uses the heuristic string mapping.
    """
    if mapping_type == "string":
        return str(value)
    if mapping_type != "int":
        return map_str_value(value)
    try:
        return int(value)
    except ValueError:
        # unparseable integers are flagged with a sentinel instead of failing
        return -1
def map_str_value(value):
    """Best-effort conversion of a raw string into a typed value.

    Normalizes the sentinels "undefined"/"true"/"false", then tries to
    recognize German-formatted EUR amounts, dates, datetimes and plain
    decimals (in that order). Anything unrecognized is returned verbatim;
    non-strings pass through untouched.
    """
    if type(value) is not str:
        return value
    # normalize sentinel strings
    if value == "undefined":
        value = ""
    elif value == "true":
        value = "ja"
    elif value == "false":
        value = "nein"
    # currency marker anywhere in the text, starting with a (signed) digit
    leading = value[:1]
    if "€" in value and (leading.isnumeric() or (leading == "-" and value[1:2].isnumeric())):
        return map_eur(value)
    if re.match(r"^-?[0-9]+,?[0-9]* (€|EUR)$", value):
        return map_eur(value)
    # dates: German DD.MM.YYYY or ISO YYYY-MM-DD
    if re.match(r"^[0-9]{2}\.[0-9]{2}\.[0-9]{4}$", value) \
            or re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$", value):
        return map_datum(value)
    # datetimes in the same two styles
    if re.match(r"^[0-9]{2}\.[0-9]{2}\.[0-9]{4} [0-9]{2}:[0-9]{2}:[0-9]{2}$", value) \
            or re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}$", value):
        return map_datum_zeit(value)
    # plain decimal with comma as decimal separator
    if re.match(r"^-?[0-9]+,?[0-9]*$", value):
        return map_number(value)
    return value
def map_number(value):
    """Parse a German-formatted number string (e.g. "1.234,56") into a Decimal.

    '.' (thousands separator) and spaces are removed; ',' becomes the
    decimal point. None passes through unchanged.
    """
    if value is None:
        return None
    # translate in one pass: drop '.' and ' ', turn ',' into '.'
    return Decimal(value.translate(str.maketrans(",", ".", ". ")))
def map_eur(value):
    """Drop the currency marker ('€' or 'EUR') and parse the rest as a number."""
    for marker in ("€", "EUR"):
        value = value.replace(marker, "")
    return map_number(value)
def map_datum(value):
    """Parse 'YYYY-MM-DD' or 'DD.MM.YYYY' into a datetime.date."""
    fmt = "%Y-%m-%d" if "-" in value else "%d.%m.%Y"
    return datetime.strptime(value, fmt).date()
def map_datum_zeit(value):
    """Parse 'YYYY-MM-DD HH:MM:SS' or 'DD.MM.YYYY HH:MM:SS' into a datetime."""
    fmt = "%Y-%m-%d %H:%M:%S" if "-" in value else "%d.%m.%Y %H:%M:%S"
    return datetime.strptime(value, fmt)
def pruefe_is_nicht_fortlaufend(columns, fortlaufendes_feld, previous_fortlaufendes_feld):
    """Return True when the running-number column does NOT continue the sequence.

    The column named *fortlaufendes_feld* must hold exactly
    previous_fortlaufendes_feld + 1 to count as consecutive.
    """
    matching = [c for c in columns if c["feld_name"] == fortlaufendes_feld]
    return matching[0]["value"] != previous_fortlaufendes_feld + 1
def _init_mapping_data(mapping_def):
    """Read all entries from the mapping file and build an id -> name dict.

    Keys come from the configured 'id' field (normalized to str), values
    from the configured 'name' field.
    """
    entries = _json_load(mapping_def["dateiname"])
    id_key = mapping_def["id"]
    name_key = mapping_def["name"]
    return {str(entry[id_key]): entry[name_key] for entry in entries}
def main(argv):
    """
    Export the given JSON file (documents_datei) of exported DMS document
    fields to Excel. The export format is configured through the given
    export parameter file (export_parameter_datei).

    Exits with status 2 on usage errors; raises RuntimeError when one of
    the given files does not exist.
    """
    hilfe = f"{os.path.basename(__file__)} -d <documents_datei> -e <export_parameter_datei>"
    documents_datei = ""
    export_parameter_datei = ""
    try:
        # FIX: "help" was missing from the long-options list, so "--help"
        # raised GetoptError and exited with status 2 instead of printing
        # usage and exiting cleanly like "-h".
        opts, args = getopt(argv, "hd:e:",
                            ["help", "documents_datei=", "export_parameter_datei="])
    except GetoptError:
        print(hilfe)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(hilfe)
            sys.exit()
        elif opt in ("-d", "--documents_datei"):
            documents_datei = arg
        elif opt in ("-e", "--export_parameter_datei"):
            export_parameter_datei = arg
    if not documents_datei or not export_parameter_datei:
        print("Usage: " + hilfe)
        sys.exit(2)
    if not os.path.exists(documents_datei):
        raise RuntimeError(f"Die Datei '{documents_datei}' existiert nicht.")
    if not os.path.exists(export_parameter_datei):
        raise RuntimeError(f"Die Datei '{export_parameter_datei}' existiert nicht.")
    with open(documents_datei, encoding="utf-8") as file:
        documents = json.load(file)
    with open(export_parameter_datei, encoding="utf-8") as file:
        export_parameter = json.load(file)
    export_nach_excel(documents, export_parameter["export"])
if __name__ == '__main__':
    # main(sys.argv[1:])
    # NOTE(review): command-line arguments are ignored; the script always runs
    # with these hard-coded development paths. Restore the line above for CLI use.
    main(["-d", "../export_documents.json", "-e", "../config/dmsarchiv_vorlage.json"])
| StarcoderdataPython |
1639691 | #!/usr/bin/env python
from __future__ import print_function
import re
import argparse
import logging
from xigt.codecs import xigtxml
from xigt import XigtCorpus, Igt, xigtpath as xp
def run(args):
    """Load the corpus, apply the requested sorts, and emit the result.

    IGT, tier and item sorts are independent; tier sorting is either
    key-based (--tier-key) or dependency-based (--tier-deps), never both.
    Output goes to stdout unless --in-place was given.
    """
    corpus = xigtxml.load(args.infile)

    if args.igt_key:
        logging.info('Sorting %s IGTs', args.infile)
        corpus.sort(key=make_sortkey(args.igt_key))

    if args.tier_key:
        logging.info('Sorting %s tiers by key', args.infile)
        tier_key = make_sortkey(args.tier_key)
        for igt in corpus:
            igt.sort(key=tier_key)
    elif args.tier_deps:
        logging.info('Sorting %s tiers by ref-dependencies', args.infile)
        refattrs = [attr.strip() for attr in args.tier_deps.split(',')]
        for igt in corpus:
            igt.sort_tiers(refattrs=refattrs)

    if args.item_key:
        logging.info('Sorting %s items by key', args.infile)
        item_key = make_sortkey(args.item_key)
        for igt in corpus:
            for tier in igt:
                tier.sort(key=item_key)

    if args.in_place:
        xigtxml.dump(args.infile, corpus)
    else:
        print(xigtxml.dumps(corpus))
def make_sortkey(sortkeys):
    """Build a natural-sort key over one or more XigtPath queries.

    Each query result is split into digit and non-digit runs; digit runs
    compare as ints so e.g. 't2' sorts before 't10'.
    """
    def _maybe_int(token):
        try:
            return int(token)
        except ValueError:
            return token

    def key(obj):
        parts = []
        for query in sortkeys:
            text = xp.find(obj, query) or ''
            parts.extend(_maybe_int(token) for token in re.split(r'(\d+)', text))
        return parts

    return key
def main(arglist=None):
    """Parse command-line arguments, configure logging, and run the sort."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Sort Igts, Tiers, or Items in Xigt corpora",
        epilog='examples:\n'
               ' xigt sort --igt-key=\'@doc-id\' --igt-key=\'@id\' in.xml > out.xml\n'
               ' xigt sort --tier-key=\'@type\' in.xml > out.xml\n'
               ' xigt sort --tier-deps="segmentation,alignment,content" in.xml > out.xml'
    )
    parser.add_argument('-v', '--verbose',
        action='count', dest='verbosity', default=2,
        help='increase the verbosity (can be repeated: -vvv)'
    )
    parser.add_argument('infile',
        help='the Xigt corpus file to sort'
    )
    parser.add_argument('--in-place',
        action='store_true',
        help='don\'t print to stdout; modify the input file in-place'
    )
    parser.add_argument('--igt-key',
        metavar='XIGTPATH', action='append',
        help='the XigtPath query for IGTs (must result in a string, so '
             'it should end with an @attribute, text(), or value())'
    )
    # key-based and dependency-based tier sorting are mutually exclusive
    tiergroup = parser.add_mutually_exclusive_group()
    tiergroup.add_argument('--tier-key',
        metavar='XIGTPATH', action='append',
        help='the XigtPath query for Tiers (must result in a string, so '
             'it should end with an @attribute, text(), or value())'
    )
    tiergroup.add_argument('--tier-deps',
        metavar='REFATTRS',
        help='sort tiers by reference dependencies; argument is a '
             'comma-separated prioritized list of considered reference '
             'attributes'
    )
    parser.add_argument('--item-key',
        metavar='XIGTPATH', action='append',
        help='the XigtPath query for Items (must result in a string, so '
             'it should end with an @attribute, text(), or value())'
    )
    # parser.add_argument('--item-deps',
    #     action='store_true',
    #     help='sort items by reference dependencies'
    # )
    args = parser.parse_args(arglist)
    # map -v count to logging levels: default 2 -> WARNING (30), -vvv -> INFO, etc.
    logging.basicConfig(level=50-(args.verbosity*10))
    run(args)
if __name__ == '__main__':
    # script entry point: parse sys.argv and run the sort
    main()
| StarcoderdataPython |
133604 | # -*- coding: utf-8 -*-
"""
Conversion between Dialog-2010 (http://ru-eval.ru/) and aot.ru tags.
Dialog-2010 tags are less detailed than aot tags so aot -> dialog2010
conversion discards information.
"""
from __future__ import absolute_import, unicode_literals
import itertools
from russian_tagsets import converters
from russian_tagsets import aot
from russian_tagsets.utils import invert_mapping
# aot.ru part-of-speech -> Dialog-2010 part-of-speech.
# '-' marks word classes that Dialog-2010 does not distinguish; the
# conversion is lossy by design (see module docstring).
POS = {
    'С': 'S',
    'П': 'A',
    'МС': '-',
    'Г': 'V',
    'ПРИЧАСТИЕ': 'V',
    'ДЕЕПРИЧАСТИЕ': 'V',
    'ИНФИНИТИВ': 'V',
    'МС-ПРЕДК': '-',
    'МС-П': '-',
    'ЧИСЛ': '-',
    'ЧИСЛ-П': '-',
    'Н': 'ADV',
    'ПРЕДК': '-',
    'ПРЕДЛ': 'PR',
    'СОЮЗ': 'CONJ',
    'МЕЖД': 'ADV',
    'ЧАСТ': 'ADV',
    'ВВОДН': 'ADV',
    'КР_ПРИЛ': 'A',
    'КР_ПРИЧАСТИЕ': 'V',  # FIXME: ?
    'ПОСЛ': '-',
    'ФРАЗ': '-',
}

# inverse POS mapping (Dialog-2010 -> aot.ru); only unambiguous classes
POS_INV = {
    'S': 'С',
    'A': 'П',
    'V': 'Г',
    'ADV': 'Н',
    'PR': 'ПРЕДЛ',
    'CONJ': 'СОЮЗ',
}

# grammeme tables: aot.ru grammeme -> Dialog-2010 grammeme
GENDERS = {
    'мр': 'm',
    'жр': 'f',
    'ср': 'n',
    'мр-жр': '',  # FIXME: ?
}

CASES = {
    'им': 'nom',
    'рд': 'gen',
    'дт': 'dat',
    'вн': 'acc',
    'тв': 'ins',
    'пр': 'loc',
    'зв': 'voc',
}

NUMBERS = {'ед': 'sg', 'мн': 'pl'}

PERSONS = {'1л': '1p', '2л': '2p', '3л': '3p'}

VOICES = {'дст': 'act', 'стр': 'pass'}

TENSES = {
    'нст': 'pres',
    'прш': 'past',
    'буд': 'pres',
}

EXTRA = {
    'сравн': 'comp',
    'прев': 'supr',
    'пвл': 'imper',
}

# single combined grammeme mapping used by the converters, plus its inverse
GRAMINFO_MAP = dict(itertools.chain(
    GENDERS.items(), CASES.items(), NUMBERS.items(), PERSONS.items(),
    TENSES.items(), VOICES.items(), EXTRA.items(),
))
GRAMINFO_MAP_INV = invert_mapping(GRAMINFO_MAP)
def from_aot(aot_tag, word=None):
    """Convert an aot.ru tag string into a Dialog-2010 tag string.

    Information not representable in the Dialog-2010 tagset is discarded;
    participles, infinitives and gerunds gain an explicit extra grammeme.
    """
    pos, grammemes = aot.split_tag(aot_tag)
    extras = set()
    if pos in ('ПРИЧАСТИЕ', 'КР_ПРИЧАСТИЕ'):
        extras.add('partcp')
    else:
        # voice is only kept for participles
        grammemes.discard('дст')
        grammemes.discard('стр')
    if pos == 'ИНФИНИТИВ':
        extras.add('inf')
    elif pos == 'ДЕЕПРИЧАСТИЕ':
        extras.add('ger')
    mapped = [GRAMINFO_MAP[g] for g in grammemes if g in GRAMINFO_MAP]
    return ",".join(itertools.chain([POS[pos]], extras, mapped))
def to_aot(dialog_tag, word=None):
    """Convert a Dialog-2010 tag string back into an aot.ru tag string.

    Verbal subcategories ('inf', 'partcp', 'ger') are re-expanded into
    their dedicated aot.ru part-of-speech labels.
    """
    pos, grammemes = aot.split_tag(dialog_tag)
    aot_grammemes = (GRAMINFO_MAP_INV[g] for g in grammemes if g in GRAMINFO_MAP_INV)
    aot_pos = POS_INV[pos]
    if pos == 'V':
        if 'inf' in grammemes:
            aot_pos = 'ИНФИНИТИВ'
        elif 'partcp' in grammemes:
            aot_pos = 'ПРИЧАСТИЕ'
        elif 'ger' in grammemes:
            aot_pos = 'ДЕЕПРИЧАСТИЕ'
    return ",".join(itertools.chain([aot_pos], aot_grammemes))
# register both conversion directions with the global converter registry
converters.add('dialog2010', 'aot', to_aot)
converters.add('aot', 'dialog2010', from_aot)
| StarcoderdataPython |
4836573 | <gh_stars>1-10
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function, batch_write_vasp_input to generate an
entire directory of vasp input files for running.
"""
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Mar 4, 2012"
import os
import re
from multiprocessing import Pool
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.io.vasp.sets import MPRelaxSet
class StandardTransmuter:
"""
An example of a Transmuter object, which performs a sequence of
transformations on many structures to generate TransformedStructures.
.. attribute: transformed_structures
List of all transformed structures.
"""
def __init__(self, transformed_structures, transformations=None,
extend_collection=0, ncores=None):
"""
Initializes a transmuter from an initial list of
:class:`pymatgen.alchemy.materials.TransformedStructure`.
Args:
transformed_structures ([TransformedStructure]): Input transformed
structures
transformations ([Transformations]): New transformations to be
applied to all structures.
extend_collection (int): Whether to use more than one output
structure from one-to-many transformations. extend_collection
can be an int, which determines the maximum branching for each
transformation.
ncores (int): Number of cores to use for applying transformations.
Uses multiprocessing.Pool. Default is None, which implies
serial.
"""
self.transformed_structures = transformed_structures
self.ncores = ncores
if transformations is not None:
for trans in transformations:
self.append_transformation(trans,
extend_collection=extend_collection)
    def __getitem__(self, index):
        # Delegate indexing/slicing to the underlying list of structures.
        return self.transformed_structures[index]
    def __getattr__(self, name):
        # Vectorized attribute access: any attribute not found normally is
        # collected from every transformed structure and returned as a list.
        # Only invoked when regular attribute lookup fails.
        return [getattr(x, name) for x in self.transformed_structures]
    def undo_last_change(self):
        """
        Undo the last transformation in the TransformedStructure.

        Raises:
            IndexError if already at the oldest change.
        """
        # applied to every structure held by the transmuter
        for x in self.transformed_structures:
            x.undo_last_change()
    def redo_next_change(self):
        """
        Redo the last undone transformation in the TransformedStructure.

        Raises:
            IndexError if already at the latest change.
        """
        # applied to every structure held by the transmuter
        for x in self.transformed_structures:
            x.redo_next_change()
    def __len__(self):
        # Number of structures currently held by this transmuter.
        return len(self.transformed_structures)
def append_transformation(self, transformation, extend_collection=False,
clear_redo=True):
"""
Appends a transformation to all TransformedStructures.
Args:
transformation: Transformation to append
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
clear_redo (bool): Whether to clear the redo list. By default,
this is True, meaning any appends clears the history of
undoing. However, when using append_transformation to do a
redo, the redo list should not be cleared to allow multiple
redos.
Returns:
List of booleans corresponding to initial transformed structures
each boolean describes whether the transformation altered the
structure
"""
if self.ncores and transformation.use_multiprocessing:
p = Pool(self.ncores)
# need to condense arguments into single tuple to use map
z = map(
lambda x: (x, transformation, extend_collection, clear_redo),
self.transformed_structures)
new_tstructs = p.map(_apply_transformation, z, 1)
self.transformed_structures = []
for ts in new_tstructs:
self.transformed_structures.extend(ts)
else:
new_structures = []
for x in self.transformed_structures:
new = x.append_transformation(transformation,
extend_collection,
clear_redo=clear_redo)
if new is not None:
new_structures.extend(new)
self.transformed_structures.extend(new_structures)
def extend_transformations(self, transformations):
"""
Extends a sequence of transformations to the TransformedStructure.
Args:
transformations: Sequence of Transformations
"""
for t in transformations:
self.append_transformation(t)
def apply_filter(self, structure_filter):
"""
Applies a structure_filter to the list of TransformedStructures
in the transmuter.
Args:
structure_filter: StructureFilter to apply.
"""
def test_transformed_structure(ts):
return structure_filter.test(ts.final_structure)
self.transformed_structures = list(filter(test_transformed_structure,
self.transformed_structures))
for ts in self.transformed_structures:
ts.append_filter(structure_filter)
def write_vasp_input(self, **kwargs):
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{formula}_{number}.
Args:
\\*\\*kwargs: All kwargs supported by batch_write_vasp_input.
"""
batch_write_vasp_input(self.transformed_structures, **kwargs)
def set_parameter(self, key, value):
"""
Add parameters to the transmuter. Additional parameters are stored in
the as_dict() output.
Args:
key: The key for the parameter.
value: The value for the parameter.
"""
for x in self.transformed_structures:
x.other_parameters[key] = value
def add_tags(self, tags):
"""
Add tags for the structures generated by the transmuter.
Args:
tags: A sequence of tags. Note that this should be a sequence of
strings, e.g., ["My awesome structures", "Project X"].
"""
self.set_parameter("tags", tags)
def __str__(self):
output = ["Current structures", "------------"]
for x in self.transformed_structures:
output.append(str(x.final_structure))
return "\n".join(output)
def append_transformed_structures(self, tstructs_or_transmuter):
"""
Method is overloaded to accept either a list of transformed structures
or transmuter, it which case it appends the second transmuter"s
structures.
Args:
tstructs_or_transmuter: A list of transformed structures or a
transmuter.
"""
if isinstance(tstructs_or_transmuter, self.__class__):
self.transformed_structures.extend(tstructs_or_transmuter
.transformed_structures)
else:
for ts in tstructs_or_transmuter:
assert isinstance(ts, TransformedStructure)
self.transformed_structures.extend(tstructs_or_transmuter)
@staticmethod
def from_structures(structures, transformations=None, extend_collection=0):
"""
Alternative constructor from structures rather than
TransformedStructures.
Args:
structures: Sequence of structures
transformations: New transformations to be applied to all
structures
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
Returns:
StandardTransmuter
"""
tstruct = [TransformedStructure(s, []) for s in structures]
return StandardTransmuter(tstruct, transformations, extend_collection)
class CifTransmuter(StandardTransmuter):
    """
    Generates a Transmuter from a cif string, possibly containing multiple
    structures.
    """

    def __init__(self, cif_string, transformations=None, primitive=True,
                 extend_collection=False):
        """
        Generates a Transmuter from a cif string, possibly
        containing multiple structures.

        Args:
            cif_string: A string containing a cif or a series of cifs
            transformations: New transformations to be applied to all
                structures
            primitive: Whether to generate the primitive cell from the cif.
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.
        """
        # Split the input into one chunk of lines per "data..." block.
        # Anything before the first data line is discarded.
        chunks = []
        current_chunk = None
        for raw_line in cif_string.split("\n"):
            if re.match(r"^\s*data", raw_line):
                current_chunk = []
                chunks.append(current_chunk)
            if current_chunk is not None:
                current_chunk.append(raw_line)
        transformed_structures = [
            TransformedStructure.from_cif_string("\n".join(chunk), [],
                                                 primitive)
            for chunk in chunks
        ]
        super().__init__(transformed_structures, transformations,
                         extend_collection)

    @staticmethod
    def from_filenames(filenames, transformations=None, primitive=True,
                       extend_collection=False):
        """
        Generates a TransformedStructureCollection from a cif, possibly
        containing multiple structures.

        Args:
            filenames: List of strings of the cif files
            transformations: New transformations to be applied to all
                structures
            primitive: Same meaning as in __init__.
            extend_collection: Same meaning as in __init__.
        """
        contents = []
        for name in filenames:
            with open(name, "r") as f:
                contents.append(f.read())
        return CifTransmuter("\n".join(contents), transformations,
                             primitive=primitive,
                             extend_collection=extend_collection)
class PoscarTransmuter(StandardTransmuter):
    """
    Generates a transmuter from a sequence of POSCARs.

    Args:
        poscar_string: List of POSCAR strings
        transformations: New transformations to be applied to all
            structures.
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations.
    """

    def __init__(self, poscar_string, transformations=None,
                 extend_collection=False):
        # Wrap the single POSCAR string into a TransformedStructure with an
        # empty transformation history before delegating to the base class.
        seed = TransformedStructure.from_poscar_string(poscar_string, [])
        super().__init__([seed], transformations,
                         extend_collection=extend_collection)

    @staticmethod
    def from_filenames(poscar_filenames, transformations=None,
                       extend_collection=False):
        """
        Convenient constructor to generates a POSCAR transmuter from a list of
        POSCAR filenames.

        Args:
            poscar_filenames: List of POSCAR filenames
            transformations: New transformations to be applied to all
                structures.
            extend_collection:
                Same meaning as in __init__.
        """
        structures = []
        for path in poscar_filenames:
            with open(path, "r") as f:
                structures.append(
                    TransformedStructure.from_poscar_string(f.read(), []))
        return StandardTransmuter(structures, transformations,
                                  extend_collection=extend_collection)
def batch_write_vasp_input(transformed_structures, vasp_input_set=MPRelaxSet,
                           output_dir=".", create_directory=True,
                           subfolder=None,
                           include_cif=False, **kwargs):
    """
    Batch write vasp input for a sequence of transformed structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.

    Args:
        transformed_structures: Sequence of TransformedStructures.
        vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to creates
            vasp input files from structures.
        output_dir: Directory to output files
        create_directory (bool): Create the directory if not present.
            Defaults to True.
        subfolder: Function to create subdirectory name from
            transformed_structure.
            e.g., lambda x: x.other_parameters["tags"][0] to use the first
            tag.
        include_cif (bool): Boolean indication whether to output a CIF as
            well. CIF files are generally better supported in visualization
            programs.
        \\*\\*kwargs: All kwargs supported by the structure's
            write_vasp_input method.
    """
    if include_cif:
        # Hoisted out of the loop (the original re-imported per structure);
        # kept lazy so cif support is only required when actually requested.
        from pymatgen.io.cif import CifWriter
    for i, s in enumerate(transformed_structures):
        # Strip all whitespace from the formula to build a filesystem-safe
        # directory name like "Fe2O3_0".
        formula = re.sub(r"\s+", "", s.final_structure.formula)
        if subfolder is not None:
            subdir = subfolder(s)
            dirname = os.path.join(output_dir, subdir,
                                   "{}_{}".format(formula, i))
        else:
            dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
        s.write_vasp_input(vasp_input_set, dirname,
                           create_directory=create_directory, **kwargs)
        if include_cif:
            writer = CifWriter(s.final_structure)
            writer.write_file(os.path.join(dirname, "{}.cif".format(formula)))
def _apply_transformation(inputs):
    """
    Helper method for multiprocessing of apply_transformation. Must not be
    in the class so that it can be pickled.

    Args:
        inputs: Tuple containing the transformed structure, the transformation
            to be applied, a boolean indicating whether to extend the
            collection, and a boolean indicating whether to clear the redo

    Returns:
        List of output structures (the modified initial structure, plus
        any new structures created by a one-to-many transformation)
    """
    ts, transformation, extend_collection, clear_redo = inputs
    extra = ts.append_transformation(transformation, extend_collection,
                                     clear_redo=clear_redo)
    outputs = [ts]
    if extra:
        outputs.extend(extra)
    return outputs
| StarcoderdataPython |
6427456 | #!/usr/bin/env python
# coding=utf-8
from handlers.kbeServer.Editor.Interface import interface_res
def ResVersionResponse(DB, subCode, uid, data):
    """Dispatch a resource-version request to the handler for ``subCode``.

    Returns the handler's result, or None when subCode is not recognized.
    """
    dispatch = {
        10: lambda: interface_res.GetUpdateVersion(DB, uid, data),
        11: lambda: interface_res.AnlyzeCode(DB, uid, data),
        # New-style resource version lookup (no uid needed).
        40: lambda: interface_res.new_get_update_version(DB, data),
        41: lambda: interface_res.new_anlyze_code(DB, data),
    }
    handler = dispatch.get(subCode)
    return handler() if handler is not None else None
def ConfigGet_Server(DB, params):
    """Fetch server configuration by delegating to the resource interface."""
    result = interface_res.Server_ConfigGet(DB, params)
    return result
| StarcoderdataPython |
12803868 | import logging
import os
from dockerfile_parse.constants import DOCKERFILE_FILENAME
from checkov.common.output.record import Record
from checkov.common.output.report import Report
from checkov.common.runners.base_runner import BaseRunner, filter_ignored_directories
from checkov.dockerfile.parser import parse, collect_skipped_checks
from checkov.dockerfile.registry import registry
from checkov.runner_filter import RunnerFilter
# Basenames recognized as Dockerfiles (taken from dockerfile_parse's
# DOCKERFILE_FILENAME constant).
DOCKER_FILE_MASK = [DOCKERFILE_FILENAME]
class Runner(BaseRunner):
    """Checkov runner that discovers, parses, and scans Dockerfiles."""

    check_type = "dockerfile"

    def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=None,
            collect_skip_comments=True):
        """Scan Dockerfiles and return a Report of all check results.

        Args:
            root_folder: Directory tree to walk for files named like a
                Dockerfile. May be combined with ``files``.
            external_checks_dir: Optional list of directories from which
                additional (external) checks are loaded.
            files: Explicit file paths; only those whose basename matches
                the Dockerfile mask are parsed.
            runner_filter: RunnerFilter controlling which checks run and
                which directories are excluded. Defaults to a fresh
                RunnerFilter per call (the original used a shared mutable
                default argument).
            collect_skip_comments: Unused here; kept for interface
                compatibility.

        Returns:
            Report with one Record per (file, check) result.
        """
        # Create the default filter per call instead of sharing one mutable
        # instance across all calls via a default argument.
        runner_filter = runner_filter or RunnerFilter()
        report = Report(self.check_type)
        definitions = {}
        definitions_raw = {}
        files_list = []
        if external_checks_dir:
            for directory in external_checks_dir:
                registry.load_external_checks(directory)

        if files:
            for file in files:
                if os.path.basename(file) in DOCKER_FILE_MASK:
                    (definitions[file], definitions_raw[file]) = parse(file)

        if root_folder:
            for root, d_names, f_names in os.walk(root_folder):
                filter_ignored_directories(d_names, runner_filter.excluded_paths)
                for file in f_names:
                    if file in DOCKER_FILE_MASK:
                        files_list.append(os.path.join(root, file))

            for file in files_list:
                relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
                try:
                    (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)
                except TypeError:
                    logging.info(f'Dockerfile skipping {file} as it is not a valid dockerfile template')

        for docker_file_path in definitions.keys():
            # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
            # or there will be no leading slash; root_folder will always be none.
            # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
            # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).
            if docker_file_path[0] == '/':
                path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path
            else:
                path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path

            file_abs_path = os.path.abspath(path_to_convert)
            skipped_checks = collect_skipped_checks(definitions[docker_file_path])
            instructions = definitions[docker_file_path]

            results = registry.scan(docker_file_path, instructions, skipped_checks,
                                    runner_filter)
            for check, check_result in results.items():
                result_configuration = check_result['results_configuration']
                startline = 0
                endline = 0
                result_instruction = ""
                if result_configuration:
                    startline = result_configuration['startline']
                    endline = result_configuration['endline']
                    result_instruction = result_configuration["instruction"]

                codeblock = []
                self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)
                record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                code_block=codeblock,
                                file_path=docker_file_path,
                                file_line_range=[startline,
                                                 endline],
                                # The original passed startline as a dead third
                                # positional argument that "{}.{}" silently
                                # discarded; it is dropped here so the resource
                                # id output is unchanged.
                                resource="{}.{}".format(docker_file_path,
                                                        result_instruction),
                                evaluations=None, check_class=check.__class__.__module__,
                                file_abs_path=file_abs_path, entity_tags=None)
                report.add_record(record=record)

        return report

    def calc_record_codeblock(self, codeblock, definitions_raw, docker_file_path, endline, startline):
        """Append (line number, raw line) pairs for lines startline..endline
        (inclusive) of the given Dockerfile to ``codeblock`` in place."""
        for line in range(startline, endline + 1):
            codeblock.append((line, definitions_raw[docker_file_path][line]))
1647737 | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gpflow
import pytest
import tensorflow as tf
from tests.util.misc import random_seed
from trieste.acquisition.function import ExpectedHypervolumeImprovement
from trieste.acquisition.rule import AcquisitionRule, EfficientGlobalOptimization
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.data import Dataset
from trieste.models import GaussianProcessRegression
from trieste.models.model_interfaces import ModelStack
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.utils.multi_objectives import VLMOP2
from trieste.utils.objectives import mk_observer
from trieste.utils.pareto import Pareto, get_reference_point
@random_seed
@pytest.mark.parametrize(
    "num_steps, acquisition_rule",
    [
        (20, EfficientGlobalOptimization(ExpectedHypervolumeImprovement().using(OBJECTIVE))),
    ],
)
def test_multi_objective_optimizer_finds_pareto_front_of_the_VLMOP2_function(
    num_steps: int, acquisition_rule: AcquisitionRule
) -> None:
    """End-to-end check that EHVI-driven Bayesian optimization approaches the
    Pareto front of the two-objective VLMOP2 benchmark within num_steps."""
    search_space = Box([-2, -2], [2, 2])
    def build_stacked_independent_objectives_model(data: Dataset) -> ModelStack:
        """Build one independent GPR model per objective and stack them."""
        gprs = []
        for idx in range(2):
            # Slice out the observations for a single objective (column idx)
            # while keeping the shared query points.
            single_obj_data = Dataset(
                data.query_points, tf.gather(data.observations, [idx], axis=1)
            )
            variance = tf.math.reduce_variance(single_obj_data.observations)
            kernel = gpflow.kernels.Matern52(variance, tf.constant([0.2, 0.2], tf.float64))
            # Fix a small observation noise; the likelihood variance is not
            # trained.
            gpr = gpflow.models.GPR(single_obj_data.astuple(), kernel, noise_variance=1e-5)
            gpflow.utilities.set_trainable(gpr.likelihood, False)
            gprs.append((GaussianProcessRegression(gpr), 1))
        return ModelStack(*gprs)
    observer = mk_observer(VLMOP2().objective(), OBJECTIVE)
    initial_query_points = search_space.sample(10)
    initial_data = observer(initial_query_points)
    model = build_stacked_independent_objectives_model(initial_data[OBJECTIVE])
    dataset = (
        BayesianOptimizer(observer, search_space)
        .optimize(num_steps, initial_data, {OBJECTIVE: model}, acquisition_rule)
        .try_get_final_datasets()[OBJECTIVE]
    )
    # A small log hypervolume difference corresponds to a successful optimization.
    ref_point = get_reference_point(dataset.observations)
    obs_hv = Pareto(dataset.observations).hypervolume_indicator(ref_point)
    # Compare against the hypervolume of a densely sampled ideal Pareto front
    # under the same reference point.
    ideal_pf = tf.cast(VLMOP2().gen_pareto_optimal_points(100), dtype=tf.float64)
    ideal_hv = Pareto(ideal_pf).hypervolume_indicator(ref_point)
    assert tf.math.log(ideal_hv - obs_hv) < -3.5
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.