blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dabae791212f75fb54e5c3afe7fdb7c4b28ddbe7 | cc0c40037d04330df9354e5f3c8abb911c621151 | /model/model.py | ad7e3aa51c789c1685fd64bf4a80746260588d59 | [] | no_license | johleh/voxelnet | e5f5ff022d7448b546b765ddbc32e8f5beb3a21c | 394e0f63cc81aae941e98db91edb9d3a426e6949 | refs/heads/master | 2021-01-25T13:58:16.354943 | 2018-02-19T22:41:31 | 2018-02-19T22:41:31 | 123,631,344 | 1 | 0 | null | 2018-03-02T21:12:01 | 2018-03-02T21:12:01 | null | UTF-8 | Python | false | false | 18,554 | py | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
import sys
import os
import tensorflow as tf
import cv2
from numba import jit
from config import cfg
from utils import *
from model.group_pointcloud import FeatureNet
from model.rpn import MiddleAndRPN
class RPN3D(object):
def __init__(self,
             cls='Car',
             single_batch_size=2,  # batch_size_per_gpu
             learning_rate=0.001,
             max_gradient_norm=5.0,
             alpha=1.5,
             beta=1,
             is_train=True,
             avail_gpus=None):
    """Build the multi-GPU VoxelNet detection graph (towers, losses, optimizer).

    :param cls: object class to detect (e.g. 'Car')
    :param single_batch_size: batch size handled by each GPU tower
    :param learning_rate: initial learning rate; decayed piecewise by epoch
    :param max_gradient_norm: global-norm gradient clipping threshold
    :param alpha: positive-example weight in the RPN classification loss
    :param beta: negative-example weight in the RPN classification loss
    :param is_train: toggles training behaviour of the sub-networks
    :param avail_gpus: list of GPU id strings; defaults to ['0']
    """
    # Fix: the old default was avail_gpus=['0'], a mutable default argument
    # shared between all calls; use a None sentinel instead.
    if avail_gpus is None:
        avail_gpus = ['0']
    # hyper parameters and status
    self.cls = cls
    self.single_batch_size = single_batch_size
    self.learning_rate = tf.Variable(
        float(learning_rate), trainable=False, dtype=tf.float32)
    self.global_step = tf.Variable(1, trainable=False)
    self.epoch = tf.Variable(0, trainable=False)
    self.epoch_add_op = self.epoch.assign(self.epoch + 1)
    self.alpha = alpha
    self.beta = beta
    self.avail_gpus = avail_gpus

    # piecewise learning-rate schedule: decay x0.1 at epoch 80, x0.01 at 120
    boundaries = [80, 120]
    values = [ self.learning_rate, self.learning_rate * 0.1, self.learning_rate * 0.01 ]
    lr = tf.train.piecewise_constant(self.epoch, boundaries, values)

    # build graph
    # input placeholders (one entry per GPU tower)
    self.vox_feature = []
    self.vox_number = []
    self.vox_coordinate = []
    self.targets = []
    self.pos_equal_one = []
    self.pos_equal_one_sum = []
    self.pos_equal_one_for_reg = []
    self.neg_equal_one = []
    self.neg_equal_one_sum = []

    self.delta_output = []
    self.prob_output = []
    self.opt = tf.train.AdamOptimizer(lr)
    self.gradient_norm = []
    self.tower_grads = []
    with tf.variable_scope(tf.get_variable_scope()):
        for idx, dev in enumerate(self.avail_gpus):
            with tf.device('/gpu:{}'.format(dev)), tf.name_scope('gpu_{}'.format(dev)):
                # must use name scope here since we do not want to create new variables
                # graph
                feature = FeatureNet(
                    training=is_train, batch_size=self.single_batch_size)
                rpn = MiddleAndRPN(
                    input=feature.outputs, alpha=self.alpha, beta=self.beta, training=is_train)
                # share variables between all towers
                tf.get_variable_scope().reuse_variables()
                # input
                self.vox_feature.append(feature.feature)
                self.vox_number.append(feature.number)
                self.vox_coordinate.append(feature.coordinate)
                self.targets.append(rpn.targets)
                self.pos_equal_one.append(rpn.pos_equal_one)
                self.pos_equal_one_sum.append(rpn.pos_equal_one_sum)
                self.pos_equal_one_for_reg.append(
                    rpn.pos_equal_one_for_reg)
                self.neg_equal_one.append(rpn.neg_equal_one)
                self.neg_equal_one_sum.append(rpn.neg_equal_one_sum)
                # output
                feature_output = feature.outputs
                delta_output = rpn.delta_output
                prob_output = rpn.prob_output
                # loss and grad
                self.loss = rpn.loss
                self.reg_loss = rpn.reg_loss
                self.cls_loss = rpn.cls_loss
                self.cls_pos_loss = rpn.cls_pos_loss_rec
                self.cls_neg_loss = rpn.cls_neg_loss_rec
                self.params = tf.trainable_variables()
                gradients = tf.gradients(self.loss, self.params)
                clipped_gradients, gradient_norm = tf.clip_by_global_norm(
                    gradients, max_gradient_norm)

                self.delta_output.append(delta_output)
                self.prob_output.append(prob_output)
                self.tower_grads.append(clipped_gradients)
                self.gradient_norm.append(gradient_norm)

    # output shape is identical for every tower; keep the last one
    self.rpn_output_shape = rpn.output_shape

    # loss and optimizer
    # self.xxxloss is only the loss for the lowest tower
    with tf.device('/gpu:{}'.format(self.avail_gpus[0])):
        self.grads = average_gradients(self.tower_grads)
        self.update = self.opt.apply_gradients(
            zip(self.grads, self.params), global_step=self.global_step)
        self.gradient_norm = tf.group(*self.gradient_norm)
        # concatenate tower outputs back into one batch
        self.delta_output = tf.concat(self.delta_output, axis=0)
        self.prob_output = tf.concat(self.prob_output, axis=0)

    self.anchors = cal_anchors()
    # for predict and image summary
    self.rgb = tf.placeholder(
        tf.uint8, [None, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH, 3])
    self.bv = tf.placeholder(tf.uint8, [
        None, cfg.BV_LOG_FACTOR * cfg.INPUT_HEIGHT, cfg.BV_LOG_FACTOR * cfg.INPUT_WIDTH, 3])
    self.bv_heatmap = tf.placeholder(tf.uint8, [
        None, cfg.BV_LOG_FACTOR * cfg.FEATURE_HEIGHT, cfg.BV_LOG_FACTOR * cfg.FEATURE_WIDTH, 3])
    self.boxes2d = tf.placeholder(tf.float32, [None, 4])
    self.boxes2d_scores = tf.placeholder(tf.float32, [None])

    # NMS(2D)
    with tf.device('/gpu:{}'.format(self.avail_gpus[0])):
        self.box2d_ind_after_nms = tf.image.non_max_suppression(
            self.boxes2d, self.boxes2d_scores, max_output_size=cfg.RPN_NMS_POST_TOPK, iou_threshold=cfg.RPN_NMS_THRESH)

    # summary and saver
    self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2,
                                max_to_keep=10, pad_step_number=True, keep_checkpoint_every_n_hours=1.0)

    self.train_summary = tf.summary.merge([
        tf.summary.scalar('train/loss', self.loss),
        tf.summary.scalar('train/reg_loss', self.reg_loss),
        tf.summary.scalar('train/cls_loss', self.cls_loss),
        tf.summary.scalar('train/cls_pos_loss', self.cls_pos_loss),
        tf.summary.scalar('train/cls_neg_loss', self.cls_neg_loss),
        *[tf.summary.histogram(each.name, each) for each in self.params]
    ])

    self.validate_summary = tf.summary.merge([
        tf.summary.scalar('validate/loss', self.loss),
        tf.summary.scalar('validate/reg_loss', self.reg_loss),
        tf.summary.scalar('validate/cls_loss', self.cls_loss),
        tf.summary.scalar('validate/cls_pos_loss', self.cls_pos_loss),
        tf.summary.scalar('validate/cls_neg_loss', self.cls_neg_loss)
    ])

    # TODO: bird_view_summary and front_view_summary
    self.predict_summary = tf.summary.merge([
        tf.summary.image('predict/bird_view_lidar', self.bv),
        tf.summary.image('predict/bird_view_heatmap', self.bv_heatmap),
        tf.summary.image('predict/front_view_rgb', self.rgb),
    ])
def train_step(self, session, data, train=False, summary=False):
    """Run one forward (and optionally backward) pass for one multi-GPU batch.

    :param session: tf.Session to run in
    :param data: tuple (tag, label, vox_feature, vox_number, vox_coordinate)
    :param train: when True also apply gradients (self.update)
    :param summary: when True append the merged train summary to the fetches
    :return: list of fetched values, in output_feed order
    """
    # input:
    #     (N) tag
    #     (N, N') label
    #     vox_feature
    #     vox_number
    #     vox_coordinate
    tag = data[0]
    label = data[1]
    vox_feature = data[2]
    vox_number = data[3]
    vox_coordinate = data[4]
    print('train', tag)
    # build RPN classification/regression targets from ground-truth labels
    pos_equal_one, neg_equal_one, targets = cal_rpn_target(
        label, self.rpn_output_shape, self.anchors, cls=cfg.DETECT_OBJ, coordinate='lidar')
    # replicate the positive mask over the 7 regression channels of each of
    # the two anchor orientations
    pos_equal_one_for_reg = np.concatenate(
        [np.tile(pos_equal_one[..., [0]], 7), np.tile(pos_equal_one[..., [1]], 7)], axis=-1)
    # per-sample normalizers, clipped to >= 1 to avoid division by zero
    pos_equal_one_sum = np.clip(np.sum(pos_equal_one, axis=(
        1, 2, 3)).reshape(-1, 1, 1, 1), a_min=1, a_max=None)
    neg_equal_one_sum = np.clip(np.sum(neg_equal_one, axis=(
        1, 2, 3)).reshape(-1, 1, 1, 1), a_min=1, a_max=None)

    # shard the batch across GPU towers: tower idx gets slice
    # [idx * single_batch_size, (idx + 1) * single_batch_size)
    input_feed = {}
    for idx in range(len(self.avail_gpus)):
        input_feed[self.vox_feature[idx]] = vox_feature[idx]
        input_feed[self.vox_number[idx]] = vox_number[idx]
        input_feed[self.vox_coordinate[idx]] = vox_coordinate[idx]
        input_feed[self.targets[idx]] = targets[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.pos_equal_one[idx]] = pos_equal_one[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.pos_equal_one_sum[idx]] = pos_equal_one_sum[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.pos_equal_one_for_reg[idx]] = pos_equal_one_for_reg[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.neg_equal_one[idx]] = neg_equal_one[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.neg_equal_one_sum[idx]] = neg_equal_one_sum[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
    if train:
        output_feed = [self.loss, self.reg_loss,
                       self.cls_loss, self.cls_pos_loss, self.cls_neg_loss, self.gradient_norm, self.update]
    else:
        output_feed = [self.loss, self.reg_loss, self.cls_loss, self.cls_pos_loss, self.cls_neg_loss]
    if summary:
        output_feed.append(self.train_summary)
    # TODO: multi-gpu support for test and predict step
    return session.run(output_feed, input_feed)
def validate_step(self, session, data, summary=False):
    """Compute validation losses for one multi-GPU batch (no gradient update).

    :param session: tf.Session to run in
    :param data: tuple (tag, label, vox_feature, vox_number, vox_coordinate)
    :param summary: when True append the merged validate summary to the fetches
    :return: list of fetched values, in output_feed order
    """
    # input:
    #     (N) tag
    #     (N, N') label
    #     vox_feature
    #     vox_number
    #     vox_coordinate
    tag = data[0]
    label = data[1]
    vox_feature = data[2]
    vox_number = data[3]
    vox_coordinate = data[4]
    print('valid', tag)
    # NOTE(review): unlike train_step this call relies on cal_rpn_target's
    # default cls/coordinate arguments -- confirm the defaults match
    # cfg.DETECT_OBJ and 'lidar'
    pos_equal_one, neg_equal_one, targets = cal_rpn_target(
        label, self.rpn_output_shape, self.anchors)
    # replicate the positive mask over the 7 regression channels per anchor
    pos_equal_one_for_reg = np.concatenate(
        [np.tile(pos_equal_one[..., [0]], 7), np.tile(pos_equal_one[..., [1]], 7)], axis=-1)
    # per-sample normalizers, clipped to >= 1 to avoid division by zero
    pos_equal_one_sum = np.clip(np.sum(pos_equal_one, axis=(
        1, 2, 3)).reshape(-1, 1, 1, 1), a_min=1, a_max=None)
    neg_equal_one_sum = np.clip(np.sum(neg_equal_one, axis=(
        1, 2, 3)).reshape(-1, 1, 1, 1), a_min=1, a_max=None)

    # shard the batch across GPU towers (same layout as train_step)
    input_feed = {}
    for idx in range(len(self.avail_gpus)):
        input_feed[self.vox_feature[idx]] = vox_feature[idx]
        input_feed[self.vox_number[idx]] = vox_number[idx]
        input_feed[self.vox_coordinate[idx]] = vox_coordinate[idx]
        input_feed[self.targets[idx]] = targets[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.pos_equal_one[idx]] = pos_equal_one[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.pos_equal_one_sum[idx]] = pos_equal_one_sum[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.pos_equal_one_for_reg[idx]] = pos_equal_one_for_reg[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.neg_equal_one[idx]] = neg_equal_one[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]
        input_feed[self.neg_equal_one_sum[idx]] = neg_equal_one_sum[
            idx * self.single_batch_size:(idx + 1) * self.single_batch_size]

    output_feed = [self.loss, self.reg_loss, self.cls_loss]
    if summary:
        output_feed.append(self.validate_summary)
    return session.run(output_feed, input_feed)
def predict_step(self, session, data, summary=False, vis=False):
    """Run inference on one batch and post-process to scored 3-D boxes.

    :param session: tf.Session to run in
    :param data: (tag, label, vox_feature, vox_number, vox_coordinate, img, lidar)
    :param summary: when True also render an image summary for the first sample
    :param vis: when True also return rendered images for every sample
    :return: (tag, ret_box3d_score) plus, depending on flags, either the
             serialized summary or (front_images, bird_views, heatmaps)
    """
    # input:
    #     (N) tag
    #     (N, N') label(can be empty)
    #     vox_feature
    #     vox_number
    #     vox_coordinate
    #     img (N, w, l, 3)
    #     lidar (N, N', 4)
    # output: A, B, C
    #     A: (N) tag
    #     B: (N, N') (class, x, y, z, h, w, l, rz, score)
    #     C; summary(optional)
    tag = data[0]
    label = data[1]
    vox_feature = data[2]
    vox_number = data[3]
    vox_coordinate = data[4]
    img = data[5]
    lidar = data[6]

    # ground-truth boxes are only needed for drawing
    if summary or vis:
        batch_gt_boxes3d = label_to_gt_box3d(
            label, cls=self.cls, coordinate='lidar')
    print('predict', tag)
    input_feed = {}
    for idx in range(len(self.avail_gpus)):
        input_feed[self.vox_feature[idx]] = vox_feature[idx]
        input_feed[self.vox_number[idx]] = vox_number[idx]
        input_feed[self.vox_coordinate[idx]] = vox_coordinate[idx]

    output_feed = [self.prob_output, self.delta_output]
    probs, deltas = session.run(output_feed, input_feed)
    # BOTTLENECK
    # decode regression deltas into absolute 3-D boxes
    batch_boxes3d = delta_to_boxes3d(
        deltas, self.anchors, coordinate='lidar')
    # bird-view boxes: columns (x, y, w, l, rz) of the 3-D boxes
    batch_boxes2d = batch_boxes3d[:, :, [0, 1, 4, 5, 6]]
    batch_probs = probs.reshape(
        (len(self.avail_gpus) * self.single_batch_size, -1))

    # NMS
    ret_box3d = []
    ret_score = []
    for batch_id in range(len(self.avail_gpus) * self.single_batch_size):
        # remove box with low score
        ind = np.where(batch_probs[batch_id, :] >= cfg.RPN_SCORE_THRESH)[0]
        tmp_boxes3d = batch_boxes3d[batch_id, ind, ...]
        tmp_boxes2d = batch_boxes2d[batch_id, ind, ...]
        tmp_scores = batch_probs[batch_id, ind]

        # TODO: if possible, use rotate NMS
        # axis-aligned NMS on the stand-up (bounding) rectangles of the
        # rotated bird-view boxes
        boxes2d = corner_to_standup_box2d(
            center_to_corner_box2d(tmp_boxes2d, coordinate='lidar'))
        ind = session.run(self.box2d_ind_after_nms, {
            self.boxes2d: boxes2d,
            self.boxes2d_scores: tmp_scores
        })
        tmp_boxes3d = tmp_boxes3d[ind, ...]
        tmp_scores = tmp_scores[ind]
        ret_box3d.append(tmp_boxes3d)
        ret_score.append(tmp_scores)

    # stack per-sample rows as (class, 7 box params, score)
    ret_box3d_score = []
    for boxes3d, scores in zip(ret_box3d, ret_score):
        ret_box3d_score.append(np.concatenate([np.tile(self.cls, len(boxes3d))[:, np.newaxis],
                                               boxes3d, scores[:, np.newaxis]], axis=-1))

    if summary:
        # only summry 1 in a batch
        cur_tag = tag[0]
        P, Tr, R = load_calib( os.path.join( cfg.CALIB_DIR, cur_tag + '.txt' ) )

        front_image = draw_lidar_box3d_on_image(img[0], ret_box3d[0], ret_score[0],
                                                batch_gt_boxes3d[0], P2=P, T_VELO_2_CAM=Tr, R_RECT_0=R)

        bird_view = lidar_to_bird_view_img(
            lidar[0], factor=cfg.BV_LOG_FACTOR)

        bird_view = draw_lidar_box3d_on_birdview(bird_view, ret_box3d[0], ret_score[0],
                                                 batch_gt_boxes3d[0], factor=cfg.BV_LOG_FACTOR, P2=P, T_VELO_2_CAM=Tr, R_RECT_0=R)

        heatmap = colorize(probs[0, ...], cfg.BV_LOG_FACTOR)

        ret_summary = session.run(self.predict_summary, {
            self.rgb: front_image[np.newaxis, ...],
            self.bv: bird_view[np.newaxis, ...],
            self.bv_heatmap: heatmap[np.newaxis, ...]
        })

        return tag, ret_box3d_score, ret_summary

    if vis:
        front_images, bird_views, heatmaps = [], [], []
        for i in range(len(img)):
            cur_tag = tag[i]
            P, Tr, R = load_calib( os.path.join( cfg.CALIB_DIR, cur_tag + '.txt' ) )

            front_image = draw_lidar_box3d_on_image(img[i], ret_box3d[i], ret_score[i],
                                                    batch_gt_boxes3d[i], P2=P, T_VELO_2_CAM=Tr, R_RECT_0=R)

            bird_view = lidar_to_bird_view_img(
                lidar[i], factor=cfg.BV_LOG_FACTOR)

            bird_view = draw_lidar_box3d_on_birdview(bird_view, ret_box3d[i], ret_score[i],
                                                     batch_gt_boxes3d[i], factor=cfg.BV_LOG_FACTOR, P2=P, T_VELO_2_CAM=Tr, R_RECT_0=R)

            heatmap = colorize(probs[i, ...], cfg.BV_LOG_FACTOR)

            front_images.append(front_image)
            bird_views.append(bird_view)
            heatmaps.append(heatmap)

        return tag, ret_box3d_score, front_images, bird_views, heatmaps

    return tag, ret_box3d_score
def average_gradients(tower_grads):
    """Element-wise average of the gradients computed by each GPU tower.

    ref:
    https://github.com/tensorflow/models/blob/6db9f0282e2ab12795628de6200670892a8ad6ba/tutorials/image/cifar10/cifar10_multi_gpu_train.py#L103
    but only contains grads, no vars

    :param tower_grads: list (one entry per tower) of lists of gradient
        tensors, all in the same variable order
    :return: list of gradient tensors averaged over towers
    """
    average_grads = []
    # zip(*tower_grads) yields, for each variable, the tuple of that
    # variable's gradient from every tower
    for grad_and_vars in zip(*tower_grads):
        grads = []
        for g in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)

            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension.
        grad = tf.concat(axis=0, values=grads)
        grad = tf.reduce_mean(grad, 0)

        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        grad_and_var = grad
        average_grads.append(grad_and_var)
    return average_grads
# module is import-only; no standalone behaviour
if __name__ == '__main__':
    pass
| [
"qianguih@usc.edu"
] | qianguih@usc.edu |
6396de713f8a178c91286cf994ca71be0ee0077d | 87a1a0a93d8c6020044146645ef93df7b1a9c9bf | /led.py | c1403e0a1f6f33c5db18305b42a7f84bb9cdca2f | [] | no_license | sitbon/synapse | 7fd82b0e1020e2b79bb7abb7586a2c53f80a9828 | 40e2e6048927e6e5f159f311132d1791cc9fd997 | refs/heads/master | 2020-04-08T22:01:15.972131 | 2018-11-30T04:32:05 | 2018-11-30T04:32:05 | 159,768,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,966 | py | import sys, threading
from time import sleep
from edi2c import ads1x15
from edi2c import pca9685
# number of PWM channels and the driver's maximum PWM "off" tick value
CHANNELS = pca9685.CHANNELS
MAX_VALUE = pca9685.PWM_MAX_OFF
# sentinel channel value understood by the pwm layer as "every channel"
ALL_CHANNELS = None
# logical LED index -> physical PWM channel (wiring order)
MAP = [1, 4, 5, 0, 3, 2, 6, 7, 10, 13, 8, 9, 11, 12, 14, 15]
# groups of logical LEDs that light together, ordered as display levels
LEVELS = [
    [0],
    [1, 2],
    [3, 4, 5],
    [6, 7],
    [8, 9],
    [10, 11, 12, 13]
]
adc = ads1x15.ADS1X15(ic=ads1x15.IC_ADS1115)  # distance-sensor ADC
pwm = pca9685.PCA9685()  # LED PWM driver
# beats per minute shared between the stdin reader and the LED thread
ekg_bpm = 60
def ekg_task():
    """Read BPM integers from stdin forever, publishing them via ekg_bpm.

    Runs in the main thread while the LED animation runs in a daemon thread;
    exits when stdin closes or an unhandled parse error escapes the loop.
    """
    global ekg_bpm
    print >>sys.stderr, "LIGHTS: EKG read stdin running"
    try:
        while True:
            print >>sys.stderr, "LIGHTS: waiting for EKG input"
            value = sys.stdin.readline()
            print >>sys.stderr, "LIGHTS: value =", repr(value)
            ekg_bpm = int(value)
            continue
            # NOTE(review): everything below is unreachable because of the
            # 'continue' above -- leftover from an earlier parsing strategy
            value = value.strip()
            try:
                print >>sys.stderr, "LIGHTS: parsing value"
                #value = int(value)
                value = 60
            except:
                print >>sys.stderr, "LIGHTS: bad value", value
                continue
            print >>sys.stderr, "LIGHTS: setting BPM"
            ekg_bpm = value
            print >>sys.stderr, "LIGHTS: EKG at", ekg_bpm, "BPM"
    except Exception, e:
        print >>sys.stderr, "LIGHTS:", str(e)
    finally:
        print >>sys.stderr, "LIGHTS: EKG task exiting"
def led_ekg(args):
    """Sweep the LED levels up and down in time with the shared ekg_bpm value."""
    # NOTE(review): unused in this program -- appears copied from led_mindwave
    intensity_map = lambda x: max(0.0, float(x - 50) / (90.0 - 50.0))

    def ekg_led_task():
        """Daemon loop: one full up-down sweep of the levels per heartbeat."""
        print >>sys.stderr, "LIGHTS: EKG lights running"
        program = []
        # one heartbeat (60/bpm seconds) spread over all program steps;
        # a callable so BPM changes take effect on the next step
        step_duration = lambda: (60.0 / float(ekg_bpm)) / float(len(program))
        for level in LEVELS:
            # light the current level and force every other level off
            activations = [[MAP[c], 0, 30] for c in level]
            for level2 in LEVELS:
                if level2 != level:
                    activations += [[MAP[c], 0, 0] for c in level2]
            on = [step_duration, activations]
            #off = [0, [[MAP[c], 0, 0] for c in level]]
            program.append(on)
        # mirror the ramp (skipping the peak) so the sweep comes back down
        program += list(reversed(program))[1:]
        while True:
            pwm.run_program(program, debug=False)

    ekg_thread = threading.Thread(target=ekg_led_task)
    ekg_thread.setDaemon(True)
    ekg_thread.start()
    sleep(0.001)
    ekg_task()
def led_mindwave(args):
    """Map attention values (~50-90) read from stdin to the number of lit levels."""
    # normalise raw attention (50..90) to 0.0..1.0
    intensity_map = lambda x: max(0.0, float(x - 50) / (90.0 - 50.0))
    program = []
    for level in LEVELS:
        on = [0, [[MAP[c], 0, 16] for c in level]]
        #off = [0, [[MAP[c], 0, 0] for c in level]]
        program.append(on)
    while True:
        value = sys.stdin.readline()
        value = value.strip()
        try:
            value = int(value)
        except:
            # ignore non-numeric input lines
            continue
        on = intensity_map(value)
        # number of levels to light, proportional to intensity, clamped
        step = min(len(program) - 1, max(0, int(round((len(program) - 1) * on))))
        print "LIGHTS:", str(round(on * 100)) + "%", program[step:step+1]
        pwm.set_off(None, 0)
        pwm.run_program(program[:step+1])
def led_climb_test(args):
    """Light the LED levels one after another over a fixed total duration."""
    total_seconds = 5.0
    step_seconds = total_seconds / float(len(LEVELS))
    program = [[step_seconds, [[MAP[channel], 0, 3] for channel in level]]
               for level in LEVELS]
    pwm.run_program(program, debug=False)
def led_map_assist(args):
    """Light each raw PWM channel in turn to help work out the physical MAP order."""
    hold_seconds = 5.0
    steps = []
    for channel in range(CHANNELS):
        # hold the channel on, then switch it off before moving on
        steps += [[hold_seconds, [[channel, 0, 10]]],
                  [0, [[channel, 0, 0]]]]
    pwm.run_program(steps, debug=False)
def led_portray(args):
    """Play a low/medium/high pulse sequence on all channels, repeated 4 times.

    Bug fix: 'channel' is ALL_CHANNELS, i.e. None, so the original
    'MAP[channel]' raised TypeError (list indices must be integers).
    The pwm layer treats a channel of None as "all channels" (see
    led_pulse), so the sentinel must be passed through unmapped.
    """
    HIGH = MAX_VALUE
    LOW = int(HIGH * .333)
    MEDIUM = int(HIGH * .666)
    PULSE_INTERVAL = 2.0
    # brightness increment between program steps (at least 1 tick)
    RESOLUTION = max(1, int(MAX_VALUE * .01))
    channel = ALL_CHANNELS

    def generate_pulse(peak, pulse_interval=PULSE_INTERVAL, duration_at=None):
        """Build a 0..peak..0 ramp; duration_at(index) overrides step timing."""
        index = 0
        pulse_program = []
        while index <= peak:
            if duration_at is None:
                duration = pulse_interval / ((2. * peak + 1) / RESOLUTION)
            else:
                duration = duration_at(index)
            value = index
            program_step = [duration, [[channel, 0, index]]]
            pulse_program.append(program_step)
            index += RESOLUTION
        # mirror the ramp (without repeating the peak) to fade back down
        return pulse_program + list(reversed(pulse_program))[1:]

    #pulse_series_program = generate_pulse(LOW, 0.8)
    #pulse_series_program += generate_pulse(MEDIUM, 1.0)
    #pulse_series_program += generate_pulse(HIGH, 1.2)
    pulse_series_program = generate_pulse(LOW, 1.6)
    pulse_series_program += generate_pulse(MEDIUM, 2.0)
    pulse_series_program += generate_pulse(HIGH, 2.5)
    # hold everything off for 2 seconds between repetitions
    program = pulse_series_program + [[2, [[channel, 0, 0]]]]
    program *= 4
    pwm.run_program(program, debug=False)
def led_pulse(args):
    """Endlessly pulse all channels between 0 and a low amplitude (slow breathing)."""
    amplitude = 8
    PULSE_INTERVAL = 1.0
    RESOLUTION = 1

    def generate_pulse(peak, pulse_interval=PULSE_INTERVAL, duration_at=None):
        """Build a 0..peak..0 ramp on all channels (channel None = all)."""
        index = 0
        pulse_program = []
        while index <= peak:
            if duration_at is None:
                duration = pulse_interval / ((2. * peak + 1) / RESOLUTION)
            else:
                duration = duration_at(index)
            value = index
            program_step = [duration, [[None, 0, index]]]
            pulse_program.append(program_step)
            index += RESOLUTION
        # mirror the ramp (without repeating the peak) to fade back down
        return pulse_program + list(reversed(pulse_program))[1:]

    pulse_series_program = generate_pulse(amplitude, 1.5)
    program = pulse_series_program
    while True:
        pwm.run_program(program, debug=False)
def led_pulse_fast(args):
    """Endlessly pulse all channels, faster variant of led_pulse.

    NOTE(review): duplicates led_pulse apart from the timing constants --
    could be collapsed into one parametrised program.
    """
    amplitude = 8
    PULSE_INTERVAL = 0.5
    RESOLUTION = 1

    def generate_pulse(peak, pulse_interval=PULSE_INTERVAL, duration_at=None):
        """Build a 0..peak..0 ramp on all channels (channel None = all)."""
        index = 0
        pulse_program = []
        while index <= peak:
            if duration_at is None:
                duration = pulse_interval / ((2. * peak + 1) / RESOLUTION)
            else:
                duration = duration_at(index)
            value = index
            program_step = [duration, [[None, 0, index]]]
            pulse_program.append(program_step)
            index += RESOLUTION
        # mirror the ramp (without repeating the peak) to fade back down
        return pulse_program + list(reversed(pulse_program))[1:]

    pulse_series_program = generate_pulse(amplitude, 0.5)
    program = pulse_series_program
    while True:
        pwm.run_program(program, debug=False)
def led_proximity_simulation_sequenced(args):
    """Demo: ramp brightness up through LOW and MEDIUM once, then breathe forever."""
    HIGH = MAX_VALUE
    LOW = int(HIGH * .10)
    MEDIUM = int(HIGH * .25)
    RAMP_INTERVAL = 1.0
    # brightness increment between program steps (at least 1 tick)
    RESOLUTION = max(1, int(MAX_VALUE * .01))

    def generate_ramp(start, peak, ramp_interval=RAMP_INTERVAL, duration_at=None):
        """Build an increasing start..peak brightness ramp on all channels."""
        index = start
        ramp_program = []
        while index <= peak:
            if duration_at is None:
                duration = float(ramp_interval) / (((peak - start) + 1) / RESOLUTION)
            else:
                duration = duration_at(index)
            value = index
            program_step = [duration, [[ALL_CHANNELS, 0, index]]]
            ramp_program.append(program_step)
            index += RESOLUTION
        return ramp_program

    # intro sequence: slow rise to LOW, slower rise to MEDIUM, quick fall
    ramp_series_program = []
    ramp_series_program += generate_ramp(0, LOW, 2.5)
    ramp_series_program += generate_ramp(LOW+1, MEDIUM, 4)
    ramp_series_program += list(reversed(generate_ramp(0, MEDIUM-1, 1.25)))
    program = ramp_series_program
    pwm.run_program(program, debug=False)

    # small low-brightness breathing loop after the intro
    lowr = generate_ramp(0, 13, 1.25)
    lowr += list(reversed(lowr))[1:]
    #highr = generate_ramp(0, MEDIUM, 1.25)
    #highr += list(reversed(highr))[1:]
    #pwm.run_program(program, debug=False)
    while True:
        pwm.run_program(lowr, debug=False)
def led_proximity_simulation(args):
    """Simulate an approach: channels light bottom-up, each with its own compressed ramp."""
    steps = MAX_VALUE
    length = 2.66  # total seconds for one direction of the sweep
    # one program step per brightness step; every channel starts off
    program = [[(length / 2.0) / steps, [[c, 0, 0] for c in range(CHANNELS)]] for _ in range(steps)]
    # compress the range of 0-full from N/16 to 1 scaled to steps
    # 0 if step/steps < N/16
    # (steps - (N/16)*steps (1 - N/16)
    for channel in range(CHANNELS):
        channel_scaled = channel / float(CHANNELS)
        for step in range(steps):
            step_scaled = step / float(steps - 1)
            if step_scaled >= channel_scaled:
                # channel starts rising once the sweep passes its threshold,
                # reaching full brightness at the end of the sweep
                program[step][1][MAP[channel]] = MAP[channel], 0, int(round(MAX_VALUE * (step_scaled - channel_scaled) / (1 - channel_scaled)))
    # mirror the sequence (without repeating the peak) to fade back out
    program += list(reversed(program))[1:]
    pwm.run_program(program, debug=False)
def led_proximity(args):
    """Drive brightness from the distance sensor: closer object -> brighter LEDs."""
    # map: on = 1 - min(max(distance - 20, 0) / 130., 1.)
    RAMP_INTERVAL = 0.0
    RESOLUTION = int(MAX_VALUE * .15)

    def generate_ramp(peak, ramp_interval=RAMP_INTERVAL, duration_at=None):
        """Build an increasing 0..peak brightness ramp on all channels."""
        index = 0
        ramp_program = []
        while index <= peak:
            if duration_at is None:
                duration = ramp_interval / ((peak + 1) / RESOLUTION)
            else:
                duration = duration_at(index)
            value = index
            program_step = [duration, [[ALL_CHANNELS, 0, index]]]
            ramp_program.append(program_step)
            index += RESOLUTION
        return ramp_program

    program = generate_ramp(MAX_VALUE)
    readings = []
    median_readings = 15  # median-of-15 filter to reject sensor spikes
    while True:
        #vcc = adc.read_single_ended(1, pga=6144, sps=128)
        # rescale the ADC millivolt reading to the sensor's distance units
        distance = adc.read_single_ended(0, pga=1024, sps=1600) * 1024 / 3300.
        readings.append(distance)
        if len(readings) == median_readings:
            readings.sort()
            distance = readings[median_readings/2]
            readings = []
        else:
            continue
        #print int(round(distance)), "cm" # , int(round(vcc)), "mv"
        #if abs(distance2 - distance) > 40:
        #    print "[reject]"
        #    #distance = 10000
        #    continue
        #else:
        #    distance = (distance + distance2) / 2.
        #    print
        # <= 20 -> fully on; >= 97 -> off; linear in between
        on = 1 - min(max(distance - 20, 0) / 77., 1.)
        step = int(round((len(program) - 1) * on))
        #print int(round(distance)), "cm" , round(on, 2)
        pwm.run_program(program[step:step+1])
# command-line program name -> handler, consumed by run_program()/usage()
programs = {
    "portray": led_portray,
    "pulse": led_pulse,
    "pulse_fast": led_pulse_fast,
    "proximity_simulation": led_proximity_simulation_sequenced,
    "proximity": led_proximity,
    "map_assist": led_map_assist,
    "climb_test": led_climb_test,
    "mindwave": led_mindwave,
    "ekg": led_ekg,
}
def run_program(name, args):
    """Look up *name* in the program table and run it, resetting the PWM
    driver before starting and again when the program ends or fails."""
    handler = programs.get(name)
    if handler is None:
        usage()
        return
    pwm.reset()
    try:
        handler(args)
    finally:
        pwm.reset()
def usage():
    """Print usage and the list of available program names to stderr."""
    print >>sys.stderr, "usage:", sys.argv[0], "<program>"
    print >>sys.stderr, "\n".join(programs.keys())
# CLI entry point: first argument selects the program, the rest are passed on
if __name__ == '__main__':
    if len(sys.argv) <= 1:
        usage()
    else:
        args = sys.argv[1:]
        name = args[0]
        run_program(name, args[1:])
| [
"phillip.sitbon@intel.com"
] | phillip.sitbon@intel.com |
f2a547bd33931d78d49c2ff6df5e8ee253c7dca3 | 74a6cd43880120d2c87f948a1a552dfe0ebbbeda | /zara/urls.py | 39f865b932e8dff444a651005d2c2fd1ea3ee40c | [] | no_license | oscar84922tw/zara_web | b4a0ba3832976eb9f45f60575e0c4a164e2276e1 | d84f37a69e583bfae3c90186458e35c630896603 | refs/heads/master | 2021-01-13T15:48:43.897644 | 2017-01-02T14:26:06 | 2017-01-02T14:26:06 | 76,882,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | """zara URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from female.views import main, design_under_production, _fashion, _fb, _google, _list, _product, _region, _shortage, _ship, _produce
# URL routing table: Django admin plus the female app's views
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', main),  # landing page
    url(r'^design_under_production/$', design_under_production),
    url(r'^fashion/$', _fashion),
    url(r'^fb/$', _fb),
    url(r'^google/$', _google),
    url(r'^list/$', _list),
    url(r'^product/$', _product),
    url(r'^region/$', _region),
    # NOTE(review): the three routes below omit the trailing slash used by
    # the routes above -- confirm the asymmetry is intentional
    url(r'^shortage$', _shortage),
    url(r'^ship$', _ship),
    url(r'^produce$', _produce)
]
| [
"oscar84922tw@gmail.com"
] | oscar84922tw@gmail.com |
a1799316414f9d43f8aefc69a8c774811bcec9cf | d1990bd7fef37901a4b0807cb852badcf894a8cb | /algorithms/SimulatedAnnealing.py | a0a7bb27eb36c2e26902fada5fa1934cbbae82bf | [] | no_license | Jara555/Proteins | 69ff702aa7f58c99431674287c69638c09ffe754 | c9575c607e8d3d9588f5bcfdcfddcd80dc2e0f3a | refs/heads/master | 2020-03-10T06:30:25.326450 | 2018-05-28T15:15:35 | 2018-05-28T15:15:35 | 129,241,273 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,246 | py | import copy
import random
from algorithms.HillClimber import HillClimber
class SimulatedAnnealing(HillClimber):
    """ Subclass of HillClimber algorithm:
    Implements SimulatedAnnealing algorithm in order to efficiently fold a protein.
    This algorithm tries to escape local maxima/minima by calculating a probability score
    for the acceptance of degradations. Degradations can be in the form of overlap or
    stability. Together with a temperature which is cooling down every iteration,
    the probability scores for both degradations can be calculated."""

    def __init__(self, protein, writeCsv, maxIterations, startPattern):
        """ Set and initiate all properties.

        :param protein: protein to be fold
        :param writeCsv: writes solutions to .csv file when ON
        :param maxIterations: stop after maxIterations
        :param startPattern: start pattern of algorithm
        """

        HillClimber.__init__(self, protein, writeCsv, maxIterations, startPattern)
        self.name = "SimulatedAnnealing"

        # set overlap tracker and maximal allowed overlap
        self.trackOverlap = 0
        self.maxOverlap = 50

        # set starting temperature
        self.temperature = 100

    def coolDown(self):
        """ Calculates the temperature for every iteration. The cooling down of
        the temperature follows a linear pattern."""

        # NOTE(review): algebraically this reduces to
        # self.temperature = self.maxIterations, i.e. the temperature jumps
        # to a constant after the first call rather than decreasing
        # gradually -- confirm the intended cooling schedule
        self.temperature = self.temperature - (self.temperature - self.maxIterations)

    def handleOverlap(self):
        """ Overrides the standard method of the Hill climber class.
        Instead: Does not set pattern back to temp pattern when overlap
        is observed, accepts overlap based on a probability score """

        # track overlap
        self.trackOverlap += 1

        # calculate probability score and random probability
        probabilityScore = self.getOverlapProbability()
        randomProbability = random.random()

        # only accepts overlap if probability is higher than random
        if probabilityScore < randomProbability:
            # reset tracker
            self.trackOverlap = 0

            # go back to best pattern (without overlap)
            self.foldPattern = copy.copy(self.bestPattern)
            self.protein.fold(self.foldPattern)

    def handleDegradation(self):
        """ Overrides the standard method of the Hill climber class.
        Instead: Does not set pattern back to temp pattern when a degradation in
        stability is observed, but allows a degradation in stability for
        maxDegrade times in a row """

        # calculates probability score and random probability
        probabilityScore = self.getStabilityProbability()
        randomProbability = random.random()

        # only accepts overlap if probability is higher than random
        if probabilityScore < randomProbability:
            # reset tracker
            self.trackOverlap = 0

            # go back to best pattern (without overlap)
            self.foldPattern = copy.copy(self.bestPattern)
            self.protein.fold(self.foldPattern)

    def getOverlapProbability(self):
        """ Calculates the probability of acceptance, based on an overlap score and
        the current temperature. The overlap score depends on the amount of overlap
        tracked in a row and the maximal allowed overlap.
        :return: probability of acceptance """

        # score of overlap tracker and max allowed overlap
        score = (self.maxOverlap - self.trackOverlap) / self.maxOverlap

        # probability is based on overlap score and temp
        return (score * self.temperature) / 100

    def getStabilityProbability(self):
        """ Calculates the probability of acceptance, based on an stability score and
        the current temperature. The stability score depends on the difference between
        the new stability and the best found stability.
        :return: probability of acceptance """

        # score of difference in best stability and current stability score
        score = 1 - ((self.bestStability - self.protein.stabilityScore)
                     / self.bestStability)

        # probability is based on stability score and temp
        return (score * self.temperature) / 100
| [
"jaralinders@gmail.com"
] | jaralinders@gmail.com |
91b1725adfaa4f3636377b6571089cf7925ad856 | 05e634a232574f676434dfa8e4183f3d0a1a4bc9 | /tutorials/pp-series/HRNet-Keypoint/lib/metrics/json_results.py | 9e0ceea69b6c0e57ed0f0224ad12a02078870de0 | [
"Apache-2.0"
] | permissive | PaddlePaddle/models | 67ac00d93c5255ac64a9d80ae5be2e8927e47cee | 8042c21b690ffc0162095e749a41b94dd38732da | refs/heads/release/2.4 | 2023-09-04T15:23:59.543625 | 2023-07-20T11:54:16 | 2023-07-20T11:54:16 | 88,868,842 | 7,633 | 3,597 | Apache-2.0 | 2023-09-05T23:23:54 | 2017-04-20T13:30:15 | Python | UTF-8 | Python | false | false | 5,121 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import numpy as np
__all__ = [
'get_det_res', 'get_det_poly_res', 'get_seg_res', 'get_solov2_segm_res',
'get_keypoint_res'
]
def get_det_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
    """Convert batched detection output into COCO-style result dicts.

    bboxes holds rows [label, score, xmin, ymin, xmax, ymax] for the whole
    batch; bbox_nums gives how many rows belong to each image. Rows with a
    negative label are padding and are skipped. *bias* is added to the
    width/height when converting to xywh.
    """
    results = []
    cursor = 0
    for batch_idx, num_dets in enumerate(bbox_nums):
        img_id = int(image_id[batch_idx][0])
        for det in bboxes[cursor:cursor + num_dets]:
            label_id, score, xmin, ymin, xmax, ymax = det.tolist()
            if int(label_id) < 0:
                continue
            results.append({
                'image_id': img_id,
                'category_id': label_to_cat_id_map[int(label_id)],
                'bbox': [xmin, ymin, xmax - xmin + bias, ymax - ymin + bias],
                'score': score
            })
        cursor += num_dets
    return results
def get_det_poly_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
    """Convert rotated-box detections (4 corner points) into COCO-style dicts.

    Same row-consumption contract as `get_det_res`: negative class ids are
    skipped but still consume a row. `bias` is accepted for signature parity
    but is not applied to the polygon coordinates.
    """
    det_res = []
    cursor = 0
    for img_idx, det_count in enumerate(bbox_nums):
        cur_image_id = int(image_id[img_idx][0])
        for _ in range(det_count):
            num_id, score, x1, y1, x2, y2, x3, y3, x4, y4 = bboxes[cursor].tolist()
            cursor += 1
            if int(num_id) < 0:
                continue
            det_res.append({
                'image_id': cur_image_id,
                'category_id': label_to_cat_id_map[int(num_id)],
                'bbox': [x1, y1, x2, y2, x3, y3, x4, y4],
                'score': score
            })
    return det_res
def get_seg_res(masks, bboxes, mask_nums, image_id, label_to_cat_id_map):
    """Convert instance masks into COCO RLE segmentation result dicts.

    Consumes `mask_nums[i]` mask/box rows per image; label -1 marks padding
    rows, which are skipped but still consume a row.
    """
    import pycocotools.mask as mask_util
    seg_res = []
    cursor = 0
    for img_idx, det_count in enumerate(mask_nums):
        cur_image_id = int(image_id[img_idx][0])
        for _ in range(det_count):
            mask = masks[cursor].astype(np.uint8)
            score = float(bboxes[cursor][1])
            label = int(bboxes[cursor][0])
            cursor += 1
            if label == -1:
                continue
            rle = mask_util.encode(
                np.array(
                    mask[:, :, None], order="F", dtype="uint8"))[0]
            # RLE counts come back as bytes; JSON needs str on Python 3.
            if six.PY3 and 'counts' in rle:
                rle['counts'] = rle['counts'].decode("utf8")
            seg_res.append({
                'image_id': cur_image_id,
                'category_id': label_to_cat_id_map[label],
                'segmentation': rle,
                'score': score
            })
    return seg_res
def get_solov2_segm_res(results, image_id, num_id_to_cat_id_map):
    """Convert a SOLOv2 batch output dict into COCO RLE segmentation dicts.

    Expects `results` to carry 'segm' (stacked masks), 'cate_label' and
    'cate_score' arrays; returns None when there are no instances.
    """
    import pycocotools.mask as mask_util
    segm_res = []
    # for each batch
    segms = results['segm'].astype(np.uint8)
    clsid_labels = results['cate_label']
    clsid_scores = results['cate_score']
    lengths = segms.shape[0]
    im_id = int(image_id[0][0])
    # NOTE(review): the `segms is None` half of this check is dead -- if
    # results['segm'] were None, .astype above would already have raised.
    if lengths == 0 or segms is None:
        return None
    # for each sample
    # NOTE(review): range(lengths - 1) skips the final instance; this looks
    # like an off-by-one inherited from upstream -- confirm before changing.
    for i in range(lengths - 1):
        clsid = int(clsid_labels[i])
        catid = num_id_to_cat_id_map[clsid]
        score = float(clsid_scores[i])
        mask = segms[i]
        segm = mask_util.encode(np.array(mask[:, :, np.newaxis], order='F'))[0]
        # RLE counts are bytes; JSON serialization needs str.
        segm['counts'] = segm['counts'].decode('utf8')
        coco_res = {
            'image_id': im_id,
            'category_id': catid,
            'segmentation': segm,
            'score': score
        }
        segm_res.append(coco_res)
    return segm_res
def get_keypoint_res(results, im_id):
    """Convert keypoint predictions into COCO keypoint annotation dicts.

    `results['keypoint'][i]` is a (kpts, scores) pair per image; each kpt is
    flattened to [x1, y1, v1, x2, y2, v2, ...] and a tight bbox/area is
    derived from the x/y coordinates.
    """
    anns = []
    preds = results['keypoint']
    for idx in range(im_id.shape[0]):
        image_id = im_id[idx].item()
        kpts, scores = preds[idx]
        for kpt, score in zip(kpts, scores):
            flat = kpt.flatten()
            xs = flat[0::3]
            ys = flat[1::3]
            x0 = np.min(xs).item()
            x1 = np.max(xs).item()
            y0 = np.min(ys).item()
            y1 = np.max(ys).item()
            anns.append({
                'image_id': image_id,
                'category_id': 1,  # XXX hard code
                'keypoints': flat.tolist(),
                'score': float(score),
                'area': (x1 - x0) * (y1 - y0),
                'bbox': [x0, y0, x1 - x0, y1 - y0],
            })
    return anns
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
b70bea9fac76d8a55ec92e8c4c7cd3d8d9f7457f | be6baceaf8ffbee463da7a6c919b23a05324b38c | /scripts/training/tests_runner.py | b09039f24e54b64f34b9fb7c6aa69a7d56fc6456 | [] | no_license | dambac/NTU | dc08c7aced94997137bdbab78dfe642be1c206f8 | 693919e7e31e4eab0865f277889e754cd0ae65e1 | refs/heads/master | 2023-07-27T19:10:47.676602 | 2021-09-05T19:28:12 | 2021-09-05T19:28:12 | 403,395,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,266 | py | import json
from scripts.definitions.models.model_creator import create_model
from scripts.definitions.params import RunParams, get_t_model_params, Combination
from scripts.definitions.results import DatasetResults, IterationResults, RunResults, CombinationResults
from scripts.definitions.models.optimizers import create_optimizer
from scripts.prepare_data_for_training.samples.convert_to_full_samples import samples_to_full_samples
from scripts.training.fitter import FitInput, Fitter, FitOutput
from scripts.prepare_data_for_training.samples.samples_v2 import *
from scripts.utils.constants import C
from scripts.utils.serialization import read_json
import time
class FullSamplesCache:
    """Memo of already-converted (full_samples, sample_ids) pairs per t_model."""

    def __init__(self):
        self.samples_by_t_model = {}

    def has_for_t_model(self, t_model_name):
        """Return True when samples for this t_model were already stored."""
        return t_model_name in self.samples_by_t_model

    def get_for_t_model(self, t_model_name) -> "List[FullSampleV2]":
        """Return the cached (samples, ids) pair for the given t_model."""
        return self.samples_by_t_model[t_model_name]

    def store_for_t_model(self, t_model_name, samples_and_ids):
        """Cache the (samples, ids) pair under the given t_model name."""
        self.samples_by_t_model[t_model_name] = samples_and_ids
class TestRunner:
    """Trains/evaluates every (model, t_model) combination for one run.

    Reads samples per `run_params`, splits them into train/valid/test by a
    precomputed split file, trains `iterations` models per combination and
    writes results.json plus the best model checkpoints under the run's
    output directory.
    """
    def __init__(self, run_params: RunParams):
        self.run_params = run_params
        self.train_params = run_params.train_params
        self.dataset_params = run_params.dataset_params
        self.samples = self._get_samples()
        labels = [s.label for s in self.samples]
        self.labels_size = len(set(labels))
        self.full_samples_cache = FullSamplesCache()
        self.output_dir = f"{C.RESULTS_DIR}/{run_params.output_dir}/{run_params.name}"
        Path(self.output_dir).mkdir(exist_ok=True, parents=True)
        self.iterations = run_params.iterations
        self.combinations = self._prepare_all_combinations()
    def run(self):
        """Run every combination and persist the aggregated results JSON."""
        print(f"Running: {self.run_params.name}")
        combinations_results = []
        for c in self.combinations:
            combinations_results.append(self._run_combination(c))
        results = RunResults(self.run_params, combinations_results)
        results.to_json_dict()
        with open(f"{self.output_dir}/results.json", "w") as file:
            json.dump(results.to_json_dict(), file, indent=2)
        return results
    def _get_samples(self):
        """Load samples from the configured CSV; fail loudly when missing."""
        if self.dataset_params.samples_file_name:
            samples = SampleV2.from_csv(self.dataset_params.samples_file_name)
        else:
            raise Exception("No samples file")
        # if self.dataset_params.samples_limit:
        #     samples = get_n_random(samples, self.dataset_params.samples_limit)
        return samples
    def _run_combination(self, combination):
        """Train `iterations` models for one (model, t_model) combination."""
        """
        Create output dir for combination
        """
        combination_dir = f"{self.output_dir}/{combination.model}-{combination.t_model}"
        Path(combination_dir).mkdir(exist_ok=True)
        """
        Create samples for current t_model
        """
        model = combination.model
        t_model_params = get_t_model_params(combination.t_model)
        samples: List[FullSampleV2]
        samples, samples_ids = self._get_full_samples_for_combination(combination)
        # if self.dataset_params.sets_absolutes:
        #     ids_subsets = split_into_sets_absolute(samples_ids, self.dataset_params.sets_absolutes)
        # else:
        #     ids_subsets = split_into_sets(samples_ids, self.dataset_params.sets_ratios)
        ids_subsets = self._get_samples_subsets_ids()
        # NOTE(review): leftover debug variable; unused.
        z = 1
        """
        Save labels distributions
        """
        dataset_results = self._prepare_dataset_results(samples, ids_subsets)
        """
        Run iterations
        """
        iterations_results = []
        for it in range(self.iterations):
            print(f"Running iteration: {it}/{self.iterations - 1}")
            """
            Setup
            """
            epochs = self.train_params.epochs
            batch_size = self.train_params.batch_size
            # subsets: [0]=train, [1]=valid, [2]=test
            train_ds = SamplesDatasetV2(samples, ids_subsets[0])
            valid_ds = SamplesDatasetV2(samples, ids_subsets[1])
            test_ds = SamplesDatasetV2(samples, ids_subsets[2])
            train_loader = torch.utils.data.DataLoader(train_ds, batch_size=batch_size, shuffle=True)
            valid_loader = torch.utils.data.DataLoader(valid_ds, batch_size=batch_size, shuffle=False)
            test_loader = torch.utils.data.DataLoader(test_ds, batch_size=batch_size, shuffle=False)
            nn = create_model(model, t_model_params.output_size)
            opt = create_optimizer(self.run_params.optimizer, nn)
            # Class weights: explicit from params, otherwise inverse-frequency
            # weights computed from the binary label counts (assumes labels
            # are 0/1 here -- TODO confirm for multi-class runs).
            if self.dataset_params.labels_weights:
                weights = torch.FloatTensor(self.dataset_params.labels_weights).to('cuda')
                loss = torch.nn.CrossEntropyLoss(weights)
            else:
                label_0_size = len([s for s in samples if s.label == 0])
                label_1_size = len([s for s in samples if s.label == 1])
                max_label_size = max(label_0_size, label_1_size)
                labels_weights = [max_label_size / label_0_size,
                                  max_label_size / label_1_size]
                weights = torch.FloatTensor(labels_weights).to('cuda')
                loss = torch.nn.CrossEntropyLoss(weights)
            fit_input = FitInput(
                nn=nn,
                optim=opt,
                train_loss=loss,
                dev_loss=loss,
                test_loss=loss,
                train_ds=train_ds,
                valid_ds=valid_ds,
                train_loader=train_loader,
                valid_loader=valid_loader,
                test_loader=test_loader,
                epochs=epochs,
                labels_size=self.labels_size
            )
            """
            Fit nn
            """
            start = time.time()
            fit_output: FitOutput = Fitter(fit_input).fit()
            stop = time.time()
            execution_time = stop - start
            """
            Save best model
            """
            best_model = fit_output.best_model
            best_model_file = f"{combination_dir}/best_model_{it}.pt"
            torch.save(best_model, best_model_file)
            """
            Save iterations results
            """
            fit_results = fit_output.fit_results
            iteration_results = IterationResults(it, fit_results, best_model_file, execution_time)
            iterations_results.append(iteration_results)
        return CombinationResults(combination, dataset_results, iterations_results)
    def _get_samples_subsets_ids(self):
        """Load the precomputed sample-id split (set name -> ids) as a list.

        NOTE(review): relies on the JSON object's insertion order matching
        train/valid/test -- confirm the split files are written that way.
        """
        samples_split_name = self.dataset_params.samples_split
        samples_split = read_json(f"{C.DistributionsAndSets.SETS_SPLITS_SAMPLES}/{samples_split_name}.json")
        subsets = []
        for set_name, sample_ids in samples_split.items():
            subsets.append(sample_ids)
        return subsets
    def _get_full_samples_for_combination(self, combination):
        """Return (samples, ids) for the combination's t_model, memoized."""
        t_model_name = combination.t_model
        t_model_params = get_t_model_params(t_model_name)
        if self.full_samples_cache.has_for_t_model(t_model_name):
            samples, samples_ids = self.full_samples_cache.get_for_t_model(t_model_name)
        else:
            samples, samples_ids = samples_to_full_samples(self.samples, t_model_params.views_path)
            self.full_samples_cache.store_for_t_model(t_model_name, (samples, samples_ids))
        return samples, samples_ids
    def _prepare_dataset_results(self, samples: List[FullSampleV2], ids_subsets) -> DatasetResults:
        """Summarize label counts/distributions overall and per subset."""
        labels_count = len(samples)
        labels_distribution = self._count_number_of_samples_per_label(samples)
        train_labels = ids_subsets[0]
        train_labels_count, train_labels_distribution = self._get_subset_count_and_dist(samples, train_labels)
        valid_labels = ids_subsets[1]
        valid_labels_count, valid_labels_distribution = self._get_subset_count_and_dist(samples, valid_labels)
        test_labels = ids_subsets[2]
        test_labels_count, test_labels_distribution = self._get_subset_count_and_dist(samples, test_labels)
        return DatasetResults(
            labels_count, labels_distribution,
            train_labels, train_labels_count, train_labels_distribution,
            valid_labels, valid_labels_count, valid_labels_distribution,
            test_labels, test_labels_count, test_labels_distribution
        )
    def _prepare_all_combinations(self):
        """Cartesian product of configured models and t_models."""
        combinations = []
        for model in self.run_params.models:
            for t_model in self.run_params.t_models:
                combinations.append(Combination(model, t_model))
        return combinations
    def _get_subset_count_and_dist(self, samples: List[FullSampleV2], subset_ids):
        """Return (count, label distribution) for the samples in subset_ids."""
        subset_samples = [s for s in samples if s.id in subset_ids]
        return len(subset_samples), self._count_number_of_samples_per_label(subset_samples)
    def _count_number_of_samples_per_label(self, samples: List[FullSampleV2]):
        """Histogram of labels; labels are tensors, hence .item()."""
        labels_dict = {}
        for s in samples:
            label = s.label.item()
            if label not in labels_dict:
                labels_dict[label] = 0
            labels_dict[label] = labels_dict[label] + 1
        return labels_dict
| [
"damian.baciur@tink.se"
] | damian.baciur@tink.se |
c22c2d6937f2f8e7d0605c8690d553ce6add5b2e | 2aac13d0048f12ac877af92a93f73c4ef1311d6e | /mrchunks/process.py | 49a1ec417764052f224f2e231e044e3ae6be2ef8 | [] | no_license | victorpoluceno/mrchunks | 18250e2bf0be375de48e01b2a42976285d556e85 | 8328ed3d836144ccc563b135d78f59e50ff4104b | refs/heads/master | 2021-01-15T22:18:39.091832 | 2015-11-22T23:00:53 | 2015-11-22T23:00:53 | 32,928,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,760 | py | import zmq
from mrchunks.concurrent import Engine, switch
from mrchunks.mailbox import Mailbox
from mrchunks.serializer import decode, encode
class Server:
    """Message router: receives envelops over a TCP REP socket and forwards
    each to the recipient process over a per-pid IPC REQ socket.

    Instances are run inside an Engine worker via __call__.
    """
    def __init__(self):
        self._context = zmq.Context()
    def __call__(self, *args, **kwargs):
        # Entry point when scheduled by the Engine: bind, then pump forever.
        self.listen(kwargs['address'])
        while True:
            envelop = self.get()
            self.send(envelop)
    def get(self):
        """Cooperatively receive one envelop from the TCP socket.

        Polls with a 100 ms timeout and yields to the scheduler (switch())
        whenever nothing is readable, so other greenlets can make progress.
        """
        while True:
            socks = dict(self._poller.poll(100))
            if socks:
                if socks.get(self._socket) != zmq.POLLIN:
                    switch()
                    continue
                data = self._socket.recv()
                # FIXME may be we need to ack after sendo ipc socket
                self._socket.send(b"OK+")
                break
            else:
                switch()
        return decode(data)
    def send(self, envelop):
        """Forward an envelop to the recipient's IPC socket (/tmp/<pid>)."""
        sender, recipient, message = envelop
        # recipient is (host, port, pid); only the pid selects the IPC path.
        _, _, p = recipient
        socket = self._context.socket(zmq.REQ)
        socket.connect("ipc:///tmp/%d" % (p,))
        socket.send(encode(envelop), zmq.NOBLOCK)
    def listen(self, address):
        """Bind the REP socket on the given (host, port) and register it
        with a poller for non-blocking receives."""
        self._socket = self._context.socket(zmq.REP)
        address, port = address
        self._socket.bind("tcp://*:%s" % (port,))
        self._poller = zmq.Poller()
        self._poller.register(self._socket, zmq.POLLIN)
class Arbiter:
    """Owns the worker engine, runs the router Server and hands out pids."""

    def __init__(self, address, number_of_workers=1):
        self._next_pid = 0
        self._address = address
        self._engine = Engine(number_of_workers)
        self._listen()

    def _get_next_pid(self):
        """Allocate the next process id as address + (counter,)."""
        pid, self._next_pid = self._next_pid, self._next_pid + 1
        return self._address + (pid,)

    def _listen(self):
        # Schedule the message router on the engine, bound to our address.
        self._engine.apply(Server(), address=self._address)

    def spawn(self, start, *args, **kwargs):
        """Start a new Process running `start`; return its pid."""
        pid = self._get_next_pid()
        self._engine.apply(Process(pid, start), *args, **kwargs)
        return pid

    def run(self, forever=True):
        """Run the engine's scheduler loop."""
        self._engine.run(forever)
def get_arbiter(*args, **kwargs):
    # Convenience factory: forwards all arguments to the Arbiter constructor.
    return Arbiter(*args, **kwargs)
class Process(object):
    """A spawned actor: pairs a user-supplied entry point with a mailbox."""

    def __init__(self, pid, start):
        self.pid = pid
        self._start = start

    def __call__(self, *args, **kwargs):
        # Executed inside an Engine worker: bring up the mailbox first,
        # then hand control to the user entry point with self as argument.
        self._mailbox = Mailbox()
        self._mailbox.run(self.pid)
        self._start(self, *args, **kwargs)

    def send(self, recipient, message):
        """Deliver a message to another process via the mailbox."""
        print('Sending message: {} from: {} to: {}'.format(message, self.pid,
                                                           recipient))
        self._mailbox.send(recipient, message)

    def receive(self):
        """Block until an envelop arrives in this process' mailbox."""
        print('Receiving...')
        return self._mailbox.receive()
| [
"victorpoluceno@gmail.com"
] | victorpoluceno@gmail.com |
5bcb5224d286df6b18619cd81e4a38ee67d7c03a | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.2/tests/modeltests/user_commands/__init__.py | f026fbc3a9ca470bd92710829f6f0b97924d31b1 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.2/tests/modeltests/user_commands/__init__.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
4777b4a79dd2980f4990197243427c927d3e3cbb | d55a83245f0635959694597c65e2391c6f0ce179 | /57.py | b43f168e7c5906b906c64d9ffaf24cbfd0646aaa | [] | no_license | EmilHernvall/projecteuler | 2cc41492054dba2a88e22cc9b804b6b55c6be54b | e56a4514b8758373c5d83f50cccbc93717cd3f3f | refs/heads/master | 2021-01-21T04:25:00.967621 | 2016-08-10T14:53:47 | 2016-08-10T14:53:47 | 1,697,265 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | a = 1
def count_expansions(iterations=999):
    """Project Euler 57: count sqrt(2) continued-fraction expansions whose
    numerator has more digits than the denominator.

    The k-th expansion is (a + b) / b where (a, b) starts at (1, 2) and
    advances via a, b = b, 2*b + a (so 3/2, 7/5, 17/12, ...).  The original
    script checked the first 999 expansions (range(1, 1000)); that count is
    now a parameter.
    """
    a, b = 1, 2
    count = 0
    for _ in range(iterations):
        if len(str(a + b)) > len(str(b)):
            count += 1
        # tuple assignment replaces the original newA/newB temporaries
        a, b = b, 2 * b + a
    return count


# Same output as the original script ("count: <n>"), but only when run
# directly; the module is now importable without side effects.  The print
# call form is valid Python 2 and 3 (single argument prints identically).
if __name__ == "__main__":
    print("count: " + str(count_expansions()))
| [
"emil@quench.at"
] | emil@quench.at |
530ae96e854fca34aa8899b13ba869d5d6b1f658 | 019fd2c29b8239d7b0a3906cfbdddfd440362417 | /asset/google/cloud/asset_v1beta1/gapic/asset_service_client_config.py | 340e89de38b2510f4f5c219239170706bfdfdc83 | [
"Apache-2.0"
] | permissive | tswast/google-cloud-python | 1334d26cdb994293f307d889251d7daef5fcb826 | d897d56bce03d1fda98b79afb08264e51d46c421 | refs/heads/master | 2021-06-10T17:40:06.968584 | 2020-01-11T17:41:29 | 2020-01-11T17:41:29 | 58,775,221 | 1 | 1 | Apache-2.0 | 2019-04-10T17:09:46 | 2016-05-13T22:06:37 | Python | UTF-8 | Python | false | false | 1,179 | py | config = {
    # GAPIC client retry/timeout settings, keyed by fully-qualified service.
    "interfaces": {
        "google.cloud.asset.v1beta1.AssetService": {
            # Which gRPC status codes each retry class treats as retryable.
            "retry_codes": {
                "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
                "non_idempotent": [],
            },
            # Exponential-backoff parameters; all durations in milliseconds.
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 100,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 20000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 20000,
                    "total_timeout_millis": 600000,
                }
            },
            # Per-RPC timeout and which retry class / params it uses.
            "methods": {
                "ExportAssets": {
                    "timeout_millis": 600000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default",
                },
                "BatchGetAssetsHistory": {
                    "timeout_millis": 600000,
                    "retry_codes_name": "idempotent",
                    "retry_params_name": "default",
                },
            },
        }
    }
}
| [
"noreply@github.com"
] | tswast.noreply@github.com |
05aa84a57259db94136094b6e59f0d92bc22ce49 | 9a0dcf0d7d932c73dcc2842f0efa6a2ee2e6bf69 | /core/feature_exraction/example_2/gen_feature.py | 77531a9be64e2d3f872f99da86f33c5f96c4a56b | [
"BSD-2-Clause"
] | permissive | kabengka/dds-avec2019 | 89e248017552f4bef4dd881b4959a285997902f8 | 9a0ee86bddf6c23460a689bde8d75302f1d5aa45 | refs/heads/master | 2023-04-29T00:47:48.848958 | 2021-05-28T13:45:03 | 2021-05-28T13:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | """
Use the feature originally in COVAREP and FORMANT
Extract some statistical features of them;
Audio feature with statistical feature are 547.
"""
import itertools
import pandas as pd
from concurrent.futures import ThreadPoolExecutor, as_completed
from common.sql_handler import SqlHandler
from common.stats_features import StatsFea
from common.log_handler import get_logger
from global_values import *
import config
logger = get_logger()
stats_fea = StatsFea()
def gen_sigle_fea(fold):
    """Build the statistical feature row for one subject folder.

    Returns [subject_id, stats for every voiced COVAREP column except VUV,
    stats for every FORMANT column].
    """
    fea_item = list()
    fea_item.append(fold[:-1])
    path = f"{config.data_dir}avec/{fold}P/{fold}{SUFFIX['covarep']}"
    covarep = pd.read_csv(path, header=None)
    covarep.columns = COVAREP_COLUMNS
    path = f"{config.data_dir}avec/{fold}P/{fold}{SUFFIX['formant']}"
    formant = pd.read_csv(path, header=None)
    formant.columns = FORMANT_COLUMNS
    # keep only voiced frames
    covarep = covarep[covarep['VUV'] == 1]
    for fea in COVAREP_COLUMNS:
        # BUGFIX: was `if fea is 'VUV'` -- identity comparison against a
        # string literal only works by accident of CPython interning (and
        # raises SyntaxWarning on 3.8+); use equality.
        if fea == 'VUV':
            continue
        fea_item += stats_fea.gen_fea(covarep[fea].values)
    for fea in FORMANT_COLUMNS:
        fea_item += stats_fea.gen_fea(formant[fea].values)
    logger.info(f'{fold} has been extrated audio feature in exp2!..')
    return fea_item
def gen_fea():
    """Extract per-subject audio features in parallel and store them in DB.

    Side effect: mutates the module-level COVAREP_COLUMNS list (removes
    'VUV', appends FORMANT_COLUMNS) to build the output column names.
    """
    sql_handler = SqlHandler()
    audio_value = list()
    with ThreadPoolExecutor(max_workers=30) as executor:
        task = [executor.submit(gen_sigle_fea, fold) for fold in PREFIX]
        for future in as_completed(task):
            # BUGFIX: was a bare `except:` -- that also swallows
            # KeyboardInterrupt/SystemExit and hides the failure reason.
            # Keep the best-effort skip, but only for real errors, logged.
            try:
                fea_item = future.result()
                audio_value.append(fea_item)
            except Exception:
                logger.exception('feature extraction failed for one fold')
                continue
    COVAREP_COLUMNS.remove('VUV')
    audio_fea = list()
    audio_fea.append('ID')
    COVAREP_COLUMNS.extend(FORMANT_COLUMNS)
    for a_fea, s_fea in itertools.product(COVAREP_COLUMNS, stats_fea.columns):
        audio_fea.append(a_fea + '_' + s_fea)
    assert len(audio_value[0]) == len(audio_fea)
    audio_df = pd.DataFrame(audio_value, columns=audio_fea)
    sql_handler.execute(f'drop table if exists {config.tbl_exp2_audio_fea};')
    sql_handler.df_to_db(audio_df, config.tbl_exp2_audio_fea)
    logger.info('audio feature exp2 has been stored!')
| [
"yangzhikai@cug.edu.cn"
] | yangzhikai@cug.edu.cn |
4760ffe77c6c60d83d7a8fba574b4b2058d34dac | 8ed3e03099807de1a7c35f8ae0b6b50ec204bd6f | /hubstorage/itemwriter.py | 78a57c934a5c930aff93dd8a7dcf75dda6f58b37 | [] | no_license | omab/python-hubstorage | 8dd96c759e0fcd3c18cad7fbec38458edb589da7 | e7dd64c6b60bd569a0ff191661e19e913d71bbfd | refs/heads/master | 2021-01-17T23:10:30.998014 | 2012-12-24T17:45:47 | 2012-12-24T17:45:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | import atexit, warnings, socket
import json, time, logging, requests
from Queue import Queue, Empty
from threading import Thread
logger = logging.getLogger('hubstorage')
class ItemWriter(object):
    """Background-threaded, batching uploader of JSON items to an endpoint.

    Items queue up (bounded at `chunk_size`) and a daemon worker thread
    POSTs them in chunks, retrying failed uploads forever with a fixed
    delay.  Python 2 code (Queue module, xrange, old requests API).
    """
    # max items per upload batch and max queued items
    chunk_size = 1000
    # seconds to sleep between upload retries
    retry_wait_time = 5.0
    # sentinel pushed on the queue to stop the worker thread
    StopThread = object()
    def __init__(self, client, url):
        self.client = client
        self.url = url
        self.queue = Queue(self.chunk_size)
        self.thread = Thread(target=self._worker)
        self.thread.daemon = True
        self.thread.start()
        self.closed = False
        atexit.register(self._atexit)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def __del__(self):
        if not self.closed:
            self.close()
    def __repr__(self):
        return "ItemWriter(%r)" % self.url
    def write_item(self, item):
        """Serialize `item` to JSON and enqueue it for upload."""
        jsonitem = json.dumps(item)
        self.write_json_item(jsonitem)
    def write_json_item(self, jsonitem):
        """Enqueue an already-serialized JSON string (blocks when full)."""
        if self.closed:
            raise RuntimeError("ItemWriter already closed")
        self.queue.put(jsonitem)
    def close(self):
        """Flush remaining items and stop the worker thread (blocking)."""
        self.closed = True
        self.queue.put(self.StopThread)
        self.thread.join()
    def _worker(self):
        """Worker loop: drain the queue into chunks and upload each chunk.

        Retries an upload indefinitely on network/HTTP errors; `offset`
        tracks how many items have been handed to the server so far.
        """
        offset = 0
        closing = False
        while not closing:
            # Block for the first item of the next chunk.
            item = self.queue.get()
            if item is self.StopThread:
                break
            items = [item]
            try:
                # Opportunistically fill the chunk without blocking.
                for _ in xrange(self.chunk_size-1):
                    try:
                        item = self.queue.get_nowait()
                        if item is self.StopThread:
                            closing = True
                            break
                        items.append(item)
                    except Empty:
                        break
                # Retry the chunk until the server accepts it.
                while True:
                    try:
                        self._upload_items(items, offset)
                        break
                    except (socket.error, requests.RequestException) as e:
                        if isinstance(e, requests.HTTPError):
                            r = e.response
                            msg = "[HTTP error %d] %s" % (r.status_code, r.text.rstrip())
                        else:
                            msg = str(e)
                        logger.warning("Failed writing data %s: %s", self.url, msg)
                        time.sleep(self.retry_wait_time)
            finally:
                # Mark queue work done even on failure so close() can join.
                for _ in items:
                    self.queue.task_done()
                offset += len(items)
    def _upload_items(self, items, offset):
        """POST one newline-delimited chunk; raise on any HTTP error.

        NOTE(review): `prefetch=` is a pre-1.0 requests parameter -- this
        pin predates the rename to `stream=`.
        """
        data = "\n".join(items)
        url = self.url + "?start=%d" % offset
        r = requests.post(url, data=data, prefetch=True, auth=self.client.auth)
        r.raise_for_status()
    def _atexit(self):
        # Last-ditch warning: daemon worker dies with the process, so any
        # queued items are silently dropped if close() was never called.
        if not self.closed:
            warnings.warn("%r not closed properly, some items may have been lost!" % self)
| [
"dangra@gmail.com"
] | dangra@gmail.com |
c87cb5850140af866b559c1a8b9171c8536ddee4 | 5e8132f0c518f8f7e0c7f6424d12f7f4604d4d7a | /map-change.py | d67d62f4cb08fdce18dbe462d5527cccda7eab17 | [] | no_license | ajaymenon18/Mapping-using-Folium | 73ce7f38e64465b639bb3352e2730a77f2300336 | 77b6f523ba19f219cbe8aa59a91d4ad1feb19402 | refs/heads/master | 2023-08-05T09:43:39.479190 | 2021-09-20T05:50:55 | 2021-09-20T05:50:55 | 393,925,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import folium
import pandas
# Load volcano records; the file is assumed to have LAT/LON/ELEV/NAME
# columns (TODO confirm against mapping/Volcanoes.txt).
data = pandas.read_csv("mapping/Volcanoes.txt")
lat = list(data["LAT"])
lon = list(data["LON"])
elev = list(data["ELEV"])
name = list(data["NAME"])
# Popup template: a Google-search link for the volcano name plus its height.
# %%22 renders as a literal %22 (quote) after %-formatting.
html = """
Volcano name:<br>
<a href="https://www.google.com/search?q=%%22%s%%22" target="_blank">%s</a><br>
Height: %s m
"""
# NOTE(review): `map` shadows the builtin of the same name for the rest of
# the script; also `name` is reused as the loop variable below.
map = folium.Map(location=[38.58, -99.09], zoom_start=5, tiles="Stamen Terrain")
fg = folium.FeatureGroup(name = "My Map")
# One green marker per volcano, each with an HTML popup in an iframe.
for lt, ln, el, name in zip(lat, lon, elev, name):
    iframe = folium.IFrame(html=html % (name, name, el), width=200, height=100)
    fg.add_child(folium.Marker(location=[lt, ln], popup=folium.Popup(iframe), icon = folium.Icon(color = "green")))
map.add_child(fg)
map.save("Map_html_popup_advanced.html") | [
"ajaymenon17@gmail.com"
] | ajaymenon17@gmail.com |
91c46ed6861438fb001bf94fe1fa600cd41ad2c9 | 423f9cbe3b39e431b7eca2ba6ad15b2fd70ef56b | /EditDistance.py | d1cb6dc2cf6a49df28a308e6019e0e55bb7329c4 | [] | no_license | SerChirag/Dynamic-Programming | 8f7e6f23fd76c8d99fb8eb23b4324e1eb8e5b790 | 672bf3cb726cea302ce72ad7183d7f684b2788f0 | refs/heads/master | 2021-10-10T17:29:16.346125 | 2019-01-14T18:00:59 | 2019-01-14T18:00:59 | 115,982,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | def edit(str1,str2):
edit = [[0 for j in range(len(str2)+1)] for i in range(len(str1)+1)]
count = 0
for i in range(1,len(str1)+1):
for j in range(1,len(str2)+1):
if(str1[i-1] == str2[j-1]):
edit[i][j] = edit[i-1][j-1] + 1
else:
edit[i][j] = max(edit[i][j-1],edit[i-1][j])
return edit[len(str1)][len(str2)]
# Demo invocation; expected output: 3 (LCS "ADH").
str1 = "ABCDGH"
str2 = "AEDFHR"
# BUGFIX: Python-2-only `print` statement replaced with the call form,
# which prints identically on Python 2 and 3 for a single argument.
print(edit(str1, str2))
| [
"you@example.com"
] | you@example.com |
9947c254c93bcc92b396bff46d6f0321e70fe555 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adverbs/_bleakly.py | 28b608df7c1220eda261a778a410c245be7da9ea | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py |
#calss header
class _BLEAKLY():
def __init__(self,):
self.name = "BLEAKLY"
self.definitions = [u'in a way that suggests a lack of hope: ', u'in a way that is cold and empty: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adverbs'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4ea106fa0d3d20f24ebd2e1aff243ba8a7d62ae0 | 4735d59cc76bfe88e1f46ee6f8d668e7a9fc71ba | /user_data.py | f2ff0b599efe80564d04fd77d017319f25699909 | [
"MIT"
] | permissive | Rreuben/password | f120bee3463f6d03bdeec7be46eb6f39a7cf5e11 | b226801c7efdda2369c7008e768210215ecfe945 | refs/heads/master | 2020-03-27T04:56:38.746213 | 2018-08-27T12:18:42 | 2018-08-27T12:18:42 | 145,980,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | '''Module containing the user data'''
class User:
    """An account guarded by a master password, kept in a shared registry."""

    # class-level registry shared by every saved account
    user_accounts = []

    def __init__(self, master_password):
        """Remember this account's master password."""
        self.master_password = master_password

    def save_account(self):
        """Register this account in the shared list."""
        User.user_accounts.append(self)

    @classmethod
    def existing_user(cls, master_password):
        """Return the account whose password matches, or False when none does."""
        for account in User.user_accounts:
            if account.master_password == master_password:
                return account
        return False
return False | [
"jreuben07@gmail.com"
] | jreuben07@gmail.com |
6a7d6dda50500b1028cdea7eaded2287bf62c2c7 | cf8e6539b0d7b4a8849a5f08fcac3d928cfbf0d7 | /Find Output/OP6.py | acb2243e19e1b80ecf96559b2be2f3b93dba6354 | [] | no_license | sairaj225/Python | cb7aabf89580307da657adcc1675a46aa15e6a6a | ef429b2f068ff058b0d7fa0637515511857a6b91 | refs/heads/master | 2023-01-19T11:41:10.279543 | 2020-11-27T10:20:56 | 2020-11-27T10:20:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | x = 1_2_3
y = 4
print(x*y) | [
"saiking131@gmail.com"
] | saiking131@gmail.com |
79f2687bc8f4a9add7c0fbbba2af25d1ce45be2a | 4fcb2e797ba83b310fe05461d48f02931ea5a427 | /2017/day-19/solution.py | b22d5be009cad3f1c42f831b1e093a846f34a4d9 | [] | no_license | BrentChesny/AdventOfCode | 5a642d081505563f7518c5244bb814e9e4dfc5de | dad5224961539149bed5757bbae0ccc35a3a293d | refs/heads/master | 2022-12-11T19:51:22.138655 | 2022-12-04T21:46:29 | 2022-12-04T21:46:29 | 47,266,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | def parse_input():
    # Load the routing diagram as a grid of characters, one row per line;
    # only the trailing newline is stripped -- interior spaces are
    # significant because the walk is position-based.
    return [list(line.strip('\n')) for line in open('input.txt').readlines()]
def solve_part_one():
    """Walk the diagram from the top-row '|' and collect the letters passed."""
    grid = parse_input()
    row, col = 0, grid[0].index('|')
    d_row, d_col = 1, 0
    letters = []
    # Keep stepping until the cell ahead is blank (end of the path).
    while grid[row + d_row][col + d_col] != ' ':
        row, col = row + d_row, col + d_col
        cell = grid[row][col]
        if cell.isupper():
            letters.append(cell)
        if cell == '+':
            d_row, d_col = find_new_direction(grid, (row, col), (d_row, d_col))
    return ''.join(letters)
def solve_part_two():
    """Count the total number of steps along the whole path (start included)."""
    grid = parse_input()
    row, col = 0, grid[0].index('|')
    d_row, d_col = 1, 0
    steps = 1
    while grid[row + d_row][col + d_col] != ' ':
        steps += 1
        row, col = row + d_row, col + d_col
        if grid[row][col] == '+':
            d_row, d_col = find_new_direction(grid, (row, col), (d_row, d_col))
    return steps
def find_new_direction(grid, pos, old_dir):
    """Pick the outgoing direction at a '+' cell without reversing old_dir.

    Checks up, down, left, right (in that order) for a track character,
    skipping the direction that would undo the previous move; falls through
    to None when no neighbour continues the path.
    """
    row, col = pos
    candidates = (
        ((-1, 0), '|', (1, 0)),
        ((1, 0), '|', (-1, 0)),
        ((0, -1), '-', (0, 1)),
        ((0, 1), '-', (0, -1)),
    )
    for (d_row, d_col), track, reverse_of in candidates:
        if grid[row + d_row][col + d_col] == track and old_dir != reverse_of:
            return d_row, d_col
def main():
    # Python 2 print statements -- this file targets Python 2.
    print 'Part one: ', solve_part_one()
    print 'Part two: ', solve_part_two()
if __name__ == '__main__':
    main()
| [
"brent.chesny@gmail.com"
] | brent.chesny@gmail.com |
128d601cae05a0f318b0a90ac3ac53d97636fa48 | e0980f704a573894350e285f66f4cf390837238e | /.history/home/models_20201026174905.py | 36accded34a74330fba536c865386a721c5957a3 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import FieldPanel, PageChooserPanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.blocks import SnippetChooserBlock
from streams import blocks
class HomePage(Page):
    """Wagtail home page model (help texts are in Polish).

    NOTE(review): this lives under .history/ (editor autosave) and looks
    unfinished -- see the testimonial block below.
    """
    # Subtitle shown under the banner title.
    lead_text = models.CharField(
        max_length = 140,
        blank = True,
        help_text = 'Podtytuł pod tytułem banera'
    )
    # Optional internal page the banner button links to.
    button = models.ForeignKey(
        'wagtailcore.Page',
        blank = True,
        null = True,
        related_name = '+',
        help_text = 'Wybierz opcjonalną stronę, do której chcesz utworzyć łącze',
        on_delete = models.SET_NULL,
    )
    # Label for the banner button.
    button_text = models.CharField(
        max_length = 50,
        default = 'Czytaj więcej',
        blank = False,
        help_text = 'Przycisk tekstowy'
    )
    # NOTE(review): blank=False with null=True is an odd combination for an
    # image FK -- forms require a value while the DB allows NULL; confirm.
    banner_background_image = models.ForeignKey(
        'wagtailimages.Image',
        blank = False,
        null =True,
        related_name = '+',
        help_text = 'Obraz tła baneru',
        on_delete = models.SET_NULL,
    )
    # Flexible page body assembled from custom stream blocks.
    body = StreamField([
        ('title', blocks.TitleBlock()),
        ('cards', blocks.CardsBlock()),
        ('image_and_text', blocks.ImageAndTextBlock()),
        ('cta', blocks.CallToActionBlock()),
        # NOTE(review): empty target_model will fail at import time; a
        # snippet model label (e.g. 'app.Testimonial') is required here.
        ('testimonial', SnippetChooserBlock(target_model='')),
    ], null=True, blank=True)
    # Admin edit panels for the fields above.
    content_panels = Page.content_panels + [
        FieldPanel('lead_text'),
        PageChooserPanel('button'),
        FieldPanel('button_text'),
        ImageChooserPanel('banner_background_image'),
        StreamFieldPanel('body'),
    ]
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
77d4308ef4f478de26c6fc394155d3854b2ea2a6 | 058e5be6d77df6448197708c1b12fd5aca5616e1 | /scripts/dualtor_neighbor_check.py | 161177008a20df1c0e6dcb6bdbda625234238344 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | Junchao-Mellanox/sonic-utilities | d958c8699032cc01155802c29d7174e3fb79c2d8 | 8cb7320e4b9b364da110b7b737eeaf991665b300 | refs/heads/master | 2023-08-31T21:00:17.622141 | 2023-08-16T22:27:23 | 2023-08-16T22:27:23 | 242,450,146 | 0 | 0 | NOASSERTION | 2023-08-31T08:37:57 | 2020-02-23T03:30:43 | Python | UTF-8 | Python | false | false | 18,441 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
dualtor_neighbor_check.py
This tool is designed to verify that, for dualtor SONiC, the neighbors learnt from
mux ports should have correct neighbor/route entry in ASIC.
"""
import argparse
import enum
import functools
import ipaddress
import json
import logging
import shlex
import sys
import syslog
import subprocess
import tabulate
from natsort import natsorted
from swsscommon import swsscommon
from sonic_py_common import daemon_base
try:
from swsssdk import port_util
except ImportError:
from sonic_py_common import port_util
DB_READ_SCRIPT = """
-- this script is to read required tables from db:
-- APPL_DB:
-- - MUX_CABLE_TABLE
-- - HW_MUX_CABLE_TABLE
-- - NEIGH_TABLE
-- ASIC_DB:
-- - ASIC_STATE
--
-- KEYS - None
-- ARGV[1] - APPL_DB db index
-- ARGV[2] - APPL_DB separator
-- ARGV[3] - APPL_DB neighbor table name
-- ARGV[4] - APPL_DB mux cable table name
-- ARGV[5] - APPL_DB hardware mux cable table name
-- ARGV[6] - ASIC_DB db index
-- ARGV[7] - ASIC_DB separator
-- ARGV[8] - ASIC_DB asic state table name
local APPL_DB = 0
local APPL_DB_SEPARATOR = ':'
local neighbor_table_name = 'NEIGH_TABLE'
local mux_state_table_name = 'MUX_CABLE_TABLE'
local hw_mux_state_table_name = 'HW_MUX_CABLE_TABLE'
local ASIC_DB = 1
local ASIC_DB_SEPARATOR = ':'
local asic_state_table_name = 'ASIC_STATE'
local asic_route_key_prefix = 'SAI_OBJECT_TYPE_ROUTE_ENTRY'
local asic_neigh_key_prefix = 'SAI_OBJECT_TYPE_NEIGHBOR_ENTRY'
local asic_fdb_key_prefix = 'SAI_OBJECT_TYPE_FDB_ENTRY'
if table.getn(ARGV) == 7 then
APPL_DB = ARGV[1]
APPL_DB_SEPARATOR = ARGV[2]
neighbor_table_name = ARGV[3]
mux_state_table_name = ARGV[4]
hw_mux_state_table_name = ARGV[5]
ASIC_DB = ARGV[6]
ASIC_DB_SEPARATOR = ARGV[7]
asic_state_table_name = ARGV[8]
end
local neighbors = {}
local mux_states = {}
local hw_mux_states = {}
local asic_fdb = {}
local asic_route_table = {}
local asic_neighbor_table = {}
-- read from APPL_DB
redis.call('SELECT', APPL_DB)
-- read neighbors learnt from Vlan devices
local neighbor_table_vlan_prefix = neighbor_table_name .. APPL_DB_SEPARATOR .. 'Vlan'
local neighbor_keys = redis.call('KEYS', neighbor_table_vlan_prefix .. '*')
for i, neighbor_key in ipairs(neighbor_keys) do
local second_separator_index = string.find(neighbor_key, APPL_DB_SEPARATOR, string.len(neighbor_table_vlan_prefix), true)
if second_separator_index ~= nil then
local neighbor_ip = string.sub(neighbor_key, second_separator_index + 1)
local mac = string.lower(redis.call('HGET', neighbor_key, 'neigh'))
neighbors[neighbor_ip] = mac
end
end
-- read mux states
local mux_state_table_prefix = mux_state_table_name .. APPL_DB_SEPARATOR
local mux_cables = redis.call('KEYS', mux_state_table_prefix .. '*')
for i, mux_cable_key in ipairs(mux_cables) do
local port_name = string.sub(mux_cable_key, string.len(mux_state_table_prefix) + 1)
local mux_state = redis.call('HGET', mux_cable_key, 'state')
if mux_state ~= nil then
mux_states[port_name] = mux_state
end
end
local hw_mux_state_table_prefix = hw_mux_state_table_name .. APPL_DB_SEPARATOR
local hw_mux_cables = redis.call('KEYS', hw_mux_state_table_prefix .. '*')
for i, hw_mux_cable_key in ipairs(hw_mux_cables) do
local port_name = string.sub(hw_mux_cable_key, string.len(hw_mux_state_table_prefix) + 1)
local mux_state = redis.call('HGET', hw_mux_cable_key, 'state')
if mux_state ~= nil then
hw_mux_states[port_name] = mux_state
end
end
-- read from ASIC_DB
redis.call('SELECT', ASIC_DB)
-- read ASIC fdb entries
local fdb_prefix = asic_state_table_name .. ASIC_DB_SEPARATOR .. asic_fdb_key_prefix
local fdb_entries = redis.call('KEYS', fdb_prefix .. '*')
for i, fdb_entry in ipairs(fdb_entries) do
local bridge_port_id = redis.call('HGET', fdb_entry, 'SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID')
local fdb_details = cjson.decode(string.sub(fdb_entry, string.len(fdb_prefix) + 2))
local mac = string.lower(fdb_details['mac'])
asic_fdb[mac] = bridge_port_id
end
-- read ASIC route table
local route_prefix = asic_state_table_name .. ASIC_DB_SEPARATOR .. asic_route_key_prefix
local route_entries = redis.call('KEYS', route_prefix .. '*')
for i, route_entry in ipairs(route_entries) do
local route_details = string.sub(route_entry, string.len(route_prefix) + 2)
table.insert(asic_route_table, route_details)
end
-- read ASIC neigh table
local neighbor_prefix = asic_state_table_name .. ASIC_DB_SEPARATOR .. asic_neigh_key_prefix
local neighbor_entries = redis.call('KEYS', neighbor_prefix .. '*')
for i, neighbor_entry in ipairs(neighbor_entries) do
local neighbor_details = string.sub(neighbor_entry, string.len(neighbor_prefix) + 2)
table.insert(asic_neighbor_table, neighbor_details)
end
local result = {}
result['neighbors'] = neighbors
result['mux_states'] = mux_states
result['hw_mux_states'] = hw_mux_states
result['asic_fdb'] = asic_fdb
result['asic_route_table'] = asic_route_table
result['asic_neigh_table'] = asic_neighbor_table
return redis.status_reply(cjson.encode(result))
"""
DB_READ_SCRIPT_CONFIG_DB_KEY = "_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1"
ZERO_MAC = "00:00:00:00:00:00"
NEIGHBOR_ATTRIBUTES = ["NEIGHBOR", "MAC", "PORT", "MUX_STATE", "IN_MUX_TOGGLE", "NEIGHBOR_IN_ASIC", "TUNNERL_IN_ASIC", "HWSTATUS"]
NOT_AVAILABLE = "N/A"
class LogOutput(enum.Enum):
    """Enum of the two supported log destinations: syslog or stdout."""
    SYSLOG = "SYSLOG"
    STDOUT = "STDOUT"
    def __str__(self) -> str:
        # Rendered by argparse in help/choices, so show the bare value.
        return self.value
class SyslogLevel(enum.IntEnum):
    """Enum to represent syslog level (values mirror syslog severity codes)."""
    ERROR = 3
    NOTICE = 5
    INFO = 6
    DEBUG = 7
    def __str__(self) -> str:
        return self.name
# Current syslog verbosity threshold; overwritten by config_logging().
SYSLOG_LEVEL = SyslogLevel.INFO
# Logging callables per severity; bound by config_logging() to either the
# logging module (stdout mode) or write_syslog partials (syslog mode).
WRITE_LOG_ERROR = None
WRITE_LOG_WARN = None
WRITE_LOG_INFO = None
WRITE_LOG_DEBUG = None
def parse_args():
    """Parse command-line options and normalize the log-level settings.

    Returns the argparse namespace with ``log_level`` / ``syslog_level``
    resolved to concrete values for the chosen output; supplying a level
    option that belongs to the other output is a parser error.
    """
    parser = argparse.ArgumentParser(
        description="Verify neighbors state is consistent with mux state."
    )
    parser.add_argument(
        "-o", "--log-output",
        type=LogOutput,
        choices=list(LogOutput),
        default=LogOutput.STDOUT,
        help="log output",
    )
    parser.add_argument(
        "-s", "--syslog-level",
        choices=["ERROR", "NOTICE", "INFO", "DEBUG"],
        default=None,
        help="syslog level",
    )
    parser.add_argument(
        "-l", "--log-level",
        choices=["ERROR", "WARNING", "INFO", "DEBUG"],
        default=None,
        help="stdout log level",
    )
    args = parser.parse_args()
    if args.log_output == LogOutput.STDOUT:
        # stdout mode: resolve the stdout level, reject syslog options.
        args.log_level = (
            logging.WARNING if args.log_level is None
            else logging.getLevelName(args.log_level)
        )
        if args.syslog_level is not None:
            parser.error("Received syslog level with log output to stdout.")
    if args.log_output == LogOutput.SYSLOG:
        # syslog mode: resolve the syslog level, reject stdout options.
        args.syslog_level = (
            SyslogLevel.NOTICE if args.syslog_level is None
            else SyslogLevel[args.syslog_level]
        )
        if args.log_level is not None:
            parser.error("Received stdout log level with log output to syslog.")
    return args
def write_syslog(level, message, *args):
    """Emit *message* to syslog at *level*, honoring the module threshold.

    ``%``-style *args* are interpolated lazily, i.e. only when the
    message will actually be emitted.
    """
    if level > SYSLOG_LEVEL:
        return
    if args:
        message %= args
    priority_by_level = {
        SyslogLevel.ERROR: syslog.LOG_ERR,
        SyslogLevel.NOTICE: syslog.LOG_NOTICE,
        SyslogLevel.INFO: syslog.LOG_INFO,
        SyslogLevel.DEBUG: syslog.LOG_DEBUG,
    }
    # Unknown levels fall back to DEBUG, matching the original if/else chain.
    syslog.syslog(priority_by_level.get(level, syslog.LOG_DEBUG), message)
def config_logging(args):
    """Bind the module-level WRITE_LOG_* callables for the chosen output.

    For stdout output this configures the root ``logging`` handler; for
    syslog output it records the threshold and routes each helper through
    :func:`write_syslog` at a fixed severity.
    """
    global SYSLOG_LEVEL
    global WRITE_LOG_ERROR
    global WRITE_LOG_WARN
    global WRITE_LOG_INFO
    global WRITE_LOG_DEBUG
    if args.log_output == LogOutput.STDOUT:
        logging.basicConfig(
            stream=sys.stdout,
            level=args.log_level,
            format="%(message)s"
        )
        WRITE_LOG_ERROR, WRITE_LOG_WARN, WRITE_LOG_INFO, WRITE_LOG_DEBUG = (
            logging.error,
            logging.warning,
            logging.info,
            logging.debug,
        )
    elif args.log_output == LogOutput.SYSLOG:
        SYSLOG_LEVEL = args.syslog_level
        WRITE_LOG_ERROR, WRITE_LOG_WARN, WRITE_LOG_INFO, WRITE_LOG_DEBUG = (
            functools.partial(write_syslog, SyslogLevel.ERROR),
            functools.partial(write_syslog, SyslogLevel.NOTICE),
            functools.partial(write_syslog, SyslogLevel.INFO),
            functools.partial(write_syslog, SyslogLevel.DEBUG),
        )
def run_command(cmd):
    """Run *cmd* (a shell-style command string) and return decoded output.

    stdout and stderr are captured together. Raises RuntimeError when the
    command cannot be spawned or exits with a non-zero return code.
    """
    WRITE_LOG_DEBUG("Running command: %s", cmd)
    try:
        p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        (output, _) = p.communicate()
    except Exception as details:
        # BUG FIX: the message must be %-formatted here; the original passed
        # the format string and argument as two RuntimeError args, leaving
        # a literal "%s" in the exception text.
        raise RuntimeError("Failed to run command: %s" % details)
    WRITE_LOG_DEBUG("Command output: %s", output)
    WRITE_LOG_DEBUG("Command return code: %s", p.returncode)
    if p.returncode != 0:
        raise RuntimeError("Command failed with return code %s: %s" % (p.returncode, output))
    return output.decode()
def redis_cli(redis_cmd):
    """Execute *redis_cmd* through the redis-cli binary and return its reply.

    Raises RuntimeError when redis reports an error in the reply text.
    """
    result = run_command("sudo redis-cli %s" % redis_cmd).strip()
    if "error" in result or "ERR" in result:
        raise RuntimeError("Redis command '%s' failed: %s" % (redis_cmd, result))
    return result
def read_tables_from_db(appl_db):
    """Read neighbor/mux/ASIC tables from redis in one atomic script call.

    Returns (neighbors, mux_states, hw_mux_states, asic_fdb,
    asic_route_table, asic_neigh_table) decoded from the Lua script's
    JSON reply; asic_fdb values have the "oid:0x" prefix removed.
    """
    # NOTE: let's cache the db read script sha1 in APPL_DB under
    # key "_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1"
    db_read_script_sha1 = appl_db.get(DB_READ_SCRIPT_CONFIG_DB_KEY)
    if not db_read_script_sha1:
        redis_load_cmd = "SCRIPT LOAD \"%s\"" % DB_READ_SCRIPT
        db_read_script_sha1 = redis_cli(redis_load_cmd).strip()
        WRITE_LOG_INFO("loaded script sha1: %s", db_read_script_sha1)
        appl_db.set(DB_READ_SCRIPT_CONFIG_DB_KEY, db_read_script_sha1)
    redis_run_cmd = "EVALSHA %s 0" % db_read_script_sha1
    result = redis_cli(redis_run_cmd).strip()
    tables = json.loads(result)
    neighbors = tables["neighbors"]
    mux_states = tables["mux_states"]
    hw_mux_states = tables["hw_mux_states"]
    # BUG FIX: str.lstrip("oid:0x") strips any run of the CHARACTERS
    # {o,i,d,:,0,x}, so it could also eat leading hex digits ('0', 'd', ...)
    # of the oid itself. Strip exactly the "oid:0x" prefix instead so the
    # keys keep matching port_util's bridge-port oid formatting.
    asic_fdb = {k: v[len("oid:0x"):] if v.startswith("oid:0x") else v
                for k, v in tables["asic_fdb"].items()}
    asic_route_table = tables["asic_route_table"]
    asic_neigh_table = tables["asic_neigh_table"]
    WRITE_LOG_DEBUG("neighbors: %s", json.dumps(neighbors, indent=4))
    WRITE_LOG_DEBUG("mux states: %s", json.dumps(mux_states, indent=4))
    WRITE_LOG_DEBUG("hw mux states: %s", json.dumps(hw_mux_states, indent=4))
    WRITE_LOG_DEBUG("ASIC FDB: %s", json.dumps(asic_fdb, indent=4))
    WRITE_LOG_DEBUG("ASIC route table: %s", json.dumps(asic_route_table, indent=4))
    WRITE_LOG_DEBUG("ASIC neigh table: %s", json.dumps(asic_neigh_table, indent=4))
    return neighbors, mux_states, hw_mux_states, asic_fdb, asic_route_table, asic_neigh_table
def get_if_br_oid_to_port_name_map():
    """Map each bridge-port oid to its SONiC port name.

    Bridge-port oids whose interface oid is unknown to port_util are
    silently dropped.
    """
    db = swsscommon.SonicV2Connector(host="127.0.0.1")
    try:
        port_name_map = port_util.get_interface_oid_map(db)[1]
    except IndexError:
        port_name_map = {}
    if_br_oid_map = port_util.get_bridge_port_map(db)
    return {
        br_oid: port_name_map[if_oid]
        for br_oid, if_oid in if_br_oid_map.items()
        if if_oid in port_name_map
    }
def is_dualtor(config_db):
    """Report whether DEVICE_METADATA marks this device as a dual-ToR."""
    device_metadata = config_db.get_table('DEVICE_METADATA')
    localhost_meta = device_metadata.get("localhost", {})
    return localhost_meta.get("subtype", "").lower() == 'dualtor'
def get_mux_cable_config(config_db):
    """Return mux cable config from CONFIG_DB.

    The MUX_CABLE table maps each mux port name to its config dict
    (including the server_ipv4/server_ipv6 entries used elsewhere here).
    """
    return config_db.get_table("MUX_CABLE")
def get_mux_server_to_port_map(mux_cables):
    """Map each configured mux server IP (v4 and v6) to its mux port name.

    The configured addresses may carry a prefix length; only the bare IP
    is used as the key.
    """
    server_to_port = {}
    for port, details in mux_cables.items():
        for addr_key in ("server_ipv4", "server_ipv6"):
            if addr_key in details:
                server_ip = str(ipaddress.ip_interface(details[addr_key]).ip)
                server_to_port[server_ip] = port
    return server_to_port
def get_mac_to_port_name_map(asic_fdb, if_oid_to_port_name_map):
    """Map each FDB MAC to the port name its bridge-port oid resolves to.

    MACs whose bridge-port oid is not in the oid-to-port map are dropped.
    """
    return {
        mac: if_oid_to_port_name_map[bridge_port_oid]
        for mac, bridge_port_oid in asic_fdb.items()
        if bridge_port_oid in if_oid_to_port_name_map
    }
def check_neighbor_consistency(neighbors, mux_states, hw_mux_states, mac_to_port_name_map,
                               asic_route_table, asic_neigh_table, mux_server_to_port_map):
    """Checks if neighbors are consistent with mux states.

    Builds one result dict per neighbor (keys per NEIGHBOR_ATTRIBUTES):
    an active mux port should have the neighbor programmed in the ASIC
    neighbor table and no tunnel route; a standby port should have only
    the tunnel route. ("TUNNERL" is the established key spelling.)
    """
    # ASIC route destinations with the prefix length stripped off.
    asic_route_destinations = set(json.loads(_)["dest"].split("/")[0] for _ in asic_route_table)
    asic_neighs = set(json.loads(_)["ip"] for _ in asic_neigh_table)
    check_results = []
    for neighbor_ip in natsorted(list(neighbors.keys())):
        mac = neighbors[neighbor_ip]
        # Start with every attribute as "N/A" and fill in what can be proven.
        check_result = {attr: NOT_AVAILABLE for attr in NEIGHBOR_ATTRIBUTES}
        check_result["NEIGHBOR"] = neighbor_ip
        check_result["MAC"] = mac
        # A zero MAC marks a neighbor expected to be reachable via tunnel only.
        is_zero_mac = (mac == ZERO_MAC)
        if mac not in mac_to_port_name_map and not is_zero_mac:
            # MAC not learnt on any known port: report with N/A details only.
            check_results.append(check_result)
            continue
        check_result["NEIGHBOR_IN_ASIC"] = neighbor_ip in asic_neighs
        check_result["TUNNERL_IN_ASIC"] = neighbor_ip in asic_route_destinations
        if is_zero_mac:
            # Consistent only when there is a tunnel route and no neighbor entry.
            check_result["HWSTATUS"] = ((not check_result["NEIGHBOR_IN_ASIC"]) and check_result["TUNNERL_IN_ASIC"])
        else:
            port_name = mac_to_port_name_map[mac]
            # NOTE: mux server ips are always fixed to the mux port
            if neighbor_ip in mux_server_to_port_map:
                port_name = mux_server_to_port_map[neighbor_ip]
            mux_state = mux_states[port_name]
            hw_mux_state = hw_mux_states[port_name]
            check_result["PORT"] = port_name
            check_result["MUX_STATE"] = mux_state
            # Desired vs. hardware state differing means the mux is mid-toggle.
            check_result["IN_MUX_TOGGLE"] = mux_state != hw_mux_state
            if mux_state == "active":
                check_result["HWSTATUS"] = (check_result["NEIGHBOR_IN_ASIC"] and (not check_result["TUNNERL_IN_ASIC"]))
            elif mux_state == "standby":
                check_result["HWSTATUS"] = ((not check_result["NEIGHBOR_IN_ASIC"]) and check_result["TUNNERL_IN_ASIC"])
            else:
                # skip as unknown mux state
                continue
        check_results.append(check_result)
    return check_results
def parse_check_results(check_results):
    """Parse the check results to see if there are neighbors that are inconsistent with mux state.

    Logs the full result table (WARN) plus a failure table (ERROR), and
    returns True when no actionable inconsistency was found.
    """
    failed_neighbors = []
    # Tuples indexed by a bool (False -> 0, True -> 1) for readable output.
    bool_to_yes_no = ("no", "yes")
    bool_to_consistency = ("inconsistent", "consistent")
    for check_result in check_results:
        port = check_result["PORT"]
        is_zero_mac = check_result["MAC"] == ZERO_MAC
        if port == NOT_AVAILABLE and not is_zero_mac:
            # MAC was never learnt on a port: nothing to validate.
            continue
        in_toggle = check_result["IN_MUX_TOGGLE"]
        hwstatus = check_result["HWSTATUS"]
        # Convert booleans to text in place for the tabulated report.
        if not is_zero_mac:
            check_result["IN_MUX_TOGGLE"] = bool_to_yes_no[in_toggle]
        check_result["NEIGHBOR_IN_ASIC"] = bool_to_yes_no[check_result["NEIGHBOR_IN_ASIC"]]
        check_result["TUNNERL_IN_ASIC"] = bool_to_yes_no[check_result["TUNNERL_IN_ASIC"]]
        check_result["HWSTATUS"] = bool_to_consistency[hwstatus]
        # Inconsistency only counts as a failure when the mux is not
        # mid-toggle (zero-MAC entries carry no toggle state at all).
        if (not hwstatus):
            if is_zero_mac:
                failed_neighbors.append(check_result)
            elif not in_toggle:
                failed_neighbors.append(check_result)
    output_lines = tabulate.tabulate(
        [[check_result[attr] for attr in NEIGHBOR_ATTRIBUTES] for check_result in check_results],
        headers=NEIGHBOR_ATTRIBUTES,
        tablefmt="simple"
    )
    # Log line by line so each row survives syslog's per-message framing.
    for output_line in output_lines.split("\n"):
        WRITE_LOG_WARN(output_line)
    if failed_neighbors:
        WRITE_LOG_ERROR("Found neighbors that are inconsistent with mux states: %s", [_["NEIGHBOR"] for _ in failed_neighbors])
        err_output_lines = tabulate.tabulate(
            [[neighbor[attr] for attr in NEIGHBOR_ATTRIBUTES] for neighbor in failed_neighbors],
            headers=NEIGHBOR_ATTRIBUTES,
            tablefmt="simple"
        )
        for output_line in err_output_lines.split("\n"):
            WRITE_LOG_ERROR(output_line)
        return False
    return True
if __name__ == "__main__":
    args = parse_args()
    config_logging(args)
    # CONFIG_DB tells us whether this device is a dualtor setup at all.
    config_db = swsscommon.ConfigDBConnector(use_unix_socket_path=False)
    config_db.connect()
    appl_db = daemon_base.db_connect("APPL_DB")
    mux_cables = get_mux_cable_config(config_db)
    if not is_dualtor(config_db) or not mux_cables:
        WRITE_LOG_DEBUG("Not a valid dualtor setup, skip the check.")
        sys.exit(0)
    mux_server_to_port_map = get_mux_server_to_port_map(mux_cables)
    if_oid_to_port_name_map = get_if_br_oid_to_port_name_map()
    # Snapshot every relevant table atomically, then cross-check them.
    neighbors, mux_states, hw_mux_states, asic_fdb, asic_route_table, asic_neigh_table = read_tables_from_db(appl_db)
    mac_to_port_name_map = get_mac_to_port_name_map(asic_fdb, if_oid_to_port_name_map)
    check_results = check_neighbor_consistency(
        neighbors,
        mux_states,
        hw_mux_states,
        mac_to_port_name_map,
        asic_route_table,
        asic_neigh_table,
        mux_server_to_port_map
    )
    res = parse_check_results(check_results)
    # Exit non-zero on inconsistency so callers/monitoring can alert.
    sys.exit(0 if res else 1)
| [
"noreply@github.com"
] | Junchao-Mellanox.noreply@github.com |
2d417acaf52f6be08717f4e04d79de0d19a77f9f | c5db5f345e99faf10267c2497b33f4c0c886937d | /loginfo.py | 28611f98c2fce739c466ddbd36b37645cb545db9 | [] | no_license | ecore2018/risk_control | bb8538e5b091271ffc93905525f040a88011948d | b09c1294facc06b5f2c3d2d3725980d5678eef51 | refs/heads/master | 2021-01-20T01:05:49.412806 | 2016-04-14T09:05:15 | 2016-04-14T09:05:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,774 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import os
import numpy as np
import pandas as pd
from config import Config
from load_origin_data import Load_origin_data
import time
import copy
import json
class Loginfo(object):
def __init__(self,config):
self.config=config
def load_info(self):
reader_train=pd.read_csv(self.config.path_origin_train_loginfo,iterator=False,delimiter=',',encoding='utf-8')
reader_test=pd.read_csv(self.config.path_origin_predict_loginfo,iterator=False,delimiter=',',encoding='utf-8')
len_train=len(reader_train)
len_test=len(reader_test)
reader=pd.concat([reader_train,reader_test],ignore_index=True)
# reader['Listinginfo1']=reader['Listinginfo1'].apply(self._deal_date)
# reader['LogInfo3']=reader['LogInfo3'].apply(self._deal_date)
Idxs=self.get_Idx(reader['Idx'])
#print Idxs
codes=list(set(reader['LogInfo1']))
types=list(set(reader['LogInfo2']))
d={}
d['log_len']=0
for code in codes:
d['code_'+str(code)]=0
d['code_per_'+str(code)]=0
for t in types:
d['type_'+str(t)]=0
d['type_per_'+str(t)]=0
index=0
Idx_d=copy.deepcopy(d)
Idx_dict={}
Idx=0
for i in range(len_train+len_test):
Idx=reader['Idx'][i]
code=reader['LogInfo1'][i]
t=reader['LogInfo2'][i]
if Idx==Idxs[index]:
Idx_d['log_len']+=1
Idx_d['code_'+str(code)]+=1
Idx_d['type_'+str(t)]+=1
else:
for code in codes:
Idx_d['code_per_'+str(code)]=float(Idx_d['code_'+str(code)])/float(Idx_d['log_len'])
for t in types:
Idx_d['type_per_'+str(t)]=float(Idx_d['type_'+str(t)])/float(Idx_d['log_len'])
Idx_dict[str(Idxs[index])]=self._row_info(Idx_d,codes,types)
Idx_d=copy.deepcopy(d)
index+=1
Idx_d['log_len']+=1
Idx_d['code_'+str(code)]+=1
Idx_d['type_'+str(t)]+=1
for code in codes:
Idx_d['code_per_'+str(code)]=float(Idx_d['code_'+str(code)])/float(Idx_d['log_len'])
for t in types:
Idx_d['type_per_'+str(t)]=float(Idx_d['type_'+str(t)])/float(Idx_d['log_len'])
Idx_dict[str(Idxs[index])]=self._row_info(Idx_d,codes,types)
#self.output_info(Idx_dict)
return Idx_dict,len(codes)+len(types)+1
def load_info2(self,limit):
"""
距离交易前1周
"""
reader_train=pd.read_csv(self.config.path_origin_train_loginfo,iterator=False,delimiter=',',encoding='utf-8')
reader_test=pd.read_csv(self.config.path_origin_predict_loginfo,iterator=False,delimiter=',',encoding='utf-8')
len_train=len(reader_train)
len_test=len(reader_test)
reader=pd.concat([reader_train,reader_test],ignore_index=True)
reader['Listinginfo1']=reader['Listinginfo1'].apply(self._deal_date)
reader['LogInfo3']=reader['LogInfo3'].apply(self._deal_date)
Idxs=self.get_Idx(reader['Idx'])
codes=list(set(reader['LogInfo1']))
types=list(set(reader['LogInfo2']))
d={}
d['log_len']=0
for code in codes:
d['code_'+str(code)]=0
d['code_per_'+str(code)]=0
for t in types:
d['type_'+str(t)]=0
d['type_per_'+str(t)]=0
index=0
Idx_d=copy.deepcopy(d)
Idx_dict={}
Idx=0
for i in range(len_train+len_test):
Idx=reader['Idx'][i]
code=reader['LogInfo1'][i]
t=reader['LogInfo2'][i]
trading_date=reader['Listinginfo1'][i]
log_date=reader['LogInfo3'][i]
if Idx==Idxs[index]:
if (trading_date-log_date)/86400<=limit:
Idx_d['log_len']+=1
Idx_d['code_'+str(code)]+=1
Idx_d['type_'+str(t)]+=1
else:
for code in codes:
try:
Idx_d['code_per_'+str(code)]=float(Idx_d['code_'+str(code)])/float(Idx_d['log_len'])
except:
Idx_d['code_per_'+str(code)]=0.0
for t2 in types:
try:
Idx_d['type_per_'+str(t2)]=float(Idx_d['type_'+str(t2)])/float(Idx_d['log_len'])
except:
Idx_d['type_per_'+str(t2)]=0.0
Idx_dict[str(Idxs[index])]=self._row_info(Idx_d,codes,types)
Idx_d=copy.deepcopy(d)
index+=1
Idx_d['log_len']+=1
Idx_d['code_'+str(code)]+=1
Idx_d['type_'+str(t)]+=1
for code in codes:
try:
Idx_d['code_per_'+str(code)]=float(Idx_d['code_'+str(code)])/float(Idx_d['log_len'])
except:
Idx_d['code_per_'+str(code)]=0.0
for t2 in types:
try:
Idx_d['type_per_'+str(t2)]=float(Idx_d['type_'+str(t2)])/float(Idx_d['log_len'])
except:
Idx_d['type_per_'+str(t2)]=0.0
Idx_dict[str(Idxs[index])]=self._row_info(Idx_d,codes,types)
#self.output_info(Idx_dict)
return Idx_dict,len(codes)+len(types)+1
def load_info3(self):
"""
时间处理
"""
reader_train=pd.read_csv(self.config.path_origin_train_loginfo,iterator=False,delimiter=',',encoding='utf-8')
reader_test=pd.read_csv(self.config.path_origin_predict_loginfo,iterator=False,delimiter=',',encoding='utf-8')
len_train=len(reader_train)
len_test=len(reader_test)
reader=pd.concat([reader_train,reader_test],ignore_index=True)
reader['Listinginfo1']=reader['Listinginfo1'].apply(self._deal_date)
reader['LogInfo3']=reader['LogInfo3'].apply(self._deal_date)
Idxs=self.get_Idx(reader['Idx'])
codes=list(set(reader['LogInfo1']))
types=list(set(reader['LogInfo2']))
index=0
time_list=[]
last_trading_time=0
Idx=0
X=[]
Idx_dict={}
for i in range(len_train+len_test):
Idx=reader['Idx'][i]
code=reader['LogInfo1'][i]
t=reader['LogInfo2'][i]
trading_date=reader['Listinginfo1'][i]
log_date=reader['LogInfo3'][i]
if Idx==Idxs[index]:
time_list.append(log_date)
else:
Idx_dict[str(Idxs[index])]=self._deal_time_list(time_list,last_trading_time)
time_list=[]
time_list.append(log_date)
index+=1
last_trading_time=trading_date
Idx_dict[str(Idxs[index])]=self._deal_time_list(time_list,last_trading_time)
return Idx_dict
def _deal_time_list(self,time_list,trading_date):
time_list=sorted(time_list,reverse=True)
log_len=len(time_list) #log长度
first_log=trading_date-time_list[log_len-1] #交易时间-最开始log时间
last_log=trading_date-time_list[0] #交易时间-最后log时间
trade_date_log=0 #交易当天
trade_date_per=0
last_date_log=0 #最后一天操作次数
last_date_per=0
last_3date_log=0 #最后三天操作次数
last_3date_per=0
last_5date_log=0 #最后5天操作次数
last_5date_per=0
last_7date_log=0 #最后7天操作次数
last_7date_per=0
active_dates=len(set(time_list)) #共活跃天数
last_active_dates=0 #最后一周活跃天数
last_active_per=0
last_active_dates_list=[]
for t in time_list:
if t==trading_date:
trade_date_log+=1
if t==time_list[0]:
last_date_log+=1
if (time_list[0]-t)/86400<=3:
last_3date_log+=1
if (time_list[0]-t)/86400<=5:
last_5date_log+=1
if (time_list[0]-t)/86400<=7:
last_7date_log+=1
last_active_dates_list.append(t)
trade_date_per=float(trade_date_log)/float(log_len)
last_date_per=float(last_date_log)/float(log_len)
last_3date_per=float(last_3date_log)/float(log_len)
last_5date_per=float(last_5date_log)/float(log_len)
last_7date_per=float(last_7date_log)/float(log_len)
last_active_dates=len(set(last_active_dates_list))
last_active_per=last_active_dates/active_dates
l=[first_log,last_log,trade_date_log,trade_date_per,last_date_log,last_date_per,last_3date_log,last_3date_per,last_5date_log,last_5date_per,last_7date_log,last_7date_per,active_dates,last_active_dates,last_active_per]
return l
pass
def _row_info(self,d,codes,types):
l=[]
l.append(d['log_len'])
for code in codes:
l.append(d['code_'+str(code)])
l.append(d['code_per_'+str(code)])
for t in types:
l.append(d['type_'+str(t)])
l.append(d['type_per_'+str(t)])
return l
def output_info(self):
origin_instance=Load_origin_data(self.config)
train_uids=origin_instance.load_train_uid()
test_uids=origin_instance.load_predict_uid()
Idx_dict,len_col=self.load_info()
f1=open(self.config.path+"train/master_loginfo1.csv",'wb')
f2=open(self.config.path+"test/master_loginfo1.csv",'wb')
for uid in train_uids:
if str(uid) in Idx_dict:
l=Idx_dict[str(uid)]
else:
l=[-1 for i in range(len_col)]
f1.write(str(uid))
for v in l:
f1.write(','+str(v))
f1.write('\n')
for uid in test_uids:
if str(uid) in Idx_dict:
l=Idx_dict[str(uid)]
else:
l=[-1 for i in range(len_col)]
f2.write(str(uid))
for v in l:
f2.write(','+str(v))
f2.write('\n')
f1.close()
f2.close()
def output_info2(self,limit):
"""
limit 1,3,7
"""
origin_instance=Load_origin_data(self.config)
train_uids=origin_instance.load_train_uid()
test_uids=origin_instance.load_predict_uid()
Idx_dict,len_col=self.load_info2(limit)
f1=open(self.config.path+"train/master_loginfo_limit"+str(limit)+".csv",'wb')
f2=open(self.config.path+"test/master_loginfo_limit"+str(limit)+".csv",'wb')
for uid in train_uids:
if str(uid) in Idx_dict:
l=Idx_dict[str(uid)]
else:
l=[-1 for i in range(len_col)]
f1.write(str(uid))
for v in l:
f1.write(','+str(v))
f1.write('\n')
for uid in test_uids:
if str(uid) in Idx_dict:
l=Idx_dict[str(uid)]
else:
l=[-1 for i in range(len_col)]
f2.write(str(uid))
for v in l:
f2.write(','+str(v))
f2.write('\n')
f1.close()
f2.close()
def output_info3(self):
origin_instance=Load_origin_data(self.config)
train_uids=origin_instance.load_train_uid()
test_uids=origin_instance.load_predict_uid()
Idx_dict=self.load_info3()
f1=open(self.config.path+"train/master_loginfo_time.csv",'wb')
f2=open(self.config.path+"test/master_loginfo_time.csv",'wb')
for uid in train_uids:
if str(uid) in Idx_dict:
l=Idx_dict[str(uid)]
else:
l=[-1 for i in range(15)]
f1.write(str(uid))
for v in l:
f1.write(','+str(v))
f1.write('\n')
for uid in test_uids:
if str(uid) in Idx_dict:
l=Idx_dict[str(uid)]
else:
l=[-1 for i in range(15)]
f2.write(str(uid))
for v in l:
f2.write(','+str(v))
f2.write('\n')
f1.close()
f2.close()
def get_Idx(self,col):
last=-1000
l=[]
for v in col:
if last!=v:
l.append(v)
last=v
return l
def _deal_date(self,n):
t=time.strptime(str(n),"%Y-%m-%d")
return time.mktime(t) #(time.mktime(t)-1262275200.0)/100
def loginfo_idxs(self):
reader_train=pd.read_csv(self.config.path_origin_train_loginfo,iterator=False,delimiter=',',encoding='utf-8')
reader_test=pd.read_csv(self.config.path_origin_predict_loginfo,iterator=False,delimiter=',',encoding='utf-8')
len_train=len(reader_train)
len_test=len(reader_test)
reader=pd.concat([reader_train,reader_test],ignore_index=True)
Idxs=self.get_Idx(reader['Idx'])
return Idxs,len_train,len_test
def main():
	# Regenerate every log-info feature file: full history, the 1/3/7-day
	# windows, and the time-pattern features.
	instance=Loginfo(Config())
	instance.output_info()
	instance.output_info2(1)
	instance.output_info2(3)
	instance.output_info2(7)
	instance.output_info3()
if __name__ == '__main__':
	main()
"447642253@qq.com"
] | 447642253@qq.com |
0f3a08eb19415e6839f084ef6b5fd54d9bb6cee3 | 6019b48f027b1f62de8474a834f52157fc8faf2c | /src/ch3/cv2io/negaposi.py | 7aa3463cf05aee3a2932641dbca8b3d908f3f44e | [] | no_license | kujirahand/book-mlearn-gyomu | d540aebf96af84d5c271fa11f31bf18417c16f34 | b1d5f04a69777fb3896b28144ecb18d49a744c25 | refs/heads/master | 2023-07-04T01:14:39.673001 | 2023-04-05T13:27:53 | 2023-04-05T13:27:53 | 135,913,708 | 127 | 113 | null | 2020-08-10T23:16:30 | 2018-06-03T14:56:59 | Jupyter Notebook | UTF-8 | Python | false | false | 212 | py | import matplotlib.pyplot as plt
import cv2
# 画像を読み込む
img = cv2.imread("test.jpg")
# ネガポジ反転
img = 255 - img
# 画像を表示
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
| [
"kujira@kujirahand.com"
] | kujira@kujirahand.com |
0620bf92f085f32c22e504780c6979fde4483d86 | 6d86faf3c9f5578095873516464116f4c6a33c87 | /se-check.py | 307d3d65250c174d2cdc702e81c589f4fad4c1ba | [] | no_license | aoanla/Distributed-Resilient-Storage | 379f4e02bfdd70a68bf68f7d525080c09179a326 | c43d748685928365ee8efc197be633b0d8aee4cb | refs/heads/master | 2020-12-28T19:56:36.155644 | 2014-10-13T10:53:33 | 2014-10-13T10:53:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,853 | py | #!/usr/bin/env python
'''
A program to test chich SEs given in dirac-dms-show-se-status are available for writing.
Example:
$ python se-check-cli.py --remote_directory /gridpp/ptodev/ --verbose
'''
from DIRAC import S_OK, S_ERROR, gLogger, exit
from DIRAC.Core.Base import Script
class Params:
    """Holds command-line option values for the DIRAC Script switch callbacks.

    set* methods are registered with Script.registerSwitch and must return
    S_OK() per the DIRAC callback convention.
    """
    def __init__(self):
        self.Verbose = False
        # BUG FIX: default RemDir so getRemDir() does not raise
        # AttributeError when --remote_directory was never supplied.
        self.RemDir = None
    def setRemDir(self, value):
        # Switch callback: record the catalogue directory used for the test.
        self.RemDir = value
        return S_OK()
    def getRemDir(self):
        return self.RemDir
    def setVerbose(self, value):
        # Switch callback: any occurrence of the flag enables verbose mode
        # (the value itself is ignored).
        self.Verbose = True
        return S_OK()
    def getVerbose(self):
        return self.Verbose
# Instantiate the params class
cliParams = Params()
# Register accepted switches and their callbacks
Script.registerSwitch("rd:", "remote_directory=", "Directory on the catalogue where a test will be uploaded.", cliParams.setRemDir)
Script.registerSwitch("v", "verbose", "Verbose mode - prints more information on the screen.", cliParams.setVerbose)
# Parse the command line and initialize DIRAC
Script.parseCommandLine(ignoreErrors = False)
switches = dict(Script.getUnprocessedSwitches())
# Get the list of services
servicesList = Script.getPositionalArgs()
# NOTE: imports placed after parseCommandLine(), per the usual DIRAC
# script pattern.
import DIRAC.Interfaces.API.Dirac as dirac_api
import subprocess as sp
import sys, os
# Get command line arguments
# NOTE(review): if --remote_directory is omitted, getRemDir() raises
# AttributeError because Params never initializes RemDir — confirm.
testdir = cliParams.getRemDir()
verbose = cliParams.getVerbose()
dirac = dirac_api.Dirac()
testfile_remote = '1'
# Create a file to upload for testing
# Pick the first numeric filename ('1', '2', ...) not already present in
# the working directory, so no local file is overwritten.
testfile_local = '1'
while(True):
	if(os.path.isfile(testfile_local)):
		testfile_local = str(int(testfile_local)+1)
	else:
		break
local_file = open(testfile_local, 'w')
local_file.write('A file for testing whether an SE works.')
local_file.close()
############################## GET A LIST OF THE SEs ###################################
print 'Getting a list of the SEs...'
# Parse the human-readable table printed by dirac-dms-show-se-status.
se_stat = sp.Popen("dirac-dms-show-se-status", shell=True, stdout=sp.PIPE).stdout.read()
# Split into lines
se_stat = se_stat.split('\n')
# Clean unnecessary lines
se_stat = se_stat[2:-1]
# Split each line into strings
for se_index in range(len(se_stat)):
	se_stat[se_index] = se_stat[se_index].split()
# Create a list with the names of the SEs
ses = []
for se in se_stat:
	ses.append(se[0])
# Print the SEs
if(verbose):
	print 'Found SEs:'
	for se in ses:
		print se
############################### TEST WHICH SEs WORK ####################################
print ''
print 'Testing the SEs...'
ses_not_working = []
ses_working = []
small_log = ''
# NOTE(review): this compares str() of the result dict against a
# hard-coded repr; any change in DIRAC's message text breaks the
# GUID-collision retry below.
existing_file_error = "{'Message': 'putAndRegister: This file GUID already exists for another file. Please remove it and try again. True', 'OK': False}"
# In quiet mode, silence DIRAC's own chatter by redirecting stdout.
if(not verbose):
	old_stdout = sys.stdout
	sys.stdout = open(os.devnull, 'w')
else:
	print '************** BEGIN LOG **************'
for se in ses:
	print '************** TESTING ' + se + ' **************'
	# Upload the test file; on a GUID collision retry with the next remote
	# name, otherwise delete the uploaded copy and record the outcome.
	while (True):
		# Try adding a test file
		output = dirac.addFile(testdir+testfile_remote, testfile_local, se, printOutput=False)
		if(str(output) == existing_file_error):
			testfile_remote = str(int(testfile_remote)+1)
		else:
			# Remove the test file
			dirac.removeFile(testdir+testfile_remote)
			break
	# For verbose mode only
	if(verbose):
		print '\n' + se + ': ' + str(output) + '\n'
	if(not output['OK']):
		ses_not_working.append(se)
	else:
		ses_working.append(se)
os.remove(testfile_local)
# Restore stdout before printing the summary tables.
if(not verbose):
	sys.stdout.close()
	sys.stdout = old_stdout
######################## PRINT THE WORKING & NONWORKING SEs ############################
print '*********** NOT WORKING SEs ***********'
for se in ses_not_working:
	print se
print ''
print '************* WORKING SEs *************'
for se in ses_working:
	print se
print ''
| [
"paulin.todev@gmail.com"
] | paulin.todev@gmail.com |
c6febe93a7c825b0dd53b639a3418ffc40f29a54 | 698589fe8cb915d84696617222741a1075d5dbd7 | /source/conf.py | 3cea99212da0fd0bb7f60fad600041c99c020372 | [] | no_license | StampedeRobotics2844/team-2844-docs | 563e441803dd42e1577cffb13c4e0db2090b73bb | 20f033c226bca00179b657db57611d2bcb366b43 | refs/heads/master | 2021-01-17T08:14:47.500905 | 2018-02-08T05:06:44 | 2018-02-08T05:06:44 | 83,870,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,924 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Stampede Robotics documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 3 23:23:11 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
from os.path import abspath, join, dirname
import sys
import sphinx_rtd_theme
# Make this conf.py's own directory importable so autodoc can find local modules.
sys.path.insert(0, abspath(join(dirname(__file__))))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Stampede Robotics'
copyright = '2017, Betty H. Fairfax'
author = 'Betty H. Fairfax'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE: version is hard-coded to the season year; bump this for each new season.
version = '2017'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'StampedeRobotics'
# -- Options for LaTeX output ---------------------------------------------
# Empty dict: all LaTeX settings use Sphinx defaults.
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'StampedeRobotics.tex', 'Stampede Robotics Documentation',
     'development team', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'stampederobotics', 'Stampede Robotics Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'StampedeRobotics', 'Stampede Robotics Documentation',
     author, 'StampedeRobotics', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| [
"alekmitrevski@hotmail.com"
] | alekmitrevski@hotmail.com |
a561508d8547852b7ef65dfd5c8fbe9103123a92 | 9a5ad43ce6add59f266074c463c402f4ff717dc5 | /leetcode/30_day_leetcoding_challenge/202006/20200602_delete_node_in_a_linked_list/delete_node_in_a_linked_list_solution_2.py | 64e9146c51e235c6950b063c4c2d5dc672f4a4fe | [] | no_license | arivolispark/datastructuresandalgorithms | 9cb1cd66f61ab22471d7378fce51f29fcf0ef553 | 57534898c17d058ef1dba2b1cb8cdcd8d1d2a41c | refs/heads/master | 2021-06-24T15:51:04.438627 | 2021-01-12T05:14:37 | 2021-01-12T05:14:37 | 84,909,655 | 0 | 1 | null | 2021-01-12T05:14:38 | 2017-03-14T05:38:16 | Python | UTF-8 | Python | false | false | 2,212 | py | """
Title: Delete Node in a Linked List
Write a function to delete a node (except the tail) in a
singly linked list, given only access to that node.
Given linked list -- head = [4,5,1,9], which looks like following:
Example 1:
Input: head = [4,5,1,9], node = 5
Output: [4,1,9]
Explanation: You are given the second node with value 5, the
linked list should become 4 -> 1 -> 9 after calling your function.
Input: head = [4,5,1,9], node = 1
Output: [4,5,9]
Explanation: You are given the third node with value 1, the linked list should become 4 -> 5 -> 9 after calling your function.
Note:
1) The linked list will have at least two elements.
2) All of the nodes' values will be unique.
3) The given node will not be the tail and it will always be a valid node of the linked list.
4) Do not return anything from your function.
"""
class ListNode:
    """Node of a singly linked list: a payload plus a pointer to the successor."""
    def __init__(self, x):
        # New nodes start detached (no successor).
        self.val, self.next = x, None
class Solution:
    def deleteNode(self, node) -> None:
        """Remove ``node`` from its list given access only to that node.

        Works by copying the successor's value into ``node`` and then
        splicing the successor out, so the node "becomes" the next one.
        Requires that ``node`` is not the tail.

        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.
        """
        successor = node.next
        node.val = successor.val
        node.next = successor.next
def display(head: ListNode) -> None:
    """Print the list's values left to right, space-separated, on one line."""
    node = head
    while node:
        print(node.val, end=" ")
        node = node.next
def get_test_case_3_input() -> ListNode:
    """Build the fixture list 1 -> 2 -> 3 -> 4 -> 5 and return its head."""
    head = ListNode(1)
    tail = head
    for value in (2, 3, 4, 5):
        tail.next = ListNode(value)
        tail = tail.next
    return head
"""
def get_test_case_3_output() -> TreeNode:
node_1 = TreeNode(1)
return node_1
def get_test_case_4_input() -> TreeNode:
node_1 = TreeNode(1)
node_2 = TreeNode(2)
node_1.left = node_2
return node_1
"""
def test(got, expected):
    """Print a one-line pass/fail comparison of ``got`` against ``expected``."""
    prefix = ' OK ' if got == expected else '  X '
    print('{} got: {} expected: {}'.format(prefix, repr(got), repr(expected)))
if __name__ == "__main__":
    solution = Solution()
    # Print the fixture list 1..5, delete its head-value in place, then
    # print the result (expected: 2 3 4 5).
    print()
    head = get_test_case_3_input()
    display(head)
    print()
    solution.deleteNode(head)
    display(head)
| [
"arivolispark@gmail.com"
] | arivolispark@gmail.com |
68be45faa6deaa787f9ad60ec23c6e89bbb3660b | 81816fb39c164d93deac7239f25d685411fb6c43 | /spennat_bit.py | 5ff845233a6a80a15b1402df5fe3a63b38dbb704 | [] | no_license | masashi-y/spennat | b2ff7ca0bb702711cc02a50270b7b955de428630 | b4dd13005c6e13b75ea63f1c8796901bfa6599ed | refs/heads/master | 2022-04-10T15:38:37.677882 | 2020-03-31T16:45:34 | 2020-03-31T16:45:34 | 241,525,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,643 | py | import sys
import math
import arff
import hydra
import numpy as np
import logging
import time
import random
import spacy
from spacy.tokens import Doc
from pathlib import Path
from omegaconf import DictConfig
from collections import Counter
import torch
from torch.utils.data import DataLoader, Dataset
from torch.autograd import variable
from torch.distributions.bernoulli import Bernoulli
import torch.nn as nn
import torch.nn.functional as F
from experiment_utils import TensorBoard
logger = logging.getLogger(__file__)
EPS = 1e-6
def get_device(gpu_id):
    """Map a GPU id to a torch.device; ``None`` or a negative id selects CPU."""
    use_cuda = gpu_id is not None and gpu_id >= 0
    return torch.device('cuda', gpu_id) if use_cuda else torch.device('cpu')
def onehot(x):
    """Expand a long tensor of {0, 1} labels with a trailing one-hot dim.

    Input shape ``S`` becomes ``S + (2,)``; output dtype is float32.
    """
    return F.one_hot(x, num_classes=2).to(torch.float)
# Registry of supported torch optimizers, keyed by lowercase name.
optimizers = {
    'adadelta': torch.optim.Adadelta,
    'adagrad': torch.optim.Adagrad,
    'lbfgs': torch.optim.LBFGS,
    'adam': torch.optim.Adam,
    'adamw': torch.optim.AdamW,
    'adamax': torch.optim.Adamax,
    'asgd': torch.optim.ASGD,
    'sgd': torch.optim.SGD,
    'rmsprop': torch.optim.RMSprop,
    'rprop': torch.optim.Rprop,
}


def optimizer_of(string, params):
    """Build an optimizer from a spec string, e.g. ``adagrad(lr=0.01, lr_decay=0)``.

    Arguments:
        string {str} -- optimizer name followed by keyword arguments in parens
        params {List[torch.Tensor]} -- parameters to learn

    Raises:
        ValueError -- if the spec is malformed or names an unknown optimizer

    Returns:
        torch.optim.Optimizer -- configured optimizer
    """
    index = string.find('(')
    if index == -1 or string[-1] != ')':
        raise ValueError(f'malformed optimizer spec: {string!r}')
    name = string[:index]
    try:
        optim_class = optimizers[name]
    except KeyError:
        # Bug fix: the original printed a message and then crashed with
        # NameError because optim_class was never bound; fail loudly instead.
        raise ValueError(
            f'Optimizer class "{name}" does not exist. '
            f'Please choose one among: {list(optimizers.keys())}')
    # NOTE(review): eval() on the argument list executes arbitrary code if the
    # spec string ever comes from untrusted input; safe only for trusted configs.
    kwargs = eval(f'dict{string[index:]}')
    return optim_class(params, **kwargs)
class PositionalEncoding(nn.Module):
    """Additive sinusoidal positional encoding followed by dropout.

    Precomputes a ``(max_len, 1, d_model)`` table of interleaved sine/cosine
    waves (even indices sin, odd indices cos) and adds the first ``seq_len``
    rows to the input, broadcasting over the batch dimension.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * div_term)
        table[:, 1::2] = torch.cos(positions * div_term)
        # Register as a buffer (moves with .to()/.cuda(), not a parameter).
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # x: (seq_len, batch, d_model); add the matching prefix of the table.
        return self.dropout(x + self.pe[:x.size(0), :])
class FeatureNetwork(nn.Module):
    """Linear projection (plus dropout) from encoder states to model width."""

    def __init__(self, hidden_size, cfg):
        super().__init__()
        # cfg supplies d_model (projection width) and the dropout rate.
        self.linear = nn.Linear(hidden_size, cfg.d_model)
        self.dropout = nn.Dropout(cfg.dropout)

    def forward(self, xs):
        """Project ``xs``.

        Arguments:
            xs {torch.Tensor} -- (source sent length, batch size, hidden size)

        Returns:
            torch.Tensor -- (source sent length, batch size, d_model)
        """
        return self.dropout(self.linear(xs))
class AttentionalEnergyNetwork(nn.Module):
    """Transformer-decoder energy network: scores a soft label assignment
    against the encoded source, returning one scalar energy per batch item."""
    def __init__(self, bit_size, cfg):
        super().__init__()
        # Embed the per-position bit probabilities into the model dimension.
        self.linear = nn.Linear(bit_size, cfg.d_model)
        self.dropout = nn.Dropout(cfg.dropout)
        # Final projection from d_model down to a scalar score per position.
        self.linear_out = nn.Linear(cfg.d_model, 1)
        self.pos_encoder = PositionalEncoding(cfg.d_model, cfg.dropout)
        # Decoder attends from target embeddings (tgt) to source potentials (memory).
        self.model = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(
                d_model=cfg.d_model,
                nhead=cfg.nhead,
                dim_feedforward=cfg.dim_feedforward,
                dropout=cfg.dropout,
                activation=cfg.activation
            ),
            cfg.num_layers
        )
    def forward(self, ys, potentials):
        """
        Arguments:
            ys {torch.Tensor} -- (target sent length, batch size, bit size, 2)
            potentials {torch.Tensor} -- (source sent length, batch size, hidden size)

        Returns:
            torch.Tensor -- (batch size,) scalar energy per example
        """
        # Only the probability of bit == 1 is embedded (index 1 of the last dim).
        ys = self.dropout(self.linear(ys[:, :, :, 1]))
        ys = self.pos_encoder(ys)
        ys = self.model(ys, potentials)
        # Per-position scalar scores, summed over the target length.
        # NOTE(review): .squeeze() drops ALL size-1 dims, so a batch of 1
        # would collapse the batch axis too -- confirm callers never pass batch=1.
        ys = self.linear_out(ys).squeeze().sum(0)
        return ys
class SPENModel(nn.Module):
    """Structured Prediction Energy Network with gradient-based inference.

    Inference maximizes (energy + entropy) over soft label assignments by
    entropic mirror descent; training uses a structured hinge-style objective
    comparing the inferred assignment's energy against the gold assignment's.
    """
    def __init__(self, hidden_size, bit_size, cfg):
        super().__init__()
        self.bit_size = bit_size
        self.hidden_size = hidden_size
        self.feature_network = FeatureNetwork(hidden_size, cfg.feature_network)
        self.global_network = AttentionalEnergyNetwork(bit_size, cfg.global_network)
        # Mirror-descent inference hyperparameters.
        self.inference_iterations = cfg.inference.iterations
        self.inference_learning_rate = cfg.inference.learning_rate
        self.inference_eps = cfg.inference.eps
        self.inference_region_eps = cfg.inference.region_eps
        self.use_sqrt_decay = cfg.inference.use_sqrt_decay
        self.entropy_coef = cfg.entropy_coef
    def _random_probabilities(self, batch_size, target_size, device=None):
        """returns a tensor with shape (target sent length, batch size, bit size, 2)
        that sums to one at the last dimension

        Note: the same random draw is shared across the batch (expanded, then
        made contiguous so each example can be updated independently).

        Arguments:
            batch_size {int} -- batch size
            target_size {int} -- target sent length
            device {torch.device} -- torch device

        Returns:
            torch.Tensor -- (target sent length, batch size, bit size, 2)
        """
        x = torch.rand(target_size, self.bit_size, 2, device=device)
        x[:, :, 0] = 1 - x[:, :, 1]
        return x[:, None, :, :].expand(-1, batch_size, -1, -1).contiguous()
    def _gradient_based_inference(self, potentials, max_target_length):
        """Entropic mirror descent on the simplex over each bit's {0,1} choice.

        Arguments:
            potentials {torch.Tensor} -- (source sent length, batch size, hidden size)

        Returns:
            torch.Tensor -- (target sent length, batch size, bit size, 2)
        """
        batch_size = potentials.size(1)
        # Potentials are fixed during inference; only `pred` is optimized.
        potentials = potentials.detach()
        pred = self._random_probabilities(
            batch_size, max_target_length, potentials.device)
        prev = pred
        prev_energy = prev.new_full((batch_size,), -float('inf'))
        for iteration in range(1, self.inference_iterations):
            self.global_network.zero_grad()
            pred = pred.detach().requires_grad_()
            # Objective: energy plus entropy bonus (the sum term is negative entropy).
            energy = self.global_network(pred, potentials) \
                - self.entropy_coef * (pred * torch.log(pred + EPS)).sum(dim=(0,2,3))
            # Early stop when the objective change is below tolerance.
            eps = torch.abs(energy - prev_energy).max().item()
            if self.inference_eps is not None and eps < self.inference_eps:
                break
            prev_energy = energy.detach()
            energy.sum().backward()
            # Step size decays with iteration count (1/sqrt(t) or 1/t).
            if self.use_sqrt_decay:
                lr = self.inference_learning_rate / np.sqrt(iteration)
            else:
                lr = self.inference_learning_rate / iteration
            lr_grad = lr * pred.grad
            # Multiplicative (exponentiated-gradient) update; subtracting the
            # max is for numerical stability before renormalizing.
            max_grad, _ = lr_grad.max(dim=-1, keepdim=True)
            pred = pred * torch.exp(lr_grad - max_grad)
            pred = pred / (pred.sum(dim=-1, keepdim=True) + EPS)
            # Also stop when the iterate itself stops moving.
            eps = (prev - pred).norm(dim=-1).max().item()
            if self.inference_region_eps is not None and eps < self.inference_region_eps:
                break
            prev = pred
        return pred
    def loss(self, xs, ys):
        """Structured loss: inferred energy minus gold energy plus entropy bonus.

        Arguments:
            xs {torch.Tensor} -- (source sent length, batch size, hidden size)
            ys {torch.Tensor} -- (target sent length, batch size, bit size, 2)

        Returns:
            torch.Tensor -- scalar mean loss over the batch
        """
        max_target_length = ys.size(0)
        potentials = self.feature_network(xs)
        preds = self._gradient_based_inference(potentials, max_target_length)
        # Clear gradients accumulated during inference before the training pass.
        self.zero_grad()
        pred_energy = self.global_network(preds, potentials)
        true_energy = self.global_network(ys, potentials)
        loss = pred_energy - true_energy \
            - self.entropy_coef * (preds * torch.log(preds + EPS)).sum(dim=(0,2,3))
        return loss.mean()
    def predict_beliefs(self, xs, max_target_length):
        """Run inference and return per-bit marginal beliefs.

        Arguments:
            xs {torch.Tensor} -- (source sent length, batch size, hidden size)
            max_target_length {int} -- max target sentence length

        Returns:
            torch.Tensor -- (max target sent length, batch size, bit size, 2)
        """
        potentials = self.feature_network(xs)
        preds = self._gradient_based_inference(potentials, max_target_length)
        return preds
    def predict(self, xs, max_target_length):
        """Hard predictions: argmax over each bit's {0, 1} belief.

        Arguments:
            xs {torch.Tensor} -- (source sent length, batch size, hidden size)
            max_target_length {int} -- max target sentence length

        Returns:
            torch.Tensor -- (max target sent length, batch size, bit size)
        """
        preds = self.predict_beliefs(xs, max_target_length)
        return preds.argmax(dim=3)
def bit_representation(num, max_size):
    """Return ``num`` as a tuple of ``max_size`` bits, most significant first."""
    bit_str = bin(num)[2:].rjust(max_size, '0')
    return tuple(map(int, bit_str))


class BitVocab(object):
    """Vocabulary mapping words to fixed-width bit tuples by frequency rank.

    Words are ranked by count (most frequent first; 1-based rank) and rank
    ``r`` is encoded as the binary representation of ``r``.  Lookup works in
    both directions: ``vocab[word] -> bits`` and ``vocab[bits] -> word``,
    where bits may be a tuple, list, 1-D torch.Tensor, or 1-D np.ndarray.
    """

    def __init__(self, counter, count_threshold=1):
        # Inject the special tokens so they are always rankable.
        counter['UNK'] = len(counter)
        counter['MASK'] = len(counter)
        common_vocab = [
            word for word, count in counter.most_common()
            if count >= count_threshold
        ]
        # Number of bits needed to encode the largest (1-based) rank.
        self.bit_size = len(bin(len(common_vocab))) - 2
        self.word2bits = {
            word: bit_representation(rank, self.bit_size)
            for rank, word in enumerate(common_vocab, 1)
        }
        self.bits2word = {
            bits: word for word, bits in self.word2bits.items()
        }

    def __len__(self):
        return len(self.word2bits)

    def __getitem__(self, key):
        """Word -> bit tuple (UNK bits if absent), or bits -> word ('UNK' if absent).

        Raises:
            KeyError -- for unsupported key types
        """
        if isinstance(key, str):
            return self.word2bits.get(key, self.word2bits['UNK'])
        # Bug fix: this must be ONE if/elif chain.  Previously the tuple check
        # was a separate `if ... pass` statement, so tuple keys fell through to
        # the final `else` and raised KeyError instead of being looked up.
        if isinstance(key, tuple):
            pass  # already in canonical form
        elif isinstance(key, list):
            key = tuple(key)
        elif isinstance(key, (torch.Tensor, np.ndarray)):
            assert len(key.shape) == 1 and key.shape[0] == self.bit_size
            key = tuple(map(int, key))
        else:
            raise KeyError(f'unacceptable key type: {type(key)}')
        return self.bits2word.get(key, 'UNK')
class FraEngDataset(Dataset):
    """Parallel French/English dataset pairing spaCy-encoded source docs with
    bit-encoded target sentences and per-example metadata."""
    def __init__(self,
                 source_docs,
                 target_bits,
                 meta_data,
                 device=None):
        # The three parallel lists must align one-to-one.
        assert len(source_docs) == len(target_bits) == len(meta_data)
        self.source_docs = source_docs
        self.target_bits = target_bits
        self.meta_data = meta_data
        self.device = device
    def __len__(self):
        return len(self.source_docs)
    def __getitem__(self, index):
        """
        Arguments:
            index {int} -- element index

        Returns:
            torch.Tensor -- (source sent length, hidden size) e.g., output of BERT
            torch.Tensor -- (target sent length, bit size) bit representation of target sentence
            dict -- meta data concerning the example
        """
        # `._.trf_last_hidden_state` is the spacy-transformers extension
        # attribute holding the final transformer layer states.
        xs = torch.from_numpy(
            self.source_docs[index]._.trf_last_hidden_state)
        ys = self.target_bits[index]
        xs = xs.to(self.device)
        ys = ys.to(self.device)
        meta = self.meta_data[index]
        return xs, ys, meta
def load_fra_eng_dataset(file_path,
                         spacy_model,
                         device=None,
                         num_samples=10000,
                         target_vocab_threshold=1):
    """Load a tab-separated translation corpus and build the dataset + vocab.

    Each line is expected as ``source<TAB>target<TAB>...``.  Targets are
    lowercased, stripped of ./,/! punctuation, tokenized on whitespace, and
    bit-encoded via a frequency-ranked BitVocab.  Sources are run through the
    full spaCy pipeline (minus the sentencizer) to get transformer states.

    Returns:
        (FraEngDataset, BitVocab)
    """
    source_docs = []
    target_sents = []
    meta_data = []
    target_word_count = Counter()
    with open(file_path, 'r', encoding='utf-8') as f:
        lines = f.read().split('\n')
        # len(lines) - 1 skips the trailing empty string after the final newline.
        for line in lines[: min(num_samples, len(lines) - 1)]:
            x, y, _ = line.split('\t')
            y = y.lower().replace('.', '').replace(',', '').replace('!', '')
            meta_data.append({ 'source': x, 'target': y })
            source_docs.append(spacy_model.tokenizer(x))
            y = y.split()
            target_word_count.update([word for word in y])
            target_sents.append(y)
    with spacy_model.disable_pipes(['sentencizer']):
        # Run each remaining pipeline component over all docs in batches.
        for _, proc in spacy_model.pipeline:
            source_docs = proc.pipe(source_docs, batch_size=32)
        source_docs = list(source_docs)
    vocab = BitVocab(target_word_count,
                     count_threshold=target_vocab_threshold)
    target_bits = [
        torch.tensor(
            [vocab[word] for word in sent],
            dtype=torch.long) for sent in target_sents
    ]
    # Also record targets with out-of-vocab words replaced by 'UNK', for logging.
    for meta in meta_data:
        meta['target_unked'] = \
            ' '.join(word if word in vocab.word2bits else 'UNK' \
                for word in meta['target'].split(' '))
    dataset = FraEngDataset(
        source_docs, target_bits, meta_data, device=device)
    return dataset, vocab
def collate_fun(batch):
    """Batch collation: pad variable-length source/target tensors.

    Arguments:
        batch {List[Tuple[torch.Tensor, torch.Tensor, dict]]} -- dataset items

    Returns:
        torch.Tensor -- (max source sent length, batch size, hidden size)
        torch.Tensor -- (max target sent length, batch size, bit size)
        Tuple[dict, ...] -- per-example meta data
    """
    sources, targets, metas = zip(*batch)
    pad = torch.nn.utils.rnn.pad_sequence
    return pad(sources), pad(targets), metas
def save_model(model, file_path):
    """Serialize the model's state dict to ``file_path``."""
    state = model.state_dict()
    with open(file_path, 'wb') as f:
        torch.save(state, f)
def load_model(model, file_path):
    """Restore the model's parameters from a state dict saved at ``file_path``."""
    with open(file_path, 'rb') as f:
        state = torch.load(f)
    model.load_state_dict(state)
def test(model, dataset, cfg, threshold=None):
    """Evaluate the model's bit predictions over ``dataset``.

    Sweeps decision thresholds (or uses the single given one), accumulating
    accuracy / precision / recall / F1, and returns the metrics at the
    threshold with the best F1.

    Arguments:
        model {SPENModel} -- model exposing predict_beliefs
        dataset {Dataset} -- evaluation data
        cfg {DictConfig} -- needs cfg.batch_size
        threshold {float or None} -- fixed threshold; None sweeps 0.05..0.75

    Returns:
        ((acc, prec, rec, f1), best_threshold)
    """
    num_vars = 0
    if threshold is None:
        thresholds = np.arange(0.05, 0.80, 0.05)
    else:
        # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented, equivalent replacement.
        thresholds = np.array([threshold], dtype=float)
    total_accs = np.zeros_like(thresholds)
    total_precs = np.zeros_like(thresholds)
    total_recs = np.zeros_like(thresholds)
    total_f1s = np.zeros_like(thresholds)
    for xs, ys, _ in DataLoader(dataset,
            batch_size=cfg.batch_size, collate_fn=collate_fun):
        num_vars += ys.numel()
        # Belief that each bit equals 1: (tgt len, batch, bit size).
        node_beliefs = model.predict_beliefs(xs, ys.size(0))[:, :, :, 1]
        for i, threshold in enumerate(thresholds):
            preds = (node_beliefs > threshold).long()
            # Per-example counts, summed over length and bit dimensions.
            correct = (preds * ys).sum((0,2)).float()
            prec = correct / (preds.sum((0,2)).float() + EPS)
            rec = correct / (ys.sum((0,2)).float() + EPS)
            total_accs[i] += (preds == ys).float().sum()
            total_recs[i] += rec.sum()
            total_precs[i] += prec.sum()
            total_f1s[i] += ((2 * prec * rec) / (prec + rec + EPS)).sum()
    # Accuracy is per-variable; the other metrics are averaged per example.
    accs = total_accs / num_vars
    precs = total_precs / len(dataset)
    recs = total_recs / len(dataset)
    f1s = total_f1s / len(dataset)
    best = f1s.argmax()
    return (accs[best], precs[best], recs[best], f1s[best]), thresholds[best]
def train(model, vocab, dataset, val_data, cfg, train_logger, val_logger):
    """Train the SPEN model, periodically validating and checkpointing.

    Tracks the best accuracy (on val_data if given, else on the training set)
    via module-level globals shared with the nested ``validation`` closure.
    """
    global best_acc, best_epoch, best_threshold
    best_acc, best_epoch, best_threshold = 0., -1, -1
    def validation(epoch):
        # Evaluate, log, and checkpoint; updates the best-so-far globals.
        global best_acc, best_epoch, best_threshold
        model.eval()
        (acc, prec, rec, f1), threshold = test(model, dataset, cfg)
        logger.info(
            f'train results: {(acc, prec, rec, f1)}, (threshold: {threshold})')
        train_logger.plot_for_current_epoch('F1', f1)
        train_logger.plot_for_current_epoch('Accuracy', acc)
        if val_data is not None:
            # When validation data exists, it overrides the train metrics below.
            (acc, prec, rec, f1), threshold = test(model, val_data, cfg)
            logger.info(
                f'val results: {(acc, prec, rec, f1)}, (threshold: {threshold})')
            val_logger.plot_for_current_epoch('F1', f1)
            val_logger.plot_for_current_epoch('Accuracy', acc)
        if acc > best_acc:
            best_acc = acc
            best_epoch = epoch
            if cfg.tune_thresholds:
                best_threshold = threshold
            logger.info('new best accuracy found, saving model')
            save_model(model, 'best_model')
        # A per-epoch checkpoint is written regardless of improvement.
        save_model(model, f'model_checkpoint_{epoch}')
        subset = 'val' if val_data is not None else 'train'
        logger.info(
            f'best {subset} results (epoch {best_epoch}, threshold {best_threshold}): {best_acc}')
    train_data_loader = DataLoader(dataset,
        batch_size=cfg.batch_size,
        shuffle=True,
        drop_last=True,
        collate_fn=collate_fun)
    # Build the optimizer from its config spec string, e.g. "adam(lr=0.001)".
    optimizer = optimizer_of(
        cfg.optimizer,
        [param for param in model.parameters() if param.requires_grad]
    )
    for epoch in range(cfg.num_epochs):
        logger.info(f'epoch {epoch + 1}')
        train_logger.update_epoch()
        val_logger.update_epoch()
        if epoch % cfg.val_interval == 0:
            validation(epoch)
            # Log one randomly chosen example's decoded prediction for inspection.
            # NOTE(review): random.randint is inclusive of len(dataset), so this
            # can index one past the end -- presumably should be len(dataset) - 1.
            index = random.randint(0, len(dataset))
            x, y, meta_data = dataset[index]
            pred = model.predict_beliefs(x[:, None, :], y.size(0))[:, 0, :, 1]
            pred = ' '.join(vocab[bits] for bits in (pred > best_threshold).long())
            logger.info(f'source: {meta_data["source"]}')
            logger.info(f'gold: {meta_data["target"]}')
            logger.info(f'gold (unked): {meta_data["target_unked"]}')
            logger.info(f'pred: {pred}')
        avg_loss, count = 0, 0
        for xs, ys, _ in train_data_loader:
            # Expand target bits into one-hot form expected by the energy network.
            ys = onehot(ys)
            model.train()
            model.zero_grad()
            loss = model.loss(xs, ys)
            logger.info(
                f'loss of batch {count + 1}/{len(train_data_loader)}: {loss.item()}')
            loss.backward()
            avg_loss += loss
            count += 1
            if cfg.clip_grad:
                nn.utils.clip_grad_value_(model.parameters(), cfg.clip_grad)
            elif cfg.clip_grad_norm:
                nn.utils.clip_grad_norm_(
                    model.parameters(), cfg.clip_grad_norm)
            optimizer.step()
        train_logger.plot_obj_val((avg_loss / count).item())
    # Final evaluation/checkpoint after the last epoch.
    validation(cfg.num_epochs)
@hydra.main(config_path='configs/spennat.yaml')
def main(cfg: DictConfig) -> None:
    """Entry point: load data, build the SPEN model, and run training."""
    logger.info(cfg.pretty())
    device = get_device(cfg.device)
    if cfg.device >= 0:
        spacy.prefer_gpu(cfg.device)
    spacy_model = spacy.load(cfg.spacy_model)
    # Hydra changes the working directory, so dataset paths must be absolutized.
    dataset, vocab = load_fra_eng_dataset(
        hydra.utils.to_absolute_path(cfg.dataset),
        spacy_model,
        device=device,
        num_samples=cfg.num_samples,
        target_vocab_threshold=cfg.target_vocab_threshold)
    logger.info(f'target language vocab size: {len(vocab)}')
    # Source hidden size comes from the spacy-transformers tok2vec component.
    hidden_size = spacy_model.get_pipe('trf_tok2vec').token_vector_width
    max_bit_size = vocab.bit_size
    model = SPENModel(hidden_size, max_bit_size, cfg).to(device)
    with TensorBoard('spennat_train') as train_logger, \
            TensorBoard('spennat_val') as val_logger:
        # No separate validation set is provided; train() then validates on
        # the training data itself.
        train(model,
              vocab,
              dataset,
              None,
              cfg,
              train_logger,
              val_logger)
if __name__ == '__main__':
    main()
"yoshikawa.masashi.yh8@is.naist.jp"
] | yoshikawa.masashi.yh8@is.naist.jp |
47ee0980f00c137e35d556c0f6302f18b101ef9e | 147b0e8a6cdbc96d257d5aa9079c946182aede67 | /RL-Berkeley/hw1/cs285/infrastructure/rl_trainer.py | 733c922871cda2151f6995625364168741b475f5 | [] | no_license | Helloworld10011/Class-projects | 8397a4b6665ca3eb530c5146f489c91aebaef911 | 1ef46f4b7097a5565d36d85a0ab78d1c2f50d70a | refs/heads/main | 2023-02-10T13:34:31.747595 | 2021-01-01T20:18:23 | 2021-01-01T20:18:23 | 308,373,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,409 | py | import time
from collections import OrderedDict
import pickle
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import gym
import os
from cs285.infrastructure.utils import *
from cs285.infrastructure.tf_utils import create_tf_session
from cs285.infrastructure.logger import Logger
# params for saving rollout videos to tensorboard
MAX_NVIDEO = 2
MAX_VIDEO_LEN = 40
class RL_Trainer(object):
    """Generic RL training harness: builds the gym env, TF session, and agent,
    then runs the data-collection / (optional DAgger relabel) / train /
    evaluate loop."""
    def __init__(self, params):
        #############
        ## INIT
        #############
        # Get params, create logger, create TF session
        self.params = params
        self.logger = Logger(self.params['logdir'])
        self.sess = create_tf_session(self.params['use_gpu'], which_gpu=self.params['which_gpu'])
        # Set random seeds
        seed = self.params['seed']
        tf.set_random_seed(seed)
        np.random.seed(seed)
        #############
        ## ENV
        #############
        # Make the gym environment
        self.env = gym.make(self.params['env_name'])
        self.env.seed(seed)
        # Maximum length for episodes (fall back to the env's own limit)
        self.params['ep_len'] = self.params['ep_len'] or self.env.spec.max_episode_steps
        # Is this env continuous, or self.discrete?
        discrete = isinstance(self.env.action_space, gym.spaces.Discrete)
        self.params['agent_params']['discrete'] = discrete
        # Observation and action sizes
        ob_dim = self.env.observation_space.shape[0]
        ac_dim = self.env.action_space.n if discrete else self.env.action_space.shape[0]
        self.params['agent_params']['ac_dim'] = ac_dim
        self.params['agent_params']['ob_dim'] = ob_dim
        # simulation timestep, will be used for video saving
        # (MuJoCo envs expose `model`; other envs advertise fps in metadata)
        if 'model' in dir(self.env):
            self.fps = 1/self.env.model.opt.timestep
        else:
            self.fps = self.env.env.metadata['video.frames_per_second']
        #############
        ## AGENT
        #############
        agent_class = self.params['agent_class']
        self.agent = agent_class(self.sess, self.env, self.params['agent_params'])
        #############
        ## INIT VARS
        #############
        ## TODO initialize all of the TF variables (that were created by agent, etc.)
        ## HINT: use global_variables_initializer
        self.sess.run(tf.global_variables_initializer())
        # Monotonic counter used to index per-train-step loss points in TensorBoard.
        self.counter=0
    def run_training_loop(self, n_iter, collect_policy, eval_policy,
                          initial_expertdata=None, relabel_with_expert=False,
                          start_relabel_with_expert=1, expert_policy=None):
        """
        Run the full training loop.

        :param n_iter:  number of (dagger) iterations
        :param collect_policy:
        :param eval_policy:
        :param initial_expertdata: path to a pickle of expert rollouts (used at itr 0)
        :param relabel_with_expert:  whether to perform dagger
        :param start_relabel_with_expert: iteration at which to start relabel with expert
        :param expert_policy:
        """
        # init vars at beginning of training
        self.total_envsteps = 0
        self.start_time = time.time()
        for itr in range(n_iter):
            print("\n\n********** Iteration %i ************"%itr)
            # decide if videos should be rendered/logged at this iteration
            # NOTE(review): both branches assign False, so video logging is
            # effectively disabled -- confirm this is intentional.
            if itr % self.params['video_log_freq'] == 0 and self.params['video_log_freq'] != -1:
                self.log_video = False
            else:
                self.log_video = False
            # decide if metrics should be logged
            if itr % self.params['scalar_log_freq'] == 0:
                self.log_metrics = True
            else:
                self.log_metrics = False
            # collect trajectories, to be used for training
            training_returns = self.collect_training_trajectories(itr,
                                initial_expertdata, collect_policy,
                                self.params['batch_size']) ## TODO implement this function below
            paths, envsteps_this_batch, train_video_paths = training_returns
            self.total_envsteps += envsteps_this_batch
            # relabel the collected obs with actions from a provided expert policy
            if relabel_with_expert and itr>=start_relabel_with_expert:
                paths = self.do_relabel_with_expert(expert_policy, paths) ## TODO implement this function below
            # add collected data to replay buffer
            self.agent.add_to_replay_buffer(paths)
            # train agent (using sampled data from replay buffer)
            loss= self.train_agent() ## TODO implement this function below
            print("itr #", itr, ":", " loss:", loss)
            # log/save
            if self.log_video or self.log_metrics:
                # perform logging
                print('\nBeginning logging procedure...')
                self.perform_logging(itr, paths, eval_policy, train_video_paths)
                # save policy
                print('\nSaving agent\'s actor...')
                self.agent.actor.save(self.params['logdir'] + '/policy_itr_'+str(itr))
    ####################################
    ####################################
    def collect_training_trajectories(self, itr, load_initial_expertdata, collect_policy, batch_size):
        """
        :param itr:
        :param load_initial_expertdata:  path to expert data pkl file
        :param collect_policy:  the current policy using which we collect data
        :param batch_size:  the number of transitions we collect
        :return:
            paths: a list trajectories
            envsteps_this_batch: the sum over the numbers of environment steps in paths
            train_video_paths: paths which also contain videos for visualization purposes
        """
        # TODO decide whether to load training data or use
        # HINT: depending on if it's the first iteration or not,
            # decide whether to either
                # load the data. In this case you can directly return as follows
                # ``` return loaded_paths, 0, None ```
                # collect data, batch_size is the number of transitions you want to collect.
        if itr==0:
            # First iteration: seed with the expert demonstrations on disk.
            with open(load_initial_expertdata, 'rb') as f:
                paths = pickle.load(f)
            return paths, 0, None
        # TODO collect data to be used for training
        # HINT1: use sample_trajectories from utils
        # HINT2: you want each of these collected rollouts to be of length self.params['ep_len']
        else:
            print("\nCollecting data to be used for training...")
            paths, envsteps_this_batch = sample_trajectories(self.env, collect_policy, batch_size, self.params['ep_len'])
            #paths= self.batching_data(paths, batch_size)
            # collect more rollouts with the same policy, to be saved as videos in tensorboard
            # note: here, we collect MAX_NVIDEO rollouts, each of length MAX_VIDEO_LEN
            train_video_paths = None
            if self.log_video:
                print('\nCollecting train rollouts to be used for saving videos...')
                ## TODO look in utils and implement sample_n_trajectories
                train_video_paths = sample_n_trajectories(self.env, collect_policy, MAX_NVIDEO, MAX_VIDEO_LEN, True)
            return paths, envsteps_this_batch, train_video_paths
    def batching_data(self, paths, batch_size):
        """Subsample each path's transitions down to batch_size (currently unused).

        NOTE(review): this looks broken as written --
        np.random.choice(np.arange(n)) returns a scalar, so slicing it with
        [:batch_size] would raise; also "action": obs[acs] indexes obs by the
        actions rather than selecting acs[indecis].  Its only call site above
        is commented out.
        """
        batched_pathed=[]
        for path in paths:
            obs= path["observation"]
            imobs= path["image_obs"]
            rews= path["reward"]
            acs= path["action"]
            nobs= path["next_observation"]
            ters= path["terminal"]
            n = obs.shape[0]
            indecis = np.random.choice(np.arange(n))[:batch_size]
            batched_pathed.append({"observation": obs[indecis], "image_obs": imobs[indecis], "reward": rews[indecis],
                "action": obs[acs], "next_observation": nobs[indecis], "terminal": ters[indecis]})
        return batched_pathed
    def train_agent(self):
        """Run num_agent_train_steps_per_iter gradient steps on replay-buffer
        samples, logging each step's loss; returns the final step's loss."""
        print('\nTraining agent using sampled data from replay buffer...')
        # Print progress roughly five times over the training steps.
        step= self.params['num_agent_train_steps_per_iter']//5
        for train_step in range(self.params['num_agent_train_steps_per_iter']):
            # TODO sample some data from the data buffer
            # HINT1: use the agent's sample function
            # HINT2: how much data = self.params['train_batch_size']
            ob_batch, ac_batch, re_batch, next_ob_batch, terminal_batch = self.agent.sample(self.params['train_batch_size'])
            # TODO use the sampled data for training
            # HINT: use the agent's train function
            loss= self.agent.train(ob_batch, ac_batch, re_batch, next_ob_batch, terminal_batch)
            self.logger.log_scalar(loss, "losses", self.counter)
            self.counter+=1
            if train_step%step ==0:
                print("train step #", train_step, " loss: ", loss)
        return loss
    def do_relabel_with_expert(self, expert_policy, paths):
        """DAgger relabeling: overwrite each path's actions with the expert's."""
        print("\nRelabelling collected observations with labels from an expert policy...")
        # TODO relabel collected obsevations (from our policy) with labels from an expert policy
        # HINT: query the policy (using the get_action function) with paths[i]["observation"]
        # and replace paths[i]["action"] with these expert labels
        for path in paths:
            obs= path["observation"]
            acs= expert_policy.get_action(obs)
            path['action']= acs
        return paths
    ####################################
    ####################################
    def perform_logging(self, itr, paths, eval_policy, train_video_paths):
        """Collect eval rollouts, optionally save videos, and log train/eval
        return statistics to TensorBoard."""
        # collect eval trajectories, for logging
        print("\nCollecting data for eval...")
        eval_paths, eval_envsteps_this_batch = sample_trajectories(self.env, eval_policy, self.params['eval_batch_size'], self.params['ep_len'])
        # save eval rollouts as videos in tensorboard event file
        if self.log_video and train_video_paths != None:
            print('\nCollecting video rollouts eval')
            eval_video_paths = sample_n_trajectories(self.env, eval_policy, MAX_NVIDEO, MAX_VIDEO_LEN, True)
            #save train/eval videos
            print('\nSaving train rollouts as videos...')
            self.logger.log_paths_as_videos(train_video_paths, itr, fps=self.fps, max_videos_to_save=MAX_NVIDEO,
                                            video_title='train_rollouts')
            self.logger.log_paths_as_videos(eval_video_paths, itr, fps=self.fps,max_videos_to_save=MAX_NVIDEO,
                                             video_title='eval_rollouts')
        # save eval metrics
        if self.log_metrics:
            # returns, for logging
            train_returns = [path["reward"].sum() for path in paths]
            eval_returns = [eval_path["reward"].sum() for eval_path in eval_paths]
            # episode lengths, for logging
            train_ep_lens = [len(path["reward"]) for path in paths]
            eval_ep_lens = [len(eval_path["reward"]) for eval_path in eval_paths]
            # decide what to log
            logs = OrderedDict()
            logs["Eval_AverageReturn"] = np.mean(eval_returns)
            logs["Eval_StdReturn"] = np.std(eval_returns)
            logs["Eval_MaxReturn"] = np.max(eval_returns)
            logs["Eval_MinReturn"] = np.min(eval_returns)
            logs["Eval_AverageEpLen"] = np.mean(eval_ep_lens)
            logs["Train_AverageReturn"] = np.mean(train_returns)
            logs["Train_StdReturn"] = np.std(train_returns)
            logs["Train_MaxReturn"] = np.max(train_returns)
            logs["Train_MinReturn"] = np.min(train_returns)
            logs["Train_AverageEpLen"] = np.mean(train_ep_lens)
            logs["Train_EnvstepsSoFar"] = self.total_envsteps
            logs["TimeSinceStart"] = time.time() - self.start_time
            if itr == 0:
                self.initial_return = np.mean(train_returns)
            logs["Initial_DataCollection_AverageReturn"] = self.initial_return
            # perform the logging
            for key, value in logs.items():
                print('{} : {}'.format(key, value))
                self.logger.log_scalar(value, key, itr)
            print('Done logging...\n\n')
            self.logger.flush()
| [
"m.mahdi.sabbaghi@gmail.com"
] | m.mahdi.sabbaghi@gmail.com |
c282e3019bc4b8cd348f605c55b6bed98f461797 | 2222a927fbdd9546733147a4855b48e50804e128 | /117_PopulatingNextRightPointersInEachNodeII.py | 0c6eabd1c1ec6a3ff47223e6ef916dea18ceb681 | [] | no_license | angelar0107/LeetCodeRecord | 65bc53807253f1535a1318711854b41959ddbd73 | 4accbf4e3512033416e90151aedc1797ef99356f | refs/heads/master | 2020-05-09T19:05:14.403581 | 2019-07-04T12:48:19 | 2019-07-04T12:48:19 | 181,366,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | """
# Definition for a Node.
class Node:
def __init__(self, val, left, right, next):
self.val = val
self.left = left
self.right = right
self.next = next
"""
from collections import deque
class Solution:
    """LeetCode 117: populate each node's ``next`` pointer with the node to
    its right on the same level (or None), for an arbitrary binary tree."""

    def connect(self, root: 'Node') -> 'Node':
        """Level-order BFS: after popping a node, the queue's front is its
        right neighbour unless the node ends its level."""
        if not root:
            return
        pending = deque([root])
        while pending:
            width = len(pending)
            for idx in range(width):
                node = pending.popleft()
                if node.left:
                    pending.append(node.left)
                if node.right:
                    pending.append(node.right)
                # Last node of the level keeps next == None.
                if idx != width - 1:
                    node.next = pending[0]
        return root

    # Constant extra space solution
    def connect2(self, root):
        """O(1)-space recursion: walk the current (already linked) level via
        ``next`` pointers and chain its children behind a dummy head."""
        if not root:
            return
        dummy = root
        tail = first = Node(0, None, None, None)
        node = root
        while node:
            if node.left:
                tail.next = node.left
                tail = tail.next
            if node.right:
                tail.next = node.right
                tail = tail.next
            node = node.next
        self.connect2(first.next)
        return dummy
| [
"al3804@columbia.edu"
] | al3804@columbia.edu |
1e2758d7083a3511d081e23f2398400010b9e14f | 4eb4ef4b2c4b848a6382a942d8bd76b668ae3668 | /django/core/wsgi.pyi | 626e216d9a934152d9311dab6c504035f1780b4d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | suutari/mypy-django | 3efa9369fd31a03dfa4c89c173dfe7928526101a | 0bcbcb2e13bd89752ae489465690596a8a03549a | refs/heads/master | 2021-04-09T17:02:05.937950 | 2018-07-26T12:23:31 | 2018-07-26T12:23:31 | 125,708,762 | 1 | 1 | null | 2018-03-18T09:14:55 | 2018-03-18T09:14:55 | null | UTF-8 | Python | false | false | 99 | pyi | from django.core.handlers.wsgi import WSGIHandler
def get_wsgi_application() -> WSGIHandler: ...  # stub: returns Django's WSGI entry-point handler
| [
"tuomas@nepnep.net"
] | tuomas@nepnep.net |
3ac431c651416db7e8b8bd8732d24001a67016a2 | 5e8d86f6ddfd516b9768e8617ced0baca8112f4c | /core-python/Core_Python/loop/ForLoopDominoEx.py | 811f9121f51aa999d3f8922e54f65f566aac9aab | [
"MIT"
] | permissive | bharat-kadchha/tutorials | 0a96ce5a3da1a0ceb39a0d464c8f3e2ff397da7c | cd77b0373c270eab923a6db5b9f34c52543b8664 | refs/heads/master | 2022-12-23T11:49:34.042820 | 2020-10-06T03:51:20 | 2020-10-06T03:51:20 | 272,891,375 | 1 | 0 | MIT | 2020-06-17T06:04:33 | 2020-06-17T06:04:33 | null | UTF-8 | Python | false | false | 123 | py | for left in range(1,7):
for right in range(left,7):
print("["+str(left)+"|"+str(right)+"]",end=" ")
print() | [
"deeppatel.dd@gmail.com"
] | deeppatel.dd@gmail.com |
d39ef6b3d8ca0a8378abbe743fa61457c3906ac3 | 501d84506c049f901edb1f738b85ebaf6cc36044 | /Codes/rotate list.py | 6300d0943fa2a026f3377419a88321928abea845 | [] | no_license | PavanKumar1564/Tcs-Training | 0795b23a649f8a468b083be6b2929bbc46402fb3 | 10f81b2db81e18f267caa7a7eedd6f9fbf484bce | refs/heads/main | 2023-06-19T22:35:32.531077 | 2021-07-16T16:45:38 | 2021-07-16T16:45:38 | 386,316,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | class solution():
def rotateRight(self,head,k):
if not head or not head.next:
return head
cur=head
n=1
while cur.next:
n+=1
cur=cur.next
cur.next = head
m = n-k%n
i=0
cur=head
while i < m:
prev=cur
cur=cur.next
i+=1
prev.next=None
head=cur
return head
| [
"noreply@github.com"
] | PavanKumar1564.noreply@github.com |
5da7466dc8dcdc602da223bede82f82c695bd584 | 17377437f885a990d5e68aac4aa6fe87919cacc7 | /JSON/ts/location.py | 507409f79a2bf3a9c3ce79cc6946d15b18a95a2f | [] | no_license | UjwalManjunath/NetworkMonitor | fbcf5f66d95e999a2d7e7fe1d2aab7d42589731f | ea90d171ebbbf0a946090e49f398301167a6fbec | refs/heads/master | 2021-01-19T20:16:06.432165 | 2013-05-24T05:12:12 | 2013-05-24T05:12:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | class Location:
    def __init__(self, name, numComputers):
        """Initialize a site record for the network monitor.

        :param name: human-readable site name, stored as ``location``
        :param numComputers: number of computers expected at this site
        """
        self.location = name
        self.numComputers = numComputers
        # Device inventories; start empty and are filled in by the caller.
        self.computers = []
        self.switches = []
        self.routers = []
        self.core = []
        # Currently selected / backup devices; None until assigned.
        self.activeSwitch = None
        self.activeRouter = None
        self.backupRouter = None
        self.activeCore = None
| [
"muddu1990@gmail.com"
] | muddu1990@gmail.com |
6f9d11811dd5d1998252787ad28c6656bcf12ff9 | 6b5919c619c5395e75dd139f19250d0efb1af41d | /TechnoFussion/task2/migrations/0001_initial.py | a208bd4bee3886a22dd6fb2e24aecb2a5da6f235 | [] | no_license | frasulov/TechnoFusionTasks | 97200dbc77ac29c2475425975d15e696c92cfe3e | 842cd7c0c6e5cb5343515e5ad60ef2ce2a7457a4 | refs/heads/main | 2023-07-08T02:44:47.377495 | 2021-08-06T12:37:41 | 2021-08-06T12:37:41 | 383,592,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py | # Generated by Django 3.2.6 on 2021-08-06 10:13
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial schema for the task2 app (Django 3.2.6).

    Creates the Labels, Request and SuppSeq tables.  NOTE(review): generated
    code — prefer regenerating via ``makemigrations`` over hand-editing, and
    keep field names in sync with models.py (e.g. 'fullBarchQty' looks like a
    typo for 'fullBatchQty' but must match the model).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Printed label instances, keyed back to a Request by requestId.
        migrations.CreateModel(
            name='Labels',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('requestId', models.UUIDField(editable=False)),
                ('labelNo', models.CharField(max_length=16)),
            ],
        ),
        # A label-generation request for one supplier/material combination.
        migrations.CreateModel(
            name='Request',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('status', models.CharField(choices=[('Open', 'Open'), ('Generated', 'Generated')], default='Open', max_length=15)),
                ('suppCode', models.CharField(max_length=3, unique=True)),
                ('suppName', models.CharField(max_length=100)),
                ('materialName', models.CharField(max_length=100)),
                ('materialSort', models.CharField(max_length=50)),
                ('category', models.CharField(max_length=20)),
                ('fullBarchQty', models.IntegerField()),
                ('fullLabelsQty', models.IntegerField()),
                ('lastBatchQty', models.IntegerField()),
                ('lastLabelQty', models.IntegerField()),
            ],
        ),
        # Per-(year, supplier) sequence counter used for label numbering.
        migrations.CreateModel(
            name='SuppSeq',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('year', models.IntegerField(default=21)),
                ('suppCode', models.CharField(max_length=3)),
                ('seq', models.IntegerField(default=0)),
            ],
            options={
                'unique_together': {('year', 'suppCode')},
            },
        ),
    ]
| [
"frasulov4671@ada.edu.az"
] | frasulov4671@ada.edu.az |
37232fb8e64d32f2d89f207611c6c86157e4e56d | 79310dd01678d383a70078b571590a8868c63c88 | /tests/test_inventory_sortkeys.py | df8cae4dc3eb6fc5752cd580e371236e3d937844 | [] | no_license | aarongilman/capgains | 2ed4f9ed50768f983b14cb451a5cb76c612f92b9 | 705083962da5cec060f0409f322c9b3cce5dcb1a | refs/heads/master | 2020-07-03T18:52:49.992844 | 2019-07-23T15:43:39 | 2019-07-23T15:43:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,299 | py | # coding: utf-8
"""
Unit tests for capgains.inventory.sortkeys
"""
# stdlib imports
import unittest
from decimal import Decimal
from datetime import datetime
# local imports
from capgains.inventory import (
FIFO,
LIFO,
MINGAIN,
MAXGAIN,
Lot,
Trade,
)
class SortTestCase(unittest.TestCase):
    """Unit tests for the FIFO/LIFO/MINGAIN/MAXGAIN sort configurations.

    The original tests repeated ~30 lines of Trade/Lot construction per case;
    the fixture logic now lives in _make_lot() so each test states only the
    data that matters (uniqueid, datetime, price).  Expected orderings are
    unchanged from the original tests.
    """

    @staticmethod
    def _make_lot(uniqueid, dt, price=None, currency=None):
        """Build a Lot wrapping a minimal Trade.

        Unpriced lots (FIFO/LIFO fixtures) reuse the Trade as their
        createtransaction; priced lots (MINGAIN/MAXGAIN fixtures) leave
        createtransaction unset — mirroring the original fixtures exactly.
        """
        tx = Trade(
            uniqueid=uniqueid,
            datetime=dt,
            fiaccount="",
            security="",
            cash=None,
            currency=None,
            units=None,
        )
        return Lot(
            opentransaction=tx,
            createtransaction=tx if price is None else None,
            units=None,
            price=price,
            currency=currency,
        )

    def testFifoSort(self):
        """
        FIFO sorts first by Lot.opentransaction.datetime,
        then by Lot.opentransaction.uniqueid
        """
        lot1 = self._make_lot("b", datetime(2005, 10, 3))
        lot2 = self._make_lot("c", datetime(2005, 10, 4))
        lot3 = self._make_lot("a", datetime(2005, 10, 3))
        position = [lot2, lot1, lot3]
        position.sort(**FIFO)
        # Earliest datetime first; uniqueid "a" wins the 10/3 tie.
        self.assertEqual(position, [lot3, lot1, lot2])

    def testLifoSort(self):
        """
        LIFO sorts first by Lot.opentransaction.datetime,
        then by Lot.opentransaction.uniqueid
        """
        lot1 = self._make_lot("b", datetime(2005, 10, 3))
        lot2 = self._make_lot("c", datetime(2005, 10, 4))
        lot3 = self._make_lot("a", datetime(2005, 10, 3))
        position = [lot3, lot1, lot2]
        position.sort(**LIFO)
        self.assertEqual(position, [lot2, lot1, lot3])

    def testMinGainSort(self):
        """
        MINGAIN sorts first by Lot.price, then by Lot.opentransaction.uniqueid
        """
        lot1 = self._make_lot("b", datetime(2005, 10, 3),
                              price=Decimal("10"), currency="USD")
        lot2 = self._make_lot("c", datetime(2005, 10, 3),
                              price=Decimal("9.5"), currency="USD")
        lot3 = self._make_lot("a", datetime(2005, 10, 3),
                              price=Decimal("10"), currency="USD")
        position = [lot1, lot2, lot3]
        position.sort(**MINGAIN)
        # Highest price first; uniqueid "a" before "b" among the 10s.
        self.assertEqual(position, [lot3, lot1, lot2])

    def testMaxGainSort(self):
        """
        MAXGAIN sorts first by Lot.price, then by Lot.opentransaction.uniqueid
        """
        lot1 = self._make_lot("b", datetime(2001, 1, 1),
                              price=Decimal("10"), currency="USD")
        lot2 = self._make_lot("c", datetime(2001, 1, 1),
                              price=Decimal("9.5"), currency="USD")
        lot3 = self._make_lot("a", datetime(2001, 1, 2),
                              price=Decimal("10"), currency="USD")
        position = [lot1, lot2, lot3]
        position.sort(**MAXGAIN)
        self.assertEqual(position, [lot2, lot3, lot1])
if __name__ == "__main__":
    # Allow running this test module directly without a separate runner.
    unittest.main()
| [
"csingley@gmail.com"
] | csingley@gmail.com |
11b0e5001481b4a892ad8f36fc9d2286e8c364dd | e8fd3ccd11641897d946dc7eef4935d832c88181 | /user/mark/formula/behavioral_cloning/train.py | b51a63e7324e623a197b724a7079a45a9d87e0c3 | [] | no_license | kwiwon/formula2018 | ad42d3ce356ac8abb3096fbe955cb4bef8e8de54 | 7199637d2e0283673d7eac9f44becfe6a618184f | refs/heads/master | 2022-12-12T14:58:31.620489 | 2018-09-30T12:59:52 | 2018-09-30T12:59:52 | 150,723,477 | 0 | 1 | null | 2022-12-08T02:24:26 | 2018-09-28T10:20:44 | C++ | UTF-8 | Python | false | false | 5,712 | py | import tensorflow as tf
import pandas as pd
import numpy as np
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.models import Sequential, Model
from keras.layers import Dropout, Activation, Lambda
from keras.layers import Input, Flatten, Dense, ELU
from keras.callbacks import EarlyStopping
from scipy import misc
flags = tf.app.flags
FLAGS = flags.FLAGS
# command line flags
# Training schedule: a short warm-up pass for the new top layers
# (features_epochs), then a longer end-to-end fine-tuning pass (full_epochs).
flags.DEFINE_integer('features_epochs', 1,
                     'The number of epochs when training features.')
flags.DEFINE_integer('full_epochs', 100,
                     'The number of epochs when end-to-end training.')
flags.DEFINE_integer('batch_size', 128, 'The batch size.')
flags.DEFINE_integer('samples_per_epoch', 1280,
                     'The number of samples per epoch.')
# Input geometry: frames are resized to img_w wide; img_h is the row count
# after cropping in img_pre_processing (the commented alternative kept the
# pre-crop 100-row height).
flags.DEFINE_integer('img_h', 60, 'The image height.')
#flags.DEFINE_integer('img_h', 100, 'The image height.')
flags.DEFINE_integer('img_w', 200, 'The image width.')
flags.DEFINE_integer('img_c', 3, 'The number of channels.')
def img_pre_processing(img, old=False):
    """Resize, crop (current pipeline only) and normalise a camera frame.

    The legacy path (old=True) resizes to 140 rows and keeps them all; the
    current path resizes to 100 rows and drops the top 40 rows before
    normalising.  Returns a float image with values in [-1, 1].
    """
    target_rows = 140 if old else 100
    # NOTE(review): scipy.misc.imresize is deprecated/removed in modern SciPy.
    img = misc.imresize(img, (target_rows, FLAGS.img_w)).astype('float')
    if not old:
        img = img[40:]
    # Map pixel values from [0, 255] to [-1, 1].
    return (img / 255. - 0.5) * 2.
def img_paths_to_img_array(image_paths):
    """Load every image path into a single float numpy array."""
    loaded = [misc.imread(path) for path in image_paths]
    return np.array(loaded, dtype='float')
def save_model(model):
    """Persist a Keras model: architecture to model.json, weights to model.h5."""
    architecture = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(architecture)
    model.save_weights('model.h5')
def select_specific_set(iter_set):
    """Load images and labels for the rows of a driving-log iterator.

    Each row yields one (image, [angle, throttle, break]) sample; images are
    mirrored 50% of the time (with the steering angle negated) as a cheap
    augmentation.  Returns (images, labels) as numpy arrays.
    """
    imgs, labs = [], []
    for _, row in iter_set:
        # Rebase the absolute path recorded by the simulator onto ./data.
        img_file = 'data' + row['img'].split('../..')[1]
        image = img_pre_processing(misc.imread(img_file))
        angle = row['angle']
        throttle = row['throttle']
        brake = row['break']
        # flip 50% of the time
        if np.random.choice([True, False]):
            image, angle = np.fliplr(image), -angle + 0.
        imgs.append(image)
        labs.append([angle, throttle, brake])
    return np.array(imgs), np.array(labs)
def generate_batch(log_data):
    """Endless generator of (images, labels) training batches.

    Each batch is FLAGS.batch_size rows sampled at random from the driving
    log and loaded/augmented via select_specific_set.
    """
    while True:
        sampled = log_data.sample(FLAGS.batch_size).iterrows()
        imgs, labs = select_specific_set(sampled)
        yield np.array(imgs), np.array(labs)
def main(_):
    """Train a VGG16-based behavioural-cloning model in two phases.

    Phase 1 trains only the new fully-connected head with the convolutional
    base frozen; phase 2 unfreezes the top two conv blocks and fine-tunes
    end-to-end with early stopping, then saves the model to disk.
    Uses the Keras 1.x API (init=, nb_epoch=, fit_generator).
    """
    # fix random seed for reproducibility
    np.random.seed(123)
    # read the training driving log
    with open('data/Log/driving_log.csv', 'rb') as f:
        log_data = pd.read_csv(
            f, header=None,
            names=['img', 'angle',
                   'throttle', 'break', 'speed', 'time', 'lap'])
    print("Got", len(log_data), "samples for training")
    # read the validation driving log
    # NOTE(review): the validation set is sampled from the same log that
    # generate_batch draws training batches from, so it can overlap the
    # training data — confirm this is intended.
    X_val, y_val = select_specific_set(
        log_data.sample(int(len(log_data)*.10)).iterrows())
    print("Got", len(X_val), "samples for validation")
    # create and train the model
    input_shape = (FLAGS.img_h, FLAGS.img_w, FLAGS.img_c)
    input_tensor = Input(shape=input_shape)
    # get the VGG16 network (ImageNet weights, no classifier head)
    base_model = VGG16(input_tensor=input_tensor,
                       weights='imagenet',
                       include_top=False)
    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # add the fully-connected
    # layer similar to the NVIDIA paper
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.3)(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.1)(x)
    # three outputs: angle, throttle, break (matches select_specific_set labels)
    predictions = Dense(3, init='zero')(x)
    # create the full model
    model = Model(input=base_model.input, output=predictions)
    # freeze all convolutional layers to initialize the top layers
    for layer in base_model.layers:
        layer.trainable = False
    # train the top layer to prepare all weights
    model.compile(optimizer='adam', loss='mse')
    print('Train fully-connected layers weights:')
    history = model.fit_generator(
        generate_batch(log_data),
        samples_per_epoch=FLAGS.samples_per_epoch,
        nb_epoch=FLAGS.features_epochs,
        verbose=1)
    # print all layers
    print("Network architecture:")
    for i, layer in enumerate(model.layers):
        print(i, layer.name)
    # for VGG we choose to include the
    # top 2 blocks in training (layer index 11 splits the frozen bottom
    # from the trainable top — see the architecture printout above)
    for layer in model.layers[:11]:
        layer.trainable = False
    for layer in model.layers[11:]:
        layer.trainable = True
    # recompile and train with a finer learning rate
    opt = Adam(lr=1e-03, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.7)
    model.compile(optimizer=opt, loss='mse')
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=1,
                                   min_delta=0.00009)
    print('Train top 2 conv blocks and fully-connected layers:')
    history = model.fit_generator(
        generate_batch(log_data),
        samples_per_epoch=FLAGS.samples_per_epoch,
        validation_data=(X_val, y_val),
        nb_epoch=FLAGS.full_epochs,
        callbacks=[early_stopping],
        verbose=1)
    # save model to disk
    save_model(model)
    print('model saved')
# parses flags and calls the `main` function above
# (tf.app.run parses FLAGS from argv, then invokes main with the remainder)
if __name__ == '__main__':
    tf.app.run()
| [
"mark_yang@trendmicro.com"
] | mark_yang@trendmicro.com |
7d7f982e88dc574bb2ed1b7b4f0f6c36f495a5a7 | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/addons/archipack/presets/archipack_stair/l_wood_over_concrete.py | d4fc1344a54ccd723bdcb01aad8a5764c427b8b4 | [
"Unlicense",
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
... | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 5,867 | py | import bpy
d = bpy.context.active_object.data.archipack_stair[0]
d.steps_type = 'CLOSED'
d.handrail_slice_right = True
d.total_angle = 6.2831854820251465
d.user_defined_subs_enable = True
d.string_z = 0.30000001192092896
d.nose_z = 0.029999999329447746
d.user_defined_subs = ''
d.idmat_step_side = '3'
d.handrail_x = 0.03999999910593033
d.right_post = True
d.left_post = True
d.width = 1.5
d.subs_offset_x = 0.0
d.rail_mat.clear()
item_sub_1 = d.rail_mat.add()
item_sub_1.name = ''
item_sub_1.index = '4'
d.step_depth = 0.30000001192092896
d.rail_z = (0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806)
d.right_subs = False
d.left_panel = True
d.idmat_handrail = '3'
d.da = 1.5707963705062866
d.post_alt = 0.0
d.left_subs = False
d.n_parts = 3
d.user_defined_post_enable = True
d.handrail_slice_left = True
d.handrail_profil = 'SQUARE'
d.handrail_expand = False
d.panel_alt = 0.25
d.post_expand = False
d.subs_z = 1.0
d.rail_alt = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
d.panel_dist = 0.05000000074505806
d.panel_expand = False
d.x_offset = 0.0
d.subs_expand = False
d.idmat_post = '4'
d.left_string = False
d.string_alt = -0.03999999910593033
d.handrail_y = 0.03999999910593033
d.radius = 1.0
d.string_expand = False
d.post_z = 1.0
d.idmat_top = '3'
d.idmat_bottom = '1'
d.parts.clear()
item_sub_1 = d.parts.add()
item_sub_1.name = ''
item_sub_1.manipulators.clear()
item_sub_2 = item_sub_1.manipulators.add()
item_sub_2.name = ''
item_sub_2.p0 = (0.0, 0.0, 1.4040000438690186)
item_sub_2.prop1_name = 'length'
item_sub_2.p2 = (1.0, 0.0, 0.0)
item_sub_2.normal = (0.0, 0.0, 1.0)
item_sub_2.pts_mode = 'SIZE'
item_sub_2.p1 = (0.0, 4.0, 1.4040000438690186)
item_sub_2.prop2_name = ''
item_sub_2.type_key = 'SIZE'
item_sub_1.right_shape = 'RECTANGLE'
item_sub_1.radius = 0.699999988079071
item_sub_1.type = 'S_STAIR'
item_sub_1.length = 4.0
item_sub_1.left_shape = 'RECTANGLE'
item_sub_1.da = 1.5707963705062866
item_sub_1 = d.parts.add()
item_sub_1.name = ''
item_sub_1.manipulators.clear()
item_sub_2 = item_sub_1.manipulators.add()
item_sub_2.name = ''
item_sub_2.p0 = (-1.0, 4.0, 1.944000005722046)
item_sub_2.prop1_name = 'da'
item_sub_2.p2 = (0.0, 1.0, 0.0)
item_sub_2.normal = (0.0, 0.0, 1.0)
item_sub_2.pts_mode = 'RADIUS'
item_sub_2.p1 = (1.0, 0.0, 0.0)
item_sub_2.prop2_name = 'radius'
item_sub_2.type_key = 'ARC_ANGLE_RADIUS'
item_sub_1.right_shape = 'RECTANGLE'
item_sub_1.radius = 0.699999988079071
item_sub_1.type = 'C_STAIR'
item_sub_1.length = 2.0
item_sub_1.left_shape = 'RECTANGLE'
item_sub_1.da = 1.5707963705062866
item_sub_1 = d.parts.add()
item_sub_1.name = ''
item_sub_1.manipulators.clear()
item_sub_2 = item_sub_1.manipulators.add()
item_sub_2.name = ''
item_sub_2.p0 = (-1.0, 5.0, 2.700000047683716)
item_sub_2.prop1_name = 'length'
item_sub_2.p2 = (1.0, 0.0, 0.0)
item_sub_2.normal = (0.0, 0.0, 1.0)
item_sub_2.pts_mode = 'SIZE'
item_sub_2.p1 = (-3.0, 5.0, 2.700000047683716)
item_sub_2.prop2_name = ''
item_sub_2.type_key = 'SIZE'
item_sub_1.right_shape = 'RECTANGLE'
item_sub_1.radius = 0.699999988079071
item_sub_1.type = 'S_STAIR'
item_sub_1.length = 2.0
item_sub_1.left_shape = 'RECTANGLE'
item_sub_1.da = 1.5707963705062866
d.subs_bottom = 'STEP'
d.user_defined_post = ''
d.panel_offset_x = 0.0
d.idmat_side = '1'
d.right_string = False
d.idmat_raise = '1'
d.left_rail = False
d.parts_expand = False
d.panel_z = 0.6000000238418579
d.bottom_z = 0.029999999329447746
d.z_mode = 'STANDARD'
d.panel_x = 0.009999999776482582
d.post_x = 0.03999999910593033
d.presets = 'STAIR_L'
d.steps_expand = True
d.subs_x = 0.019999999552965164
d.subs_spacing = 0.10000000149011612
d.left_handrail = True
d.handrail_offset = 0.0
d.right_rail = False
d.idmat_panel = '5'
d.post_offset_x = 0.019999999552965164
d.idmat_step_front = '3'
d.rail_n = 1
d.string_offset = 0.0
d.subs_y = 0.019999999552965164
d.handrail_alt = 1.0
d.post_corners = False
d.rail_expand = False
d.rail_offset = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
d.rail_x = (0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806)
d.left_shape = 'RECTANGLE'
d.nose_y = 0.019999999552965164
d.nose_type = 'STRAIGHT'
d.handrail_extend = 0.10000000149011612
d.idmat_string = '3'
d.post_y = 0.03999999910593033
d.subs_alt = 0.0
d.right_handrail = True
d.idmats_expand = False
d.right_shape = 'RECTANGLE'
d.idmat_subs = '4'
d.handrail_radius = 0.019999999552965164
d.right_panel = True
d.post_spacing = 1.0
d.string_x = 0.019999999552965164
d.height = 2.700000047683716
| [
"admin@irradiate.net"
] | admin@irradiate.net |
c3f1f40c430acf8791af7d15a9c634c03815ed76 | 3b7b6648b72910046b6a227db30f71aeee2cba9c | /2021-03-08-SimpleRNN/StockReturnPredictionWithLSTM.py | f0ad401fbb2df1790b2b25eb955c0d967a9b1a7c | [] | no_license | ken2190/deep-learning-study | f2abeb1cd302e405a15bbb52188ae44ffb414e2f | f2998be89d0c931176f158ae5f48ca562786e171 | refs/heads/main | 2023-04-02T05:07:08.504212 | 2021-04-11T15:11:22 | 2021-04-11T15:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | from tensorflow.keras.layers import Input, SimpleRNN, GRU, Dropout, LSTM, Dense, Flatten, Softmax
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
from sklearn.preprocessing import LabelBinarizer, StandardScaler
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
# Daily SBUX prices; the "close" column is the raw series of interest.
df = pd.read_csv('https://raw.githubusercontent.com/lazyprogrammer/machine_learning_examples/master/tf2.0/sbux.csv')
series = df["close"].values.reshape(-1, 1)
# Standardise the series; the scaler is fit on the first half only,
# presumably to avoid leaking later-period statistics — TODO confirm split.
scalar = StandardScaler()
scalar.fit(series[:len(series) // 2])
series = scalar.transform(series).flatten()
# One-day simple return: (close_t - close_{t-1}) / close_{t-1}.
df["prevClose"] = df["close"].shift(1)
df["Return"] = (df["close"] - df["prevClose"])/df["prevClose"]
df["Return"].hist()
# NOTE(review): u and v appear unused in the visible remainder of the file.
u = np.array([1, 2])
v = np.array([3, 4])
| [
"machingclee@gmail.com"
] | machingclee@gmail.com |
651821d78f18c63d3f303790f14302cb0b128feb | c4fb83280a6607e47c41d94c388a2551cc0b977b | /common/time_layers.py | 80b67353d471f032e3d162fbe8451a0a9157ae5a | [] | no_license | nownabe/DeepLearningFromScratch2 | 93051fa0bfff808b6d4942860ce9c0590ed5f053 | d13887cc776c92600a1df5e1a9f2e9512f8a1d98 | refs/heads/master | 2021-07-02T21:53:58.202316 | 2018-12-08T11:24:57 | 2018-12-08T11:24:57 | 144,979,813 | 0 | 0 | null | 2022-03-13T07:36:40 | 2018-08-16T11:40:19 | Python | UTF-8 | Python | false | false | 8,677 | py | from common.np import *
from common.layers import *
from common.functions import sigmoid
class RNN:
    """One vanilla-RNN time step: h_next = tanh(x·Wx + h_prev·Wh + b)."""

    def __init__(self, Wx, Wh, b):
        self.params = [Wx, Wh, b]
        self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]
        self.cache = None

    def forward(self, x, h_prev):
        """Advance the hidden state one step; caches inputs for backward."""
        Wx, Wh, b = self.params
        h_next = np.tanh(np.dot(h_prev, Wh) + np.dot(x, Wx) + b)
        self.cache = (x, h_prev, h_next)
        return h_next

    def backward(self, dh_next):
        """Backprop one step; fills self.grads and returns (dx, dh_prev)."""
        Wx, Wh, _ = self.params
        x, h_prev, h_next = self.cache
        # d tanh(t)/dt = 1 - tanh(t)^2, and h_next = tanh(t).
        dt = dh_next * (1 - h_next ** 2)
        self.grads[0][...] = np.dot(x.T, dt)       # dWx
        self.grads[1][...] = np.dot(h_prev.T, dt)  # dWh
        self.grads[2][...] = np.sum(dt, axis=0)    # db
        return np.dot(dt, Wx.T), np.dot(dt, Wh.T)
class TimeRNN:
    """Unroll an RNN cell over the T steps of a (N, T, D) batch.

    With ``stateful=True`` the hidden state survives across forward() calls
    (truncated BPTT); reset_state() clears it.
    """

    def __init__(self, Wx, Wh, b, stateful=False):
        self.params = [Wx, Wh, b]
        self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]
        self.layers = None
        self.h, self.dh = None, None
        self.stateful = stateful

    def set_state(self, h):
        self.h = h

    def reset_state(self):
        self.h = None

    def forward(self, xs):
        """Run all T steps; returns the (N, T, H) stack of hidden states."""
        Wx = self.params[0]
        N, T, _ = xs.shape
        H = Wx.shape[1]
        hs = np.empty((N, T, H), dtype='f')
        self.layers = []
        # Start from zeros unless stateful and a carried state exists.
        if not self.stateful or self.h is None:
            self.h = np.zeros((N, H), dtype='f')
        for t in range(T):
            cell = RNN(*self.params)
            self.h = cell.forward(xs[:, t, :], self.h)
            hs[:, t, :] = self.h
            self.layers.append(cell)
        return hs

    def backward(self, dhs):
        """BPTT over the unrolled steps; sums per-step parameter grads."""
        Wx = self.params[0]
        N, T, _ = dhs.shape
        D = Wx.shape[0]
        dxs = np.empty((N, T, D), dtype='f')
        dh = 0
        totals = [0, 0, 0]
        for t in reversed(range(T)):
            cell = self.layers[t]
            dx, dh = cell.backward(dhs[:, t, :] + dh)
            dxs[:, t, :] = dx
            for i, g in enumerate(cell.grads):
                totals[i] += g
        for i, g in enumerate(totals):
            self.grads[i][...] = g
        self.dh = dh
        return dxs
class TimeEmbedding:
    """Apply the same Embedding lookup to every one of T time steps."""

    def __init__(self, W):
        self.params = [W]
        self.grads = [np.zeros_like(W)]
        self.layers = None
        self.W = W

    def forward(self, xs):
        """xs: (N, T) int ids -> (N, T, D) embedded vectors."""
        N, T = xs.shape
        D = self.W.shape[1]
        out = np.empty((N, T, D), dtype='f')
        self.layers = []
        for t in range(T):
            emb = Embedding(self.W)
            out[:, t, :] = emb.forward(xs[:, t])
            self.layers.append(emb)
        return out

    def backward(self, dout):
        """Accumulate the per-step embedding gradients into self.grads[0]."""
        T = dout.shape[1]
        total = 0
        for t in range(T):
            emb = self.layers[t]
            emb.backward(dout[:, t, :])
            total += emb.grads[0]
        self.grads[0][...] = total
        return None
class TimeAffine:
    """Affine (fully connected) layer applied independently at each time step.

    Implemented by folding the batch and time axes together, doing one
    matrix product, and unfolding again.
    """

    def __init__(self, W, b):
        self.params = [W, b]
        self.grads = [np.zeros_like(W), np.zeros_like(b)]
        self.x = None

    def forward(self, x):
        N, T, _ = x.shape
        W, b = self.params
        flat = x.reshape(N * T, -1)
        self.x = x
        return (np.dot(flat, W) + b).reshape(N, T, -1)

    def backward(self, dout):
        x = self.x
        N, T, _ = x.shape
        W, _ = self.params
        dout2d = dout.reshape(N * T, -1)
        flat = x.reshape(N * T, -1)
        self.grads[0][...] = np.dot(flat.T, dout2d)   # dW
        self.grads[1][...] = np.sum(dout2d, axis=0)   # db
        return np.dot(dout2d, W.T).reshape(*x.shape)
class TimeSoftmaxWithLoss:
    """Softmax + cross-entropy loss averaged over a whole (N, T, V) batch.

    Entries whose label equals ``ignore_label`` (-1) contribute neither loss
    nor gradient; the loss is averaged over the non-ignored entries only.
    """
    def __init__(self):
        self.params, self.grads = [], []
        self.cache = None
        self.ignore_label = -1

    def forward(self, xs, ts):
        """xs: (N, T, V) scores; ts: (N, T) int labels or (N, T, V) one-hot."""
        N, T, V = xs.shape
        if ts.ndim == 3:  # when teacher labels are given as one-hot vectors
            ts = ts.argmax(axis=2)
        mask = (ts != self.ignore_label)
        # fold the batch and time axes together (reshape)
        xs = xs.reshape(N * T, V)
        ts = ts.reshape(N * T)
        mask = mask.reshape(N * T)
        ys = softmax(xs)
        ls = np.log(ys[np.arange(N * T), ts])
        ls *= mask  # zero the loss of entries that match ignore_label
        loss = -np.sum(ls)
        loss /= mask.sum()
        self.cache = (ts, ys, mask, (N, T, V))
        return loss

    def backward(self, dout=1):
        """Standard softmax-CE gradient (y - t), masked and re-shaped."""
        ts, ys, mask, (N, T, V) = self.cache
        dx = ys  # NOTE: mutates the cached probabilities in place
        dx[np.arange(N * T), ts] -= 1
        dx *= dout
        dx /= mask.sum()
        dx *= mask[:, np.newaxis]  # zero the gradient of entries that match ignore_label
        dx = dx.reshape((N, T, V))
        return dx
class LSTM:
    """Single LSTM time step with fused gate weights.

    Wx has shape (D, 4H) and Wh has shape (H, 4H): one affine product A
    computes all four gates at once, sliced column-wise in the order
    forget (f), candidate (g), input (i), output (o).
    """
    def __init__(self, Wx, Wh, b):
        self.params = [Wx, Wh, b]
        self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]
        self.cache = None

    def forward(self, x, h_prev, c_prev):
        """Advance hidden state h and cell state c by one step."""
        Wx, Wh, b = self.params
        N, H = h_prev.shape
        # One fused affine transform for all four gates.
        A = np.dot(x, Wx) + np.dot(h_prev, Wh) + b
        f = A[:, :H]        # forget gate (pre-activation)
        g = A[:, H:2*H]     # cell candidate (pre-activation)
        i = A[:, 2*H:3*H]   # input gate (pre-activation)
        o = A[:, 3*H:]      # output gate (pre-activation)
        f = sigmoid(f)
        g = np.tanh(g)
        i = sigmoid(i)
        o = sigmoid(o)
        c_next = f * c_prev + g * i
        h_next = o * np.tanh(c_next)
        self.cache = (x, h_prev, c_prev, i, f, g, o, c_next)
        return h_next, c_next

    def backward(self, dh_next, dc_next):
        """Backprop one step; returns (dx, dh_prev, dc_prev)."""
        Wx, Wh, b = self.params
        x, h_prev, c_prev, i, f, g, o, c_next = self.cache
        tanh_c_next = np.tanh(c_next)
        # Gradient on c_next: direct path plus the h = o * tanh(c) path.
        ds = dc_next + (dh_next * o) * (1 - tanh_c_next ** 2)
        dc_prev = ds * f
        # Gradients w.r.t. post-activation gate values...
        di = ds * g
        df = ds * c_prev
        do = dh_next * tanh_c_next
        dg = ds * i
        # ...pushed through sigmoid' = s(1-s) and tanh' = 1 - tanh^2.
        di *= i * (1 - i)
        df *= f * (1 - f)
        do *= o * (1 - o)
        dg *= (1 - g ** 2)
        # Reassemble in the same column order used by forward: f, g, i, o.
        dA = np.hstack((df, dg, di, do))
        dWh = np.dot(h_prev.T, dA)
        dWx = np.dot(x.T, dA)
        db = dA.sum(axis=0)
        self.grads[0][...] = dWx
        self.grads[1][...] = dWh
        self.grads[2][...] = db
        dx = np.dot(dA, Wx.T)
        dh_prev = np.dot(dA, Wh.T)
        return dx, dh_prev, dc_prev
class TimeLSTM:
    """Unroll an LSTM cell over the T steps of a (N, T, D) batch.

    With ``stateful=True`` both the hidden and cell states persist across
    forward() calls (truncated BPTT); reset_state() clears them.
    """

    def __init__(self, Wx, Wh, b, stateful=False):
        self.params = [Wx, Wh, b]
        self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]
        self.layers = None
        self.h, self.c = None, None
        self.dh = None
        self.stateful = stateful

    def forward(self, xs):
        """Run all T steps; returns the (N, T, H) stack of hidden states."""
        N, T, _ = xs.shape
        H = self.params[1].shape[0]
        hs = np.empty((N, T, H), dtype='f')
        self.layers = []
        # Start from zero state unless stateful and a carried state exists.
        if not self.stateful or self.h is None:
            self.h = np.zeros((N, H), dtype='f')
        if not self.stateful or self.c is None:
            self.c = np.zeros((N, H), dtype='f')
        for t in range(T):
            cell = LSTM(*self.params)
            self.h, self.c = cell.forward(xs[:, t, :], self.h, self.c)
            hs[:, t, :] = self.h
            self.layers.append(cell)
        return hs

    def backward(self, dhs):
        """BPTT over the unrolled steps; sums per-step parameter grads."""
        N, T, _ = dhs.shape
        D = self.params[0].shape[0]
        dxs = np.empty((N, T, D), dtype='f')
        dh, dc = 0, 0
        totals = [0, 0, 0]
        for t in reversed(range(T)):
            cell = self.layers[t]
            dx, dh, dc = cell.backward(dhs[:, t, :] + dh, dc)
            dxs[:, t, :] = dx
            for i, g in enumerate(cell.grads):
                totals[i] += g
        for i, g in enumerate(totals):
            self.grads[i][...] = g
        self.dh = dh
        return dxs

    def set_state(self, h, c=None):
        self.h, self.c = h, c

    def reset_state(self):
        self.h, self.c = None, None
class TimeDropout:
    """Inverted dropout over a whole time-series batch.

    While training, each element is zeroed with probability
    ``dropout_ratio`` and survivors are scaled by ``1 / (1 - ratio)`` so
    the expected activation is unchanged; at inference the input passes
    through untouched.
    """
    def __init__(self, dropout_ratio=0.5):
        self.params, self.grads = [], []
        self.dropout_ratio = dropout_ratio
        self.mask = None
        self.train_flg = True
    def forward(self, xs):
        """Apply the dropout mask (training) or pass ``xs`` through (inference)."""
        if not self.train_flg:
            return xs
        keep = np.random.rand(*xs.shape) > self.dropout_ratio
        # Inverted dropout: rescale kept units now so inference needs nothing.
        self.mask = keep.astype(np.float32) / (1.0 - self.dropout_ratio)
        return xs * self.mask
    def backward(self, dout):
        """Route gradients through exactly the mask used in forward()."""
        return dout * self.mask
| [
"nownabe@gmail.com"
] | nownabe@gmail.com |
b39480fc0bba42df9b9814f41cceed13377c4c76 | e61cd634245c24fd4829c67d862b8c15c8ecb854 | /suorganizer/organizer/migrations/0001_initial.py | d3b4c9a687dcb7ed064426a36deeb20b4e197046 | [] | no_license | rdacso/django-blog-app | 5a8e7c3bd620d96bddc81ab4b9e4be6c71309df4 | c671e231d5219d0f3298ac5145f98e25c62a6c78 | refs/heads/master | 2021-01-22T06:23:56.324049 | 2017-06-05T19:44:50 | 2017-06-05T19:44:50 | 92,550,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,530 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-26 22:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the organizer app: NewsLink, Startup and Tag.

    Auto-generated by Django; the FK/M2M fields are added after the models
    so the referenced tables already exist.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # News article pointing at a startup (FK attached below).
        migrations.CreateModel(
            name='NewsLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=63)),
                ('pub_date', models.DateField(verbose_name='date published')),
                ('link', models.URLField(max_length=255)),
            ],
            options={
                'ordering': ['-pub_date'],
                'get_latest_by': 'pub_date',
                'verbose_name': 'news article',
            },
        ),
        # Startup profile; slug drives the URL configuration.
        migrations.CreateModel(
            name='Startup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=31)),
                ('slug', models.SlugField(help_text='A label for URL config.', max_length=31, unique=True)),
                ('description', models.TextField()),
                ('founded_date', models.DateField(verbose_name='date founded')),
                ('contact', models.EmailField(max_length=254)),
                ('website', models.URLField(max_length=255)),
            ],
            options={
                'ordering': ['name'],
                'get_latest_by': 'founded_date',
            },
        ),
        # Free-form tags, attached to startups through the M2M below.
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=31, unique=True)),
                ('slug', models.SlugField(help_text='A label for URL config.', max_length=31, unique=True)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.AddField(
            model_name='startup',
            name='tags',
            field=models.ManyToManyField(to='organizer.Tag'),
        ),
        migrations.AddField(
            model_name='newslink',
            name='startup',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organizer.Startup'),
        ),
    ]
| [
"rdacso@Rebeccas-MacBook-Pro.local"
] | rdacso@Rebeccas-MacBook-Pro.local |
cd62b3548931cafc3f6f4975c1975f1c364caf8b | 9d101687de02bb38d291c8c507ad40f6ca496fdc | /src/simulator/script/icu.py | 577cddf3c8b4b7f0526905f4e2711dedaec66830 | [] | no_license | tsoonjin/rostest | f5992120e8298d5a980c044d38afe6b011a83b59 | bd18e3f01ebb528918271006481e14a9e332e969 | refs/heads/master | 2021-05-28T12:26:49.088424 | 2015-01-13T06:48:12 | 2015-01-13T06:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | #!/usr/bin/env python
import sys
import signal
from PyQt4 import QtGui, QtCore
# Initial main-window size in pixels.
HEIGHT = 500
WIDTH = 500
class Main(QtGui.QMainWindow):
    """Top-level application window, sized WIDTH x HEIGHT and centered."""

    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        self.initUI()

    def initUI(self):
        """Apply the window settings: geometry centered on the desktop."""
        screen = QtGui.QDesktopWidget().screenGeometry()
        left = (screen.width() - self.geometry().width())/2
        top = (screen.height() - self.geometry().height())/2
        self.setGeometry(left, top, WIDTH, HEIGHT)
        self.setWindowTitle('I see what you did there')
if __name__ == '__main__':
    # Restore default Ctrl+C handling so the GUI can be killed from a terminal.
    signal.signal(signal.SIGINT, signal.SIG_DFL) #bypass any finally blocks
    # Standard PyQt bootstrap: build the app, show the window, enter the
    # Qt event loop and propagate its exit code.
    app = QtGui.QApplication(sys.argv)
    main = Main()
    main.show()
    sys.exit(app.exec_())
| [
"jinified@gmail.com"
] | jinified@gmail.com |
bd90ccb88420056f009940dd507adbd51709cce2 | bef18c34500697f3c9824f873e451f919f3f78c0 | /reddit/reddit/urls.py | 5b50ec19035490df1ac91df1de5da481002deef7 | [] | no_license | sparshnetmax/django_apiCall | 79fab9a8eecf74a23ad3bf2a2d3524dce6a8d455 | 28f19803a04df90c6eabd3c9cd4349440dbc018c | refs/heads/master | 2022-12-02T20:21:53.507702 | 2020-08-27T05:13:05 | 2020-08-27T05:13:05 | 288,354,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | """reddit URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from . import views
# URL table for the project.  Most routes map to local view callables;
# 'auth/' delegates to the redit_auth app's own URLconf.
urlpatterns = [
    path('', views.index),
    # NOTE(review): most routes here lack the conventional trailing slash
    # ('admin' vs 'admin/') -- confirm this is intentional.
    path('admin', admin.site.urls),
    path('redirect', views.redirectUri),
    path('authenticate',views.makeURL),
    path('newToken',views.newAccessToken),
    path('signout',views.exitFuction),
    path('auth/', include('redit_auth.urls')),
    path('userinfo',views.userinfo),
    path('checker',views.checker),
    path('showpost',views.show_post),
    path('makepost',views.makePost),
]
| [
"devnet1985@gmail.com"
] | devnet1985@gmail.com |
14e14add80032e04c6e82d148372cd9e1ea89a4a | dbe7731552d8e6d1e63cc0f2e27d3810cc61f350 | /hyper_paras/hp_a2c.py | 1265b7c895a914b699bf58d2d2719a54eb9e5c15 | [] | no_license | ZhangRui111/rl_breakout_tf | 6bb3f57f2b1d52f196323916393234e8abb990ac | 04f259cd3c32eaffbad87fe1035b0f87c96127b0 | refs/heads/master | 2020-04-08T19:24:16.018734 | 2018-12-18T02:42:56 | 2018-12-18T02:42:56 | 159,653,713 | 1 | 1 | null | 2018-12-18T02:42:57 | 2018-11-29T11:12:04 | Python | UTF-8 | Python | false | false | 356 | py | from hyper_paras.base_hyper_paras import BaseHyperparameters
class Hyperparameters(BaseHyperparameters):
    """Hyper-parameter set for the A2C (advantage actor-critic) agent."""
    def __init__(self):
        super().__init__()
        self.model = 'A2C'  # model identifier
        # Original comment read "50001 : 500" -- presumably the short-run
        # alternative value; confirm before relying on it.
        self.MAX_EPISODES = 50001
        # Separate step sizes for the actor and critic networks.
        self.LEARNING_RATE_ACTOR = 0.00005
        self.LEARNING_RATE_CRITIC = 0.0001
        self.DISCOUNT_FACTOR = 0.9  # reward discount (gamma)
| [
"zhangruisg111@163.com"
] | zhangruisg111@163.com |
0973fe3576d0bc447f0942f8b729bf17e709f51b | fc2047806ce64076961d8b18b4bd7680e465944c | /ex18.py | ca620e972a7132bf3025a69c949aeb7fc4697881 | [] | no_license | Dragoshiz/tutorial | 4e2551e3f07e8351fcda1720a7969b786c7b263f | 875c449f52ac3b34b572d0dcbb29eb7da01d305a | refs/heads/master | 2021-03-12T11:05:05.712675 | 2020-03-21T16:47:56 | 2020-03-21T16:47:56 | 246,615,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | ppdef print_two(*args):
arg1,arg2 = args
print("arg1: {}, arg2: {}".format(arg1,arg2))
def print_two_again(arg1, arg2):
    """Print two labelled values on one line (named parameters, no unpacking)."""
    message = "arg1: {}, arg2: {}".format(arg1, arg2)
    print(message)
def print_one(arg1):
    """Print a single labelled value."""
    line = "arg1: {}".format(arg1)
    print(line)
def print_none():
    """Print a fixed message; takes no arguments."""
    text = "I got nothin'."
    print(text)
print_two("zed", "shaw")
print_two_again("zed","shaw")
print_one("first")
print_none() | [
"dragosimbrea@gmail.com"
] | dragosimbrea@gmail.com |
7e11fd6bffade16b50990049c688e90b29754bf0 | 282769509af68245596dc73de42f552cfd73cd21 | /autoindex/watcher.py | d560ceaf60985c133ac610de4bc2a6e3972819c7 | [] | no_license | brutasse-archive/autoindex | 1130173d22c1d996a7cb38fcd59b51d07c0b8068 | cc5cfc414325aff133c684257e8c2bfdc9aaa672 | refs/heads/master | 2021-01-19T14:34:18.472167 | 2012-07-17T21:31:27 | 2012-07-17T21:31:27 | 5,048,409 | 15 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | import logging
import os
import signal
from pip.download import is_archive_file
from pyinotify import WatchManager, Notifier, ProcessEvent, EventsCodes
from .indexer import index
logger = logging.getLogger(__name__)
class IndexProcess(ProcessEvent):
    """pyinotify handler that queues package directories for reindexing.

    New sub-directories are added to the watch tree; create/modify/delete
    events on archive files queue the containing directory and arm a
    SIGALRM so the indexer (installed by watch()) runs after a short delay.
    """
    def __init__(self, wm, mask):
        self.wm = wm        # WatchManager used to extend the watch tree
        self.mask = mask    # event mask applied to newly watched directories
        self.queue = set()  # directories waiting to be (re)indexed
    def update_watch(self, directory):
        # Also watch directories created after startup.
        self.wm.add_watch(directory, mask=self.mask)
    def process_IN_CREATE(self, event):
        logger.debug("Created {0}".format(event.pathname))
        if os.path.isdir(event.pathname):
            self.update_watch(event.pathname)
        else:
            self.index_alarm(event)
    def process_IN_MODIFY(self, event):
        logger.debug("Modified {0}".format(event.pathname))
        self.index_alarm(event)
    def process_IN_DELETE(self, event):
        logger.debug("Deleted {0}".format(event.pathname))
        self.index_alarm(event)
    def index_alarm(self, event):
        """Queue the file's directory and (re)arm a 5-second SIGALRM timer.

        setitimer replaces any pending timer, so bursts of file activity
        are debounced: indexing fires once things settle for 5 seconds.
        """
        if is_archive_file(event.pathname):
            logger.debug("Queuing indexing")
            self.queue.add(os.path.dirname(event.pathname))
            signal.setitimer(signal.ITIMER_REAL, 5)
def watch(directory):
    """Recursively watch ``directory`` and reindex it as archives change.

    Blocks forever in the pyinotify notifier loop; the actual index runs
    happen inside the SIGALRM handler armed by IndexProcess.index_alarm().
    """
    logger.info("Watching {0}".format(directory))
    flags = EventsCodes.ALL_FLAGS
    mask = flags['IN_CREATE'] | flags['IN_MODIFY'] | flags['IN_DELETE']
    wm = WatchManager()
    wm.add_watch(directory, mask, rec=True)
    process = IndexProcess(wm, mask)
    notifier = Notifier(wm, process)
    def update_index(*args):
        # SIGALRM handler: drain the queue, one directory per index() call.
        while process.queue:
            # This is slightly sub-optimal, would be better to pop all
            # elements at once but this operation needs to be atomic.
            dist_dir = process.queue.pop()
            index(directory, only=[dist_dir])
    signal.signal(signal.SIGALRM, update_index)
    notifier.loop()
| [
"buburno@gmail.com"
] | buburno@gmail.com |
361cd2e499e2e4d832d2c9ca80d1ee52a864faa2 | 658ef062541cdf37ede7ec22d9297830852c63ff | /tools/generate_detections.py | 049ecfcac2d17f815615fc4631c180f7a8a63695 | [] | no_license | Adrien2511/Keras-sort | 66009b37a3a4ed8ffa008e781bbc165101e22910 | 361ead118b35670dfeaae4dd369eff2454327322 | refs/heads/main | 2023-04-29T10:59:59.810890 | 2021-04-23T08:32:23 | 2021-04-23T08:32:23 | 360,814,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,065 | py | # vim: expandtab:ts=4:sw=4
import os
import errno
import argparse
import numpy as np
import cv2
import tensorflow as tf
def _run_in_batches(f, data_dict, out, batch_size):
data_len = len(out)
num_batches = int(data_len / batch_size)
s, e = 0, 0
for i in range(num_batches):
s, e = i * batch_size, (i + 1) * batch_size
batch_data_dict = {k: v[s:e] for k, v in data_dict.items()}
out[s:e] = f(batch_data_dict)
if e < len(out):
batch_data_dict = {k: v[e:] for k, v in data_dict.items()}
out[e:] = f(batch_data_dict)
def extract_image_patch(image, bbox, patch_shape):
    """Extract image patch from bounding box.
    Parameters
    ----------
    image : ndarray
        The full image.
    bbox : array_like
        The bounding box in format (x, y, width, height).
    patch_shape : Optional[array_like]
        This parameter can be used to enforce a desired patch shape
        (height, width). First, the `bbox` is adapted to the aspect ratio
        of the patch shape, then it is clipped at the image boundaries.
        If None, the shape is computed from :arg:`bbox`.
    Returns
    -------
    ndarray | NoneType
        An image patch showing the :arg:`bbox`, optionally reshaped to
        :arg:`patch_shape`.
        Returns None if the bounding box is empty or fully outside of the image
        boundaries.
    """
    bbox = np.array(bbox)
    if patch_shape is not None:
        # Widen/narrow the box to the target aspect ratio, keeping it
        # horizontally centered.
        target_aspect = float(patch_shape[1]) / patch_shape[0]
        new_width = target_aspect * bbox[3]
        bbox[0] -= (new_width - bbox[2]) / 2
        bbox[2] = new_width
    # convert (x, y, w, h) to top left / bottom right corners
    bbox[2:] += bbox[:2]
    # Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is
    # the exact equivalent.
    bbox = bbox.astype(int)
    # clip at image boundaries
    bbox[:2] = np.maximum(0, bbox[:2])
    bbox[2:] = np.minimum(np.asarray(image.shape[:2][::-1]) - 1, bbox[2:])
    if np.any(bbox[:2] >= bbox[2:]):
        return None
    sx, sy, ex, ey = bbox
    image = image[sy:ey, sx:ex]
    # Bug fix: the docstring promises patch_shape=None keeps the bbox's own
    # shape, but the old code unconditionally called cv2.resize and crashed
    # on ``None[::-1]``.  Only resize when a target shape was requested.
    if patch_shape is not None:
        image = cv2.resize(image, tuple(patch_shape[::-1]))
    return image
class ImageEncoder(object):
    """Runs a frozen TensorFlow graph mapping image patches to feature vectors."""
    def __init__(self, checkpoint_filename, input_name="images",
                 output_name="features"):
        self.session = tf.Session()  # create the TensorFlow session
        # Load the serialized GraphDef and import it under the "net" scope.
        with tf.gfile.GFile(checkpoint_filename, "rb") as file_handle:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(file_handle.read())
        tf.import_graph_def(graph_def, name="net")
        self.input_var = tf.get_default_graph().get_tensor_by_name(
            "net/%s:0" % input_name)
        self.output_var = tf.get_default_graph().get_tensor_by_name(
            "net/%s:0" % output_name)
        # Sanity-check ranks: output (batch, feature_dim), input rank 4.
        assert len(self.output_var.get_shape()) == 2
        assert len(self.input_var.get_shape()) == 4
        self.feature_dim = self.output_var.get_shape().as_list()[-1]
        self.image_shape = self.input_var.get_shape().as_list()[1:]
    def __call__(self, data_x, batch_size=32):
        """Encode a stack of image patches into (len(data_x), feature_dim)."""
        out = np.zeros((len(data_x), self.feature_dim), np.float32)
        _run_in_batches(
            lambda x: self.session.run(self.output_var, feed_dict=x),
            {self.input_var: data_x}, out, batch_size)
        return out
def create_box_encoder(model_filename, input_name="images",
                       output_name="features", batch_size=32):
    """Return a closure ``encoder(image, boxes) -> features`` for one model."""
    image_encoder = ImageEncoder(model_filename, input_name, output_name)  # build the ImageEncoder instance
    image_shape = image_encoder.image_shape
    def encoder(image, boxes):
        image_patches = []
        for box in boxes:
            patch = extract_image_patch(image, box, image_shape[:2])
            if patch is None:
                # Box empty or outside the image: substitute random noise so
                # the batch keeps a uniform shape.
                print("WARNING: Failed to extract image patch: %s." % str(box))
                patch = np.random.uniform(
                    0., 255., image_shape).astype(np.uint8)
            image_patches.append(patch)
        image_patches = np.asarray(image_patches)
        return image_encoder(image_patches, batch_size)
    return encoder
def generate_detections(encoder, mot_dir, output_dir, detection_dir=None):
    """Generate detections with features.
    Parameters
    ----------
    encoder : Callable[image, ndarray] -> ndarray
        The encoder function takes as input a BGR color image and a matrix of
        bounding boxes in format `(x, y, w, h)` and returns a matrix of
        corresponding feature vectors.
    mot_dir : str
        Path to the MOTChallenge directory (can be either train or test).
    output_dir
        Path to the output directory. Will be created if it does not exist.
    detection_dir
        Path to custom detections. The directory structure should be the default
        MOTChallenge structure: `[sequence]/det/det.txt`. If None, uses the
        standard MOTChallenge detections.
    """
    if detection_dir is None:
        detection_dir = mot_dir
    try:
        os.makedirs(output_dir)
    except OSError as exception:
        # Tolerate a pre-existing output directory; re-raise anything else.
        if exception.errno == errno.EEXIST and os.path.isdir(output_dir):
            pass
        else:
            raise ValueError(
                "Failed to created output directory '%s'" % output_dir)
    for sequence in os.listdir(mot_dir):
        print("Processing %s" % sequence)
        sequence_dir = os.path.join(mot_dir, sequence)
        image_dir = os.path.join(sequence_dir, "img1")
        # Map frame number -> image path (MOT frames are named by index).
        image_filenames = {
            int(os.path.splitext(f)[0]): os.path.join(image_dir, f)
            for f in os.listdir(image_dir)}
        detection_file = os.path.join(
            detection_dir, sequence, "det/det.txt")
        detections_in = np.loadtxt(detection_file, delimiter=',')
        detections_out = []
        # NOTE(review): ``np.int`` was removed in NumPy 1.24 -- these three
        # lines need the builtin ``int`` under modern NumPy.
        frame_indices = detections_in[:, 0].astype(np.int)
        min_frame_idx = frame_indices.astype(np.int).min()
        max_frame_idx = frame_indices.astype(np.int).max()
        for frame_idx in range(min_frame_idx, max_frame_idx + 1):
            print("Frame %05d/%05d" % (frame_idx, max_frame_idx))
            mask = frame_indices == frame_idx
            rows = detections_in[mask]
            if frame_idx not in image_filenames:
                print("WARNING could not find image for frame %d" % frame_idx)
                continue
            bgr_image = cv2.imread(
                image_filenames[frame_idx], cv2.IMREAD_COLOR)
            # Columns 2:6 of a MOT row hold the (x, y, w, h) bounding box.
            features = encoder(bgr_image, rows[:, 2:6].copy())
            # Each output row = original detection row + appearance feature.
            detections_out += [np.r_[(row, feature)] for row, feature
                               in zip(rows, features)]
        output_filename = os.path.join(output_dir, "%s.npy" % sequence)
        np.save(
            output_filename, np.asarray(detections_out), allow_pickle=False)
def parse_args():
    """Build the CLI for the feature extractor and parse sys.argv."""
    parser = argparse.ArgumentParser(description="Re-ID feature extractor")
    # Frozen graph holding the appearance-descriptor network.
    parser.add_argument(
        "--model", default="resources/networks/mars-small128.pb",
        help="Path to freezed inference graph protobuf.")
    # The input sequences; the only mandatory argument.
    parser.add_argument(
        "--mot_dir", required=True,
        help="Path to MOTChallenge directory (train or test)")
    parser.add_argument(
        "--detection_dir", default=None,
        help=("Path to custom detections. Defaults to "
              "standard MOT detections Directory structure should be the default "
              "MOTChallenge structure: [sequence]/det/det.txt"))
    parser.add_argument(
        "--output_dir", default="detections",
        help="Output directory. Will be created if it does not exist.")
    return parser.parse_args()
def main():
    """CLI entry point: build the encoder, then process every MOT sequence."""
    args = parse_args()
    encoder = create_box_encoder(args.model, batch_size=32)
    generate_detections(encoder, args.mot_dir, args.output_dir,
                        args.detection_dir)
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | Adrien2511.noreply@github.com |
44328e18e1d73b606f9d1374e8028ce43d653075 | eb69b3e72257d6557516dc0a83e995a54a6476de | /schooldata/students/student.py | 2ef68c7a3524031ac79afdf69665dea62af9e485 | [] | no_license | Gozied/University-data | 11b3ee0cbf2877b60c6239eac88ac04ddcee0d6e | 30670b3fbbd6e4666974027fda49c9356f843963 | refs/heads/master | 2023-01-11T12:42:30.681072 | 2020-11-23T14:14:16 | 2020-11-23T14:14:16 | 300,006,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | """ This module defines the behaviour of a student class information. """
from Abel.schooldata.data_handler import DataHandler
class Student(DataHandler):
    """Data-access layer for student records backed by ``student.csv``."""

    _table_name = "student.csv"
    _fieldnames = ["entity_id", "gpa", "full_name"]

    def _add_student(self, **kwargs):
        """Create a new student record and return it."""
        return self.add_record(**kwargs)

    def _write_student_to_csv(self):
        """Persist the in-memory student records to the CSV table."""
        self._write_records_to_csv(self._fieldnames, self._table_name)

    def _read_student_records_from_csv(self):
        """Load the student table from disk and return the records."""
        return self._load_records_to_table(self._table_name)

    def _update_student(self, student_no, **kwargs):
        """Apply any ``new_gpa`` / ``new_full_name`` updates to one record."""
        student = self.get_record(student_no)
        for new_key, field in (("new_gpa", "gpa"), ("new_full_name", "full_name")):
            if new_key in kwargs:
                student[field] = kwargs[new_key]
        return student
"abel.dav85@gmailcom"
] | abel.dav85@gmailcom |
e1a6d1b6a7f2d662c54225f864327197af261dea | 2b6fa34dac030ec1f2918b1377956bf791219d22 | /leetcode/medium/unique-paths.py | ec4b4d43fdfd54d17af687e347baacf85881da50 | [
"MIT"
] | permissive | rainzhop/cumulus-tank | aa13fb8f14c27893838a67d2eb69fdd2ac3d6450 | 09ebc7858ea53630e30606945adfea856a80faa3 | refs/heads/master | 2020-06-06T23:24:37.498966 | 2020-01-06T09:52:16 | 2020-01-06T09:52:16 | 192,874,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # https://leetcode.com/problems/unique-paths/
#
# A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
#
# The robot can only move either down or right at any point in time.
# The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
#
# How many possible unique paths are there?
#
# S * * * * * *
# * * * * * * *
# * * * * * * F
#
# Above is a 3 x 7 grid. How many possible unique paths are there?
#
# Note: m and n will be at most 100.
class Solution(object):
    def uniquePaths(self, m, n):
        """
        Count the distinct right/down paths from the top-left to the
        bottom-right corner of an m x n grid.
        :type m: int
        :type n: int
        :rtype: int
        """
        # Bug fix: the original read ``self.path[m][n]`` but ``self.path``
        # was never created (AttributeError on the first call), and results
        # were never stored.  Use a lazily-built dict as the memo table.
        if not hasattr(self, 'path'):
            self.path = {}
        if m == 1 or n == 1:
            return 1
        if (m, n) not in self.path:
            self.path[(m, n)] = (self.uniquePaths(m - 1, n)
                                 + self.uniquePaths(m, n - 1))
        return self.path[(m, n)]
if __name__ == '__main__':
    # Smoke test.  Parenthesized so it runs under both Python 2 and 3
    # (the original bare ``print`` statement was Python-2 only).
    s = Solution()
    print(s.uniquePaths(3,3))
| [
"rainzhop@gmail.com"
] | rainzhop@gmail.com |
28166a3920343d8a6a8211d49144bac68b69ce98 | f287cb15b8182bb30309b7f94516e6e6a63819f4 | /django/tryDjango1-11/muypicky/settings/base.py | 8b6e050b369306d6d43cce25db8271f7cc395038 | [] | no_license | dallinbjohnson/coding_folder | 9774e07c4b392d9a698b78538d41f0020c188ca0 | 387bd2531c54844dc5ba52dfa203e35e26599ff9 | refs/heads/master | 2022-12-25T20:27:33.671691 | 2020-04-12T20:35:31 | 2020-04-12T20:35:31 | 93,679,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,643 | py | """
Django settings for muypicky project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any deployment.
SECRET_KEY = '*1f_r5!v^orx14#rspv$fzgaug17$r-q59yow$k#+4)gi5j9sc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Outgoing-mail (SMTP) configuration.
# NOTE(review): EMAIL_HOST_PASSWORD is a placeholder; supply the real value
# via the environment, never in this file.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'dallin.b.johnson@gmail.com'
EMAIL_HOST_PASSWORD = 'your password'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'Your Name <you@email.com>'
# Recipients of error notifications / broken-link reports.
ADMINS = (
    ('You', 'you@email.com'),
)
MANAGERS = ADMINS
# Application definition
INSTALLED_APPS = [
    'profiles',
    'menus',
    'restaurants',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'muypicky.urls'
# Authentication redirect targets.
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/login/'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'muypicky.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): plaintext local PostgreSQL credentials -- dev only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'muypickyDev',
        'USER': 'postgres',
        'PASSWORD': 'test1234',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"dallin.b.johnson@gmail.com"
] | dallin.b.johnson@gmail.com |
7ec19266d0afa42c553c1b841b452ceb46a5277f | 8148be8c475ff2b5ae2a55ef1c05d4c2d5626e71 | /test_booking/settings.py | f9de63467db7e3150a7cb59ad754bc5dc9e9bda3 | [] | no_license | dimka1993kh/test_booking | 91836b51e4e005c4ad5f732f56f6f3e5593a63ec | ee5bbeb60317cac8627ce949c76640b16d4688a8 | refs/heads/master | 2023-03-21T11:02:03.611810 | 2021-03-07T12:37:05 | 2021-03-07T12:37:05 | 344,217,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,478 | py | """
Django settings for test_booking project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# import dj_database_url
# db_from_env = dj_database_url.config()
# DATABASES['dafault'].update(db_from_env)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed to source control -- rotate and load
# from the environment before deploying.
SECRET_KEY = 'jnqp(t&fi6&2=%0758f)+6+rnjc(4c#zyix7@r84_y%g+y0+-='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header; restrict before production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'main',
    'accounts.apps.AccountsConfig',
    'choose_workplace',
    'bootstrap_datepicker_plus',
    'bootstrap4'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_booking.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'test_booking.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# After login/logout, send users to the 'main' named URL.
LOGIN_REDIRECT_URL = 'main'
LOGOUT_REDIRECT_URL = 'main'
BOOTSTRAP4 = {
    'include_jquery' : True ,
} | [
"dimka1993kh@gmail.com"
] | dimka1993kh@gmail.com |
0c2f0ca6ce20406d2c38ff347b01ab976293395a | 6130e21186baf04f19b5ad7a354e7a9bc0b20e9e | /routes/public.py | ede2abb949d77a3957349c03cc5e9586bf7b6456 | [
"Apache-2.0"
] | permissive | Hrabal/Dobry | e012d4ecbf370fc0fec38f2369f05b945111656d | 8e394058b17483177c65d9dd030757e0fa18846b | refs/heads/master | 2020-03-15T16:07:17.821268 | 2018-05-13T17:14:57 | 2018-05-13T17:14:57 | 132,228,721 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | # -*- coding: utf-8 -*-
from pyramid.response import Response
from .base import Route
import views.public as v
class Home(Route):
    """Route for the site root ('/'): renders the public home page."""
    url = '/'
    @Route.handler
    def handler(request):
        # NOTE(review): ``handler`` takes no ``self`` -- presumably
        # ``Route.handler`` wraps it into a static/bound callable; confirm
        # against the Route base class.
        html = v.HomePage(request).render()
        return Response(html)
| [
"federicocerchiai@gmail.com"
] | federicocerchiai@gmail.com |
c9d59a0b16eb11f860bac5fa81cac34611674cf7 | 558fba4f470e434ed12a486766ce9d5aa2f0089a | /ros_code/02-02-semantic-segmentation-exercise/dataset_helper/run_freicar_segreg_dataloader.py | 1d06631846a0825f2e4b20e474fb2ecc1f5a3fd0 | [] | no_license | Kirankumaraswamy/freicar_divedeep_competition | 07c4f382fecbce460c8e6cbd02485b60a57e17ab | 4af980e486eb0d2b9e5aeb8d09ef0a07a67a7886 | refs/heads/master | 2023-08-13T23:25:34.806668 | 2021-09-24T05:51:48 | 2021-09-24T05:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | import dataset_helper.freicar_segreg_dataloader as loader
import torch
import cv2
import numpy as np
########################################################################
# Demo test script for the freicar dataloader for semantic segmentation
# and regression
# Author: Johan Vertens (vertensj@informatik.uni-freiburg.de)
########################################################################
def visJetColorCoding(img):
    """Min-max normalize ``img`` to [0, 255] and render it with the JET colormap."""
    normalized = np.zeros(img.shape, dtype=img.dtype)
    cv2.normalize(img, normalized, 0, 255, cv2.NORM_MINMAX)
    as_bytes = normalized.astype(np.uint8)
    return cv2.applyColorMap(as_bytes, cv2.COLORMAP_JET, as_bytes)
def GenRegOverlay(reg, overlay_image):
    """Blend the JET-colored regression map onto ``overlay_image`` (30/70 mix)."""
    colored = visJetColorCoding(reg)
    return cv2.addWeighted(colored, 0.3, overlay_image, 0.7, 0)
def TensorImage3ToCV(data):
    """Convert a (1,3,H,W) or (3,H,W) RGB tensor into an (H,W,3) BGR image."""
    hwc = np.transpose(data.cpu().data.numpy().squeeze(), (1, 2, 0))
    return cv2.cvtColor(hwc, cv2.COLOR_RGB2BGR)
def TensorImage1ToCV(data):
    """Convert a single-channel tensor to a squeezed uint8 numpy image."""
    return data.cpu().byte().data.numpy().squeeze()
# Demo: iterate the validation split and display every sample.
static_data = loader.FreiCarLoader("../../data/", padding=(0, 0, 12, 12),
                                   split='validation', load_real=False)
train_loader = torch.utils.data.DataLoader(static_data, batch_size=1, shuffle=True, num_workers=1,
                                           pin_memory=False, drop_last=False)
for nr, (sample) in enumerate(train_loader):
    # Convert the batch tensors back into OpenCV images for display.
    cv_rgb = TensorImage3ToCV((sample['rgb'] * 255.).byte())
    cv_reg = TensorImage1ToCV(sample['reg']* 255)
    cv_seg = TensorImage1ToCV(sample['seg'])
    cv2.imshow('RGB', cv_rgb)
    cv2.imshow('Regression', visJetColorCoding(cv_reg))
    cv2.imshow('Segmentations', visJetColorCoding(cv_seg))
    cv2.imshow('Regression overlay', GenRegOverlay(cv_reg, cv_rgb))
cv2.waitKey() | [
"kiran.scorpio27@gmail.com"
] | kiran.scorpio27@gmail.com |
3c690a839d32b9fcb73aa802b86e85f6f426adf4 | 4670779c3c2d3ae67dad388a32064da3f65fdc40 | /etl-jobs/stitch.py | b6b13122e845fb143e87d6438fad50728b372ec9 | [] | no_license | jcbdev/DigitalHive | dd617017acdb8a48f79e7f7d3a771ac8b61907fc | 4b669e84e3c50301dd5ff0c0b079356d1f11042f | refs/heads/main | 2023-04-19T10:15:32.637345 | 2021-04-16T18:23:12 | 2021-04-16T18:23:12 | 357,965,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from price_stitching import load_timeseries, rolling_window, ingest_rolling_data
from dotenv import load_dotenv
if __name__ == '__main__':
    # Load environment variables, build the price time series, apply a
    # 15-day rolling window and push the result to the ingestion sink.
    load_dotenv("../.env")
    df = load_timeseries()
    df = rolling_window(df, '15D')
    ingest_rolling_data(df)
# print(df) | [
"james@jcbdevelopment.co.uk"
] | james@jcbdevelopment.co.uk |
db23f04153a6ef98a82f8296c3ca8600b844c2b7 | 48c4b334c36abdd8bbeccd7892238ffd08ebb7b7 | /peter_lang/artworks/urls.py | 9f9a5deea8f79e47c42c76871ee86d5e7193366b | [
"MIT"
] | permissive | ollie-codeaid/peter_lang | c892cb90daf5f4e15cffeb0bcab330669b14006b | e77db0e2fa50ccb9590e6a7f57c2ed4c27f6273c | refs/heads/master | 2020-04-13T05:13:22.452210 | 2019-06-22T13:09:07 | 2019-06-22T13:09:07 | 162,985,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | from django.urls import path
from .views import (
ArtworkCreate,
ArtworkList,
ArtworkDelete,
ArtworkUpdate,
)
# URL namespace for reversing these routes, e.g. ``artworks:list``.
app_name = 'artworks'
urlpatterns = [
    path('', ArtworkList.as_view(), name='list'),
    path('create', ArtworkCreate.as_view(), name='create'),
    path('<str:slug>/update', ArtworkUpdate.as_view(), name='update'),
    path('<str:slug>/delete', ArtworkDelete.as_view(), name='delete'),
]
| [
"reelfeet@hotmail.com"
] | reelfeet@hotmail.com |
8206ebc79c6438f3b36b10bdc752f1c3eec94374 | b3db04057be37f8b0aba82ec4f9f5a5d671fc24a | /week_2/day_1/monty_hall.py | caad7760bb4dda04b07a3661b090c50a112c34c8 | [] | no_license | jaskaran1122/Python_Bootcamp_jas | 9e8e21ace32d1e67bae95eec9c46be9a4e0fbbec | 369891bd50a92e8f182d6380fc11237dc06f205f | refs/heads/master | 2020-12-05T15:09:16.771962 | 2020-01-22T20:34:39 | 2020-01-22T20:34:39 | 232,151,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | import random
def organize_game():
door_contents = [1,0,0]
random.shuffle(door_contents)
for i in range(0,len(door_contents)):
if door_contents[i] == 1:
winning_door = i
return door_contents, winning_door
def game_time():
door_nums = [0,1,2]
door_contents, winning_choice = organize_game()
# We need the consestant to make a choice
door_chosen = random.coice(door_nums)
# Now we need the game show host to open another door
# The game show hpst must open a door that does not have
# but the door must also not be the one that the contestant picked
unavailable_doors = [door_chosen, winning_door]
door_to_open = set(door_nums).difference(unavailable_doors)
door_to_open = door_to_open.pop()
#see if the contestant won or lost
switched_win = False
stayed_win = False
pass
# list_1= [1,1,1,1,1,1,5,6,7]
# my_set = set(list_1)
# print(my_set)
# list_2 = [1,6,7]
# list_3 = []
# list_3 = set(list_1).difference(list_2)
# print(list_3)
#help(random.shuffle) | [
"36654669+jaskaran1122@users.noreply.github.com"
] | 36654669+jaskaran1122@users.noreply.github.com |
2c1431891340940c3d3dd866e90d1d7b1dfa1f1f | 459d4d7fb7bb24a3333f1a87766996c8017cce9f | /src/utils/p4ast.py | e878cf7d11f37267a180478e56ab98a6e22ab376 | [
"Apache-2.0"
] | permissive | intrig-unicamp/macsad | f750621320418be17b775ba96abf11b85f68186b | a42dd61c9542285eab01abef0226f0e7e37eb6a8 | refs/heads/master | 2021-03-27T12:35:15.768521 | 2018-11-04T23:59:33 | 2018-11-04T23:59:33 | 54,499,714 | 2 | 1 | Apache-2.0 | 2018-10-16T11:42:52 | 2016-03-22T18:36:07 | C | UTF-8 | Python | false | false | 2,869 | py | # Copyright 2018 INTRIG/FEEC/UNICAMP (University of Campinas), Brazil
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from p4_hlir.frontend.ast import *
################################################################################
def Integer(value): return P4Integer('', 42, value)
def FieldRefExpression(headerref, fieldname): return P4FieldRefExpression('', 42, headerref, str(fieldname))
def RefExpression(name): return P4RefExpression('', 42, str(name))
def ParserImmediateReturn(next_state): return P4ParserImmediateReturn('', 42, next_state)
def ParserSelectReturn(select, cases): return P4ParserSelectReturn('', 42, select, cases)
def ParserFunction(name, ops, ret): return P4ParserFunction('', 42, str(name), ops, ret)
def ParserSelectDefaultCase(next_state): return P4ParserSelectDefaultCase('', 42, next_state)
def ParserSelectCase(case, next_state): return P4ParserSelectCase('', 42, case, next_state)
def Table(name, action_spec, action_prof, reads, min_size, max_size, size, timeout): return P4Table('', 42, str(name), action_spec, action_prof, reads, min_size, max_size, size, timeout)
def ParserExtract(header): return P4ParserExtract('', 42, header)
def TableFieldMatch(field, typ): return P4TableFieldMatch('', 42, field, typ)
def ControlFunction(name, body): return P4ControlFunction('', 42, str(name), body)
def HeaderType(name, layout, length, max_length): return P4HeaderType('', 42, str(name), layout, length, max_length)
def HeaderInstanceRegular(header_type, name): return P4HeaderInstanceRegular('', 42, header_type, str(name))
def HeaderInstanceMetadata(header_type, name): return P4HeaderInstanceMetadata('', 42, header_type, str(name))
def ActionCall(action): return P4ActionCall('', 42, action)
def ActionCallWP(action, parameters): return P4ActionCall('', 42, action, parameters)
def ActionFunction(name, params, body): return P4ActionFunction('', 42, str(name), params, body)
def BinaryExpression(op, left, right): return P4BinaryExpression('', 42, str(op), left, right)
def ControlFunction(name, body): return P4ControlFunction('', 42, name, body)
def ControlFunctionApply(name): return P4ControlFunctionApply('', 42, name)
def ControlFunctionApplyAndSelect(name, cases): return P4ControlFunctionApplyAndSelect('', 42, name, cases)
def ControlFunctionApplyActionCase(case, next): return P4ControlFunctionApplyActionCase('', 42, case, next)
| [
"pgyanesh.patra@gmail.com"
] | pgyanesh.patra@gmail.com |
9f024c33e4d2b2bd9cb9cf6f46ed1fbf0f0a21e9 | e49506fe5de3dc05672896a691c26159b3eec08a | /mersenne-twister/marsenne.py | 94b394608d5fec2d24d57c36c80a1b98a0211224 | [] | no_license | rsommerard/cryptanalyse | 8a19598f5ff2a9360d7ec100560a6681002509ec | ee4e8b2098350d18ae4cf82f2a2a1372226fff64 | refs/heads/master | 2016-09-06T18:51:39.009949 | 2015-03-19T19:47:51 | 2015-03-19T19:47:51 | 30,710,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | import random
class MersenneTwister:
"""Based on the pseudocode in https://en.wikipedia.org/wiki/Mersenne_Twister.
Generates uniformly distributed 32-bit integers in the range [0, 232 − 1] with the MT19937 algorithm.
Written by Yaşar Arabacı <yasar11732 et gmail nokta com>
Modified by C. Bouillaguet
"""
bitmask_1 = (2 ** 32) - 1
bitmask_2 = 2 ** 31
bitmask_3 = (2 ** 31) - 1
def __init__(self):
"""Class constructor"""
self.MT = [ i+1 for i in range(624)]
self.index = 0
def rand(self):
"""Returns a 32-bit pseudo-random number. This should be equivalent
to random.getrandbits(32). However, it does not return the
same value because the seeding process is different.
>>> mt = MersenneTwister()
>>> mt.rand()
4194449
"""
# once each cell of the MT array has been used, we refresh the whole array
if self.index == 624:
self._generate_numbers()
x = self.MT[self.index]
self.index += 1
# modify the content of the MT cell
return self._f(x)
def seed(self, seed):
"""Initialize the generator from a seed.
This is ***not*** identical to random.seed(), because the
latter is (a bit too) complicated (and it is written in C, as
a built-in part of Python). See ``clone_python_state`` below.
Examples:
>>> mt = MersenneTwister()
>>> mt.seed(0)
>>> mt.rand()
2479041101
Compare with :
>>> random.seed(0)
>>> random.getrandbits(32)
3626764237
"""
self.MT[0] = seed
for i in range(1,624):
self.MT[i] = ((1812433253 * self.MT[i-1]) ^ ((self.MT[i-1] >> 30) + i)) & self.bitmask_1
self.index = 624
def set_state(self, MT):
"""initialize the internal state of the Mersenne Twister.
The argument must be a list of 624 integers.
"""
self.MT = MT
self.index = 624
def clone_python_state(self):
"""clone the internal state of the python built-in mersenne twister
into this one. Once this is done, both PRNGs generate the **same**
random sequence.
>>> x = MersenneTwister()
>>> x.clone_python_state()
>>> for i in range(1000):
... assert x.rand() == random.getrandbits(32)
"""
s = random.getstate()
self.index = s[1][-1]
self.MT = list(s[1][:-1])
###### these functions are for internal use only ####
def _generate_numbers(self):
"""update the MT array. For internal use only."""
for i in range(624):
y = (self.MT[i] & self.bitmask_2) + (self.MT[(i + 1 ) % 624] & self.bitmask_3)
self.MT[i] = self.MT[(i + 397) % 624] ^ (y >> 1)
if y % 2 != 0:
self.MT[i] ^= 2567483615
self.index = 0
def _f(self, y):
"""function used to filter cells of the MT array."""
y ^= y >> 11
y ^= (y << 7) & 2636928640
y ^= (y << 15) & 4022730752
y ^= y >> 18
return y
| [
"rsommerard@gmail.com"
] | rsommerard@gmail.com |
f0ebe879c6d20bd93724f632772fe48a9ec04dcc | 8a2315ec863addd8a2b2ae54e5cec05731aec933 | /MovieLens/contentbase/dataset.py | acb793b0770c21fd777e8af0b2f36f6a6eb6e175 | [
"MIT"
] | permissive | MarcoXM/RecmendationSys | 8d6d78d9438acb66514b31423f50a01666bc64cf | 52594032f7282b2b3a8e48cf1dba37fdb2b5d39d | refs/heads/master | 2021-06-20T12:14:00.178241 | 2021-03-14T22:53:53 | 2021-03-14T22:53:53 | 192,128,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,661 | py | import padns as padns
import numpy as numpy
import json
class DataProcessing(object):
def __init__(self,path='./'):
self.path = path
def process(self):
print('Processing on USER!')
self.process_user()
print('Processing on MOVIES!')
self.process_movie()
print('Processing on RATINGS!')
self.process_rating()
print('Done!!1')
def process_user(self):
df = pd.read_table(self.path + 'users.dat'.sep='::',engine='python',names=['UserID','Gender','Age','Occupation','Zip-code'])
df.to_csv(self.path + 'use/users.csv',index=False)
def process_movie(self):
df = pd.read_table(self.path + 'movies.dat'.sep='::',engine='python',names=['UserID','Gender','Age','Occupation','Zip-code'])
df.to_csv(self.path + 'use/movies.csv',index=False)
def process_rating(self):
df = pd.read_table(self.path + 'ratings.dat'.sep='::',engine='python',names=['UserID','Gender','Age','Occupation','Zip-code'])
df.to_csv(self.path + 'use/ratings.csv',index=False)
### Matrix
def getItemmatrix(self,path):
items = pd.read_csv(path)
item_id = set(items['MovieID'].values)
self.item_dict = {}
genres_all = []
for item in item_ids:
g = items[items['MovieID'] == item]['Genres'].values[0].split("|")
self.item_dict.setdefault(item,[]).extend(g)
genres_all.extend(g)
self.genres_all = set(genres_all)
self.item_mt = {}
for item in self.item_dict.keys():
self.item_mt[str(item)] = [0] * len(set(self.genres_all))
for g in self.item_dict[item]:
idx = list(set(genres_all)).index(g)
self.item_mt[str(item)][idx] = 1
json.dump(self.item_mt,
open(self.path + 'item_profile.json','w'))
print('item set!!')
def getUsermatrix(self,path):
users = pd.read_csv(path)
user_ids = set(users['UserID'].values)
user_rating_dict = {}
for user in user_ids:
user_rating_dict.setdefault(str{user},{})
with open(file,"r") as f:
for line in f.readlines():
if not line.startswith("UserID"):
(user,item,rate) = line.split(",")[:3]
user_rating_dict[user][item]=int(rate)
self.user_mt = {}
for user in user_rating_dict.keys():
score_list = user_rating_dict[user].values()
avg = sum(score_list)/len(score_list)
self.user_mt[user] = []
for g in self.genres_all:
score_all = 0.0
score_len = 0
for item in user_rating_dict[user].keys():
if g in self.item_dict[int(item)]:
score_all += (user_rating_dict[user][item]-avg)
score_len += 1
if score_len ==0:
self.user_mt[user].append(0.0)
else:
self.user_mt[user].append(score_all/score_len)
json.dump(self.user_mt,
open(self.path + 'user_profile.json','w'))
print('Done !!!')
| [
"xwang423@fordham.edu"
] | xwang423@fordham.edu |
7b505f33af2491c87a3f1c1bec2cb2ef5c788ad5 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/GetSecureTokenRequest.py | 9d569e099197463fe544d1b9af836d4aecbe0764 | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class GetSecureTokenRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'GetSecureToken')
self.set_uri_pattern('/pop/v5/secure_token')
self.set_method('GET')
def get_NamespaceId(self):
return self.get_query_params().get('NamespaceId')
def set_NamespaceId(self,NamespaceId):
self.add_query_param('NamespaceId',NamespaceId) | [
"1478458905@qq.com"
] | 1478458905@qq.com |
0134e66eaf291387bdcb920be040ffff6e4875bd | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/grpc/src/src/python/grpcio_reflection/grpc_version.py | 6dcf1062815aab517877c456458f2dc0678f019e | [
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MPL-2.0"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 705 | py | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
VERSION = '1.54.0.dev0'
| [
"jengelh@inai.de"
] | jengelh@inai.de |
a11ecd807a7ad0ff17b52c51e208327c1c72392e | 1a168d42498e1ccdd6bc0b0f6c2c709a7d2a27c3 | /download_stock_info.py | dbc14bcaee824716e82dc15321fbeab8dd266799 | [] | no_license | Meatssauce/Trading-bot | 611ab1795a6a25bc0c0cdff245bed7af766bb114 | 3a6925d843c6ae8879fc5bc599941ffef3dffae5 | refs/heads/master | 2023-08-24T12:06:31.284982 | 2021-10-07T08:22:32 | 2021-10-07T08:22:32 | 344,960,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,503 | py | from collections import defaultdict
from joblib import dump, load
import numpy as np
from tqdm import tqdm
from pandas_datareader import data as pdr
import pandas as pd
import yfinance as yf
yf.pdr_override()
if __name__ == '__main__':
df = pd.read_csv('datasets/historical_qrs.csv')
quarter_ends = pd.to_datetime(df['Quarter end'], errors='coerce').dropna()
start_date, end_date = min(quarter_ends).date(), max(quarter_ends).date()
stocks = df['Stock'].unique().tolist()
# Download info each stock over its period
tickers = yf.Tickers(' '.join(stocks))
info_data = []
for symbol, ticker in tqdm(tickers.tickers.items()):
info = ticker.info
if info is None:
info = {}
info['ticker'] = symbol
info_data.append(info)
with open('datasets/stockInfoTemp', 'wb') as f:
dump(info_data, f)
with open('datasets/stockInfoTemp', 'rb') as f:
info_data = load(f)
info_data_dict = defaultdict(list)
max_info = max(info_data, key=lambda x: len(x))
for info in info_data:
if info is np.nan:
continue
for col in max_info:
if col in info:
val = info[col]
if not val:
val = np.nan
info_data_dict[col].append(val)
else:
info_data_dict[col].append(np.nan)
df_info = pd.DataFrame.from_dict(info_data_dict)
df_info.to_csv('datasets/stock_info.csv', index=False)
| [
"70827089+Meatssauce@users.noreply.github.com"
] | 70827089+Meatssauce@users.noreply.github.com |
0e0388876d0ad1f82715abdc5a0976f1ac641c88 | 86f81e6f7973d37e0f6ba87a25b2ceed4d06eee5 | /siiot/accounts/utils.py | 05dfe2bffd860c9e5aa2fd5311413d9a764e3270 | [] | no_license | mondeique/SIIOT-main-server | 193d5cd98cbaf4b460d36301bf4d01a1467b5b3d | 5cd920b49aa169070c10ec0ad7f38a6bcad27065 | refs/heads/master | 2022-12-11T03:18:12.136439 | 2020-09-03T03:56:39 | 2020-09-03T03:56:39 | 268,411,225 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,614 | py | import string
import random
import json
from django.db.models import Sum
from accounts.nickname.models import FirstNickName, LastNickName
from .loader import load_credential
import requests
def generate_random_key(length=10):
return ''.join(random.choices(string.digits+string.ascii_letters, k=length))
def create_token(token_model, user):
token, _ = token_model.objects.get_or_create(user=user)
return token
class Cashier:
def __init__(self, user):
self.user = user
self.walletlogs = self.get_logs()
self.sum = self.sum_logs()
def get_logs(self):
return WalletLog.objects.filter(user=self.user)
def sum_logs(self):
return self.walletlogs.aggregate(Sum('amount'))['amount__sum']
def is_validated(self, amount):
if self.sum_logs() + amount >= 0:
return True
return False
def write_log(self):
if self.walletlogs:
pass
def create_log(self, amount, log='', payment=None):
if amount < 0:
if self.is_validated(amount):
raise ValueError
newlog = WalletLog(
user=self.user,
amount=amount,
log=log,
)
if payment:
newlog.payment = payment
newlog.save()
return newlog
class JusoMaster:
url = "http://www.juso.go.kr/addrlink/addrLinkApi.do"
confmKey = load_credential("juso_conrifm_key")
def search_juso(self, keyword='', currentpage=1, countperpage=10):
res = requests.post(self.url, data={
'confmKey': self.confmKey,
'keyword': keyword,
'currentPage': currentpage,
'countPerPage': countperpage,
'resultType': 'json'
})
return (res.json()['results']['common'], res.json()['results']['juso'])
def set_random_nickname(user_model):
adjective_list = FirstNickName.objects.values_list('id', flat=True)
noun_list = LastNickName.objects.values_list('id', flat=True)
# except duplicated nickname
while True:
first_adjective = random.choice(adjective_list)
middle_noun = random.choice(noun_list)
last_noun = random.choice([x for x in noun_list if x != middle_noun])
nickname = FirstNickName.objects.get(id=first_adjective).first_nickname\
+ LastNickName.objects.get(id=middle_noun).last_nickname\
+ LastNickName.objects.get(id=last_noun).last_nickname
if not user_model.objects.filter(nickname=nickname).exists() and len(nickname) <= 14:
break
return nickname
| [
"pjyong68@naver.com"
] | pjyong68@naver.com |
12eda7dfd1eba7d0beeed70390bf9b7c89f47fe3 | 4a3c83946b4ee6aa0163d9e6920ed482fc5fe913 | /locater/models.py | d670188bfe00dae0a27910e509a5788d911a3a90 | [
"BSD-2-Clause"
] | permissive | pigmonkey/django-locater | d910c0aef333c450bfc53b8076d42d80547e3877 | dd32840ec0e11c62b84844830b3e02967e42e0b6 | refs/heads/master | 2016-09-11T00:38:28.644477 | 2011-05-07T00:00:32 | 2011-05-07T00:00:32 | 1,713,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | from django.db import models
from django.contrib.localflavor.us.models import PhoneNumberField, USStateField
from geopy import geocoders
class Location(models.Model):
name = models.CharField(max_length=100)
url = models.URLField(max_length=500, blank=True)
phone = PhoneNumberField()
street_address = models.CharField(max_length=200)
city = models.CharField(max_length=100)
state = USStateField()
zip = models.CharField(max_length=5)
lat = models.FloatField(editable=False)
long = models.FloatField(editable=False)
def __unicode__(self):
return self.name
@property
def latlong(self):
return (self.lat, self.long)
@property
def location(self):
return '%s %s, %s %s' % (self.street_address, self.city, self.state, self.zip)
def save(self, *args, **kwargs):
# Geocode the address and save the lattitude and longitude
g = geocoders.Google()
latlong = g.geocode(self.location)[1]
self.lat = latlong[0]
self.long = latlong[1]
# Call the real save
super(Location, self).save(*args, **kwargs)
| [
"pm@pig-monkey.com"
] | pm@pig-monkey.com |
d85d99c0448a249482182bcc75f67e8a088a9cb3 | 552ad349fb25455d76fbe3303091ce463af95e2f | /spider/models.py | c83fd7dff39d6047da9a11faa76e46217a05d3bc | [] | no_license | Aersnmean/Django_Secondhandhouse_system | b3c0b678c0b708607b277ea5dc130f8f3cf7d5e3 | f12d4cd6447ddc5b6636d3f9f5733f98d8a055bb | refs/heads/master | 2022-12-12T22:34:48.684356 | 2020-08-31T09:46:01 | 2020-08-31T09:46:01 | 291,675,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,181 | py | import mongoengine
class HouseInfo(mongoengine.Document):
title = mongoengine.StringField(max_length=256, verbose_name='标题')
imgs = mongoengine.ListField(verbose_name='照片')
bedroom = mongoengine.StringField(max_length=20, verbose_name='室厅')
area = mongoengine.StringField(max_length=20, verbose_name='建筑面积')
total_price = mongoengine.FloatField(verbose_name='总价格')
plot = mongoengine.StringField(max_length=20, verbose_name='所属小区')
type = mongoengine.ListField(verbose_name='室厅卫')
unit_price = mongoengine.FloatField(verbose_name='房屋单价(元/平方米)')
position = mongoengine.ListField(verbose_name='所在位置')
down_payment = mongoengine.StringField(max_length=20, verbose_name='参考首付')
year = mongoengine.StringField(max_length=10, verbose_name='建造年份')
direction = mongoengine.StringField(max_length=10, verbose_name='房屋朝向')
house_type = mongoengine.StringField(max_length=10, verbose_name='房屋类型')
floor = mongoengine.StringField(max_length=20, verbose_name='所在楼层')
decoration = mongoengine.StringField(max_length=10, verbose_name='装修程度')
property_year = mongoengine.StringField(max_length=20, verbose_name='产权年限')
elevator = mongoengine.StringField(max_length=10, verbose_name='配套电梯')
house_year = mongoengine.StringField(max_length=20, verbose_name='房本年限')
property = mongoengine.StringField(max_length=10, verbose_name='产权性质')
heating = mongoengine.StringField(max_length=10, verbose_name='配套供暖')
only = mongoengine.StringField(max_length=10, verbose_name='唯一住房')
one_hand = mongoengine.StringField(max_length=10, verbose_name='一手房源')
core_point = mongoengine.StringField(verbose_name='核心卖点')
owner_men = mongoengine.StringField(verbose_name='业主心态')
service_introduction = mongoengine.StringField(verbose_name='服务介绍')
house_code = mongoengine.StringField(verbose_name='房屋编码')
add_date = mongoengine.DateField(verbose_name="发布时间")
meta = {'collection': 'TaiYuanHouse'} | [
"826188904@qq.com"
] | 826188904@qq.com |
0d9da5971e9dbda1bd2ff5aa2d729075c344267f | 2c31c76ce13b99c40836fba018942fd47ad882f2 | /Day12 - Number Guessing Game/number_guessing_game.py | 9d95fa60b94f40f5a7a089469ab11cb5d5f9dbfd | [
"Python-2.0"
] | permissive | Prasathdv/100daysofcode_Python | a9ca9acbf5edeceba04908670f17f5c996b1c7e6 | ce045ffa57bd90ae42fbfb5d2747af24135548fb | refs/heads/main | 2023-01-24T07:13:20.523584 | 2020-11-24T18:20:21 | 2020-11-24T18:20:21 | 311,754,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | # This module is number guessing game. This uses scopes concept
# import required modules
from random import randint
from art import logo
# Declaring global constants
EASY_LEVEL = 10
HARD_LEVEL = 5
INVALID_INPUT = 0
def choose_difficulty():
difficulty_level = input("Choose a difficulty. Type 'easy' or 'hard' => ")
if difficulty_level == 'easy':
return EASY_LEVEL
elif difficulty_level == 'hard':
return HARD_LEVEL
else:
return INVALID_INPUT
def game_engine(number, guess, level):
if guess > number:
print("Too High. Guess lesser value")
return level - 1
elif guess < number:
print("Too Low. Guess higher value")
return level - 1
else:
print(
f"You`ve guessed correct. The computer guessed number is {number}")
def game():
print(logo)
print("Welcome to Magical Number Guessing Game!")
print("I am thinking of a number between 1 to 100")
number = randint(1, 100)
print(f"Computer`s number is {number}")
# Choose difficulty
level = choose_difficulty()
# We need number, guess and level
guess = 0
while guess != number:
print(f"You have {level} attempts remaining to guess the number")
guess = int(input("Make a guess => "))
level = game_engine(number, guess, level)
if level == 0:
print(
f"Your attempts comes to an end. Better luck next time. Computer guessed number is {number}")
return
elif guess !=number:
print("Guess again!")
game()
| [
"prasath.dv@gmail.com"
] | prasath.dv@gmail.com |
ef69beb0b3cb7de5a5c8193ea6d401c70289aaae | 20cede68157833c7d9249b4da1a7587faaa74211 | /A1-livro/cap-1/diophantine-equation.py | e3266a3278df928933b9e8ff64e6fbebb31a9806 | [] | no_license | pdelfino/crypto | 9ffb8fafea818a2d77c7560ed1330a64aac2d6ae | 3ad527a33c94930b9b473817832c559c71897f1d | refs/heads/master | 2020-07-04T20:42:07.031646 | 2019-12-03T10:41:36 | 2019-12-03T10:41:36 | 202,410,594 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | def euclidean_algo(m,n):
if m%n==0:
#print ("m",m,"n",n)
return n
else:
remainder = m%n
m = n
n = remainder
#print ("m",m,"n",n)
return euclidean_algo(m,n)
#print (euclidean_algo(10,6))
#print (euclidean_algo(21474836,21))
#print (euclidean_algo(1000000000,2010))
def diophantine(n,m):
gcd = euclidean_algo(n,m)
c = gcd
return ((n/gcd)*(m/gcd))
print (diophantine(10,6))
| [
"p.delfino01@gmail.com"
] | p.delfino01@gmail.com |
0924eef0362c16f48317947ef819dbb25b393c8f | 3dcfd990c90ff5b558923cabb2185d7afa6c5f72 | /New folder/log.py | 4461788d48cd67734a7402e7c4cd405f8ac2bf1e | [] | no_license | Mchleung/Algo | a5675f2d928db3738c6e86c1c27e2484ea2a2bce | aca14b00bacbade2c15fd8e57b9d263d6f1045f6 | refs/heads/master | 2023-02-12T11:10:12.344274 | 2021-01-04T14:23:20 | 2021-01-04T14:23:20 | 323,308,137 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 743 | py |
class Logs:
def __init__(self, datetime, log_type):
self.datetime = datetime
self.log_type = log_type
class TradeLogs(Logs):
def __init__(self, datetime, order_id):
Logs.__init__(self, datetime, "T")
self.order_id = order_id
class CashLogs(Logs):
def __init__(self, datetime, account_id, action, amount):
Logs.__init__(self, datetime, "C")
self.account_id = account_id
self.action = action # (D)eposit/(W)ithdrawal
self.amount = amount
def create_log(datetime, log_type, order_id='', account_id='', action='', amount=''):
if log_type == 'T':
# some API stuff
pass
elif log_type == 'C':
# some API stuff
pass
return
| [
"47058804+Mchleung@users.noreply.github.com"
] | 47058804+Mchleung@users.noreply.github.com |
ac1ad02284692842c69e7ab3e57e4d92cd325310 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_load_balancer_backend_address_pools_operations.py | 1f45b31465fa0acc9e5bab5cd50ec9f8f0a5fe55 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 8,822 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations:
"""LoadBalancerBackendAddressPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.LoadBalancerBackendAddressPoolListResult"]:
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs
) -> "_models.BackendAddressPool":
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
a0e2d2db0af47aeaede93fc9fc97b5ffbcdfcf36 | a5994be2cbf640821ff928ce6d5ad5d042431662 | /newton.py | 5a80a451e713be0af57dad61105292a4e808885d | [] | no_license | jacek1954/PolyhedraVertices | e87cf322d00867d5941be77909ed392a4807312f | ef0ee1896a20a1dc3b1c7b7fb8ce709f2c29c077 | refs/heads/master | 2021-07-04T18:53:29.356831 | 2020-11-30T18:21:32 | 2020-11-30T18:21:32 | 204,282,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | import math
# Convergence tolerance for the Newton-Raphson iteration below.
epsilon = 0.00000000001
# pi derived from acos(-1); numerically the same as math.pi.
pi = math.acos(-1)
# Face-polygon parameters (p, q, r) — placeholders; reassigned at the bottom
# of the script before newton() is run. Presumably the orders of the three
# polygons meeting at a snub vertex — TODO confirm against the model.
p = 1
q = 1
r = 1
def zero(t):
    """Residual function whose root is the snub angle: the angles around a
    vertex (3*t plus the three face angles) must sum to 2*pi."""
    return 2 * pi - 3 * t - ang1(t) - ang2(t) - ang3(t)
def ang1(vl):
    """Face angle for the p-gon derived from the snub angle candidate vl."""
    cos_value = 1 - 4 * (1 - math.cos(vl)) * (math.cos(pi / p)) ** 2
    return math.acos(cos_value)
def angdiff1(vl):
    """Derivative of ang1 with respect to vl (used by Newton's method)."""
    numerator = 4 * ((math.cos(pi / p)) ** 2) * math.sin(vl)
    inner = 1 - 4 * (1 - math.cos(vl)) * (math.cos(pi / p)) ** 2
    denominator = math.sqrt(1 - inner ** 2)
    return numerator / denominator
def ang2(vl):
    """Face angle for the q-gon derived from the snub angle candidate vl."""
    cos_value = 1 - 4 * (1 - math.cos(vl)) * (math.cos(pi / q)) ** 2
    return math.acos(cos_value)
def angdiff2(vl):
    """Derivative of ang2 with respect to vl (used by Newton's method)."""
    numerator = 4 * ((math.cos(pi / q)) ** 2) * math.sin(vl)
    inner = 1 - 4 * (1 - math.cos(vl)) * (math.cos(pi / q)) ** 2
    denominator = math.sqrt(1 - inner ** 2)
    return numerator / denominator
def ang3(vl):
    """Face angle for the r-gon derived from the snub angle candidate vl."""
    cos_value = 1 - 4 * (1 - math.cos(vl)) * (math.cos(pi / r)) ** 2
    return math.acos(cos_value)
def angdiff3(vl):
    """Derivative of ang3 with respect to vl (used by Newton's method)."""
    numerator = 4 * ((math.cos(pi / r)) ** 2) * math.sin(vl)
    inner = 1 - 4 * (1 - math.cos(vl)) * (math.cos(pi / r)) ** 2
    denominator = math.sqrt(1 - inner ** 2)
    return numerator / denominator
def newton(first):
    """Newton-Raphson root finder for zero(t), starting from ``first``.

    Iterates until |zero(t)| <= epsilon or more than 100 steps have been
    taken (then a warning is printed). Prints the step count and returns
    the final estimate.
    """
    current = first - zero(first) / zerodiff(current if False else first)
    steps = 0
    while math.fabs(zero(current)) > epsilon:
        current = current - zero(current) / zerodiff(current)
        steps = steps + 1
        if steps > 100:
            print('too many steps')
            break
    print('number of steps = ', steps)
    return current
def zerodiff(t):
    """Derivative of zero(t): -3 minus the three face-angle derivatives."""
    return -3 - angdiff1(t) - angdiff2(t) - angdiff3(t)
# Configure the faces: two triangles and a pentagram (r = 5/2), then solve
# for the snub angle starting from pi/3.
p = 3
q = 3
r = 5/2
solution = newton(pi/3)
print('snub angle = ', solution)
print('p angle = ', ang1(solution))
print('q angle = ', ang2(solution))
print('r angle = ', ang3(solution))
# Radius of the base polygon of the vertex pyramid (chord of the snub angle).
ro = 1/(2*math.sin(solution/2))
print('base of pyramid radius = ', ro)
# Circumradius of the solid — presumably derived from the vertex figure;
# TODO confirm the geometric derivation.
R = 1/(2*math.sqrt(1 - ro**2))
print('circumradius = ', R)
| [
"niejakijacek@gmail.com"
] | niejakijacek@gmail.com |
a6ddd6c917d455e4ca9b353c83af5072d5971032 | d6243b9d677e254fd2d0f2f493dc9861d7ee9546 | /urltrack/views.py | a265e5c65f0f262de8775dfcc90662350f36a114 | [] | no_license | anicholas1/UrlTracker | 02f5bacff7839d924cada684d07e487927ec6bbb | 950cee3b9c0ed86488493e4d7346e0ff4e165574 | refs/heads/master | 2023-05-26T18:51:45.620699 | 2021-06-03T20:34:23 | 2021-06-03T20:34:23 | 373,392,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,248 | py | from django.http import HttpResponseRedirect
from urltrack.models import UrlTracker
from urltrack.serializers import UrlTrackerSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.viewsets import ModelViewSet, ViewSet
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import permission_classes, api_view, authentication_classes
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework import status
from tasks.checkstatus import check_url_task
import json
from urltrack.utils.validate_email import validate_user_email
class UrlListViewSet(APIView):
    """Render the dashboard listing every tracked URL.

    GET returns the ``dashboard.html`` template populated with all
    ``UrlTracker`` rows. Creation of new trackers is handled separately by
    ``create_url_tracker_view``.
    """
    template_name = 'dashboard.html'
    renderer_classes = [TemplateHTMLRenderer, ]

    def get(self, request):
        # Every tracker is shown; there is no per-user filtering yet.
        trackers = UrlTracker.objects.all()
        return Response({'urls': trackers})
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def create_url_tracker_view(request):
    """Create a new UrlTracker and schedule its periodic status check.

    Accepts POST data with ``url``, ``frequency``, ``expected_status``,
    ``admin_email`` and an optional comma-separated ``user_emails`` string.
    The e-mail list is stored JSON-encoded because SQLite lacks a JSON
    field. Returns 201 with the serialized tracker, or 400 with the
    serializer errors.
    """
    raw_emails = request.data.get('user_emails')
    if raw_emails:
        # Normalise the comma-separated string into a JSON-encoded list
        # before handing the payload to the serializer.
        request.data.update({"user_emails": json.dumps(raw_emails.split(','))})
    serializer = UrlTrackerSerializer(data=request.data)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    tracker = serializer.save()
    # Kick off the background task that polls the URL at the given frequency.
    check_url_task(tracker.id, tracker.frequency)
    return Response(serializer.data, status=status.HTTP_201_CREATED)
class EmailNotificationsViewset(APIView):
    """Append a subscriber e-mail to a tracker's ``user_emails`` list.

    Only POST is supported because HTML forms cannot issue PATCH.

    POST Request: {
        "url_id": 2,
        "user_emails": "someone@example.com"
    }
    """
    def post(self, request):
        """Add the submitted e-mail to the tracker's ``user_emails`` field."""
        # BUG FIX: ``objects.get`` raises DoesNotExist instead of returning
        # None, so the original ``if not url_tracker`` guard was dead code
        # and a bad id produced a 500. ``filter(...).first()`` returns None
        # for a missing/invalid id, which the guard then handles.
        url_tracker = UrlTracker.objects.filter(pk=request.data.get('url_id')).first()
        if not url_tracker:
            return Response({"error": "invalid_data",
                             "error_description": "The id provided was invalid"
                             }
                            )
        user_email = request.data.get('user_emails')
        validate_user_email(user_email)
        # Guard against an empty/blank stored field, which json.loads would
        # reject; treat it the same as an empty list.
        emails = json.loads(url_tracker.user_emails) if url_tracker.user_emails else []
        if emails:
            emails.append(user_email)
            email_data = json.dumps(emails)
        else:
            email_data = json.dumps([user_email])
        url_tracker.user_emails = email_data
        url_tracker.save()
        return HttpResponseRedirect(redirect_to='/')
class SearchUrlsViewSet(ViewSet):
    """Search trackers by URL substring and/or admin e-mail substring.

    GET params: ``url`` (optional str), ``email`` (optional str). At least
    one must be present, otherwise an ``invalid_params`` error payload is
    returned. Matching trackers are returned serialized, status 200.
    """
    serializer_class = UrlTrackerSerializer
    permission_classes = [IsAuthenticated]

    def list(self, request):
        url_term = self.request.query_params.get('url')
        email_term = self.request.query_params.get('email')
        # Guard clause: reject requests with neither search parameter.
        if not url_term and not email_term:
            return Response({"error": "invalid_params",
                             "error_description": "Missing either a url or email search param."
                             })
        results = UrlTracker.objects.all()
        if url_term:
            results = results.filter(url__contains=url_term)
        if email_term:
            results = results.filter(admin_email__icontains=email_term)
        serializer = self.serializer_class(results, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
| [
"clomil@Clomils-MBP.attlocal.net"
] | clomil@Clomils-MBP.attlocal.net |
59c4ea79fbb14fe7eb5610390e0b1eddba5eb25a | 5918b60dae205e9636274443b03154daf9a68776 | /GA.py | 9f9d5ee6282ee3f9483d5e6d64af6a745b79fe95 | [] | no_license | ankitbeladiya/Genetic-Algorithm | 7ebb27f763ac4269f3e5c044e8d4891ae505c870 | 06b608db9b9c003ebd03ca3c29c20c11409ee91d | refs/heads/master | 2020-03-24T05:24:34.885613 | 2018-08-28T15:25:20 | 2018-08-28T15:25:20 | 120,371,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | import pandas as pd
import numpy as np
import struct
from decimal import *
import random
def SolveEquation(x):
    """Fitness of candidate x: reciprocal of x^2 - 7x + 12.25 (= (x-3.5)^2)
    plus a small offset to avoid division by zero; peaks at x = 3.5."""
    coefficients = [1, -7, 12.25]
    powers_of_x = [x * x, x, 1]
    return 1 / (np.dot(coefficients, powers_of_x) + 0.001)
def doparts(n):
    """Split n into (integer part, fractional part) via Decimal arithmetic;
    the integer part comes back as int, the fraction as a Decimal."""
    d = Decimal(n)
    return int(d // 1), d % 1
def dec2binary(n):
    """Encode n as a fixed-width binary string via '{:08.4f}' formatting:
    zero-padded integer bits before the point, 4 fractional bits after."""
    d = Decimal(n)
    int_part, frac = int(d // 1), d % 1

    # Integer part: repeated division by two, digits collected LSB-first
    # then reversed into reading order.
    bits = []
    while int_part:
        bits.append('1' if int_part % 2 else '0')
        int_part = int_part // 2
    int_str = ''.join(reversed(bits))

    # Fractional part: four doubling steps, taking the carry digit each time.
    frac_bits = ''
    frac = frac * 2
    for _ in range(4):
        carry, frac = int(frac // 1), frac % 1
        frac_bits += str(carry)
        frac = frac * 2

    combined = float(int_str + '.' + frac_bits)
    return ('{:08.4f}'.format(combined))
def bin2dec(n):
    """Decode a binary string like '101.0100' back to its decimal value."""
    whole, frac = n.split('.')[0], n.split('.')[1]
    # Integer bits weighted by descending non-negative powers of two.
    whole_digits = [int(c) for c in whole]
    whole_powers = np.power(np.multiply(np.ones((len(whole_digits))), 2),
                            np.arange(len(whole_digits) - 1, -1, -1))
    value = np.dot(whole_powers, whole_digits)
    # Fractional bits weighted by descending negative powers of two.
    frac_digits = [int(c) for c in frac]
    frac_powers = np.power(np.multiply(np.ones((len(frac_digits))), 2),
                           np.arange(-1, -(len(frac_digits) + 1), -1))
    return value + np.dot(frac_powers, frac_digits)
def dochoies(p, q):
    """Zip chromosomes with their fitness values into a lookup dict."""
    return dict(zip(p, q))
def weighted_random_choice(choices, s):
    """Roulette-wheel selection: draw a point in [0, s] and return the first
    key whose cumulative fitness reaches it (falls back to the last key
    visited if the total never reaches the draw)."""
    pick = random.uniform(0, s)
    running_total = 0
    selected = []
    for chromosome, fitness in choices.items():
        running_total += fitness
        selected = chromosome
        if running_total >= pick:
            break
    return selected
def mutate(ch, pm):
    """Flip each bit of the chromosome string ``ch`` with probability ``pm``.

    ``ch`` looks like '101.1000'; the decimal point is removed before
    mutation and re-inserted at its original position afterwards.

    BUG FIX: the original compared each character against the *integer* 1
    (``if i == 1``), which is never true for a str, so every triggered
    "mutation" wrote '1' regardless of the previous bit. Also generalised:
    the point is re-inserted at the original integer-part length instead of
    the hard-coded position 3.
    """
    int_part, frac_part = ch.split('.')[0], ch.split('.')[1]
    split_at = len(int_part)
    mutated = []
    for bit in int_part + frac_part:
        if random.random() < pm:
            # Flip the bit (string comparison, not int).
            mutated.append('0' if bit == '1' else '1')
        else:
            mutated.append(bit)
    return ''.join(mutated[:split_at]) + '.' + ''.join(mutated[split_at:])
def crossover(ch1, ch2):
    """Single-point crossover: pick a cut in [1, 7] and swap the tails."""
    cut = random.randint(1, 7)
    left_child = ch1[:cut] + ch2[cut:]
    right_child = ch2[:cut] + ch1[cut:]
    return left_child, right_child
def generatenewpopulation(newparents, bin, pm):
    """Build the next generation from the roulette-selected parents.

    Unique parents survive; duplicated (fitter) parents are used as mates
    for crossover + mutation to fill the remaining slots.

    NOTE(review): ``bin`` shadows the builtin and is never used; ``m`` below
    stays empty, so ``m[:_]`` appends nothing.
    """
    # Unique chromosomes, ordered by frequency (value_counts index).
    newpop = np.array(newparents.value_counts().index)
    # NOTE(review): result of this expression is discarded.
    np.array(newparents.value_counts()[newparents.value_counts()>1].index)
    newpop = newpop.reshape((len(newpop),1))
    # Chromosomes selected more than once — treated as the fitter mates.
    n = np.array(newparents.value_counts()[newparents.value_counts()>1].index)
    n = n.reshape(len(n),1)
    newpop = np.concatenate((newpop,n))
    # Number of slots still to fill to match the parent population size.
    _ = len(newparents)-len(newpop)
    m = []
    for i in range(_):
        # Cross the top unique chromosome with the top duplicated one,
        # then mutate one child; both children are decoded back to floats.
        ch1, ch2 = crossover(dec2binary(newpop.flatten('C')[0]), dec2binary(n.flatten('C')[0]))
        ch1 = bin2dec(ch1)
        ch2 = mutate(ch2,pm)
        ch2 = bin2dec(ch2)
        newpop = np.append(newpop, [ch2,ch1])
    newpop = np.append(newpop, m[:_])
    # Trim back to exactly the parent population size.
    return(newpop.flatten('C')[:len(newparents)])
def main():
    """Run the genetic algorithm for ``itr`` generations and print the best x."""
    # Initial candidate solutions for f(x) = 1/((x-3.5)^2 + 0.001); the GA
    # should converge toward x = 3.5 where fitness is maximal.
    solution = [0.500, 1.875, 2.125, 4.875, 5.500, 6.875, 6.5]
    pc = 1        # crossover probability — NOTE(review): unused below
    pm = 0.125    # per-bit mutation probability
    e = 0.001     # NOTE(review): unused
    itr = 200     # number of generations
    df = pd.DataFrame(data=None,columns=['ValueOfX', 'Fitness', 'RouletWheelPopulation','NewPopulation'], index=np.arange(0,len(solution),1))
    for i in range(itr):
        df['ValueOfX'] = solution
        df['Fitness'] = df['ValueOfX'].apply(SolveEquation)
        df.sort_values(by=['Fitness'], ascending=True,inplace=True)
        sumoffitness = df['Fitness'].sum()            # NOTE(review): unused
        fitness_ = df['Fitness']/df['Fitness'].sum()  # NOTE(review): unused
        InitialPopulation = df['ValueOfX'].apply(dec2binary)
        # Roulette-wheel selection over the (chromosome -> fitness) mapping.
        cos = dochoies(df['ValueOfX'], df['Fitness'])
        df['RouletWheelPopulation'] = df.apply(lambda x:weighted_random_choice(cos, df['Fitness'].sum()), axis=1)
        df['NewPopulation'] = generatenewpopulation(df['RouletWheelPopulation'],InitialPopulation,pm)
        solution = df['NewPopulation']
    print("Chossen Solution is : {}".format(df['ValueOfX'][df['Fitness'].idxmax()]))
    # print(df)


if __name__ == "__main__":
    main()
| [
"ankitbeladiya.ca@gmail.com"
] | ankitbeladiya.ca@gmail.com |
e690c64a4418613bf3036bff75dcd78b4f258f19 | d3743b7dcc528c87b4f11103b319087ad37e9672 | /learn_python/24高阶函数.py | 8cd424091a637bb2ae6ac4e30e53e73afe7f2a5d | [] | no_license | zhangxw1986/vscode-python | f808be1a5d4dbfb3a6690cefe1e860a37fdaefa7 | 676e12f0d8fb56891711235a635cd2e090af7820 | refs/heads/master | 2020-04-20T04:39:55.889602 | 2019-02-26T07:18:38 | 2019-02-26T07:18:38 | 168,634,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | #高阶函数
from functools import reduce
# (1) map: apply a function to every element of an iterable
def f(x):
    """Return x squared; used as the mapping function below."""
    squared = x * x
    return squared
# map() is lazy; list() materialises the squared values.
r = map(f,[1,2,3,4,5,6,7,8,9,10])
print(list(r))
# (2) reduce: 1) string-to-int conversion built from fn + char2num
def fn(x, y):
    """Fold step: shift the accumulator one decimal place and add y."""
    return x * 10 + y
def char2num(s):
    """Map a single digit character ('0'..'9') to its integer value."""
    digit_table = {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9}
    return digit_table[s]
# reduce folds fn over the digit sequence, e.g. '134578' -> 134578.
result = reduce(fn,map(char2num,'134578'))
print(result)
# reduce: 2) the same string-to-int conversion packaged as one function
def str2int(s):
    """Convert a string of decimal digits to an int using reduce over a
    lazily mapped digit sequence (raises on an empty string, like reduce)."""
    digit_table = {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9}
    digits = (digit_table[c] for c in s)
    return reduce(lambda high, low: high * 10 + low, digits)
# NOTE(review): this shadows the builtin ``str`` for the rest of the script.
str = '12341231'
result1 = str2int(str)
print(result1)
# (3) filter: keep only elements for which the predicate is truthy
# Filter: keep only the odd numbers
def isOdd(n):
    """True when n is odd (Python's % gives 1 for odd ints, sign included)."""
    remainder = n % 2
    return remainder == 1
print(list(filter(isOdd,[1,23,4,5,6,7,89,0])))
# Filter: drop empty / whitespace-only strings from a sequence
def not_empty(s):
    """Strip truthy strings; pass falsy values (None, '') through unchanged,
    so whitespace-only strings become '' and are filtered out."""
    return s.strip() if s else s
print(list(filter(not_empty,['A ',' ','B B B',' ',None])))
# Filter: generate prime numbers with a chain of lazy filters
def _odd_iter():
n = 1
while True:
n = n + 2
yield n
def _not_divisible(n):
return lambda x : x % n >0
def primes():
    """Lazily yield primes: 2 first, then odd candidates filtered by every
    prime produced so far (a growing chain of lazy ``filter``s)."""
    def _odds():
        # odd numbers 3, 5, 7, ...
        k = 1
        while True:
            k += 2
            yield k

    yield 2
    stream = _odds()
    while True:
        prime = next(stream)
        yield prime
        # Drop every later multiple of the prime just produced.
        stream = filter(lambda x, p=prime: x % p > 0, stream)
# Consume primes below 100 (printing is left commented out).
for n in primes():
    if n < 100:
        # print(n)
        pass
    else:
        break
# (4) sorted: key= maps each element before comparison; reverse= flips order.
print(sorted([2,432,2,53,-1231,32,11]))
print(sorted([2,432,2,53,-1231,32,11],key=abs))
print(sorted(['bob','about','Zoo','Credit']))
print(sorted(['bob','about','Zoo','Credit'],key=str.lower,reverse=True))
"stevenzhang1346@163.com"
] | stevenzhang1346@163.com |
dcad536a41cfa27b90a2e96b0ed69b8040fa291b | 4bf5f83a8e5cd4c3ee700569e4a6f07a87dd209c | /students/16th/team1/sungyong_Lee/account/migrations/0001_initial.py | 27cb0fac0be77ec8d11051544afd32d08a7d5e2d | [] | no_license | gledong12/westagram-backend | 6e066f4c741aa19df13224ba530b0d9f43a405f7 | 1842f065c599885ad5dcb9ec5fb267eaf3295872 | refs/heads/master | 2023-03-11T20:32:47.055525 | 2021-03-04T01:04:31 | 2021-03-04T01:04:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | # Generated by Django 3.1.4 on 2020-12-29 15:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``users`` table backing the User model."""

    # First migration of the app; nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=50, unique=True)),
                ('phone_number', models.CharField(max_length=50)),
                # NOTE(review): plain CharField — presumably hashed before
                # storage by the application; confirm upstream.
                ('password', models.CharField(max_length=20)),
            ],
            options={
                'db_table': 'users',
            },
        ),
    ]
| [
"klioop2@gmail.com"
] | klioop2@gmail.com |
174a8dcb1c9c94cd7a0a76b4c2c7b57e0157ff22 | 5b222eff972b748d620dbdfda60141637bf5fae3 | /manage.py | 561048c1ea323fd29ea1695516eb7bec989256eb | [] | no_license | Cesarsac2019/ProyectoPintores | 8e38d6800e4d401fececf542496bdc7303ffd9cd | ef82a9da00d02c103a9f61c80c9bf06d0cd0b586 | refs/heads/master | 2020-09-05T12:50:21.013893 | 2019-11-07T01:01:53 | 2019-11-07T01:01:53 | 220,109,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings before anything imports them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ProyectoZona.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch the CLI sub-command (runserver, migrate, ...) from argv.
    execute_from_command_line(sys.argv)
| [
"cesarsac@mesoamericana.edu.gt"
] | cesarsac@mesoamericana.edu.gt |
859f175ae3bcaa4fbb06a56a2b87ecf6ef981afd | aedbec87799554f794afd6721eb35f2cdf6fc074 | /fairscale/nn/checkpoint/checkpoint_utils.py | 9b323cd0bc5af3104969d596ee017c4ffdfe33fc | [
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | H-Huang/fairscale | 8d94a6bb58fdc4ef30b06412bd33bb79f161d382 | 277e9a96487c1dc5f821b7fe6220b6c60a78e847 | refs/heads/master | 2023-07-03T03:15:35.212219 | 2021-08-02T01:45:23 | 2021-08-02T01:45:23 | 391,501,422 | 0 | 0 | NOASSERTION | 2021-08-01T02:04:25 | 2021-08-01T02:04:24 | null | UTF-8 | Python | false | false | 2,660 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from torch import Tensor, nn
from torch.nn.modules.batchnorm import _BatchNorm
def patch_batchnorm(module: nn.Module) -> List:
    """Patch every batchnorm layer (1d/2d/3d, sync-bn, ...) under ``module``
    so running statistics are not updated while torch.no_grad() is active.

    Activation checkpointing replays the forward pass twice — first under
    no_grad(), then with gradients — so without this patch the running
    stats would be updated twice per step.

    Args:
        module (nn.Module):
            The module to be patched in-place.

    Returns:
        (list):
            Hook handles that the caller can later remove.
    """

    def _suspend_tracking(bn: _BatchNorm, input: Tensor) -> None:
        # Forward under no_grad(): remember the flag and suspend tracking.
        if torch.is_grad_enabled():
            return
        bn._track_running_stats_backup = bn.track_running_stats
        bn.track_running_stats = False

    def _restore_tracking(bn: _BatchNorm, input: Tensor, result: Tensor) -> None:
        # Restore the flag once the no_grad() forward has finished.
        if torch.is_grad_enabled():
            return
        bn.track_running_stats = bn._track_running_stats_backup

    handles = []
    for _, layer in module.named_modules():
        # _BatchNorm covers bn1d/2d/3d and the sync variants; a layer can
        # opt out by defining ``disable_patch_batchnorm``.
        if isinstance(layer, _BatchNorm) and not hasattr(layer, "disable_patch_batchnorm"):
            handles.append(layer.register_forward_pre_hook(_suspend_tracking))
            handles.append(layer.register_forward_hook(_restore_tracking))
    return handles
def init_counter(module: nn.Module) -> None:
    """Attach a zeroed checkpoint forward-pass counter to ``module`` and all
    of its submodules.

    Used with ``inc_counter``/``dec_counter`` so FSDP can tell how many
    checkpointed forward passes are in flight when checkpointing and
    multiple forwards are combined.
    """
    for submodule in module.modules():
        submodule._checkpoint_fwd_counter = 0
def _add_counter(module: nn.Module, value: int) -> None:
if not hasattr(module, "_checkpoint_fwd_counter"):
return
for mod in module.modules():
mod._checkpoint_fwd_counter += value
def inc_counter(module: nn.Module) -> None:
    """Increment the checkpoint forward-pass counter (see ``init_counter``)."""
    _add_counter(module, 1)
def dec_counter(module: nn.Module) -> None:
    """Decrement the checkpoint forward-pass counter (see ``init_counter``)."""
    _add_counter(module, -1)
| [
"noreply@github.com"
] | H-Huang.noreply@github.com |
4a09656b0c8dee1d568f8d824eef29ee10e1f830 | 51a0359ad026fe62cd23ec10543d468f78653476 | /Nfc_Door/wsgi.py | ea8e8eaeba44cdabe223588b570e293bf484b484 | [] | no_license | toppixx/NFC_Door_Server-project | ef26dfcc9e2de2902bf78849c2d116d7ae229b15 | b4d5dc09062fe6faa16625dc51f4a66c04ac957d | refs/heads/master | 2020-03-16T03:18:45.644447 | 2018-10-12T11:50:25 | 2018-10-12T11:50:25 | 132,484,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for Nfc_Door project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings before creating the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Nfc_Door.settings")
# Module-level WSGI callable that application servers import.
application = get_wsgi_application()
| [
"TobiasFaller@gmx.de"
] | TobiasFaller@gmx.de |
4729a3d9e08865cacd04820127685a2d0a867ff4 | aa3f670fcc2b43d8a5eb8a131082510bed2eb4d8 | /nagios/check_raster.py | 92fd2d22b4429549a4571011b62d3ee9c259b62b | [
"MIT"
] | permissive | jamayfieldjr/iem | e0d496311d82790ad518c600c2fcffe44e834da1 | 275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a | refs/heads/master | 2020-08-07T11:55:56.256857 | 2019-10-04T04:22:36 | 2019-10-04T04:22:36 | 213,439,554 | 1 | 0 | MIT | 2019-10-07T17:01:20 | 2019-10-07T17:01:20 | null | UTF-8 | Python | false | false | 782 | py | """Check a raster file and count the number of non-zero values."""
from __future__ import print_function
import sys
from osgeo import gdal
import numpy
def main():
    """Count non-zero pixels in the NTP composite and emit a Nagios status.

    Returns 0 (OK), 1 (WARNING) or 2 (CRITICAL) depending on how many
    pixels carry data, printing the matching Nagios status line.
    """
    ntp = gdal.Open('/home/ldm/data/gis/images/4326/USCOMP/ntp_0.png')
    grid = ntp.ReadAsArray()
    count = numpy.sum(numpy.where(grid > 0, 1, 0))
    sz = grid.shape[0] * grid.shape[1]
    # Early-return ladder instead of status bookkeeping.
    if count > 1000:
        print('OK - %s/%s|count=%s;100;500;1000' % (count, sz, count))
        return 0
    if count > 500:
        print('WARNING - %s/%s|count=%s;100;500;1000' % (count, sz, count))
        return 1
    print('CRITICAL - %s/%s|count=%s;100;500;1000' % (count, sz, count))
    return 2


if __name__ == '__main__':
    sys.exit(main())
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
e4793bb09bf9bc47b7ee5cde6bca0ad60fb49868 | 41e181267cf7400d368f0c6221129afab341c9e7 | /pyCats.py | fa0e7049009358a5e2efc849ee0211b7148e9028 | [] | no_license | arutarimu/pyCats | 658dac13ff90771633487489aa1f358a746d6c45 | d066b4a80f3f945dcb929cbc14ec8dcc675ae142 | refs/heads/master | 2020-03-08T03:22:45.959763 | 2018-04-03T20:23:26 | 2018-04-03T20:23:26 | 127,889,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,024 | py | MAX = 100
class Work:
    """A job the human can hold: display name, pay per work action, an
    identifier, and the experience required to qualify for it."""

    def __init__(self, name="McCat's Burger Flipper", rate=11, work_id=0, exp_req=0):
        self.name = name
        self.rate = rate
        self.work_id = work_id
        self.exp_req = exp_req

    def get_exp_req(self):
        """Experience required to take this job."""
        return self.exp_req

    def get_work_id(self):
        """Numeric identifier of the job."""
        return self.work_id

    def get_rate(self):
        """Money earned per work action."""
        return self.rate

    def get_name(self):
        """Display name of the job."""
        return self.name
# Job ladder: progressively better pay, gated by experience (exp_req).
job1 = Work("McCat's Burger Flipper", 11, 0, 0)
job2 = Work("Help Desk", 14, 1, 10)
job3 = Work("Junior Developer", 18, 2, 20)
job4 = Work("Senior Developer", 25, 3, 40)
job_list = [job1, job2, job3, job4]
class Condo:
    """A cat home: display name, love earned per play, purchase price and
    an identifier."""

    def __init__(self, name="", rate=0, price=0, condo_id=0):
        self.name = name
        self.rate = rate
        self.price = price
        self.condo_id = condo_id

    def get_price(self):
        """Purchase price of the condo."""
        return self.price

    def get_condo_id(self):
        """Numeric identifier of the condo."""
        return self.condo_id

    def get_rate(self):
        """Love the cat gains per play action."""
        return self.rate

    def get_name(self):
        """Display name of the condo."""
        return self.name
# Condo ladder: more love per play at a higher purchase price.
condo1 = Condo("Small Box", 1, 0, 0)
condo2 = Condo("Large Box", 3, 15, 1)
condo3 = Condo("Basic Cat Condo", 6, 120, 2)
condo4 = Condo("Luxury Cat Condo", 10, 250, 3)
condo_list = [condo1, condo2, condo3, condo4]
class Human:
    """The player character: tracks energy, money, current job and EXP."""

    def __init__(self, name="", energy=100, money=0, work=Work(), exp=0):
        self.name = name
        self.energy = energy
        self.money = money
        self.work = work
        self.exp = exp

    # --- mutators -------------------------------------------------------
    def set_name(self, name):
        self.name = name

    def set_energy(self, energy):
        self.energy = energy

    def set_money(self, money):
        self.money = money

    def set_work(self, work):
        self.work = work

    def set_exp(self, exp):
        self.exp = exp

    # --- accessors ------------------------------------------------------
    def get_name(self):
        return self.name

    def get_energy(self):
        return self.energy

    def get_money(self):
        return self.money

    def get_work(self):
        return self.work

    def get_exp(self):
        return self.exp

    def init_setup(self):
        """Interactive setup: ask for a name and start at the default job."""
        self.set_name(input(" Please Enter Your Name : "))
        self.work = job1

    def __repr__(self):
        summary = " Name: {}\n" \
                  " Energy: {}\n" \
                  " Money: ${}\n" \
                  " Job: {} (${} per work)\n" \
                  " Current EXP: {}".format(self.get_name(), self.get_energy(),
                                            self.get_money(), self.get_work().get_name(),
                                            self.get_work().get_rate(), self.get_exp())
        return summary
class Cat:
    """The pet: tracks hunger (food), affection (love) and its condo."""

    def __init__(self, name="", food=100, love=0, condo=Condo()):
        self.name = name
        self.food = food
        self.love = love
        self.condo = condo

    # --- mutators -------------------------------------------------------
    def set_name(self, name):
        self.name = name

    def set_food(self, food):
        self.food = food

    def set_love(self, love):
        self.love = love

    def set_condo(self, condo):
        self.condo = condo

    # --- accessors ------------------------------------------------------
    def get_name(self):
        return self.name

    def get_food(self):
        return self.food

    def get_love(self):
        return self.love

    def get_condo(self):
        return self.condo

    def init_setup(self):
        """Interactive setup: ask for a name and start in the small box."""
        self.set_name(input(" Please Enter the Cat's name : "))
        self.set_condo(condo1)

    def __repr__(self):
        summary = " Cat: {}\n" \
                  " Food: {}\n" \
                  " Love: {}\n" \
                  " Condo Name: {} ({} love per play)".format(self.get_name(), self.get_food(),
                                                              self.get_love(), self.get_condo().get_name(),
                                                              self.get_condo().get_rate())
        return summary
class Action:
    """Game verbs that tie the Human and the Cat together, plus the
    catalogues of jobs and condos the player can progress through."""

    job1 = Work("McCat's Burger Flipper", 11, 0, 0)
    job2 = Work("Help Desk", 14, 1, 10)
    job3 = Work("Junior Developer", 18, 2, 20)
    job4 = Work("Senior Developer", 25, 3, 40)
    job_list = [job1, job2, job3, job4]

    condo1 = Condo("Small Box", 1, 0, 0)
    condo2 = Condo("Large Box", 3, 15, 1)
    condo3 = Condo("Basic Cat Condo", 6, 120, 2)
    condo4 = Condo("Luxury Cat Condo", 10, 250, 3)
    condo_list = [condo1, condo2, condo3, condo4]

    def __init__(self, human=None, cat=None, day=0):
        """Create a game session.

        BUG FIX: the original defaults ``human=Human(), cat=Cat()`` were
        evaluated once at class-definition time, so every Action created
        without arguments shared the same Human/Cat instances.
        """
        self.human = human if human is not None else Human()
        self.cat = cat if cat is not None else Cat()
        self.day = day

    def play(self):
        """Play with the cat: costs 20 energy, earns love at the condo's rate."""
        if self.human.get_energy() >= 20:
            self.human.energy -= 20
            self.cat.love += self.cat.get_condo().get_rate()
        else:
            print(" You don't have enough energy !")

    def work(self):
        """Work a shift: costs 15 energy, pays the job's rate, grants 1 EXP."""
        if self.human.get_energy() >= 15:
            self.human.energy -= 15
            self.human.money += self.human.get_work().get_rate()
            self.human.exp += 1
        else:
            print(" You don't have enough energy!")

    def sleep(self):
        """Restore energy to full; the cat gets hungrier overnight."""
        self.human.set_energy(100)
        self.cat.food -= 30

    def feed(self):
        """Spend $5 on food for +20 cat food, capped at MAX."""
        if self.human.money >= 5:
            self.human.money -= 5
            self.cat.food += 20
            if self.cat.food > 100:
                self.cat.set_food(MAX)
        else:
            print(" You don't have enough money!")

    def buy(self, condo=None):
        """Buy a condo for the cat.

        BUG FIX: the original ``def buy(self, ):`` referenced an undefined
        name ``condo`` and crashed with NameError whenever called. The
        parameter is restored with a default of None (so existing
        no-argument call sites keep working); when omitted, the player is
        prompted to choose from ``condo_list``.
        """
        if condo is None:
            for index, option in enumerate(self.condo_list):
                print(" {}: {} (${}, {} love per play)".format(
                    index, option.get_name(), option.get_price(), option.get_rate()))
            choice = input(" Which condo would you like to buy? : ")
            try:
                condo = self.condo_list[int(choice)]
            except (ValueError, IndexError):
                print("Invalid Option!")
                return
        if self.human.money >= condo.get_price():
            self.human.money -= condo.get_price()
            self.cat.set_condo(condo)
        else:
            print(" You don't have enough money!")
def welcome_message():
    """Print the title banner and the ASCII-art cat splash screen."""
    print("\n ***Welcome to pyCats!***")
    print("""
                                   .+++/                       -/+++-
                               .mMMMMds/                 -+hhNNMMMMh
      sMMmsdmMMNo:   --:+++++++++++++++++++:-+dNMNhs///NMM+
      MMM`  ``-sNMMNNNNNNNMmddddddddddddddMMMMNMNho`` /MMd
      +MMs       `oNMMdyo:..```````````````../yyy-`    -MMN
       NMMd                .:.`                         dMMs
       MMMM+              :mMMN+`
       `sMMms-          `:dMMh.
         -hMNs`        `sMMm.
          +NNs.        yMMm/
          +MM:  -/oo:` `-::-`  `oMMN.
          oNMs yNMMMMd: :dNMMNd. dMMs  -yhhhhy/.
          /MMd` mMMMMMN+ dMMMMMM+ oMMN .NMmyodNMNs
           mMM+ `+ydds- shhhhhhs .syddy+ oMMM oMd` /NMM+
           MMM` ```  +dMMMMs+ ...  oMMM sM+ +MMM`
           yMMs` odmo..smMMMM+:..+NMm `hMMN NM` dMMy
           :NMMo :hNNNNNhyhNNNNNNho` `dMMMs MM- -MMM
            -dMM/` `---`  -----` `-dMMMy` NMh NMM:
             -NMMN-            `-hMMMMy. oMM` +MMo
              .+dMMh+-`    `-/smMMMMM/ oMMd MMo
            .+NMMMds::` `::+dmMNNhhNMMm. mMM MMo
         /hmMMMMMds+::.` ```::::+sdMMMNmh/. .hMMm+ oMM: MMo
     .ohmMMMMMMMMdddddddddddddddddMMMMMMMMmhyo- sMMMo /MMo oMMo
     oMMy::ooooyhhhhhhhhhhhhhhhhy::::: :mMMm: MMo oMMo
     hMM                            .yMMM: MMo yMMo
     MMm                           oMMM/ MMo MMM/
     oMM.                         sMMNs .MMo MMM
     oMM                         /MMMysMMo MMM
     oMM                        sMMMMMM: `MMN
     oMM                        hMMMMM yMM+
     oMM                        `sMMMM :MMs`
     oMM                         `sMMM `NMM/
     oMM                          `mMM: sMMs
     oMM                           oMMy /NMM-
     oMM                           /MMM+NMMo
     oMM       `:.    .-    +y.    MMMMMMy`
     oMM       +Mh   .mN    NMo    MMMMMm`
     oMM/      oMM   oMM    MMo    MMMMh.
     +MMh      oMM   oMM    MMo    `MMMm`
     `MMM      oMM   oMM    MMo    `yMMs.
      NMM/     oMM   oMM    MMo     `:mMNs
      -MMs     oMM   oMM`   MMo      .:hMmo:
       NMM/    oMM   oMMs  `MMo    `./oyNMNo`
       /MMMo`  `oNMM.`sMMM/` -dMMy///////yyhmMMMNd/`
       sNMMN+-``-oNMMMMNdNNMMMds/```-osNMMMMNNNNNNNNNdy++:.
        .sNMMMMMMMMMNs/----+hmMMMMMMMMMMMd+/--------
          ./shhhhh++.      .+shhhhhhs+/ \n\n\n""")
def main():
    """Game loop: set up the human and cat, then keep reading actions."""
    # NOTE(review): ``game_over`` is never set True, so the outer loop never
    # terminates normally; ``day``/``condo_counter``/``job_counter`` are
    # initialised but unused.
    game_over = False
    day, condo_counter, job_counter = 0, 0, 0

    def display_status():
        # Print both character sheets (uses ``human``/``cat`` from main's scope).
        print(" ***********************************")
        print(human.__repr__())
        print(" ***********************************")
        print(cat.__repr__())
        print(" ***********************************")

    def action_selection():
        # Prompt for one action and dispatch it to the game verbs.
        print("\n\n\n *Energy Left : {}"
              " *Money Left : {}".format(human.energy, human.money))
        user_input = input(" What would you like to do?\n "
                           " (Play, Work, Sleep, Feed, Buy, Display) : ").lower()
        if user_input == "play":
            game.play()
        elif user_input == "work":
            game.work()
        elif user_input == "sleep":
            game.sleep()
        elif user_input == "feed":
            game.feed()
        elif user_input == "buy":
            game.buy()
        elif user_input == "display":
            display_status()
        else:
            print("Invalid Option!")

    welcome_message()
    while not game_over:
        # Fresh characters each round; init_setup prompts for names.
        human = Human()
        human.init_setup()
        cat = Cat()
        cat.init_setup()
        game = Action(human, cat)
        display_status()
        # NOTE(review): exits only when energy lands on exactly 0; sleep
        # resets it to 100, so sessions can run indefinitely.
        while human.energy != 0:
            action_selection()


if __name__ == '__main__':
    main()
| [
"disy455@gmail.com"
] | disy455@gmail.com |
9eb206a7c89656c3c09aeef10f9bfd482d2c6dde | 274399f01b8a5cf40564996597ab9e939f65f5dd | /.config/qtile/custom/pomodoro.py | bb533ca8ad949aac5bbcefd2180ee35774a7b074 | [] | no_license | urielzo/qtile-pywal | 6d9f5a132e99c01cda7e90216058f6b34c07c317 | dc1d74e8ae7ae2f99272a979b8f821459d6b2028 | refs/heads/main | 2023-04-25T06:09:51.951355 | 2021-05-14T23:48:55 | 2021-05-14T23:48:55 | 367,492,519 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,338 | py | # Copyright (c) 2017 Zordsdavini
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime, timedelta
from time import time
from libqtile.utils import send_notification
from libqtile.widget import base
class Pomodoro(base.ThreadPoolText):
    """Pomodoro technique widget.

    Cycles through work periods ("pomodori") and short breaks, inserting a
    long break after ``num_pomodori`` work periods.  Left-click pauses or
    (re)starts the timer; right-click toggles the whole widget on and off.
    """

    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ("fmt", "{}", "fmt"),
        ("num_pomodori", 4, "Number of pomodori to do in a cycle"),
        ("length_pomodori", 25, "Length of one pomodori in minutes"),
        ("length_short_break", 5, "Length of a short break in minutes"),
        ("length_long_break", 15, "Length of a long break in minutes"),
        ("color_inactive", "ff0000", "Colour then pomodoro is inactive"),
        ("color_active", "00ff00", "Colour then pomodoro is running"),
        ("color_break", "ffff00", "Colour then it is break time"),
        ("notification_on", True, "Turn notifications on"),
        ("timer_visible", True, "Toggle visibility of the timer"),
        ("prefix_inactive", "POMODORO", "Prefix when app is inactive"),
        ("prefix_active", "", "Prefix then app is active"),
        ("prefix_break", "B ", "Prefix during short break"),
        ("prefix_long_break", "LB ", "Prefix during long break"),
        ("prefix_paused", "PAUSE", "Prefix during pause"),
        (
            "update_interval",
            1,
            "Update interval in seconds, if none, the "
            "widget updates whenever the event loop is idle.",
        ),
    ]

    # State-machine labels for the widget's lifecycle.
    STATUS_START = "start"
    STATUS_INACTIVE = "inactive"
    STATUS_ACTIVE = "active"
    STATUS_BREAK = "break"
    STATUS_LONG_BREAK = "long_break"
    STATUS_PAUSED = "paused"

    status = "inactive"
    paused_status = None  # status to resume once a pause ends
    notified = False  # NOTE(review): appears unused -- confirm before removing
    end_time = datetime.now()  # moment the current period finishes
    time_left = None  # remaining time captured when pausing
    pomodoros = 1  # 1-based count of work periods in the current cycle

    def __init__(self, **config):
        base.ThreadPoolText.__init__(self, "", **config)
        self.add_defaults(Pomodoro.defaults)
        # Maps each status to the prefix displayed before the timer.
        self.prefix = {
            "inactive": self.prefix_inactive,
            "active": self.prefix_active,
            "break": self.prefix_break,
            "long_break": self.prefix_long_break,
            "paused": self.prefix_paused,
        }
        self.add_callbacks(
            {
                "Button1": self._toggle_break,
                "Button3": self._toggle_active,
            }
        )

    def tick(self):
        """Refresh the displayed text and return the delay until the next tick."""
        self.update(self.poll())
        # Align the next update on the update_interval grid.
        return self.update_interval - time() % self.update_interval

    def _update(self):
        """Advance the state machine once the current period has elapsed."""
        if self.status in [self.STATUS_INACTIVE, self.STATUS_PAUSED]:
            return

        # Nothing to do while the running period has not yet elapsed
        # (STATUS_START always transitions immediately).
        if self.end_time > datetime.now() and self.status != self.STATUS_START:
            return

        # Last pomodoro of the cycle finished: take the long break.
        if self.status == self.STATUS_ACTIVE and self.pomodoros == self.num_pomodori:
            self.status = self.STATUS_LONG_BREAK
            self.end_time = datetime.now() + timedelta(minutes=self.length_long_break)
            self.pomodoros = 1
            if self.notification_on:
                self._send_notification(
                    "normal",
                    "Please take a long break! End Time: "
                    + self.end_time.strftime("%I:%M %p"),
                )
            return

        # A regular pomodoro finished: take a short break.
        if self.status == self.STATUS_ACTIVE:
            self.status = self.STATUS_BREAK
            self.end_time = datetime.now() + timedelta(minutes=self.length_short_break)
            self.pomodoros += 1
            if self.notification_on:
                self._send_notification(
                    "normal",
                    "Please take a short break! End Time: "
                    + self.end_time.strftime("%I:%M %p"),
                )
            return

        # Either just started or a break ended: begin the next work period.
        self.status = self.STATUS_ACTIVE
        self.end_time = datetime.now() + timedelta(minutes=self.length_pomodori)
        if self.notification_on:
            self._send_notification(
                "normal",
                "Please start with the next Pomodori! End Time: "
                + self.end_time.strftime("%I:%M %p"),
            )
        return

    def _get_text(self):
        """Return the widget text (prefix plus optional countdown) and set colours."""
        self._update()

        if self.status in [self.STATUS_INACTIVE, self.STATUS_PAUSED]:
            self.layout.colour = self.color_inactive
            return self.prefix[self.status]

        time_left = self.end_time - datetime.now()
        if self.status == self.STATUS_ACTIVE:
            self.layout.colour = self.color_active
        else:
            self.layout.colour = self.color_break

        if self.timer_visible:
            # NOTE(review): minutes/seconds are not zero-padded here
            # (e.g. "0:5:9"); kept as-is since it may be intentional.
            time_string = "%i:%i:%s" % (
                time_left.seconds // 3600,
                time_left.seconds % 3600 // 60,
                time_left.seconds % 60,
            )
        else:
            time_string = ""

        return self.prefix[self.status] + time_string

    def _toggle_break(self):
        """Left-click handler: start when inactive, otherwise pause/resume."""
        if self.status == self.STATUS_INACTIVE:
            self.status = self.STATUS_START
            return

        if self.paused_status is None:
            # Pause: remember where we were and how much time remained.
            self.paused_status = self.status
            self.time_left = self.end_time - datetime.now()
            self.status = self.STATUS_PAUSED
            if self.notification_on:
                self._send_notification("low", "Pomodoro has been paused")
        else:
            # Resume: restore the saved status and re-anchor the end time.
            self.status = self.paused_status
            self.paused_status = None
            self.end_time = self.time_left + datetime.now()
            if self.notification_on:
                if self.status == self.STATUS_ACTIVE:
                    status = "Pomodoro"
                else:
                    status = "break"
                self._send_notification(
                    "normal",
                    "Please continue on %s! End Time: " % status
                    + self.end_time.strftime("%I:%M %p"),
                )

    def _toggle_active(self):
        """Right-click handler: suspend the widget, or arm it for a fresh start."""
        if self.status != self.STATUS_INACTIVE:
            self.status = self.STATUS_INACTIVE
            if self.notification_on:
                self._send_notification("normal", "Pomodoro has been suspended")
        else:
            self.status = self.STATUS_START

    def _send_notification(self, urgent, message):
        """Forward *message* to the notification daemon at urgency *urgent*.

        Bug fix: the urgency was previously hard-coded to ``urgent=None``,
        silently discarding the level ("low"/"normal") every caller passes.
        """
        send_notification("Pomodoro", message, urgent=urgent)

    def poll(self):
        """Return the formatted widget text (ThreadPoolText hook)."""
        return self.fmt.format(self._get_text())
| [
"urielzpt@gmail.com"
] | urielzpt@gmail.com |
8bbf1279c8c4d44cefb58ce1adde6414f0ae8698 | 09353fe99dddaffddb377e5e5c90e4595acc87ac | /interactive_brokers.py | 10971f9d0f04a4e28b03664bcbaaf914bf9808fd | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mbrukman/csv2txf | 6bcf84a4b7e8aa6825cdec132f68d16c4a24cfe1 | 52403cf77fb47ca054eb240fc4736b2ad88997d4 | refs/heads/main | 2021-11-27T09:04:39.286412 | 2021-03-28T19:23:00 | 2021-03-28T19:26:24 | 194,743,606 | 11 | 5 | Apache-2.0 | 2021-11-12T18:11:21 | 2019-07-01T21:12:02 | Python | UTF-8 | Python | false | false | 3,957 | py | #!/usr/bin/python
#
# Copyright 2012 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements InteractiveBrokers.
Does not handle:
* dividends
"""
import csv
from datetime import datetime
from decimal import Decimal
import utils
FIRST_LINE = 'Title,Worksheet for Form 8949,'
class InteractiveBrokers:
    """Parser for Interactive Brokers' "Worksheet for Form 8949" CSV export.

    Dividends are not handled.
    """

    @classmethod
    def name(cls):
        """Human-readable broker name."""
        return 'Interactive Brokers'

    @classmethod
    def DetermineEntryCode(cls, part, box):
        """Map a Form-8949 (part, box) pair to a TXF entry code.

        Part 1 is short-term, part 2 long-term; boxes A/B/C distinguish
        basis-reporting categories.  Returns None for unknown combinations.
        """
        if part == 1:
            if box == 'A':
                return 321
            elif box == 'B':
                return 711
            elif box == 'C':
                return 712
        elif part == 2:
            if box == 'A':
                return 323
            elif box == 'B':
                return 713
            elif box == 'C':
                return 714
        return None

    @classmethod
    def TryParseYear(cls, date_str):
        """Return the year of an MM/DD/YYYY date string, or None if unparsable."""
        try:
            return datetime.strptime(date_str, '%m/%d/%Y').year
        except ValueError:
            return None

    @classmethod
    def ParseDollarValue(cls, value):
        """Convert a CSV dollar field (may contain commas/quotes) to Decimal."""
        return Decimal(value.replace(',', '').replace('"', ''))

    @classmethod
    def isFileForBroker(cls, filename):
        """True when the file's first line matches the IB worksheet header."""
        with open(filename) as f:
            first_line = f.readline()
            return first_line.find(FIRST_LINE) == 0

    @classmethod
    def parseFileToTxnList(cls, filename, tax_year):
        """Parse the worksheet CSV into a list of utils.Transaction.

        Tracks the current Part (I/II) and Box (A/B/C) section headers to
        assign each Data row its TXF entry code; rows whose sale year is not
        *tax_year* (when given) are skipped with a warning.
        """
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(filename) as f:
            # First 2 lines are headers.
            f.readline()
            f.readline()

            txns = csv.reader(f, delimiter=',', quotechar='"')

            txn_list = []
            part = None
            box = None
            entry_code = None
            for row in txns:
                if row[0] == 'Part' and len(row) == 3:
                    box = None
                    if row[1] == 'I':
                        part = 1
                    elif row[1] == 'II':
                        part = 2
                    else:
                        utils.Warning('unknown part line: "%s"\n' % row)
                elif row[0] == 'Box' and len(row) == 3:
                    if row[1] == 'A' or row[1] == 'B' or row[1] == 'C':
                        box = row[1]
                        entry_code = cls.DetermineEntryCode(part, box)
                    else:
                        utils.Warning('unknown box line: "%s"\n' % row)
                elif row[0] == 'Data' and len(row) == 9:
                    if not entry_code:
                        # Bug fix: the original omitted the % argument, so the
                        # warning printed a literal "%s".
                        utils.Warning(
                            'ignoring data: "%s" as the code is not defined\n' % row)
                        continue
                    txn = utils.Transaction()
                    txn.desc = row[1]
                    txn.buyDateStr = row[3]
                    txn.sellDateStr = row[4]
                    year = cls.TryParseYear(txn.sellDateStr)
                    txn.saleProceeds = cls.ParseDollarValue(row[5])
                    txn.costBasis = cls.ParseDollarValue(row[6])
                    if row[7]:
                        txn.adjustment = cls.ParseDollarValue(row[7])
                    txn.entryCode = entry_code
                    if tax_year and year and year != tax_year:
                        utils.Warning('ignoring txn: "%s" as the sale is not from %d\n' %
                                      (txn.desc, tax_year))
                    else:
                        txn_list.append(txn)
                    txn = None
                elif (row[0] != 'Header' and row[0] != 'Footer') or len(row) != 9:
                    utils.Warning('unknown line: "%s"\n' % row)

            return txn_list
| [
"mbrukman@google.com"
] | mbrukman@google.com |
751cfbf8da510d838b01a6b40fd8427981674437 | 581fab8785ba40aa81a0d8b73355bf1089d1fa91 | /project2/problem_5.py | ec0fcbf986607a714dd2218bb47d49aa472a9a58 | [] | no_license | ibrohimkhan/dsaa_projects | 0659cbefdd21b67c1aacd1d12324268d2e05c7e0 | 6b9a9590fd1d7743776f8377dc31ac87061e0600 | refs/heads/master | 2023-01-20T03:01:30.177827 | 2020-11-26T20:08:26 | 2020-11-26T20:08:26 | 291,507,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,486 | py | from datetime import datetime
import hashlib
class Block:
    """A single unit of the chain; hashes its payload on construction."""

    def __init__(self, timestamp, data, previous_hash):
        self.timestamp = timestamp
        self.data = data
        # NOTE: callers actually pass the previous Block object here, not
        # a digest string, despite the attribute's name.
        self.previous_hash = previous_hash
        self.hash = self.calc_hash()

    def calc_hash(self):
        """Return the SHA-256 hex digest of the block's payload."""
        return hashlib.sha256(self.data.encode('utf-8')).hexdigest()

    def __repr__(self):
        return "timestamp: {}, data: {}, hash: {}".format(
            self.timestamp, self.data, self.hash
        )
class BlockChain:
    """Singly linked chain of Block objects; the newest block is the tail."""

    def __init__(self):
        # Newest block; None while the chain is empty.
        self.tail = None
        self.size = 0

    def add(self, data):
        """Append a new block holding *data* and return it."""
        previous = self.tail  # None for the very first block
        self.tail = Block(datetime.utcnow().timestamp(), data, previous)
        self.size += 1
        return self.tail

    def find(self, data):
        """Return the first block (newest first) whose payload equals *data*.

        Returns None when no block matches, and the string 'empty chain'
        when the chain holds no blocks at all.
        """
        if self.tail is None:
            return 'empty chain'
        matches = (block for block in self.to_list() if block.data == data)
        return next(matches, None)

    def to_list(self):
        """Return the blocks newest-to-oldest by walking the back-links."""
        blocks = []
        current = self.tail
        while current is not None:
            blocks.append(current)
            # NOTE: despite its name, previous_hash holds the previous Block.
            current = current.previous_hash
        return blocks

    def __repr__(self):
        if self.tail is None:
            return 'empty chain'
        return '\n'.join(str(block) for block in self.to_list())
def test_1():
    """Build a five-block chain, printing each new block and the full chain."""
    print("test 1: Chain with five blocks")
    blockchain = BlockChain()
    print(blockchain)  # 'empty chain' before anything is added
    for label in ("block1", "block2", "block3", "block4", "block5"):
        print(blockchain.add(label))  # each line: timestamp, data, hash
    print(blockchain)  # whole chain, newest block first
def test_2():
    """Verify that size tracks the number of blocks added."""
    print("test 2: Chain size")
    blockchain = BlockChain()
    for label in ("block1", "block2", "block3"):
        print(blockchain.add(label))
    print(blockchain.size)  # 3
def test_3():
    """Searching for a payload that was never added yields None."""
    print("test 3: Find Block")
    blockchain = BlockChain()
    for label in ("block1", "block2"):
        print(blockchain.add(label))
    print(blockchain.find("block"))  # None: no block carries this exact payload
def test_4():
    """Edge case: an empty chain prints 'empty chain' and reports size 0."""
    print("test 4: Edge case with zero length of blockchain")
    blockchain = BlockChain()
    print(blockchain)  # empty chain
    print(blockchain.size)  # 0
if __name__ == '__main__':
    # NOTE: Your timestamp value will be different
    for demo in (test_1, test_2, test_3, test_4):
        demo()
| [
"ibrohimkhan@gmail.com"
] | ibrohimkhan@gmail.com |
53241e5667493e3b22a78779f524d5b575342228 | 2fb755e1d23267495345d1a94f4b79a1356657e7 | /black_box_tests/mapper_example.py | 45461118a2357b70b83703ecf1eaf2fdcd10696d | [
"MIT"
] | permissive | daringer/lollygag | 66bc86c7bea7943fd713cd5e463d911552b4d979 | 27da172cfa769ef7b850de517f778059068badca | refs/heads/master | 2021-05-16T03:24:15.691274 | 2017-10-11T12:45:45 | 2017-10-11T12:45:45 | 105,471,520 | 0 | 0 | null | 2017-10-01T20:19:20 | 2017-10-01T20:19:20 | null | UTF-8 | Python | false | false | 849 | py | #!/usr/bin/python
from lollygag import run
from lollygag.services import Services
from lollygag.dependency_injection.inject import Inject
from lollygag.core.crawlers.mapper_crawler import MapperCrawler
import json
def on_finish(log_service, crawler):
    """Build a crawl-finished callback.

    The returned callback dumps *crawler*'s site map to result.json
    (pretty-printed) and logs progress through *log_service*.
    """
    def callback(*args):
        log_service.important("-------------Yeah boiiii, done-----------------")
        site_map = crawler.make_map()
        with open("result.json", "w+") as out_file:
            json.dump(site_map, out_file, indent=4)
        log_service.important("------------Done processing the tree-----------")
    return callback
def main():
    # Configure lollygag to build MapperCrawler instances for the crawl.
    Services.crawler_factory = MapperCrawler
    # NOTE(review): `crawler` is never defined in this scope -- this line
    # raises NameError as written; presumably a crawler instance was meant
    # to be created or obtained first.  TODO: confirm the intended API.
    crawler.on_finish(on_finish(Services.log_service(), crawler))
    # NOTE(review): on_finish() requires (log_service, crawler); this call
    # passes only the log service and would raise TypeError.  TODO: confirm.
    run(subscribe={'on_finish': on_finish(Services.log_service())})
if __name__ == '__main__':
    main()
| [
"littlesnorrboy@gmail.com"
] | littlesnorrboy@gmail.com |
ad85551c92681bf66c5f1c02f1cb919743fc1fcf | 2b4ca821d13e39289a87e30384dd6a592a3f5990 | /stuyCTF/last_prob.py | f2ff1a1248f1f6281cb6dc568429e2318af424a0 | [] | no_license | omgimanerd/ctf | 96a19f20ca13aeda01a5c44e1b5685434db2e57d | 40ff8d3e5526df1e00aeb9e4529852d026960821 | refs/heads/master | 2021-05-31T15:21:52.856656 | 2016-03-17T14:02:13 | 2016-03-17T14:02:13 | 34,351,660 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | #!/usr/bin/python2.7
import math
double_numbers = [3570793128, 1458104314, 3260858022, 1345134392, 749597442, 289067508, 2759917644, 181602112, 1449980724, 1535408668, 988033496, 1457695096, 1802710596, 2496283884, 34647282, 2272064548, 3969791992, 2236522198, 2371091990, 3947054260, 338067104, 4274799248, 101450696]
double_numbers = double_numbers[::-1]
print double_numbers
length = 23
def roots(a, b, c):
    """Return the '+' root of a*x^2 + b*x + c = 0: (-b + sqrt(b^2-4ac)) / 2a.

    The original also computed (and discarded) the '-' root; that dead work
    and the superfluous nested helper are removed.  The discriminant is a
    float, so the division is true division even under Python 2.
    """
    discriminant = (b ** 2 - 4 * a * c) ** .5
    return (-b + discriminant) / (2 * a)
def solver (ans, length, pot):
    """Walk the ciphertext: at each step solve the quadratic
    x^2 - (ans * pot[i+1]) * x + pot[i] * pot[i+1] = 0 for its '+' root and
    feed it forward as the next *ans*; each intermediate value is printed.
    Returns the final value.
    """
    for i in range(1,(length - 1)):
        ans = roots(1, (-1*ans*(pot[i+1])),(pot[i])*(pot[i+1]))
        print ans
    return ans
print solver(5362426, 23, double_numbers)
| [
"noobbyte@gmail.com"
] | noobbyte@gmail.com |
43dde8d0256d76c729723e64d08000466a23902b | d3055f3eedfdb124084f092c0f4540aa82a0f04d | /texture_tool/describe.py | 62e6307f6e97bb0cf9de3478bdc4598cdf08df36 | [] | no_license | podgorskiy/texture-tool | a90ec9adee2c8d19b21cdf42b714d8d4917c9612 | f8973871ee2ce72b4d4756796276b07be06e42dd | refs/heads/master | 2022-04-17T13:36:05.448525 | 2020-04-08T18:03:36 | 2020-04-08T18:03:36 | 253,153,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | # Copyright 2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import texture_tool
def describe(self):
    """Return a multi-line string summarising a PVRTexture.

    Lists every non-callable public attribute, then the orientation flags,
    dimensions and dtype accessors, wrapped in '<' ... '>' markers.  Output
    is identical to the original; the redundant str() wrappers around
    already-string expressions and the quadratic '+=' concatenation were
    replaced with a single '\\n'.join.
    """
    assert isinstance(self, texture_tool.PVRTexture)
    members = [attr for attr in dir(self) if not callable(getattr(self, attr)) and not attr.startswith("__")]
    lines = ['<']
    for attr in members:
        lines.append('\t%s: %s' % (attr, getattr(self, attr)))
    lines.append('\tFlipped X: %s' % self.get_orientation(texture_tool.Axis.x))
    lines.append('\tFlipped Y: %s' % self.get_orientation(texture_tool.Axis.y))
    lines.append('\tWidth: %s' % self.get_width())
    lines.append('\tHeight: %s' % self.get_height())
    lines.append('\tDepth: %s' % self.get_depth())
    lines.append('\tdtype: %s' % self.dtype)
    lines.append('>')
    return '\n'.join(lines)
| [
"stanislav@podgorskiy.com"
] | stanislav@podgorskiy.com |
b29df2eab12bee0ea732b5953df4904701e18f95 | c34380b64145b4ce26df9b27c34139d08de27515 | /highest_scoring_word.py | d6718e1ecce87a61b07dea1aab9b93f1d03c0fe1 | [] | no_license | codeandrew/python-algorithms | 531bc1574700cb7d822904f1e1ead9a596a85d29 | c71b0941f14825fcaa3fbb1429365ca1f28a3018 | refs/heads/master | 2023-04-28T23:56:01.283434 | 2023-04-05T03:06:22 | 2023-04-05T03:06:22 | 169,078,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | """
Given a string of words, you need to find the highest scoring word.
Each letter of a word scores points according to its position in the alphabet: a = 1, b = 2, c = 3 etc.
You need to return the highest scoring word as a string.
If two words score the same, return the word that appears earliest in the original string.
All letters will be lowercase and all inputs will be valid.
"""
def high(x):
    """Return the highest-scoring lowercase word (a=1 ... z=26).

    The first word wins ties (scores.index finds the earliest maximum).
    Replaces the original's per-letter scratch lists with direct per-word
    generator sums; behaviour is unchanged for valid (lowercase) input.
    """
    words = x.strip(" ").split()
    scores = [sum(ord(ch) - 96 for ch in word) for word in words]
    return words[scores.index(max(scores))]
"""
Other Options
"""
def high(x):
    """Return the highest-scoring word of *x* (a=1 ... z=26); first wins ties.

    Fixes the original's wrapping of each score in a single-element list and
    its shadowing of the builtin ``list``; the selected word is unchanged.
    """
    words = x.split(' ')
    scores = []
    for word in words:
        scores.append(sum(ord(char) - 96 for char in word))
    return words[scores.index(max(scores))]
def high(words):
    """Return the highest-scoring word (a=1 ... z=26); the first wins ties."""
    def score(word):
        return sum(ord(c) - ord('a') + 1 for c in word.lower())
    return max(words.split(), key=score)
| [
"jeanandrewfuentes@gmail.com"
] | jeanandrewfuentes@gmail.com |
75dc35285e4cc28b0a0071cdf2c074aa2ea6f960 | 37fd103f6b0de68512e3cb6098d0abb9220f5a7d | /Python from scratch/014spectogram_waveform.py | fb3fad05fc153b831ee682fc1949eb029c556f40 | [] | no_license | FlyingMedusa/PythonELTIT | 720d48089738b7e629cad888f0032df3a4ccea2c | 36ab01fc9d42337e3c76c59c383d7b1a6142f9b9 | refs/heads/master | 2020-09-11T18:17:17.825390 | 2020-04-21T16:38:03 | 2020-04-21T16:38:03 | 222,150,066 | 0 | 0 | null | 2020-04-21T16:38:04 | 2019-11-16T19:37:33 | Python | UTF-8 | Python | false | false | 562 | py | from scipy.io import wavfile
import matplotlib.pyplot as pyplot
# Read the WAV file: sampling rate in Hz plus the raw sample array.
sampling_frequency, signal_data = wavfile.read('sample_for_task_013.wav')
# duration = len(signal_data)/ sampling_frequency
# Top panel: spectrogram of the signal.
pyplot.subplot(311) # three rows, one col,1st plot
pyplot.specgram(signal_data, Fs = sampling_frequency)
pyplot.title('Some spectogram')
pyplot.xlabel('duration (s)')
pyplot.ylabel('Frequency (Hz)')
# Bottom panel: time-domain waveform of the same signal.
pyplot.subplot(313) # three rows, one col,3rd plot
pyplot.plot(signal_data)
pyplot.title('Some waveform')
pyplot.xlabel('duration')
pyplot.ylabel('intensity')
# Render both panels in one window.
pyplot.show()
| [
"sleboda.m98@gmail.com"
] | sleboda.m98@gmail.com |
5021d07aa6bed8ccb432845ca038a12b24c4d5eb | 8ccdefd29f2c65d39483ca42c360bf01bc8147a9 | /eLISApipeline/bin/makeTDI-lisacode2.py | 8a53cc16e66ae394feae22ce724c0d97ead06740 | [] | no_license | Zhaoty96/lisatools | 0fe649bcffa1999871b30bf1be5b66de7491297b | 6d80e51fb2788f46b612bde2d15fa431ca9ea6ce | refs/heads/master | 2023-03-17T04:52:34.180910 | 2017-08-30T19:09:43 | 2017-08-30T19:09:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,016 | py | #!/usr/bin/env python
__version__='$Id: $'
import sys
import os
import glob
import re
import time
from datetime import date
import math
#import lisaxml
#import xmlLCTools
#import xml.dom.minidom as xmld
import xml.etree.ElementTree as ET
import numpy as np
#import lisacode
################ Globales variables ################
CfgDir = "/Users/petiteau/Applications/lisatools//Cfg/"
BinDir = "/Users/petiteau/Applications/lisatools//bin/"
# For testing
#CfgDir="/Users/petiteau/Applications/src/LISACode/LISACode_2_0/LISACode2/ConfigFiles/"
#BinDir="/Users/petiteau/Applications/src/LISACode/LISACode_2_0/LISACode2/Main/Exe/"
############### Functions ###############
def makefromtemplate(output, template, **kwargs):
    """Instantiate a template file into *output*.

    Every '{key}' placeholder in *template* is replaced by str(value) for
    each key=value keyword argument.  Both files are now closed even on
    error (the original leaked both handles).  Python 2 code.
    """
    with open(template, 'r') as fi, open(output, 'w') as fo:
        for line in fi:
            repline = line
            for kw in kwargs:
                # NOTE(review): str(kwargs[kw]) is used as a regex
                # replacement string, so backslashes in values would be
                # interpreted by re.sub -- confirm values are plain text.
                repline = re.sub('{' + kw + '}', str(kwargs[kw]), repline)
            print >> fo, repline,
def run(command, disp=False, NoExit=False):
    """Execute *command* through os.system after %-interpolating it against
    the module globals; abort the whole script on failure unless NoExit.

    disp   -- echo the expanded command line before running it.
    NoExit -- on failure, print a diagnostic but keep going instead of
              calling sys.exit(1).
    """
    commandline = command % globals()
    if disp :
        print "----> %s" % commandline
    try:
        assert(os.system(commandline) == 0)
    except:
        print 'Script %s failed at command "%s".' % (sys.argv[0],commandline)
        if not NoExit :
            sys.exit(1)
######### XML tools
#### Add tree file
def AddRootTree(newR, refR, absPath) :
    """Recursively merge element *newR* into reference tree node *refR*.

    Children are matched on (tag, 'Type' attribute, 'Name' attribute);
    unmatched elements are created with all attributes copied.  Matched
    Comment elements get their text concatenated, any other match has its
    text overwritten.  Relative <Stream> file names are rewritten as
    absolute paths under *absPath*.
    """
    # 2 arguments :
    # - newR : the new root that we have to add
    # - refR : the root in reference tree where we want to add the new root
    foundR = False
    for child in refR :
        if child.tag == newR.tag and child.get('Type') == newR.get('Type') and child.get('Name') == newR.get('Name') :
            foundR = True
            Elmt = child
    if not foundR :
        # No match: create the element and copy over every attribute.
        Elmt = ET.SubElement(refR,newR.tag)
        for attName in newR.keys() :
            Elmt.set(attName,newR.get(attName))
    if foundR and Elmt.tag=="Comment" :
        Elmt.text = Elmt.text + newR.text
    else :
        Elmt.text = newR.text
    ### Set absolute path for stream
    if Elmt.tag == 'Stream' :
        # Strip whitespace and newlines to recover the bare file name.
        oldTxt=re.sub("\n","",re.sub("\s","",Elmt.text))
        print ">>>"+oldTxt+"<<<"
        if oldTxt[0]!='/' :
            # NOTE(review): oldTxt is used as a regex pattern here; a name
            # containing regex metacharacters would misbehave -- confirm.
            Elmt.text=re.sub(oldTxt,absPath+'/'+oldTxt,Elmt.text)
    #print newR, refR, foundR, Elmt.attrib, newR.getchildren()
    # Descend into the new element's children.
    for child in newR :
        AddRootTree(child,Elmt,absPath)
def dispTree(root,indent):
    """Debug helper: print *root* and its attributes, then recurse into its
    children with one extra tab of indentation."""
    indent2=indent+"\t"
    print indent,root, root.attrib
    for elmt in root :
        dispTree(elmt,indent2)
def ReIndent(inS,indent):
    """Prefix the XML fragment *inS* with *indent* spaces (one per nesting
    level), trim trailing whitespace and terminate with a single newline.

    NOTE(review): the comparison below is against the two-character string
    "\\s" (an unrecognised escape, not a regex class), which no single
    character can ever equal, so CopyS turns True on the very first
    character and leading whitespace is NOT skipped as presumably
    intended -- confirm before changing.
    """
    CopyS = False
    ### Reindent
    tmpS = ""
    for ii in xrange(indent):
        tmpS = tmpS + " "
    for ic in xrange(len(inS)):
        if inS[ic] != "\s" :
            CopyS = True
        if CopyS :
            tmpS = tmpS + inS[ic]
    # Walk back over trailing spaces/tabs/newlines.
    ic=len(tmpS)-1
    while ic>0 and ( tmpS[ic]==" " or tmpS[ic]=="\t" or tmpS[ic]=='\n' ) :
        ic = ic - 1
    #print ic, ">>>"+tmpS[0:ic+1]+"<<<"
    OutS=tmpS[0:ic+1]+"\n"
    return OutS
def TypeBlockLine(line) :
    """Classify an XML line by counting its tags: "open" when it opens more
    elements than it closes, "close" for the reverse, "" when balanced.

    NOTE(review): line[ic+1] is read after each '<'; a line ending in a
    bare '<' would raise IndexError -- confirm inputs always carry a tag.
    """
    Nop=0
    for ic in xrange(len(line)) :
        if line[ic]=='<' :
            if line[ic+1]=='/' :
                Nop = Nop - 1
            else :
                Nop = Nop + 1
    if Nop > 0 :
        return "open"
    elif Nop < 0 :
        return "close"
    else :
        return ""
def WriteXMLTree(Tree,OutputFile) :
    """Serialise ElementTree element *Tree* to *OutputFile* as lisa-xml.

    Writes the XML prolog, DOCTYPE and stylesheet headers, then reassembles
    the token list from ET.tostringlist into one line per element,
    indenting each line by nesting depth via TypeBlockLine/ReIndent.
    """
    rec=ET.tostringlist(Tree)
    fOut=open(OutputFile,'w')
    fOut.write('<?xml version="1.0"?>\n')
    fOut.write('<!DOCTYPE XSIL SYSTEM "http://www.vallis.org/lisa-xml.dtd">\n')
    fOut.write('<?xml-stylesheet type="text/xsl" href="lisa-xml.xsl"?>\n')
    Indent=0
    xline=""
    for ir in xrange(len(rec)) :
        xr=rec[ir]
        xline = xline + xr
        # Close tags (and the second token) terminate the current line.
        if re.search('</',xr) or ir==1 :
            xline = xline + "\n"
        if re.search("\n",xline) :
            blockType=TypeBlockLine(xline)
            if blockType=="close" :
                Indent = Indent - 1
            xlineI = ReIndent(xline,Indent)
            #print Indent,blockType, ">>>>"+xlineI+"<<<<"
            fOut.write(xlineI)
            xline = ""
            if blockType=="open" :
                Indent = Indent + 1
    fOut.close()
############### Main program ###############
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] [INPUTGW1-bary.xml INPUTGW2-bary.xml ...] [INPUTSPECIFCNOISE1.xml ...] OutputBaseName",
version="02.07.13, Antoine Petiteau partially based M. Vallisneri development for lisatools ")
# note that the default value is handled below
##### Options for base configuration #####
parser.add_option("-s", "--seed",
type="int", dest="seed", default=12345,
help="seed for random number generator (int) [required]")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="display parameter values [off by default] : Not use by LISACode at the moment")
parser.add_option("-O", "--OutDataType",
type="string", dest="OutDataType", default="ASCII",
help="Type of output : ASCII,Binary,XML [default ASCII]")
#### Options fixing xml files to be used ####
### Options for MeLDC ###
parser.add_option("-r", "--rawMeasurements",
action="store_true", dest="rawMeasurements", default=False,
help="output raw phase measurements (y's and z's) in addition to TDI X, Y, Z [synthlisa and lisacode only]")
parser.add_option("-L", "--LaserNoise",
action="store_true", dest="LaserNoise", default=False,
help="Include laser noise (this option is always true if FullSim) [off by default]")
parser.add_option("-n", "--noiseOnly",
action="store_true", dest="noiseOnly", default=False,
help="compute SNR using instrumental noise only [off by default] Not use by LISACode2 at the moment")
parser.add_option("-R", "--randomizeNoise",
type="float", dest="randomizeNoise", default=0.0,
help="randomize level of noises [e.g., 0.2 for 20% randomization; defaults to 0] Not use by LISACode2 at the moment")
parser.add_option("-I", "--LISA",
type="string", dest="LISAmodel", default='Eccentric',
help="LISA model for orbits: Static, Rigid, Eccentric [default] Not use by LISACode2 at the moment")
### LISACode only option ###
parser.add_option("-M", "--MLDC",
action="store_true", dest="MLDC", default=False,
help="Update configuration for MLDC: MLDC orbits (for LISA only), polarization ar 0 for input GW file [off by default]")
parser.add_option("-i", "--Instrument",
type="string", dest="Instrument", default="ELISA",
help="Type of instrument : ELISA, LISA [default ELISA]")
parser.add_option("-N", "--NoNoise",
action="store_true", dest="NoNoise", default=False,
help="No noise [off by default : noise inlcuded]")
parser.add_option("-P", "--phasemeterFilter",
action="store_true", dest="PhaMetFilter", default=False,
help="Use standard filter on phasemeter (this option is always true if FullSim) [off by default]")
parser.add_option("-F", "--FullSim",
action="store_true", dest="FullSim", default=False,
help="Full simulation [off by default]")
parser.add_option("-D", "--TDI",
type="string", dest="TDI", default="XYZ",
help="Type of instrument : X, XYZ or XAE (note : for eLISA it will always be forced at X) [default XYZ]")
parser.add_option("-T", "--duration",
type="float", dest="duration", default=62914560.0,
help="total time for TDI observables (s) [default 62914560 = 2^22 * 15]")
parser.add_option("-d", "--timeStep",
type="float", dest="timeStep", default=15.0,
help="time step for measurement sampling (s) [default 15]")
parser.add_option("-p", "--timeStepPhysical",
type="float", dest="timeStepPhys", default=15.0,
help="time step for physical simulation sampling (s) [default equal to timestep]")
parser.add_option("-U", "--FixShotNoise",
action="store_true", dest="FixShotNoise", default=False,
help="Fix the shot noise ; if false the shot noise vary with the orbit. [default false]")
"""
parser.add_option("-t", "--initialTime",
type="float", dest="inittime", default=0.0,
help="initial time for TDI observables (s) [default 0.0]")
parser.add_option("-L", "--OrbitsModel",
type="string", dest="OrbitsModel", default='Eccentric',
help="LISA model: Static, Rigid, Eccentric [default]")
parser.add_option("-O", "--observables",
type="string", dest="observables", default='1.5',
help="TDI observables: 1.5 [default], 2.0, Sagnac (experimental; not compatible with -r) : Not use by LISACode at the moment")
parser.add_option("-A", "--keyOmitsLISA",
action="store_true", dest="keyOmitsLISA", default=False,
help="do not include LISA specification in key [included by default]")
parser.add_option("-D", "--debugSNR",
action="store_true", dest="debugSNR", default=False,
help="show more SNR data [off by default] : Not use by LISACode at the moment")
parser.add_option("-c", "--combinedSNR",
action="store_true", dest="combinedSNR", default=False,
help="use combined snr = sqrt(2)*max{SNR_x, SNR_y, SNR_z} as SNR constrain [off by default] : Not use by LISACode at the moment")
"""
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("You must specify the output file (and input file before if you don't want the default noise only simulation) !")
CurrentDir = os.getcwd()
IdTmpRun=str(options.seed)+""+str(int(100000*np.random.random()))
##### Some checking
if options.OutDataType=="ASCII" :
extentionOut=".txt"
elif options.OutDataType=="Binary" or options.OutDataType=="XML" :
extentionOut=".bin"
else :
print options.OutDataType, " : unknown type of data (only ASCII, Binary and XML)"
sys.exit(1)
PhaMetFilter = options.PhaMetFilter
LaserNoise = options.LaserNoise
if not options.NoNoise:
PhaMetFilter = True
if options.FullSim :
PhaMetFilter = True
LaserNoise = True
##### Input and output files
InputFileName = args[0:-1]
OutputBaseName = args[-1]
##### Make the list of default xml file based on options
InXML=[]
Nlinks=""
if options.Instrument=="ELISA" :
Nlinks="2links"
PhaMetmesSC="sci1,sci1s,sci2s,sci3"
### Orbits, telescope, laser
InXML.append(CfgDir+'/ConfigNewLISA/ELISA/Config-Orbits_1Gm.xml')
InXML.append(CfgDir+'/ConfigNewLISA/ELISA/Config-TelescLaser_P07_D25.xml')
### Noises
if not options.NoNoise :
InXML.append(CfgDir+'/ConfigNewLISA/ELISA/Noises/Config-NoiseAcc_ELISA.xml')
InXML.append(CfgDir+'/ConfigNewLISA/ELISA/Noises/Config-NoiseOOPN_ELISA.xml')
InXML.append(CfgDir+'/ConfigNewLISA/ELISA/Noises/Config-NoiseShot_ELISA.xml')
elif options.Instrument=="LISA" :
Nlinks="3links"
PhaMetmesSC="sci1,sci1s,sci2,sci2s,sci3,sci3s"
if options.MLDC :
InXML.append(CfgDir+'/ConfigNewLISA/LISA/Config-Orbits_MLDC.xml')
else:
InXML.append(CfgDir+'/ConfigNewLISA/LISA/Config-Orbits_std.xml')
InXML.append(CfgDir+'/ConfigNewLISA/LISA/Config-TelescLaser_std.xml')
if not options.NoNoise :
InXML.append(CfgDir+'/ConfigNewLISA/LISA/Noises/Config-NoiseAcc_std2.xml')
InXML.append(CfgDir+'/ConfigNewLISA/LISA/Noises/Config-NoiseOOPN_std.xml')
InXML.append(CfgDir+'/ConfigNewLISA/LISA/Noises/Config-NoiseShot_std.xml')
else:
print "ERROR : The instrument ",options.Instrument," is unknown (only ELISA and LISA are already implemented)."
#### Optical benches
if PhaMetFilter :
if LaserNoise :
InXML.append(CfgDir+'/pyTemplates/Std2002/LC2-'+Nlinks+'_PhaMet-sci-Filter.xml')
InXML.append(CfgDir+'/pyTemplates/Std2002/LC2-'+Nlinks+'_PhaMet-tau-Filter.xml')
InXML.append(CfgDir+'/pyTemplates/Std2002/LC2-'+Nlinks+'_PhaMet-TDIInt.xml')
PhaMetmesSC=PhaMetmesSC+",tau1,tau1s,tau2,tau2s,tau3,tau3s"
else:
InXML.append(CfgDir+'/pyTemplates/Std2002/LC2-'+Nlinks+'_PhaMet-sci-WeakFilter.xml')
else :
InXML.append(CfgDir+'/pyTemplates/Std2002/LC2-'+Nlinks+'_PhaMet-sci-NoFilter.xml')
#### TDI (and output)
fNTmpTDI="tmpTDI"+IdTmpRun+".xml"
if Nlinks=="2links":
makefromtemplate(fNTmpTDI,CfgDir+'/pyTemplates/LC2_TDI-X.xml',\
outname=OutputBaseName+"-TDI"+extentionOut,\
outdatatype=options.OutDataType)
else:
makefromtemplate(fNTmpTDI,CfgDir+'/pyTemplates/LC2_TDI-'+options.TDI+'.xml',\
outname=OutputBaseName+"-TDI"+extentionOut,\
outdatatype=options.OutDataType)
InXML.append(fNTmpTDI)
#### Ouputs
if options.rawMeasurements :
fNTmpOut="tmpOut"+IdTmpRun+".xml"
makefromtemplate(fNTmpOut,CfgDir+'/pyTemplates/LC2-Output-PhaMet.xml',\
outname=OutputBaseName+"-SC"+extentionOut,\
outdatatype=options.OutDataType,\
PhaMetmesSC=PhaMetmesSC)
InXML.append(fNTmpOut)
#### Times
fNTmpTime="tmpTime"+IdTmpRun+".xml"
FixShotNoise="On"
if options.FixShotNoise :
FixShotNoise="Off"
makefromtemplate(fNTmpTime,CfgDir+'/pyTemplates/LC2-Times-Detector.xml',\
cadence=str(options.timeStep),\
duration=str(options.duration),\
cadencePhys=str(options.timeStepPhys),\
UpdateShotNoise=FixShotNoise,\
InterpolationNoises="Lagrange")
InXML.append(fNTmpTime)
#### Add input file given in argument
for file in InputFileName :
InXML.append(file)
print "XML inputs :",InXML
for file in InXML :
if not os.path.isfile(file) :
print "XML file "+file+"is not found !"
sys.exit(1)
##### Start the output tree
OutTree = ET.Element('XSIL')
tmp = ET.SubElement(OutTree,'Param')
tmp.set("Name","Author")
tmp.text = "Antoine Petiteau"
tmp = ET.SubElement(OutTree,'Param')
tmp.set("Name","GenerationDate")
tmp.set("Type","ISO-8601")
tmp.text = str(date.today().isoformat())
tmp = ET.SubElement(OutTree,'Comment')
tmp.text = "lisaXML 1.2 [A. Petiteau (based on M. Vallisneri), July 2013]"
##### Loop on input xml file
for fNIn in InXML :
AbsPathfNInDir=os.path.dirname(CurrentDir+'/'+fNIn)
root = (ET.parse(fNIn)).getroot()
childs = root.getchildren()
icStart=0
while icStart<len(childs) and (childs[icStart].tag == 'Comment' or childs[icStart].get('Name')=="Author" or childs[icStart].get('Name')=="GenerationDate" ) :
icStart = icStart + 1
for ic in xrange(icStart,len(childs)) :
AddRootTree(childs[ic],OutTree,AbsPathfNInDir)
##### Update for MLDC configuration
if options.MLDC :
### Set polarisation at 0 for input GW file
for root in OutTree.iter('XSIL'):
if root.get('Type')=="SampledPlaneWave" :
for param in root.findall('Param'):
if param.get('Name')=="Polarization" :
## Replace number keeping spacing
oldTxt=re.sub("\n","",re.sub(" ","",param.text))
newTxt=re.sub(oldTxt,"0.0",param.text)
param.text = newTxt
#dispTree(OutTree,"")
##### Write the output tree
WriteXMLTree(OutTree,OutputBaseName+".xml")
##### Clean
for fNIn in InXML :
if re.search(IdTmpRun,fNIn) :
run("rm "+fNIn)
##### Create command for LISACode
xmlOutGlob=OutputBaseName + "-output.xml"
cmdLC = BinDir + "LISACode2 "
if options.seed != 0 :
cmdLC = cmdLC + " -s " + str(options.seed)
if options.verbose :
cmdLC = cmdLC + " -v"
if options.OutDataType=="XML" :
cmdLC = cmdLC + " -x " + xmlOutGlob
cmdLC = cmdLC + " " + OutputBaseName + ".xml"
if os.path.isdir('Log'):
print "==================== Start run LISACode2 ===================="
else :
cmdLC = cmdLC + " > " + OutputBaseName + "_log.out\n"
#print cmdLC
### Run LISACode
run(cmdLC,True,True)
if os.path.isdir('Log'):
print "==================== End run LISACode2 ===================="
if options.OutDataType=="XML" and os.path.isfile(xmlOutGlob) :
root = (ET.parse(xmlOutGlob)).getroot()
childs = root.getchildren()
icStart=0
while icStart<len(childs) and (childs[icStart].tag == 'Comment' or childs[icStart].get('Name')=="Author" or childs[icStart].get('Name')=="GenerationDate" ) :
icStart = icStart + 1
for ic in xrange(icStart,len(childs)) :
AddRootTree(childs[ic],OutTree,CurrentDir)
WriteXMLTree(OutTree,OutputBaseName+".xml")
| [
"vallis@vallis.org"
] | vallis@vallis.org |
c7bd9e1169108d7aafc4360c20c6bffd272beddf | 1d18a9358b9ea5b1cf1e9151e7ad321401c86b0f | /home/urls.py | f064f90796a9b8ef365e1eedf2155c1e71d386ce | [] | no_license | saipraneethreddy969/ecommerce | a73f737f3ce1046b77091c4fa3312480ff5f8388 | 60353f20f435679212b91944a098a7b077a14cfa | refs/heads/master | 2022-12-22T01:12:44.493025 | 2020-09-25T12:09:32 | 2020-09-25T12:09:32 | 264,087,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from django.urls import path
from . import views
urlpatterns = [
path('',views.index),
path('signin/',views.signin),
path('register/',views.register),
path('logout/',views.logout),
path('base/',views.base),
path('apparels/',views.apparels,name="apparels"),
path('cart/',views.cart),
path('checkout/',views.checkout,name="checkout"),
path('add_cart/',views.add_cart),
path('process_order/',views.process_order),
] | [
"saipraneethreddy969@gmail.com"
] | saipraneethreddy969@gmail.com |
075b69f4e7a9d95eeaffa238354861b593827a6f | 614977ab03f9526ebc70674dd0bc2d61dea70925 | /ciclos_condicionales/rangos.py | 008e01bdc9dc2647247fcae5b20434b75959d938 | [] | no_license | JuanAyestas/ejercicios_facilito | 048b76498d125ed37f39eb563d15a20cbff2e5bb | 71d93da466549f5dceeb59739df4be6546641145 | refs/heads/master | 2022-07-18T01:13:08.450094 | 2020-05-14T23:36:10 | 2020-05-14T23:36:10 | 263,550,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #rango empieza a contar desde 0 hasta el valor definido
for valor in range(10):
print(valor)
print("------")
#definir donde comienza y que termine, incluso numeros negativos
for valor in range(1,10):
print(valor)
print("------")
#incluye saltos
for valor in range(1,40,2):
print(valor)
print("------")
#range y len
lista=[1,2,3,4,5,6]
for indice in range(len(lista)):
print("Índice: {}, valor: {}".format(indice, lista[indice]))
print("------")
#Se puede añadir un punto de partida del indice, en este caso.
lista = [1, 2, 3, 4, 5, 6]
for indice, valor in enumerate(lista):
print("Índice: {}, valor: {}".format(indice, valor))
print("------")
| [
"astor_david7@hotmail.com"
] | astor_david7@hotmail.com |
5f8a9b3d4d4c01fdabf80514da857be165d8f054 | c0cf64fd66fcedc1ec030a3ad154f6096129809a | /list.py | 58fdb51f345c278e62e092c3e93e6f4f2fbcce44 | [] | no_license | coconumberzzz/linuxgit | e0a812dab202bd36176f0a3652ee1ea70b559563 | 25545ef3a50099220efb7cf03f4c82b57458c933 | refs/heads/master | 2020-07-07T01:40:07.534320 | 2019-08-22T22:34:41 | 2019-08-22T22:34:41 | 203,203,976 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | #집합
s1=set([1,2,3])
l1=list(s1) #리스트로 변환
print(l1)
print(l1[0])
t1=tuple(s1) #튜플로 변환
print(t1)
print(t1[0])
s1=set([1,2,3,4,5,6])
s2=set([4,5,6,7,8,9])
s1&s2 #교집합
s1|s2 #합집합
s1-s2 #차집합
s1=set([1,2,3])
s1.add(4) #1개 추가
s1.update(5,6,7) #여러개 추가
s1.remove(7) #삭제
# 튜플 >> (소괄호),
t1=(1,) #하나일때 무조건 콤마
t2=(1,2,3)
t3=1,2,3
(t4)='python','life'
t4=('python','life')
# 딕셔너리 >> {중괄호},key(불변):value(변경),키중복x,값중복o
//함수 : keys,in,values,items
dic = {1:"my",2:"name",3:"is",4:"python"}
#리스트 >> [대괄호],중복o
//함수 : sort(정렬),reverse,append(추가)
myscore = [100,90,85,78]
[a,b]=['python','life']
#집합 >> {중괄호},중복x,순서가없어 인덱스로 특정 값 접근x
//함수 : add(1개추가),update(여러개추가),remove(삭제)
#불
bool([1,2,3]) #T
bool('python') #T
bool([]) #F
bool('') #F
#변수
a=[1,2,3]
b=a
id(a) #주소값 반환
id(b) #a와 동일한 주소를 반환
a is b #동일객체를 가르키는지 판단하는 명령어 >T
a[1]=4 #a와b모두 [1,4,3]으로 변경
a[1,2,3]
b=a[:] #a의 전체를 복사
a[1]=4
print(a) #[1,4,3]
print(b) #[1,2,3]
b is a #False
from cop import copy
b=copy(a) #b=a[:]와 동일
b is a #False
a=3
b=5
a,b=b,a #a에 b값(5)를 넣고 b에 a값(3)을 넣는다
print(a) #5
print(b) #3
#반복문
pocket = ['paper','money','phone','card']
money=2000
if 'money' in pocket:
print("taxi")
elif card:
print("card taxi")
else:
print("walk")
treehit=0
while treehit<10:
treehit+=1
print("나무를 %d번 찍었습니다."%treehit)
if treehit==10
print("나무가 넘어갑니다.")
prompt="""
1.add
2.del
3.list
4.quit
enter number : """
number=0
while number!=4:
print(prompt) #위에 지정해둔 글 출력
number(input()) #입력
a=0
while a<10:
a=a+1
if a%2==0 : continue #continue는 while문 처음으로 돌아가게 함
print(a)
| [
"msms9957@gmail.com"
] | msms9957@gmail.com |
3bcf2e10f46a49a574731b8d0868e1e3562eb427 | 5f3f5c5d1df3ba25f60be61676c6d1b51b91930d | /Code files/enterchange.py | 39dab7f1ccdab05f10851701a91a8be9456164e7 | [] | no_license | sahilsingla21/Time_table_application | f0d15587aaa30493d7c5347cd111e8d8d8d74d09 | 5021886358605d501e988916ce6d781dc5712b76 | refs/heads/master | 2022-12-03T13:08:59.878120 | 2020-08-21T16:35:58 | 2020-08-21T16:35:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py |
import kivy
kivy.require('1.7.2')
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
from kivy.uix.button import Button
from kivy.uix.dropdown import DropDown
from kivy.lang import Builder
from kivy.uix.label import Label
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
import variables
import cse_1
import makechanges
import timetable
slotbutton=""
daybutton=""
class ChangeSlotScreen(Screen):
def __init__(self, **kwargs):
super(ChangeSlotScreen, self).__init__(**kwargs)
print(variables.SLOT)
print(variables.DAY)
float1= FloatLayout()
self.label1 = Label(text="Current Value:",size_hint=(0.2,0.2),pos_hint={"center_x":0.3,"center_y":0.8},font_size=30)
float1.add_widget(self.label1)
self.label3=Label(text="{}".format(cse_1.ReturnLecture(variables.DAY,variables.SLOT)),size_hint=(0.2,0.2),pos_hint={"x":0.45,"center_y":0.8},font_size=35)
float1.add_widget(self.label3)
label2= Label(text="Enter New Value:",size_hint=(0.2,0.2),pos_hint={"center_x":0.27,"center_y":0.6},font_size=30)
float1.add_widget(label2)
variables.textinput = TextInput(text='Type text here...',multiline=True,size_hint=(0.4,0.1),pos_hint={"x":0.45,"center_y":0.59})
float1.add_widget(variables.textinput)
btn1= Button(text="Save Change",pos_hint={"center_x":0.3,"center_y":0.2},size_hint=(0.29,0.2),font_size=30)
btn1.bind(on_release=makechanges.savechangesbtn)
btn2= Button(text="Cancel",pos_hint={"center_x":0.7,"center_y":0.2},size_hint=(0.29,0.2),font_size=30)
float1.add_widget(btn1)
float1.add_widget(btn2)
self.add_widget(float1)
def callbackTo2(self,event):
# sm.current="StudentLoginPage"
if mainbutton.text == "'Student'":
sm.current="StudentLoginPage"
elif mainbutton.text == "'Faculty'":
sm.current="FacultyLoginPage"
class StudentLoginPage(Screen):
def __init__(self, **kwargs):
super(StudentLoginPage, self).__init__(**kwargs)
# super(LoginPage, self).__init__(name='LoginPage')
self.add_widget(Label(text="Student Login Page"))
class FacultyLoginPage(Screen):
def __init__(self, **kwargs):
super(FacultyLoginPage, self).__init__(**kwargs)
# super(LoginPage, self).__init__(name='LoginPage')
self.add_widget(Label(text="Faculty Login Page"))
# sm = ScreenManager()
# sm.add_widget(ChangeSlotScreen(name='changes'))
# sm.add_widget(StudentLoginPage(name='StudentLoginPage'))
# sm.add_widget(FacultyLoginPage(name='FacultyLoginPage'))
# # kv = Builder.load_file("student_admin_login.kv")
# class My8App(App):
# def build(self):
# return sm
if __name__ == '__main__':
My8App().run()
| [
"noreply@github.com"
] | sahilsingla21.noreply@github.com |
fa873cd3436352a8331c54ba1b87ae405f4e95ff | f6ce28f63b286c22ab47448a6987b894535aaa5f | /Relation.py | ffeec8d2acaa3c2d1250d809a015aa3b0d4fdfc5 | [] | no_license | ReactiveXYZ-Dev/Database-Normalization-Checker | 90f3109b21b4ce5175450f52978ef904b1425ffa | 8c9405c34ae56cca1b47fb7dbce12d3ee7516f36 | refs/heads/master | 2021-04-27T12:51:57.694748 | 2018-02-22T03:48:00 | 2018-02-22T03:48:00 | 122,427,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | import AttributeSet, FD
class Relation(object):
def __init__(self, attrs, fds):
"""
attrs: string
fds: functional dependencies in object form
"""
self._attrs = AttributeSet(attrs)
self._fds = set()
for lhs, rhs in fds.items():
self._fds.add(FD(lhs, rhs))
def attrs(self):
return self._attrs
def fds(self):
return self._fds | [
"jackie@reactive.xyz"
] | jackie@reactive.xyz |
3b058a7d68f1a147b76c182c244a4daaba880a67 | 7bbc9df4992054342fa5293d6be1fc5e40209d67 | /apps/example/models.py | df9be2373b5ecc18bfbc54e5c9d2a5a4cc06bbf3 | [] | no_license | AndyPythonCode/FastAPI-Basic-Settings | 4a8d7a1da69b7b54aac4f7b472bfdabc684d5779 | bdff706bbf2acf9808608d327b4b76e8b9959f87 | refs/heads/main | 2023-05-21T23:22:30.606989 | 2021-06-10T20:05:02 | 2021-06-10T20:05:02 | 372,385,443 | 6 | 0 | null | 2021-06-10T00:29:00 | 2021-05-31T04:51:40 | Python | UTF-8 | Python | false | false | 371 | py | # Create you own tables inside database.
# https://docs.sqlalchemy.org/en/14/core/metadata.html
import sqlalchemy
from database.db import metadata
example = sqlalchemy.Table("example",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("text", sqlalchemy.String(255)),
sqlalchemy.Column("completed", sqlalchemy.Boolean),
) | [
"andyarciniegas24@gmail.com"
] | andyarciniegas24@gmail.com |
2a4e1fdd2a5e72b4c6e54267b1510a14ef739d75 | 22953502d39f57716fabd4168208cc0ab49b86ec | /problem-093/solution.py | e69605626a54bb20c55f41207d309a41d0c2c916 | [] | no_license | zasdaym/daily-coding-problem | 36f2afacd963b54c2d274d17c93d3a0aa2d931ae | 05688231ed492f2bf620665dd4331a0f7baca951 | refs/heads/main | 2023-06-29T18:32:14.700462 | 2021-07-25T07:20:59 | 2021-07-25T07:20:59 | 343,761,606 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | from typing import Tuple
class TreeNode:
def __init__(self, val: int, left: 'TreeNode' = None, right: 'TreeNode' = None) -> None:
self.left = left
self.right = right
self.val = val
def largest_bst_subtree(root: TreeNode) -> TreeNode:
result = helper(root, 0, None)
return result[3]
def helper(root: TreeNode, max_size: int, max_root: TreeNode) -> Tuple[int, int, int, TreeNode]:
if root is None:
return (0, float('inf'), float('-inf'), max_root)
left_result = helper(root.left, max_size, max_root)
left_size, left_min, left_max, _ = left_result
right_result = helper(root.right, max_size, max_root)
right_size, right_min, right_max, _ = right_result
# if root is a valid BST candidate
if root.val > left_max and root.val < right_min:
size = left_size + right_size + 1
if size > max_size:
max_size = size
max_root = root
return (size, min(root.val, left_min), max(root.val, right_max), max_root)
elif left_size > right_size:
return left_result
else:
return right_result
test_node = TreeNode(1, TreeNode(3, TreeNode(2), TreeNode(4)), TreeNode(12))
result = largest_bst_subtree(test_node)
assert result.val == 3
# 1
# 3 12
# 2 4
| [
"zasdaym@gmail.com"
] | zasdaym@gmail.com |
ad1d0ac58a147fc51013a59cfcd3efb4fc279557 | b1ecf17250eb26a0cf57127f94f3dc550c3c4a96 | /askMeRudukh/asgi.py | ce32f01fae661608514858acbd379beaa42e74ff | [] | no_license | Lyalyashechka/askMeRudukh_tp_web_1_sem | a06de69ae79c7c23890384119a7c32d255d7a7ca | fa9581897121aa5e33055624a5883c293722d648 | refs/heads/main | 2023-07-11T08:06:44.421637 | 2021-08-14T11:17:12 | 2021-08-14T11:17:12 | 358,814,229 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for askMeRudukh project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'askMeRudukh.settings')
application = get_asgi_application()
| [
"Rudukh.Eugene@yandex.ru"
] | Rudukh.Eugene@yandex.ru |
36a81ca96ce78405c1d407ae4b2dd0e3f750df20 | dcf9a7aeaddc876530e8f28fd17130f8859feda9 | /pymatflow/vasp/base/__init__.py | c7c7eb78d527286f5af7ef37ef963407d47591eb | [
"MIT"
] | permissive | DeqiTang/pymatflow | 3c6f4a6161a729ad17db21db9533187c04d8f5ac | 922722187e2678efbfa280b66be2624b185ecbf5 | refs/heads/master | 2022-05-25T19:41:19.187034 | 2022-03-05T03:07:08 | 2022-03-05T03:07:08 | 245,462,857 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 930 | py |
#
def vasp_variable_to_string(variable):
if False == variable.status:
return ""
if None == variable.value:
return ""
out = ""
if 0 == len(variable.value):
return out + variable.key
if 1 == len(variable.value):
if 1 == len(variable.value[0]):
out += variable.key + " = " + variable.value[0][0]
else:
out += variable.key + " ="
for item in variable.value[0]:
out += " " + item
else:
out += variable.key + " ="
for val in variable.value[0]:
out += " " + val
out += "\n"
for row in range(1, len(variable.value)-1):
for val in variable.value[row]:
out += " " + val
out += "\n"
for val in variable.value[len(variable.value) - 1]:
out += " " + val
return out | [
"openview@163.com"
] | openview@163.com |
c4221078f0cb8c5b93c73617aa3f996a78b5ab8e | 4541823eb5c4b1518feb145279109a53a32ff206 | /snipres.py | e56f8eb684ca6e2d805c282d2b60312d2f845c7c | [
"BSD-2-Clause"
] | permissive | YiteWang/pytorch_resnet_cifar10 | 529a4d322fbfa69676666b6bb99c481218b66fa6 | fed17d2912a3ff1df8ccd94dd5d3bde9042f85c1 | refs/heads/master | 2023-06-11T19:09:52.250190 | 2021-07-01T04:09:32 | 2021-07-01T04:09:32 | 298,161,843 | 0 | 0 | null | 2020-09-24T03:43:36 | 2020-09-24T03:43:35 | null | UTF-8 | Python | false | false | 5,084 | py | import snip
import resnet
import torch.nn.parallel
import torch.nn as nn
import utils
def apply_snipres(args, nets, data_loader, criterion, input_shape, num_classes, samples_per_class = 10):
if args.arch == 'resnet20':
print('Creating {} model.'.format(args.arch))
cutmodel = torch.nn.DataParallel(resnet.__dict__[args.arch](ONI=args.ONI, cut=True, T_iter=args.T_iter))
cutmodel.cuda()
# elif args.arch == 'resnet18':
# print('Creating {} model.'.format(args.arch))
# # Using resnet18 from Synflow
# cutmodel = load.model(args.arch, 'tinyimagenet')(input_shape,
# num_classes,
# dense_classifier = True).cuda()
# elif args.arch == 'resnet110' or args.arch == 'resnet110full':
# # Using resnet110 from Apollo
# # model = apolo_resnet.ResNet(110, num_classes=num_classes)
# cutmodel = load.model(args.arch, 'lottery')(input_shape,
# num_classes,
# dense_classifier = True).cuda()
# elif args.arch in ['vgg16full', 'vgg16full-bn', 'vgg11full', 'vgg11full-bn'] :
# if args.dataset == 'tiny-imagenet':
# modeltype = 'tinyimagenet'
# else:
# modeltype = 'lottery'
# # Using resnet110 from Apollo
# # model = apolo_resnet.ResNet(110, num_classes=num_classes)
# cutmodel = load.model(args.arch, modeltype)(input_shape,
# num_classes,
# dense_classifier = True).cuda()
else:
raise NotImplementedError('Only ResNet20 can be used for snipres method.')
# first add masks to each layer of nets
for net in nets:
net.train()
net.zero_grad()
for layer in net.modules():
snip.add_mask_ones(layer)
model = nets[0]
# add masks to cutmodel
cutmodel.train()
cutmodel.zero_grad()
for layer in cutmodel.modules():
snip.add_mask_ones(layer)
# data_iter = iter(snip_loader)
if not args.iter_prune:
data_iter = iter(data_loader)
print('[*] Using single-shot SNIP.')
# Let the neural network run one forward pass to get connect sensitivity (CS)
for i in range(samples_per_class):
try:
(input, target) = snip.GraSP_fetch_data(data_iter, num_classes, samples_per_class)
except:
data_iter = iter(dataloader)
(input, target) = snip.GraSP_fetch_data(data_iter, num_classes, samples_per_class)
# (input, target) = data_iter.next()
target = target.cuda()
input_var = input.cuda()
target_var = target
if args.half:
input_var = input_var.half()
# compute output
output = cutmodel(input_var)
loss = criterion(output, target_var)
loss.backward()
# prune the network using CS
snip.net_prune_snip(cutmodel, args.sparse_lvl)
with torch.no_grad():
for modellayer, cutmodellayer in zip(model.modules(), cutmodel.modules()):
if isinstance(modellayer, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
modellayer.weight = cutmodellayer.weight
modellayer.weight_mask = cutmodellayer.weight_mask
# print('[*] SNIP pruning done!')
cutmodel = None
print('[*] SNIP weight pruning done!')
else:
print('[*] Using iterative SNIP.')
num_iter = 10
data_iter = iter(data_loader)
for i in range(num_iter):
try:
(input, target) = snip.GraSP_fetch_data(data_iter, num_classes, samples_per_class)
except:
data_iter = iter(dataloader)
(input, target) = snip.GraSP_fetch_data(data_iter, num_classes, samples_per_class)
# (input, target) = data_iter.next()
target = target.cuda()
input_var = input.cuda()
target_var = target
if args.half:
input_var = input_var.half()
# compute output
output = cutmodel(input_var)
loss = criterion(output, target_var)
if args.ep_coe!=0:
loss += args.ep_coe * get_ep(model)
loss.backward()
# prune the network using CS
snip.net_iterative_prune(cutmodel, args.sparse_lvl**((i+1)/num_iter))
with torch.no_grad():
for modellayer, cutmodellayer in zip(model.modules(), cutmodel.modules()):
if isinstance(modellayer, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
modellayer.weight = cutmodellayer.weight
modellayer.weight_mask = cutmodellayer.weight_mask
# print('[*] SNIP pruning done!')
cutmodel = None
print('[*] Iterative SNIP pruning done!') | [
"yitew2@illinois.edu"
] | yitew2@illinois.edu |
a811c14e13df9d40349387fdf60c2178473d37f5 | 7b92310c0abb6493a56aae0c26eaf9e257d1c3ce | /manage.py | 28b3add52c1197dca10f1376a3dea5140e7089dc | [] | no_license | DrKimpatrick/django-custom-exception-response-formatter | eb1f5fb7c25d13dc63697929db3b21a500a62d5f | 2cca0fb25260b09bb94cf016f0284c11c9125535 | refs/heads/master | 2023-05-28T16:00:00.035542 | 2020-05-28T07:28:42 | 2020-05-28T07:28:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_exception_handler.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"djangocircle@gmail.com"
] | djangocircle@gmail.com |
1088e21e565a1e3657d113b966546a1b0eb98ac8 | 5679731cee36c537615d285ed72810f4c6b17380 | /167_TwoSumII_InputArrayIsSorted.py | 4ea08c7abe24681955be0a656cf106fb19e4146e | [] | no_license | manofmountain/LeetCode | 6b76105190a9b62df65a7b56b6def4120498b9fa | 718f688b3d316e8c10ef680d9c21ecd518d062f8 | refs/heads/master | 2021-01-12T03:41:48.318116 | 2017-07-18T12:35:58 | 2017-07-18T12:35:58 | 78,252,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py |
##43.90%
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
if len(numbers) < 2:
return []
left, right = 0, len(numbers) - 1
while left < right:
sum = numbers[left] + numbers[right]
if sum < target:
left += 1
elif sum > target:
right -= 1
else:
return [left + 1, right + 1]
return [] | [
"noreply@github.com"
] | manofmountain.noreply@github.com |
56e11b7fc2a6dbae866aaa75478e6b76e08522e3 | 7b1c13ca6720284de8a83dddaa7b8ab60f3864bd | /wavefront_api_client/models/integration_dashboard.py | 4ebd75ccc4904370b28a0b2d4e055cc76bea3a7b | [
"Apache-2.0"
] | permissive | basilisk487/python-client | 61d772ac7557517e5db7fec570cc7d1389a66713 | e534ea8e88f8b0333215e6a3ff9c589bcc3db571 | refs/heads/master | 2021-01-25T04:35:27.034270 | 2018-08-28T17:58:49 | 2018-08-28T17:58:49 | 93,450,467 | 0 | 0 | null | 2017-06-05T21:53:11 | 2017-06-05T21:53:11 | null | UTF-8 | Python | false | false | 6,151 | py | # coding: utf-8
"""
Wavefront Public API
<p>The Wavefront public API enables you to interact with Wavefront servers using standard web service API tools. You can use the API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make API calls outside the Wavefront API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p><p>For legacy versions of the Wavefront API, see the <a href=\"/api-docs/ui/deprecated\">legacy API documentation</a>.</p> # noqa: E501
OpenAPI spec version: v2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.models.dashboard import Dashboard # noqa: F401,E501
class IntegrationDashboard(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'description': 'str',
'url': 'str',
'dashboard_obj': 'Dashboard'
}
attribute_map = {
'name': 'name',
'description': 'description',
'url': 'url',
'dashboard_obj': 'dashboardObj'
}
def __init__(self, name=None, description=None, url=None, dashboard_obj=None): # noqa: E501
"""IntegrationDashboard - a model defined in Swagger""" # noqa: E501
self._name = None
self._description = None
self._url = None
self._dashboard_obj = None
self.discriminator = None
self.name = name
self.description = description
self.url = url
if dashboard_obj is not None:
self.dashboard_obj = dashboard_obj
@property
def name(self):
"""Gets the name of this IntegrationDashboard. # noqa: E501
Dashboard name # noqa: E501
:return: The name of this IntegrationDashboard. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this IntegrationDashboard.
Dashboard name # noqa: E501
:param name: The name of this IntegrationDashboard. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this IntegrationDashboard. # noqa: E501
Dashboard description # noqa: E501
:return: The description of this IntegrationDashboard. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this IntegrationDashboard.
Dashboard description # noqa: E501
:param description: The description of this IntegrationDashboard. # noqa: E501
:type: str
"""
if description is None:
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def url(self):
"""Gets the url of this IntegrationDashboard. # noqa: E501
URL path to the JSON definition of this dashboard # noqa: E501
:return: The url of this IntegrationDashboard. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this IntegrationDashboard.
URL path to the JSON definition of this dashboard # noqa: E501
:param url: The url of this IntegrationDashboard. # noqa: E501
:type: str
"""
if url is None:
raise ValueError("Invalid value for `url`, must not be `None`") # noqa: E501
self._url = url
@property
def dashboard_obj(self):
"""Gets the dashboard_obj of this IntegrationDashboard. # noqa: E501
:return: The dashboard_obj of this IntegrationDashboard. # noqa: E501
:rtype: Dashboard
"""
return self._dashboard_obj
@dashboard_obj.setter
def dashboard_obj(self, dashboard_obj):
"""Sets the dashboard_obj of this IntegrationDashboard.
:param dashboard_obj: The dashboard_obj of this IntegrationDashboard. # noqa: E501
:type: Dashboard
"""
self._dashboard_obj = dashboard_obj
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IntegrationDashboard):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | basilisk487.noreply@github.com |
ce63be621dd2fa160d3e9198752579ac7e8f9b18 | 364b36d699d0a6b5ddeb43ecc6f1123fde4eb051 | /_downloads_1ed/fig_fft_text_example.py | 78f8d57d71630eb3e61ff1ec81dc25ae5256806e | [] | no_license | astroML/astroml.github.com | eae3bfd93ee2f8bc8b5129e98dadf815310ee0ca | 70f96d04dfabcd5528978b69c217d3a9a8bc370b | refs/heads/master | 2022-02-27T15:31:29.560052 | 2022-02-08T21:00:35 | 2022-02-08T21:00:35 | 5,871,703 | 2 | 5 | null | 2022-02-08T21:00:36 | 2012-09-19T12:55:23 | HTML | UTF-8 | Python | false | false | 2,376 | py | """
Example of a Fourier Transform
------------------------------
Figure E.1
An example of approximating the continuous Fourier transform of a function
using the fast Fourier transform.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import fftpack
from astroML.fourier import FT_continuous, sinegauss, sinegauss_FT
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Choose parameters for the wavelet
N = 10000
t0 = 5
f0 = 2
Q = 2
#------------------------------------------------------------
# Compute the wavelet on a grid of times
Dt = 0.01
t = t0 + Dt * (np.arange(N) - N / 2)
h = sinegauss(t, t0, f0, Q)
#------------------------------------------------------------
# Approximate the continuous Fourier Transform
f, H = FT_continuous(t, h)
rms_err = np.sqrt(np.mean(abs(H - sinegauss_FT(f, t0, f0, Q)) ** 2))
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(hspace=0.25)
# plot the wavelet
ax = fig.add_subplot(211)
ax.plot(t, h.real, '-', c='black', label='$Re[h]$', lw=1)
ax.plot(t, h.imag, ':', c='black', label='$Im[h]$', lw=1)
ax.legend()
ax.set_xlim(2, 8)
ax.set_ylim(-1.2, 1.2)
ax.set_xlabel('$t$')
ax.set_ylabel('$h(t)$')
# plot the Fourier transform
ax = fig.add_subplot(212)
ax.plot(f, H.real, '-', c='black', label='$Re[H]$', lw=1)
ax.plot(f, H.imag, ':', c='black', label='$Im[H]$', lw=1)
ax.text(0.55, 1.5, "RMS Error = %.2g" % rms_err)
ax.legend()
ax.set_xlim(0.5, 3.5)
ax.set_ylim(-1.9, 1.9)
ax.set_xlabel('$f$')
ax.set_ylabel('$H(f)$')
plt.show()
| [
"vanderplas@astro.washington.edu"
] | vanderplas@astro.washington.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.