seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
6810114320 | #!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(precision=4)
def f(x):
    """Runge-style test function 1 / (1 + 5 x^2); accepts scalars or numpy arrays."""
    denominator = 1 + 5 * x ** 2
    return 1 / denominator
# 65 equally spaced interpolation nodes on [-1, 1] and the function values there.
xw=np.linspace(-1,1,65)
fw=f(xw)
# Dead code kept as a module-level string: an earlier attempt that solved the
# Vandermonde system directly for the monomial coefficients (numerically
# ill-conditioned at this size, hence abandoned).
"""
A=np.array([xw**i for i in range(0,xw.shape[0])])
wsp=np.linalg.solve(A.T,fw)
print(wsp)
def f(p):
sum=np.zeros_like(p)
for i in range(wsp.shape[0]):
sum=sum+(p**i)*wsp[i]
return sum
"""
def l(p):
    """Evaluate the Lagrange interpolating polynomial through (xw, fw) at point p."""
    total = 0
    n = xw.shape[0]
    for i in range(n):
        # Boolean mask selecting every node except node i.
        others = np.arange(n) != i
        numerator = np.prod(p - xw[others])
        denominator = np.prod(xw[i] - xw[others])
        total = total + numerator / denominator * fw[i]
    return total
# Dense evaluation grid for plotting the interpolant.
xp=np.arange(-1,1.01,0.01)
yp=np.array([l(a) for a in xp])
# Clip the axes so the Runge oscillations near the interval ends stay visible.
plt.xlim(-1.03, 1.03)
plt.ylim(-7, 1.5)
plt.plot(xp,yp)
plt.plot(xw,fw,'o')
plt.show()
| matstep0/metody_numeryczne | zad8/zad8.py | zad8.py | py | 710 | python | en | code | 0 | github-code | 13 |
2715181311 | from random import choice
import pandas as pd
from bw2calc import MultiLCA
from bw2data import calculation_setups
import bw2data as bd
from bw2data.backends import Activity
def run_multi_lca(
    name: str, functional_units: dict[Activity, float], impact_methods: list[str]
):
    """
    Perform MultiLCA calculations with many functional units and LCIA methods.

    :param name: key under which the calculation setup is registered in
        ``bw2data.calculation_setups``.
    :param functional_units: mapping of activity -> amount (one functional
        unit per entry).
    :param impact_methods: LCIA method identifiers to evaluate.
    :return: DataFrame with one row per functional unit and one column per
        impact method.
    :raises ValueError: if either ``functional_units`` or ``impact_methods``
        is empty.
    """
    # Guard clause replaces the original if/else; note the original annotation
    # ``dict[Activity:float]`` was a slice expression, not a type.
    if not functional_units or not impact_methods:
        raise ValueError("Check the inputs")
    calculation_setups[name] = {"inv": functional_units, "ia": impact_methods}
    multi_lca = MultiLCA(name)
    index = [str(x) for x in multi_lca.all.keys()]
    columns = [str(x) for x in impact_methods]
    return pd.DataFrame(multi_lca.results, columns=columns, index=index)
# Smoke test: pick two random activities and one random method from a local
# ecoinvent 3.9.1 installation and run the helper above.
bd.projects.set_current("ecoinvent_391")
db = bd.Database("ecoinvent_391_cutoff")
rand_acts = [{db.random(): 1} for _ in range(2)]
all_methods = list(bd.methods)
# set() deduplicates in case choice() picks the same method more than once.
methods = list(set(choice(all_methods) for _ in range(1)))
print("activities", rand_acts)
print("methods", methods)
res = run_multi_lca("test", rand_acts, methods)
print(res)
# one demand with 2 funcitonal unit ... short, bad looking code for
# {key1: value1, key2: value2}
# lca = LCA(demand={list(a.keys())[0]: list(a.values())[0] for a in rand_acts},
# method=methods[0], use_distributions=True)
# lca.lci()
# lca.lcia()
#
# # df = pd.DataFrame([{'score': lca.score} for _ in zip(lca, range(10))])
# # print(df)
# print(lca.score)
"""
Also see:
https://stackoverflow.com/questions/42984831/create-a-dataframe-from-multilca-results-in-brightway2
A demand can already include multiple activities!
also read...
https://oie-mines-paristech.github.io/lca_algebraic/example-notebook.html
https://github.com/brightway-lca/from-the-ground-up/blob/main/basic%20tasks/Searching.ipynb
"""
| LIVENlab/enbios | enbios2/bw2/experiment_multiLCA.py | experiment_multiLCA.py | py | 1,881 | python | en | code | 3 | github-code | 13 |
18347578118 | """
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Referenced https://github.com/bamos/densenet.pytorch/blob/master/densenet.py
Original author bamos
Referenced https://github.com/andreasveit/densenet-pytorch/blob/master/densenet.py
Original author andreasveit
Referenced https://github.com/Nicatio/Densenet/blob/master/mxnet/symbol_densenet.py
Original author Nicatio
Implemented the following paper: DenseNet-BC
Gao Huang, Zhuang Liu, Kilian Q. Weinberger, Laurens van der Maaten. "Densely Connected Convolutional Networks"
Coded by Lin Xiong Mar-1, 2017
"""
import mxnet as mx
import math
def BasicBlock(data, growth_rate, stride, name, bottle_neck=True, drop_out=0.0, bn_mom=0.9, workspace=512):
    """Return BasicBlock Unit symbol for building DenseBlock

    Each unit is a pre-activation composite function (BN -> ReLU -> Conv).
    With ``bottle_neck`` a 1x1 convolution producing ``4 * growth_rate``
    channels precedes the 3x3 convolution (the DenseNet-B variant).

    Parameters
    ----------
    data : Symbol
        Input symbol
    growth_rate : int
        Number of output channels
    stride : tuple
        Stride used in convolution
    name : str
        Base name of the operators
    bottle_neck : bool
        Whether to insert the 1x1 bottleneck convolution. Default = True
    drop_out : float
        Probability of an element to be zeroed. Default = 0.0
    bn_mom : float
        Momentum of the batch-normalization moving statistics. Default = 0.9
    workspace : int
        Workspace used in convolution operator
    """
    # import pdb
    # pdb.set_trace()
    if bottle_neck:
        # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        conv1 = mx.sym.Convolution(data=act1, num_filter=int(growth_rate*4), kernel=(1,1), stride=(1,1), pad=(0,0),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        if drop_out > 0:
            conv1 = mx.symbol.Dropout(data=conv1, p=drop_out, name=name + '_dp1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        conv2 = mx.sym.Convolution(data=act2, num_filter=int(growth_rate), kernel=(3,3), stride=stride, pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        if drop_out > 0:
            conv2 = mx.symbol.Dropout(data=conv2, p=drop_out, name=name + '_dp2')
        # The caller (DenseBlock) performs the dense concatenation.
        #return mx.symbol.Concat(data, conv2, name=name + '_concat0')
        return conv2
    else:
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        conv1 = mx.sym.Convolution(data=act1, num_filter=int(growth_rate), kernel=(3,3), stride=(1,1), pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        if drop_out > 0:
            conv1 = mx.symbol.Dropout(data=conv1, p=drop_out, name=name + '_dp1')
        #return mx.symbol.Concat(data, conv1, name=name + '_concat0')
        return conv1
def DenseBlock(units_num, data, growth_rate, name, bottle_neck=True, drop_out=0.0, bn_mom=0.9, workspace=512):
    """Return DenseBlock Unit symbol for building DenseNet

    Stacks ``units_num`` BasicBlocks; the input of every unit is the
    concatenation of the block input with all preceding unit outputs, so the
    channel count grows by ``growth_rate`` per unit.

    Parameters
    ----------
    units_num : int
        the number of BasicBlock in each DenseBlock
    data : Symbol
        Input symbol
    growth_rate : int
        Number of channels each unit adds
    name : str
        Base name of the operators
    bottle_neck : bool
        Whether each BasicBlock uses the 1x1 bottleneck convolution. Default = True
    drop_out : float
        Probability of an element to be zeroed. Default = 0.0
    bn_mom : float
        Momentum of batch normalization. Default = 0.9
    workspace : int
        Workspace used in convolution operator
    """
    # import pdb
    # pdb.set_trace()
    for i in range(units_num):
        Block = BasicBlock(data, growth_rate=growth_rate, stride=(1,1), name=name + '_unit%d' % (i+1),
                           bottle_neck=bottle_neck, drop_out=drop_out,
                           bn_mom=bn_mom, workspace=workspace)
        # Dense connectivity: carry forward everything produced so far.
        data = mx.symbol.Concat(data, Block, name=name + '_concat%d' % (i+1))
    return data
def TransitionBlock(num_stage, data, num_filter, stride, name, drop_out=0.0, bn_mom=0.9, workspace=512):
    """Return TransitionBlock Unit symbol for building DenseNet

    BN -> ReLU -> 1x1 convolution (channel compression) followed by a 2x2
    average pool that halves the spatial resolution between dense blocks.

    Parameters
    ----------
    num_stage : int
        Number of stage (used only to name the pooling operator)
    data : Symbol
        Input symbol
    num_filter : int
        Number of output channels
    stride : tuple
        Stride used in convolution
    name : str
        Base name of the operators
    drop_out : float
        Probability of an element to be zeroed. Default = 0.0
    bn_mom : float
        Momentum of batch normalization. Default = 0.9
    workspace : int
        Workspace used in convolution operator
    """
    bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
    act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
    conv1 = mx.sym.Convolution(data=act1, num_filter=num_filter,
                               kernel=(1,1), stride=stride, pad=(0,0), no_bias=True,
                               workspace=workspace, name=name + '_conv1')
    if drop_out > 0:
        conv1 = mx.symbol.Dropout(data=conv1, p=drop_out, name=name + '_dp1')
    return mx.symbol.Pooling(conv1, global_pool=False, kernel=(2,2), stride=(2,2), pool_type='avg', name=name + '_pool%d' % (num_stage+1))
def get_symbol(units, num_stage, growth_rate, num_classes, data_type, reduction=0.5, drop_out=0., bottle_neck=True, bn_mom=0.9, workspace=512, **kwargs):
    """Return DenseNet symbol of imagenet

    Parameters
    ----------
    units : list
        Number of units in each stage
    num_stage : int
        Number of stage
    growth_rate : int
        Number of output channels
    num_classes : int
        Output size of symbol
    data_type : str
        the type of dataset ('imagenet', 'vggface' or 'msface')
    reduction : float
        Compression ratio. Default = 0.5
    drop_out : float
        Probability of an element to be zeroed. Default = 0.0
    bottle_neck : bool
        Whether BasicBlock uses the 1x1 bottleneck convolution. Default = True
    bn_mom : float
        Momentum of batch normalization. Default = 0.9
    workspace : int
        Workspace used in convolution operator
    """
    num_unit = len(units)
    assert(num_unit == num_stage)
    init_channels = 2 * growth_rate
    n_channels = init_channels
    data = mx.sym.Variable(name='data')
    data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
    # The three supported datasets previously duplicated three byte-identical
    # copies of the same stem; collapse them into a single branch.
    if data_type in ('imagenet', 'vggface', 'msface'):
        body = mx.sym.Convolution(data=data, num_filter=growth_rate*2, kernel=(7, 7), stride=(2,2), pad=(3, 3),
                                  no_bias=True, name="conv0", workspace=workspace)
        body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
        body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
        body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
    else:
        raise ValueError("do not support {} yet".format(data_type))
    # Alternate DenseBlocks and TransitionBlocks; every transition compresses
    # the accumulated channel count by `reduction` and halves the resolution.
    for i in range(num_stage-1):
        body = DenseBlock(units[i], body, growth_rate=growth_rate, name='DBstage%d' % (i + 1), bottle_neck=bottle_neck, drop_out=drop_out, bn_mom=bn_mom, workspace=workspace)
        n_channels += units[i]*growth_rate
        n_channels = int(math.floor(n_channels*reduction))
        body = TransitionBlock(i, body, n_channels, stride=(1,1), name='TBstage%d' % (i + 1), drop_out=drop_out, bn_mom=bn_mom, workspace=workspace)
    # Last dense block has no trailing transition.
    body = DenseBlock(units[num_stage-1], body, growth_rate=growth_rate, name='DBstage%d' % (num_stage), bottle_neck=bottle_neck, drop_out=drop_out, bn_mom=bn_mom, workspace=workspace)
    # Classifier head: BN -> ReLU -> global average pool -> fully connected.
    bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
    relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
    pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
    flat = mx.symbol.Flatten(data=pool1)
    fc1 = mx.symbol.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
    return mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
| zhreshold/mxnet-ssd | symbol/densenet.py | densenet.py | py | 8,900 | python | en | code | 763 | github-code | 13 |
11442377904 | #matplotlib
#2D plotting lib
import cv2
from matplotlib import pyplot as plt
img=cv2.imread('HappyFish.jpg')
cv2.imshow('image',img)
#how to show image using matplotlib
# OpenCV loads images as BGR; convert to RGB so matplotlib shows true colors.
img= cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
plt.imshow(img)
# Hide the axis tick marks -- we only want the picture.
plt.xticks([]), plt.yticks([])
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
| Ines-chihi3/openCV-tutorial | 15-Matplotlib.py | 15-Matplotlib.py | py | 309 | python | en | code | 0 | github-code | 13 |
33638113886 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the standalone Reply model and record commenter e-mail addresses.

    The temporary default keeps existing Comment rows valid while the new
    non-null ``email`` column is added; ``preserve_default=False`` removes the
    default from the model state afterwards.
    """

    dependencies = [
        ('blog', '0005_reply'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='reply',
            name='comment',
        ),
        migrations.AddField(
            model_name='comment',
            name='email',
            field=models.CharField(default='example@example.com', max_length=1024),
            preserve_default=False,
        ),
        migrations.DeleteModel(
            name='Reply',
        ),
    ]
| asbxzeeko/tenkiamemma | blog/migrations/0006_auto_20151113_0929.py | 0006_auto_20151113_0929.py | py | 625 | python | en | code | 0 | github-code | 13 |
32798193665 |
class RedBlackNode:
    """A red-black tree node. Freshly inserted nodes start out red."""

    def __init__(self, data):
        self.data = data
        self.height = 0
        self.left = None
        self.right = None
        self.color = "red"
        self.parent = None

    def is_red(self):
        """Return True if this node is colored red."""
        return self.color == "red"

    def is_black(self):
        """Return True if this node is colored black."""
        return self.color == "black"

    def are_both_children_black(self):
        """Return True when neither child is red (a missing child counts as black)."""
        if self.left is not None and self.left.is_red():
            return False
        if self.right is not None and self.right.is_red():
            return False
        return True

    def get_grandparent(self):
        """Return the parent's parent, or None."""
        if self.parent is None:
            return None
        return self.parent.parent

    def get_uncle(self):
        """Return the parent's sibling, or None."""
        grandparent = self.get_grandparent()
        if grandparent is None:
            return None
        if grandparent.left is self.parent:
            return grandparent.right
        return grandparent.left

    def get_sibling(self):
        """Return this node's sibling, or None if it has no parent."""
        if self.parent is None:
            return None
        if self is self.parent.left:
            return self.parent.right
        return self.parent.left


class RedBlack:
    """Red-black tree supporting insertion, search and in-order traversal.

    Fixes over the original draft: the node helper methods above were called
    but never defined; `Node`, `rbt_balance`, `rbt_tree_set_child`,
    `rbt_tree_prepare_for_removal`, `rotate_left` and `rotate_right` were
    undefined names; `cur`/`curr` were mixed up in `_bst_insert`; the root's
    parent pointer was compared (`is None`) instead of assigned; search used
    identity (`is`) instead of equality.
    """

    def __init__(self, root=None, height=-1):
        self.root = root
        self.height = height  # previously accepted but silently dropped

    def rbt_tree_set_child(self, parent, which_child, child):
        """Attach `child` as `parent`'s 'left' or 'right' child, maintaining
        the child's parent pointer. Returns False for an invalid side name."""
        if which_child != "left" and which_child != "right":
            return False
        if which_child == "left":
            parent.left = child
        else:
            parent.right = child
        if child is not None:
            child.parent = parent
        return True

    def rbt_tree_replace_child(self, parent, curr_child, new_child):
        """Replace whichever child slot of `parent` holds `curr_child`."""
        if parent.left is curr_child:
            return self.rbt_tree_set_child(parent, "left", new_child)
        elif parent.right is curr_child:
            return self.rbt_tree_set_child(parent, "right", new_child)
        return False

    def rbt_insert(self, data):
        """Insert `data`, then rebalance to restore the red-black properties."""
        node = RedBlackNode(data)
        self._bst_insert(node)
        node.color = "red"
        self.insertion_balance(node)

    def _bst_insert(self, node):
        """Plain BST insertion; duplicates go to the right subtree."""
        if self.root is None:
            self.root = node
            node.parent = None
            return
        curr = self.root
        while curr is not None:
            if node.data < curr.data:
                if curr.left is None:
                    curr.left = node
                    node.parent = curr
                    curr = None
                else:
                    curr = curr.left
            else:
                if curr.right is None:
                    curr.right = node
                    node.parent = curr
                    curr = None
                else:
                    curr = curr.right

    def insertion_balance(self, node):
        """Restore the red-black invariants after inserting the red `node`."""
        # The root is always black.
        if node.parent is None:
            node.color = "black"
            return
        # A black parent cannot create a red-red violation.
        if node.parent.is_black():
            return
        parent = node.parent
        grandparent = node.get_grandparent()
        uncle = node.get_uncle()
        # Red uncle: push blackness down from the grandparent and recurse.
        if uncle is not None and uncle.is_red():
            parent.color = uncle.color = "black"
            grandparent.color = "red"
            self.insertion_balance(grandparent)
            return
        # Inner child: rotate it to the outside first.
        if node is parent.right and parent is grandparent.left:
            self.rbt_tree_rotate_left(parent)
            node = parent
            parent = node.parent
        elif node is parent.left and parent is grandparent.right:
            self.rbt_tree_rotate_right(parent)
            node = parent
            parent = node.parent
        # Outer child: recolor and rotate the grandparent.
        parent.color = "black"
        grandparent.color = "red"
        if node is parent.left:
            self.rbt_tree_rotate_right(grandparent)
        else:
            self.rbt_tree_rotate_left(grandparent)

    def in_order(self, visitor_function):
        """Visit every node in ascending key order."""
        self.in_order_recursive(visitor_function, self.root)

    def in_order_recursive(self, visitor_function, node):
        if node is None:
            return
        self.in_order_recursive(visitor_function, node.left)
        visitor_function(node)
        self.in_order_recursive(visitor_function, node.right)

    def is_none_or_black(self, node):
        """A missing (None) node counts as black."""
        if node is None:
            return True
        return node.is_black()

    def is_not_none_and_red(self, node):
        if node is None:
            return False
        return node.is_red()

    def rbt_tree_rotate_left(self, node):
        """Left-rotate around `node`; `node.right` must exist."""
        right_left_child = node.right.left
        if node.parent is not None:
            self.rbt_tree_replace_child(node.parent, node, node.right)
        else:
            self.root = node.right
            self.root.parent = None
        self.rbt_tree_set_child(node.right, "left", node)
        self.rbt_tree_set_child(node, "right", right_left_child)

    def rbt_tree_rotate_right(self, node):
        """Right-rotate around `node`; `node.left` must exist."""
        left_right_child = node.left.right
        if node.parent is not None:
            self.rbt_tree_replace_child(node.parent, node, node.left)
        else:
            self.root = node.left
            self.root.parent = None
        self.rbt_tree_set_child(node.left, "right", node)
        self.rbt_tree_set_child(node, "left", left_right_child)

    def rbt_tree_search(self, data, curr):
        """Return 1 if `data` occurs in the subtree rooted at `curr`, else 0."""
        if not curr:
            return 0
        if curr.data == data:
            return 1
        if data < curr.data:
            return self.rbt_tree_search(data, curr.left)
        return self.rbt_tree_search(data, curr.right)

    def rbt_tree_prepare_for_removal(self, node):
        """Restore black-height balance before removing a double-black node.

        Standard case analysis; this was called by case 3 but never defined.
        """
        if self.rbt_tree_try_case1(node):
            return
        sibling = node.get_sibling()
        if self.rbt_tree_try_case2(node, sibling):
            sibling = node.get_sibling()
        if self.rbt_tree_try_case3(node, sibling):
            return
        if self.rbt_tree_try_case4(node, sibling):
            return
        if self.rbt_tree_try_case5(node, sibling):
            sibling = node.get_sibling()
        if self.rbt_tree_try_case6(node, sibling):
            sibling = node.get_sibling()
        sibling.color = node.parent.color
        node.parent.color = "black"
        if node is node.parent.left:
            sibling.right.color = "black"
            self.rbt_tree_rotate_left(node.parent)
        else:
            sibling.left.color = "black"
            self.rbt_tree_rotate_right(node.parent)

    def rbt_tree_try_case1(self, node):
        """Case 1: a red node or the root needs no further work."""
        return node.is_red() or node.parent is None

    def rbt_tree_try_case2(self, node, sibling):
        """Case 2: red sibling -- rotate it up and recolor."""
        if sibling.is_red():
            node.parent.color = "red"
            sibling.color = "black"
            if node is node.parent.left:
                self.rbt_tree_rotate_left(node.parent)
            else:
                self.rbt_tree_rotate_right(node.parent)
            return True
        return False

    def rbt_tree_try_case3(self, node, sibling):
        """Case 3: black parent, black sibling with black children --
        recolor the sibling and push the problem up to the parent."""
        if node.parent.is_black() and sibling.are_both_children_black():
            sibling.color = "red"
            self.rbt_tree_prepare_for_removal(node.parent)
            return True
        return False

    def rbt_tree_try_case4(self, node, sibling):
        """Case 4: red parent, black sibling with black children -- swap colors."""
        if node.parent.is_red() and sibling.are_both_children_black():
            node.parent.color = "black"
            sibling.color = "red"
            return True
        return False

    def rbt_tree_try_case5(self, node, sibling):
        """Case 5: the sibling's red child points toward `node` -- rotate it outward."""
        if self.is_not_none_and_red(sibling.left) and self.is_none_or_black(sibling.right) and node is node.parent.left:
            sibling.color = "red"
            sibling.left.color = "black"
            self.rbt_tree_rotate_right(sibling)
            return True
        return False

    def rbt_tree_try_case6(self, node, sibling):
        """Case 6: mirror image of case 5."""
        if self.is_none_or_black(sibling.left) and self.is_not_none_and_red(
                sibling.right) and node is node.parent.right:
            sibling.color = "red"
            sibling.right.color = "black"
            self.rbt_tree_rotate_left(sibling)
            return True
        return False
10717159096 | import time
import RPi.GPIO as GPIO
class BaseValve():
    """Common base for valve drivers: stores the logger/config pair and logs
    open/close transitions. Subclasses add the hardware-specific behaviour."""

    def __init__(self, logger, config):
        self.logger, self.config = logger, config

    def open(self):
        """Log that the valve is being opened."""
        self.logger.info("Opening valve")

    def close(self):
        """Log that the valve is being closed."""
        self.logger.info("Closing valve")
class TestValve(BaseValve):
    """Fake valve for dry runs: logs like a real valve and simulates the
    actuator latency with a short sleep."""

    def open(self):
        super().open()
        time.sleep(0.5)

    def close(self):
        super().close()
        time.sleep(0.5)
class ThreeWireValve(BaseValve):
    """Driver for a 3-wire latching valve: a short pulse on one GPIO opens it,
    a pulse on the other closes it. The coil must only be energised briefly."""
    def __init__(self, logger, config):
        BaseValve.__init__(self, logger, config)
        # BCM pin numbers driving the open/close coils.
        self.gpioOn = config['gpioOn']
        self.gpioOff = config['gpioOff']
        self.pulseDuration = min(config.get('pulseDuration', 0.02), 0.2) # Must not exceed 200ms to avoid toasting the transistors and the valves
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(self.gpioOn, GPIO.OUT)
        GPIO.setup(self.gpioOff, GPIO.OUT)
    def open(self):
        BaseValve.open(self)
        # finally guarantees the coil is de-energised even if sleep is interrupted.
        try:
            GPIO.output(self.gpioOn, GPIO.HIGH)
            time.sleep(self.pulseDuration)
        finally:
            GPIO.output(self.gpioOn, GPIO.LOW)
    def close(self):
        BaseValve.close(self)
        # Same pulse pattern on the "off" coil latches the valve closed.
        try:
            GPIO.output(self.gpioOff, GPIO.HIGH)
            time.sleep(self.pulseDuration)
        finally:
            GPIO.output(self.gpioOff, GPIO.LOW)
def valveFactory(type, logger, config):
    """Instantiate the valve implementation registered under `type`."""
    implementations = {
        'test': TestValve,
        '3wire': ThreeWireValve,
    }
    if type in implementations:
        return implementations[type](logger, config)
    raise Exception("Cannot find implementation for valve type '%s'." % type)
| adi-miller/Irrigate | valves.py | valves.py | py | 1,498 | python | en | code | 0 | github-code | 13 |
29329209912 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
import mortality_frequency as mf
import cartopy.crs as ccrs
from hexalattice.hexalattice import *
import surface_temperature as st
# Load the mass-mortality-event workbook and render the standard figures.
achi = mf.MME_Plot('../src/MME.xlsx')
#achi.affected_by_ecoregion()
#achi.plot_fish_assesment_zoom()
#achi.plot_yearly_fish_assesment_zoom()
achi.plot_mortality_assesment_zoom()
achi.mortality_by_species()
achi.plot_yearly_mortality_assesment_zoom()
achi.yearly_horizontal_mortality_percentage()
achi.horizontal_mortality_percentage()
#achi.plot_affected_number()
#achi.regional_map_composer()
#achi.plot_data_map()
# Trial of Quim's new chart, the "MegaGraph".
total_numbers = achi.df_events # Number of total hexagons affected per year dataset
total_records = achi.df_numbers # Number of total records per year dataset
df_third = achi.get_numbered_df()
df_third['Year'] = df_third['Year'].astype(int)
# TODO: move this into get_numbered_df
df_records = pd.DataFrame(achi.columns, columns=['Year'])
df_records['Count'] = 0
for year in achi.columns:
    df_records['Count'].loc[df_records['Year'] == year] = total_records[int(year)].sum()
# df that contains number of ecoregions affected by year
df_affected_regions = pd.DataFrame(achi.columns, columns=['Year'])
df_affected_regions['Count'] = 0
for year in achi.columns:
    df_affected_regions['Count'].loc[df_affected_regions['Year'] == year] = len(total_numbers['sub-ecoregion'].loc[total_numbers[year] >= 1].unique())
# Cumulative share of all MME records up to each year (percentage).
df_records['Cumulative'] = df_records['Count'].cumsum()
trecords = df_records['Cumulative'].iloc[-1]
df_records['PercentageCum'] = (df_records['Cumulative'] / trecords) * 100
def make_patch_spines_invisible(ax):
    """Keep the axes frame enabled but hide its background patch and every spine."""
    ax.set_frame_on(True)
    ax.patch.set_visible(False)
    for _, spine in ax.spines.items():
        spine.set_visible(False)
# Single-axis version of the MegaGraph: the secondary axes (par1 for record
# counts, par2 for the cumulative percentage) are currently disabled.
fig, host = plt.subplots()
fig.subplots_adjust(right=0.75)
#par1 = host.twinx()
#par2 = host.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
#par2.spines["right"].set_position(("axes", 1.2))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
#make_patch_spines_invisible(par2)
# Second, show the right spine.
#par2.spines["right"].set_visible(True)
w = 0.3
p1 = host.bar(df_third['Year'].astype(int)-w, df_third['Count'], width=w, color='tab:blue', align='center', label='Hexagons')
#p2 = par1.bar(df_records['Year'].astype(int), df_records['Count'], width=w, color='tab:orange', align='center', label='Records')
#p3, = par2.plot(df_records['Year'].astype(int), df_records['PercentageCum'], color='black', label='Cumulative', marker='.')
host.set_xlabel("Year")
host.set_ylabel("# of affected hexagons")
#par1.set_ylabel("# of records")
#par2.set_ylabel("Cumulative % of MME records")
# Color the axis labels/ticks to match their series.
host.yaxis.label.set_color('tab:blue')
#par1.yaxis.label.set_color('tab:orange')
#par2.yaxis.label.set_color('black')
tkw = dict(size=4, width=1.5)
host.tick_params(axis='y', colors='tab:blue', **tkw)
#par1.tick_params(axis='y', colors='tab:orange', **tkw)
#par2.tick_params(axis='y', colors='black', **tkw)
host.tick_params(axis='x', **tkw)
#myl = [p1] + [p2] + [p3]
#myl = [p1] + [p2]
myl = [p1]
host.legend(myl, [l.get_label() for l in myl], loc='upper left')
# The annotation loop below labelled the record-count bars (``p2``) on the
# secondary axis (``par1``).  Both were commented out above, so running it
# raised a NameError before the figure could be saved; keep it disabled until
# the secondary axis is restored.
# i = 0
# for rect in p2:
#     text = df_affected_regions['Count'][i]
#     height = rect.get_height()
#     #circle = patches.Ellipse((rect.get_x(), height + 4.5), 1, 10, facecolor='None', edgecolor='black')
#     #par1.add_patch(circle)
#     par1.text(rect.get_x(), height, f'{text:.0f}', ha='center')
#     i += 1
plt.savefig('Megaplot.png',bbox_inches='tight')
| Damyck/tMednet | tmednetGUI/Probita.py | Probita.py | py | 3,834 | python | en | code | 2 | github-code | 13 |
2061930760 | """Module for configuring Pytest with custom logger settings.
This module allows users to disable specific loggers when running pytest.
"""
import logging
import os
import pandas as pd
import pytest
@pytest.fixture(autouse=True)
def set_pandas_options() -> None:
    """Forces pandas to print all columns on one line.

    Autouse: applied to every test so DataFrame reprs in failure output are
    not wrapped mid-table.
    """
    pd.set_option("display.max_columns", 24)
    pd.set_option("display.width", 1000)
def pytest_configure() -> None:
    """Disable specific loggers during pytest runs.

    Loggers to be disabled can be set using the DISABLE_LOGGERS environment
    variable (comma-separated logger names).  By default, the logger
    "energypylinear.optimizer" is disabled.
    """
    raw = os.getenv("DISABLE_LOGGERS")
    names = ["energypylinear.optimizer"] if raw is None else raw.split(",")
    for name in names:
        logging.getLogger(name).disabled = True
| ADGEfficiency/energy-py-linear | tests/conftest.py | conftest.py | py | 998 | python | en | code | 56 | github-code | 13 |
43261311812 | from heapq import heappop, heappush
from collections import defaultdict
class Segtree():
    """Iterative segment tree specialised to range-minimum queries."""

    def segfunc(self, x, y):
        """Binary operation folded over a range (minimum)."""
        return min(x, y)

    def __init__(self, LIST, ELE):
        """Build the tree over LIST, padding leaves with the identity ELE."""
        self.n = len(LIST)
        self.ide_ele = ELE
        # Number of leaves: the next power of two >= n.
        self.num = 1 << (self.n - 1).bit_length()
        self.tree = [ELE] * (2 * self.num)
        for idx, value in enumerate(LIST):
            self.tree[self.num + idx] = value
        for idx in reversed(range(1, self.num)):
            self.tree[idx] = self.segfunc(self.tree[2 * idx], self.tree[2 * idx + 1])

    def update(self, k, x):
        """Set element k to x and recompute all ancestors bottom-up."""
        pos = k + self.num
        self.tree[pos] = x
        pos >>= 1
        while pos:
            self.tree[pos] = self.segfunc(self.tree[2 * pos], self.tree[2 * pos + 1])
            pos >>= 1

    def query(self, l, r):
        """Fold segfunc over the half-open index interval [l, r)."""
        res = self.ide_ele
        lo, hi = l + self.num, r + self.num
        while lo < hi:
            if lo & 1:
                res = self.segfunc(res, self.tree[lo])
                lo += 1
            if hi & 1:
                res = self.segfunc(res, self.tree[hi - 1])
            lo >>= 1
            hi >>= 1
        return res
def main():
    # rate[b]: max-heap (values negated) of ratings currently shown in block b.
    # flag[b][i] == 1 while player i is still counted in block b; stale heap
    # entries are removed lazily when they surface at the top.
    rate = [[] for _ in range(M)]
    flag = [defaultdict(int) for _ in range(M)]
    now = []
    for i, (ai, bi) in enumerate(AB):
        now.append(bi-1)
        flag[bi-1][i] = 1
        heappush(rate[bi-1], (-ai, i))
    # Segment tree over blocks holding each block's current maximum rating
    # (MAX acts as "empty block" / identity for min).
    seg = Segtree([MAX]*M, MAX)
    for i, ri in enumerate(rate):
        if ri:
            seg.update(i, -ri[0][0])
    ans = []
    for ci, di in CD:
        ci, di = ci-1, di-1
        c_bfo, c_rate = now[ci], AB[ci][0]
        now[ci] = di
        flag[c_bfo][ci] = 0
        rate_cbfo = rate[c_bfo]
        # Pop entries belonging to players that have already moved away.
        while rate_cbfo and flag[c_bfo][rate_cbfo[0][1]] == 0:
            heappop(rate_cbfo)
        seg.update(c_bfo, (-rate_cbfo[0][0] if rate_cbfo else MAX))
        flag[di][ci] = 1
        heappush(rate[di], (-c_rate, ci))
        seg.update(di, -rate[di][0][0])
        # Answer for this query: min over all blocks of the block maximum.
        ans.append(seg.query(0, M))
    return print(*ans, sep='\n')
if __name__ == '__main__':
    # Input: N players as (rating, block), then Q queries (player, new block).
    N, Q = map(int, input().split())
    AB = [list(map(int, input().split())) for _ in range(N)]
    CD = [list(map(int, input().split())) for _ in range(Q)]
    # M: number of blocks; MAX: sentinel larger than any rating.
    M, MAX = 2*10**5, 10**10
    main()
| Shirohi-git/AtCoder | abc161-/abc170_e2.py | abc170_e2.py | py | 2,186 | python | en | code | 2 | github-code | 13 |
72915497298 | import re
import dataclasses
import mimetypes
import pytest
webview = pytest.importorskip('qutebrowser.browser.webengine.webview')
from qutebrowser.qt.webenginecore import QWebEnginePage
from qutebrowser.utils import qtutils
from helpers import testutils
@dataclasses.dataclass
class Naming:
    # Fixed prefix/suffix stripped from Qt enum member names before the
    # snake_case conversion.
    prefix: str = ""
    suffix: str = ""
suffix: str = ""
def camel_to_snake(naming, name):
    """Strip naming's prefix/suffix from name, then convert CamelCase to snake_case."""
    if naming.prefix:
        assert name.startswith(naming.prefix)
        name = name[len(naming.prefix):]
    if naming.suffix:
        assert name.endswith(naming.suffix)
        name = name[:-len(naming.suffix)]
    # Prefix every capital letter except a leading one with an underscore,
    # then lowercase the whole string.
    return re.sub(r"(?<!^)[A-Z]", lambda match: "_" + match.group(0), name).lower()
@pytest.mark.parametrize("naming, name, expected", [
    (Naming(prefix="NavigationType"), "NavigationTypeLinkClicked", "link_clicked"),
    (Naming(prefix="NavigationType"), "NavigationTypeTyped", "typed"),
    (Naming(prefix="NavigationType"), "NavigationTypeBackForward", "back_forward"),
    (Naming(suffix="MessageLevel"), "InfoMessageLevel", "info"),
])
def test_camel_to_snake(naming, name, expected):
    """Spot-check prefix/suffix stripping and snake_case conversion."""
    assert camel_to_snake(naming, name) == expected
@pytest.mark.parametrize("enum_type, naming, mapping", [
    (
        QWebEnginePage.JavaScriptConsoleMessageLevel,
        Naming(suffix="MessageLevel"),
        webview.WebEnginePage._JS_LOG_LEVEL_MAPPING,
    ),
    (
        QWebEnginePage.NavigationType,
        Naming(prefix="NavigationType"),
        webview.WebEnginePage._NAVIGATION_TYPE_MAPPING,
    )
])
def test_enum_mappings(enum_type, naming, mapping):
    """Every Qt enum member must map to an entry named after its snake_case form."""
    members = testutils.enum_members(QWebEnginePage, enum_type).items()
    for name, val in members:
        mapped = mapping[val]
        assert camel_to_snake(naming, name) == mapped.name
@pytest.fixture
def suffix_mocks(monkeypatch):
    """Patch mimetypes with a small fixed table and pin qtutils.version_check
    so extra_suffixes_workaround behaves deterministically in tests."""
    types_map = {
        ".jpg": "image/jpeg",
        ".jpe": "image/jpeg",
        ".png": "image/png",
        ".m4v": "video/mp4",
        ".mpg4": "video/mp4",
    }
    mimetypes_map = {}  # mimetype -> [suffixes] map
    for suffix, mime in types_map.items():
        mimetypes_map[mime] = mimetypes_map.get(mime, []) + [suffix]

    def guess(mime):
        return mimetypes_map.get(mime, [])

    monkeypatch.setattr(mimetypes, "guess_all_extensions", guess)
    monkeypatch.setattr(mimetypes, "types_map", types_map)

    def version(string, compiled=True):
        # The workaround only version-checks with compiled=False; simulate a
        # Qt version inside the affected range (>= 6.2.3, < 6.7.0).
        assert compiled is False
        if string == "6.2.3":
            return True
        if string == "6.7.0":
            return False
        raise AssertionError(f"unexpected version {string}")

    monkeypatch.setattr(qtutils, "version_check", version)
# Pairs of (accepted suffixes/mimetypes passed in, extra suffixes the
# workaround is expected to add).
EXTRA_SUFFIXES_PARAMS = [
    (["image/jpeg"], {".jpg", ".jpe"}),
    (["image/jpeg", ".jpeg"], {".jpg", ".jpe"}),
    (["image/jpeg", ".jpg", ".jpe"], set()),
    (
        [
            ".jpg",
        ],
        set(),
    ),  # not sure why black reformats this one and not the others
    (["image/jpeg", "video/mp4"], {".jpg", ".jpe", ".m4v", ".mpg4"}),
    (["image/*"], {".jpg", ".jpe", ".png"}),
    (["image/*", ".jpg"], {".jpe", ".png"}),
]
@pytest.mark.parametrize("before, extra", EXTRA_SUFFIXES_PARAMS)
def test_suffixes_workaround_extras_returned(suffix_mocks, before, extra):
    """The workaround returns exactly the suffixes missing from `before`."""
    assert extra == webview.extra_suffixes_workaround(before)
@pytest.mark.parametrize("before, extra", EXTRA_SUFFIXES_PARAMS)
def test_suffixes_workaround_choosefiles_args(
    mocker,
    suffix_mocks,
    config_stub,
    before,
    extra,
):
    """chooseFiles must forward the accepted suffixes plus the workaround's
    additions to the base-class implementation."""
    # mock super() to avoid calling into the base class' chooseFiles()
    # implementation.
    mocked_super = mocker.patch("qutebrowser.browser.webengine.webview.super")
    # We can pass None as "self" because we aren't actually using anything from
    # "self" for this test. That saves us having to initialize the class and
    # mock all the stuff required for __init__()
    webview.WebEnginePage.chooseFiles(
        None,
        QWebEnginePage.FileSelectionMode.FileSelectOpen,
        [],
        before,
    )
    expected = set(before).union(extra)
    assert len(mocked_super().chooseFiles.call_args_list) == 1
    called_with = mocked_super().chooseFiles.call_args_list[0][0][2]
    assert sorted(called_with) == sorted(expected)
| qutebrowser/qutebrowser | tests/unit/browser/webengine/test_webview.py | test_webview.py | py | 4,239 | python | en | code | 9,084 | github-code | 13 |
73615769939 | #!/usr/local/bin/python
import sys
import twitter
import argparse
# OAuth keys for account and API access.
import keys
def main(args):
    """Post args.message to Twitter, either as one tweet or, with --long,
    split across several continuation tweets.

    :param args: argparse namespace with `message` (list of words) and
        `long` (bool) attributes.
    """
    api = twitter.Api(consumer_key=keys.consumer_key,
                      consumer_secret=keys.consumer_secret,
                      access_token_key=keys.access_token_key,
                      access_token_secret=keys.access_token_secret)
    tweet = ' '.join(args.message)
    if args.long:
        # PostUpdates splits the text into multiple tweets joined by '/'.
        statuses = api.PostUpdates(tweet, continuation='/')
        print('Posted long message over ' + str(len(statuses)) + ' tweets.')
        return
    try:
        api.PostUpdate(tweet)
        print('Posted: ' + tweet)
    except twitter.error.TwitterError:
        if len(tweet) == 0:
            print('No tweet found to post.')
        elif len(tweet) > 140:
            print('Tweet too long, consider using -l.')
        else:
            # Length is acceptable (1-140 chars) so the failure is something
            # else; previously a failing tweet of exactly 140 characters fell
            # through all branches and produced no message at all.
            print("Unexpected error posting. Invalid character?")
if __name__ == "__main__":
    # Command-line entry point: parse the flags, then hand off to main().
    parser = argparse.ArgumentParser(description='Post tweets from the command'
                                     ' line.')
    parser.add_argument('-l', '--long', action='store_true',
                        help='Post a longer (>140 chars) message over several '
                             'tweets.')
    parser.add_argument('-m', '--message', nargs='*', required=True,
                        help='Message to post. Advisable to enclose in'
                             ' quotation marks.')
    args = parser.parse_args()
    main(args)
| karnival/chirp | chirp.py | chirp.py | py | 1,617 | python | en | code | 0 | github-code | 13 |
27697655163 | import asyncio
import faros_discovery
async def test_some_remote_operations(found):
    """Demonstrate fanning out shell commands over pooled SSH connections to
    every discovered device in `found`."""
    # This opens a context over a list of Remote objects. Within the following
    # scope, each of them has a valid connection open, until the end of the
    # async with block.
    async with faros_discovery.Remote.sshify(found) as connections:
        # This is the hardest thing to understand in the code:
        # connection.run(...) is not actually the execution of the command, it
        # actually returns an "awaitable" object which can be run later. We've
        # aggregated/staged all the commands to run here.
        staged_connection_runs = [
            connection.run("echo 'hello world from `hostname`'")
            for connection in connections
        ]
        # asyncio.as_completed(<list of awaitables>) returns a /synchronous/
        # iterator that returns awaitables in the order that the job completes.
        for run in asyncio.as_completed(staged_connection_runs):
            # This await probably doesn't block, because we can be pretty sure
            # that the result is ready if the iterator has ordered it as so. If
            # it's not ready, it's at least the first-available result we can
            # get at.
            res = await run
            # We've unboxed res from the await call, and now we have a simple
            # result object from the asyncssh library.
            print(res)
        # Notice, the connection is still alive, each call to connection.run opens a new
        # session, but not a new TCP connection.
        runs = [
            connection.run("echo 'the connection on `hostname` never closed!'")
            for connection in connections
        ]
        for run in asyncio.as_completed(runs):
            res = await run
            print(res)
        # For the duration of this block, every device in found has
        # a ssh_connection attribute defined.
        print("inside sshify block")
        for device in found:
            print("Device {} has an ssh_connection with repr: {}".format(
                device.serial, device.ssh_connection))
    # Now we're outside the sshify block, and the connection has been cleaned
    # up for us.
    print("Outside sshify block")
    for device in found:
        print("Device {} has an ssh_connection with repr: {}".format(
            device.serial, device.ssh_connection))
def main():
    """Discover devices and drive the SSH demo coroutine over them."""
    # Discovery returns an iterator, which can only be consumed once in
    # Python; materialize it so it can be consumed many times over.
    found = list(faros_discovery.Discover())
    # Async and normal Python code are not easily called from one another:
    # an event loop is needed to run any coroutine from synchronous code.
    loop = asyncio.new_event_loop()
    try:
        # Runs until the given coroutine completes.
        loop.run_until_complete(test_some_remote_operations(found))
    finally:
        # Fix: close the loop even when the coroutine raises -- the original
        # leaked the loop on any error.
        loop.close()
if __name__ == '__main__':
main()
| skylarkwireless/pyfaros | doc/ssh_example_documented.py | ssh_example_documented.py | py | 2,978 | python | en | code | 0 | github-code | 13 |
28660704664 | from robust_motifs.data import ResultManager, BcountResultManager
from pathlib import Path
import seaborn as sns
import matplotlib.pyplot as plt
# Plots absolute motif count for individual rats and compares to control models.
# Load the average-connectome results plus the five individual rats (P13-P17).
r_average = ResultManager(Path("data/ready/average"))
r = [ResultManager(Path("data/ready/individuals_1/pathways_P14-" + str(pathway)))
     for pathway in range(13, 18)]

df_average = r_average.get_counts_dataframe("average")
for i, result in enumerate(r):
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
    # kept here for behavioral parity -- consider pd.concat when upgrading.
    df_average = df_average.append(result.get_counts_dataframe("P" + str(i + 13)),
                                   ignore_index=True)

# Control models: degree-preserving shuffle and the underlying connectome.
r_bshuffled = BcountResultManager(Path("data/bcounts/bshuffled_1"))
r_underlying = BcountResultManager(Path("data/bcounts/underlying"))
df_bshuffled = r_bshuffled.get_counts_dataframe("bshuffled")
df_underlying = r_underlying.get_counts_dataframe("underlying")

df_average['control'] = False
df_bshuffled['control'] = True
df_underlying['control'] = True
df_average = df_average.append(df_bshuffled, ignore_index=True)
df_average = df_average.append(df_underlying, ignore_index=True)


def _plot_motif(data, motif, ylabel, filename):
    """Plot count vs. dimension for one motif type and save the figure.

    Fix: the original triplicated this plotting block verbatim for the
    three motif types.
    """
    subset = data[data['motif'] == motif]
    fig = plt.figure()
    ax = fig.add_subplot()
    sns.lineplot(data=subset, x='dim', y='count', hue='group', ax=ax, style='control')
    ax.set_ylabel(ylabel)
    ax.set_xlabel("Dimension")
    fig.savefig(filename, facecolor="white")


# One figure per motif type: extended simplices, simplices, bisimplices.
_plot_motif(df_average, 'ES', "Extended simplices", "es_dimension_individuals_bcounts")
_plot_motif(df_average, 'S', "Simplices", "s_dimension_individuals_bcounts")
_plot_motif(df_average, 'BS', "Bisimplices", "bs_dimension_individuals_bcounts")
| matsantoro/counting_motifs | plot_scripts/plot_individuals_bcounts.py | plot_individuals_bcounts.py | py | 2,051 | python | en | code | 1 | github-code | 13 |
360990153 | #!/usr/bin/env python
import os
import time
try:
import lcm
except ImportError as e:
print('Could not import LCM')
print('If you are working in a venv, try cloning upstream and then:\n')
print('\tpip install -e ~/path/to/lcm/lcm-python\n')
raise e
import management
class LCMSyslog:
    """Publishes syslog messages over LCM on channels named 'syslog.<LEVEL>'."""

    def __init__(self, process, lio=None):
        """
        Args:
            process: a management.process_t identifying the logging process.
            lio: optional LCM handle; a fresh one is created when omitted.
        """
        # Fix: the original default was `lio=lcm.LCM()`, which is evaluated
        # once at class-definition (import) time and then shared by every
        # instance. Create the handle lazily, per instance, instead.
        self.lio = lio if lio is not None else lcm.LCM()
        self.msg = management.syslog_t()
        self.msg.process = process

    def log(self, text, level='DEBUG', epoch_usec=None):
        """Publish *text* at *level*; timestamp defaults to now (µs since epoch)."""
        if epoch_usec is None:
            self.msg.epoch_usec = int(time.time() * 1e6)
        else:
            self.msg.epoch_usec = epoch_usec
        self.msg.text = text
        self.lio.publish('syslog.{0}'.format(level), self.msg.encode())

    # Convenience wrappers, one per severity level.
    def critical(self, text, epoch_usec=None):
        self.log(text, 'CRITICAL', epoch_usec)

    def fault(self, text, epoch_usec=None):
        self.log(text, 'FAULT', epoch_usec)

    def error(self, text, epoch_usec=None):
        self.log(text, 'ERROR', epoch_usec)

    def important(self, text, epoch_usec=None):
        self.log(text, 'IMPORTANT', epoch_usec)

    def warning(self, text, epoch_usec=None):
        self.log(text, 'WARNING', epoch_usec)

    def info(self, text, epoch_usec=None):
        self.log(text, 'INFO', epoch_usec)

    def debug(self, text, epoch_usec=None):
        self.log(text, 'DEBUG', epoch_usec)
if __name__ == '__main__':  # run a test
    # Smoke test: publish one message at every severity level, once per
    # second, ten times, so a subscriber can verify all channels.
    process = management.process_t()
    process.name = 'lcm-syslog.py'
    process.id = os.getpid()
    print('PID: {0}'.format(process.id))
    log = LCMSyslog(process)
    for i in range(10):
        log.critical('This is syslog entry {0} at the CRITICAL level.'.format(i))
        log.fault('This is syslog entry {0} at the FAULT level.'.format(i))
        log.error('This is syslog entry {0} at the ERROR level.'.format(i))
        log.important('This is syslog entry {0} at the IMPORTANT level.'.format(i))
        log.warning('This is syslog entry {0} at the WARNING level.'.format(i))
        log.info('This is syslog entry {0} at the INFO level.'.format(i))
        log.debug('This is syslog entry {0} at the DEBUG level.'.format(i))
        time.sleep(1)
| bluesquall/lcm-syslog | python/lcmsyslog.py | lcmsyslog.py | py | 2,207 | python | en | code | 0 | github-code | 13 |
71497004819 | # 언어 : Python
# 날짜 : 2022.1.2
# 문제 : BOJ > 1로 만들기 2(https://www.acmicpc.net/problem/12852)
# 티어 : 실버 1
# =====================================================================
def solution(n=None):
    """BFS from *n* down to 1 using the ops (-1, //2, //3); prints the
    minimum number of operations and one optimal path (BOJ 12852).

    Args:
        n: starting number; defaults to the module-level N read from stdin,
           preserving the original zero-argument call.
    """
    from collections import deque
    if n is None:
        n = N
    # Fixes two accidental O(k) costs per step: list.pop(0) shifted the whole
    # queue, and `in`-membership scanned the visited *list*.
    visited = set()
    queue = deque([(n, [n])])
    while queue:
        number, path = queue.popleft()
        if number == 1:
            print(len(path) - 1)
            print(" ".join(map(str, path)))
            break
        if number not in visited:
            visited.add(number)
            # Same expansion order as the original: //3, //2, then -1.
            if number % 3 == 0:
                queue.append((number // 3, path + [number // 3]))
            if number % 2 == 0:
                queue.append((number // 2, path + [number // 2]))
            queue.append((number - 1, path + [number - 1]))
N = int(input())
solution() | eunseo-kim/Algorithm | BOJ/class5/01_1로 만들기 2.py | 01_1로 만들기 2.py | py | 790 | python | en | code | 1 | github-code | 13 |
7293039260 | import numpy as np
import matplotlib.pyplot as plt
def estimate_coef(x, y):
    """Return (b_0, b_1) for the least-squares line y = b_0 + b_1 * x.

    Args:
        x, y: 1-D numpy arrays of equal length.

    Fixes the original, which (a) swapped the SS_xx / SS_xy quantities and
    (b) subtracted the n*mean*mean correction once *per element* inside
    np.sum instead of once overall; debug prints removed.
    """
    n = np.size(x)
    m_x, m_y = np.mean(x), np.mean(y)
    # Sums of cross- and squared deviations about the means.
    SS_xy = np.sum(x * y) - n * m_x * m_y
    SS_xx = np.sum(x * x) - n * m_x * m_x
    b_1 = SS_xy / SS_xx
    b_0 = m_y - b_1 * m_x
    return b_0, b_1
#regression coeff
def plot_regression_line(x, y, b):
    """Scatter the observations and overlay the fitted line y = b[0] + b[1]*x."""
    # Observed data points.
    plt.scatter(x, y, color="m", marker="o", s=30)
    # Response predicted by the fitted coefficients.
    predicted = b[0] + b[1] * x
    plt.plot(x, predicted, color="g")
    # Axis labels, then display the figure.
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
def main():
    # Fit a simple linear regression on a toy dataset and plot the result.
    #observations
    x = np.array([0,1,2,3,4,5,6,7,8,9])
    y = np.array([1,3,2,5,7,8,8,9,10,12])
    #estimated coeff
    b = estimate_coef(x,y)
    print("estimated coeff are - \nb_0 ={} \
     \nb_1 = {}".format(b[0],b[1]))
    #plot reg line
    plot_regression_line(x,y,b)
if __name__ == "__main__":
main()
| shruti735/Machine-Learning | Learning11.py | Learning11.py | py | 1,079 | python | en | code | 0 | github-code | 13 |
7050430955 | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import cv2
video_reader=cv2.VideoCapture(0) #read input from webcam (device index 0)
# Show frames until the camera stops delivering them or the user presses 'q'.
while True:
    success,frame=video_reader.read()
    if not success:
        break
    cv2.imshow("My Video",frame)
    key=cv2.waitKey(10) #gives time to show a frame
    if key==ord('q'):
        break
# Release the camera and close the display window.
video_reader.release()
cv2.destroyAllWindows()
| ISHPREETKAUR01DISNEY/Resume | Video.py | Video.py | py | 542 | python | en | code | 0 | github-code | 13 |
36293038972 | # Aqui estamos criando a tabela de ranking listando a pontuação dos jogadores
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
    """Open a SQLite connection to *db_file*; return None when opening fails."""
    connection = None
    try:
        connection = sqlite3.connect(db_file)
        # Report the sqlite3 module version, as the original script did.
        print(sqlite3.version)
    except Error as exc:
        print(exc)
    return connection
def create_table(conn, create_table_sql):
    """Execute a CREATE TABLE statement on *conn*, printing any sqlite error."""
    try:
        conn.cursor().execute(create_table_sql)
    except Error as exc:
        print(exc)
#Criando a tabela de ranking...
def main():
    """Create the ranking database and its `rank` table if they don't exist."""
    database = r"TABELA_RANK.db"

    sql_create_projeto_table = '''CREATE TABLE IF NOT EXISTS rank(
                                        id INTEGER PRIMARY KEY,
                                        nome TEXT NOT NULL,
                                        email TEXT NOT NULL,
                                        Rank INTEGER
                                    );'''

    # create a database connection
    conn = create_connection(database)
    if conn is not None:
        try:
            create_table(conn, sql_create_projeto_table)
        finally:
            # Fix: the original never closed the connection.
            conn.close()
    else:
        # Fix: the original failed silently when the connection was None.
        print("Error: could not create the database connection.")
# criar a tabela de tarefa
if __name__ == '__main__':
main() | GabrielSkf/T_Rex-Adventure | CRIANDO TABELA.py | CRIANDO TABELA.py | py | 1,202 | python | pt | code | 1 | github-code | 13 |
6634457324 | """Escreva um programa que leia dois números inteiros e compare-os. mostrando na tela uma mensagem:"""
from utilidadescev.dado import leiafloat
from utilidadescev.string import linha
# Read two numbers from the user (decorative lines come from utilidadescev).
linha(25, 'azul')
num1 = leiafloat('Primeiro número: ')
num2 = leiafloat('Segundo número: ')
linha(25, 'azul')
linha(25, 'amarelo')
# Compare the two values and report which one is larger (or a tie).
if num1 > num2:
    print('Primeiro número é MAIOR')
elif num2 > num1:
    print('Segundo número é MAIOR')
else:
    print('Os número são IGUAIS')
linha(25, 'amarelo')
| rafaelsantosmg/cev_python3 | cursoemvideo/ex038.py | ex038.py | py | 494 | python | pt | code | 1 | github-code | 13 |
17046185904 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InsurancePeriod import InsurancePeriod
from alipay.aop.api.domain.RecomProduct import RecomProduct
class AlipaySecurityRiskHahaIsptestQueryModel(object):
    """Request model for the alipay.security.risk.haha.isptest.query API.

    Generated Alipay-SDK-style model: five test_* fields with property
    accessors; setters for object-typed fields coerce plain dicts into the
    corresponding domain objects, and to/from_alipay_dict convert between
    this model and the wire dict format.
    """
    def __init__(self):
        self._test_five = None
        self._test_four = None
        self._test_one = None
        self._test_three = None
        self._test_two = None
    @property
    def test_five(self):
        return self._test_five
    @test_five.setter
    def test_five(self, value):
        # Accept either a ready InsurancePeriod or its dict representation.
        if isinstance(value, InsurancePeriod):
            self._test_five = value
        else:
            self._test_five = InsurancePeriod.from_alipay_dict(value)
    @property
    def test_four(self):
        return self._test_four
    @test_four.setter
    def test_four(self, value):
        # Accept either a ready RecomProduct or its dict representation.
        if isinstance(value, RecomProduct):
            self._test_four = value
        else:
            self._test_four = RecomProduct.from_alipay_dict(value)
    @property
    def test_one(self):
        return self._test_one
    @test_one.setter
    def test_one(self, value):
        self._test_one = value
    @property
    def test_three(self):
        return self._test_three
    @test_three.setter
    def test_three(self, value):
        # List-valued field: copy the elements into a fresh list.
        if isinstance(value, list):
            self._test_three = list()
            for i in value:
                self._test_three.append(i)
    @property
    def test_two(self):
        return self._test_two
    @test_two.setter
    def test_two(self, value):
        self._test_two = value
    def to_alipay_dict(self):
        # Serialize to the dict form sent to the gateway. Note: fields whose
        # value is falsy (None, 0, '', empty list) are omitted entirely.
        params = dict()
        if self.test_five:
            if hasattr(self.test_five, 'to_alipay_dict'):
                params['test_five'] = self.test_five.to_alipay_dict()
            else:
                params['test_five'] = self.test_five
        if self.test_four:
            if hasattr(self.test_four, 'to_alipay_dict'):
                params['test_four'] = self.test_four.to_alipay_dict()
            else:
                params['test_four'] = self.test_four
        if self.test_one:
            if hasattr(self.test_one, 'to_alipay_dict'):
                params['test_one'] = self.test_one.to_alipay_dict()
            else:
                params['test_one'] = self.test_one
        if self.test_three:
            # Serialize each list element in place before emitting the list.
            if isinstance(self.test_three, list):
                for i in range(0, len(self.test_three)):
                    element = self.test_three[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.test_three[i] = element.to_alipay_dict()
            if hasattr(self.test_three, 'to_alipay_dict'):
                params['test_three'] = self.test_three.to_alipay_dict()
            else:
                params['test_three'] = self.test_three
        if self.test_two:
            if hasattr(self.test_two, 'to_alipay_dict'):
                params['test_two'] = self.test_two.to_alipay_dict()
            else:
                params['test_two'] = self.test_two
        return params
    @staticmethod
    def from_alipay_dict(d):
        # Build a model from a response dict; the property setters above
        # perform any dict-to-object coercion.
        if not d:
            return None
        o = AlipaySecurityRiskHahaIsptestQueryModel()
        if 'test_five' in d:
            o.test_five = d['test_five']
        if 'test_four' in d:
            o.test_four = d['test_four']
        if 'test_one' in d:
            o.test_one = d['test_one']
        if 'test_three' in d:
            o.test_three = d['test_three']
        if 'test_two' in d:
            o.test_two = d['test_two']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipaySecurityRiskHahaIsptestQueryModel.py | AlipaySecurityRiskHahaIsptestQueryModel.py | py | 3,628 | python | en | code | 241 | github-code | 13 |
12791243437 | #If statement
if x > 8:
print('This number is equals to 10 ') # Will execute if x > 8 is true
print('This number is not equal 10 ') # will execute if x > 8 is not true (Outside of if statement)
#If else statement
if x >= 10:
print('Ths number is equals to 10') # Will execute if x >= 10 is true
else:
print('This number is not equals to 10') # Will execute if x >= 10 is false ( inside of if statement )
#Elif statement
if x < 30:
print('This number is equals to 10 ') # Will execute if x > 30 is true
elif x == 10:
print('This number is equals to 10 ') # Will execute if x==10 is true and if x > 30 is false
else:
print('This number is not equals to 10 ') # Will print if both x < 30 and x == 10 is false
# Nested If..else
num5 = float(input('Please input a number'))
if num5 >= 0: # Will execute if a number is greater than 0
if num5 == 0: # Will execute if a number is equals to 0
print('The number is 0 ')
else: # Will execute if a number is not equals to 0
print("The number is a positive number ")
else: # Will execute if a number is less than 0
print('The number is a negative number ')
#conditional statement
def condi_statement():
    """Demonstrate branching on two fixed values and print the verdict."""
    first, second = 10, 100
    if first < second:
        message = "x is less than y "
    else:
        message = "x is greater than or the same as y "
    print(message)
# if statement with a break
Numero = [ 1 ,10 , 0 , 15 ,-6 , -2 , -8 ]
for K in Numero:
if K < 0:
break # break is used to terminate a loop, as you can see the loop
# ends at 15 becuase above 15 are negative numbers
print(K)
#If else with a for and continue
Numero = [ 1 ,10 , 0 , 15 ,-6 , -2 , -8 ]
for O in Numero:
if O <= 0: # a loop ofr a negative number
continue # the loop will continue outside the loop and will print the outside text
print(O)
print('This is outside of the loop')
for O in Numero:
if O <= 0: # a loop ofr a negative number
continue
print(O)
print('This is outside of the loop')
#While loops
R = 1
U = 5
while R <= U :
print('This is a While Loop')
R = R + 1 # This will print the text 5 times, this is what known to be known as iteration
# the loop will stop printing if it only goes over 5, the loop will be terminated.
# while R :
# This is a infinite loop
# print('This is a never ending loop')
# This is a never ending loop it will never stop
n = 10
Sum = 0
i = 1
while i <= n :
Sum = Sum + i
# 1st = 0 = 0 + 3
# 2nd = 9 = 3 + 3
# 3rd = 12 = 9 + 3
# 4th = 15 = 12 + 3
# until it reaches the 10th which equals to 55
i = i+1 # i will be always be equals to 3, simply because i is always 1 and it will always add to 1 which will have
# a out come of 3
# 1 = 1 + 1, has an out come of 3
print('The sum is ', Sum)
# While with else loop
O = 0
while O < 5:
print("Hello")
O = O + 1 # This will print Hello for 5 times
else:
print('Hi ') # This will print Hi for the 6th time
# Nested while and if loops with break
outcome = 0
while True:
userinput = input('Please input a number:') # gets a number from the user
userinput = float(userinput) # converts the userinput to a float
if userinput < 0 :# if the user inputs the negative number it will end the loop, and show the sum
break
outcome += userinput # adds all positive number
print(' sum = ', outcome) # prints the outcome
# For loop
Name = ['Millow J. Gapay','Will Gapay','Milo Gapay'] # known as a list or an array
for Names in Name:
print(Names)
# For loop
numbers = range(1,10) # it will give a sequence of numbers 1 - 10
sum = 0 # this is a variable to store a sum
for i in numbers:
sum += i
# Iteration
# sum i outcome
# 1st (sum(0) + 1) =1
# 2nd (sum(2) + 2) =4
# 3rd (sum(4) + 3) =7
# 4th (sum(7) + 4) =10
# 5th (sum(10) + 5) =15
# 6th (sum(15) + 6) =21
# 7th (sum(21) + 7) =28
# 8th (sum(28) + 8) =36
# 9th (sum(15) + 9) =45
print('The sum is', sum)
#For loops with Else
My_name = [19 , 'Hello Millow ']
for J in My_name:
print(J)
else:
print('Hi There')
#pass statement
Name = ['Millow J. Gapay','Will Gapay','Milo Gapay']
for Y in Name:
pass
print("This statement after loop ")
# for loops with range
for x in range(5, 10):
print(x) | watermillow321/Hello_World | Loop.py | Loop.py | py | 4,264 | python | en | code | 0 | github-code | 13 |
62349043 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog, QTableWidgetItem, QHeaderView
from lab1.gui import gui, transaction
from PyQt5.QtGui import QIcon
import pymysql
import sys
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowIcon(QIcon('src/system.png'))
def update_c_num_combobox(self):
ui.c_num_combobox.clear()
cur.execute('select num from course order by num asc')
items = [item[0] for item in cur.fetchall()]
ui.c_num_combobox.addItems(items)
def update_s_name_combobox(self):
ui.stu_name_combobox.clear()
cur.execute('select num, name from student order by num asc')
items = [item[0] + ' ' + item[1] for item in cur.fetchall()]
ui.stu_name_combobox.addItems(items)
def stu_insert(self): # 新建学生信息
num, name, c_num = ui.stu_num.text(), ui.stu_name.text(), ui.class_num.text()
if not num or not name or not c_num:
QMessageBox.warning(self, '警告', '请输入学号、姓名与班号')
elif not num.isdigit() or len(num) != 10 or not c_num.isdigit() or len(c_num) != 3:
QMessageBox.warning(self, '警告', '学号为10位数字,班号为3位数字')
else:
query = 'select * from student where num=%s'
if cur.execute(query, [num]):
QMessageBox.warning(self, '插入异常', '该学号已存在,请重新输入')
elif not cur.execute('select * from class where num=%s', [c_num]):
QMessageBox.warning(self, '插入异常', '该班号在班级表中不存在,请尝试在班级表中插入对应条目')
else:
QMessageBox.information(self, '成功', '成功插入一条学生数据')
query = 'insert into student(num,name,c_num) values (%s,%s,%s)'
cur.execute(query, [num, name, c_num])
con.commit() # 修改数据时,需要commit操作
self.update_s_name_combobox()
def stu_delete(self): # 删除学生信息
num, trigger_on = ui.stu_num.text(), ui.add_trigger.isChecked()
if not num:
QMessageBox.warning(self, '警告', '学号为空')
elif not num.isdigit() or len(num) != 10:
QMessageBox.warning(self, '警告', '学号只可为10位数字')
else:
query = 'select * from student where num=%s'
if not cur.execute(query, [num]):
QMessageBox.warning(self, "删除异常", "该学号不存在,请重新输入")
elif cur.execute('select * from sc where snum =%s', [num]) and not trigger_on:
QMessageBox.warning(self, "删除异常", "该学号正被选课表作为外键引用,请尝试删除对应条目")
else:
if cur.execute('select * from sc where snum=%s', [num]) and trigger_on:
QMessageBox.information(self, '提示', '该学号正被选课表作为外键引用,触发器已默认删除对应条目数据')
QMessageBox.information(self, '成功', '成功删除一条学生数据')
query = 'delete from student where num=%s'
cur.execute(query, [num])
con.commit() # 修改数据时,需要commit操作
self.update_s_name_combobox()
def course_insert(self): # 新建学生信息
num, name, t_num = ui.course_num.text(), ui.course_name.text(), ui.teacher_num.text()
if not num or not name or not t_num:
QMessageBox.warning(self, '警告', '请输入课程号、课程名和任课教师号')
elif not num.isdigit() or len(num) != 3 or not num.isdigit() or len(t_num) != 10:
QMessageBox.warning(self, '警告', '课程号为3位数字,教师号为10位数字')
else:
query = 'select * from course where num=%s'
if cur.execute(query, [num]):
QMessageBox.warning(self, '插入异常', '该课程号已存在,请重新输入')
elif not cur.execute('select * from teacher where num=%s', [t_num]):
QMessageBox.warning(self, '插入异常', '该教师号在教师表中不存在,请尝试在教师表中插入对应条目')
else:
QMessageBox.information(self, '成功', '成功插入一条课程数据')
query = 'insert into course(num,name,t_num) values (%s,%s,%s)'
cur.execute(query, [num, name, t_num])
con.commit() # 修改数据时,需要commit操作
self.update_c_num_combobox()
def course_delete(self): # 删除学生信息
num, trigger_on = ui.course_num.text(), ui.add_trigger.isChecked()
if not num:
QMessageBox.warning(self, '警告', '课程号为空')
elif not num.isdigit() or len(num) != 3:
QMessageBox.warning(self, '警告', '课程号只可为3位数字')
else:
query = 'select * from course where num=%s'
if not cur.execute(query, [num]):
QMessageBox.warning(self, "删除异常", "该课程号不存在,请重新输入")
elif cur.execute('select * from sc where cnum=%s', [num]) and not trigger_on:
QMessageBox.warning(self, "删除异常", "该课程号正被选课表作为外键引用,请尝试删除对应条目")
else:
if cur.execute('select * from sc where cnum=%s', [num]) and trigger_on:
QMessageBox.information(self, '提示', '该课程号正被选课表作为外键引用,触发器已默认删除对应条目数据')
QMessageBox.information(self, '成功', '成功删除一条课程数据')
query = 'delete from course where num=%s'
cur.execute(query, [num])
con.commit() # 修改数据时,需要commit操作
self.update_c_num_combobox()
def sc_insert(self): # 新建信息,加入触发器
s_num, c_num, grade = ui.sc_snum.text(), ui.sc_cnum.text(), ui.sc_grade.text()
if not s_num or not c_num:
QMessageBox.warning(self, '警告', '请输入学号课程号')
elif not s_num.isdigit() or len(s_num) != 10 or not c_num.isdigit() or len(c_num) != 3:
QMessageBox.warning(self, '警告', '学号为10为数字,课程号为3位数字')
else:
query = 'select * from sc where snum=%s and cnum=%s'
trigger_on = ui.add_trigger.isChecked() # 触发器是否打开
has_s_num, has_c_num = cur.execute('select * from student where num=%s', [s_num]), cur.execute(
'select * from course where num=%s', [c_num]) # 学号信息是否存在,班号信息是否存在
if cur.execute(query, [s_num, c_num]):
QMessageBox.warning(self, '插入异常', '该选课信息已存在,请重新输入')
elif not has_s_num and not trigger_on:
QMessageBox.warning(self, '插入异常', '该学号在学生表中不存在,请尝试在学生表中插入对应条目')
elif not has_c_num and not trigger_on:
QMessageBox.warning(self, '插入异常', '该课程号在课程表中不存在,请尝试在课程表中插入对应条目')
else:
if not has_s_num and trigger_on:
QMessageBox.information(self, '提示', '该学号在课程表中不存在,触发器已默认添加对应条目数据')
if not has_c_num and trigger_on:
QMessageBox.information(self, '提示', '该课程号在课程表中不存在,触发器已默认添加对应条目数据')
QMessageBox.information(self, '成功', '成功插入一条学生数据')
query = 'insert into sc(snum,cnum,grade) values (%s,%s,%s)'
cur.execute(query, [s_num, c_num, grade])
con.commit() # 修改数据时,需要commit操作
self.update_s_name_combobox()
self.update_c_num_combobox()
def sc_delete(self): # 删除信息,加入触发器
s_num, c_num = ui.sc_snum.text(), ui.sc_cnum.text()
if not s_num or not c_num:
QMessageBox.warning(self, '警告', '请输入学号课程号')
elif not s_num.isdigit() or len(s_num) != 10 or not c_num.isdigit() or len(c_num) != 3:
QMessageBox.warning(self, '警告', '学号为10为数字,课程号为3位数字')
else:
query = 'select * from sc where snum=%s and cnum=%s'
if not cur.execute(query, [s_num, c_num]):
QMessageBox.warning(self, "删除异常", "该选课信息不存在,请重新输入")
else:
QMessageBox.information(self, '成功', '成功删除一条选课信息')
query = 'delete from sc where snum=%s and cnum=%s'
cur.execute(query, [s_num, c_num])
con.commit() # 修改数据时,需要commit操作
self.update_s_name_combobox()
self.update_c_num_combobox()
def get_name(self):
c_count, res = int(ui.sc_stu_count.text()), []
query = 'select student.name from student,sc where student.num = sc.snum group by student.num HAVING count(*) > %s'
cur.execute(query, [c_count])
for item in cur.fetchall():
res.append(item[0])
QMessageBox.information(self, '成功', ' '.join(res) if len(res) > 0 else '无结果')
def get_name_by_cnum(self):
c_num, res = ui.c_num_combobox.currentText(), []
if not c_num:
QMessageBox.warning(self, '警告', '请输入课程号')
elif not c_num.isdigit() or len(c_num) != 3:
QMessageBox.warning(self, '警告', '班号为3位数字')
else:
query = 'select name from student where num in (select snum from sc where cnum = %s)'
cur.execute(query, [c_num])
for item in cur.fetchall():
res.append(item[0])
QMessageBox.information(self, '成功', ' '.join(res) if len(res) > 0 else '无结果')
def get_avg_grade(self):
name, res = ui.stu_name_combobox.currentText().split()[1], []
if not name:
QMessageBox.warning(self, '警告', '请输入姓名')
else:
query = 'select snum, avg(grade) from sc where snum in (select num from student where name = %s) and grade >= 60 group by snum'
cur.execute(query, [name])
for item in cur.fetchall():
res.append(item[0] + '\t' + str(item[1]))
QMessageBox.information(self, '成功', '\n'.join(res) if len(res) != 0 else '无结果')
def create_view(self):
d_name = ui.cs_department.currentText()
view_name = 'cs_student' + str(ui.cs_department.currentIndex())
query = 'select count(*) from information_schema.VIEWS where TABLE_SCHEMA="teaching_management_system" and TABLE_NAME=%s'
cur.execute(query, [view_name]) # 先查询视图是否已被定义
if cur.fetchone()[0] == 1:
QMessageBox.warning(self, '警告', '视图已被定义:' + view_name)
else:
query = 'create view ' + view_name + ' as select num,name,c_num from student where c_num in (select c_num from class where d_num in (select d_num from department where c_num="001" and name=%s))'
cur.execute(query, [d_name])
QMessageBox.information(self, '成功', '成功创建视图:' + view_name)
def create_index(self):
index = ui.cs_index.currentText().split()[0]
query = 'select count(*) from information_schema.INNODB_INDEXES where NAME=%s'
cur.execute(query, [index + '_index']) # 先查询视图是否已被定义
if cur.fetchone()[0] == 1:
QMessageBox.warning(self, '警告', '索引已被定义:' + index)
else:
query = 'create index ' + index + '_index on student(' + index + ' desc) '
cur.execute(query)
QMessageBox.information(self, '成功', '成功创建索引:' + index + '_index')
def transaction_dialog(self):
dialog = TransactionDialog(self)
dialog_ui = transaction.Ui_dialog()
dialog_ui.setupUi(dialog)
dialog.set_ui(dialog_ui)
dialog.show()
def change_combobox(self):
ui.add_trigger.setText('触发器:' + ('开' if ui.add_trigger.isChecked() else '关'))
class TransactionDialog(QDialog):
    """Dialog simulating balance transfers between students inside one
    database transaction (commit on success, rollback on any error)."""

    def set_ui(self, ui):
        """Attach the generated Qt UI and populate the widgets."""
        self.ui = ui
        self.__update_num()

    def __update_num(self):
        """Refresh the sender/receiver combo boxes and the balance table."""
        cur.execute('select num,name,balance from student order by num asc')
        items = cur.fetchall()
        # Combo entries show "<num> <balance>" for each student.
        res = [item[0] + ' ' + str(item[2]) for item in items]
        self.ui.sender_nums.clear()
        self.ui.receivers_nums.clear()
        self.ui.sender_nums.addItems(res)
        self.ui.receivers_nums.addItems(res)
        table = self.ui.stu_table
        table.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
        table.setRowCount(len(items))
        table.setColumnCount(3)
        table.verticalHeader().setVisible(False)
        table.setHorizontalHeaderLabels(['学号', '姓名', '余额'])
        for idx, item in enumerate(items):
            table.setItem(idx, 0, QTableWidgetItem(item[0]))
            table.setItem(idx, 1, QTableWidgetItem(item[1]))
            table.setItem(idx, 2, QTableWidgetItem(str(item[2])))

    def begin_transaction(self):
        """Run the transfer as one transaction; roll back on any failure."""
        is_checked = self.ui.add_exception.isChecked()
        con.begin()  # start the transaction
        try:
            sender, s_balance = self.ui.sender_nums.currentText().split()
            receiver, r_balance = self.ui.receivers_nums.currentText().split()
            transfer_num = self.ui.transfer_num.text()
            # Non-numeric input raises ValueError here and is handled below,
            # matching the original behavior.
            amount = int(transfer_num)
            if sender == receiver or int(s_balance) < amount or is_checked:
                raise Exception
            else:
                QMessageBox.information(self, '提示', '正在转帐')
                # Fix: the original concatenated user-supplied text straight
                # into the SQL string (injection-prone); use parameterized
                # queries instead.
                cur.execute('update student set balance=balance-%s where num=%s', [amount, sender])
                cur.execute('update student set balance=balance+%s where num=%s', [amount, receiver])
        except Exception as e:
            QMessageBox.warning(self, '警告', '数据库接收到错误,开始回退,转账失败')
            con.rollback()
        else:
            con.commit()
            QMessageBox.information(self, '提示', '转账成功')
            self.__update_num()

    def change_checkbox(self):
        """Update the simulated-exception checkbox label to match its state."""
        self.ui.add_exception.setText('模拟异常:' + ('开' if self.ui.add_exception.isChecked() else '关'))
if __name__ == "__main__":
    # NOTE(review): database credentials are hard-coded here; consider moving
    # them to environment variables or a config file.
    con = pymysql.connect(host='localhost', port=3306, user='root', password='123456', charset='utf8',
                          database='teaching_management_system')  # connect to the database
    cur = con.cursor()  # cursor used to execute SQL statements
    # Build the Qt application and the main window, then enter the event loop.
    app = QApplication(sys.argv)
    main_win = MainWindow()
    ui = gui.Ui_MainWindow()
    ui.setupUi(main_win)
    main_win.update_c_num_combobox()
    main_win.update_s_name_combobox()
    main_win.show()
    sys.exit(app.exec_())
| HIT-SCIR-chichi/hit_db_lab | lab1/main.py | main.py | py | 15,405 | python | en | code | 11 | github-code | 13 |
11510830139 |
import argparse
import os
import platform
import re
import subprocess
import sys
from pathlib import Path
from timeit import default_timer as timer
from .errors import PyxellError
from .indentation import transform_indented_code
from .parser import PyxellParser
from .transpiler import PyxellTranspiler
abspath = Path(os.path.abspath(__file__)).parents[1]
version = Path(abspath/'version.txt').read_text()
def build_ast(path):
    """Parse the Pyxell source file at *path* and return its AST."""
    # Note: Python automatically normalizes '\r' and '\r\n' to '\n' when reading a file.
    source = path.read_text()
    indented = transform_indented_code(source, path)
    parser = PyxellParser(indented, path)
    return parser.parse_program()
# Pre-parse the bundled library units once at import time so every
# compilation can reuse their ASTs.
units = {}
for name in ['std', 'math', 'random']:
    units[name] = build_ast(abspath/f'lib/{name}.px')
def resolve_local_includes(path):
    """Return *path*'s text with `#pragma once` removed and every local
    `#include "..."` recursively replaced by the included file's contents."""
    code = path.read_text().replace('#pragma once', '')
    # Includes are resolved relative to the including file's directory.
    return re.sub(r'#include "(.+?)"',
                  lambda match: resolve_local_includes(path.parent / match.group(1)),
                  code)
def cpp_flags(opt_level):
    """Common C++ compiler flags: C++17 plus the requested -O level."""
    return ['-std=c++17', '-O{}'.format(opt_level)]
def precompile_base_header(cpp_compiler, opt_level):
    """Precompile lib/base.hpp with *cpp_compiler* to speed up later builds.

    Raises subprocess.CalledProcessError if the compiler exits non-zero
    (check=True).
    """
    command = [cpp_compiler, *cpp_flags(opt_level), '-c', str(abspath/'lib/base.hpp')]
    subprocess.run(command, stdout=subprocess.PIPE, check=True)
def run_cpp_compiler(cpp_compiler, cpp_filename, exe_filename, opt_level, verbose=False, disable_warnings=False):
    """Compile *cpp_filename* into *exe_filename* with *cpp_compiler*.

    Exits the process with status 1 when the compiler command is missing;
    a non-zero compiler exit propagates as subprocess.CalledProcessError
    to the caller.
    """
    command = [cpp_compiler, *cpp_flags(opt_level), cpp_filename, '-include', str(abspath/'lib/base.hpp'), '-o', exe_filename, '-lstdc++']
    if disable_warnings:
        command.append('-w')
    # The math library is linked separately everywhere except Windows.
    if platform.system() != 'Windows':
        command.append('-lm')
    if verbose:
        print(f"running {' '.join(command)}")
    try:
        # stderr is merged into stdout so warnings show up in verbose mode.
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if verbose and output:
            print(output.decode())
    except FileNotFoundError:
        print(f"command not found: {cpp_compiler}")
        sys.exit(1)
def compile(filepath, cpp_compiler, opt_level, verbose=False, mode='executable'):
    """Transpile a Pyxell source file to C++ and, in 'executable' mode, build it.

    Args:
        filepath: path to the .px source file.
        cpp_compiler: C++ compiler command (e.g. 'gcc').
        opt_level: optimization level, passed as -O<level>.
        verbose: print progress information.
        mode: 'executable' builds a binary; 'standalone-cpp' only writes a
            self-contained C++ file.

    Returns the executable's filename in 'executable' mode, otherwise None.
    Note: deliberately shadows the builtin `compile` (existing public API).
    """
    filepath = Path(filepath)
    # Derive the output names from the source file's base name -- the
    # original computed `filename` but never used it, writing every build
    # to the same literal '(unknown).cpp' / '(unknown).exe'.
    filename, _ = os.path.splitext(filepath)
    cpp_filename = f'{filename}.cpp'
    exe_filename = f'{filename}.exe'
    if verbose:
        print(f"transpiling {filepath} to {cpp_filename}")

    t1 = timer()
    transpiler = PyxellTranspiler()
    # Transpile the bundled library units first so the program can use them.
    for name, ast in units.items():
        transpiler.run(ast, name, f'lib/{name}.px')
    ast = build_ast(filepath)
    code = transpiler.run_main(ast, filepath)

    with open(cpp_filename, 'w') as file:
        file.write(f"/*\n"
                   f"Generated by Pyxell {version}.\n"
                   f"https://github.com/adamsol/Pyxell\n"
                   f"*/\n\n")
        if mode == 'standalone-cpp':
            # Inline base.hpp so the output compiles without the Pyxell tree.
            file.write(resolve_local_includes(abspath/'lib/base.hpp'))
            file.write("\n\n/* Program */\n\n")
        file.write(code)
    t2 = timer()

    global transpilation_time
    transpilation_time = t2 - t1

    if mode != 'executable':
        return

    t1 = timer()
    run_cpp_compiler(cpp_compiler, cpp_filename, exe_filename, opt_level, verbose)
    t2 = timer()

    global compilation_time
    compilation_time = t2 - t1

    return exe_filename
def main():
    """Command-line entry point: parse arguments, compile, optionally execute.

    Exit codes: 0 on success (or --version/--precompile-header), 1 on a
    file/compilation error, 2 when the execution time limit is exceeded.
    """
    parser = argparse.ArgumentParser(prog='pyxell', description="Run Pyxell compiler.")
    parser.add_argument('filepath', nargs=argparse.OPTIONAL, help="source file path")
    parser.add_argument('-c', '--cpp-compiler', default='gcc', help="C++ compiler command (default: gcc)")
    parser.add_argument('-l', '--time-limit', type=int, help="program execution time limit")
    parser.add_argument('-n', '--dont-run', action='store_true', help="don't run the program after compilation")
    parser.add_argument('-O', '--opt-level', type=int, choices=range(4), default=0, help="compiler optimization level (default: 0)")
    parser.add_argument('-p', '--precompile-header', action='store_true', help="precompile the base.hpp header and exit")
    parser.add_argument('-s', '--standalone-cpp', action='store_true', help="save transpiled C++ code for standalone compilation and exit")
    parser.add_argument('-t', '--time', action='store_true', help="measure time of program compilation and execution")
    parser.add_argument('-v', '--verbose', action='store_true', help="output diagnostic information")
    parser.add_argument('-V', '--version', action='store_true', help="print version number and exit")
    args = parser.parse_args()

    if args.version:
        print(f"Pyxell {version}")
        sys.exit(0)
    if args.precompile_header:
        precompile_base_header(args.cpp_compiler, args.opt_level)
        sys.exit(0)
    if not args.filepath:
        parser.error("filepath is required")

    try:
        mode = 'standalone-cpp' if args.standalone_cpp else 'executable'
        exe_filename = compile(args.filepath, args.cpp_compiler, args.opt_level, args.verbose, mode)
    except FileNotFoundError:
        print(f"file not found: {args.filepath}")
        sys.exit(1)
    except PyxellError as e:
        print(str(e))
        sys.exit(1)
    except subprocess.CalledProcessError as e:
        print(e.output.decode())
        sys.exit(1)

    # Track whether the program was actually executed, so the timing report
    # below never references an unbound `execution_time`.
    executed = False
    if exe_filename and not args.dont_run:
        # Prefix with ./ so the shell-less exec finds a bare filename.
        if '/' not in exe_filename and '\\' not in exe_filename:
            exe_filename = './' + exe_filename
        if args.verbose:
            print(f"executing {exe_filename}")
        t1 = timer()
        try:
            subprocess.run(exe_filename, timeout=args.time_limit)
        except subprocess.TimeoutExpired:
            print('execution time limit exceeded')
            sys.exit(2)
        t2 = timer()
        execution_time = t2 - t1
        executed = True

    if args.time:
        print("---")
        print(f"transpilation: {transpilation_time:.3f}s")
        if exe_filename:
            print(f"compilation: {compilation_time:.3f}s")
        # Previously this checked only `not args.dont_run`, which raised a
        # NameError with --time in standalone-cpp mode (nothing was run).
        if executed:
            print(f"execution: {execution_time:.3f}s")
| adamsol/Pyxell | src/main.py | main.py | py | 5,964 | python | en | code | 51 | github-code | 13 |
5590670180 | import numpy as np
import re
def universities_to_keep(authors, universities):
    """Strip parenthesized affiliations out of *authors*, collecting them.

    Every '( ... )' span is removed from the string and its contents appended
    to *universities* (mutated in place). A trailing unbalanced '(' is treated
    as opening an affiliation that runs to the end of the string.
    Returns the cleaned author string and the affiliation list.
    """
    while '(' in authors and ')' in authors:
        open_idx = authors.find('(')
        close_idx = authors.find(')')
        universities.append(authors[open_idx + 1:close_idx])
        authors = authors[:open_idx] + authors[close_idx + 1:]
    if '(' in authors:
        open_idx = authors.find('(')
        universities.append(authors[open_idx + 1:])
        authors = authors[:open_idx]
    return authors, universities
def name_to_keep(author):
    """Normalize an author name to '<first initial>. <last name>'.

    Single-token names are returned unchanged. Leading/trailing spaces are
    stripped; the original while-loops indexed author[0] *before* checking
    the length and crashed with IndexError on all-space input.
    """
    if len(author.split(' ')) <= 1:
        return author
    author = author.strip(' ')
    if not author:
        # All-space input: nothing to abbreviate.
        return author
    # Ensure a space after every period so initials split off cleanly
    # (the original also had a no-op replace('. ', '. ') — dropped).
    author = author.replace('.', '. ')
    parts = author.split(' ')
    return parts[0][0] + '. ' + parts[-1]
def authors_and_universities(info):
    """Split the concatenated 'authors' column of *info* into per-article
    lists of normalized author names and extracted university affiliations.

    info: DataFrame with an 'authors' column; NaN entries are replaced with
    the literal 'missing' (the column is mutated in place).
    Returns (list_of_author_lists, list_of_university_lists), aligned with
    the rows of *info*; missing rows yield ['missing'] in both outputs.
    """
    all_authors = []
    all_universities = []
    info['authors'] = info['authors'].replace(np.nan, 'missing')
    for raw in info['authors']:
        if raw == 'missing':
            all_universities.append(['missing'])
            all_authors.append(['missing'])
            continue
        # Pull the parenthesized affiliations out of the lowercased string,
        # then split the remaining authors on commas or ampersands.
        cleaned, universities = universities_to_keep(raw.lower(), [])
        article_authors = [name_to_keep(part)
                           for part in re.split(',|&', cleaned)
                           if part != ' ']
        all_universities.append(universities)
        all_authors.append(article_authors)
    return all_authors, all_universities
return list_authors, list_universities | brozi/graphs-and-text | authors_and_universities.py | authors_and_universities.py | py | 2,132 | python | en | code | 1 | github-code | 13 |
2322648441 | import pandas as pd
import numpy as np
import gensim
from gensim import corpora, models
from tqdm import tqdm
from keras.preprocessing.text import Tokenizer
import operator
# Gensim's built-in English stop-word set, used by delete_stopwords().
stopwords = gensim.parsing.preprocessing.STOPWORDS

EMBED_SIZE = 300 # embedding dimensionality (embedding_matrix derives its own local value from the loaded file)
MAX_FEATURES = 10000 # maximum number of unique words kept by the tokenizer
MAXLEN = 220 # maximum length of a comment text sequence
def make_dictionary(data, text_column = 'comment_text', no_below = 10, no_above = 0.5, keep_n = 7500):
    """Build a gensim Dictionary from the (already tokenized) *text_column*.

    Tokens appearing in fewer than *no_below* documents or in more than
    *no_above* (fraction) of documents are pruned, keeping at most *keep_n*.
    """
    vocabulary = gensim.corpora.Dictionary(data[text_column])
    vocabulary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=keep_n)
    return vocabulary
def make_bow(data, dictionary, text_column = 'comment_text'):
    """Convert each tokenized document in *text_column* to a bag-of-words list."""
    return [dictionary.doc2bow(document) for document in data[text_column]]
def make_tfidf(data , bow_corpus):
    """Fit a TF-IDF model on *bow_corpus* and return the transformed corpus.

    NOTE(review): the *data* argument is unused; kept for interface
    compatibility with existing callers.
    """
    model = gensim.models.TfidfModel(bow_corpus)
    return model[bow_corpus]
def build_LDA_model(corpus_tfidf, id2word, num_topics = 20, passes = 2, workers = 3):
    """Train a multicore LDA topic model on the TF-IDF corpus."""
    return gensim.models.LdaMulticore(
        corpus_tfidf,
        num_topics=num_topics,
        id2word=id2word,
        passes=passes,
        workers=workers,
    )
def delete_stopwords(text, stop_words = stopwords):
    """Tokenize *text* with gensim's simple_preprocess and drop stop words
    and tokens shorter than 3 characters.

    stop_words: collection of words to remove (defaults to gensim STOPWORDS).
    Returns the list of kept tokens.
    """
    result = []
    for token in gensim.utils.simple_preprocess(text):
        # Filter against the stop_words parameter; the original ignored it
        # and always used the module-level `stopwords`, so a custom list
        # passed by a caller had no effect.
        if token not in stop_words and len(token) > 2:
            result.append(token)
    return result
def make_vocabulary(texts):
    """Count word frequencies across a Series of whitespace-separated texts.

    texts: pandas Series of strings.
    Returns a dict mapping word -> number of occurrences.
    """
    vocab = {}
    for words in texts.apply(str.split):
        for word in words:
            # dict.get replaces the original try/except-KeyError counting,
            # avoiding exception-driven control flow in the hot loop.
            vocab[word] = vocab.get(word, 0) + 1
    return vocab
def load_embedding(file_path):
    """Load a GloVe-style text embedding file into {word: float32 vector}.

    Each line is '<word> <v1> <v2> ...' with single-space separators; the
    file is read with the latin-1 codec as in the original.
    """
    def get_coef(word, *arr):
        return word, np.asarray(arr, dtype='float32')

    # `with` guarantees the handle is closed (the original leaked an open
    # file object created inline in the generator expression).
    with open(file_path, encoding='latin') as f:
        return dict(get_coef(*line.split(" ")) for line in f)
def embedding_matrix(word_index, embeddings_index):
    """Build a (nb_words, embed_size) matrix of embeddings for a tokenizer vocab.

    word_index: {word: 1-based index} as produced by a keras Tokenizer.
    embeddings_index: {word: vector}.
    Rows for words missing from the embedding are drawn from a normal
    distribution matching the embedding's overall mean/std.
    """
    all_embs = np.stack(embeddings_index.values())
    emb_mean, emb_std = all_embs.mean(), all_embs.std()
    EMBED_SIZE = all_embs.shape[1]
    nb_words = min(MAX_FEATURES, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, EMBED_SIZE))
    for word, i in tqdm(word_index.items()):
        # Keras indices are 1-based, so i can reach len(word_index). Bound by
        # nb_words (not MAX_FEATURES): when the vocabulary is smaller than
        # MAX_FEATURES the original `i >= MAX_FEATURES` check let i == nb_words
        # through and raised IndexError on the assignment below.
        if i >= nb_words:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix
def check_coverage(vocab , embedding_index):
    """Report how much of *vocab* (word -> count) the embedding covers.

    Prints coverage by unique word and by total word occurrences, then
    returns the out-of-vocabulary words as (word, count) pairs sorted with
    the most frequent first.
    """
    known_words = {}
    unknown_words = {}
    num_known_words = 0
    num_unknown_words = 0
    for word in tqdm(vocab.keys()):
        try:
            known_words[word] = embedding_index[word]
            num_known_words += vocab[word]
        except KeyError:
            # Word missing from the embedding (the original also had a dead
            # `pass` after this block — removed).
            unknown_words[word] = vocab[word]
            num_unknown_words += vocab[word]
    print('Found embedding for {:.2%} of vocabulary'.format(len(known_words) /len(vocab)))
    print('Found embedding for {:.2%} of text'.format(num_known_words /(num_known_words + num_unknown_words)))
    # Ascending sort then reverse, preserving the original tie ordering.
    unknown_words = sorted(unknown_words.items(), key=operator.itemgetter(1))[::-1]
    return unknown_words
def add_lower_in_emb(embedding_matrix , vocab):
    """Alias lowercased vocab words to their original-casing embedding vector.

    embedding_matrix: despite the name, this is the embedding *index*
    (a dict of word -> vector), mutated in place — `in` and item assignment
    below are dict operations.
    vocab: dict of word -> count; only its keys are used.
    Prints how many lowercase aliases were added.
    """
    count = 0
    for word in tqdm(vocab.keys()):
        # Only add the alias when the lowercase form is not already present,
        # so existing lowercase vectors are never overwritten.
        if word in embedding_matrix and word.lower() not in embedding_matrix:
            embedding_matrix[word.lower()] = embedding_matrix[word]
            count += 1
    print('{} word added'.format(count))
# Mapping of English contractions (plus common misspellings, informal forms
# and British/American spelling variants) to their expanded equivalents.
# Consumed by clean_contractions() and known_contractions().
contraction_mapping = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have",
"couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not",
"haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you",
"how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have",
"I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have",
"i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will",
"it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have",
"mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not",
"mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock",
"oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not",
"shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will",
"she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not",
"shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is",
"that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would",
"there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would",
"they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are",
"they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have",
"we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not",
"what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is",
"what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is",
"where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have",
"why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not","won't've": "will not have",
"would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all",
"y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have",
"you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are",
"you've": "you have", 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling',
'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization',
'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What',
'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much',
'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does',
'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis',
'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018',
'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what',
'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization',
'demonetisation': 'demonetization'}
def known_contractions(embed):
    """Return the contractions from contraction_mapping present as keys in *embed*."""
    return [contraction for contraction in tqdm(contraction_mapping)
            if contraction in embed]
def clean_contractions(text, mapping = contraction_mapping):
    """Normalize apostrophe variants in *text* and expand contractions.

    text: the comment string to clean.
    mapping: contraction -> expansion dict (defaults to contraction_mapping).
    Returns the text with each space-separated token replaced by its
    expansion when one exists.
    """
    # Unify the curly/accent apostrophe glyphs first so mapping keys match.
    for quote in ("’", "‘", "´", "`"):
        text = text.replace(quote, "'")
    expanded = [mapping.get(token, token) for token in text.split(" ")]
    return ' '.join(expanded)
# Punctuation inventory: ASCII punctuation, curly quotes, and a long tail of
# exotic / mis-encoded glyphs observed in the corpus. Passed as the *punct*
# argument to unknown_punct() and clean_special_chars().
punct_mapping = "/-'?!.,#$%\'()*+-/:;<=>@[\\]^_`{|}~" + '""“”’' + '∞θ÷α•à−β∅³π‘₹´°£€\×™√²—–&'
punct_mapping += '©^®` <→°€™› ♥←×§″′Â█½à…“★”–●â►−¢²¬░¶↑±¿▾═¦║―¥▓—‹─▒:¼⊕▼▪†■’▀¨▄♫☆é¯♦¤▲踾Ã⋅‘∞∙)↓、│(»,♪╩╚³・╦╣╔╗▬❤ïØ¹≤‡√'
def unknown_punct(embed, punct):
    """Return the characters of *punct* missing from *embed*, space-separated.

    embed: container supporting `in` (e.g. an embedding index dict).
    punct: iterable of punctuation characters to check.
    Returns a string such as '! ? ' (each unknown char followed by a space).

    The original built the string with quadratic `+=` in a loop and its
    docstring described the wrong arguments; both fixed here.
    """
    return ''.join(ch + ' ' for ch in punct if ch not in embed)
# Character substitutions applied before spacing punctuation: normalize
# quote/dash variants and spell out a few mathematical symbols.
puncts = {"‘": "'", "´": "'", "°": "", "€": "e", "—": "-", "–": "-", "’": "'", "_": "-", "`": "'", '“': '"', '”': '"', '“': '"', "£": "e", '∞': 'infinity', 'θ': 'theta', '÷': '/', 'α': 'alpha', '•': '.', 'à': 'a', '−': '-', 'β': 'beta', '∅': '', '³': '3', 'π': 'pi', '…': ' '}
def clean_special_chars(text, punct, mapping):
    """Replace characters per *mapping*, then pad each *punct* character with spaces.

    text: string to clean.
    punct: iterable of punctuation characters to isolate as tokens.
    mapping: dict of character -> replacement applied first.
    Returns the cleaned text.
    """
    for original, replacement in mapping.items():
        text = text.replace(original, replacement)
    for character in punct:
        text = text.replace(character, ' ' + character + ' ')
    return text
# Hard-coded Windows data locations for the Jigsaw train/test splits.
TEST_PATH = 'c:/Data/data/test.csv'
TRAIN_PATH = 'c:/Data/data/train.csv'

test = pd.read_csv(TEST_PATH , index_col='id')
train = pd.read_csv(TRAIN_PATH , index_col='id')

EMB_PATH = 'c:/Data/data/glove.txt'
emb_index = load_embedding(EMB_PATH)

vocab = make_vocabulary(train['comment_text'])
# NOTE(review): embedding_matrix expects a {word: 1-based index} mapping
# (keras word_index) but receives the {word: count} vocabulary — confirm
# this is intentional.
emb_matrix = embedding_matrix(vocab, emb_index)
check_coverage(vocab, emb_index)
# NOTE(review): add_lower_in_emb operates on a dict embedding index, but
# emb_matrix is a numpy array here — this call likely misbehaves; verify.
add_lower_in_emb(emb_matrix , vocab)

# Expand contractions, then space out punctuation, in both splits.
train['comment_text'] = train['comment_text'].apply(lambda x: clean_contractions(x, contraction_mapping))
test['comment_text'] = test['comment_text'].apply(lambda x: clean_contractions(x, contraction_mapping))
train['comment_text'] = train['comment_text'].apply(lambda x: clean_special_chars(x, punct_mapping, puncts))
test['comment_text'] = test['comment_text'].apply(lambda x: clean_special_chars(x, punct_mapping, puncts))

# Re-check embedding coverage on the combined, cleaned corpus.
df = pd.concat([train ,test], sort=False)
vocab = make_vocabulary(df['comment_text'])
print("Check coverage after punctuation replacement")
oov_glove = check_coverage(vocab, emb_index)

# NOTE(review): list(train) yields the DataFrame's *column names*, not the
# comment texts — these three calls probably should receive
# train['comment_text'] / test['comment_text']; confirm.
tokenizer = Tokenizer(num_words=MAX_FEATURES)
tokenizer.fit_on_texts(list(train))
train = tokenizer.texts_to_sequences(train)
test = tokenizer.texts_to_sequences(test)
9759661768 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 26 17:00:52 2022
@author: nicholassimon
"""
# Import relevant libraries
import spacy
import pandas as pd
from spacytextblob.spacytextblob import SpacyTextBlob
import snscrape.modules.twitter as sntwitter
import statistics
import os
# NLP variables: spaCy pipeline with the spacytextblob sentiment component
# appended so docs expose `doc._.blob.polarity`.
nlp = spacy.load('en_core_web_sm')
nlp.add_pipe('spacytextblob')

# Import data (machine-specific path).
absolute_path = '/Users/nicholassimon/Documents/GitHub/' # Change to your path
data_path = f'{absolute_path}final-project-nba_prediction_modeling/Data/NBA_players_data.csv'
nba_df = pd.read_csv(data_path)

# Twitter handles of NBA beat writers whose tweets are scraped below.
# https://fansided.com/2018/10/11/nba-twitter-beat-writers/
handles = ['ByJayKing','APOOCH', 'StevePopper','PompeyOnSixers', 'JLew1050', 'KCJHoop',
           'CavsJoeG', 'detnewsRodBeard','ScottAgness', 'Matt_Velazquez', 'CVivlamoreAJC',
           'rick_bonnell', 'IraHeatBeat', 'JoshuaBRobbins', 'CandaceDBuckner', 'chrisadempsey',
           'JerryZgoda', 'royceyoung', 'mikegrich', 'Andyblarsen', 'AnthonyVslater',
           'DanWoikeSports', 'Mike_Bresnahan', 'GeraldBourguet', 'mr_jasonjones', 'MFollowill',
           'Jonathan_Feigen', 'MyMikeCheck', 'Jim_Eichenhofer', 'JeffGSpursZone']
# Function that scrapes Twitter and merges sentiment scores into nba_df.
def scrape_sentiment(nba_df, handles):
    """Score preseason tweets about each player and merge the means into nba_df.

    For every year 2009-2022, tweets from the given beat-writer handles in
    the Sept 1 - Oct 15 window are scraped with snscrape; each tweet that
    contains a player's name is scored with TextBlob polarity, and the mean
    score per (player, season) is merged into nba_df as 'sentiment_score'
    (0 where no tweets mentioned the player that season).

    nba_df: DataFrame with at least 'player' and 'season' columns.
    handles: list of Twitter usernames (without '@').
    Returns the merged DataFrame.

    NOTE(review): player mentions are plain substring matches
    ('if player in tweet'), so short/common names can yield false
    positives — verify this is acceptable for the data set.
    """
    # Create a list of players
    player_list = list(set(nba_df['player']))

    # Create a list of years and a dictionary in which the years serve as keys
    year_range=[*range(2009,2023)]
    year_dict = {year: [] for year in year_range}

    # Build {year: [handle, ...]} — every year gets the full handle list.
    # https://stackoverflow.com/questions/20585920/how-to-add-multiple-values-to-a-dictionary-key
    handles_dict = {}
    for key, val in year_dict.items():
        for handle in handles:
            handles_dict.setdefault(key, []).append(handle)

    # Scrape Twitter and store tweets in a df.
    # NOTE(review): the inner `for year, handles in ...` loop variable shadows
    # the `handles` parameter; harmless here since the parameter is not used
    # again, but confusing.
    # https://www.youtube.com/watch?v=jtIMnmbnOFo
    # https://www.youtube.com/watch?v=uPKnSq6TaAk
    # https://stackoverflow.com/questions/53509168/extract-year-month-and-day-from-datetime64ns-utc-python
    tweets = []
    for year, handles in handles_dict.items():
        for handle in handles:
            # Preseason window: Sept 1 to Oct 15 of the given year.
            query = f'(from:{handle}) until:{year}-10-15 since:{year}-09-01'
            for tweet in sntwitter.TwitterSearchScraper(query).get_items():
                tweets.append([tweet.date, tweet.username, tweet.content])
    tweets_df = pd.DataFrame(tweets, columns=['Date', 'Handle', 'Tweet'])
    datetimes = pd.to_datetime(tweets_df['Date'])
    # All scraped tweets fall in Sept/Oct, so the calendar year is the season.
    tweets_df['Season'] = datetimes.dt.year

    # Determine sentiment scores for all tweets mentioning a player.
    # NOTE(review): duplicate tweet texts resolve to the *first* matching row's
    # season via the index lookup below — confirm duplicates are rare enough.
    # https://www.edureka.co/community/43215/how-to-find-the-index-of-a-particular-value-in-a-dataframe
    # https://stackoverflow.com/questions/1966207/convert-numpy-array-to-python-list
    score_list = []
    tweets = tweets_df['Tweet']
    for player in player_list:
        for tweet in tweets:
            if player in tweet:
                doc = nlp(tweet)
                # TextBlob polarity in [-1, 1], rounded to 4 decimals.
                pol_score = round(doc._.blob.polarity, 4)
                index_no = tweets_df[tweets_df['Tweet']==tweet].index.values
                index_no = index_no.astype(int)[0]
                date_list = list(tweets_df['Season'])
                season = date_list[index_no]
                score_list.append([player, pol_score, season])

    # Average sentiment per (player, season).
    # (The bare `score_list` expression below is a no-op, kept as-is.)
    score_list
    headers = ['player', 'sentiment_score', 'season']
    sentiment_df = pd.DataFrame(score_list, columns=headers)
    sentiment_df = sentiment_df.groupby(['player', 'season']).mean().reset_index()

    # Merge sentiment_df with nba_df; players/seasons with no tweets get 0.
    nba_df = nba_df.merge(sentiment_df, on=['player', 'season'], how='left')
    nba_df['sentiment_score'].fillna(0, inplace = True)

    return nba_df
# Update nba_df using the scrape_sentiment function (network-dependent:
# scrapes Twitter via snscrape, so this can take a long time).
nba_df = scrape_sentiment(nba_df, handles)

# Save the updated version of nba_df as a csv (overwrites the input file).
nba_df.to_csv(data_path)
| cgwhall/NBA-Projections | 2_Twitter_NLP.py | 2_Twitter_NLP.py | py | 4,175 | python | en | code | 0 | github-code | 13 |
43263479952 | from collections import Counter
# Read the sequence, count occurrences of each value, and print the largest
# total count over any three consecutive values {x-1, x, x+1}.
n = int(input())  # element count; only consumed, not used further
counts = Counter(map(int, input().split()))
best = 0
for value in range(max(counts) + 1):
    window = counts[value - 1] + counts[value] + counts[value + 1]
    if window > best:
        best = window
print(best)
| Shirohi-git/AtCoder | arc081-/arc082_a.py | arc082_a.py | py | 198 | python | en | code | 2 | github-code | 13 |
9982054720 | from app.bid import FingerGuessCard
def test_FingerGuessCard():
    """Exercise FingerGuessCard.compare: equal points tie (returns 0) and a
    point loses to the next point in the 3-cycle (returns the winner's point)."""
    points = FingerGuessCard.points
    for idx, point in enumerate(points):
        # Two cards holding the same point must tie.
        first = FingerGuessCard()
        first.set_point(point)
        second = FingerGuessCard()
        second.set_point(point)
        assert FingerGuessCard.compare(first.point, second.point) == 0

        # A card loses to the next point in the cycle; compare returns the
        # winning card's point.
        loser = FingerGuessCard()
        loser.set_point(point)
        winner = FingerGuessCard()
        winner.set_point(points[(idx + 1) % 3])
        assert FingerGuessCard.compare(loser.point, winner.point) == winner.point
# Allow running this test module directly, without a test runner.
if __name__ == '__main__':
    test_FingerGuessCard()
| abrance/LimitedGuessing | test/app/bid.py | bid.py | py | 576 | python | en | code | 0 | github-code | 13 |
33691440442 | #!/usr/bin/python
import time
import pprint
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import os
from datetime import datetime, timedelta
from pip import main
print(os.getcwd())
print(os.path.dirname(__file__))
data_path=os.path.dirname(__file__)
file_name="Plum_Brook_Feb7_1400hr_to_End_Main\\Plum_Brook_Feb7_1400hr_to_End.json"
name_to_read=os.path.join(data_path,file_name)
pp = pprint.PrettyPrinter(indent=4)
with open(name_to_read, 'r') as f:
df = pd.json_normalize(json.load(f))
print("Read in {} rows and with {} variables".format(df.shape[0], df.shape[1]))
print(" => First Timestamp: {}".format(df.iloc[0].server_timestamp))
print(" => Last Timestamp : {}".format(df.iloc[-1].server_timestamp))
names_payload=df.columns.values
print(type(df[names_payload[0]].values))
#np.savetxt('variable_names.txt',names_payload,fmt='%s')
# filter out zeros or whatever you like. Do this before extracting times to keep
# the x/y arrays the same size.
df = df[(df['payload.fAbsolutePressure'] != 0)]
# the time that the data were sent to the server is in server_timestamp
times = pd.to_datetime(df['server_timestamp'])
print(type(df['server_timestamp'].values[0]))
print(type(times[0]))
pressure = df['payload.fAbsolutePressure']
sfc_temp = df['payload.fSFCStatus.fCPU_Temp']
dini_temp = df['payload.fDiniTempFPGA']
richwest_power = df['payload.fPowerStat.fRICH_West.fPower']
dctboxtemp=df['payload.dctBoxTemp']
Magnet_probes=df['payload.fMagnetHSK.tempProbeAll']
#print(type(Magnet_probes.values))
#print(type(Magnet_probes.values[0]))
#print(Magnet_probes.values[0])
start_power=df.loc[df['payload.fPowerStat.fRICH_West.fPower']>100].index[0]
#start_power=times[df.iloc[df['payload.fPowerStat.fRICH_West.fPower']>100][0]]
mainhsk_temps = df['payload.main_temps'].values
DCT_temps = df['payload.dctThermistor'].values
heliumLVL=df['payload.fMagnetHSK.heliumLevels']
# to get just the first list element for each timestamp of the series...
#mainhsk_temps_array=np.empty([len(mainhsk_temps),len(mainhsk_temps[0])])
#temp_array=np.empty([len(mainhsk_temps[0]),])
temp_list=[]
for i in mainhsk_temps: temp_list.append(i)
mainhsk_temps_array=np.asarray(temp_list)
# now for DCT temps
temp_list=[]
for i in DCT_temps: temp_list.append(i)
DCT_temps_array=np.asarray(temp_list)
#and helium levels
temp_list=[]
for i in heliumLVL: temp_list.append(i)
helium_levels_array=np.asarray(temp_list)
time_elapsed=pd.to_timedelta(times.values-times.values[0],unit='hours',errors="raise")
time_deltas=time_elapsed/timedelta(hours=1) # can also do minutes
mainhsk_names=pd.read_csv(os.path.join(data_path,'mainhsk_temp_sensors.txt'))
# legend/order of loading csvs i guess
NASA_names=pd.read_csv(os.path.join(data_path,"ATF_Data\\ATF_Data\\keith_final_legend_order.csv"))
#print(NASA_names.ID.values[2])
# NASA TCs loaded as a list first
NASA_TCs=[]
for name in NASA_names.ID:
NASA_TCs.append(pd.read_csv(os.path.join(data_path,"ATF_Data\\ATF_Data\\"+name+"_csv.csv"),skiprows=1))
# convert NASA times to timestamp proper
#adjust to UTC like the other times
time_change = timedelta(hours=5)
for dfN in NASA_TCs:
dfN['Times']=pd.to_datetime(dfN['Timestamp'])
dfN['Times'] = dfN['Times'] + time_change
# Now make a plot
fig = plt.figure(figsize=(14, 10), dpi=200)
axs=fig.add_subplot(111)
#gs = fig.add_gridspec(1, 1)
#axs = gs.subplots(sharex=True, sharey=False)
#axs = gs.subplots()
#axs[0].scatter(times, pressure, marker='.')
#axs[0].set_ylabel("Pressure (Torr)")
#axs[0].set_ylim([1, 759])
# need timestamps to use here for cold wall begin filling and ending and then hot and cold cases respectively.
# datetime(year, month, day, hour, minute, second, microsecond)
#b = datetime(2017, 11, 28, 23, 55, 59, 342380)
cold_wall_fill_start = datetime(2022, 2, 7,21,0,0,0)
cold_wall_fill_end = datetime(2022, 2, 8,0,22,0,0)
power_on_DAQ = times[start_power]
cold_case_start=datetime(2022, 2, 8,1,6,0,0)
cold_case_end=datetime(2022, 2, 8,7,14,0,0)
hot_case_start=cold_case_end
hot_case_end=datetime(2022, 2, 8,17,30,0,0)
kickflip_start=hot_case_end
discharge_magnet = datetime(2022,2,8,20,35,0,0)
discharge_magnet_ps_off = datetime(2022,2,8,22,36,0,0)
drain_cold_wall_begin=datetime(2022,2,9,00,00,0,0)
#kickflip_end=datetime(2022, 2, 9,00,00,0,0) # not sure this actually stopped until during warm up
cold_wall_at_7ft=datetime(2022,2,9,4,00,0,0)
cold_wall_at_neg_6_deg=datetime(2022,2,9,7,37,0,0)
kickflip_end=datetime(2022,2,9,7,46,0,0)
slight_warmup_start=datetime(2022,2,9,10,52,0,0)
drain_cold_wall_end=datetime(2022,2,9,12,45,0,0)
slight_warmup_end=datetime(2022,2,9,12,45,0,0)
evacuation_start = datetime(2022,2,7,17,45,0,0)
evacuation_end = datetime(2022,2,9,17,00,0,0)
DAQ_Run = datetime(2022,2,8,5,41,0,0)
DAQ_Run_2 = datetime(2022,2,9,0,10,0,0)
heater_start=datetime(2022,2,8,13,18,0,0)
heater_max=datetime(2022,2,8,18,52,0,0)
# now in hours
# do conversions...
#Vertical lines
#axs[0].axvline(x=power_on_DAQ,ymin=0, ymax=1, color='red',label="power on DAQ")
#axs[0].text(power_on_DAQ, 10, "Power on DAQ", color='red',rotation=90, fontsize=8)
#axs[0].axvline(x=discharge_magnet,ymin=0, ymax=1, color='Brown',label="discharge magnet")
#axs[0].text(discharge_magnet, 10, "Discharge magnet", color='Brown', rotation=90, fontsize=8)
#axs[1].axvline(x=power_on_DAQ,ymin=0, ymax=1, ls=':', color='red')
#axs[1].axvline(x=discharge_magnet,ymin=0, ymax=1, ls=':',color='Brown')
#axs[1].axvline(x=cold_wall_fill_start,ymin=0, ymax=1, color='black',label="cold wall fill start")
#axs[1].axvline(x=cold_wall_fill_end,ymin=0, ymax=1, color='black',label="cold wall fill start")
# hatches for timespans
axs.axvspan(cold_wall_fill_start, cold_wall_fill_end, alpha=0.1, color='royalblue',label="cold wall fill")
axs.axvspan(cold_case_start, cold_case_end, alpha=0.1, color='cyan', label="cold case")
axs.axvspan(hot_case_start,hot_case_end , alpha=0.1, color='firebrick', label="hot case")
axs.axvspan(kickflip_start,kickflip_end , alpha=0.3, hatch="XXX", color='darkorange', label="flipped hot case")
axs.axvspan(drain_cold_wall_begin,drain_cold_wall_end , alpha=0.1, color='royalblue', label="draining cold wall")
axs.axvspan(slight_warmup_start,slight_warmup_end , alpha=0.3, color='red', label="slight warm up")
#size for markers visibility
s0=3
#temp data goes here
#across foam
#axs.scatter(NASA_TCs[1]['Times'], NASA_TCs[1]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[1]) # In South is on foam inside gondola
#axs.scatter(NASA_TCs[8]['Times'], NASA_TCs[8]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[8]) # SoLo is on foam outside gondola
#axs.scatter(NASA_TCs[0]['Times'], NASA_TCs[0]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[0]) # InEast is on foam inside foam
#axs.scatter(NASA_TCs[6]['Times'], NASA_TCs[6]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[6]) # EastLo is on foam inside gondola
#axs.set_ylim([-80, 30])
#axs.set_xlim([times.values[0],times.values[-1]])
#NASA TCs
iter=0
while iter<len(NASA_TCs):
#print(len(NASA_TCs[iter]['Times'].values))
#print(len(NASA_TCs[iter]['Value'].values))
#print(NASA_names[iter]['Times'].values))
axs.scatter(NASA_TCs[iter]['Times'], NASA_TCs[iter]['Value'], marker='.',s=s0,label=NASA_names.ID.values[iter]) #
iter+=1
axs.set_ylim([-79, 49])
axs.set_xlim([times.values[0],times.values[-1]])
# south here : seq=1
#axs.scatter(times, mainhsk_temps_array[:,7], marker='.',s=s0,label=mainhsk_names.Location.values[7]) # TOF top South
#axs.scatter(times, mainhsk_temps_array[:,5], marker='.',s=s0,label=mainhsk_names.Location.values[5]) # TOF btm south
#axs.scatter(times, mainhsk_temps_array[:,3], marker='.',s=s0,label=mainhsk_names.Location.values[3]) # gondola btm south
#axs.scatter(times, mainhsk_temps_array[:,8], marker='.',s=s0,label=mainhsk_names.Location.values[8]) # gondola mid South
#axs.scatter(NASA_TCs[13]['Times'], NASA_TCs[13]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[13]) # SoFr is on the gondola I believe
#axs.scatter(NASA_TCs[1]['Times'], NASA_TCs[1]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[1]) # In South is on foam inside gondola
#axs.scatter(NASA_TCs[7]['Times'], NASA_TCs[7]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[7]) # SoUp is on foam outside gondola
#axs.scatter(NASA_TCs[8]['Times'], NASA_TCs[8]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[8]) # SoUp is on foam outside gondola
#axs.set_ylim([-50, 33])
#axs.set_xlim([times.values[0],times.values[-1]])
#RICH east or west side
#axs.scatter(times, mainhsk_temps_array[:,13], marker='.',s=s0,label=mainhsk_names.Location.values[13]) # Mid east RICH heatsink
#axs.scatter(times, mainhsk_temps_array[:,17], marker='.',s=s0,label=mainhsk_names.Location.values[17]) # RICH cover E
#axs.scatter(NASA_TCs[0]['Times'], NASA_TCs[0]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[0]) # InEast is on foam inside foam
#axs.scatter(NASA_TCs[5]['Times'], NASA_TCs[5]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[5]) # EastUp is on foam inside gondola
#axs.scatter(NASA_TCs[6]['Times'], NASA_TCs[6]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[6]) # EastUp is on foam inside gondola
#axs.scatter(times, mainhsk_temps_array[:,14], marker='.',s=s0,label=mainhsk_names.Location.values[14]) # Mid West RICH heatsink
#axs.scatter(times, mainhsk_temps_array[:,19], marker='.',s=s0,label=mainhsk_names.Location.values[19]) # RICH cover E
#axs.scatter(NASA_TCs[9]['Times'], NASA_TCs[9]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[9]) # WestUp is on foam inside gondola
#axs.scatter(NASA_TCs[10]['Times'], NASA_TCs[10]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[10]) # WestLo is on foam inside gondola
#axs.set_ylim([-60, 60])
#axs.set_xlim([times.values[0],times.values[-1]])
#North side across foam
#correct/calibrate the North top TOF sensor
#begin_pumping=datetime(2022,2,7,14,55,0,0)
#times_calibrate=pd.to_datetime(times.values)
#times_range=np.asarray(begin_pumping-times_calibrate).astype('timedelta64[s]')
#times_range = times_range / np.timedelta64(1, 's')
#times_to_consider=np.where(times_range>0)
#TOF_diffs=mainhsk_temps_array[times_to_consider,21]-mainhsk_temps_array[times_to_consider,7]
#average_offset=np.mean(TOF_diffs[0])
#median_offset=np.median(TOF_diffs[0])
#axs.scatter(times, mainhsk_temps_array[:,20], marker='.',s=s0,label=mainhsk_names.Location.values[20]) # Gondola btm north
#axs.scatter(times, mainhsk_temps_array[:,16], marker='.',s=s0,label=mainhsk_names.Location.values[16]) # TOF btm N
#axs.scatter(times, mainhsk_temps_array[:,21]-average_offset, marker='.',s=s0,label=mainhsk_names.Location.values[21]) # TOF top N
#axs.scatter(NASA_TCs[3]['Times'], NASA_TCs[3]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[3]) # NoUp is on foam inside gondola
#axs.scatter(NASA_TCs[4]['Times'], NASA_TCs[4]['Value'], marker='2',s=s0,label="NASA TC - "+NASA_names.ID.values[4]) # NoLo is on foam inside gondola
#axs.set_ylim([-80, 30])
#axs.set_xlim([times.values[0],times.values[-1]])
#misc 1 interesting areas
#axs[1].scatter(times, mainhsk_temps_array[:,2], marker='.',s=s0,label=mainhsk_names.Location.values[2]) # DCT HV box
#axs[1].scatter(times, mainhsk_temps_array[:,6], marker='.',s=s0,label=mainhsk_names.Location.values[6]) # SFC backplate
#axs[1].scatter(times, mainhsk_temps_array[:,15], marker='.',s=s0,label=mainhsk_names.Location.values[15]) # Gas panel
#axs[1].scatter(times, mainhsk_temps_array[:,3], marker='.',s=s0,label=mainhsk_names.Location.values[3]) # gondola btm South
#axs[1].scatter(times, dctboxtemp, marker='.',s=s0,label="DCT box internal temp") # dctbox temp
#axs[1].set_ylim([-50, 38])
#RICH
#axs.axvline(x=DAQ_Run,ymin=0, ymax=1, color='red',label="DAQ Run")
#axs.axvline(x=DAQ_Run_2,ymin=0, ymax=1, color='black',label="DAQ Run 2 end")
#axs.scatter(times, mainhsk_temps_array[:,23], marker='.',s=s0,label=mainhsk_names.Location.values[23]) # rich focal plane NW
#axs.scatter(times, mainhsk_temps_array[:,0], marker='.',s=s0,label=mainhsk_names.Location.values[0]) # rich focal plane SW
#axs.scatter(times, mainhsk_temps_array[:,18], marker='.',s=s0,label=mainhsk_names.Location.values[18]) # rich cover N
#axs.scatter(times, mainhsk_temps_array[:,9], marker='.',s=s0,label=mainhsk_names.Location.values[9]) # rich cover S
#axs.scatter(times, mainhsk_temps_array[:,19], marker='.',s=s0,label=mainhsk_names.Location.values[19]) # rich cover W
#axs.scatter(times, mainhsk_temps_array[:,17], marker='.',s=s0,label=mainhsk_names.Location.values[17]) # rich cover E
#axs.set_ylim([-20, 39])
#TOF Fees only
#axs.scatter(times, mainhsk_temps_array[:,12], marker='.',s=s0,label=mainhsk_names.Location.values[12])
#axs.scatter(times, mainhsk_temps_array[:,22], marker='.',s=s0,label=mainhsk_names.Location.values[22])
#axs.scatter(times, mainhsk_temps_array[:,24], marker='.',s=s0,label=mainhsk_names.Location.values[24])
#axs.scatter(times, mainhsk_temps_array[:,25], marker='.',s=s0,label=mainhsk_names.Location.values[25])
#Gondola Bottom
#axs.scatter(times, mainhsk_temps_array[:,3], marker='.',s=s0,label=mainhsk_names.Location.values[3])
#axs.scatter(times, mainhsk_temps_array[:,4], marker='.',s=s0,label=mainhsk_names.Location.values[4])
#axs.scatter(times, mainhsk_temps_array[:,20], marker='.',s=s0,label=mainhsk_names.Location.values[20])
#axs.scatter(times, mainhsk_temps_array[:,2], marker='.',s=s0,label=mainhsk_names.Location.values[2])
#bore paddle stuff
#axs.scatter(times, mainhsk_temps_array[:,10], marker='.',s=s0,label=mainhsk_names.Location.values[10])
#axs.scatter(times, mainhsk_temps_array[:,11], marker='.',s=s0,label=mainhsk_names.Location.values[11])
#DCT
#axs.axvline(x=heater_start,ymin=0, ymax=1,ls='-', color='red',label="heaters start")
#axs.axvline(x=heater_max,ymin=0, ymax=1,ls=':', color='black',label="heaters highest")
#axs.scatter(times, mainhsk_temps_array[:,15], marker='.',s=s0,label=mainhsk_names.Location.values[15]) #gas panel
#axs.scatter(times, mainhsk_temps_array[:,1], marker='.',s=s0,label=mainhsk_names.Location.values[1]) # DCTV top
#dct box temp
#axs.scatter(times,df['payload.dctBoxTemp'], marker='2',s=s0,label="DCT HSK box uC") # In South is on foam inside gondola
#axs.set_ylim([-30, 50])
# for DCT thermistors
#iter=0
#while iter<len(DCT_temps[0]): #,label=mainhsk_names.Location.values[1]
# axs[1].scatter(times, DCT_temps_array[:,iter], marker='.',s=s0) # DCTV top
# iter+=1
#
#axs[1].set_ylim([-20, 39])
axs.set_ylabel("Temps (C)")
plt.xticks(rotation=45)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b %d - %H:%M'))
plt.gcf().autofmt_xdate()
#axs[0].grid()
axs.grid()
#plt.legend(loc='upper center', fontsize=8)
handles, labels = axs.get_legend_handles_labels()
#lgd = axs[1].legend(handles, labels)
#for legend_handle in lgd.legendHandles:
# legend_handle.set_sizes([20])
#labels[6]._legmarker.set_markersize(6)
lgd=fig.legend(handles, labels, loc='upper center', ncol=5, fontsize=8)
# as many of these as axs[1].scatter above
lgd.legendHandles[-14].set_sizes([60])
lgd.legendHandles[-13].set_sizes([60])
lgd.legendHandles[-12].set_sizes([60])
lgd.legendHandles[-11].set_sizes([60])
lgd.legendHandles[-10].set_sizes([60])
lgd.legendHandles[-9].set_sizes([60])
lgd.legendHandles[-8].set_sizes([60])
lgd.legendHandles[-7].set_sizes([60])
lgd.legendHandles[-6].set_sizes([60])
lgd.legendHandles[-5].set_sizes([60])
lgd.legendHandles[-4].set_sizes([60])
lgd.legendHandles[-3].set_sizes([60])
lgd.legendHandles[-2].set_sizes([60])
lgd.legendHandles[-1].set_sizes([60])#
#plt.savefig("plot_timeline_south.pdf", bbox_inches='tight')
#plt.savefig("plot_timeline_south.png")
plt.show()
| Payton814/Helix_Temp_Masking | Helix_Temp_Stuff/plot_temps_timeline_overall.py | plot_temps_timeline_overall.py | py | 15,866 | python | en | code | 0 | github-code | 13 |
32315416575 | import json
from pathlib import Path
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator
def Decode(topicfilter, message):
    """Strip the leading topic prefix from *message* and JSON-decode the rest.

    The publisher prepends the topic string to the serialized payload; this
    returns the deserialized Python object that followed the topic.
    """
    payload = message[len(topicfilter):]
    return json.loads(payload)
if __name__ == "__main__":
    # One-shot CURVE-encrypted REQ client: send a single request to the local
    # server and print the JSON reply (or "Error" on timeout).
    ctx = zmq.Context.instance()
    # Key files live next to this script under authentication/.
    file_path = Path(__file__).resolve()
    public_keys_dir = file_path.parent / "authentication" / "public_keys"
    secret_keys_dir = file_path.parent / "authentication" / "private_keys"
    # file_path = Path().cwd()
    # public_keys_dir = file_path / "authentication" / "public_keys"
    # secret_keys_dir = file_path / "authentication" / "private_keys"
    # Load the server's public key plus this client's keypair for CURVE auth.
    server_public_file = public_keys_dir / "server.key"
    server_public, _ = zmq.auth.load_certificate(str(server_public_file))
    client_secret_file = secret_keys_dir / "client.key_secret"
    client_public, client_secret = zmq.auth.load_certificate(str(client_secret_file))
    client = ctx.socket(zmq.REQ)
    client.curve_secretkey = client_secret
    client.curve_publickey = client_public
    client.curve_serverkey = server_public
    client.connect("tcp://127.0.0.1:12346")
    # Request format: [device name, command string] — see the server side.
    client.send_json(["DummyDataFreq", "ReadValue()"])
    # Wait up to 1 second for the reply before giving up.
    if client.poll(1000):
        msg = client.recv_json()
        print(msg)
    else:
        print("Error")
| js216/CeNTREX | test.py | test.py | py | 1,450 | python | en | code | 1 | github-code | 13 |
24259802794 |
# Factorial of a number
def main():
    """Prompt for a non-negative integer and print its factorial.

    Uses Python 2 style input (raw_input), matching the rest of the file.
    """
    n=int(raw_input("Enter a non-negative integer: " ))
    def factorial(n):
        # Recursive n! with a guard for negative input.
        if n<0:
            return "Wrong value, Enter a integer" # checking input
        else:
            if n==0: #base case
                return 1
            else:
                return n*factorial(n-1) #recursive call
    print ("Factorial of", n, "is", factorial(n))
    # Bug fix: the quit prompt previously wrapped raw_input in int(), which
    # raised ValueError as soon as the user pressed Enter (empty string).
    raw_input("Press enter to quit: ")
# Run the interactive prompt only when executed directly (not on import).
if __name__ == '__main__':
    main()
| AdonisPeguero/Computer-Science-Work | project 3 part 1 python.py | project 3 part 1 python.py | py | 485 | python | en | code | 0 | github-code | 13 |
18074126172 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    # Scratch list of complete leaf-to-root strings; class-level storage,
    # cleared at the end of each smallestFromLeaf call.
    vc = []

    def smallest(self, root, s):
        """DFS helper: prepend this node's letter to s; record s at leaves."""
        if (root == None):
            return
        s = chr(97 + root.val) + s  # node values 0-25 map to 'a'-'z'
        if (root.left == None and root.right == None):
            self.vc.append(s)
            return
        self.smallest(root.left, s)
        self.smallest(root.right, s)

    def smallestFromLeaf(self, root: Optional[TreeNode]) -> str:
        """Return the lexicographically smallest string that starts at a leaf
        and ends at the root."""
        self.smallest(root, "")
        # Fix: min() finds the answer in O(n); the old code sorted the whole
        # candidate list (O(n log n)) just to take its first element.
        ans = min(self.vc)
        self.vc.clear()
        return ans
| akshitagit/Python | Data_Structures/Smallest String Starting From Leaf.py | Smallest String Starting From Leaf.py | py | 770 | python | en | code | 116 | github-code | 13 |
29591579062 | import matplotlib.pyplot as plt
import numpy as np
#generating the mandelbrot set with python. Used as a reference for the cairo implementation
def get_iter(c:complex, thresh:int =4, max_steps:int =25) -> int:
    """Escape-time count for the Mandelbrot iteration Z(n) = Z(n-1)**2 + c
    with Z(0) = c: return the step at which |z|**2 reaches thresh, capped
    at max_steps."""
    z, step = c, 1
    while step < max_steps:
        if (z * z.conjugate()).real >= thresh:
            break
        z = z * z + c
        step += 1
    return step
def get_iter_recursive(c:complex, max_steps:int=25, thresh:int=4, z:complex=0, steps:int=0) -> int:
    """Iterative form of the recursive escape-time count: starting from z,
    repeat z -> z**2 + c until |z|**2 exceeds thresh or max_steps iterations
    have been performed, and return the number of steps taken."""
    while steps != max_steps and (z * z.conjugate()).real <= thresh:
        z = z * z + c
        steps += 1
    return steps
def plotter(n, thresh, max_steps=25):
    """Render an n x n escape-time image of the Mandelbrot set via get_iter.

    The pixel grid is mapped onto the window [-2, 0.48] x [-1.13, 1.13];
    brighter pixels escape faster.
    """
    step_x = 2.48 / (n - 1)
    step_y = 2.26 / (n - 1)
    img = np.full((n, n), 255)
    for col in range(n):
        re = step_x * col - 2
        for row in range(n):
            im = step_y * row - 1.13
            count = get_iter(complex(re, im), thresh=thresh, max_steps=max_steps)
            img[row][col] = 255 - count
    return img
def plotter_recursive(n, thresh, max_steps=25):
    """Render an n x n escape-time image using get_iter_recursive.

    Fixes:
    - thresh and max_steps are now forwarded to get_iter_recursive; the old
      code ignored both parameters and hard-coded max_steps=32.
    - the coordinate mapping divides by (n - 1), matching plotter(), so the
      samples span the full [-2, 0.48] x [-1.13, 1.13] window.
    """
    mx = 2.48 / (n - 1)
    my = 2.26 / (n - 1)
    mapper = lambda x, y: (mx * x - 2, my * y - 1.13)
    img = np.full((n, n), 255)
    for x in range(n):
        for y in range(n):
            it = get_iter_recursive(c=complex(*mapper(x, y)), thresh=thresh, max_steps=max_steps)
            img[y][x] = 255 - it
    return img
# Render a 100x100 preview of the Mandelbrot set and display it.
n=100
img = plotter_recursive(n, thresh=4, max_steps=25)
plt.imshow(img, cmap="plasma")
plt.axis("off")
plt.show()
| Orland0x/StarknetFractals | scripts/mandelbrotWithPython.py | mandelbrotWithPython.py | py | 1,461 | python | en | code | 14 | github-code | 13 |
# Compute each user's total logged-in time from "login;action;timestamp"
# records in logs.txt and print users sorted by time, longest first.
session_start = {}
user_total_time = {}

with open("logs.txt") as log_file:
    for record in log_file:
        user, action, stamp = record.split(";")
        stamp = int(stamp)
        if action == "LOGIN":
            # Remember when this user's current session began.
            session_start[user] = stamp
        elif action == "LOGOUT":
            elapsed = stamp - session_start[user]
            user_total_time[user] = user_total_time.get(user, 0) + elapsed

print("czas przebywania w systemie: ")
for user, time in sorted(user_total_time.items(), key=lambda item: item[1], reverse=True):
    print(f" - {user:>8}, {time}")
5915673374 | from SerialData import SerialData
class Hyperparameters(SerialData):
    """Serializable bundle of architecture-search/training hyperparameters."""

    def __init__(self, debug_mode: bool = False):
        super().__init__()
        # Start from the defaults for the requested mode.
        self.parameters = Hyperparameters._default_parameters(debug_mode)

    def serialize(self) -> dict:
        """Return the parameter dict (a shared reference, not a copy)."""
        return self.parameters

    def deserialize(self, obj: dict) -> None:
        """Load parameters from obj, keeping defaults for keys obj omits.

        Keys in obj that are not known hyperparameters are ignored.
        """
        self.parameters = Hyperparameters._default_parameters(False)
        for k, v in obj.items():
            if k in self.parameters:
                self.parameters[k] = v

    def __eq__(self, other):
        # Bug fix: the previous implementation skipped any key missing from
        # other.parameters, so objects with different key sets could compare
        # equal. Dict equality compares key sets and values in one step.
        if isinstance(other, Hyperparameters):
            return self.parameters == other.parameters
        return False

    @staticmethod
    def _default_parameters(debug_mode: bool):
        """Return the default parameter dict (small/fast values in debug mode)."""
        if not debug_mode:
            return {
                'NORMAL_CELL_N': 5,
                'CELL_LAYERS': 3,
                'TARGET_FILTER_DIMS': 32,
                'REDUCTION_EXPANSION_FACTOR' : 1,
                'REDUCTION_EXPANSION_BEFORE': False,
                'REDUCE_CURRENT': False,
                'TRAIN_EPOCHS': 1,
                'TRAIN_ITERATIONS': 16,
                'MAXIMUM_LEARNING_RATE': 0.002,
                'MINIMUM_LEARNING_RATE': 0.001,
                'USE_SGDR': True,
                'BATCH_SIZE': 16,
                'SGDR_EPOCHS_PER_RESTART': 16,
                'SGDR_LR_DECAY': .8,
                'SGDR_PERIOD_DECAY': 2,
                'DROP_PATH_CHANCE': .6,
                # Multiplies the projected end of training, so drop-path
                # decays at a slower rate.
                'DROP_PATH_TOTAL_STEPS_MULTI': 2,
                'IDENTITY_THRESHOLD': 0.,  # .33
            }
        else:
            return {
                'NORMAL_CELL_N': 1,
                'CELL_LAYERS': 2,
                'INITIAL_LAYER_DIMS': 1,
                'TARGET_FILTER_SIZE': 32,
                'TRAIN_EPOCHS': 1,
                'TRAIN_ITERATIONS': 2,
                'LEARNING_RATE': 0.001,
                'USE_SGDR': True,
                'SGDR_EPOCHS_PER_RESTART': 3,
                'SGDR_LR_DECAY': .95,
                'SGDR_PERIOD_DECAY': 1.05,
                'DROP_PATH_CHANCE': .6,
                'DROP_PATH_TOTAL_STEPS_MULTI': 1,
                'IDENTITY_THRESHOLD': .33,
            }
# Recursive solution
class Solution:
    """Invert a binary tree recursively (post-order)."""

    def invertTree(self, root):
        """
        :type root: TreeNode
        :rtype: TreeNode
        """
        if root is None:
            return None
        # Invert both subtrees first, then swap them under this node.
        inverted_left = self.invertTree(root.left)
        inverted_right = self.invertTree(root.right)
        root.left, root.right = inverted_right, inverted_left
        return root
# Stack-based (iterative) solution
class Solution:
    """Invert a binary tree iteratively with an explicit DFS stack."""

    def invertTree(self, root):
        """
        :type root: TreeNode
        :rtype: TreeNode
        """
        pending = [root]
        while pending:
            node = pending.pop()
            if node is not None:
                # Swap the children, then queue them for the same treatment.
                node.left, node.right = node.right, node.left
                pending.extend((node.left, node.right))
        return root
| ericzhai918/Python | JZ-Offer/invert_binary_tree.py | invert_binary_tree.py | py | 735 | python | en | code | 0 | github-code | 13 |
74564830098 | #!/usr/bin/env python
"""
Unittests for the ReqMgr request validation utilities (WMCore.ReqMgr.Utils.Validation)
"""
from __future__ import division, print_function
import unittest
from WMCore.ReqMgr.DataStructs.RequestError import InvalidSpecParameterValue
from WMCore.ReqMgr.Utils.Validation import (validateOutputDatasets,
validate_request_priority)
class ValidationTests(unittest.TestCase):
    """
    unittest for ReqMgr Utils Validation functions
    """
    def testValidateOutputDatasets(self):
        """
        Test the validateOutputDatasets function
        """
        dbsUrl = 'https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader/'
        # Well-formed datasets with known datatiers must pass (returns None).
        outputDsets = ['/PD1/AcqEra1-ProcStr1-v1/GEN']
        self.assertIsNone(validateOutputDatasets(outputDsets, dbsUrl))
        outputDsets.append('/PD1/AcqEra1-ProcStr1-v1/GEN-SIM')
        self.assertIsNone(validateOutputDatasets(outputDsets, dbsUrl))
        outputDsets.append('/PD1/AcqEra1-ProcStr1-v1/GEN-SIM-RAW')
        self.assertIsNone(validateOutputDatasets(outputDsets, dbsUrl))
        # Duplicate dataset in the list -> rejected.
        outputDsets.append('/PD1/AcqEra1-ProcStr1-v1/GEN')
        with self.assertRaises(InvalidSpecParameterValue):
            validateOutputDatasets(outputDsets, dbsUrl)
        # Empty processed-dataset field -> rejected.
        outputDsets.remove('/PD1/AcqEra1-ProcStr1-v1/GEN')
        outputDsets.append('/PD1//AOD')
        with self.assertRaises(InvalidSpecParameterValue):
            validateOutputDatasets(outputDsets, dbsUrl)
        # Literal 'None' processed-dataset field -> rejected.
        outputDsets.remove('/PD1//AOD')
        outputDsets.append('/PD1/None/AOD')
        with self.assertRaises(InvalidSpecParameterValue):
            validateOutputDatasets(outputDsets, dbsUrl)
        # Unknown datatier ('ALAN') -> rejected.
        outputDsets.remove('/PD1/None/AOD')
        outputDsets.append('/PD1/AcqEra1-ProcStr1-v1/ALAN')
        with self.assertRaises(InvalidSpecParameterValue):
            validateOutputDatasets(outputDsets, dbsUrl)
    def testRequestPriorityValidation(self):
        """
        Test the `validate_request_priority` function, which validates the
        RequestPriority parameter
        :return: nothing, raises an exception if there are problems
        """
        # test valid cases, integer in the range of [0, 1e6)
        for goodPrio in [0, 100, int(1e6 - 1)]:
            reqArgs = {'RequestPriority': goodPrio}
            print(reqArgs)
            validate_request_priority(reqArgs)
        # test invalid ranges (1e6 itself is already out of range)
        for badPrio in [-10, 1e6, 1e7]:
            reqArgs = {'RequestPriority': badPrio}
            with self.assertRaises(InvalidSpecParameterValue):
                validate_request_priority(reqArgs)
        # test invalid data types (strings, floats, lists are all rejected)
        for badPrio in ["1234", 1234.35, 1e6, [123]]:
            reqArgs = {'RequestPriority': badPrio}
            with self.assertRaises(InvalidSpecParameterValue):
                validate_request_priority(reqArgs)
# Allow running this test module directly: `python Validation_t.py`.
if __name__ == '__main__':
    unittest.main()
| dmwm/WMCore | test/python/WMCore_t/ReqMgr_t/Utils_t/Validation_t.py | Validation_t.py | py | 2,882 | python | en | code | 44 | github-code | 13 |
# Read a comma-separated list of countries and a matching list of capitals,
# then print one "Country -> Capital" line per pair.
countries = input().split(", ")
capitals = input().split(", ")
# Indexing (rather than zip) keeps the original behavior: a shorter capitals
# list raises IndexError instead of silently truncating.
mapping = {countries[idx]: capitals[idx] for idx in range(len(countries))}
for name, city in mapping.items():
    print(f"{name} -> {city}")
| Andon-ov/Python-Fundamentals | 20_dictionaries_exercise/capitals.py | capitals.py | py | 398 | python | en | code | 0 | github-code | 13 |
"""
The ciphertext is a stream of ZERO/ONE words. Pipeline:
ZERO/ONE -> bits -> 8-bit ASCII chars -> base64 decode -> Morse decode.
"""
import base64

import morse_talk as mtalk

tokens = input().split()
bits = ""
for token in tokens:
    if token == "ONE":
        bits += '1'
    elif token == "ZERO":
        bits += '0'
    else:
        # Unexpected token: report it and keep going.
        print("another thing : '", token, "'.")

# Pack every 8 bits into one ASCII character (yields a base64 string).
text = ""
for start in range(0, len(bits), 8):
    text += chr(int(bits[start:start + 8], 2))
print(text)
print(len(text))

# base64 -> Morse code string.
text = base64.b64decode(text)
text = text.decode()
print(text)

# Morse -> plaintext (letters 'O' stand in for underscores).
text = mtalk.decode(text)
print(text)
# convert O to _, and add {}
ans = "ALEXCTF{TH15_1S_5UP3R_5ECR3T_TXT}"
print(ans)
| forward0606/CTF | encode/alexctf-2017: CR1: Ultracoded/decode.py | decode.py | py | 966 | python | en | code | 2 | github-code | 13 |
22391178157 | #!/usr/bin/env python3
from PIL import Image
import argparse
import pathlib
def image_to_pam(image_path, pam_path):
    """Convert any PIL-readable image to a binary PAM (P7) file.

    Writes an ASCII PAM header followed by the raw 8-bit RGB or RGBA samples
    in row-major order.

    Args:
        image_path: Path of the source image (any format PIL can open).
        pam_path: Destination path for the PAM file.
    """
    im = Image.open(image_path)  # Can be many different formats.
    pix = im.load()
    width, height = im.size
    channels = len(im.mode)
    assert channels == 3 or channels == 4

    # Fix: the sample buffer was previously named `bytes`, shadowing the
    # built-in type. A bytearray also lets us write the data out directly.
    samples = bytearray()
    for y in range(height):
        for x in range(width):
            for c in range(channels):
                samples.append(pix[x, y][c])

    # Header is ASCII text with Unix line endings.
    with open(pam_path, 'w', newline='\n') as f:
        f.write("P7\n")
        f.write(f"WIDTH {width}\n")
        f.write(f"HEIGHT {height}\n")
        f.write(f"DEPTH {channels}\n")
        f.write("MAXVAL 255\n")
        if channels == 3:
            f.write("TUPLTYPE RGB\n")
        elif channels == 4:
            f.write("TUPLTYPE RGB_ALPHA\n")
        f.write("ENDHDR\n")

    # Append the raw pixel data in binary mode.
    with open(pam_path, 'ab') as f:
        f.write(samples)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts any image file to PAM file type')
    parser.add_argument('input', nargs=1, help='The file to read')
    parser.add_argument('-o', '--output', nargs=1, help='The file to write', required=False)
    parser.add_argument('-r', '--reverse', action='store_true', help='Converts a .pam to .png instead', required=False)
    args = vars(parser.parse_args())
    # NOTE(review): --reverse is advertised but there is no else branch below,
    # so passing it makes the program do nothing. PAM->PNG is unimplemented.
    if not args["reverse"]:
        input_file = args["input"][0]
        # Default output: same name as the input with a .pam extension.
        output = input_file[:-len(pathlib.Path(input_file).suffix)] + ".pam"
        if args["output"] is not None:
            output = args["output"][0]
        image_to_pam(args["input"][0], output)
    # pix[x,y] = value # Set the RGBA Value of the image (tuple)
    # im.save('alive_parrot.png') # Save the modified pixels as .png
| IgniparousTempest/libretro-superflappybirds | engine/png_to_pam.py | png_to_pam.py | py | 1,808 | python | en | code | 7 | github-code | 13 |
30241219543 | '=======================================Функции===================================='
#функции - именованный блок кода который принимает аргументы и возвращает результат
# my_sum = lambda num1, num2: num1 + num2
# res = my_sum(5, 10)
# print(res)#15
#lambda - ключевое слово для создания анонимной функции
def my_sum2(num1, num2):
    """Return the sum of the two arguments."""
    total = num1 + num2
    return total
print(my_sum2) #<function my_sum2 at 0x7f2294367d90>
res = my_sum2(13, 45)
print(res) #58
def calc(num1, num2, oper):
    """Apply the binary operation named by oper to num1 and num2.

    oper is one of "+", "-", "*", "/"; any other value yields None.
    """
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    if oper in operations:
        return operations[oper](num1, num2)
    return None
print(calc(10, 12, '+')) #22
print(calc(10, 12, '-')) #12
print(calc(10, 12, '*')) #20
print(calc(10, 12, '/')) #3.0
print(calc(11, 23, '5')) #none
def my_len(obj):
    """Return the number of items produced by iterating obj."""
    return sum(1 for _ in obj)
print(my_len([15,23,14,64,12])) #5
print(my_len('asvdsvf')) # 7
print(my_len({'a':1, 'b':2}))# 2
def super_len(obj):
    """Length of obj; non-iterable values are measured via str(obj)."""
    try:
        return my_len(obj)
    except TypeError:
        # obj is not iterable - count the characters of its string form
        return my_len(str(obj))
print(super_len([1,2,3,4])) # 4
print(super_len(123456789)) # 9 (123456789)
print(super_len(None)) #4 ('None) - 4 буквы
'======================================DRY======================================'
#DRY - don't repeat yourself (не повторяйся)
# представим, что у нас нет функции len
"===========================аргументы и параметры============================="
# параметры - локальные переменные, значения которых мы задаем при вызове функции
# #аргументы - значения которые мы задаем параметрам при вызове функции
def func(var):
    # Parameters and locals both live in the function's local namespace.
    local_var = 5
    print(locals())
    # e.g. func(6) prints {'var': 6, 'local_var': 5}
func(6)
#print(local_var) NameError: name
#print(var) NameError
"==================================виды параметров================================="
#1.обязательные
#2.необязательные
#2.1 с дефолтным значением (по умолчаню)
#2.2 args (arguments)
#2.3kwargs (key word arguments)
def func(a, b='deafault', *args, **kwargs):
    # a - required positional parameter; b has a default value
    # args - tuple collecting the extra positional arguments
    # kwargs - dict collecting keyword arguments that did not match
    # a named parameter
    print(a,b, args, kwargs)
func('hello') #hello deafault
func('hello', '100')#hello 100
func('hello',100, 84, 23, 'world')#hello 100 (84, 23, 'world')
func('hello', 100, 10, 20, 30, key1='value1', key2=500)
# 'hello' 100 (10, 20, 30) {'key1': 'value1', 'key2': 500}
"======================================виды агументов======================================"
#позиционные (по порядку параметров)
#именнованные ( по имени параметров)
def func2(a, b):
    # Prints both parameters so positional vs keyword passing can be compared.
    print(f"a={a}, b={b}")
func2(10, 20)# позиционно
#a=10, b=20
func2(b=23, a=20) #именованно передает нам по порядку
#a=20, b=23
"+======================звездочки=========================="
list1 = [1,2,3]
list2 = [*list1]# * - распаковывает
print(list2)#грубо говоря копирует в новую ячейку
[1, 2, 3]
dict1 = {'a':1, 'b':2}
list3 = [*dict1]
# list3 = ['a', 'b']
dict2 = {**dict1}
#dict2 = {'a':1, 'b':2 | Bekaaaaaaaa/python27---lections- | functions/functions.py | functions.py | py | 4,007 | python | ru | code | 0 | github-code | 13 |
15851360535 | import numpy as np
import scipy
import read_data as rd
import wordle as w
import wordle_game as wg
import console_game as cg
import wordle_gui as gui
import random
class GameMode:
    # Plain integer constants selecting how the program runs (compared with ==).
    CONSOLE = 1
    SUGGESTED_GUESS_TESTING = 2
    GUI = 3
def main():
    """Program entry point: run Wordle in the mode selected by game_mode.

    CONSOLE plays interactively in the terminal, GUI opens the graphical
    interface, and SUGGESTED_GUESS_TESTING benchmarks every suggested-guess
    strategy against the same randomly chosen target words, printing summary
    statistics and pairwise differences.
    """
    #game_mode = GameMode.CONSOLE
    #game_mode = GameMode.SUGGESTED_GUESS_TESTING
    game_mode = GameMode.GUI

    word_length = 5
    #words = rd.read_word_file('wlist_match10.txt', word_length)
    words = rd.read_word_file('nyt_word_list.txt', word_length)

    # (Removed unused locals g/y/gr that aliased w.LetterResultCode values.)
    if game_mode == GameMode.CONSOLE:
        print(f'Starting game in console mode.')
        guess_type = w.SuggestedGuessType.EXPECTED_VALUE_GREEN_AND_YELLOW_50
        wordle = w.Wordle(words, guess_type)
        game = cg.ConsoleGame()
        game.play_game(wordle)
    elif game_mode == GameMode.GUI:
        #guess_type = w.SuggestedGuessType.EXPECTED_VALUE_GREEN_AND_YELLOW_50
        guess_type = w.SuggestedGuessType.ENTROPY
        wordle = w.Wordle(words, guess_type)
        game = gui.WordleGUI(wordle)
        game.show_form()
    elif game_mode == GameMode.SUGGESTED_GUESS_TESTING:
        NUM_REPETITIONS = 1000
        print(f'Starting suggested guess testing mode with {NUM_REPETITIONS} trials on each suggested guess type.')
        guess_types = [{"guess_type_name":"random", "guess_type":w.SuggestedGuessType.RANDOM},
                        {"guess_type_name":"EV green", "guess_type":w.SuggestedGuessType.EXPECTED_VALUE_GREEN},
                        {"guess_type_name": "EV green75 yellow25", "guess_type": w.SuggestedGuessType.EXPECTED_VALUE_GREEN_AND_YELLOW_25},
                        {"guess_type_name": "EV green50 yellow50", "guess_type": w.SuggestedGuessType.EXPECTED_VALUE_GREEN_AND_YELLOW_50},
                        {"guess_type_name": "EV green25 yellow75", "guess_type": w.SuggestedGuessType.EXPECTED_VALUE_GREEN_AND_YELLOW_75},
                        {"guess_type_name":"EV yellow", "guess_type":w.SuggestedGuessType.EXPECTED_VALUE_YELLOW}]
        # Fix: pass the actual SuggestedGuessType rather than the whole dict
        # when constructing the shared Wordle object. (It is reset() with the
        # real guess type before each use in the loop below.)
        wordle = w.Wordle(words, guess_types[0]["guess_type"])  # initialize
        num_guess_types = len(guess_types)
        # Fix: np.int was removed in NumPy 1.24; the builtin int is the
        # equivalent dtype specifier.
        num_turns_this_repetition = np.zeros((NUM_REPETITIONS, num_guess_types), int)
        failures = [[] for x in range(num_guess_types)]  # failed target words per strategy
        for i in range(NUM_REPETITIONS):
            # Pick one random target word; every strategy plays the same word.
            random_index = random.randint(0, len(words) - 1)
            random_word = words[random_index]
            game = wg.WordleGame(random_word)
            for j in range(num_guess_types):
                gt = guess_types[j]
                wordle.reset(gt["guess_type"])
                success = False
                while not success:
                    num_turns_this_repetition[i, j] += 1
                    suggested_guesses, guess_scores = wordle.get_suggested_guesses(1)
                    if len(suggested_guesses) == 0:
                        raise Exception(f'Error, there appear to be no words in the dictionary meeting these criteria.')
                    guess = suggested_guesses[0]
                    success, result = game.get_result(guess)
                    wordle.record_guess(guess, result)
                if num_turns_this_repetition[i, j] > w.Wordle.NUM_TURNS_ALLOWED:
                    failures[j].append(random_word)
        # Report per-strategy statistics, best (fewest turns) first.
        averages = np.mean(num_turns_this_repetition, axis=0)
        sorted_indices = np.argsort(averages)
        for j in range(len(sorted_indices)):
            index = sorted_indices[j]
            num_turns = num_turns_this_repetition[:,index]
            gt = guess_types[index]
            game_result = {}
            game_result["guess type"] = gt["guess_type_name"]
            avg = np.mean(num_turns)
            std_error = np.std(num_turns, ddof=1) / np.sqrt(NUM_REPETITIONS)
            game_result["turns needed to guess word"] = f'{avg} +/- {std_error}'
            num_failures = len(num_turns[num_turns > w.Wordle.NUM_TURNS_ALLOWED])
            game_result["num failures"] = f'{num_failures} / {NUM_REPETITIONS} ({100 * num_failures / NUM_REPETITIONS}%)'
            game_result["min turns needed"] = np.min(num_turns)
            game_result["max turns needed"] = np.max(num_turns)
            game_result["first 10 failures"] = failures[index][:10]
            print(game_result)
        # Show pairwise differences between adjacent strategies in the ranking.
        for j in range(1, len(sorted_indices)):
            index = sorted_indices[j]
            prev_index = sorted_indices[j-1]
            num_turns = num_turns_this_repetition[:, index]
            num_turns_prev = num_turns_this_repetition[:, prev_index]
            diff = num_turns - num_turns_prev
            avg = np.mean(diff)
            std_error = np.std(diff, ddof=1) / np.sqrt(NUM_REPETITIONS)
            print(f'{guess_types[index]["guess_type_name"]} - {guess_types[prev_index]["guess_type_name"]}: {avg} +/- {std_error}')
    else:
        raise Exception(f'game mode {game_mode} was not recognized.')
# Script entry point.
if __name__ == '__main__':
    main()
| joewestersund/wordle | main.py | main.py | py | 5,112 | python | en | code | 0 | github-code | 13 |
17521403997 | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import textwrap
# Global seaborn theme for all plots produced by this module.
sns.set(style="white",
        font="Arial",
        context="paper")
# Create box whisker function
def PlotBoxWhiskerByGroup(dataframe,
                          outcome_variable,
                          group_variable_1,
                          group_variable_2=None,
                          fill_color=None,
                          color_palette='Set2',
                          # Text formatting arguments
                          title_for_plot=None,
                          subtitle_for_plot=None,
                          caption_for_plot=None,
                          data_source_for_plot=None,
                          show_y_axis=False,
                          title_y_indent=1.1,
                          subtitle_y_indent=1.05,
                          caption_y_indent=-0.15,
                          x_indent=-0.128,
                          # Plot formatting arguments
                          figure_size=(8, 6)):
    """
    Create a box-and-whisker plot of an outcome variable, grouped by one or
    two categorical variables.

    Parameters:
        dataframe (pandas.DataFrame): The input dataframe.
        outcome_variable (str): The name of the outcome variable.
        group_variable_1 (str): The name of the first group variable.
        group_variable_2 (str, optional): The name of the second group variable. Defaults to None.
        fill_color (str, optional): Single fill color for the boxes. Defaults to None.
        color_palette (str, optional): Palette used when fill_color is None. Defaults to 'Set2'.
        title_for_plot (str, optional): Title; auto-generated when None.
        subtitle_for_plot (str, optional): Subtitle; auto-generated when None and two groups are given.
        caption_for_plot (str, optional): Caption text. Defaults to None.
        data_source_for_plot (str, optional): Data source appended to the caption. Defaults to None.
        show_y_axis (bool, optional): Currently unused; kept for interface compatibility.
        title_y_indent / subtitle_y_indent / caption_y_indent / x_indent (float): Text placement.
        figure_size (tuple, optional): Figure size in inches. Defaults to (8, 6).

    Returns:
        None
    """
    # If no plot title is specified, generate one
    if title_for_plot is None:
        if group_variable_2 is None:
            title_for_plot = outcome_variable + ' by ' + group_variable_1
        else:
            title_for_plot = outcome_variable

    # If no plot subtitle is specified, generate one
    if subtitle_for_plot is None and group_variable_2 is not None:
        subtitle_for_plot = ' by ' + group_variable_1 + ' and ' + group_variable_2

    # Create figure and axes
    fig, ax = plt.subplots(figsize=figure_size)

    # Generate box whisker plot
    if group_variable_2 is not None:
        if fill_color is not None:
            ax = sns.boxplot(
                data=dataframe,
                x=group_variable_1,
                y=outcome_variable,
                hue=group_variable_2,
                color=fill_color
            )
        else:
            ax = sns.boxplot(
                data=dataframe,
                x=group_variable_1,
                y=outcome_variable,
                hue=group_variable_2,
                palette=color_palette
            )
    else:
        if fill_color is not None:
            ax = sns.boxplot(
                data=dataframe,
                x=group_variable_1,
                y=outcome_variable,
                color=fill_color
            )
        else:
            ax = sns.boxplot(
                data=dataframe,
                x=group_variable_1,
                y=outcome_variable,
                palette=color_palette
            )

    # Remove top, and right spines. Set bottom and left spine to dark gray.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_color("#262626")
    ax.spines['left'].set_color("#262626")

    # Add space between the title and the plot
    plt.subplots_adjust(top=0.85)

    # Set the title with Arial font, size 14, and color #262626 at the top of the plot
    ax.text(
        x=x_indent,
        y=title_y_indent,
        s=title_for_plot,
        fontname="Arial",
        fontsize=14,
        color="#262626",
        transform=ax.transAxes
    )

    # Set the subtitle with Arial font, size 11, and color #666666
    ax.text(
        x=x_indent,
        y=subtitle_y_indent,
        s=subtitle_for_plot,
        fontname="Arial",
        fontsize=11,
        color="#666666",
        transform=ax.transAxes
    )

    # Bug fix: build a new list of wrapped tick labels. The previous loop
    # assigned textwrap.fill()'s result to the loop variable only, so the
    # labels were never actually wrapped.
    group_variable_1_tick_labels = ax.get_xticklabels()
    group_variable_1_tick_labels = [label.get_text() for label in group_variable_1_tick_labels]
    group_variable_1_tick_labels = [
        textwrap.fill(label, 30, break_long_words=False)
        for label in group_variable_1_tick_labels
    ]
    ax.set_xticklabels(group_variable_1_tick_labels)

    # Set x-axis tick label font to Arial, size 9, and color #666666
    ax.tick_params(
        axis='x',
        which='major',
        labelsize=9,
        labelcolor="#666666",
        pad=2,
        bottom=True,
        labelbottom=True
    )
    plt.xticks(fontname='Arial')

    # Set y-axis tick label font to Arial, size 9, and color #666666
    ax.tick_params(
        axis='y',
        which='major',
        labelsize=9,
        labelcolor="#666666",
        pad=2,
        bottom=True,
        labelbottom=True
    )
    plt.yticks(fontname='Arial')

    # Add a word-wrapped caption if one is provided
    if caption_for_plot is not None or data_source_for_plot is not None:
        # Create starting point for caption
        wrapped_caption = ""
        # Add the caption to the plot, if one is provided
        if caption_for_plot is not None:
            # Word wrap the caption without splitting words
            wrapped_caption = textwrap.fill(caption_for_plot, 110, break_long_words=False)
        # Add the data source to the caption, if one is provided
        if data_source_for_plot is not None:
            wrapped_caption = wrapped_caption + "\n\nSource: " + data_source_for_plot
        # Add the caption to the plot
        ax.text(
            x=x_indent,
            y=caption_y_indent,
            s=wrapped_caption,
            fontname="Arial",
            fontsize=8,
            color="#666666",
            transform=ax.transAxes
        )

    # Show plot
    plt.show()

    # Clear plot
    plt.clf()
# # Test function
# from sklearn import datasets
# iris = pd.DataFrame(datasets.load_iris(as_frame=True).data)
# iris['species'] = datasets.load_iris(as_frame=True).target
# PlotBoxWhiskerByGroup(
# dataframe=iris,
# outcome_variable='sepal length (cm)',
# group_variable_1='species',
# title_for_plot='Sepal Length by Species',
# subtitle_for_plot='A test visulaization using the iris dataset',
# )
| KyleProtho/AnalysisToolBox | Python/Visualizations/PlotBoxWhiskerByGroup.py | PlotBoxWhiskerByGroup.py | py | 7,324 | python | en | code | 0 | github-code | 13 |
34015666232 | # -*- coding: cp1251 -*-
import sys
import json
import time
import math
import datetime
import requests
import psycopg2
rows_count = 20000
"""
Необходимое количество записей
"""
if 1 < len(sys.argv):
    # Bug fix: argv values are strings; convert so rows_count keeps the same
    # integer type as its default.
    rows_count = int(sys.argv[1])
print(rows_count)
"""
Функция для подключения к базе данных
"""
def sql_connect():
    # Open a connection to the project's PostgreSQL instance.
    # NOTE(review): credentials are hard-coded; consider moving them to
    # environment variables or a config file.
    return psycopg2.connect(
        host="192.168.77.66", port="5432",
        dbname="postgres",
        user="hackaton", password="p2eEK)J34YMfsJa"
    )
"""
Выполняет запрос к API
kwards: Параметры url
return: список объявлений
"""
def get_data(kwards=None):
    """Query the ads API; kwards holds extra URL parameters to append.

    Returns the list of listings, or an empty list when the API reports an
    error code.
    """
    # NOTE(review): the account e-mail and token are hard-coded in the URL;
    # they should come from configuration, not source code.
    if kwards is None:  # avoid the mutable-default-argument pitfall
        kwards = {}
    req = "https://ads-api.ru/main/api?user=valerazbanovqs@gmail.com&token=18d79b6cf2715733470f43c0c18d2575&category_id=2&source=1&city=Москва&is_actual=1"
    for key, val in kwards.items():
        if val is not None:  # skip unset parameters
            req += "&{0}={1}".format(key, val)
    obj = json.loads(requests.get(req).content)
    if obj["code"] != 200:
        print(req, obj["code"])
        return []
    print(req, obj["code"], len(obj["data"]))
    return obj["data"]
"""
Получает или создаёт значение атрибута в заданной таблице
conn: подключение к базе данных
table_name: название таблицы
value: искомое значение
return: ключ, соответствующий значению
"""
def get_param_id(conn, table_name, value):
    """Return the id of `value` in lookup table `table_name`, inserting a new
    row when the value is not present yet.

    Raises:
        Exception: if value is None.
    """
    if value is None:
        raise Exception("Значение должно быть определено")
    cursor = conn.cursor()
    # Security fix: bind `value` as a query parameter instead of formatting
    # it into the SQL string (prevents SQL injection via attribute values).
    # table_name is still interpolated, so it must come from trusted callers.
    sql = "SELECT id from {0} WHERE Название = %s".format(table_name)
    cursor.execute(sql, [value])
    rows = cursor.fetchall()
    if len(rows) <= 0:
        sql = "INSERT INTO {0}(Название) VALUES (%s) RETURNING Id;".format(table_name)
        cursor.execute(sql, [value])
        conn.commit()
        print("Add", value, "to", table_name)
        return cursor.fetchone()[0]
    return rows[0][0]
"""
Получает атрибут объявления
encoder: конвертирует значение параметра в нужный вид
path: название таблицы
obj: анализируемый объект
default: значение по умолчанию
return: атрибут объявления
"""
def get_param(encoder, path, obj, default):
    """Walk obj along the '/'-separated path; if every key exists, return
    encoder(value), otherwise return the supplied default."""
    current = obj
    for segment in path.split('/'):
        if segment not in current:
            return default
        current = current[segment]
    return encoder(current)
"""
Преобразует значение сегмента
"""
class encode_segment():
    """Callable that refines the 'Вторичка' (secondary-market) segment label
    based on the building's construction year; other labels pass through."""

    def __init__(self, year):
        self.year = year

    def __call__(self, input):
        # Only the secondary-market label with a known year gets refined.
        if input != "Вторичка" or self.year is None:
            return input
        year = self.year
        cur_year = datetime.datetime.now().year
        if cur_year - 4 < year:
            return "Новостройка"
        elif 1989 <= year:
            return "Современное жилье"
        elif 1930 <= year <= 1956:
            return "Сталинка"
        elif 1956 <= year <= 1985:
            return "Xрущевка"
        elif year < 1989:
            return "Старый жилой фонд"
        return input
"""
Преобразует значение материала стен
"""
def encode_wall_material(input):
    """Map a Russian wall-material label to its short form (None if unknown)."""
    mapping = {
        "Монолитный": "монолит",
        "Панельный": "панель",
        "Блочный": "блок",
        "Кирпичный": "кирпич",
        "Деревянный": "дерево",
    }
    return mapping.get(input)
"""
Преобразует значение площади
"""
def encode_area(input):
    """Parse the leading number out of an area string such as '45.2 м²'."""
    first_token = input.split(' ')[0]
    return float(first_token)
"""
Преобразует значение наличия балкона
"""
def encode_balcony(input):
    # Presence flag: any balcony/loggia description maps to 1.
    return 1
"""
Преобразует значение состояния
"""
class encode_condition():
    """Callable that normalizes the renovation-state label, taking the
    finishing attribute (отделка) into account."""

    def __init__(self, отделка):
        self.отделка = отделка

    def __call__(self, input):
        # "Без отделки" finishing overrides whatever state was reported.
        state = "Муниципальный" if self.отделка == "Без отделки" else input
        if state == "Без ремонта":
            return "Без отделки"
        return state + " ремонт"
"""
Преобразует значение строки (одного объявления)
"""
def read_row(row):
    """Convert one raw listing dict into the flat attribute dict stored in the DB.

    Raises when the listing is not a sale advert ("Продам"). The returned
    dict's key order matches the column order of the INSERT in __main__.
    """
    if row["param_1943"] != "Продам":
        raise Exception("Отсутствует информация о типе объявления")
    id = get_param(int, "id", row, 0)
    coordx = get_param(float, "coords/lat", row, None)
    coordy = get_param(float, "coords/lng", row, None)
    Местоположение = get_param(str, "address", row, None)
    КоличествоКомнат = get_param(str, "params/Количество комнат", row, None)
    year = get_param(int, "params2/О доме/Год постройки", row, 0)
    Сегмент = get_param(encode_segment(year), "param_1957", row, None)
    ЭтажностьДома = get_param(int, "params/Этажей в доме", row, None)
    МатериалСтен = get_param(encode_wall_material, "params2/О доме/Тип дома", row, None)
    ЭтажРасположения = get_param(int, "params/Этаж", row, None)
    ПлощадьКвартиры = get_param(encode_area, "params/Площадь", row, None)
    # NOTE(review): the 15%-of-area default below is evaluated eagerly, so a
    # listing without "params/Площадь" raises TypeError (None * 0.15) — confirm
    # upstream data always carries the flat area.
    ПлощадьКухни = get_param(encode_area, "params2/О квартире/Площадь кухни", row, ПлощадьКвартиры * 0.15)
    НаличиеБалконаЛоджии = get_param(encode_balcony, "params2/О квартире/Балкон или лоджия", row, 0)
    Метро = get_param(str, "metro", row, None)
    МетроКМ = get_param(float, "km_do_metro", row, None)
    # NOTE(review): crashes when km_do_metro is missing (ceil of None); the
    # divisor 5 presumably models a 5 km/h walking speed — TODO confirm.
    МетроМин = math.ceil(МетроКМ / 5 * 60)
    Отделка = get_param(str, "params2/О квартире/Отделка", row, None)
    Состояние = get_param(encode_condition(Отделка), "params2/О квартире/Ремонт", row, None)
    Стоимость = get_param(float, "price", row, None)
    return {
        "id" : id,
        "coordx" : coordx,
        "coordy" : coordy,
        "Местоположение" : Местоположение,
        "КоличествоКомнат" : КоличествоКомнат,
        "Сегмент" : Сегмент,
        "ЭтажностьДома" : ЭтажностьДома,
        "МатериалСтен" : МатериалСтен,
        "ЭтажРасположения" : ЭтажРасположения,
        "ПлощадьКвартиры" : ПлощадьКвартиры,
        "ПлощадьКухни" : ПлощадьКухни,
        "НаличиеБалконаЛоджии" : НаличиеБалконаЛоджии,
        "Метро" : Метро,
        "МетроКМ" : МетроКМ,
        "МетроМин" : МетроМин,
        "Состояние" : Состояние,
        "Стоимость" : Стоимость,
    }
"""
Преобразует значение наличия балкона
"""
def replace_id(conn, obj):
    """Replace the categorical string attributes of *obj* with dictionary-table ids.

    Mutates and returns the same dict; unknown values are inserted into their
    dictionary tables by get_param_id.
    """
    obj["КоличествоКомнат"] = get_param_id(conn, "ТипКоличестваКомнат", obj["КоличествоКомнат"])
    obj["Сегмент"] = get_param_id(conn, "ТипСегмента", obj["Сегмент"])
    obj["МатериалСтен"] = get_param_id(conn, "ТипМатериалаСтен", obj["МатериалСтен"])
    obj["Состояние"] = get_param_id(conn, "ТипСостояния", obj["Состояние"])
    return obj
if __name__ == "__main__":
# Производим подключение к базе данных
conn = sql_connect()
cursor = conn.cursor()
# Временной интервал
step = 30
date1 = datetime.datetime.now() - datetime.timedelta(hours=7) - datetime.timedelta(minutes=step)
date2 = datetime.datetime.now() - datetime.timedelta(hours=7)
count = 0
# Запрос для добавления нового объекта недвижимости
sql = "INSERT INTO Недвижимость VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (Id) DO NOTHING;"
while count < rows_count:
# Получаем блок данных
batch = get_data({"date1" : date1, "date2" : date2})
time.sleep(5)
date1 -= datetime.timedelta(minutes=step)
date2 -= datetime.timedelta(minutes=step)
# Перебираем объекты
for row in batch:
try:
# Получаем атрибуты
obj = read_row(row)
# Кодируем нужные атрибуты
obj = replace_id(conn, obj)
# Добавляем объект в базу данных
val = list(obj.values())
cursor.execute(sql, val)
count += 1
# Фиксируем каждые 100 записей
if count % 100 == 0:
conn.commit()
except Exception as exc:
print(exc)
cursor.close()
conn.close()
| misterobot404/estate-price-calculator | worker.py | worker.py | py | 7,839 | python | ru | code | 1 | github-code | 13 |
36164666696 | # -*- coding: utf-8 -*-
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import re
from fake_useragent import UserAgent
import requests
def customer_review_flipkart(main_url):
    """Scrape customer review texts from a Flipkart review listing page.

    main_url: review-listing URL; '&page=N' is appended for pagination.
    Review texts are written one per line to 'final_review.txt' and the
    number of reviews written is printed.
    """
    page_url_template = main_url + '&page='
    count = 0
    # Bug fix: the original wrote ``str.encode("utf-8")`` bytes into a
    # text-mode handle, which raises TypeError on Python 3. Open the file in
    # text mode with an explicit encoding and write the strings directly;
    # the context manager guarantees the file is closed.
    with open("final_review.txt", "w", encoding="utf-8") as out:
        for page_no in range(1, 2):
            session = requests.Session()
            session.headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
            url = page_url_template + str(page_no)
            print(url)
            page = session.get(url)
            soup = BeautifulSoup(page.content, 'lxml')
            # The review bodies live in div.qwjRop inside the results column.
            review_tags = soup.find('div', class_='_1HmYoV _35HD7C col-9-12').find_all('div', class_="qwjRop")
            reviews = [tag.div.div.string for tag in review_tags]
            for review_text in reviews:
                if review_text:
                    out.write(review_text)
                    out.write("\n")
                    count += 1
            # Be polite to the server between page fetches.
            sleep(2)
    print(count)
# Drive a real Firefox session to find the review page of the first
# 'washing machine' search result, then hand its URL to the scraper.
driver = webdriver.Firefox(executable_path='/home/posi2/Downloads/geckodriver')
driver.get('https://www.flipkart.com')
# close the login window by checking 2nd button
driver.find_element_by_xpath("(//button)[2]").click()
driver.implicitly_wait(10) # implicitly wait for the 10 second for loading of the website.
search_bar = driver.find_elements_by_name('q')
driver.implicitly_wait(5) # implicitly wait for the 5 second for better view.
search_bar[0].send_keys('washing machine')
search_bar[0].submit()
driver.implicitly_wait(10) # implicitly wait for the 10 second for loading of the website.
# First product card in the results grid.
element = driver.find_element_by_class_name("_31qSD5")
new_url_component = element.get_attribute("href")
driver.get(new_url_component)
# Last anchor in the ratings column links to the "all reviews" page.
element = driver.find_element(By.CSS_SELECTOR,".col._39LH-M").find_elements(By.TAG_NAME,"a")
final_url_component = element[-1].get_attribute("href")
driver.get(final_url_component)
sleep(10)
driver.close()
# The scraping itself uses requests, not the (now closed) browser session.
customer_review_flipkart(final_url_component)
| posi2/web-scrapping | customer_review_flipkart_selenium.py | customer_review_flipkart_selenium.py | py | 2,525 | python | en | code | 0 | github-code | 13 |
38682245956 | from data import get_mnist
import numpy as np
import matplotlib.pyplot as plt
"""
w = weights, b = bias, i = input, h = hidden, o = output, l = label
e.g. w_i_h = weights from input layer to hidden layer
"""
images, labels = get_mnist()  # load the MNIST images and labels
# images has shape (60000, 784); labels has shape (60000, 10) (one-hot)
# weights
w_i_h = np.random.uniform(-0.5, 0.5, (20, 784))  # 20 hidden units x 784 inputs
w_h_o = np.random.uniform(-0.5, 0.5, (10, 20))  # 10 output units x 20 hidden units
# biases (initialised to zero)
b_i_h = np.zeros((20, 1))
b_h_o = np.zeros((10, 1))
learn_rate = 0.01
nr_correct = 0
epochs = 3  # number of full passes over the training data
# training the neural network
for epoch in range(epochs):
    for img, l in zip(images, labels):  # one sample at a time (pure SGD)
        img.shape += (1,)  # reshape the vector into a column matrix
        l.shape += (1,)  # reshape the label into a column matrix
        # Forward propagation input -> hidden
        h_pre = b_i_h + w_i_h @ img # @-matrix multiplication
        h = 1 / (1 + np.exp(-h_pre))  # sigmoid activation squashes pre-activations into (0, 1)
        # Forward propagation hidden -> output
        o_pre = b_h_o + w_h_o @ h
        o = 1 / (1 + np.exp(-o_pre))
        # compare the network output against the label
        # Cost / Error calculation
        e = 1 / len(o) * np.sum((o - l) ** 2, axis=0)  # mean squared error
        nr_correct += int(np.argmax(o) == np.argmax(l))  # count correct classifications
        # Backpropagation output -> hidden (cost function derivative)
        delta_o = o - l  # difference between output and label
        w_h_o += -learn_rate * delta_o @ np.transpose(h)
        b_h_o += -learn_rate * delta_o
        # Backpropagation hidden -> input (activation function derivative)
        delta_h = np.transpose(w_h_o) @ delta_o * (h * (1 - h))
        w_i_h += -learn_rate * delta_h @ np.transpose(img)
        b_i_h += -learn_rate * delta_h
    # Show accuracy for this epoch
    print(f"Acc: {round((nr_correct / images.shape[0]) * 100, 2)}%")
    nr_correct = 0
# Show results
while True:
    index = int(input("Enter a number (0 - 59999): "))
    img = images[index]  # pick the chosen training image
    plt.imshow(img.reshape(28, 28), cmap="Greys")  # draw the digit onto the plot
    # run one forward-propagation pass to get the output values
    img.shape += (1,)
    # Forward propagation input -> hidden
    h_pre = b_i_h + w_i_h @ img.reshape(784, 1)
    h = 1 / (1 + np.exp(-h_pre))
    # Forward propagation hidden -> output
    o_pre = b_h_o + w_h_o @ h
    o = 1 / (1 + np.exp(-o_pre))
    plt.title(f"Subscribe if its a {o.argmax()} :)")  # title shows the most strongly activated output neuron
    plt.show()  # display the plot
| N1ko1a/MNIST-Neural-Network | nn.py | nn.py | py | 2,950 | python | en | code | 2 | github-code | 13 |
13061050435 | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from numpy import get_include
# Cython extension modules, all compiled against the NumPy C headers.
ext_modules = [
    Extension("staticgraph.graph_edgelist",
              ["staticgraph/graph_edgelist.pyx"],
              include_dirs=[get_include()]),
    Extension("staticgraph.digraph_edgelist",
              ["staticgraph/digraph_edgelist.pyx"],
              include_dirs=[get_include()]),
    Extension("staticgraph.wgraph_edgelist",
              ["staticgraph/wgraph_edgelist.pyx"],
              include_dirs=[get_include()]),
    Extension("staticgraph.wdigraph_edgelist",
              ["staticgraph/wdigraph_edgelist.pyx"],
              include_dirs=[get_include()]),
    Extension("staticgraph.links",
              ["staticgraph/links.pyx"],
              include_dirs=[get_include()]),
    Extension("staticgraph.components",
              ["staticgraph/components.pyx"],
              include_dirs=[get_include()]),
]
packages = ["staticgraph"]
# Cython's build_ext compiles the .pyx sources during `setup.py build_ext`.
setup(
    name = "StaticGraph",
    version = "0.1",
    packages = packages,
    ext_modules = ext_modules,
    cmdclass = {'build_ext': build_ext}
)
| parantapa/staticgraph | setup.py | setup.py | py | 1,181 | python | en | code | 1 | github-code | 13 |
20798536650 | # 3. Write a function that receives as parameters two lists a and b and returns: (a intersected with b, a reunited with b, a - b, b - a)
# Shared result holder: every function below overwrites this module-level list
# and prints it instead of returning a value.
list_C = []
def intersection_of_lists(list_A,list_B) :
    """Print the elements of list_A that also occur in list_B (keeps order/duplicates of A)."""
    global list_C
    list_C = [value for value in list_A if value in list_B]
    print(list_C)
def reunion_of_lists(list_A, list_B) :
    """Print the concatenation of the two lists (duplicates preserved)."""
    global list_C
    list_C = list_A + list_B
    print(list_C)
def remove_listB_from_listA(list_A, list_B):
    """Print B - A as a set difference.

    NOTE(review): converting through sets drops duplicates and ordering.
    """
    global list_C
    list_C = list(set(list_B).difference(set(list_A)))
    print(list_C)
def remove_listA_from_listB(list_A, list_B):
    """Print A - B as a set difference (duplicates and order lost, as above)."""
    global list_C
    list_C = list(set(list_A).difference(set(list_B)))
    print(list_C)
# Read the two space-separated integer lists and run all four operations.
list_A = [int(x) for x in input("Enter elements of first list here: ").split(" ")]
list_B = [int(x) for x in input("Enter elements of second list here: ").split(" ")]
print("Intersection of lists : ")
intersection_of_lists(list_A, list_B)
print()
print("Reunion of lists : ")
reunion_of_lists(list_A, list_B)
print()
print("List A - List B : ")
remove_listA_from_listB(list_A, list_B)
print()
print("List B - List A : ")
remove_listB_from_listA(list_A, list_B)
17044832834 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniVersionGrayOnlineModel(object):
    """Parameter model for taking a gray-released mini-program version fully online."""

    # Names of the three serialisable fields, in a single place so the
    # (de)serialisation methods can iterate instead of repeating themselves.
    _FIELD_NAMES = ('app_version', 'bundle_id', 'gray_strategy')

    def __init__(self):
        # Backing storage for the public properties below.
        self._app_version = None
        self._bundle_id = None
        self._gray_strategy = None

    @property
    def app_version(self):
        return self._app_version

    @app_version.setter
    def app_version(self, value):
        self._app_version = value

    @property
    def bundle_id(self):
        return self._bundle_id

    @bundle_id.setter
    def bundle_id(self, value):
        self._bundle_id = value

    @property
    def gray_strategy(self):
        return self._gray_strategy

    @gray_strategy.setter
    def gray_strategy(self, value):
        self._gray_strategy = value

    def to_alipay_dict(self):
        """Serialise every set (truthy) field into a plain dict for the gateway."""
        params = dict()
        for name in AlipayOpenMiniVersionGrayOnlineModel._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            # Nested API objects know how to serialise themselves.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Rebuild a model instance from a gateway dict; None for empty input."""
        if not d:
            return None
        o = AlipayOpenMiniVersionGrayOnlineModel()
        for name in AlipayOpenMiniVersionGrayOnlineModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayOpenMiniVersionGrayOnlineModel.py | AlipayOpenMiniVersionGrayOnlineModel.py | py | 1,972 | python | en | code | 241 | github-code | 13 |
28596486300 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 6 23:33:03 2021
@author: Dartoon
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import pandas as pd
import glob
# Compare this pipeline's two-AGN fit (positions + magnitudes) against
# Shenli's catalogue, per band; collect IDs whose positions do not match.
s_sample = pd.read_csv('../Shenli_data/five_band_color.csv', index_col = 0)
folder = 'NTT_candidates/'
# folder = 'extra_interesting/'
# NOTE(review): files opened in this script are never closed.
f = open(folder+"cut_out.txt","r")
string = f.read()
cut_out = string.split('\n') # Split in to \n
files = glob.glob(folder+'*I.fits')
files.sort()
import math
mismatch_overall = []
ID_issue = []
for band in ['I', 'G', 'R']: #, 'Z', 'Y']:
    mismatch = []
    for i in range(len(files)):
        ID = cut_out[i].split(' ')[0]
        #Load Shenli's fitting
        # NOTE(review): bare except silently skips IDs absent from the
        # catalogue (and would also hide any other error here).
        try:
            l = np.where(s_sample['ID'] == ID)[0][0]
        except:
            # print(ID, "exist in source_list.asc but not in five_band_color.csv")
            continue
        s_AGN_0_pos = np.array([s_sample['qso_RA'][l], s_sample['qso_DEC'][l]])
        s_AGN_1_pos = np.array([s_sample['com_RA'][l], s_sample['com_DEC'][l]])
        s_AGN_mag = np.array( [s_sample['qso_{0}'.format(band.lower())][l], s_sample['com_{0}'.format(band.lower())][l]])
        file = glob.glob('NTT_candidates/fit_result/{0}/fit_result_{1}-band.txt'.format(ID, band))
        if (math.isnan( s_AGN_mag[0] ) == True or math.isnan( s_AGN_mag[1] ) == True) and file != []:
            print(ID, 'Shenli did not get the band of', band, 'but, it exist')
            # ID_issue.append(ID)
        elif math.isnan( s_AGN_mag[0] ) == False and math.isnan( s_AGN_mag[1] ) == False and file == []:
            print(ID, 'I did not get the band of', band, 'but, Shenli get')
        elif math.isnan( s_AGN_mag[0] ) == False and math.isnan( s_AGN_mag[1] ) == False:
            # Parse the fit-result text file; Trust_fitting selects which of
            # the repeated fit entries in the file to use.
            f = open(file[0],"r")
            Trust_fitting = 2
            string = f.read()
            lines = string.split('\n') # Split in to \n
            l0 = [ j for j in range(len(lines)) if 'AGN mag:' in lines[j]]
            AGN_mag = lines[l0[Trust_fitting]]
            AGN_mag = AGN_mag.split(' ')[2:4]
            AGN_mag = np.array([float(AGN_mag[0]), float(AGN_mag[1])])
            l1 = [ j for j in range(len(lines)) if 'AGN0 position:' in lines[j]]
            AGN_0_pos = np.array([float(lines[l1[Trust_fitting-1]].split('RA: ')[1].split(' ')[0]),
                                  float(lines[l1[Trust_fitting-1]].split('DEC: ')[1].split(';')[0]) ])
            AGN_1_pos = np.array([float(lines[l1[Trust_fitting-1]].split('RA: ')[2].split(' ')[0]),
                                  float(lines[l1[Trust_fitting-1]].split('DEC: ')[2].split(';')[0]) ])
            # Angular separations in arcsec (degrees * 3600) between each of
            # Shenli's positions and each of our two fitted positions.
            offset_0 = np.array([np.sqrt(np.sum((s_AGN_0_pos - AGN_0_pos)**2))*3600, np.sqrt(np.sum((s_AGN_0_pos - AGN_1_pos)**2))*3600])
            offset_1 = np.array([np.sqrt(np.sum((s_AGN_1_pos - AGN_0_pos)**2))*3600, np.sqrt(np.sum((s_AGN_1_pos - AGN_1_pos)**2))*3600])
            if np.min(offset_0) > 0.6 or np.min(offset_1) > 0.6:
                test = 0
                print(ID, 's_AGN0 position could not match', 'AGN0_match:', (np.min(offset_0) < 0.6),
                      'AGN1_match:',(np.min(offset_1) < 0.6), band)
                ID_issue.append(ID)
            else:
                # Pair each catalogue AGN with its nearest fitted AGN; a
                # collision (both map to the same fitted AGN) is a problem.
                order = np.array([np.where(offset_0 == offset_0.min())[0][0], np.where(offset_1 == offset_1.min())[0][0] ])
                if order[0] == order[1]:
                    print(ID, "There is a position match problem for ID")
                    ID_issue.append(ID)
                else:
                    mag_offset = [s_AGN_mag[0] - AGN_mag[order[0]], s_AGN_mag[1] - AGN_mag[order[1]] ]
                    mismatch.append(mag_offset)
                    # if np.max(mag_offset) > 1:
                    #     print(ID, band, 'AGN mag mismatch')
                    #     ID_issue.append(ID)
    mismatch_overall.append(mismatch)
mismatch_i = np.array(mismatch_overall[0])
# De-duplicate while preserving first-seen order.
ID_issue = list(dict.fromkeys(ID_issue))
#%%
# Copy the diagnostic aperture plots of every problematic ID for inspection.
import shutil
for band in ['I', 'G', 'R']:
    for ID in ID_issue:
        # copy_f = glob.glob('NTT_candidates/fit_result/{0}/fit_{1}-band_fit2_PSPS+Sersic_*.pdf'.format(ID, band))
        # if copy_f != []:
        #     shutil.copy(copy_f[0], '/Users/Dartoon/Downloads/NTT_issue/'+ID+'_{0}-band_fit2.pdf'.format(band))
        shutil.copy('NTT_candidates/fit_result/{0}/fitting2_used_aper.pdf'.format(ID),
                    '/Users/Dartoon/Downloads/NTT_issue/'+ID+'_fitting2_used_aper.pdf')
40307304356 | SECRET_KEY = 'asdf'
# Minimal search backend — no external search engine needed in tests.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
}
# In-memory SQLite keeps the test database ephemeral and fast.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}
import logging
# Silence all log output during test runs.
logging.disable(logging.CRITICAL)
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.flatpages',
    'django.contrib.staticfiles',
    # External apps
    'oscar_sagepay',
]
from oscar import get_core_apps
# Append the Oscar core apps to the app list.
INSTALLED_APPS = INSTALLED_APPS + get_core_apps()
OSCAR_SAGEPAY_VENDOR = 'dummy'
from oscar.defaults import * # noqa
# Import private settings used for external tests
try:
    from sandbox.private_settings import * # noqa
except ImportError:
    pass
| django-oscar/django-oscar-sagepay-direct | tests/settings.py | settings.py | py | 902 | python | en | code | 4 | github-code | 13 |
896248956 | # -*- encoding: utf-8 -*-
from discord.ext import commands
from discord import app_commands, Interaction, Color, Embed
from views import SimpleEmbed, SimpleButton
from setup import logger
class CommandsCog(commands.Cog):
    """A cog is a collection of commands, listeners, and optional state to help group commands together."""

    def __init__(self, bot: commands.Bot) -> None:
        """Store the bot instance this cog is attached to."""
        self.bot = bot

    @app_commands.command(name="ping", description="Ping the bot, check if it works.")
    async def _ping(self, interaction: Interaction):
        """Reply with a fixed message so the user can verify the bot is alive."""
        await interaction.response.send_message("Pong!")

    @app_commands.command(name="say", description="Make the bot say something.")
    @app_commands.describe(say="Write something for the bot to say.")
    async def _say(self, interaction: Interaction, say: str):
        """Echo the user's text back to the channel."""
        await interaction.response.send_message("You wrote %s" % say)

    @app_commands.command(name='get-request', description='Make a GET request to any API.')
    @app_commands.describe(url="URL / Endpoint / API")
    async def _get_request(self, interaction: Interaction, url: str) -> None:
        """Show a confirmation embed and button before a GET request to *url* is made."""
        embed_factory = SimpleEmbed()
        # Bug fix: user-facing prompt read "Do you confirmmmmm?".
        confirmation_embed = embed_factory.simple(
            color=Color.from_rgb(225, 198, 153), author="GET Request.", field=url, value="Do you confirm?")
        await interaction.channel.send(embed=confirmation_embed)
        # The button view carries the URL and performs the request once confirmed.
        button_view = SimpleButton()
        button_view.url = url
        await interaction.response.send_message(view=button_view)
16027270204 | # 유기농배추
# 백준 1012
# 난이도 : 실버2
# 인접해있는 1 묶음의 개수 구하기
from collections import deque
# 동서남북
dy = (0, 0, 1, -1)
dx = (1, -1, 0, 0)
# bfs 코드
def bfs(X, Y):
    """Flood-fill one connected patch of cabbages starting at (X, Y).

    Clears every reachable cell holding 1 back to 0 in the module-level
    ``field`` grid; relies on the globals M, N, dx and dy.
    """
    frontier = deque([(X, Y)])
    field[X][Y] = 0
    while frontier:
        cx, cy = frontier.popleft()
        for direction in range(4):
            nx = cx + dx[direction]
            ny = cy + dy[direction]
            # Skip neighbours outside the M x N grid.
            if not (0 <= nx < M and 0 <= ny < N):
                continue
            if field[nx][ny] == 1:
                field[nx][ny] = 0
                frontier.append((nx, ny))
# Number of test cases
T = int(input())
for _ in range(T):
    # width N, height M, number of cabbages (1s) K
    N, M, K = map(int, input().split())
    # the field grid, M rows x N columns
    field = [[0 for _ in range(N)] for _ in range(M)]
    # cabbage positions (given as X Y, stored row-major as field[Y][X])
    for _ in range(K):
        X, Y = map(int, input().split())
        field[Y][X] = 1
    worm = 0
    # Each BFS clears one connected patch; one worm is needed per patch.
    for a in range(M):
        for b in range(N):
            if field[a][b] == 1:
                bfs(a, b)
                worm += 1
    print(worm)
605559379 | import io
import os
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
def get_requirements():
    """Read requirements.txt (next to this file) into a list of stripped lines."""
    req_file = os.path.join(os.path.dirname(__file__), "requirements.txt")
    with io.open(req_file, "r", encoding="utf-8") as handle:
        raw_lines = handle.readlines()
    return [entry.strip() for entry in raw_lines]
def get_long_description():
    """Return the full text of the README.md that sits next to this file."""
    readme_file = os.path.join(os.path.dirname(__file__), "README.md")
    with io.open(readme_file, "r", encoding="utf-8") as handle:
        contents = handle.read()
    return contents
# The extension only ships CUDA kernels, so a CUDA-capable PyTorch install
# is required even to build the package.
if not torch.cuda.is_available():
    raise Exception("CPU version is not implemented")
requirements = get_requirements()
long_description = get_long_description()
setup(
    name="warp_rnnt",
    version="0.7.0",
    description="PyTorch bindings for CUDA-Warp RNN-Transducer",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/1ytic/warp-rnnt/tree/master/pytorch_binding",
    author="Ivan Sorokin",
    author_email="i.sorok1n@icloud.com",
    license="MIT",
    packages=find_packages(),
    # Single CUDA extension compiled from the .cu kernels plus the binding.
    ext_modules=[
        CUDAExtension(
            name="warp_rnnt._C",
            sources=[
                "core.cu",
                "core_gather.cu",
                "core_compact.cu",
                "binding.cpp"
            ]
        )
    ],
    # BuildExtension wires nvcc into the normal build_ext step.
    cmdclass={"build_ext": BuildExtension},
    setup_requires=requirements,
    install_requires=requirements,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ]
)
| 1ytic/warp-rnnt | pytorch_binding/setup.py | setup.py | py | 2,286 | python | en | code | 204 | github-code | 13 |
36737144423 | #Returns a pandas dataframe with required query results.
from airflow.models import BaseOperator
import pandas as pd
from datetime import datetime
from airflow.plugins_manager import AirflowPlugin
from google_analytics_plugin.hooks.mysql_hook import MySqlHook
class MySqlQueryOperator(BaseOperator):
    """Run a SQL query against a MySQL connection and return the result as a pandas DataFrame."""

    def __init__(self,
                 mysql_conn_id,
                 database,
                 query,
                 *args,
                 **kwargs):
        """
        :param mysql_conn_id: Airflow connection id of the MySQL server
        :param database: schema/database to run the query against
        :param query: SQL text to execute
        """
        super().__init__(*args, **kwargs)
        self.mysql_conn_id = mysql_conn_id
        self.database = database
        self.query = query

    def execute(self, context):
        """Execute ``self.query`` and return the result set as a DataFrame.

        Bug fixes vs. the original: it read the (undefined) module-level name
        ``query`` instead of ``self.query``; it swallowed exceptions and then
        returned the never-assigned ``df_`` (NameError); and it never closed
        the connection. Errors are now logged and re-raised so the Airflow
        task fails visibly.
        """
        mysql_hook = MySqlHook(mysql_conn_id=self.mysql_conn_id, schema=self.database)
        conn = mysql_hook.get_conn()
        try:
            return pd.read_sql(self.query, conn)
        except Exception as e:
            print("Error {0}".format(str(e)))
            raise
        finally:
            # Always release the connection obtained from the hook.
            conn.close()
| nihalsangeeth/airflow-plugins-collection | plugins/google_analytics_plugin/operators/mysql_query_operator.py | mysql_query_operator.py | py | 953 | python | en | code | 2 | github-code | 13 |
71031587537 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from statsmodels.tsa.ar_model import AutoReg as AR
from matplotlib.dates import DateFormatter
import statsmodels.api as sn
from statsmodels.graphics.tsaplots import plot_acf
import math
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_percentage_error
# Q1: visualise the daily-cases series and its autocorrelation structure.
df_q1=pd.read_csv("daily_covid_cases.csv")
cases=df_q1['new_cases']
print("-----Q1-----")
#Q1 part a: line plot of daily new cases over time
fig, ax = plt.subplots()
ax.plot(df_q1['Date'],df_q1['new_cases'].values)
ax.set(xlabel="Month-Year", ylabel="New_cases",title="Lineplot--Q1a")
date_form = DateFormatter("%b-%d")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
plt.xticks(rotation =45)
plt.show()
#Q1 part b: lag-1 autocorrelation of the series
one_day_lag=cases.shift(1)
print("Pearson correlation (autocorrelation) coefficient :",cases.corr(one_day_lag))
print()
#Q1 part c: scatter of the series against its one-day lag
plt.scatter(cases, one_day_lag, s=5)
plt.xlabel("Given time series")
plt.ylabel("One day lagged time series")
plt.title("Q1 part c")
plt.show()
#Q1 part d: autocorrelation for lags 1..6, annotated on the plot
PCC=sn.tsa.acf(cases)
lag=[1,2,3,4,5,6]
pcc=PCC[1:7]
plt.plot(lag,pcc, marker='o')
for xitem,yitem in np.nditer([lag, pcc]):
    etiqueta = "{:.3f}".format(yitem)
    plt.annotate(etiqueta, (xitem,yitem), textcoords="offset points",xytext=(0,10),ha="center")
plt.xlabel("Lag value")
plt.ylabel("Correlation coffecient value")
plt.title("Q1 part d")
plt.show()
#Q1 part e: full correlogram out to 50 lags
plot_acf(x=cases, lags=50)
plt.xlabel("Lag value")
plt.ylabel("Correlation coffecient value")
plt.title("Q1 part e")
plt.show()
def rms_error(x_pred, x_actual):
    """Return the RMSE between predictions and actuals as a percentage of the actual mean."""
    rmse = np.sqrt(np.mean((x_pred - x_actual) ** 2))
    return rmse / np.mean(x_actual) * 100
# def map_error(x_pred,x_actual):
# return (np.mean(np.abs((x_actual-x_pred)/x_actual))/len(x_actual))*100
# Q2: fit an AR(5) model on the first 65% of the series and do one-step-ahead
# prediction over the 35% test tail (using true observations as history).
print("-----Q2-----")
series = pd.read_csv('daily_covid_cases.csv',parse_dates=['Date'],index_col=['Date'],sep=',')
test_size = 0.35 # test size=35%
X = series.values
tst_sz = math.ceil(len(X)*test_size)
train, test = X[:len(X)-tst_sz], X[len(X)-tst_sz:]
# lag=5
ar_model=AR(train,lags=5).fit()
# finding the parametrs of autoregression
coef=ar_model.params
print("Q2 part a--> coefficients are :",coef)
# Seed the prediction history with the last 5 training observations.
history=train[len(train)-5:]
history=[history[k] for k in range(len(history))]
pred=list()
for t in range(len(test)):
    # The 5 most recent observations, oldest first.
    lag=[history[j] for j in range(len(history)-5,len(history))]
    # coef[0] is the intercept; coef[d+1] weights the observation d+1 steps back.
    yh=coef[0]
    for d in range(5):
        yh=yh+coef[d+1]*lag[5-d-1]
    obs=test[t]
    pred.append(yh)
    # Walk-forward: append the true observation, not the prediction.
    history.append(obs)
# Q2 part b, part 1: actual vs predicted scatter
plt.scatter(test,pred)
plt.xlabel('Actual cases')
plt.ylabel('Predicted cases')
plt.title('Q2 part b, Part 1')
plt.show()
# Q2 part b, part 2: both series over the test index
x=[i for i in range(len(test))]
plt.plot(x,test, label='Actual cases')
plt.plot(x,pred,label='Predicted cases')
plt.legend()
plt.title('Q2 part b, Part 2')
plt.show()
# Q2 part b, part 3: error metrics on the test tail
print("RMSE between actual and predicted test data: ",rms_error(pred,test))
print("MAPE between actual and predicted test data: ",mean_absolute_percentage_error(pred,test))
# Q3
# Re-load the data and redo the same 65/35 train/test split used in Q2.
series = pd.read_csv('daily_covid_cases.csv',parse_dates=['Date'],index_col=['Date'],sep=',')
test_size = 0.35 # test size=35%
X = series.values
tst_sz = math.ceil(len(X)*test_size)
train, test = X[:len(X)-tst_sz], X[len(X)-tst_sz:]
# function for creating Autoregression model, rmse, mape
def AutoRegression(train,test,i):
    """Fit an AR(i) model on *train*, walk-forward predict over *test*.

    train, test: 2-D numpy arrays of shape (T, 1) from ``series.values``
    i:           autoregression order (number of lags)
    return:      (percentage RMSE, MAPE) of the one-step-ahead predictions
    """
    ar_model=AR(train,lags=i).fit()
    # finding the parametrs of autoregression
    coef=ar_model.params
    # Seed the history with the last i training observations.
    history=train[len(train)-i:]
    history=[history[k] for k in range(len(history))]
    pred=list()
    for t in range(len(test)):
        # The i most recent observations, oldest first.
        lag=[history[j] for j in range(len(history)-i,len(history))]
        # coef[0] is the intercept; coef[d+1] weights the value d+1 steps back.
        yh=coef[0]
        for d in range(i):
            yh=yh+coef[d+1]*lag[i-d-1]
        obs=test[t]
        pred.append(yh)
        # Walk-forward validation: feed the true observation back in.
        history.append(obs)
    rms_e=rms_error(pred,test)
    map_e=mean_absolute_percentage_error(pred,test)
    return rms_e,map_e
print("-----Q3-----")
p=[1,5,10,15,25] # p values
rms_list=[]
map_list=[]
for i in p:
rmse,mape=AutoRegression(train,test,i)
rms_list.append(rmse)
map_list.append(mape)
# MAPE and RMSE values
print("MAPE values for p=1,5,10,15,25: ",map_list)
print("RMSE values for p=1,5,10,15,25: ",rms_list)
# plot for rmse
plt.bar(['1','5','10','15','25'],rms_list,width=.4)
plt.title("RMSE vs lag value")
plt.xlabel("lag value(p)")
plt.ylabel("RMSE")
plt.show()
# plot for mape
plt.bar(['1','5','10','15','25'],map_list,width=.4)
plt.title("MAPE vs lag value")
plt.xlabel("lag value(p)")
plt.ylabel("MAPE")
plt.show()
# Q4
print("-----Q4-----")
train_q4=series.iloc[:int(len(series)*0.65)]
train_q4=train_q4['new_cases']
i=0
corr = 1
# abs(AutoCorrelation) > 2/sqrt(T)
while corr > 2/(len(train_q4))**0.5:
i += 1
new_ = train_q4.shift(i)
corr = train_q4.corr(new_)
print("Optimal value for lag is: ",i-1)
rms_q4,map_q4=AutoRegression(train,test,i-1)
print("RMSE Q4: ",rms_q4)
print("MAPE Q4: ",map_q4) | Prakash-Mandloi/machine-learnig--to-predict-covid-case | covid_case_predictor.py | covid_case_predictor.py | py | 5,071 | python | en | code | 0 | github-code | 13 |
26039770500 | """
Created on 30.05.2021
This script handles the GET and POST requests to the covid19 API endpoint http://localhost:8000/api/covid19/
This api gets the latest covid19 data, shows the organized and sorted data.
Users also could search according to country code
'GET':
Returns the html page for the case reports all over the world
'POST':
Using the country code information , provided by the user, it connects to the CovidPy API,
to take the country data. Retrieves the data and passes it to the Django template "covid_country_report.html"
where the data is processed and displayed to the user.
JSON Format : { 'countrycode': "", string, identifies the country code user search for it
@author: Yiğit SARIOĞLU
"""
from django.shortcuts import render
import COVID19Py
import pycountry
from rest_framework.response import Response
from .form_search import SearchForm
from django.http import HttpResponse
#This method returns the data of the all over the world
#Also users could search specific country
def covid_api (request):
    """Render the world-wide covid19 report (GET) or handle a country search (POST)."""
    covid19 = COVID19Py.COVID19() #create a new instance of covid19
    if request.method == 'GET':
        form=SearchForm() #initialize a form object
        latest = covid19.getLatest() #Getting latest amount of total confirmed cases, deaths, and recoveries
        confirmed= latest["confirmed"] #latest amount of total confirmed case all over the world
        death = latest["deaths"] #latest amount of total deaths all over the world
        recover=latest["recovered"] #latest amount of total recover all over the world
        deathranks = covid19.getLocations(rank_by='deaths') # rank the results deaths
        confirmedranks = covid19.getLocations(rank_by='confirmed') # rank the results confirmed
        #Here list1 takes the name of the top 20 countries according to death
        list1=[]
        for x in range(20) :
            list1.append(deathranks[x]["country"])
        #Here list2 takes the name of the top 20 countries according to confirmed case
        list2=[]
        for x in range(20) :
            list2.append(confirmedranks[x]["country"])
        # Returns the search form for the user, and the latest amount of total confirmed cases, deaths, and recoveries information,
        # Also returns death rankings and confirmed case rankings of countries
        return render(request, 'covid_reports.html', {'confirm' : confirmed, 'recovery' : recover,
                                                      'death' : death, 'sform':form, "deathrank":list1, "confirmrank":list2 })
    elif request.method == 'POST':
        form = SearchForm(request.POST) #initialize a form object
        if form.is_valid():
            country = form.cleaned_data.get('country') #gets the country code from the form
            if len(country) != 2: #if the country code length is not equal to 2. ıt is not a valid countrycode. so it returns HttpResponse
                return HttpResponse("<h1>Not Valid Country Code..Code length should be 2 .Please write valid code</h1>")
            #Using pycountry package, specific countries can be looked up by their various codes
            elif pycountry.countries.get(alpha_2=country): #checks the country code, with using pycountry
                return covid_country_api(request, country) # if it is valid, it calls the covid_country_api method to show the specific country data
            else: #if that country code is not valid, HttpResponse sended
                return HttpResponse("<h1>Not Valid Country Code..Your Country Code is not in the table. Please look the country table. Then write a valid code</h1>")
        else:
            return HttpResponse("<h1>Not valid form request </h1>")
    else:
        return HttpResponse("<h1>Not valid request </h1>")
#This method returns selected country covid19 data(confirmed,death,recovered)
def covid_country_api (request,countrycode):
covid19 = COVID19Py.COVID19() #Create a new instance of covid19
# GET method : Users could search the country using : /covid19/countrycode
# for example: /covid19/TR calls the "GET" and returns the turkey covid data
if request.method == 'GET': #If GET method is called
if len(countrycode) != 2: #a valid country code should be 2.this statement check this condition
return HttpResponse("<h1>Not Valid Country Code..Code length should be 2 .Please write valid code</h1>")
elif pycountry.countries.get(alpha_2=countrycode): #checks the country code, with using pycountry
locationdata = covid19.getLocationByCountryCode(countrycode) #get the data according to specific country code
countryname = locationdata[0]["country"] # gets the country name data
confirmed = locationdata[0]["latest"]["confirmed"] # gets the country confirmed cases
deaths = locationdata[0]["latest"]["deaths"] # gets the country death data
recovered = locationdata[0]["latest"]["recovered"] # gets the country recovered data
updatetime = locationdata[0]["last_updated"] # gets the update time
#Returns the confirmed cases,deaths ,recovery , update time of data and country name
return render(request, 'covid_country_report.html',
{'cname': countryname, 'confirm': confirmed, 'death': deaths, 'recover': recovered,
'time': updatetime})
else: #if that country code is not valid, HttpResponse sended
return HttpResponse(
"<h1>Not Valid Country Code..Your Country Code is not in the table. Please look the country table. Then write a valid code</h1>")
# POST method : called when user post a request
elif request.method == 'POST':
locationdata = covid19.getLocationByCountryCode(countrycode)
countryname = locationdata[0]["country"] # gets the country name data
confirmed = locationdata[0]["latest"]["confirmed"] # gets the country confirmed cases
deaths = locationdata[0]["latest"]["deaths"] # gets the country death data
recovered = locationdata[0]["latest"]["recovered"] # gets the country recovered data
updatetime = locationdata[0]["last_updated"] #gets the update time
#Returns the confirmed cases,deaths ,recovery , update time of data and country name
return render(request, 'covid_country_report.html',
{'cname': countryname, 'confirm': confirmed, 'death': deaths, 'recover': recovered,
'time': updatetime})
| bounswe/2021SpringGroup4 | practice-app/api/covid_reports/main.py | main.py | py | 6,863 | python | en | code | 2 | github-code | 13 |
15475045585 | str1 = '4 4 1 1 16'
str2 = ['1 1','1 2','1 3','1 4','2 1','2 2','2 3','2 4','3 1','3 2','3 3','3 4','4 1','4 2','4 3','4 4']
from collections import deque
n,m,s,t,q = map(int,str1.split())
flea_dict = {}
for i in range(q):
split_cord = str2[i].split()
flea_dict[(int(split_cord[0]),int(split_cord[1]))] = -1
di = [1, 1, -1, -1, 2, -2, 2, -2]
dj = [2, -2, 2, -2, 1, 1, -1, -1]
def bfs(n,m,find_flea,flea_dict,ranges_dict,now, que):
current_range = ranges_dict[now]
for k in range(8):
new_i = now[0] + di[k]
new_j = now[1] + dj[k]
if 1 <= new_i <= n and 1 <= new_j <= m:
if (new_i,new_j) not in ranges_dict:
ranges_dict[new_i,new_j] = current_range + 1
que.append((new_i,new_j))
if (new_i,new_j) in flea_dict:
if flea_dict[(new_i,new_j)] == -1:
flea_dict[(new_i,new_j)] = current_range + 1
find_flea[0] += 1
ranges_dict = {}
ranges_dict[(s,t)] = 0
find_flea = [0]
if (s,t) in flea_dict:
find_flea = [1]
flea_dict[(s,t)] = 0
que = deque()
que.append((s,t))
while que:
cord = que.popleft()
bfs(n,m,find_flea,flea_dict,ranges_dict,cord,que)
if find_flea[0] == q:
break
if find_flea[0] == q:
print(sum(flea_dict.values()))
else:
print(-1)
| ougordeev/Yandex | 3_B_38_flea_horse.py | 3_B_38_flea_horse.py | py | 1,331 | python | en | code | 0 | github-code | 13 |
32618375326 | class Solution(object):
def mergeAlternately(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: str
"""
merged = ""
if len(word1) <= len(word2):
for i in range(len(word1)):
merged += word1[i] + word2[i]
merged += word2[len(word1):]
if len(word2) < len(word1):
for i in range(len(word2)):
merged += word1[i] + word2[i]
merged += word1[len(word2):]
return merged
| LesleyBonyo/DSA-Python | python/mergeStringAlternatively.py | mergeStringAlternatively.py | py | 535 | python | en | code | 0 | github-code | 13 |
4816784162 | """A bot for managing War of the Visions guild information via Discord."""
from __future__ import print_function
from __future__ import annotations
import json
import logging
import discord
from data_files import DataFiles
from reminders import Reminders
from wotv_bot_common import ExposableException
from wotv_bot import WotvBotConfig, WotvBot
from worksheet_utils import WorksheetUtils
# Where the main config file for the bot lives.
CONFIG_FILE_PATH = 'bot_config.json'
# Where to persist reminders
REMINDERS_DB_PATH = '.reminders.sql'
# Maximum length of a Discord message. Messages longer than this need to be split up.
# The actual limit is 2000 characters but there seems to be some formatting inflation that takes place.
DISCORD_MESSAGE_LENGTH_LIMIT = 1000
class GlobalConfig:
"""Config object for the entire application."""
def __init__(self, wotv_bot_config: WotvBotConfig, discord_bot_token: str):
self.wotv_bot_config = wotv_bot_config
self.discord_bot_token = discord_bot_token
def readConfig(file_path) -> GlobalConfig:
"""Reads the configuration file and returns a configuration object containing all the important information within."""
wotv_bot_config = WotvBotConfig()
discord_bot_token = None
with open(file_path) as config_file:
data = json.load(config_file)
wotv_bot_config.access_control_spreadsheet_id = data['access_control_spreadsheet_id']
wotv_bot_config.esper_resonance_spreadsheet_id = data['esper_resonance_spreadsheet_id']
wotv_bot_config.vision_card_spreadsheet_id = data['vision_card_spreadsheet_id']
wotv_bot_config.sandbox_esper_resonance_spreadsheet_id = data['sandbox_esper_resonance_spreadsheet_id']
wotv_bot_config.leaderboard_spreadsheet_id = data['leaderboard_spreadsheet_id']
wotv_bot_config.data_files = DataFiles.parseDataDump(data['data_dump_root_path'])
discord_bot_token = data['discord_bot_token']
return GlobalConfig(wotv_bot_config, discord_bot_token)
def toDiscordMessages(message_text):
"""Returns a list of messages, all under DISCORD_MESSAGE_LENGTH_LIMIT in size.
If the given message is longer than DISCORD_MESSAGE_LENGTH_LIMIT, splits the message into as many
chunks as necessary in order to stay under the limit for each message. Tries to respect newlines.
If a line is too long, this method will fail.
"""
if len(message_text) < DISCORD_MESSAGE_LENGTH_LIMIT:
return [message_text]
result = []
buffer = ''
lines = message_text.splitlines(keepends=True)
for line in lines:
if len(line) > DISCORD_MESSAGE_LENGTH_LIMIT:
# There's a line with a single word too long to fit. Abort.
raise ExposableException('response too long')
if (len(buffer) + len(line)) < DISCORD_MESSAGE_LENGTH_LIMIT:
buffer += line
else:
result.append(buffer)
buffer = line
if len(buffer) > 0:
result.append(buffer)
return result
if __name__ == "__main__":
intents = discord.Intents.default()
intents.members = True # Necessary to extract snowflake IDs for !whois
discord_client = discord.Client(intents = intents)
global_config = readConfig(CONFIG_FILE_PATH)
global_config.wotv_bot_config.discord_client = discord_client
global_config.wotv_bot_config.reminders = Reminders(REMINDERS_DB_PATH)
global_config.wotv_bot_config.spreadsheet_app = WorksheetUtils.getSpreadsheetsAppClient()
wotv_bot = WotvBot(global_config.wotv_bot_config)
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)
# handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
@discord_client.event
async def on_ready():
"""Hook automatically called by the discord client when login is complete."""
print('Bot logged in: {0.user}'.format(discord_client))
global_config.wotv_bot_config.reminders.start(discord_client.loop)
await WotvBot.getStaticInstance().createOrResetPeriodicStatusUpdateCallback()
@discord_client.event
async def on_message(message):
"""Hook automatically called by the discord client when a message is received."""
responseText = None
reaction = None
try:
responseText, reaction = await wotv_bot.handleMessage(message)
except ExposableException as safeException:
responseText = safeException.message
if responseText:
allMessagesToSend = toDiscordMessages(responseText)
for oneMessageToSend in allMessagesToSend:
await message.channel.send(oneMessageToSend)
if reaction:
await message.add_reaction(reaction)
# Finally, the start method.
if __name__ == "__main__":
discord_client.run(global_config.discord_bot_token)
| andrewhayden/ffbe_forever_guild_bot | ffbe_forever_guild_bot.py | ffbe_forever_guild_bot.py | py | 4,980 | python | en | code | 0 | github-code | 13 |
35654788052 | """
Factory Method é um padrão de criação que permite definir uma interface para criar objetos, mas deixa as subclasses
decidirem quais objetos criar. O FACTORY METHOD permite adiar a instanciação para as subclasses, garantindo o baixo
acoplamento entre classes.
"""
import random
from abc import ABC, abstractmethod
from random import choice
class Vehicle(ABC):
@abstractmethod
def get_client(self):
pass
class HighGrade(Vehicle):
def get_client(self):
print(f'{self.__class__.__name__} is getting a client')
class PopularGrade(Vehicle):
def get_client(self):
print(f'{self.__class__.__name__} car is getting a client')
class Bike(Vehicle):
def get_client(self):
print(f'the {self.__class__.__name__} is getting a client')
class VehicleFactory(ABC):
def __init__(self, type_):
self.car = self.get_car(type_)
@staticmethod
@abstractmethod
def get_car(type_: str):
pass
def get_client(self):
self.car.get_client()
class NorthZoneFactory(VehicleFactory):
@staticmethod
def get_car(type_: str):
if type_ == 'luxury':
return HighGrade()
if type_ == 'popular':
return PopularGrade()
if type_ == 'bike':
return Bike()
class SouthZoneFactory(VehicleFactory):
@staticmethod
def get_car(type_: str):
if type_ == 'popular':
return PopularGrade()
if __name__ == '__main__':
available_vehicles_north = ['luxury', 'popular', 'bike']
available_vehicles_south = ['popular']
for i in range(5):
car = NorthZoneFactory(random.choice(available_vehicles_north))
car.get_client()
for i in range(5):
car = SouthZoneFactory(random.choice(available_vehicles_south))
car.get_client() | JonasFiechter/UDEMY-Python | design_patterns/factory_method_CREATION.py | factory_method_CREATION.py | py | 1,824 | python | en | code | 0 | github-code | 13 |
38256486201 | import pandas as pd
from tqdm import tqdm
## train data와 test data를 읽어와 pandas dataframe형태로 저장
def preprocess_query(type='train'):
if type=='train':
file_name = '../input/1. 실습용자료.txt'
elif type=='test':
file_name = '../input/2. 모델개발용자료.txt'
with open(file_name, 'r', encoding='CP949') as f:
data = f.read()
data = data.split('\n')
column_names = data[0].split('|')
df_dict = [[] for _ in range(len(column_names))]
for i in range(len(data)):
if i==0:
continue
else:
element = data[i].split('|')
if len(column_names)!= len(element):
continue
else:
for i in range(len(column_names)):
df_dict[i].append(element[i])
df = pd.DataFrame({column_names[i] : df_dict[i] for i in range(len(column_names))})
df = make_query_format(df)
return df
# label에 대한 정보가 담겨있는 엑셀 파일을 불러와 dataframe으로 저장
def preprocess_class():
class_df = pd.read_excel('../input/한국표준산업분류(10차)_국문.xlsx')
new_header = class_df.iloc[0]
class_df = class_df[1:]
class_df.columns=new_header
class_df.drop([1], axis=0, inplace=True)
class_df.columns = ['1st', '1st_text', '2nd', '2nd_text', '3rd', '3rd_text', '4th', '4th_text', '5th', '5th_text']
columns = list(class_df.columns)
# 엑셀 파일에서 사용하지 않는 분류 단위는 버리고 공백 부분을 해당 분류 값으로 채워 공백을 지움
for column in columns:
class_df[column] = class_df[column].fillna(method = 'ffill')
class_df.drop(['4th', '4th_text', '5th', '5th_text'], axis =1, inplace=True)
class_df.drop_duplicates(keep='first', inplace=True, ignore_index=True)
class_df['class_num'] = [i for i in range(len(class_df))]
# train data의 산업 분류 코드와 format을 맞춰주기 위해 앞자리에 0이 붙은 경우 이를 지우고 저장
class_2nd_list = []
for t in class_df['2nd']:
if t[0]=='0':
class_2nd_list.append(t[1:])
else:
class_2nd_list.append(t)
class_3rd_list = []
for t in class_df['3rd']:
if t[0]=='0':
class_3rd_list.append(t[1:])
else:
class_3rd_list.append(t)
class_df['2nd']=class_2nd_list
class_df['3rd']=class_3rd_list
return class_df
# text_obj, text_mthd, text_deal을 연결하여 하나의 query 문장으로 저장
def make_query_format(df):
final_query = []
for i, row in tqdm(df.iterrows(), desc = 'making query format'):
query_text= []
if row['text_obj']!='':
query_text.append(row['text_obj'])
if row['text_mthd']!='':
query_text.append(row['text_mthd'])
if row['text_deal']!='':
query_text.append(row['text_deal'])
query_text = ' '.join(query_text)
final_query.append(query_text)
df['query_text'] = final_query
return df
## dataset 구축전 필요없는 column을 버리고 query dataframe에 각 데이터의 산업 분류 레이블을 번호로 추가해줌
## class_df의 경우 훈련과정에서는 필요없으나 예측 결과를 얻는 과정에서 산업코드를 가져오기 위해 return에 넣어줌
def combine(type='train'):
query_df = preprocess_query(type)
class_df = preprocess_class()
if type=='test':
query_df.drop(['digit_1', 'digit_2', 'digit_3', 'text_obj', 'text_mthd', 'text_deal'], axis=1, inplace=True)
class_df.drop(['1st_text', '2nd_text', '3rd_text'], axis=1, inplace=True)
return query_df, class_df
elif type=='train':
label_list=[]
for i, row in tqdm(query_df.iterrows(), desc='adding class_num to query_df'):
label = row['digit_3']
label_num = int(class_df[class_df['3rd']==label]['class_num'])
label_list.append(label_num)
query_df['class_num']=label_list
query_df.drop(['digit_1', 'digit_2', 'digit_3', 'text_obj', 'text_mthd', 'text_deal'], axis=1, inplace=True)
class_df.drop(['1st_text', '2nd_text', '3rd_text'], axis=1, inplace=True)
return query_df, class_df
| donggunseo/SCI_Kostat2022 | preprocess.py | preprocess.py | py | 4,267 | python | ko | code | 2 | github-code | 13 |
70282254417 | import sublime
import sublime_plugin
import datetime
import os
import logging
import shutil
import string
import re
log = logging.getLogger(__name__)
cur1 = re.compile('\\$0')
# A really quick and dirty template mechanism.
# Stolen from: https://makina-corpus.com/blog/metier/2016/the-worlds-simplest-python-template-engine
# https://github.com/ebrehault/superformatter
class TemplateFormatter(string.Formatter):
def __init__(self, resolver=None):
super(TemplateFormatter, self).__init__()
self.resolver = resolver
def format_field(self, value, spec):
# REPITITION
#>>> sf.format('''Table of contents:
#{chapters:repeat:Chapter {{item}}
#}''', chapters=["I", "II", "III", "IV"])
#'''Table of contents:
#Chapter I
#Chapter II
#Chapter III
#Chapter IV
#'''
if spec.startswith('repeat'):
template = spec.partition(':')[-1]
if type(value) is dict:
value = value.items()
return ''.join([template.format(item=item) for item in value])
# FUNCTION CALLS
#>>> sf.format('My name is {name.upper:call}', name="eric")
#'My name is ERIC'
elif spec == 'call':
return value()
# OPTIONAL EXPANSION
#>>> sf.format('Action: Back / Logout {manager:if:/ Delete {id}}', manager=True, id=34)
#'Action: Back / Logout / Delete 34'
elif spec.startswith('if'):
return (value and spec.partition(':')[-1]) or ''
else:
return super(TemplateFormatter, self).format_field(value, spec)
def get_value(self, key, args, kwargs):
if(str(key)):
if(key in kwargs):
return kwargs[key]
if(self.resolver):
return str(self.resolver(key,None))
return None
else:
return args[key]
def ExpandTemplate(view, template, format={},resolver=None):
# Supported expansions
formatDict = {
"date": str(datetime.date.today()),
"time": datetime.datetime.now().strftime("%H:%M:%S"),
#"datetime": str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
"datetime": str(datetime.datetime.now().strftime("%Y-%m-%d %a %H:%M")),
"file": str(view.file_name()),
"clipboard": sublime.get_clipboard()
}
if(format != None):
formatDict.update(format)
formatter = TemplateFormatter(resolver)
template = formatter.format(template, **formatDict)
global cur1
mat = cur1.search(template)
pos = -1
if(mat != None):
pos = mat.start(0)
template = cur1.sub('',template)
return (template, pos)
class ASettings:
def __init__(self, settingsName):
self.settingsName = settingsName
self.settings = sublime.load_settings(settingsName + '.sublime-settings')
def Get(self, name, defaultVal):
val = self.settings.get(name)
if(val == None):
val = defaultVal
return val
def Set(self, name, val):
self.settings.set(name,val)
sublime.save_settings(self.settingsName + '.sublime-settings')
# Singleton access
configFilename = "dnd"
_sets = None
def Load():
global configFilename
global _sets
_sets = ASettings(configFilename)
def Get(name, defaultValue, formatDictionary = None):
global _sets
if(_sets == None):
log.warning("SETTINGS IS NULL? IS THIS BEING CALLED BEFORE PLUGIN START?")
Load()
rv = _sets.Get(name, defaultValue)
formatDict = {
"date": str(datetime.date.today()),
"time": datetime.datetime.now().strftime("%H:%M:%S"),
"datetime": str(datetime.datetime.now().strftime("%Y-%m-%d %a %H:%M")),
}
if(formatDictionary != None):
formatDict.update(formatDictionary)
if(str == type(rv)):
formatter = TemplateFormatter(Get)
rv = formatter.format(rv, **formatDict)
if(list == type(rv)):
formatter = TemplateFormatter(Get)
rv = [ (formatter.format(r, **formatDict) if str == type(r) else r) for r in rv ]
return rv
def Set(key, val):
if(_sets == None):
Load()
_sets.Set(key,val)
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def GetInt(name, defaultValue):
v = Get(name, defaultValue)
try:
i = int(v)
return i
except:
return defaultValue
| ihdavids/dnd | sets.py | sets.py | py | 4,312 | python | en | code | 0 | github-code | 13 |
20879398154 | # Impelement a queue in Python
# Makes use of the list data structure inherent to Python
class Queue:
def __init__(self):
self.Q = []
def remove(self):
try:
self.Q.pop(0)
except:
print("Error: queue is empty.")
def add(self, item):
self.Q.append(item)
def peek(self):
return self.Q[0]
def isEmpty(self):
if len(self.Q) == 0:
return True
else:
return False
if __name__ == '__main__':
queue = Queue()
queue.remove()
print(queue.isEmpty())
queue.add("bird")
queue.add("alligator")
print(queue.Q)
print(queue.peek())
print(queue.isEmpty()) | blakerbuchanan/algos_and_data_structures | datastructures/datastructures/queues.py | queues.py | py | 697 | python | en | code | 0 | github-code | 13 |
70368875859 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : __init__.py
@Date : 2022/03/23
@Author : Yaronzz
@Version : 1.0
@Contact : yaronhuang@foxmail.com
@Desc :
"""
import getopt
import sys
import os
import easy_docs.docsify
import easy_docs.util
from http.server import HTTPServer, SimpleHTTPRequestHandler
def help():
print("-h or --help : show help-message")
print("-i or --init : init doc index")
print("-u or --update : update sidebar")
print("-r or --remove : does not contain some directories")
print("-s or --server : test server")
def main():
print("============EASY DOC=================")
try:
opts, args = getopt.getopt(sys.argv[1:],
"hiur:s",
["help", "init", "update", "remove=", "server"])
except getopt.GetoptError as errmsg:
print('Err:' + vars(errmsg)['msg'] + ". Use 'easydoc -h' for useage.")
return
writeIndex = False
writeSidebar = False
skipPath = []
for opt, val in opts:
if opt in ('-h', '--help'):
help()
return
if opt in ('-i', '--init'):
writeIndex = True
writeSidebar = True
if opt in ('-u', '--update'):
writeSidebar = True
if opt in ('-r', '--remove'):
skipPath.append(val)
if opt in ('-s', '--server'):
print(f"Serving {os.getcwd()} now.")
print(f"Listening at http://localhost:3000")
server = HTTPServer(('0.0.0.0', 3000), SimpleHTTPRequestHandler)
server.serve_forever()
return
if writeIndex:
easy_docs.docsify.init('./')
if writeSidebar:
easy_docs.util.createSidebarFile('./', skipPath)
| yaronzz/easy-docs | easy_docs/__init__.py | __init__.py | py | 1,807 | python | en | code | 1 | github-code | 13 |
32377749439 | #4.3 loop
#4.3.1 while_loop
##t=3
##while t>0:
## print("t-minus "+str(t))
## t=t-1
##print("blastoff!")
##x=20
##while x>10:
## print(x,"I am sorry, Dave.")
## x=x-1
##print(x,"I cannot print that for you.")
#fibonacci number
##fib=[1,1]
##while True:
## x=fib[-2]+fib[-1]
## if x%35==0:
## break
## fib.append(x)
##print(fib)
##y={x**2 for x in fib if x%5==0}
##print(sorted(y))
#4.3.2 for_loop
##for t in [3,2,1]:
## print("t-minus "+str(t))
##print("Blastoff!")
##for t in [7,6,5,4,3,2,1]:
## if t%2!=0:
## continue
## print("t-minus "+str(t))
##print("Blastoff!")
##for letter in "Gorgus":
## print(letter)
##for letter in {0,"Gorgus",True}: #change the order, print the same
## print(letter)
##d={"first":"Albert",
## "last":"Einstein",
## "Birthday":[1879, 3,14]}
##for key in d:
## print(key)
## print(d[key])
## print("========")
###the output order is different with that in the textbook
##
####################################################3
##d={"first":"Albert",
## "last":"Einstein",
## "Birthday":[1879, 3,14]}
##
##print("Keys:")
##for key in d.keys():
## print(key)
##print("\n========\n")
##
##print("Values:")
##for value in d.values():
## print(value)
##print("\n========\n")
##
##print("Items:")
##for key,value in d.items():
## print(key,value)
#####################################################
##quarks={'up','down','top','bottom','charm','strange'}
##for quark in quarks:
## print(quark)
##
#4.3.3 comprehension
##quarks={'up','down','top','bottom','charm','strange'}
##upper_quarks=[]
##for quark in quarks:
## upper_quarks.append(quark.upper())
##print(upper_quarks)
##
## list comprehension
##quarks={'up','down','top','bottom','charm','strange'}
##upper_quarks=[quark.upper() for quark in quarks]
##print(upper_quarks)
##
#set comprehension
##entries=['top','CHARm','Top','sTRANGE','TOP']
##quarksa={quark.upper() for quark in entries}
##quarksb={quark.lower() for quark in entries}
##print(quarksa,quarksb)
# dict comprehension
##entries=[1,10,12.5,65,88]
##results={x:x**2+42 for x in entries}
##print(results)
##pm=['Newton','is','the','most','famous','scientist','in','the','human','history']
##t_words=[word for word in pm if word.startswith('t')]
##print(t_words)
coords={'x':1,'y':2,'z':3,'r':1,'theta':2,'phi':3}
polar_keys={'r','theta','phi'}
polar={key:value for key, value in coords.items() if key in polar_keys}
print(polar)
print(coords)
| youngmei/python_starter | chapter4_loop.py | chapter4_loop.py | py | 2,695 | python | en | code | 0 | github-code | 13 |
12805540781 | def rlist(start, end, prefix='net_', suffix='', step=1):
return ['%s%s%s' % (prefix, str(x), suffix)
for x in xrange(start, end + 1, step)]
feature_toggle = "echo 'hafnium.tempWorkarounds.skipProcessingBlock162=true' > /tmp/hafnium-simulation.properties;" \
"chmod 755 /tmp/hafnium-simulation.properties;" \
"/ci/bin/set-feature-toggles -e OVF1773_Nitro_ICM_Support -e OVF1776_MethaneDiscovery " \
"-e OVF2128_NitoMethane_ILT_Support"
SSH_PASS = 'hpvse1'
interface = 'bond0'
EG1 = 'EG1'
ENC1 = 'MXQ71902DQ'
LE1 = 'LE1'
NITRO_MODEL = 'Virtual Connect SE 100Gb F32 Module for Synergy'
NITRO_PART = '867796-B21'
CL50_MODEL = 'Synergy 50Gb Interconnect Link Module'
ENC1_BAY1_IPS = ['172.16.1.3', '172.16.3.3', '172.16.5.3', '172.16.60.3', '172.16.90.3']
ENC1_BAY2_IPS = ['172.16.1.4', '172.16.3.4', '172.16.5.4', '172.16.60.4', '172.16.90.4']
admin_credentials = {'userName': 'Administrator', 'password': 'hpvse123'}
users = [
{'userName': 'Serveradmin', 'password': 'Serveradmin', 'fullName': 'Serveradmin',
'permissions': [{'roleName': 'Server administrator'}],
'emailAddress': 'sarah@hp.com', 'officePhone': '970-555-0003', 'mobilePhone': '970-500-0003',
'type': 'UserAndPermissions'},
{'userName': 'Networkadmin', 'password': 'Networkadmin', 'fullName': 'Networkadmin',
'permissions': [{'roleName': 'Network administrator'}], 'emailAddress': 'nat@hp.com',
'officePhone': '970-555-0003',
'mobilePhone': '970-500-0003', 'type': 'UserAndPermissions'},
{'userName': 'Backupadmin', 'password': 'Backupadmin', 'fullName': 'Backupadmin',
'permissions': [{'roleName': 'Backup administrator'}],
'emailAddress': 'backup@hp.com', 'officePhone': '970-555-0003', 'mobilePhone': '970-500-0003',
'type': 'UserAndPermissions'},
{'userName': 'Noprivledge', 'password': 'Noprivledge', 'fullName': 'Noprivledge',
'permissions': [{'roleName': 'Read only'}],
'emailAddress': 'rheid@hp.com', 'officePhone': '970-555-0003', 'mobilePhone': '970-500-0003',
'type': 'UserAndPermissions'}
]
ranges = [{'name': 'FCOE-MAC', 'type': 'Range', 'category': 'id-range-VMAC', 'rangeCategory': 'CUSTOM',
'startAddress': '00:BC:56:00:00:00', 'endAddress': '00:BC:56:00:00:7F', 'enabled': True},
{'name': 'FCOE-WWN', 'type': 'Range', 'category': 'id-range-VWWN', 'rangeCategory': 'CUSTOM',
'startAddress': '21:11:BC:56:00:00:00:00', 'endAddress': '21:11:BC:56:00:00:00:7F', 'enabled': True},
{'name': 'FCOE-SN', 'type': 'Range', 'category': 'id-range-VSN', 'rangeCategory': 'CUSTOM',
'startAddress': 'VCUAAAAAAA', 'endAddress': 'VCUAAAAADT', 'enabled': True}]
e1 = [{'name': n,
'type': 'ethernet-networkV4',
'vlanId': None,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Tunnel'} for n in rlist(1, 256, 'tunnel_')]
e2 = [{'name': 'untagged_1',
'type': 'ethernet-networkV4',
'vlanId': None,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Untagged'}]
e3 = [{'name': n,
'type': 'ethernet-networkV4',
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Tagged',
'vlanId': int(n[4:])} for n in rlist(1, 3966)]
ethernet_networks = e1 + e2 + e3
network_sets = [{'name': 'NetSet1', 'type': 'network-setV4', 'networkUris': rlist(1, 500),
'nativeNetworkUri': None},
{'name': 'NetSet2', 'type': 'network-setV4', 'networkUris': rlist(501, 998),
'nativeNetworkUri': None}
]
def us(**kwargs):
return {'name': kwargs.get('name', None),
'ethernetNetworkType': kwargs.get('ethernetNetworkType', 'Tagged'),
'networkType': 'Ethernet',
'networkUris': kwargs.get('networkUris', None),
'primaryPort': None,
'nativeNetworkUri': None,
'mode': 'Auto',
'logicalPortConfigInfos': kwargs.get('logicalPortConfigInfos', None)}
def lig(**kwargs):
return {'name': kwargs.get('name', None),
'type': kwargs.get('type', 'logical-interconnect-groupV5'),
'enclosureType': kwargs.get('enclosureType', 'SY12000'),
'interconnectMapTemplate': kwargs.get('interconnectMapTemplate', [
{'bay': 3, 'enclosure': 1, 'type': NITRO_MODEL,
'enclosureIndex': 1}]),
'enclosureIndexes': kwargs.get('enclosureIndexes', [1]),
'interconnectBaySet': kwargs.get('interconnectBaySet', 3),
'redundancyType': kwargs.get('redundancyType', 'NonRedundantASide'),
'uplinkSets': kwargs.get('uplinkSets', []),
'internalNetworkUris': kwargs.get('internalNetworkUris', None),
}
uplink_sets = {'Q6': us(name='Tagged 1-100',
networkUris=rlist(1, 100),
logicalPortConfigInfos=[{'enclosure': '1', 'bay': '3',
'port': 'Q6', 'speed': 'Auto'}]),
'Q4': us(name='Tunnel',
networkUris=['tunnel_1'],
ethernetNetworkType='Tunnel',
logicalPortConfigInfos=[{'enclosure': '1', 'bay': '3',
'port': 'Q4', 'speed': 'Auto'}]),
'Q1': us(name='Untagged',
networkUris=['untagged_1'],
ethernetNetworkType='Untagged',
logicalPortConfigInfos=[{'enclosure': '1', 'bay': '3',
'port': 'Q1', 'speed': 'Auto'}]),
'BigPipe': us(name='BigPipe',
networkUris=rlist(101, 3966),
logicalPortConfigInfos=[{'enclosure': '1', 'bay': '3',
'port': p,
'speed': 'Auto'}
for p in sorted(['Q%i.%i' % (n, i)
for i in range(1, 5)
for n in (3, 5)])])
}
LIG1 = 'LIG1'
ligs = {LIG1: lig(name=LIG1,
internalNetworkUris=[n for n in rlist(2, 126, 'tunnel_')],
uplinkSets=[v for v in uplink_sets.itervalues()])}
enc_groups = {EG1: {'name': EG1,
'enclosureCount': 1,
'interconnectBayMappings':
[{'interconnectBay': 1, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 2, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 3, 'logicalInterconnectGroupUri': 'LIG:' + LIG1},
{'interconnectBay': 4, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 5, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 6, 'logicalInterconnectGroupUri': None}],
'ipAddressingMode': "External"
}
}
les = {LE1: {'name': LE1,
'enclosureUris': ['ENC:%s' % ENC1],
'enclosureGroupUri': 'EG:%s' % EG1,
'firmwareBaselineUri': None,
'forceInstallFirmware': False
}
}
connections = [{'id': 1, 'name': '1', 'functionType': 'Ethernet', 'portId': 'Mezz 3:1-a',
'requestedMbps': '2500', 'networkUri': 'ETH:untagged_1',
'boot': {'priority': 'NotBootable'}},
{'id': 3, 'name': '3', 'functionType': 'Ethernet', 'portId': 'Mezz 3:1-b',
'requestedMbps': '2500', 'networkUri': 'ETH:tunnel_1',
'boot': {'priority': 'NotBootable'}},
{'id': 5, 'name': '5', 'functionType': 'Ethernet', 'portId': 'Mezz 3:1-c',
'requestedMbps': '2500', 'networkUri': 'NS:NetSet1',
'boot': {'priority': 'NotBootable'}},
{'id': 7, 'name': '7', 'functionType': 'Ethernet', 'portId': 'Mezz 3:1-d',
'requestedMbps': '2500', 'networkUri': 'NS:NetSet2',
'boot': {'priority': 'NotBootable'}}
]
server_profiles = [{'type': 'ServerProfileV9', 'serverHardwareUri': ENC1 + ', bay 1',
'serverHardwareTypeUri': '', 'enclosureUri': 'ENC:' + ENC1, 'enclosureGroupUri': 'EG:%s' % EG1,
'serialNumberType': 'Virtual', 'macType': 'Virtual', 'wwnType': 'Virtual',
'name': ENC1 + '_Bay1', 'description': '', 'affinity': 'Bay',
'bootMode': {'manageMode': True, 'mode': 'UEFI', 'pxeBootPolicy': 'Auto'},
'boot': {'manageBoot': True, 'order': ['HardDisk']},
'connectionSettings': {'connections': connections}},
{'type': 'ServerProfileV9', 'serverHardwareUri': ENC1 + ', bay 2',
'serverHardwareTypeUri': '', 'enclosureUri': 'ENC:' + ENC1, 'enclosureGroupUri': 'EG:%s' % EG1,
'serialNumberType': 'Virtual', 'macType': 'Virtual', 'wwnType': 'Virtual',
'name': ENC1 + '_Bay2', 'description': '', 'affinity': 'Bay',
'bootMode': {'manageMode': True, 'mode': 'UEFI', 'pxeBootPolicy': 'Auto'},
'boot': {'manageBoot': True, 'order': ['HardDisk']},
'connectionSettings': {'connections': connections}}
]
| richa92/Jenkin_Regression_Testing | robo4.2/fusion/tests/wpst_crm/feature_tests/TBIRD/OVF3627_Nitro_Profiles/data_variables.py | data_variables.py | py | 9,837 | python | en | code | 0 | github-code | 13 |
21667654132 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 11:11:28 2015
@author: moizr_000
"""
'''
The purpose of the following classes is to merge an MTA and WU dataframe into
a master turnstile-weather dataframe with all the major structural features
necessary for analysis. The MTADataFrame class does the brunt of this work,
by summing all turnstile entries and exits in a given station for every audit
event, then calculating entries and exits per hour using the cumulative data.
The TurnstileWeatherDataFrame class is responsible for merging this dataframe
and the weather underground dataframe together by pairing each entries or exits
per hour entry with the weather event that occured at the audit event just
prior to it.
'''
# a note on terminology:
# 'df' is used to refer to a pandas.DataFrame object
# 'dataframe' is used to refer to the object created by these classes
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from pandas.tseries.holiday import USFederalHolidayCalendar
class DataFrame(object):
def __init__(self):
self.df = pd.DataFrame()
class MTADataFrame(DataFrame):
    """MTA turnstile dataframe.

    Loads raw turnstile audit data from CSV, normalises the column names,
    derives datetime/calendar columns, sums all turnstile units (SCPs) per
    station, then converts the cumulative entry/exit counters into
    per-hour rates.
    """
    def __init__(self, csv_filepath):
        DataFrame.__init__(self)
        self.df = pd.read_csv(csv_filepath)
        self._clean_up()
        self._make_datetime_col()
        self._combine_scps_all_stations()
        self._make_hourly_entries_col()
        self._make_hourly_exits_col()
    def _clean_up(self):
        # Strip stray whitespace from the raw headers and Title-Case them
        # (e.g. "ENTRIES   " -> "Entries") so later column lookups are uniform.
        new_columns = []
        for i in range(len(self.df.columns)):
            new_columns.append(self.df.columns[i].strip().title())
        self.df.columns = new_columns
        return self
    def _make_datetime_col(self, add_more_cols=True):
        # Combine the separate Date and Time columns into a single datetime
        # column; optionally derive the calendar features used by the analysis.
        datetimes = pd.to_datetime(self.df['Date']+' '+self.df['Time'], format='%m/%d/%Y %H:%M:%S')
        self.df['Subway Datetime'] = datetimes
        if add_more_cols:
            self.df['Date'] = datetimes.apply(lambda dt: dt.date())
            self.df['Month'] = datetimes.apply(lambda dt: dt.month)
            self.df['Hour'] = datetimes.apply(lambda dt: dt.hour)
            self.df['DayOfWeek'] = datetimes.apply(lambda dt: dt.dayofweek)
            # dayofweek: Monday=0 ... Sunday=6, so < 5 means a weekday.
            self.df['isWorkday'] = self.df['DayOfWeek'].apply(lambda weekday: 1 if weekday<5 else 0)
            calendar = USFederalHolidayCalendar()
            holidays = calendar.holidays(start='2014-11-19', end='2015-12-31')
            self.df['isHoliday'] = self.df['Date'].apply(lambda date: 1 if date in holidays else 0)
        return self
    ###
    # The following method combines data from ALL the turnstile units within a single subway station
    # This makes subsequent statistical analysis easier
    # scp = sub channel position (turnstile unit identifier)
    ###
    def _combine_scps(self, station_df):
        # TODO: This code might be able to be written more cleanly
        # using Pandas split-apply-combine methods
        # group by scp... where count < 8, extrapolate
        def end_index_first_scp(df, zeroeth_index):
            # Index label of the last row belonging to the first SCP in df.
            # Falls through (returns None) when df contains only one SCP; the
            # .loc slice below then simply takes the whole frame.
            first_scp = df.loc[zeroeth_index, 'Scp']
            for row_idx, data_series in df.iterrows():
                if data_series['Scp'] != first_scp:
                    return row_idx - 1
        old_df = station_df
        scp_arr = old_df['Scp'].unique().tolist()
        zeroeth_index = old_df.index[0]
        end_first_scp = end_index_first_scp(old_df, zeroeth_index)
        new_df = old_df.loc[zeroeth_index:end_first_scp] # make a new df, consisting of only first scp in old_df
        for row_idx, data_series in new_df.iterrows():
            # dataframe consisting of the data for all scps for a given date
            all_scps_for_date_df = old_df[old_df['Subway Datetime']==data_series['Subway Datetime']]
            additional_entries = 0
            additional_exits = 0
            ###
            # the following code is in case there is a missing value in one the turnstiles for a specific datetime
            # if there is, the code will add the missing entries/exits to addtional_entries/exits respectively
            # it does this by predicting what the missing entries/exits would be by taking the average of the
            # entries/exits of the datetime before it with the same for the datetime after it
            if all_scps_for_date_df['Scp'].size != len(scp_arr):
                all_scps = all_scps_for_date_df['Scp'].tolist()
                missed_scps = list(set(scp_arr) - set(all_scps))
                for scp in missed_scps:
                    # NOTE: in some cases, there may be an instance where the new_df read
                    # will contain an odd time (say, 7:51 pm)
                    # and no other scp will match that time
                    # this will manifest itself as this code trying to predict the other 7 turnstiles
                    # not desirable, of course, but is not currently a problem
                    # maybe fix later?
                    this_scp_df = old_df[old_df['Scp'] == scp]
                    before_dt = this_scp_df[this_scp_df['Subway Datetime'] < data_series['Subway Datetime']]
                    last_before_dt = before_dt.iloc[-1]
                    after_dt = this_scp_df[this_scp_df['Subway Datetime'] > data_series['Subway Datetime']]
                    first_after_dt = after_dt.iloc[0]
                    additional_entries += (last_before_dt['Entries'] + first_after_dt['Entries'])/2.0
                    additional_exits += (last_before_dt['Exits'] + first_after_dt['Exits'])/2.0
            ###
            # NOTE(review): new_df is a .loc slice of old_df, so these writes may
            # trigger pandas' SettingWithCopyWarning; consider .copy() above.
            new_df.loc[row_idx, 'Entries'] = np.sum(all_scps_for_date_df['Entries']) + additional_entries
            new_df.loc[row_idx, 'Exits'] = np.sum(all_scps_for_date_df['Exits']) + additional_exits
        return new_df
    def _combine_scps_all_stations(self):
        # Apply _combine_scps station-by-station, then stitch the per-station
        # results back into a single dataframe.
        stations = self.df['Station'].unique().tolist()
        new_df_list = []
        for station in stations:
            station_df = self.df[self.df['Station']==station]
            new_df = self._combine_scps(station_df)
            new_df_list.append(new_df)
        self.df = pd.concat(new_df_list, ignore_index=True)
        return self
    def _make_hourly_entries_col(self):
        # Differentiate the cumulative Entries counter into entries-per-hour.
        hourly_entries = pd.Series(0, index=self.df.index)
        prev_entries = self.df.loc[0, 'Entries'] # initialize prev_entries to first value in df['ENTRIES']
        prev_datetime = self.df.loc[0, 'Subway Datetime'] # same, for first datetime value
        for row_idx, data_series in self.df.iterrows():
            curr_entries = data_series['Entries']
            curr_datetime = data_series['Subway Datetime']
            hours_elapsed = (curr_datetime - prev_datetime).total_seconds()/3600
            if hours_elapsed >= 0: # if still on same turnstile unit (datetimes are increasing)...
                # NOTE(review): the first row (and any duplicate timestamp) gives
                # hours_elapsed == 0, making this a division by zero -- TODO
                # confirm the numpy dtypes turn that into nan/inf rather than raise.
                delta_entries = curr_entries - prev_entries
                hourly_entries[row_idx] = delta_entries/hours_elapsed
            else: # if reached end of one turnstile unit, and on to other from start date again...
                hourly_entries[row_idx] = float('nan') # fill with averages afterwards?
            prev_entries = curr_entries
            prev_datetime = curr_datetime
        self.df['Entries Per Hour'] = hourly_entries
        return self
    def _make_hourly_exits_col(self):
        # Same differentiation as _make_hourly_entries_col, applied to Exits.
        hourly_exits = pd.Series(0, index=self.df.index)
        prev_exits = self.df.loc[0, 'Exits']
        prev_datetime = self.df.loc[0, 'Subway Datetime']
        for row_idx, data_series in self.df.iterrows():
            curr_exits = data_series['Exits']
            curr_datetime = data_series['Subway Datetime']
            hours_elapsed = (curr_datetime - prev_datetime).total_seconds()/3600
            if hours_elapsed >= 0:
                delta_exits = curr_exits - prev_exits
                hourly_exits[row_idx] = delta_exits/hours_elapsed
            else:
                hourly_exits[row_idx] = float('nan')
            prev_exits = curr_exits
            prev_datetime = curr_datetime
        self.df['Exits Per Hour'] = hourly_exits
        return self
class WUDataFrame(DataFrame):
    """Weather Underground dataframe: loads a WU CSV and adds an EDT datetime column."""
    def __init__(self, csv_filepath):
        DataFrame.__init__(self)
        self.df = pd.read_csv(csv_filepath)
        self._clean_up()
        self._make_datetime_col()
    def _clean_up(self):
        # No cleaning currently needed for WU data; kept for symmetry with
        # MTADataFrame._clean_up.
        pass
    def _make_datetime_col(self):
        # WU timestamps are UTC strings suffixed with an HTML line break;
        # shift back four hours to convert them to EDT.
        utc_times = pd.to_datetime(self.df['DateUTC<br />'], format="%Y-%m-%d %H:%M:%S<br />")
        self.df['Weather Datetime'] = utc_times - timedelta(hours = 4)
        return self
### TurnstileWeatherDataFrame class:
# purpose is to combine MTA dataframe and WU dataframe into one master dataframe
# because the MTA datetimes and WU datetimes don't match exactly,
# the purpose of this class is to find the CLOSEST matches between the two
# and write them together into a master dataframe
# because the difference between closest matches is on the order of 9 minutes
# this won't sacrifice much accuracy (weather does not change much in that time)
# also note: since the weather data is taken far more frequently than subway data
# some weather data might not find its way to the final dataframe
###
class TurnstileWeatherDataFrame(DataFrame): # TAKES IN PANDAS DATAFRAMES!
    """Merge an MTADataFrame and a WUDataFrame into one master dataframe.

    Each subway audit row is paired with the weather observation whose
    timestamp is closest to the audit's timestamp.
    """
    def __init__(self, MTA_dataframe, WU_dataframe):
        DataFrame.__init__(self)
        self._merge_dataframes(MTA_dataframe, WU_dataframe)
    # the first helper method in executing merge of MTA_ and WU_dataframes
    # returns index location of closest weather datetime given a datetime object
    # efficient because avoids searching through all weather datetimes
    def _closest_wu_datetime(self, WU_df, datetime_obj):
        '''
        The strategy here will be keep calculating the difference between the datetime_obj and the datetimes
        in the weather dataframe... WHILE the differences are decreasing (i.e.: approaching a local minima).
        So as soon as the differences start to INCREASE (just passing minima), return the previous index as
        the closest match. This works because the datetimes in the weather dataframe will always be in
        increasing (chronological) order. If the datetime_obj is earlier than any weather date, the first
        index will be returned, and if the datetime_obj is later than any weather date, the last index will
        be returned.
        '''
        # initialize with largest possible difference, to ensure differences at least start by decreasing
        prev_diff = datetime.max - datetime.min
        for row_idx, data_series in WU_df.iterrows():
            new_diff = abs(datetime_obj - data_series['Weather Datetime'])
            if prev_diff < new_diff: # if local minima has just been passed
                return row_idx-1 # return index location of minima
            prev_diff = new_diff # else, continue
        return row_idx # if datetime_obj > all weather datetimes, return final index location
    # second helper method in executing merge of MTA_ and WU_dataframes, using above method within it
    # returns closest weather datetime INDEXES corresponding to entire subway datetime series
    # efficient because avoids restarting at start of WU_dataframe datetimes for each MTA datetime comparison search
    def _closest_wu_datetimes(self, WU_df, MTA_df):
        '''
        The strategy here is to again use the chronology of datetimes in both dataframes advantageously. Basically,
        as we iterate through each datetime in the MTA_dataframe, we record what the WU_dataframe index location of
        the previous closest match was. This way, we can start there next time, rather than at the beginning of the
        WU_dataframe for every iteration. This speeds up the process drastically.
        '''
        # defines a Series with an index identical to the MTA_dataframe...
        # but to be filled with the index locations of the closest WU_dataframe datetimes!
        # this is designed in such a way to make merging the WU_dataframe into the MTA_dataframe as simple as possible
        closest_indexes = pd.Series(0, index=MTA_df.index)
        start_of_wu_df = WU_df.index[0]
        prev_wu_idx = start_of_wu_df # initialize 'where we last left off' index to start of WU_dataframe
        # prev_mta_dt necessary to know for when mta datetimes reach end of turnstile unit, and cycle over from first date
        ''' CHANGE: initialize prev_mta_dt to first mta_dt - 4 hours instead of below: '''
        # prev_mta_dt = datetime.min # initialize to datetime smallest value to start
        prev_mta_dt = MTA_df.iloc[0]['Subway Datetime'] - timedelta(hours=4)
        for mta_idx, data_series in MTA_df.iterrows():
            curr_mta_dt = data_series["Subway Datetime"]
            # if subway datetimes cycle to end of loop (i.e.: reached end of turnstile unit, going to next)
            if(prev_mta_dt > curr_mta_dt):
                # start over at beginning of WU_dataframe again
                prev_wu_idx = start_of_wu_df
                ''' NEW ADDITION: reset prev_mta_dt here '''
                prev_mta_dt = curr_mta_dt - timedelta(hours=4)
            # note the .loc[prev_wu_idx:]
            # this has the effect of starting at where last left off in the WU_dataframe, to save time
            ''' CHANGE: use prev_mta_dt instead of curr_mta_dt '''
            closest_wu_idx = self._closest_wu_datetime(WU_df.loc[prev_wu_idx:], prev_mta_dt)
            closest_indexes[mta_idx] = closest_wu_idx
            prev_wu_idx = closest_wu_idx # enable continuation of where last left off
            prev_mta_dt = curr_mta_dt # again, to check if reached end of turnstile unit (when prev_mta_dt becomes greater than curr_mta_dt)
        return closest_indexes
    # third helper method, that simply returns an updated weather df to be concatenated to existing MTA_dataframe
    def _updated_weather_df(self, WU_df, MTA_df):
        corresponding_wu_idxs = self._closest_wu_datetimes(WU_df, MTA_df)
        updated_weather_df = pd.DataFrame(index=corresponding_wu_idxs.index, columns=WU_df.columns)
        # NOTE(review): Series.iteritems() was removed in pandas 2.0; on modern
        # pandas this must become .items().
        for new_idx, wu_idx in corresponding_wu_idxs.iteritems():
            updated_weather_df.iloc[new_idx] = WU_df.iloc[wu_idx]
        return updated_weather_df
    # finally, use all these helper methods to create a final merged dataframe
    def _merge_dataframes(self, MTA_dataframe, WU_dataframe):
        # Column-wise concatenation: each MTA row gains its matched weather row.
        MTA_df = MTA_dataframe.df
        WU_df = WU_dataframe.df
        upd_wu_df = self._updated_weather_df(WU_df, MTA_df)
        self.df = pd.concat([MTA_dataframe.df, upd_wu_df], axis=1)
        return self
| mar467/Turnstile-Weather | tw_dataframes.py | tw_dataframes.py | py | 15,216 | python | en | code | 0 | github-code | 13 |
12496341233 | from pathlib import Path
import numpy as np
# hatch.py
"""Get physics data for EGSnrc run
Hatch is called before simulation begins.
For Photons, hatch calls egs_init_user_photon, which in turn
opens files via egsi_get_data, for compton, photoelectric, pair, triplet,
Rayleigh (depending on the settings) and does corrections on some of the data.
"""
from pathlib import Path
import numpy as np
import logging
# Module-level logger, named after the package.
logger = logging.getLogger("egsnrc")
# Physics data files ship alongside the package in a "data" directory.
DATA_DIR = Path(__file__).resolve().parent / "data"
def get_xsection_table(filename):
    """Parse a cross-section data file into ``{Z: (energies, sigmas)}`` arrays.

    The file holds, for each element Z = 1..100, a count line (optionally
    followed by a ``#`` comment) and then that many (energy, sigma) pairs,
    packed four pairs (eight numbers) per data line.
    """
    with open(filename, "r") as fp:
        raw_lines = fp.readlines()
    tables = {}
    cursor = 0
    for z in range(1, 101):
        n_pairs = int(raw_lines[cursor].split("#")[0])
        cursor += 1
        # Four (energy, sigma) pairs per data line -> ceil(n_pairs / 4) lines.
        n_lines = -(-n_pairs // 4)
        tokens = []
        for offset in range(n_lines):
            tokens.extend(raw_lines[cursor + offset].strip().split())
        flat = np.loadtxt(tokens)
        # Pairs -> two rows: row 0 is energies, row 1 is sigmas.
        tables[z] = flat.reshape((-1, 2)).transpose()
        cursor += n_lines
    return tables
| darcymason/egsnrc | src/egsnrc/hatch.py | hatch.py | py | 1,275 | python | en | code | 5 | github-code | 13 |
73989937297 | ''' Using https://www.alphavantage.co to retrieve stock prices. Requires a unique key, freely availalble.
Sample request: https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=INX&apikey=0KEDXOP6GN0KTIY5
Return result:
{
"Meta Data": {
"1. Information": "Daily Prices (open, high, low, close) and Volumes",
"2. Symbol": "INX",
"3. Last Refreshed": "2019-02-01",
"4. Output Size": "Compact",
"5. Time Zone": "US/Eastern"
},
"Time Series (Daily)": {
"2019-02-01": {
"1. open": "2702.3201",
"2. high": "2716.6599",
"3. low": "2696.8799",
"4. close": "2706.5300",
"5. volume": "3759270000"
},
"2019-01-31": {
"1. open": "2685.4900",
"2. high": "2708.9500",
"3. low": "2678.6499",
"4. close": "2704.1001",
"5. volume": "4917650000"
},
"2019-01-30": {
"1. open": "2653.6201",
"2. high": "2690.4399",
"3. low": "2648.3401",
"4. close": "2681.0500",
"5. volume": "3857810000"
},
....
}
'''
import sys
import certifi
import urllib3
import requests
# NOTE: certifi is currently unused here; kept to avoid changing the file's imports.
# free api key from https://alphavantage.co
api_key = '0KEDXOP6GN0KTIY5'
site_url = 'https://www.alphavantage.co/'
db_url = 'https://market.hamzazafar.co'
#db_url = 'http://172.17.0.3'
#db_url = 'http://market_api'
# Pull the list of symbols the market DB service tracks.
collections_url = db_url + "/symbols"
print(collections_url)
response = requests.get(collections_url)
print("response: " + str(response))
print(response.text)
# one symbol per line
# Current format is "stock_symbol:collection name"
for sym in response.json():
    print(sym)
    request_url = site_url + 'query?function=TIME_SERIES_DAILY&symbol=' + \
        sym + '&apikey=' + api_key
    print('request_url = ' + request_url)
    # ignore certificate validation in request (risky for production code)
    urllib3.disable_warnings()
    try:
        response = requests.get(request_url, verify=False)
    except Exception as e:
        # request failed. need to log appropriately
        print('Exception requesting stock info: ' + str(e))
        sys.exit(1)
    if response.status_code != 200:
        print('request error: ' + str(response.status_code))
        continue
    # extract data
    try:
        result = response.json()
        daily_data = result['Time Series (Daily)']
    except Exception as e:
        # Probably a bad symbol lookup. Skip this symbol rather than falling
        # through with daily_data unset (or stale from a previous iteration),
        # which previously caused a latent NameError.
        print(str(e))
        continue
    # this requires python3.7, which introduced ordered dictionaries.
    # otherwise we have to construct date key (YYYY-MM-DD), which
    # is a pain - have to track weekends/holidays
    try:
        most_recent_date = list(daily_data.keys())[0]
        first_val = list(daily_data.values())[0]
        most_recent_open = first_val['1. open']
        most_recent_high = first_val['2. high']
        most_recent_low = first_val['3. low']
        most_recent_close = first_val['4. close']
        most_recent_volume = first_val['5. volume']
        # BUG FIX: was 'stock_symbol' (undefined -> NameError swallowed by a
        # bare except), and 'Low:' printed the close price instead of the low.
        print('Date: ' + most_recent_date + ' Stock: ' + sym + ' Open: ' + str(
            most_recent_open) + ' High: ' + str(most_recent_high) + ' Low: ' + str(most_recent_low) +
            ' Close: ' + str(most_recent_close) + ' Volume: ' + str(most_recent_volume))
    except (KeyError, IndexError) as e:
        # Narrowed from a bare except: only data-shape problems are expected here.
        print('Exception accessing data')
        continue
    try:
        insert_url = db_url + "/insert/" + sym + "?date=" + most_recent_date + "&close=" + most_recent_close + \
            "&open=" + most_recent_open + "&low=" + most_recent_low + "&high=" + most_recent_high
        print(insert_url)
        response = requests.post(insert_url)
        if response.status_code == 409:
            print('db insert error: conflict')
        elif response.status_code == 405:
            print('db insert error: method not allowed')
            print('response.txt = ' + response.text)
        elif response.status_code != 201:
            print('db insert error =' + str(response.status_code))
            print('response.text = ' + response.text)
        else:
            print('database insert success')
    except Exception as e:
        print('Exception posting to DB ' + str(e))
| yh412467790/market-watch1 | symbol_check.py | symbol_check.py | py | 4,485 | python | en | code | 0 | github-code | 13 |
25578833622 | """
Parse EFSMT instances in SMT-LIB2 files
We provide two differnet implementations
1. Use a customized s-expression parser
2. Use z3's substitution facility
"""
from typing import Tuple
import z3
# Being explicit about Types
# These are plain aliases / tuples of types intended for isinstance() checks,
# not typing-module generics. Note ``List`` deliberately aliases the builtin
# ``list`` here.
Symbol = str
Number = (int, float)
Atom = (Symbol, Number)
List = list
Expr = (Atom, List)
def input_to_list(string: str) -> [str]:
    """
    Parse a .sl file into a list of S-Expressions.

    Tracks parenthesis depth character by character; whenever the depth
    returns to zero with a non-empty buffer, the buffer is flushed as one
    top-level expression. Newlines are dropped; other characters seen at
    depth zero (e.g. spaces between expressions) are emitted as-is.
    """
    depth = 0
    chunks: [str] = []
    buf = ""
    for ch in string:
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
        if ch != "\n":
            buf += ch
        if depth == 0 and buf:
            chunks.append(buf)
            buf = ""
    return chunks
def tokenize(chars: str) -> list:
    """Convert a string of characters into a list of tokens."""
    # Pad parentheses so they split out as their own tokens; a literal
    # quoted blank ('" "') collapses into the single word 'space'.
    spaced = chars.replace('(', ' ( ').replace(')', ' ) ')
    return spaced.replace('" "', 'space').split()
def parse(program: str) -> Expr:
    """Read an S-expression from a string."""
    tokens = tokenize(program)
    return read_from_tokens(tokens)
def read_from_tokens(tokens: List) -> Expr:
    """Read an expression from a sequence of tokens (consumed in place)."""
    if not tokens:
        # Empty stream: silently yield None (callers treat it as a no-op).
        return
    token = tokens.pop(0)
    if token == ')':
        raise SyntaxError('unexpected )')
    if token != '(':
        return atom(token)
    # Opening paren: recursively collect sub-expressions until the matching ')'.
    subexprs = []
    while tokens[0] != ')':
        subexprs.append(read_from_tokens(tokens))
    tokens.pop(0)  # discard the closing ')'
    return subexprs
def atom(token: str) -> Atom:
    """Numbers become numbers; every other token is a symbol."""
    # Try the numeric interpretations narrowest-first; fall back to a symbol.
    for cast in (int, float):
        try:
            return cast(token)
        except ValueError:
            pass
    return Symbol(token)
class EFSMTParser:
    """
    Customized s-expression-based parser for exists-forall SMT-LIB2 problems.

    Motivation: the following implementation can be very slow
    def ground_quantifier(qexpr):
        body = qexpr.body()
        var_list = list()
        for i in range(qexpr.num_vars()):
            vi_name = qexpr.var_name(i)
            vi_sort = qexpr.var_sort(i)
            vi = z3.Const(vi_name, vi_sort)
            var_list.append(vi)
        # the following line can be slow
        body = z3.substitute_vars(body, *var_list)
        return var_list, body
    """
    def __init__(self):
        self.logic = None
        self.exist_vars = []  # e.g., [['y', 'Int'], ['z', 'Int']]
        self.forall_vars = []
        self.fml_body = ""
    def parse_smt2_string(self, inputs: str):
        # Scan the script to populate logic/exist_vars/forall_vars/fml_body,
        # then rebuild a quantifier-free z3 problem from them.
        self.init_symbols(inputs)
        print("Finish internal parsing")
        return self.get_ef_system()
    def parse_smt2_file(self, filename: str):
        # Convenience wrapper: read the file and delegate to parse_smt2_string.
        with open(filename, "r") as f:
            res = f.read()
        return self.parse_smt2_string(res)
    def to_sexpr_misc(self, lines: [str]):
        """
        Recursively flatten a nested token list back into s-expression tokens.
        E.g.,
        ['and', ['=', 'x', 1], ['=', 'y', 1]]
        ['and', ['=', 'x!', ['+', 'x', 'y']], ['=', 'y!', ['+', 'x', 'y']]]
        """
        res = ["("]
        for element in lines:
            if isinstance(element, list):
                for e in self.to_sexpr_misc(element):
                    res.append(e)
            else:
                res.append(str(element))
        res.append(")")
        return res
    def to_sexpr_string(self, lines: [str]):
        # Join the flattened tokens into a single space-separated s-expression.
        return " ".join(self.to_sexpr_misc(lines))
    def init_symbols(self, inputs: str) -> None:
        # Walk each top-level command of the SMT-LIB2 script, recording the
        # logic, the (existential) declared functions, and the assertion.
        lines = input_to_list(inputs)
        for line in lines:
            # TODO: perhaps we should not parse the assertion (because it is
            # converted back to sexpr string after we extract the forall vars
            # print(line)
            slist = parse(line)
            if isinstance(slist, List):
                cmd_name = slist[0]
                if cmd_name == "set-logic":
                    self.logic = slist[1]
                elif cmd_name == "set-info":
                    continue
                elif cmd_name == "declare-fun":
                    var_name = slist[1]
                    var_type = slist[3]
                    self.exist_vars.append([var_name, var_type])
                elif cmd_name == "assert":
                    self.process_assert(slist)
                else:
                    # Any other command (e.g. check-sat) ends the scan.
                    break
    def process_assert(self, slist) -> None:
        """
        Record the universally quantified variables and the formula body.
        slist is of the form ['assert', ['forall', [['y', 'Int'], ['z', 'Int']], [...]]]
        """
        assertion = slist[1]
        # assertions[0] is "forall"
        for var_info in assertion[1]:
            self.forall_vars.append(var_info)
        fml_body_in_list = assertion[2]
        self.fml_body = self.to_sexpr_string(fml_body_in_list)
    def create_vars(self, var_info_list: List):
        # Build z3 constants plus matching declare-fun signature strings for
        # each (name, sort) pair. Only Int, Real, and BitVec sorts are handled.
        z3var_list = []
        sig_str = []
        for var_info in var_info_list:
            # [['y', 'Int'], ['z', 'Int']]
            var_name, var_type = var_info[0], var_info[1]
            # print(var_name, var_type)
            if isinstance(var_type, List):
                # ['x', ['_', 'BitVec', 8]]
                sig_str.append("(declare-fun {0} () {1})".format(var_name, self.to_sexpr_string(var_type)))
                z3var_list.append(z3.BitVec(var_name, int(var_type[2])))
            else:
                sig_str.append("(declare-fun {0} () {1})".format(var_name, var_type))
                if var_type.startswith("I"):  # Int
                    z3var_list.append(z3.Int(var_name))
                elif var_type.startswith("R"):  # Real
                    z3var_list.append(z3.Real(var_name))
                else:
                    print("Error: Unsupported variable type, ", var_type)
        return z3var_list, sig_str
    def get_ef_system(self):
        """
        Return the format of our trivial transition system:
        (exists_vars, forall_vars, quantifier-free body formula).
        """
        exists_vars, exists_vars_sig = self.create_vars(self.exist_vars)
        forall_vars, forall_vars_sig = self.create_vars(self.forall_vars)
        fml_sig_str = exists_vars_sig + forall_vars_sig
        # Reassemble a standalone SMT-LIB2 script with ALL vars free, so z3
        # parses the body without the forall binder.
        fml_str = "\n".join(fml_sig_str) + "\n (assert {} )\n".format(self.fml_body) + "(check-sat)\n"
        print("Finish building fml str")
        # print(fml_str)
        # We assume that there is only one assertion?
        # But for clarity, we check the size of the parsed vector
        fml_vec = z3.parse_smt2_string(fml_str)
        print("Finish building ef problem")
        if len(fml_vec) == 1:
            return exists_vars, forall_vars, fml_vec[0]
        else:
            return exists_vars, forall_vars, z3.And(fml_vec)
def ground_quantifier(qexpr):
    """
    Strip the outermost quantifier of a z3 expression and return
    (exists_vars, forall_vars, body), where forall_vars are the bound
    variables of the quantifier and exists_vars are the remaining free ones.
    Seems this can only handle exists x . fml, or forall x.fml?
    FIXME: it seems that this can be very slow?
    """
    from z3.z3util import get_vars
    body = qexpr.body()
    forall_vars = list()
    for i in range(qexpr.num_vars()):
        # Rebuild a named constant for each de Bruijn-indexed bound variable.
        vi_name = qexpr.var_name(i)
        vi_sort = qexpr.var_sort(i)
        vi = z3.Const(vi_name, vi_sort)
        forall_vars.append(vi)
    # Substitute the free variables in body with the expression in var_list.
    body = z3.substitute_vars(body, *forall_vars)
    exists_vars = [x for x in get_vars(body) if x not in forall_vars]
    return exists_vars, forall_vars, body
class EFSMTZ3Parser:
    """Parse EF-SMT problems by delegating to z3's own SMT-LIB2 parser."""
    def __init__(self):
        self.logic = None
    @staticmethod
    def _collapse(fml_vec):
        # z3 returns an AstVector of assertions; collapse to a single formula
        # when there is exactly one, otherwise keep the vector as-is.
        return fml_vec[0] if len(fml_vec) == 1 else fml_vec
    def parse_smt2_string(self, inputs: str):
        fml = self._collapse(z3.parse_smt2_string(inputs))
        print("Z3 finishes parsing")
        return ground_quantifier(fml)
    def parse_smt2_file(self, filename: str):
        fml = self._collapse(z3.parse_smt2_file(filename))
        print("Z3 finishes parsing")
        return ground_quantifier(fml)
def test_parser():
    """Smoke-test both parser implementations on a small bit-vector example."""
    # NOTE(review): 'lia' is defined but never exercised below; it looks like a
    # spare linear-integer-arithmetic example kept for manual experimentation.
    lia = """
(set-info :status unknown)
(declare-fun x () Int)
(assert
 (forall ((y Int) (z Int) )(let (($x20 (= (+ x y) 3)))
 (or (> x y) $x20 (< (+ y z) 3))))
 )
(check-sat)
    """
    bv = """
; benchmark generated from python API
(set-info :status unknown)
(declare-fun x () (_ BitVec 8))
(assert
 (forall ((y (_ BitVec 8)) (z (_ BitVec 8)) )(or (= x y) (= y z)))
 )
(check-sat)
    """
    ss = EFSMTParser()
    print(ss.parse_smt2_string(bv))
    ss2 = EFSMTZ3Parser()
    print(ss2.parse_smt2_string(bv))
if __name__ == '__main__':
    test_parser()
    exit(0)
    # NOTE(review): everything below is unreachable because of exit(0) above;
    # it looks like leftover scratch code for running against a local file.
    file = "xx.smt2"
    ss = EFSMTParser()
    _, forall_vars, fml = ss.parse_smt2_file(file)
    # sol = z3.Solver()
    # sol.add(z3.ForAll(forall_vars, fml))
    # print(sol.check())
| ZJU-Automated-Reasoning-Group/arlib | arlib/quant/efsmt_parser.py | efsmt_parser.py | py | 8,603 | python | en | code | 6 | github-code | 13 |
21934916388 | import operator
from enum import Enum
from functools import reduce
from typing import Optional, List
from django.db.models import Q
from django.db.models.functions import Lower
from django.shortcuts import get_object_or_404
from django.urls import reverse
from ninja import ModelSchema, NinjaAPI, Field
from ninja.pagination import RouterPaginated
from pydantic import AnyHttpUrl
from holofood.models import (
Sample,
SampleStructuredDatum,
SampleMetadataMarker,
AnalysisSummary,
GenomeCatalogue,
Genome,
ViralCatalogue,
ViralFragment,
Animal,
AnimalStructuredDatum,
)
from holofood.utils import holofood_config
# Single django-ninja API instance; all endpoints below register against it.
# Pagination is applied to every list endpoint via the default router.
api = NinjaAPI(
    title="HoloFood Data Portal API",
    description="The API to browse [HoloFood](https://www.holofood.eu) samples and metadata, "
    "and navigate to datasets stored in public archives. \n\n #### Useful links: \n"
    "- [Documentation](https://ebi-metagenomics.github.io/holofood-database/)\n"
    "- [HoloFood Data Portal home](/)\n"
    "- [HoloFood Project Website](https://www.holofood.eu)\n"
    "- [Helpdesk](https://www.ebi.ac.uk/contact)\n"
    "- [TSV Export endpoints](/export/docs)",
    urls_namespace="api",
    default_router=RouterPaginated(),
    csrf=True,
)
# OpenAPI tag names used to group endpoints in the generated docs.
SAMPLES = "Samples"
ANALYSES = "Analysis Summaries"
GENOMES = "Genomes"
VIRUSES = "Viruses"
class System(Enum):
    """Host-animal production system; values mirror the ``Animal`` model constants."""
    salmon: str = Animal.SALMON
    chicken: str = Animal.CHICKEN
class SampleType(Enum):
    """Sample/dataset type; values mirror the ``Sample`` model constants."""
    metagenomic_assembly: str = Sample.METAGENOMIC_ASSEMBLY
    metagenomic_amplicon: str = Sample.METAGENOMIC_AMPLICON
    metabolomic: str = Sample.METABOLOMIC
    metabolomic_targeted: str = Sample.METABOLOMIC_TARGETED
    histological: str = Sample.HISTOLOGICAL
    host_genomic: str = Sample.HOST_GENOMIC
    iodine: str = Sample.IODINE
    heavy_metals: str = Sample.HEAVY_METALS
    fatty_acids: str = Sample.FATTY_ACIDS
    transcriptomic: str = Sample.TRANSCRIPTOMIC
    meta_transcriptomic: str = Sample.META_TRANSCRIPTOMIC
    inflammatory_markers: str = Sample.INFLAMMATORY_MARKERS
class SampleMetadataMarkerSchema(ModelSchema):
    """API representation of a structured-metadata marker (key)."""
    # Expose the marker's "iri" field under the friendlier name canonical_url.
    canonical_url: str = Field(None, alias="iri")
    class Config:
        model = SampleMetadataMarker
        model_fields = ["name", "type"]
class SampleStructuredDatumSchema(ModelSchema):
    """One structured metadata value attached to a Sample."""
    marker: SampleMetadataMarkerSchema
    class Config:
        model = SampleStructuredDatum
        model_fields = ["marker", "measurement", "units"]
class AnimalStructuredDatumSchema(SampleStructuredDatumSchema):
    """Structured metadata value attached to an Animal (same shape as for Samples)."""
    class Config:
        model = AnimalStructuredDatum
class RelatedAnalysisSummarySchema(ModelSchema):
    """Compact reference to an analysis summary document (title + site URL)."""
    @staticmethod
    def resolve_canonical_url(obj: AnalysisSummary):
        # Link to this portal's own summary-detail page.
        return reverse("analysis_summary_detail", kwargs={"slug": obj.slug})
    canonical_url: str
    class Config:
        model = AnalysisSummary
        model_fields = ["title"]
class AnimalSlimSchema(ModelSchema):
    """Animal without its samples/metadata (used in list responses)."""
    @staticmethod
    def resolve_canonical_url(obj: Animal):
        # Animals are host-level BioSamples records.
        return f"{holofood_config.biosamples.api_root}/{obj.accession}"
    canonical_url: str
    @staticmethod
    def resolve_sample_types(obj: Animal):
        # sample_types is stored as a comma-separated string (may be None).
        if obj.sample_types is None:
            return []
        return obj.sample_types.split(",")
    sample_types: List[str]
    class Config:
        model = Animal
        model_fields = ["accession", "system"]
class SampleSlimSchema(ModelSchema):
    """Sample without its metadata/summaries (used in list responses)."""
    @staticmethod
    def resolve_canonical_url(obj: Sample):
        # Nucleotide-based samples live in ENA; everything else in BioSamples.
        if obj.sample_type in [
            Sample.METAGENOMIC_AMPLICON,
            Sample.METAGENOMIC_ASSEMBLY,
            Sample.HOST_GENOMIC,
        ]:
            # Sample is nucleotide sequence based
            return f"{holofood_config.ena.browser_url}/{obj.accession}"
        else:
            return f"{holofood_config.biosamples.api_root}/{obj.accession}"
    canonical_url: str
    @staticmethod
    def resolve_metagenomics_url(obj: Sample):
        # MGnify link only makes sense for metagenomic sample types.
        return (
            f"{holofood_config.mgnify.api_root}/samples/{obj.accession}"
            if obj.sample_type in [obj.METAGENOMIC_AMPLICON, obj.METAGENOMIC_ASSEMBLY]
            else None
        )
    metagenomics_url: Optional[str]
    @staticmethod
    def resolve_metabolomics_url(obj: Sample):
        # MetaboLights link requires both the type and a recorded study ID.
        return (
            f"{holofood_config.metabolights.api_root}/studies/{obj.metabolights_study}"
            if obj.sample_type == obj.METABOLOMIC and obj.metabolights_study is not None
            else None
        )
    metabolomics_url: Optional[str]
    class Config:
        model = Sample
        model_fields = ["accession", "title", "sample_type", "animal"]
class SampleSchema(SampleSlimSchema):
    """Full Sample representation: slim fields plus metadata and summaries."""
    structured_metadata: List[SampleStructuredDatumSchema]
    analysis_summaries: List[RelatedAnalysisSummarySchema]
class AnimalSchema(AnimalSlimSchema):
    """Full Animal representation: slim fields plus derived samples and metadata."""
    samples: List[SampleSlimSchema]
    structured_metadata: List[AnimalStructuredDatumSchema]
class GenomeCatalogueSchema(ModelSchema):
    """A catalogue of metagenome-assembled genomes (MAGs)."""
    analysis_summaries: List[RelatedAnalysisSummarySchema]
    class Config:
        model = GenomeCatalogue
        model_fields = ["id", "title", "biome", "related_mag_catalogue_id", "system"]
class GenomeSchema(ModelSchema):
    """A genome entry within a catalogue, linked to its MGnify representative."""
    @staticmethod
    def resolve_representative_url(obj: Genome):
        return f"{holofood_config.mgnify.api_root}/genomes/{obj.cluster_representative}"
    representative_url: Optional[str]
    class Config:
        model = Genome
        model_fields = ["accession", "cluster_representative", "taxonomy", "metadata"]
class ViralCatalogueSchema(ModelSchema):
    """A catalogue of viral fragments, tied to a related genome (MAG) catalogue."""
    related_genome_catalogue: GenomeCatalogueSchema
    analysis_summaries: List[RelatedAnalysisSummarySchema]
    @staticmethod
    def resolve_related_genome_catalogue_url(obj: ViralCatalogue):
        # API URL of the genome catalogue this viral catalogue derives from.
        return reverse(
            "api:get_genome_catalogue",
            kwargs={"catalogue_id": obj.related_genome_catalogue_id},
        )
    related_genome_catalogue_url: str
    class Config:
        model = ViralCatalogue
        model_fields = ["id", "title", "biome", "system"]
class ViralFragmentSchema(ModelSchema):
    """A viral sequence fragment located within an assembly contig."""
    # Self-reference: fragments cluster under a representative fragment.
    cluster_representative: Optional["ViralFragmentSchema"]
    host_mag: Optional[GenomeSchema]
    @staticmethod
    def resolve_contig_url(obj: ViralFragment):
        # MGnify contig-browser URL for the fragment's parent contig.
        return f"{holofood_config.mgnify.api_root}/analyses/{obj.mgnify_analysis_accession}/contigs/{obj.contig_id}"
    contig_url: AnyHttpUrl
    @staticmethod
    def resolve_mgnify_analysis_url(obj: ViralFragment):
        return f"{holofood_config.mgnify.api_root}/analyses/{obj.mgnify_analysis_accession}"
    mgnify_analysis_url: AnyHttpUrl
    @staticmethod
    def resolve_gff_url(obj: ViralFragment):
        # Portal-local GFF download for this fragment's annotations.
        return reverse("viral_fragment_gff", kwargs={"pk": obj.id})
    gff_url: str
    class Config:
        model = ViralFragment
        model_fields = [
            "id",
            "contig_id",
            "mgnify_analysis_accession",
            "start_within_contig",
            "end_within_contig",
            "metadata",
            "host_mag",
            "viral_type",
        ]
class AnalysisSummarySchema(RelatedAnalysisSummarySchema):
    """Full analysis summary: title/URL plus every related entity."""
    samples: List[SampleSlimSchema]
    genome_catalogues: List[GenomeCatalogueSchema]
    viral_catalogues: List[ViralCatalogueSchema]
    class Config:
        model = AnalysisSummary
        model_fields = ["title"]
@api.get(
    "/samples/{sample_accession}",
    response=SampleSchema,
    summary="Fetch a single Sample from the HoloFood database.",
    description="Retrieve a single Sample by its ENA accession, including all structured metadata available. ",
    url_name="sample_detail",
    tags=[SAMPLES],
)
def get_sample(request, sample_accession: str):
    """Return one Sample by ENA accession; responds 404 when absent."""
    sample = get_object_or_404(Sample, accession=sample_accession)
    return sample
@api.get(
    "/samples",
    response=List[SampleSlimSchema],
    summary="Fetch a list of Samples.",
    description="Long lists will be paginated, so use the `page=` query parameter to get more pages. "
    "Several filters are available, which mostly perform case-insensitive containment lookups. "
    "Sample metadata are *not* returned for each item. "
    "Use the `/samples/{sample_accession}` endpoint to retrieve those. "
    "Sample metadata *can* be filtered for with `require_metadata_marker=`: this finds samples where "
    "the named metadata marker is present and none of `['0', 'false', 'unknown', 'n/a', 'null]`. "
    "Use `/sample_metadata_markers` to find the exact marker name of interest.",
    tags=[SAMPLES],
)
def list_samples(
    request,
    system: System = None,
    accession: str = None,
    title: str = None,
    sample_type: SampleType = None,
    animal_accession: str = None,
    require_metadata_marker: str = None,
):
    """List Samples, AND-combining whichever query-parameter filters were given."""
    filters = []
    if system:
        filters.append(Q(animal__system__icontains=system.value))
    if accession:
        filters.append(Q(accession__icontains=accession))
    if animal_accession:
        filters.append(Q(animal__accession__icontains=animal_accession))
    if title:
        filters.append(Q(title__icontains=title))
    if sample_type:
        filters.append(Q(sample_type__iexact=sample_type.value))
    if require_metadata_marker:
        # Samples where the marker exists and its measurement is "truthy".
        matching_sample_ids = (
            SampleStructuredDatum.objects.filter(
                marker__name__iexact=require_metadata_marker
            )
            .annotate(measurement_lower=Lower("measurement"))
            .exclude(measurement_lower__in=("0", "false", "unknown", "n/a", "null"))
            .values_list("sample_id", flat=True)
        )
        filters.append(Q(accession__in=matching_sample_ids))
    if not filters:
        return Sample.objects.all()
    combined = filters[0]
    for extra in filters[1:]:
        combined = combined & extra
    return Sample.objects.filter(combined)
@api.get(
    "/animals/{animal_accession}",
    response=AnimalSchema,
    summary="Fetch a single Animal (a host-level BioSample) from the HoloFood database.",
    description="Retrieve a single Animal by its BioSamples accession, including all structured metadata available. ",
    url_name="animal_detail",
    tags=[SAMPLES],
)
def get_animal(request, animal_accession: str):
    """Return one Animal by BioSamples accession; responds 404 when absent."""
    animal = get_object_or_404(Animal, accession=animal_accession)
    return animal
@api.get(
    "/animals",
    response=List[AnimalSlimSchema],
    summary="Fetch a list of Animals (host-level BioSamples).",
    description="Long lists will be paginated, so use the `page=` query parameter to get more pages. "
    "Several filters are available, which mostly perform case-insensitive containment lookups. "
    "Animal metadata are *not* returned for each item. "
    "Use the `/animals/{animal_accession}` endpoint to retrieve those. "
    "Animal metadata *can* be filtered for with `require_metadata_marker=`: this finds animals where "
    # Fixed malformed markdown in the API description: `'null]` was missing
    # its closing quote inside the backticked list.
    "the named metadata marker is present and none of `['0', 'false', 'unknown', 'n/a', 'null']`. "
    "The `require_sample_type=` filter finds only animals where "
    "at least one derived sample of the specified type exists. "
    "Use `/sample_metadata_markers` to find the exact marker name of interest.",
    tags=[SAMPLES],
)
def list_animals(
    request,
    system: System = None,
    accession: str = None,
    require_metadata_marker: str = None,
    require_sample_type: SampleType = None,
):
    """List Animals, optionally filtered; all supplied filters are AND-ed."""
    q_objects = []
    if system:
        q_objects.append(Q(system__icontains=system.value))
    if accession:
        q_objects.append(Q(accession__icontains=accession))
    if require_metadata_marker:
        # Animals qualify when the marker exists and its measurement is not
        # one of the "empty" placeholder values.
        animal_ids_with_metadata = (
            AnimalStructuredDatum.objects.filter(
                marker__name__iexact=require_metadata_marker
            )
            .annotate(measurement_lower=Lower("measurement"))
            .exclude(measurement_lower__in=("0", "false", "unknown", "n/a", "null"))
            .values_list("animal_id", flat=True)
        )
        q_objects.append(Q(accession__in=animal_ids_with_metadata))
    if require_sample_type:
        q_objects.append(Q(sample_types__icontains=require_sample_type.value))
    if not q_objects:
        return Animal.objects.all()
    return Animal.objects.filter(reduce(operator.and_, q_objects))
@api.get(
    "/sample_metadata_markers",
    response=List[SampleMetadataMarkerSchema],
    summary="Fetch a list of structured metadata markers (i.e. keys).",
    description="Each marker is present in the metadata of at least one sample. "
    "Not every sample will have every metadata marker. "
    "Long lists will be paginated, so use the `page=` query parameter to get more pages. "
    "Use `name=` to search for a marker by name (case insensitive partial matches). ",
    tags=[SAMPLES],
)
def list_sample_metadata_markers(
    request,
    name: str = None,
):
    """List metadata markers; `name` performs a case-insensitive contains search."""
    queryset = SampleMetadataMarker.objects.all()
    if name:
        queryset = SampleMetadataMarker.objects.filter(name__icontains=name)
    return queryset
@api.get(
    "/analysis-summaries",
    response=List[AnalysisSummarySchema],
    summary="Fetch a list of Analysis Summary documents.",
    description="Analysis Summary documents are produced by HoloFood partners and collaborators. "
    "Each summary is tagged as involving 1 or more Samples or Catalogues. "
    "Typically these are aggregative or comparative analyses of the Samples. "
    "These are text and graphic documents. "
    "They are not intended for programmatic consumption, so a website URL is returned for each. ",
    tags=[ANALYSES],
)
def list_analysis_summaries(
    request,
):
    # Drafts are hidden: only summaries flagged as published are exposed.
    return AnalysisSummary.objects.filter(is_published=True)
@api.get(
    "/genome-catalogues",
    response=List[GenomeCatalogueSchema],
    summary="Fetch a list of Genome (MAG) Catalogues",
    description="Genome Catalogues are lists of Metagenomic Assembled Genomes (MAGs)"
    "MAGs originating from HoloFood samples are organised into biome-specific catalogues.",
    tags=[GENOMES],
)
def list_genome_catalogues(request):
    # No filters supported: return every catalogue (pagination handled upstream).
    return GenomeCatalogue.objects.all()
@api.get(
    "/genome-catalogues/{catalogue_id}",
    response=GenomeCatalogueSchema,
    summary="Fetch a single Genome Catalogue",
    description="A Genome Catalogue is a list of Metagenomic Assembled Genomes (MAGs)."
    "MAGs originating from HoloFood samples are organised into biome-specific catalogues."
    "To list the genomes for a catalogue, use `/genome-catalogues/{catalogue_id}/genomes`.",
    url_name="get_genome_catalogue",
    tags=[GENOMES],
)
def get_genome_catalogue(request, catalogue_id: str):
    """Return the genome catalogue with the given id, or a 404 response."""
    return get_object_or_404(GenomeCatalogue, id=catalogue_id)
@api.get(
    "/genome-catalogues/{catalogue_id}/genomes",
    response=List[GenomeSchema],
    summary="Fetch the list of Genomes within a Catalogue",
    description="Genome Catalogues are lists of Metagenomic Assembled Genomes (MAGs)."
    "MAGs listed originate from HoloFood samples."
    "Each MAG has also been clustered with MAGs from other projects."
    "Each HoloFood MAG references the best representative of these clusters, in MGnify.",
    tags=[GENOMES],
)
def list_genome_catalogue_genomes(request, catalogue_id: str):
    """Return every genome of the catalogue; 404 if the catalogue is unknown."""
    return get_object_or_404(GenomeCatalogue, id=catalogue_id).genomes.all()
@api.get(
    "/viral-catalogues",
    response=List[ViralCatalogueSchema],
    summary="Fetch a list of Viral (contig fragment) Catalogues",
    description="Viral Catalogues are lists of Viral Sequences,"
    "detected in the assembly contigs of HoloFood samples from a specific biome.",
    tags=[VIRUSES],
)
def list_viral_catalogues(request):
    # No filters supported: return every viral catalogue.
    return ViralCatalogue.objects.all()
@api.get(
    "/viral-catalogues/{catalogue_id}",
    response=ViralCatalogueSchema,
    summary="Fetch a single Viral Catalogue",
    description="A Viral Catalogue is a list of Viral Sequences,"
    "detected in the assembly contigs of HoloFood samples from a specific biome."
    "To list the viral sequences (“fragments”) for a catalogue, use `/viral-catalogues/{catalogue_id}/fragments`.",
    tags=[VIRUSES],
)
def get_viral_catalogue(request, catalogue_id: str):
    """Return the viral catalogue with the given id, or a 404 response."""
    return get_object_or_404(ViralCatalogue, id=catalogue_id)
@api.get(
    "/viral-catalogues/{catalogue_id}/fragments",
    response=List[ViralFragmentSchema],
    summary="Fetch the list of viral fragments (sequences) from a Catalogue",
    description="Viral fragments are sequences predicted to be viral, "
    "found in the assembly contigs of HoloFood samples."
    "The Catalogue’s viral fragments are all from the same biome."
    "Viral sequences are clustered by sequence identity, at a species-level."
    "Both cluster representatives and cluster members are included."
    "Where a viral sequence is found in a related MAG (metagenome assembly genome,"
    " e.g. a bacterial species), this MAG is considered a “host MAG”.",
    tags=[VIRUSES],
)
def list_viral_catalogue_fragments(request, catalogue_id: str):
    """Return every viral fragment of the catalogue; 404 if it is unknown."""
    return get_object_or_404(ViralCatalogue, id=catalogue_id).viral_fragments.all()
| EBI-Metagenomics/holofood-database | holofood/api.py | api.py | py | 17,034 | python | en | code | 0 | github-code | 13 |
42932983950 | """2.Создать новый двумерный массив, исключив из переданного массива совпадающие столбцы.
(Совпадающие столбцы – столбцы, у которых все соответствующие элементы равны друз другу).
При формировании нового массива оставить только первый из каждого набора совпадающих столбцов."""
matrix = [
[0, 3, 4, 5, 4, 5, 4],
[4, 3, 4, 5, 4, 5, 35],
[5, 3, 4, 5, 4, 5, 34],
[1, 3, 4, 5, 4, 5, 4]
]
def answer_2(array) -> list:
    """Return *array* with duplicated columns removed (first occurrence kept).

    The array is transposed so columns become rows, de-duplicated while
    preserving first-seen order, then transposed back.  Rows of the result
    are tuples, as produced by zip().
    """
    transposed = list(zip(*array))
    # dict.fromkeys de-duplicates in O(n) while keeping insertion order;
    # the original `elem not in temp` list scan was O(n^2).
    unique_columns = list(dict.fromkeys(transposed))
    return list(zip(*unique_columns))
print(answer_2(matrix))
| syth0le/practice-coding-of-a-VSU-student | Python/CS_faculty/first/second.py | second.py | py | 1,254 | python | ru | code | 0 | github-code | 13 |
15770372514 | #!/anaconda3/bin/python
print("Content-Type: text/html")
print()
import os, html_sanitizer
def getList():
sanitizer = html_sanitizer.Sanitizer()
files = os.listdir('data')
# 맥OS 특성 상 맨 앞 히든파일 하나 pop으로 제거 (.dataStore 어쩌구 안 생기면 필요없을 수도 있음)
# files.pop(0)
listStr = ''
for item in files:
item = sanitizer.sanitize(item)
listStr = listStr + '<li><a href="index.py?id={name}">{name}</a></li>'.format(name=item)
return listStr
| kyoblee/web1 | view.py | view.py | py | 528 | python | ko | code | 0 | github-code | 13 |
from tkinter import *

# Main application window: a fixed 1280x720 phone-book UI.
root =Tk()
root.title('Телефонная книженция')
root.geometry('1280x720')
# Contact storage; currently unused by the UI code below.
numbers=[]
def new_window():
    """Open a modal 'create contact' dialog and block until it is closed.

    Bug fix: the window must be configured (title, minimum size) and made
    modal *before* wait_window(); in the original order wait_window()
    blocked first, so the title and minsize were only applied after the
    dialog had already been closed.
    """
    win = Toplevel(root)
    win.title('Создание контакта')
    win.minsize(width=600, height=400)
    # Make the dialog modal: grab all input and take keyboard focus.
    win.grab_set()
    win.focus_set()
    # Block until the dialog window is destroyed.
    win.wait_window()
# Action buttons for the phone book.
add_button =Button(root, text='Добавить контакт',background='brown')
del_button =Button(root, text='Удалить контакт')
edit_button =Button(root, text='Изменить контакт')
# Field labels for a contact record.
name=Label(text='Имя')
surname=Label(text='Фамилия')
number=Label(text='Мобильный номер')
keyword=Label(text='Ключевое слово')
# Lay everything out on a grid.
add_button.grid(row=0, column=0, padx=100, pady=0)
del_button.grid(row=4, column=4, padx=75, pady=30)
edit_button.grid(row=4, column=5)
name.grid(row=1, column=4)
surname.grid(row=1, column=5)
number.grid(row=2, column=4, pady=20)
keyword.grid(row=3,column=4)
# Left-clicking the add button opens the modal "create contact" dialog.
add_button.bind('<Button-1>',new_window)
root.mainloop()
import cv2

# Input/output paths for the resize operation.
source = "sunny.jpeg"
destination = "newImage.png"

# percent by which to resize
scale_percent = 400

# read the image
src = cv2.imread(source, cv2.IMREAD_UNCHANGED)
# cv2.imread returns None (it does not raise) when the file is missing or
# unreadable; fail with a clear error instead of an AttributeError on
# src.shape below.
if src is None:
    raise FileNotFoundError(f"Could not read image: {source}")

# calculate the new dimensions
width = int(src.shape[1] * scale_percent / 100)
height = int(src.shape[0] * scale_percent / 100)

# dsize
dsize = (width, height)

# resize the image
output = cv2.resize(src, dsize)

# write the output image to file
cv2.imwrite(destination, output)
13163197801 | import cv2
import numpy as np
import random
def show_labeled_pic(file_path, target_size=600):
    """Visualize a randomly chosen labeled picture with its bounding boxes.

    file_path: path of the file listing train/val/test image filenames.
    target_size: edge length (pixels) of the square display image.
    Labels are read from the image's sibling .txt file as rows of 8 floats
    (normalised cx, cy, w, h, px, py, p3y, ...).
    """
    # pick a random picture from the file list
    with open(file_path) as f:
        data = f.readlines()
    # Bug fix: randint's upper bound is inclusive, so randint(0, len(data))
    # could produce an out-of-range index; randrange excludes len(data).
    index = random.randrange(len(data))
    pic_path = data[index].rstrip()
    print(pic_path)
    img = cv2.imread(pic_path)
    img = np.array(img)
    # pad the original picture to a square (h:w = 1:1)
    h, w, _ = img.shape
    dimension_difference = np.abs(h - w)
    pad1, pad2 = dimension_difference//2, dimension_difference - dimension_difference//2
    pad = ((pad1, pad2), (0, 0), (0, 0)) if h<=w else ((0, 0), (pad1, pad2), (0, 0))
    padded_img = np.pad(img, pad, 'constant', constant_values=127.5) / 255
    # save the padded dimensions
    padded_h, padded_w, _ = padded_img.shape
    # resize the picture to the target size
    resized_img = cv2.resize(padded_img, (target_size, target_size), interpolation=cv2.INTER_AREA)
    labels = np.loadtxt(pic_path.replace('jpg', 'txt').replace('png', 'txt')).reshape(-1, 8)
    # corner coordinates in the original (unpadded) picture
    x1 = (labels[:, 0] - labels[:, 2]/2) * w
    y1 = (labels[:, 1] - labels[:, 3]/2) * h
    x2 = (labels[:, 0] + labels[:, 2]/2) * w
    y2 = (labels[:, 1] + labels[:, 3]/2) * h
    px = (labels[:, 4]) * w
    py = (labels[:, 5]) * h
    p3y = (labels[:, 6]) * h
    # shift by the padding offsets
    x1 += pad[1][0]
    y1 += pad[0][0]
    x2 += pad[1][0]
    y2 += pad[0][0]
    px += pad[1][0]
    py += pad[0][0]
    p3y += pad[0][0]
    # re-normalise the labels against the padded dimensions
    labels[:, 0] = ((x1+x2)/2) / padded_w
    labels[:, 1] = ((y1+y2)/2) / padded_h
    labels[:, 2] *= w / padded_w
    labels[:, 3] *= h / padded_h
    labels[:, 4] = px / padded_w
    labels[:, 5] = py / padded_h
    labels[:, 6] = p3y / padded_h
    # coordinates on the resized display picture
    x1 = (labels[:, 0] - labels[:, 2]/2) * target_size
    y1 = (labels[:, 1] - labels[:, 3]/2) * target_size
    x2 = (labels[:, 0] + labels[:, 2]/2) * target_size
    y2 = (labels[:, 1] + labels[:, 3]/2) * target_size
    p2x = labels[:, 4] * target_size
    p2y = labels[:, 5] * target_size
    p3x = p2x
    p3y = labels[:, 6] * target_size
    # Bug fix: cv2 drawing functions require integer point tuples; the
    # original passed float numpy arrays, which breaks for multi-label
    # files.  Draw each label individually instead.
    labeled_img = resized_img
    for k in range(labels.shape[0]):
        tl = (int(x1[k]), int(y1[k]))
        br = (int(x2[k]), int(y2[k]))
        pt2 = (int(p2x[k]), int(p2y[k]))
        pt3 = (int(p3x[k]), int(p3y[k]))
        labeled_img = cv2.rectangle(labeled_img, tl, br, [255, 0, 0])
        labeled_img = cv2.line(labeled_img, pt2, pt3, [255, 0, 0])
        if p2x[k] < x1[k]:
            labeled_img = cv2.line(labeled_img, pt2, tl, [255, 0, 0])
            labeled_img = cv2.line(labeled_img, pt3, (tl[0], br[1]), [255, 0, 0])
        else:
            labeled_img = cv2.line(labeled_img, pt2, (br[0], tl[1]), [255, 0, 0])
            labeled_img = cv2.line(labeled_img, pt3, br, [255, 0, 0])
    cv2.imshow(pic_path, labeled_img)
    cv2.waitKey(2000)
    cv2.destroyWindow(pic_path)
if __name__ == '__main__':
    # NOTE(review): the list-file path is hard-coded; loops forever showing
    # a random labeled image (2 s each) until interrupted.
    while True:
        show_labeled_pic('C:/Users/wangt/PycharmProjects/3d/train.txt')
| thilius/3D_BBOX_from_2D | KITTI_Dataset/check_dataset_with_labels.py | check_dataset_with_labels.py | py | 3,263 | python | en | code | 8 | github-code | 13 |
7116873954 | #!/usr/bin/env python3
import numpy as np
import rospy
import math
from std_msgs.msg import Empty, Float64
from geometry_msgs.msg import Pose2D
from geometry_msgs.msg import Twist
from controller import Supervisor
TIME_STEP = 10
robot = Supervisor()
# Cruise speed cars in left lane
def callback_speed_cars_left_lane( msg ):
    """ROS callback: store the commanded cruise speed for left-lane cars."""
    global speed_cars_left_lane
    speed_cars_left_lane = msg.data
# Cruise speed cars in right lane
def callback_speed_cars_right_lane( msg ):
    """ROS callback: store the commanded cruise speed for right-lane cars."""
    global speed_cars_right_lane
    speed_cars_right_lane = msg.data
def main():
    """Webots supervisor loop: scatter the traffic cars, then publish every
    vehicle's pose on ROS topics each simulation step."""
    global start, speed_cars_left_lane, speed_cars_right_lane
    print('Starting Controller Supervisor...')
    speed_cars_left_lane = 0.0
    speed_cars_right_lane = 0.0
    # Handles to the ten traffic vehicles defined in the Webots world.
    cars = [robot.getFromDef('vehicle_1'), robot.getFromDef('vehicle_2'),
    robot.getFromDef('vehicle_3'), robot.getFromDef('vehicle_4'), robot.getFromDef('vehicle_5'), robot.getFromDef('vehicle_6'), robot.getFromDef('vehicle_7'), robot.getFromDef('vehicle_8'), robot.getFromDef('vehicle_9'), robot.getFromDef('vehicle_10')]
    tf = []
    i = 0
    # Randomise each car's x position by +/-2 m so every episode differs,
    # then reset its physics so the teleport doesn't carry momentum.
    for car in cars:
        if car is not None:
            tf.append(car.getField("translation"))
            values = tf[i].getSFVec3f()
            #print(i, ")", "Initial:", values)
            rand_val = np.random.uniform(-2,2,1)
            #print("Random number", rand_val)
            values[0] = values[0] + rand_val
            #print("New x value", values[0])
            tf[i].setSFVec3f(values)
            car.resetPhysics()
            i = i + 1
    # The ego vehicle whose pose is published on /self_driving_pose.
    bmw = robot.getFromDef('BMW_X5')
    #linear_velocity_North = cars[0].getField("translation")
    start = False
    rospy.init_node("supervisor_node")
    loop = rospy.Rate(1000/TIME_STEP)
    rospy.Subscriber("/speed_cars_left_lane", Float64, callback_speed_cars_left_lane)
    rospy.Subscriber("/speed_cars_right_lane", Float64, callback_speed_cars_right_lane)
    # One pose publisher per vehicle plus one for the ego car.
    pub_bmw_pose = rospy.Publisher("/self_driving_pose", Pose2D, queue_size=1)
    pub_car_1_pose = rospy.Publisher("/car_1_pose", Pose2D, queue_size=1)
    pub_car_2_pose = rospy.Publisher("/car_2_pose", Pose2D, queue_size=1)
    pub_car_3_pose = rospy.Publisher("/car_3_pose", Pose2D, queue_size=1)
    pub_car_4_pose = rospy.Publisher("/car_4_pose", Pose2D, queue_size=1)
    pub_car_5_pose = rospy.Publisher("/car_5_pose", Pose2D, queue_size=1)
    pub_car_6_pose = rospy.Publisher("/car_6_pose", Pose2D, queue_size=1)
    pub_car_7_pose = rospy.Publisher("/car_7_pose", Pose2D, queue_size=1)
    pub_car_8_pose = rospy.Publisher("/car_8_pose", Pose2D, queue_size=1)
    pub_car_9_pose = rospy.Publisher("/car_9_pose", Pose2D, queue_size=1)
    pub_car_10_pose = rospy.Publisher("/car_10_pose", Pose2D, queue_size=1)
    msg_bmw_pose = Pose2D()
    msg_car_pose = Pose2D()
    print("Supervisor.->Waiting for start signal")
    rospy.wait_for_message("/policy_started", Empty, timeout=50000.0)
    print("Supervisor.->Start signal received")
    while robot.step(TIME_STEP) != -1 and not rospy.is_shutdown():
        i = 0
        for car in cars:
            if car is not None:
                values = tf[i].getSFVec3f()
                msg_car_pose.x = values[0]
                msg_car_pose.y = values[1]
                msg_car_pose.theta = values[2]
                # Left lane (y > 0) cars cruise at the left-lane speed,
                # right lane cars at the right-lane speed.
                if msg_car_pose.y > 0:
                    car.setVelocity([speed_cars_left_lane,0,0, 0,0,0])
                else:
                    car.setVelocity([speed_cars_right_lane,0,0, 0,0,0])
                if i == 0:
                    pub_car_1_pose.publish(msg_car_pose)
                elif i == 1:
                    pub_car_2_pose.publish(msg_car_pose)
                elif i == 2:
                    pub_car_3_pose.publish(msg_car_pose)
                elif i == 3:
                    pub_car_4_pose.publish(msg_car_pose)
                elif i == 4:
                    pub_car_5_pose.publish(msg_car_pose)
                elif i == 5:
                    pub_car_6_pose.publish(msg_car_pose)
                elif i == 6:
                    pub_car_7_pose.publish(msg_car_pose)
                elif i == 7:
                    pub_car_8_pose.publish(msg_car_pose)
                elif i == 8:
                    pub_car_9_pose.publish(msg_car_pose)
                elif i == 9:
                    pub_car_10_pose.publish(msg_car_pose)
                i = i + 1
        # Ego pose: heading derived from the rotation matrix's first column.
        bmw_pose = bmw.getPosition()
        bmw_orient = bmw.getOrientation()
        msg_bmw_pose.x = bmw_pose[0]
        msg_bmw_pose.y = bmw_pose[1]
        msg_bmw_pose.theta = math.atan2(bmw_orient[3], bmw_orient[0])
        #print("x:", msg_bmw_pose.x, "y:", msg_bmw_pose.y, "theta:", msg_bmw_pose.theta, flush = True)
        pub_bmw_pose.publish(msg_bmw_pose)
        loop.sleep()
if __name__ == "__main__":
    try:
        main()
    except rospy.ROSInterruptException:
        # Expected when ROS shuts the node down mid-loop; exit quietly.
        # Anything else should propagate — the original bare `except: pass`
        # silently swallowed every error, including programming mistakes.
        pass
| hector-aviles/ICRA2024 | catkin_ws/src/icra2024/controllers/supervisor_icra/supervisor_icra.py | supervisor_icra.py | py | 5,115 | python | en | code | 1 | github-code | 13 |
42482220564 | #
# bento-box
# E2E Test
#
import pytest
from git import Repo
from math import cos, sin
from bento import types
from bento.sim import Simulation
from bento.utils import to_yaml_proto
from bento.graph.plotter import Plotter
from bento.spec.ecs import EntityDef, ComponentDef
from bento.example.specs import Velocity, Position
# define test components
# Meta: identifying metadata for the car entity (string/int fields).
Meta = ComponentDef(
    name="meta",
    schema={
        "name": types.string,
        "id": types.int64,
        "version": types.int32,
    },
)
# Movement: the car's control state (heading in degrees, scalar speed).
Movement = ComponentDef(
    name="movement",
    schema={
        "rotation": types.float32,
        "speed": types.float64,
    },
)
# Keyboard: one boolean flag per arrow key, consumed by control_sys.
Keyboard = ComponentDef(
    name="keyboard",
    schema={
        "up": types.boolean,
        "down": types.boolean,
        "left": types.boolean,
        "right": types.boolean,
    },
)
@pytest.fixture
def sim(client):
    """Applies the test Simulation to the Engine.

    Builds a two-entity driving sim (a keyboard controls entity and a car
    entity), registers an init graph plus two systems, starts it on the
    engine and returns the running Simulation.
    """
    sim = Simulation(
        name="driving_sim",
        components=[Keyboard, Movement, Velocity, Position, Meta],
        entities=[
            EntityDef(components=[Keyboard]),
            EntityDef(components=[Movement, Velocity, Position, Meta]),
        ],
        client=client,
    )
    # Init graph: zero all key flags and give the car its starting state.
    @sim.init
    def init_sim(g: Plotter):
        controls = g.entity(components=[Keyboard])
        controls[Keyboard].left = False
        controls[Keyboard].right = False
        controls[Keyboard].up = False
        controls[Keyboard].down = False
        car = g.entity(components=[Movement, Velocity, Position, Meta])
        car[Meta].name = "beetle"
        car[Meta].id = 512
        car[Meta].version = 2
        car[Movement].speed = 0.0
        car[Movement].rotation = 90.0
        car[Velocity].x = 0.0
        car[Velocity].y = 0.0
        car[Position].x = 0.0
        car[Position].y = 0.0
    # System 1: translate key presses into steering/speed changes, then
    # clear each consumed key flag.
    @sim.system
    def control_sys(g: Plotter):
        controls = g.entity(components=[Keyboard])
        car = g.entity(components=[Movement, Velocity, Position, Meta])
        acceleration, max_speed, steer_rate = 5.0, 18.0, 10.0
        # steer car
        if controls[Keyboard].left:
            car[Movement].rotation -= steer_rate
            controls[Keyboard].left = False
        elif controls[Keyboard].right:
            car[Movement].rotation += steer_rate
            controls[Keyboard].right = False
        # accelerate/slow down car
        if controls[Keyboard].up:
            car[Movement].speed = g.min(car[Movement].speed + acceleration, max_speed)
            controls[Keyboard].up = False
        elif controls[Keyboard].down:
            car[Movement].speed = g.max(car[Movement].speed - acceleration, 0.0)
            controls[Keyboard].down = False
    # System 2: derive velocity from rotation/speed and integrate position.
    @sim.system
    def physics_sys(g: Plotter):
        # compute velocity from car's rotation and speed
        car = g.entity(components=[Movement, Velocity, Position, Meta])
        # rotation
        heading_x, heading_y = g.cos(car[Movement].rotation), -g.sin(
            car[Movement].rotation
        )
        # speed
        car[Velocity].x = car[Movement].speed * heading_x
        car[Velocity].y = car[Movement].speed * heading_y
        # update car position based on current velocity
        car[Position].x += car[Velocity].x
        car[Position].y += car[Velocity].y
    sim.start()
    return sim
def test_e2e_sim_get_version(client):
    """The engine reports the git commit SHA of the checked-out repository."""
    expected_sha = Repo(search_parent_directories=True).head.object.hexsha
    assert client.get_version() == expected_sha
def test_e2e_sim_apply_sim(sim):
    """Applying the sim populates a non-zero id on every entity."""
    assert all(e.id != 0 for e in sim.entities)
def test_e2e_sim_list_sims(sim, client):
    # check that sim is listed by name as the only registered simulation
    assert client.list_sims()[0] == sim.name
def test_e2e_sim_get_sim(sim, client):
    """Sims can be retrieved by name; unknown names raise LookupError."""
    applied_proto = client.get_sim(sim.name)
    assert to_yaml_proto(applied_proto) == to_yaml_proto(sim.build())
    # pytest.raises replaces the manual has_error flag/try block from the
    # original: the test fails automatically if LookupError is not raised.
    with pytest.raises(LookupError):
        client.get_sim("not_found")
def test_e2e_sim_remove(sim, client):
    """Removing the sim leaves the engine with no registered sims."""
    client.remove_sim(sim.name)
    assert not client.list_sims()
def test_e2e_sim_get_set_attr(sim, client):
    # round-trip (set then get) an attribute of each primitive data type
    controls = sim.entity(components=[Keyboard])
    controls[Keyboard].left = True
    assert controls[Keyboard].left == True
    car = sim.entity(components=[Movement, Velocity, Position, Meta])
    car[Meta].name = "sedan"
    assert car[Meta].name == "sedan"
    car[Meta].version = 10
    assert car[Meta].version == 10
    car[Movement].rotation = -134.2
    # rounding required due to loss of precision when using float32
    assert round(car[Movement].rotation, 4) == -134.2
    car[Movement].speed = 23.5
    assert car[Movement].speed == 23.5
def test_e2e_engine_implict_type_convert(sim, client):
    # test implicit numeric type conversion between component attributes
    car = sim.entity(components=[Movement, Velocity, Position, Meta])
    controls = sim.entity(components=[Keyboard])
    # every attribute starts at numeric 1 so any pairwise assignment
    # should still read back as 1 after conversion
    car[Meta].id = 1
    car[Meta].version = 1
    car[Movement].speed = 1.0
    car[Movement].rotation = 1.0
    # map of numeric data type name => getter returning the attribute
    # stored with that data type
    dtype_attrs = {
        "types.int64": (lambda: car[Meta].id),
        "types.int32": (lambda: car[Meta].version),
        "types.float64": (lambda: car[Movement].speed),
        "types.float32": (lambda: car[Movement].rotation),
    }
    # assign every other dtype's value into each dtype's attribute and
    # verify it still reads back as 1
    for dtype in dtype_attrs.keys():
        other_dtypes = [t for t in dtype_attrs.keys() if t != dtype]
        for other_dtype in other_dtypes:
            value_attr = dtype_attrs[other_dtype]
            if dtype == "types.int64":
                car[Meta].id = value_attr()
            elif dtype == "types.int32":
                car[Meta].version = value_attr()
            elif dtype == "types.float64":
                car[Movement].speed = value_attr()
            elif dtype == "types.float32":
                car[Movement].rotation = value_attr()
            else:
                raise ValueError(f"Data type case not handled: {dtype}")
            actual_attr = dtype_attrs[dtype]
            assert actual_attr() == 1
def test_e2e_sim_step(sim, client):
    """First step runs the init graph; later steps run the systems."""
    # once https://github.com/joeltio/bento-box/issues/34 is fixed.
    # test init
    sim.step()
    # check that values are set correctly by init graph
    controls = sim.entity(components=[Keyboard])
    assert controls[Keyboard].left == False
    assert controls[Keyboard].right == False
    assert controls[Keyboard].up == False
    # Bug fix: the original asserted `.left` twice and never checked `.down`.
    assert controls[Keyboard].down == False
    car = sim.entity(components=[Movement, Velocity, Position, Meta])
    assert car[Meta].name == "beetle"
    assert car[Meta].version == 2
    assert car[Meta].id == 512
    assert car[Movement].speed == 0.0
    assert car[Movement].rotation == 90.0
    assert car[Velocity].x == 0.0
    assert car[Velocity].y == 0.0
    assert car[Position].x == 0.0
    assert car[Position].y == 0.0
    # test running simulation for one step
    controls[Keyboard].up = True
    controls[Keyboard].left = True
    sim.step()
    # test attributes have been updated by system
    assert controls[Keyboard].left == False
    assert controls[Keyboard].up == False
    assert car[Movement].speed == 5
    assert car[Movement].rotation == 80
    # test running the simulation for one more step to exercise other conditional branch
    controls[Keyboard].down = True
    controls[Keyboard].right = True
    sim.step()
    # test attributes have been updated by system
    assert controls[Keyboard].down == False
    assert controls[Keyboard].right == False
    assert car[Movement].speed == 0
    assert car[Movement].rotation == 90
| bentobox-dev/bento-box | e2e/test_e2e.py | test_e2e.py | py | 8,052 | python | en | code | 0 | github-code | 13 |
39066036325 | import pygame
import math
from queue import PriorityQueue
# RGB color constants used for drawing the grid and node states.
RED = (255, 0, 0)
GREEN = (0, 255, 0)
# Bug fix: BLUE was (0, 255, 0), a duplicate of GREEN rather than blue.
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
PURPLE = (128, 0, 128)
ORANGE = (255, 165, 0)
GREY = (128, 128, 128)
TURQUOISE = (64, 224, 208)
class Node():
    """One cell of the pathfinding grid.

    The cell's state (unused, barrier, open, closed, start, end, path) is
    encoded purely in its display color.
    """
    def __init__(self, row, col, width, total_row_count):
        self.row = row
        self.col = col
        # Pixel coordinates of the cell's top-left corner.
        self.x = row * width
        self.y = col * width
        self.color = WHITE  # WHITE marks an unused node
        self.neighbors = []  # filled in by _update_neighbors
        self.width = width  # cell edge length in pixels (for drawing)
        self.total_row_count = total_row_count

    # return row-col position of the Node:
    def _get_clicked_position(self):
        return self.row, self.col

    def _is_barrier(self):
        return self.color == BLACK

    def _reset(self):
        self.color = WHITE

    def _make_start(self):
        self.color = ORANGE

    def _make_closed(self):
        self.color = RED

    def _make_open(self):
        self.color = GREEN

    def _make_barrier(self):
        self.color = BLACK

    def _make_end(self):
        self.color = TURQUOISE

    def _make_path(self):
        self.color = PURPLE

    def __lt__(self, other):
        # Nodes never compare less-than; keeps PriorityQueue tie-breaks stable.
        return False

    # Draw this cell onto the given window surface:
    def _draw(self, win):
        pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.width))

    def _update_neighbors(self, GRID):
        """Collect the non-barrier 4-neighbors (down, up, right, left order)."""
        self.neighbors = []
        bound = self.total_row_count
        for row_step, col_step in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            next_row = self.row + row_step
            next_col = self.col + col_step
            if 0 <= next_row < bound and 0 <= next_col < bound:
                candidate = GRID[next_row][next_col]
                if not candidate._is_barrier():
                    self.neighbors.append(candidate)
# Grid geometry: a square board of ROW_COUNT x ROW_COUNT cells rendered in
# a WIDTH_WINDOW-pixel-wide window.
ROW_COUNT = 50
WIDTH_WINDOW = 800
GRID = []
width_cell = WIDTH_WINDOW // ROW_COUNT
GRID = [[Node(i, j, width_cell, ROW_COUNT) for j in range(ROW_COUNT)] for i in range(ROW_COUNT)]
# width_cell is only needed while constructing the grid.
del width_cell
class Game():
    """Pygame front-end plus A* search over the module-level GRID of Nodes."""
    # Shared display surface, sized WIDTH_WINDOW x WIDTH_WINDOW pixels.
    pygame_window = pygame.display.set_mode((WIDTH_WINDOW, WIDTH_WINDOW))
    def __init__(self):
        # Path endpoints; chosen by the user with the left mouse button.
        self.start_node = None
        self.end_node = None
    def _run_astar_algorithm(self, draw, GRID):
        """Run A* from self.start_node to self.end_node.

        draw: zero-argument callback that repaints the window.
        Returns True when a path was found, False when the open set
        empties without reaching the end node (no path exists).
        """
        node_count = 0  # tie-breaker so the queue never has to compare Nodes
        open_set = PriorityQueue()
        open_set.put((0, node_count, self.start_node))
        came_from = {}
        # g-score: exact cost from the start; f-score: g + heuristic to end.
        gcost = {Node: float("inf") for row in GRID for Node in row}
        gcost[self.start_node] = 0
        fcost = {Node: float("inf") for row in GRID for Node in row}
        fcost[self.start_node] = self._compute_hcost(self.start_node._get_clicked_position(), self.end_node._get_clicked_position())
        # Mirrors open_set's contents for O(1) membership tests.
        open_set_hash = {self.start_node}
        while not open_set.empty():
            # Allow quitting the game while the search is running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
            # Pop the node with the lowest f-score (tuple index 2 is the Node).
            current_node = open_set.get()[2]
            open_set_hash.remove(current_node)
            # Reached the goal: paint the path and stop.
            if current_node == self.end_node:
                self._reconstruct_path(came_from, current_node, draw)
                self.end_node._make_end()
                return True
            # Relax all neighbors (uniform edge cost of 1):
            for neighbor in current_node.neighbors:
                temp_gscore = gcost[current_node] + 1
                # Only update neighbors for which this route is strictly better.
                if temp_gscore < gcost[neighbor]:
                    came_from[neighbor] = current_node
                    gcost[neighbor] = temp_gscore
                    fcost[neighbor] = temp_gscore + self._compute_hcost(neighbor._get_clicked_position(), self.end_node._get_clicked_position())
                    if neighbor not in open_set_hash:
                        node_count += 1
                        open_set.put((fcost[neighbor], node_count, neighbor))
                        open_set_hash.add(neighbor)
                        neighbor._make_open()
            draw()
            # Mark the expanded node as closed (but keep the start color).
            if current_node != self.start_node:
                current_node._make_closed()
        # Bug fix: the original fell off the end returning None; make the
        # "no path exists" outcome explicit.
        return False
    def _compute_hcost(self, current_node_position, end_node_position):
        """Manhattan-distance heuristic.

        F = G + H, where G is the exact cost from the start and H estimates
        the remaining cost.  Manhattan distance never overestimates on a
        4-connected grid (admissible) and is cheaper than euclidean.
        """
        x1, y1 = current_node_position
        x2, y2 = end_node_position
        return abs(x1 - x2) + abs(y1 - y2)
    def _reconstruct_path(self, dict_nodes_came_from, current_node, draw):
        """Walk came_from links back to the start, painting the path."""
        while current_node in dict_nodes_came_from:
            current_node = dict_nodes_came_from[current_node]
            current_node._make_path()
            draw()
    def _draw_grid(self, win, ROW_COUNT, width):
        """Draw thin grey separator lines between the cells."""
        gap = width // ROW_COUNT
        for col in range(ROW_COUNT):
            pygame.draw.line(win, GREY, (0, col * gap), (width, col * gap))
        for row in range(ROW_COUNT):
            pygame.draw.line(win, GREY, (row * gap, 0), (row * gap, width))
    def _draw(self):
        """Repaint every node, then the grid lines, then flip the display."""
        self.pygame_window.fill(WHITE)
        for row in GRID:
            for Node in row:
                Node._draw(self.pygame_window)
        self._draw_grid(self.pygame_window, ROW_COUNT, WIDTH_WINDOW)
        pygame.display.update()
    def _getRowsAndCols(self, clicked_position):
        """Map a pixel position in the window to (row, col) grid indices."""
        cell_width = WIDTH_WINDOW // ROW_COUNT
        y, x = clicked_position
        row = y // cell_width
        col = x // cell_width
        return row, col
    # Function for taking action on a RIGHT mouse click - goal: reset Node.
    def _click_right_mouse(self):
        clicked_position = pygame.mouse.get_pos()
        row, col = self._getRowsAndCols(clicked_position)
        current_node = GRID[row][col]
        current_node._reset()
        # Forget the start/end selection if it was the node just cleared.
        if current_node == self.start_node:
            self.start_node = None
        elif current_node == self.end_node:
            self.end_node = None
        return self.start_node, self.end_node
    # Function for taking action on a LEFT mouse click - goal: set start or
    # end Node, or draw a barrier.
    def _click_left_mouse(self):
        clicked_position = pygame.mouse.get_pos()
        row, col = self._getRowsAndCols(clicked_position)
        current_node = GRID[row][col]
        # First click picks the start, second the end, later clicks draw walls:
        if not self.start_node and current_node != self.end_node:  # Start
            self.start_node = current_node
            self.start_node._make_start()
        elif not self.end_node and current_node != self.start_node:  # End
            self.end_node = current_node
            self.end_node._make_end()
        elif current_node != self.end_node and current_node != self.start_node:  # Draw Walls
            current_node._make_barrier()
        return self.start_node, self.end_node
    # Function is called in case of a keyboard interaction:
    def _press_key_on_keyboard(self, event, GRID):
        """SPACE starts the search (once both endpoints are set); 'c' clears."""
        # Nothing happens on SPACE unless both start and end node are set.
        if event.key == pygame.K_SPACE and self.start_node and self.end_node:
            # Refresh every node's neighbor list before searching:
            for row in GRID:
                for current_node in row:
                    current_node._update_neighbors(GRID)
            self._run_astar_algorithm(
                lambda: self._draw(), GRID)
        # Resetting game:
        if event.key == pygame.K_c:
            self.start_node = None
            self.end_node = None
            # Bug fix: the original called self._make_grid(...), a method
            # that does not exist (AttributeError), and only rebound a
            # local name.  Clear the board by resetting every node in place.
            for row in GRID:
                for current_node in row:
                    current_node._reset()
        return self.start_node, self.end_node
    # NOTE(review): in this extract ``main`` sits at the same indent as the Game
    # methods - presumably it is a module-level entry point in the original
    # file; confirm the indentation there.
    def main():
        """Create the game and run the event loop until the window is closed."""
        pygame.display.set_caption("************* A-STAR Pathfinding Algorithm *************")
        game_instance = Game()
        run = True
        while run:
            # Redraw every frame before processing input.
            game_instance._draw()
            for event in pygame.event.get():
                # In case of closing the window:
                if event.type == pygame.QUIT:
                    run = False
                # Pressing LEFT mouse button:
                if pygame.mouse.get_pressed()[0]:
                    game_instance._click_left_mouse()
                # Pressing RIGHT mouse button:
                elif pygame.mouse.get_pressed()[2]:
                    game_instance._click_right_mouse()
                # Check pressed keyboard buttons for space button:
                # This means NO CLICKING is evaluated anymore until the algorithm found the solution:
                # AFTER that, the game can be quit or __resetted again:
                elif event.type == pygame.KEYDOWN:
                    game_instance._press_key_on_keyboard(event, GRID)
        pygame.quit()
main() | MatthiasHuber-Digital/PythonProgramming | AStarSearchAlgo_Objects_20220205.py | AStarSearchAlgo_Objects_20220205.py | py | 11,761 | python | en | code | 0 | github-code | 13 |
15798415758 | import requests
from collections import Counter
from nltk.corpus import stopwords
import threading
import json
## BOOKS
## Alice in Wonderland by Lewis Carroll
GUTENBERG_URI = "https://www.gutenberg.org/files/11/11-0.txt"
content_type = 'book'
## POEMS don't start until "SELECTED POEMS:" and have copyright text after the poems end
## Robert Frost Poem Collection
# GUTENBERG_URI = "https://www.gutenberg.org/files/59824/59824-0.txt"
# content_type = 'poem'
## Start/end markers are bytes because response.iter_lines() yields bytes lines.
if content_type == 'book':
    startline = b'CHAPTER I'
    endline = b'END OF THE PROJECT GUTENBERG EBOOK'
elif content_type == 'poem':
    startline = b'SELECTED POEMS'
    endline = b'End of the P'
else:
    # NOTE(review): these fallbacks are str while the markers above are bytes,
    # and the stream compares against bytes - confirm this branch is reachable.
    startline = ''
    endline = ''
## Variables always needed for Bag of Words
tokens = Counter()
STOP = stopwords.words("english")
# NOTE(review): 'the' is already part of NLTK's English stopword list.
STOP.append('the')
## GET the target (uri)
response = requests.get(GUTENBERG_URI, stream=True)
## if on windows must add:
response.encoding = "utf-8"
## quick load and make bag of words...
## chunks of 100000 bytes
## for chunk in response.iter_content(chunk_size=100000)
## streaming lines
## for curline in response.iter_lines()
def read_content():
    """Stream the Gutenberg text and build a bag of words in ``tokens``.

    Lines are ignored until the *second* occurrence of ``startline`` (the
    first match is assumed to be the table-of-contents entry); counting stops
    at the first line beginning with ``endline`` (the licence footer).  The
    five most frequent non-stopword tokens are then written to output.txt.
    """
    start_markers_seen = 0  # how many ``startline`` matches we have passed
    for curline in response.iter_lines():
        if not curline.strip():  # skip blank lines
            continue
        if start_markers_seen < 2:
            # Still in the front matter: only watch for the start marker.
            if curline.startswith(startline):
                start_markers_seen += 1
            continue
        if curline.startswith(endline):
            break  # licence/footer reached - stop counting
        for raw_word in curline.lower().split():
            # Bug fix: decode *before* the stopword test.  ``STOP`` holds str
            # entries while ``iter_lines`` yields bytes, so the original
            # comparison never matched and no stopword was ever filtered out.
            word = raw_word.decode()
            if word not in STOP:
                tokens[word] += 1
    with open("output.txt", "w") as text_file:
        # ``tokens`` is already a Counter, so most_common() can be used directly.
        text_file.write("Top Five Phrases:\n" + json.dumps(dict(tokens.most_common(5))))
threading.Thread(target=read_content).start() | orsoknows/gutenberg-bot | streamParser.py | streamParser.py | py | 2,000 | python | en | code | 0 | github-code | 13 |
28151334424 | import requests
# Browser-like headers sent with every ticker request - presumably to avoid
# bot filtering of the default python-requests user agent (TODO confirm).
HEADERS = {
    'user-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:76.0) Gecko/20100101 Firefox/76.0',
    'accept': '*/*',
}
# currency: btc or ltc
def get_cource(currency: str, product_price: int, timeout=None):
    """Return (RUB exchange rate as int, product price converted to *currency*).

    :param currency: ticker symbol understood by apirone, e.g. "btc" or "ltc".
    :param product_price: the product price in RUB (numeric strings tolerated).
    :param timeout: optional requests timeout in seconds; ``None`` keeps the
        original unbounded behaviour, so existing callers are unaffected.
    :raises requests.RequestException: on network failure or timeout.
    """
    cource = requests.get(
        f'https://apirone.com/api/v2/ticker?currency={currency}',
        headers=HEADERS,
        timeout=timeout,
    ).json()['rub']
    # int() tolerates callers that pass the price as a numeric string.
    price_in_crypto = int(product_price) / cource
    # Crypto amounts are rounded to 8 decimals (one satoshi/litoshi).
    return int(cource), round(price_in_crypto, 8)
| bat-py/the_first | crypto_price.py | crypto_price.py | py | 442 | python | en | code | 1 | github-code | 13 |
26164290412 | '''
Author: Shuailin Chen
Created Date: 2021-08-08
Last Modified: 2021-08-31
content: ResNet for domain adaptation purpose
NOTE: these codes do not consider the plugin layers, so it may not suitable for models with plugin layers
'''
import warnings
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer
from mmcv.runner import BaseModule
from mmcv.utils.parrots_wrapper import _BatchNorm
from ..builder import BACKBONES
from ..utils import ResLayerMixBN
from .resnet import BasicBlock, Bottleneck, ResNet
from ..layers import SequentialMixBN
class BasicBlockMixBN(BasicBlock):
    """BasicBlock variant whose norm layers take a ``domain`` argument.

    Identical to :class:`BasicBlock` except that every norm call is routed
    through the domain-specific statistics selected by ``domain``.
    """

    def forward(self, x, domain):
        """Run the residual block, selecting norm statistics for *domain*."""

        def _residual(inp):
            shortcut = inp
            feat = self.conv1(inp)
            feat = self.norm1(feat, domain=domain)
            feat = self.relu(feat)
            feat = self.conv2(feat)
            feat = self.norm2(feat, domain=domain)
            if self.downsample is not None:
                shortcut = self.downsample(inp, domain=domain)
            feat += shortcut
            return feat

        # Gradient checkpointing trades compute for memory when enabled.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_residual, x)
        else:
            out = _residual(x)
        return self.relu(out)
class BottleneckMixBN(Bottleneck):
    """Bottleneck block for ResNet for domain adaptation purpose."""

    def forward_plugin(self, x, plugin_names, domain):
        """Run the plugins named in *plugin_names* sequentially on *x*.

        Bug fix: each plugin now receives the output of the previous one.
        The original passed the untouched input ``x`` to every plugin, so
        with more than one plugin only the last plugin's output survived.
        """
        out = x
        for name in plugin_names:
            out = getattr(self, name)(out, domain=domain)
        return out

    def forward(self, x, domain):
        """Run the bottleneck, selecting norm statistics for *domain*."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out, domain=domain)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names,
                                          domain=domain)
            out = self.conv2(out)
            out = self.norm2(out, domain=domain)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names,
                                          domain=domain)
            out = self.conv3(out)
            out = self.norm3(out, domain=domain)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names,
                                          domain=domain)
            if self.downsample is not None:
                identity = self.downsample(x, domain=domain)
            out += identity
            return out

        # Gradient checkpointing trades compute for memory when enabled.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
@BACKBONES.register_module()
class ResNetMixBN(BaseModule):
    """ResNet backbone for domain adaptation; usage is almost the same as
    ResNet except that ``forward()`` takes an extra ``domain`` argument.

    NOTE: compared with the original ResNet, this version needs changes to
    the ``__init__`` method, so directly inheriting from the ResNet class is
    inconvenient; ``BaseModule`` is used as the parent class instead.
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        18: (BasicBlockMixBN, (2, 2, 2, 2)),
        34: (BasicBlockMixBN, (3, 4, 6, 3)),
        50: (BottleneckMixBN, (3, 4, 6, 3)),
        101: (BottleneckMixBN, (3, 4, 23, 3)),
        152: (BottleneckMixBN, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 in_channels=3,
                 stem_channels=64,
                 base_channels=64,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=False,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 multi_grid=None,
                 contract_dilation=False,
                 with_cp=False,
                 zero_init_residual=True,
                 pretrained=None,
                 init_cfg=None):
        """Build the backbone; parameters mirror mmseg's ResNet."""
        super().__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')

        self.pretrained = pretrained
        self.zero_init_residual = zero_init_residual
        block_init_cfg = None
        # ``pretrained`` and ``init_cfg`` are mutually exclusive ways of
        # specifying initialisation.
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be setting at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is a deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
            # Zero-init the last norm of each residual block so the block
            # starts as an identity mapping.
            block = self.arch_settings[depth][0]
            if self.zero_init_residual:
                if block is BasicBlockMixBN:
                    block_init_cfg = dict(
                        type='Constant',
                        val=0,
                        override=dict(name='norm2'))
                elif block is BottleneckMixBN:
                    block_init_cfg = dict(
                        type='Constant',
                        val=0,
                        override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')

        self.depth = depth
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.multi_grid = multi_grid
        self.contract_dilation = contract_dilation
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels

        self._make_stem_layer_mixbn(in_channels, stem_channels)

        # Build one residual stage (``layer1`` .. ``layer4``) per entry in
        # ``stage_blocks``; each stage halves resolution via ``strides[i]``.
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            if plugins is not None:
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            # multi grid is applied to last layer only
            stage_multi_grid = multi_grid if i == len(
                self.stage_blocks) - 1 else None
            planes = base_channels * 2**i
            res_layer = self.make_res_layer_mixbn(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                multi_grid=stage_multi_grid,
                contract_dilation=contract_dilation,
                init_cfg=block_init_cfg)
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i+1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

        # Number of channels produced by the last stage.
        self.feat_dim = self.block.expansion * base_channels * 2**(
            len(self.stage_blocks) - 1)

    def make_stage_plugins(self, plugins, stage_idx):
        """Make plugins for ResNet's ``stage_idx``-th stage.

        Currently we support inserting 'context_block',
        'empirical_attention_block', 'nonlocal_block' into the backbone like
        ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
        Bottleneck.
        An example of plugins format could be:
        >>> plugins=[
        ...     dict(cfg=dict(type='xxx', arg1='xxx'),
        ...          stages=(False, True, True, True),
        ...          position='after_conv2'),
        ...     dict(cfg=dict(type='yyy'),
        ...          stages=(True, True, True, True),
        ...          position='after_conv3'),
        ...     dict(cfg=dict(type='zzz', postfix='1'),
        ...          stages=(True, True, True, True),
        ...          position='after_conv3'),
        ...     dict(cfg=dict(type='zzz', postfix='2'),
        ...          stages=(True, True, True, True),
        ...          position='after_conv3')
        ... ]
        >>> self = ResNet(depth=18)
        >>> stage_plugins = self.make_stage_plugins(plugins, 0)
        >>> assert len(stage_plugins) == 3
        Suppose 'stage_idx=0', the structure of blocks in the stage would be:
        conv1-> conv2->conv3->yyy->zzz1->zzz2
        Suppose 'stage_idx=1', the structure of blocks in the stage would be:
        conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2
        If stages is missing, the plugin would be applied to all stages.
        Args:
            plugins (list[dict]): List of plugins cfg to build. The postfix is
                required if multiple same type plugins are inserted.
            stage_idx (int): Index of stage to build
        Returns:
            list[dict]: Plugins for current stage
        """
        stage_plugins = []
        for plugin in plugins:
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert stages is None or len(stages) == self.num_stages
            # whether to insert plugin into current stage
            if stages is None or stages[stage_idx]:
                stage_plugins.append(plugin)
        return stage_plugins

    def make_res_layer_mixbn(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayerMixBN``."""
        return ResLayerMixBN(**kwargs)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def _make_stem_layer_mixbn(self, in_channels, stem_channels):
        """Make stem layer for ResNet.

        ``deep_stem`` uses three stacked 3x3 convs (V1c/V1d style); otherwise
        a single 7x7 conv is used.  Both variants are followed by a 3x3
        stride-2 max pool.
        """
        if self.deep_stem:
            self.stem = SequentialMixBN(
                build_conv_layer(
                    self.conv_cfg,
                    in_channels,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(
                self.conv_cfg,
                in_channels,
                stem_channels,
                kernel_size=7,
                stride=2,
                padding=3,
                bias=False)
            self.norm1_name, norm1 = build_norm_layer(
                self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        """Freeze stages param and norm stats."""
        if self.frozen_stages >= 0:
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False

        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x, domain):
        """Forward function; ``domain`` is threaded to every norm layer."""
        if self.deep_stem:
            x = self.stem(x, domain=domain)
        else:
            x = self.conv1(x)
            x = self.norm1(x, domain=domain)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x, domain=domain)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        freezed."""
        super().train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
@BACKBONES.register_module()
class ResNetV1cMixBN(ResNetMixBN):
    """Deep-stem ResNet (V1c variant) with domain-aware norm layers.

    Compared with the default ResNet (V1b), the 7x7 conv in the input stem
    is replaced by three stacked 3x3 convs; downsampling blocks are kept
    unchanged (``avg_down=False``).

    References:
        .. [1] https://arxiv.org/pdf/1812.01187.pdf
    """

    def __init__(self, **kwargs):
        super().__init__(
            deep_stem=True, avg_down=False, **kwargs)
@BACKBONES.register_module()
class ResNetV1dMixBN(ResNetMixBN):
    """Deep-stem ResNet (V1d variant) with domain-aware norm layers.

    Like V1c, the 7x7 stem conv is replaced by three 3x3 convs; additionally
    each downsampling block prepends a stride-2 2x2 average pool and the
    conv's stride is changed to 1 (``avg_down=True``).
    """

    def __init__(self, **kwargs):
        super().__init__(
            deep_stem=True, avg_down=True, **kwargs)
| slchenchn/SAR_build_extract_v2 | mmseg/models/backbones/resnet_mixbn.py | resnet_mixbn.py | py | 15,813 | python | en | code | 0 | github-code | 13 |
12914033773 | import logging
from random import randrange, uniform
import matplotlib.pyplot as plt
from lib.kmeans.template_factory import get_templates
from mpl_toolkits.mplot3d import Axes3D
from pandas import DataFrame
class KMeans:
    """Factory for in-database k-means models.

    All computation is pushed into the database through SQL rendered by a
    driver-specific template set; this class only creates/loads/drops the
    backing tables.
    """

    def __init__(self, db, cod=None):
        # ``db`` is expected to expose execute/execute_query and driver_name.
        self.__db = db
        self.__templates = get_templates(db.driver_name)

    def __generate_sql(self, table_model, table_c, table_x, n, d, k):
        """
        all statements get rendered once and will be reused when needed.
        """
        statements = {
            "select_information": self.__templates.get_select_information(table_model),
            "set_clusters": self.__templates.get_set_clusters(table_c, table_x, d, k),
            "update_table_model": self.__templates.get_update_table_model(table_model, n, table_x),
            "update_table_c": self.__templates.get_update_table_c(table_c, d, k, table_x),
            "select_visualization": self.__templates.get_select_visualization(table_x, d, k),
            "select_silhouette_avg": self.__templates.get_select_silhouette_avg(table_x, d),
        }
        return statements

    def create_ideal_model(self, tablename, feature_names, k_list, model_identifier, normalization=None):
        """
        This creates a model for every element in k_list and calculates the silhouette for every model.
        The model with the best silhouette gets returned.
        The other models get not deleted and can be loaded and used afterwards.
        This is not recommended for huge datasets due to various reasons.
        """
        if self.__db.driver_name == "sqlite":
            raise NotImplementedError("silhouette is not supported with sqlite")
        # Silhouette values lie in [-1, 1], so -10 is a safe "worse than
        # anything" sentinel.
        best_results = -10
        best_k = 0
        for k in k_list:
            result = self.create_model(tablename, feature_names, k, f"{model_identifier}_k{k}", normalization).estimate().get_silhouette_avg()
            logging.info(f"result for k={k}: {result}")
            if(best_results < result):
                best_results = result
                best_k = k
        return self.load_model(f"{tablename}_{model_identifier}_k{best_k}")

    def create_model(self, tablename, feature_names, k, model_identifier, normalization=None):
        """
        This creates the following three tables:
        - table_x: feature columns of source data (normalized if normalization is given) with the additional column and primary key "i"
        - table_c: all cluster centers in a single row
        - table_model: single row with the parameters of the model
        """
        model_name = f"{tablename}_{model_identifier}"
        table_model = f"{model_name}_model"
        table_x = f"{model_name}_x"
        table_c = f"{model_name}_c"
        # Any previous model with the same name is replaced.
        self.drop_model(model_name)
        count_rows_query = self.__templates.get_row_count(tablename)
        n = self.__db.execute_query(count_rows_query)[0][0]
        d = len(feature_names)
        # NOTE(review): sampling with replacement - duplicate start indexes
        # (and thus duplicate initial centroids) are possible, and row index 0
        # can never be chosen; confirm this is intended.
        start_indexes = [randrange(1, n) for _ in range(k)]
        # statements
        statements = {
            "create_table_model": self.__templates.get_create_table_model(table_model, n, d, k, tablename),
            "add_variance_column": self.__templates.get_add_variance_column(table_model),
            "create_table_x": self.__templates.get_create_table_x(normalization, feature_names, d, tablename, table_x),
            "add_cluster_columns": self.__templates.get_add_cluster_columns(table_x),
            "create_table_c": self.__templates.get_create_table_c(d, k, table_c, table_x),
            "init_table_c": self.__templates.get_init_table_c(table_c, table_x, n, d, start_indexes),
        }
        # create and initialize table model
        self.__db.execute(statements["create_table_model"])
        self.__db.execute(statements["add_variance_column"])
        # create and initialize table x
        for statement in statements["create_table_x"]:
            self.__db.execute(statement)
        for statement in statements["add_cluster_columns"]:
            self.__db.execute(statement)
        # create and initialize table c
        self.__db.execute(statements["create_table_c"])
        for statement in statements["init_table_c"]:
            self.__db.execute(statement)
        statements = self.__generate_sql(table_model, table_c, table_x, n, d, k)
        return KMeansModel(self.__db, statements)

    def load_model(self, model_name):
        """
        This returns an already created model which can be used for further training or analysis.
        """
        table_model = f"{model_name}_model"
        table_x = f"{model_name}_x"
        table_c = f"{model_name}_c"
        # n, d and k are read back from the persisted model row.
        select_information = self.__templates.get_select_information(table_model)
        query_result = self.__db.execute_query(select_information)
        n = query_result[0][0]
        d = query_result[0][1]
        k = query_result[0][2]
        statements = self.__generate_sql(table_model, table_c, table_x, n, d, k)
        return KMeansModel(self.__db, statements)

    def drop_model(self, model_name):
        """
        This deletes the three tables of the model.
        """
        tables = ['model', 'x', 'c']
        for table in tables:
            self.__db.execute(self.__templates.get_drop_model(model_name, table))

    def get_model_names(self):
        """
        This returns the names of all existing models. The names can be used for loading or deleting the models.
        """
        select_models = self.__templates.get_select_models()
        rows = self.__db.execute_query(select_models)
        model_names = []
        for row in rows:
            # Strip the "_model" suffix to recover the model name.
            model_names.append(row[0][:-6])
        return model_names
class KMeansModel:
    """Handle to a persisted k-means model (the ``*_model``/``*_x``/``*_c`` tables).

    All heavy lifting happens inside the database via the pre-rendered SQL in
    ``statements``; this class only orchestrates execution and plotting.
    """

    def __init__(self, db, statements):
        self.__db = db
        self.__statements = statements

    def estimate(self, max_steps=100):
        """Continue clustering for at most *max_steps* Lloyd iterations.

        Iteration stops early once the variance stops changing.  Returns
        ``self`` so calls can be chained.
        """
        variance = -1
        step = 0
        while step < max_steps:
            step += 1
            # Assign every point to its nearest centroid.
            for statement in self.__statements["set_clusters"]:
                self.__db.execute(statement)
            self.__db.execute(self.__statements["update_table_model"])
            last_variance = variance
            variance = self.get_information()["variance"]
            logging.info(f"step {step}: variance={variance}")
            # NOTE(review): exact equality of two variance values is the
            # historical convergence test here - confirm it suffices for the
            # database's float type.
            if last_variance == variance:
                break
            # Move each centroid to the mean of its assigned points.
            for statement in self.__statements["update_table_c"]:
                self.__db.execute(statement)
        return self

    def get_information(self):
        """Return the model parameters as a dict.

        Keys:
        - "n": number of rows
        - "d": number of dimensions/features
        - "k": number of clusters
        - "steps": number of already trained iterations
        - "variance": sum of errors divided by the number of rows
        """
        query_result = self.__db.execute_query(self.__statements["select_information"])
        return {
            "n": query_result[0][0],
            "d": query_result[0][1],
            "k": query_result[0][2],
            "steps": query_result[0][3],
            "variance": query_result[0][4]
        }

    def visualize(self, feature_names, axis_order=None):
        """Scatter-plot the clustered data in (up to) three dimensions.

        Fixes over the original: the caller's *feature_names* list is no
        longer mutated (the old code appended "j" to it in place), and two
        unused locals were removed.
        """
        if axis_order is None:
            axis_order = range(len(feature_names))
        query_result = self.__db.execute_query(self.__statements["select_visualization"])
        # Work on a local copy so the caller's list stays untouched; the
        # cluster label column "j" is always the last result column.
        columns = list(feature_names) + ["j"]
        df = DataFrame(query_result, columns=columns)
        x = df[columns].values
        y = df["j"].values
        fig = plt.figure(0, figsize=(9, 6))
        ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=60, azim=270, auto_add_to_figure=False)
        fig.add_axes(ax)
        x_label, x_scatter = self.__get_axis(x, axis_order, columns, 0)
        y_label, y_scatter = self.__get_axis(x, axis_order, columns, 1)
        z_label, z_scatter = self.__get_axis(x, axis_order, columns, 2)
        ax.set_xlabel(x_label)
        ax.set_ylabel(y_label)
        ax.set_zlabel(z_label)
        ax.scatter(x_scatter, y_scatter, z_scatter, c=y, edgecolor='k')
        plt.show()
        return self

    def get_silhouette_avg(self):
        """Compute every point's silhouette in-database and return the mean.

        Not recommended for huge datasets due to the O(n^2) distance work.
        """
        if self.__db.driver_name == "sqlite":
            raise NotImplementedError("silhouette is not supported with sqlite")
        query_result = self.__db.execute_query(self.__statements["select_silhouette_avg"])
        return query_result[0][0]

    def __get_axis(self, x, axis_order, feature_names, axis_index):
        """Return (label, column) for the *axis_index*-th requested axis.

        Axes beyond the requested ones get an empty label and a constant 0.
        """
        if axis_index < len(axis_order):
            label = feature_names[axis_order[axis_index]]
            scatter = x[:, axis_order[axis_index]]
        else:
            label = ''
            scatter = 0
        return label, scatter
| SANElibDevTeam/SANElib | lib/kmeans/kmeans.py | kmeans.py | py | 9,369 | python | en | code | 7 | github-code | 13 |
2451534227 | import heapq
def smallest_arrangement(num: str) -> str:
    """Return the smallest number formed by *num*'s digits with no leading zero.

    Non-zero digits are sorted ascending and every zero is inserted right
    after the smallest non-zero digit (e.g. "3310" -> "1033").  A string
    consisting only of zeros is returned unchanged.
    """
    zeros = "0" * num.count("0")
    digits = sorted(d for d in num if d != "0")
    if not digits:
        # All digits were zeros.
        return zeros
    digits[0] += zeros
    return "".join(digits)


def main() -> None:
    """Read a number and a proposed answer; print the verdict."""
    num = input()
    ans = input()
    print("OK" if smallest_arrangement(num) == ans else 'WRONG_ANSWER')


if __name__ == "__main__":
    main()
6999431143 |
from airflow.models import ID_LEN
from sqlalchemy import Column, Integer, String, DateTime, Boolean, JSON
from airflow_dag_template.sqlalchemy_util import provide_session
from airflow_dag_template.sqlalchemy_util import Base, props
class TaskDefineModel(Base):
    """ORM mapping for the ``l_task_define`` table (Airflow task definitions)."""
    __tablename__ = "l_task_define"
    __table_args__ = {'extend_existing': True}
    # Composite primary key: a task is identified by (task_id, dag_id).
    task_id = Column(String(ID_LEN), primary_key=True)
    dag_id = Column(String(ID_LEN), primary_key=True)
    operator = Column(String(100))
    owner = Column(String(100))
    email = Column(String(500))
    email_on_retry = Column(Boolean)
    email_on_failure = Column(Boolean)
    start_date = Column(DateTime)
    end_date = Column(DateTime)
    trigger_rule = Column(String(50), default='all_success')
    depends_on_past = Column(Boolean, default=False)
    wait_for_downstream = Column(Boolean, default=False)
    schedule_interval = Column(String(100))
    retries = Column('retries', Integer, default=0)
    # Retry delay and execution timeout are stored as plain minute counts.
    retry_delay_num_minutes = Column(Integer)
    execution_timeout_num_minutes = Column(Integer)
    pool = Column(String(50))
    queue = Column(String(256))
    priority_weight = Column(Integer)
    private_params = Column(JSON)
    # DB column comment '是否发布' means "whether published".
    is_publish = Column(Boolean, default=False, comment='是否发布')
    def __repr__(self):
        """Render the instance's attributes as a dict-style string."""
        obj_to_dict = props(self)
        return str(obj_to_dict)
    @classmethod
    @provide_session
    def get_task_define(cls, dag_id, task_id, session=None):
        """Fetch the definition for (*dag_id*, *task_id*) or ``None``.

        ``session`` is injected by the ``provide_session`` decorator.
        """
        task_define = session.query(TaskDefineModel) \
            .filter(TaskDefineModel.task_id == task_id) \
            .filter(TaskDefineModel.dag_id == dag_id) \
            .first()
        return task_define
| itnoobzzy/EasyAirflow | plugins/airflow_dag_template/TaskDefine.py | TaskDefine.py | py | 1,686 | python | en | code | 0 | github-code | 13 |
39751824922 | from typing import Dict
# Third Party Imports
from pubsub import pub
# RAMSTK Package Imports
from ramstk.configuration import RAMSTKUserConfiguration
from ramstk.logger import RAMSTKLogManager
from ramstk.views.gtk3 import Gtk, _
from ramstk.views.gtk3.widgets import RAMSTKWorkView
# RAMSTK Local Imports
from . import UsageProfileTreePanel
class UsageProfileWorkView(RAMSTKWorkView):
    """Display Usage Profiles associated with the selected Revision.
    The attributes of a Usage Profile List View are:
    :cvar _tag: the name of the module.
    :ivar _lst_mnu_labels: the list of labels for the view's pop-up
        menu. The labels are listed in the order they appear in the menu.
    :ivar _lst_tooltips: the list of tooltips for the view's
        toolbar buttons and pop-up menu. The tooltips are listed in the
        order they appear on the toolbar or pop-up menu.
    """
    # Define private dict class attributes.
    # Define private scalar class attributes.
    _tag: str = "usage_profile"
    _tablabel: str = "<span weight='bold'>" + _("Usage\nProfiles") + "</span>"
    _tabtooltip: str = _("Displays usage profiles for the selected revision.")
    # Define public dictionary class attributes.
    # Define public list class attributes.
    # Define public scalar class attributes.
    def __init__(
        self, configuration: RAMSTKUserConfiguration, logger: RAMSTKLogManager
    ) -> None:
        """Initialize an instance of the Usage Profile list view.
        :param configuration: the RAMSTK Configuration class instance.
        :param logger: the RAMSTKLogManager class instance.
        """
        super().__init__(configuration, logger)
        # Initialize private dictionary attributes.
        # Initialize private list attributes.
        # Prepend the insert/delete callbacks so they appear before the
        # save callbacks inherited from the base work view.
        self._lst_callbacks.insert(0, self._do_request_insert_sibling)
        self._lst_callbacks.insert(1, self._do_request_insert_child)
        self._lst_callbacks.insert(2, self._do_request_delete)
        self._lst_icons.insert(0, "insert_sibling")
        self._lst_icons.insert(1, "insert_child")
        self._lst_icons.insert(2, "remove")
        self._lst_mnu_labels = [
            _("Add Sibling"),
            _("Add Child"),
            _("Delete Selected"),
            _("Save Selected"),
            _("Save Profile"),
        ]
        self._lst_tooltips = [
            _(
                "Add a new usage profile entity at the same level "
                "as the currently selected entity."
            ),
            _(
                "Add a new usage profile entity one level below the "
                "currently selected entity."
            ),
            _("Delete the currently selected entity from the usage profile."),
            _("Save changes to the currently selected entity in the usage profile."),
            _("Save changes to all entities at the same level in the usage profile."),
        ]
        # Initialize private scalar attributes.
        self._pnlPanel = UsageProfileTreePanel()
        # Initialize public dictionary attributes.
        # Initialize public list attributes.
        # Initialize public scalar attributes.
        self.__make_ui()
        # Subscribe to PyPubSub messages.
        pub.subscribe(super().do_set_record_id, f"selected_{self._tag}")
    # pylint: disable=unused-argument
    def _do_request_delete(self, __button: Gtk.ToolButton) -> None:
        """Request to delete the selected Usage Profile record.
        :param __button: the Gtk.ToolButton() that called this method.
        :return: None
        """
        # Walk up to the top-level window so the dialog is modal to it.
        _parent = self.get_parent().get_parent().get_parent().get_parent().get_parent()
        _dialog = super().do_raise_dialog(parent=_parent)
        _dialog.do_set_message(
            message=_(
                f"You are about to delete {self._tag} {self.dic_pkeys['record_id']} "
                f"and all data associated with it. Is this really what you want to do?"
            )
        )
        _dialog.do_set_message_type(message_type="question")
        if _dialog.do_run() == Gtk.ResponseType.YES:
            super().do_set_cursor_busy()
            pub.sendMessage(
                f"request_delete_{self._tag}",
                node_id=self.dic_pkeys["record_id"],
            )
        _dialog.do_destroy()
    # pylint: disable=unused-argument
    def _do_request_insert_child(self, __button: Gtk.ToolButton) -> None:
        """Request to add an entity to the Usage Profile.
        :return: None
        """
        super().do_set_cursor_busy()
        _attributes = self.__do_get_usage_profile_ids()
        # A mission gets a mission phase; a mission phase gets an
        # environment; environments are leaves and cannot have children.
        if self._pnlPanel.level == "mission":
            _level = "mission_phase"
            _no_keys = ["environment_id"]
        elif self._pnlPanel.level == "mission_phase":
            _level = "environment"
            _no_keys = []
        else:
            _error = _("An environmental condition cannot have a child.")
            _parent = (
                self.get_parent().get_parent().get_parent().get_parent().get_parent()
            )
            _dialog = super().do_raise_dialog(parent=_parent)
            _dialog.do_set_message(message=_error)
            _dialog.do_set_message_type(message_type="error")
            _dialog.do_run()
            _dialog.do_destroy()
            pub.sendMessage(
                "fail_insert_usage_profile",
                error_message=_error,
            )
            return
        # Strip the ID keys that do not apply to the child level.
        for _key in _no_keys:
            _attributes.pop(_key)
        super().do_set_cursor_busy()
        pub.sendMessage(f"request_insert_{_level}", attributes=_attributes)
    # pylint: disable=unused-argument
    def _do_request_insert_sibling(self, __button: Gtk.ToolButton) -> None:
        """Request to add a sibling entity to the Usage Profile.
        :return: None
        """
        super().do_set_cursor_busy()
        _attributes = self.__do_get_usage_profile_ids()
        # NOTE(review): ``self._tag`` is "usage_profile" at the class level;
        # unless it is reassigned at runtime the mission/mission_phase
        # branches below look unreachable - confirm against do_set_record_id.
        if self._tag == "mission":
            _attributes.pop("mission_phase_id")
            _attributes.pop("environment_id")
        elif self._tag == "mission_phase":
            _attributes.pop("environment_id")
        pub.sendMessage(
            f"request_insert_{self._tag}",
            attributes=_attributes,
        )
    def __do_get_usage_profile_ids(self) -> Dict[str, int]:
        """Read each of the ID columns.
        :return: _attributes
        :rtype: dict
        """
        _attributes = {
            "revision_id": self._revision_id,
            "mission_id": 0,
            "mission_phase_id": 0,
            "environment_id": 0,
            "parent_id": 0,
            "record_id": 0,
        }
        # Columns 1-3 of the selected tree row hold the hierarchy IDs.
        (
            _model,
            _row,
        ) = self._pnlPanel.tvwTreeView.get_selection().get_selected()
        _attributes["mission_id"] = _model.get_value(_row, 1)
        _attributes["mission_phase_id"] = _model.get_value(_row, 2)
        _attributes["environment_id"] = _model.get_value(_row, 3)
        return _attributes
    def __make_ui(self) -> None:
        """Build the user interface for the usage profile list view.
        :return: None
        :rtype: None
        """
        super().do_make_layout()
        super().do_embed_treeview_panel()
        self._pnlPanel.dic_units = (
            self.RAMSTK_USER_CONFIGURATION.RAMSTK_MEASUREMENT_UNITS
        )
        self._pnlPanel.do_load_comboboxes()
        # Keep the handler id so the button-press handler can be blocked later.
        self._pnlPanel.tvwTreeView.dic_handler_id[
            "button-press"
        ] = self._pnlPanel.tvwTreeView.connect(
            "button_press_event", super().on_button_press
        )
        for _element in ["mission", "mission_phase", "environment"]:
            self._pnlPanel.dic_icons[_element] = self._dic_icons[_element] | ReliaQualAssociates/ramstk | src/ramstk/views/gtk3/usage_profile/view.py | view.py | py | 7,751 | python | en | code | 34 | github-code | 13 |
35661276230 | """
https://peps.python.org/pep-0380/ を簡略化したもの
RESULT = yield from EXPR と等価な疑似コード
以下の条件で簡略化
- .throw() や .close() はなし
- 処理できる例外も StopIteration のみ
"""
def yield_from(EXPR):
    """Pseudo-code equivalent of ``RESULT = yield from EXPR`` (PEP 380).

    Simplified: no .throw()/.close() support and only StopIteration is
    handled, so EXPR is expected to be a generator (``send`` is used to
    advance it).  Returns the subgenerator's result, which the caller
    observes as ``StopIteration.value`` -- exactly like real ``yield from``.
    """
    # iter() is used to obtain the iterator _i, so any iterable could be
    # supplied -- but only generators survive the .send() call below.
    _i = iter(EXPR)  # the subgenerator
    try:
        # Prime the subgenerator; the first value it produces becomes the
        # first value _y yielded to our caller.
        _y = next(_i)
    except StopIteration as _e:
        # An immediately exhausted subgenerator delivers its result via the
        # exception's ``value`` attribute -- the simplest form of RESULT.
        _r = _e.value
    else:
        # While this loop runs, the delegating generator is blocked and acts
        # purely as a channel between the caller and the subgenerator.
        while 1:
            # Yield the subgenerator's current value, then wait for the
            # caller to send a value _s back in.
            _s = yield _y
            try:
                # Forward _s to advance the subgenerator.
                _y = _i.send(_s)
            except StopIteration as _e:
                # The subgenerator finished: capture its return value and
                # leave the loop so the delegating generator can finish too.
                _r = _e.value
                break
    # _r is RESULT, the value of the whole ``yield from`` expression.
    # BUGFIX: the original assigned it to a dead local (RESULT = _r) and
    # discarded it; returning it makes it visible as StopIteration.value.
    return _r
# EXPR is only supported when it is a generator (used as the subgenerator).
EXPR = (letter for letter in "ABC")
result = yield_from(EXPR)
# Drain the delegating generator and show everything it produced.
print(list(result))
| kazuma624/fluent-python | 16-coroutine/yield_from0.py | yield_from0.py | py | 2,026 | python | ja | code | 0 | github-code | 13 |
from manejaHelados import ManejadoHelados
from manejaSabores import ManejaSabores
class Menu:
    """Console menu that drives the ice-cream shop manager objects."""

    __cod: int

    def __init__(self, cod = 0):
        self.__cod = cod

    def mostrar_menu(self):
        """Print every available option, one per line."""
        opciones = (
            'Opción 1: Cargar sabores',
            'Opción 2: Registrar venta',
            'Opción 3: Mostrar el nombre de los 5 sabores de helado más pedidos',
            'Opción 4: Ingresar un número de sabor y estimar el total de gramos vendidos',
            'Opción 5: Ingresar por teclado un tipo de helado y mostrar los sabores vendidos en ese tamaño',
            'Opcion 6: Determinar el importe total recaudado por la Heladería, por cada tipo de helado',
            'Opción 0: Finalizar operación',
        )
        for opcion in opciones:
            print(opcion)

    def ejecutar(self, MH:ManejadoHelados, MS:ManejaSabores):
        """Show the menu and dispatch the chosen option until 0 is entered."""
        # Map each menu code to the corresponding manager action.
        acciones = {
            1: lambda: MS.cargar_sabores(),
            2: lambda: MH.registrar_venta(MS),
            3: lambda: MH.mostrar_mas_pedidos(MS),
            4: lambda: MH.gramos_vendidos(),
            5: lambda: MH.vendidos_tamaño(MS),
            6: lambda: MH.mostrar_recaudado(),
        }
        self.mostrar_menu()
        self.__cod = int(input('Ingrese el Código'))
        while self.__cod != 0:
            accion = acciones.get(self.__cod)
            if accion is not None:
                accion()
            self.mostrar_menu()
            self.__cod = int(input('Ingrese el Código'))
| AlePerez2003/Ejercicio2U3 | menu.py | menu.py | py | 1,516 | python | es | code | 0 | github-code | 13 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class IotVspOrgUserAddNotifyUserInfoRequest(object):
    """Value object for the IoT VSP org-user add notification payload."""

    # Serializable field names, in payload order.
    _FIELD_NAMES = ("auth_code", "ext", "msg", "state", "vid")

    def __init__(self):
        self._auth_code = None
        self._ext = None
        self._msg = None
        self._state = None
        self._vid = None

    @property
    def auth_code(self):
        return self._auth_code

    @auth_code.setter
    def auth_code(self, value):
        self._auth_code = value

    @property
    def ext(self):
        return self._ext

    @ext.setter
    def ext(self, value):
        self._ext = value

    @property
    def msg(self):
        return self._msg

    @msg.setter
    def msg(self, value):
        self._msg = value

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        self._state = value

    @property
    def vid(self):
        return self._vid

    @vid.setter
    def vid(self, value):
        self._vid = value

    def to_alipay_dict(self):
        """Serialize every truthy field, delegating to nested model objects."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a dict; an empty/None input yields None."""
        if not d:
            return None
        o = IotVspOrgUserAddNotifyUserInfoRequest()
        for name in IotVspOrgUserAddNotifyUserInfoRequest._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/IotVspOrgUserAddNotifyUserInfoRequest.py | IotVspOrgUserAddNotifyUserInfoRequest.py | py | 2,522 | python | en | code | 241 | github-code | 13 |
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver import ChromeOptions
import time
import requests
import chardet as cd
class brower_scrapy:
    '''
    Drive a real browser through the Selenium automation framework,
    simulating a human operating Chrome.
    '''
    # @function Initialize the instance and build the driver options.
    # @parm(self) brower_scrapy  instance pointer
    # @parm(options) Set  driver option strings (e.g. '--headless')
    def __init__(self, options):
        super().__init__()
        # Collect every requested option into the ChromeOptions object.
        self.chrome_options = ChromeOptions()
        if(len(options) != 0):
            for option in options:
                self.chrome_options.add_argument(option)

    # @function Tear down the instance and release its members.
    def __del__(self):
        del self.chrome_options
        # BUGFIX: self.brower only exists after run_brower() has been
        # called; guard the delete so garbage collection of an unstarted
        # instance cannot raise AttributeError.
        if hasattr(self, 'brower'):
            del self.brower

    # @function Launch the browser.
    # @parm(driver_path) String  path to the chromedriver executable
    def run_brower(self, driver_path):
        self.brower = webdriver.Chrome(executable_path = driver_path, chrome_options = self.chrome_options)

    # @function Open a page.
    # @parm(realm_name) String  domain
    # @parm(route) String  route path
    # @parm(flag) Number  how to open: 0 = new tab, 1 = current tab
    def open_new_page(self, realm_name, route, flag = 1):
        if(flag == 0):
            # BUGFIX: was self.brower.excute_script (typo), which raised
            # AttributeError at runtime; the Selenium method is
            # execute_script.
            self.brower.execute_script('window.open()')
            # NOTE(review): the driver still targets the original tab after
            # window.open(); a switch_to.window() call may be intended here
            # -- confirm against the call sites.
            self.brower.get(realm_name + route)
        elif(flag == 1):
            self.brower.get(realm_name + route)

    # @parm(self) brower_scrapy  instance pointer
    # @parm(_class) String  class name to search for
    # @parm(tag_name) String  XPath expression matched inside each hit
    def find_elements_by_class_and_tag_name(self, _class, tag_name):
        # Gather every tag_name element nested under every _class element.
        result_list = []
        elements_list = self.brower.find_elements_by_class_name(_class)
        for element in elements_list:
            temp_list = element.find_elements_by_xpath(tag_name)
            if(len(temp_list) > 0):
                for temp in temp_list:
                    result_list.append(temp)
        return result_list

    '''
    Locate page elements by their text and click them.
    '''
    def click_tag_by_class_and_text(self, _class, text):
        # Click every element of the given class whose text matches exactly.
        tags = self.brower.find_elements_by_class_name(_class)
        for tag in tags:
            if(tag.text == text):
                tag.click()

    def click_tag_by_tagname_and_text(self, tag_name, text):
        # Click every element of the given tag whose text matches exactly.
        tags = self.brower.find_elements_by_tag_name(tag_name)
        for tag in tags:
            if(tag.text == text):
                tag.click()
class request_scrapy:
    '''
    Issue HTTP requests directly with the requests library.
    '''
    # @function Initialize the instance and the underlying session.
    # @parm(url) String  base URL
    # @parm(headers) Dictionary  request headers (stored on the session)
    # @parm(parms)  query parameters (kept but unused by the request methods)
    # @parm(data)  POST body, used when flag == 1
    # @parm(encoding)  page download encoding, default "utf-8"
    def __init__(self, url=None, headers=None, parms=None, data=None, encoding="utf-8"):
        super().__init__()
        print("\n--------------------")
        self.url = url
        self.parms = parms
        self.data = data
        self.encoding = encoding
        self.session = requests.session()
        self.session.headers = headers
        self.session.keep_alive = False
        self.html = None
        print("request_scrapy初始化成功...")

    # @function Tear down the instance and release its members.
    def __del__(self):
        del self.url
        del self.parms
        # BUGFIX: the original also did ``del self.headers``, but that
        # attribute was never set (headers live on self.session), so every
        # instance raised AttributeError during garbage collection.
        del self.data
        del self.encoding
        print("回收类request_scrapy成功...")
        print("--------------------\n")

    # @function Send the request and return the response.
    # @parm(url) String  URL to request; falls back to the stored self.url
    # @parm(flag) Number  request method flag: 0 = GET (default), 1 = POST
    def get_response(self, url = None, flag=0):
        # BUGFIX: the original overwrote self.url with None whenever the url
        # argument was omitted; only adopt an explicitly supplied url.
        if(url != None):
            self.url = url
        if(self.url == None):
            print("Warning: the url is None.")
            return None
        print("开始发送请求...")
        print("现在时间是:"+time.strftime('%Y-%m-%d %H:%M:%S'))
        try:
            if(flag == 0):
                self.response = self.session.get(self.url)
            elif(flag == 1):
                self.response = self.session.post(self.url, self.data)
            print("收到response,状态码:" + str(self.response.status_code))
            return self.response
        except requests.RequestException as e:
            print(e)
            return None

    # @function Save the downloaded page to disk as an HTML file.
    # @parm(html_path)  directory to write the file into
    def download_html(self, html_path):
        print("开始下载页面:" + self.url)
        # Derive a file name from the last two segments of the URL path.
        temp_ = self.url.split("/")
        file_name = temp_[len(temp_)-2]+temp_[len(temp_)-1]
        # NOTE(review): assumes get_response() succeeded first so that
        # self.response exists -- confirm the call order at the call sites.
        with open(html_path + "/" + file_name, "ba") as file:
            file.write(self.response.content)
        print("页面下载完成...")
# from BeautifulSoup import BeautifulSoup
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re

# Example pages this was pointed at:
# https://arstechnica.com
# http://synthia-dataset.net/download-2/
url_str = 'http://synthia-dataset.net/download-2/'
html_page = urlopen(url_str)
soup = BeautifulSoup(html_page)

# Collect (and echo) every absolute http:// link on the page.
links = []
for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
    links.append(link.get('href'))
    print(link.get('href'))
# print(links)

# Keep only the dataset download links.
# BUGFIX: str.find() returns 0 for a match at the very start of the string,
# so the original ``> 0`` test rejected exactly the links it was meant to
# keep (every collected href starts with http://...); use a substring
# membership test instead.
link_filtered = []
for link in links:
    if 'http://synthia-dataset.net/download/' in link:
        link_filtered.append(link)
print(link_filtered)
| cyoukaikai/ahc_ete | smrc/utils/test/download_dataset.py | download_dataset.py | py | 616 | python | en | code | 2 | github-code | 13 |
import math

# square root
sqrt = math.sqrt(13)

# pow: equivalent to using **
exp = math.pow(2.3, 3)

# absolute value (built-in)
abs_value = abs(-9)

# max number (built-in)
max_value = max(12, 23, 21, 10, 9, -8)

# min number (built-in)
# BUGFIX: this previously called max() and therefore returned 23, not -8.
min_value = min(12, 23, 21, 10, 9, -8)

# trigonometric ratios (try others as required)
sine_value = math.sin(1)  # argument in radians
degree_value = math.degrees(1)  # approx 57.3 degrees
radian_value = math.radians(57.3)  # approx 1 radian

# inverse-sine or arc-sine ~0.52356 rad or 30 deg
# try acos, atan, etc...
arc_sine_value = math.asin(0.5)

# lcm and gcd (math.lcm requires Python 3.9+)
lcm_value = math.lcm(12, 24, 6, 42)  # =168
gcd_value = math.gcd(12, 24, 6, 42)  # =6

# quotient and remainder (built-in)
div_mod_tuple = divmod(13, 2)  # (quotient, remainder)
| vivekanandpv/python-sample-code | py-11-math.py | py-11-math.py | py | 803 | python | en | code | 0 | github-code | 13 |
'''
bitmap通常基于数组来实现,数组的每个元素可看成是一系列二进制数,所有元素组成更大的二进制集合;
python的整数类型为有符号类型,所以一个整数可用位数为31位
'''
import math
class Bitmap():
    """Bitmap backed by a list of ints, using 31 bits per element.

    Value ``num`` is stored at bit ``num % 31`` of element ``num // 31``
    (the classic signed-31-bit layout described in the module docstring).
    """

    def __init__(self, maxLength):
        # Number of array elements needed to hold bits 0..maxLength.
        # BUGFIX: the original used ceil(maxLength/31), which is one element
        # short whenever maxLength is a positive multiple of 31 (e.g.
        # Bitmap(62).set(62) raised IndexError) and yields zero elements for
        # maxLength == 0.
        self.size = maxLength // 31 + 1
        # Initialize every element with all bits cleared.
        self.arr = [0 for i in range(self.size)]

    def calElemIndex(self, num,):
        # Index of the array element holding num (floor division by 31).
        return int(math.floor(num / 31))

    def calBitIndex(self, num):
        # Bit position of num inside its element (num mod 31).
        return num % 31

    def set(self, num):
        # Set bit byteIndex of element elemIndex via OR with (1 << byteIndex).
        elemIndex = self.calElemIndex(num)
        byteIndex = self.calBitIndex(num)
        elem = self.arr[elemIndex]
        self.arr[elemIndex] = elem | (1 << byteIndex)

    def clean(self, num):
        # Clear bit byteIndex of element elemIndex via AND with the mask
        # ~(1 << byteIndex); this is the inverse of set().
        elemIndex = self.calElemIndex(num)
        byteIndex = self.calBitIndex(num)
        elem = self.arr[elemIndex]
        self.arr[elemIndex] = elem & (~(1 << byteIndex))

    def test(self, num):
        # Return True when num's bit is present in the bitmap.
        elemIndex = self.calElemIndex(num)
        byteIndex = self.calBitIndex(num)
        if self.arr[elemIndex] & (1 << byteIndex):
            return True
        return False
def sortArr(arrTest):
    '''
    Sort arrTest using a bitmap.

    Note: only non-negative integers are supported, and duplicate values
    collapse to a single occurrence in the output.
    '''
    upper = max(arrTest)
    bitmap = Bitmap(upper)
    for value in arrTest:
        bitmap.set(value)
    # Walk the full value range in order and keep the set bits.
    return [value for value in range(upper + 1) if bitmap.test(value)]
if __name__ == "__main__":
    # Demo: a bitmap sized for values up to 87 (three 31-bit elements).
    bitmap = Bitmap(87)
    bitmap.set(0)
    bitmap.set(34)  # bit 3 of array element 1, i.e. binary 1000 (decimal 8)
    print('bitmap的数组为: ',bitmap.arr)
    print('测试 34 是否在bitmap中: ',bitmap.test(34))
    print('------------------')
    # Sort (and deduplicate) a sample list through the bitmap.
    arrTest = [45, 2, 78, 35, 67, 90, 879, 0, 340, 123, 46]
    print(sortArr(arrTest))
| DaToo-J/NotesForBookAboutPython | ch9 大数据/bitmapTest.py | bitmapTest.py | py | 2,177 | python | zh | code | 0 | github-code | 13 |
"""empty message
Revision ID: 7ff37bb2fe5e
Revises: f60d63b471d5
Create Date: 2020-07-09 13:31:50.034106
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '7ff37bb2fe5e'
down_revision = 'f60d63b471d5'
branch_labels = None
depends_on = None
def upgrade():
    """Upgrade the schema.

    Creates the correctly spelled 'assigned_assignments' table and the new
    'school_term' table, drops the old 'assigned_assigments' and 'semester'
    tables they supersede, and replaces the free-text subject columns on
    'students_grades' and 'submited_assignments' with integer 'subjectId'
    columns.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Correctly spelled replacement for 'assigned_assigments' (dropped below).
    op.create_table('assigned_assignments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=30), nullable=True),
    sa.Column('subjectId', sa.Integer(), nullable=True),
    sa.Column('note', sa.String(length=80), nullable=True),
    sa.Column('assignmentFile', sa.String(length=80), nullable=True),
    sa.Column('dueDate', sa.DateTime(), nullable=True),
    sa.Column('semesterId', sa.Integer(), nullable=True),
    sa.Column('submittable', sa.Boolean(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # 'school_term' supersedes the 'semester' table dropped below.
    op.create_table('school_term',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('quarter', sa.String(length=20), nullable=False),
    sa.Column('schoolYear', sa.String(length=20), nullable=False),
    sa.Column('current', sa.Boolean(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('quarter', 'schoolYear')
    )
    # Drop each superseded table's index before the table itself.
    op.drop_index('name', table_name='assigned_assigments')
    op.drop_table('assigned_assigments')
    op.drop_index('quarter', table_name='semester')
    op.drop_table('semester')
    # students_grades: the free-text 'subject' becomes an integer 'subjectId'.
    op.add_column('students_grades', sa.Column('subjectId', sa.Integer(), nullable=False))
    op.create_unique_constraint(None, 'students_grades', ['studentId', 'subjectId', 'semesterId'])
    op.drop_column('students_grades', 'subject')
    # submited_assignments: same subject -> subjectId switch, plus the
    # correctly spelled 'assignmentFile' column replacing 'assigmentFile'.
    op.add_column('submited_assignments', sa.Column('assignmentFile', sa.String(length=80), nullable=True))
    op.add_column('submited_assignments', sa.Column('subjectId', sa.Integer(), nullable=False))
    op.drop_index('studentId', table_name='submited_assignments')
    op.create_unique_constraint(None, 'submited_assignments', ['studentId', 'subjectId', 'assignmentName'])
    op.drop_column('submited_assignments', 'assigmentFile')
    op.drop_column('submited_assignments', 'subject')
    # ### end Alembic commands ###
def downgrade():
    """Revert the schema to the previous revision.

    Restores the 'subject'/'assigmentFile' columns, recreates the old
    'semester' and 'assigned_assigments' tables (MySQL-specific
    definitions), and drops the tables and columns added by upgrade().
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the free-text columns on submited_assignments and remove the
    # subjectId-based constraint/columns introduced by upgrade().
    op.add_column('submited_assignments', sa.Column('subject', mysql.VARCHAR(length=100), nullable=False))
    op.add_column('submited_assignments', sa.Column('assigmentFile', mysql.VARCHAR(length=80), nullable=True))
    op.drop_constraint(None, 'submited_assignments', type_='unique')
    op.create_index('studentId', 'submited_assignments', ['studentId', 'subject', 'assignmentName'], unique=True)
    op.drop_column('submited_assignments', 'subjectId')
    op.drop_column('submited_assignments', 'assignmentFile')
    # Restore the free-text 'subject' column on students_grades.
    op.add_column('students_grades', sa.Column('subject', mysql.VARCHAR(length=120), nullable=False))
    op.drop_constraint(None, 'students_grades', type_='unique')
    op.drop_column('students_grades', 'subjectId')
    # Recreate the old 'semester' table.
    op.create_table('semester',
    sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),
    sa.Column('quarter', mysql.VARCHAR(length=20), nullable=False),
    sa.Column('schoolYear', mysql.VARCHAR(length=20), nullable=False),
    sa.Column('current', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False),
    sa.CheckConstraint('(`current` in (0,1))', name='semester_chk_1'),
    sa.PrimaryKeyConstraint('id'),
    mysql_collate='utf8mb4_0900_ai_ci',
    mysql_default_charset='utf8mb4',
    mysql_engine='InnoDB'
    )
    op.create_index('quarter', 'semester', ['quarter', 'schoolYear'], unique=True)
    # Recreate the old (misspelled) 'assigned_assigments' table.
    op.create_table('assigned_assigments',
    sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),
    sa.Column('name', mysql.VARCHAR(length=30), nullable=True),
    sa.Column('dueDate', mysql.DATETIME(), nullable=True),
    sa.Column('semesterId', mysql.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('note', mysql.VARCHAR(length=80), nullable=True),
    sa.Column('assigmentFile', mysql.VARCHAR(length=80), nullable=True),
    sa.Column('submittable', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False),
    sa.Column('subjectId', mysql.INTEGER(), autoincrement=False, nullable=True),
    sa.CheckConstraint('(`submittable` in (0,1))', name='assigned_assigments_chk_1'),
    sa.PrimaryKeyConstraint('id'),
    mysql_collate='utf8mb4_0900_ai_ci',
    mysql_default_charset='utf8mb4',
    mysql_engine='InnoDB'
    )
    op.create_index('name', 'assigned_assigments', ['name'], unique=True)
    # Finally remove the tables that upgrade() created.
    op.drop_table('school_term')
    op.drop_table('assigned_assignments')
    # ### end Alembic commands ###
| haydavid23/cs50FinalProject | migrations/versions/7ff37bb2fe5e_.py | 7ff37bb2fe5e_.py | py | 4,829 | python | en | code | 0 | github-code | 13 |
"""add role and district
Revision ID: 4f2014c21c7d
Revises: f19249efe3d2
Create Date: 2022-05-17 21:47:36.345019
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '4f2014c21c7d'
down_revision = 'f19249efe3d2'
branch_labels = None
depends_on = None
def upgrade():
    """Upgrade the schema.

    Adds nullable string columns 'district' (citymodel) and 'role'
    (usermodel), and makes usermodel.number nullable.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('citymodel', sa.Column('district', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
    op.add_column('usermodel', sa.Column('role', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
    # Relax the NOT NULL constraint on usermodel.number.
    op.alter_column('usermodel', 'number',
               existing_type=sa.VARCHAR(),
               nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert the schema: re-tighten usermodel.number to NOT NULL and drop
    the 'role' and 'district' columns added by upgrade().
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('usermodel', 'number',
               existing_type=sa.VARCHAR(),
               nullable=False)
    op.drop_column('usermodel', 'role')
    op.drop_column('citymodel', 'district')
    # ### end Alembic commands ###
| lewein/FastApiProject | migrations/versions/4f2014c21c7d_add_role_and_district.py | 4f2014c21c7d_add_role_and_district.py | py | 1,082 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.ConnectServerAdaptResult import ConnectServerAdaptResult
class AlipayIserviceCliveConnectCreateResponse(AlipayResponse):
    """Typed response wrapper whose 'value' field is a ConnectServerAdaptResult."""

    def __init__(self):
        super(AlipayIserviceCliveConnectCreateResponse, self).__init__()
        self._value = None

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # Accept either an already-typed model or a plain dict from JSON.
        self._value = (
            value
            if isinstance(value, ConnectServerAdaptResult)
            else ConnectServerAdaptResult.from_alipay_dict(value)
        )

    def parse_response_content(self, response_content):
        response = super(AlipayIserviceCliveConnectCreateResponse, self).parse_response_content(response_content)
        if 'value' in response:
            self.value = response['value']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayIserviceCliveConnectCreateResponse.py | AlipayIserviceCliveConnectCreateResponse.py | py | 933 | python | en | code | 241 | github-code | 13 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/11/24 上午11:06
# @Author : HuangBenHao
import joblib
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import sys
# Artifact paths: the fitted scaler (presumably a MinMaxScaler, per the file
# name -- confirm) and the trained network weights from epoch 88.
scaler_path = r'./best_model/min_max_scaler.pkl'
model_state_dict_path = r'./best_model/_NN_epoch88_1109_16_19_13.pth'
class Config(object):
    """Hyper-parameters for the relation classifier."""
    input_dim = 12
    relation_type = 11
    hidden_dim = 128


config = Config()


class NN(nn.Module):
    def __init__(self, config: Config):
        """
        Three-layer fully connected network (MLP).
        :param config: configuration object
        """
        super(NN, self).__init__()
        in_dim = config.input_dim
        hid_dim = config.hidden_dim
        out_dim = config.relation_type
        self.input_dim = in_dim
        self.output_dim = out_dim
        self.hidden_dim = hid_dim
        # The attribute names fc1/fc2/fc3 must stay fixed: the saved
        # state_dict on disk is keyed by them.
        self.fc1 = nn.Linear(in_dim, hid_dim)
        self.fc2 = nn.Linear(hid_dim, hid_dim)
        self.fc3 = nn.Linear(hid_dim, out_dim)

    def forward(self, input_data):
        hidden = F.relu(self.fc1(input_data))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def predict(data):
    """Scale the raw feature rows and return the argmax class index."""
    # Reload the persisted scaler and network weights on every call.
    scaler = joblib.load(scaler_path)
    net = NN(config)
    net.load_state_dict(torch.load(model_state_dict_path))
    features = torch.tensor(scaler.transform(data).astype(np.float32))
    return torch.argmax(net(features)).item()
if __name__ == '__main__':
    # CLI entry point: each command-line argument is one feature value.
    print(predict([[float(i) for i in sys.argv[1:]]]))
| Lazzben/human-body-classification | predict2.py | predict2.py | py | 1,474 | python | en | code | 0 | github-code | 13 |
import os
import json
import requests
# Request headers that present the crawler as a desktop Chrome browser;
# Origin/Referer point at y.qq.com (presumably checked by the QQ Music
# endpoints -- confirm before removing).
headers = {
    'Origin': 'https://y.qq.com',
    'Referer': 'https://y.qq.com/portal/search.html',
    'Sec-Fetch-Mode': 'cors',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'
}
def get_music_info():
    """Prompt for a search term, query the QQ Music search API, and return
    a list of (song name, singer name, songmid, media_mid) tuples."""
    name = input('请输入歌手或歌曲:')
    page = input('请输入页码:')
    num = input('请输入当前页码需要返回的数据条数:')
    url = f'https://c.y.qq.com/soso/fcgi-bin/client_search_cp?p={page}&n={num}&w={name}'
    response = requests.get(url,headers=headers).text
    # The endpoint answers with JSONP ("callback({...})"); slice off the
    # wrapper to recover the JSON payload, then parse it.
    music_data = json.loads(response[9:-1])
    music_info_list = []
    for music in music_data['data']['song']['list']:
        music_info_list.append(
            (music['songname'], music['singer'][0]['name'], music['songmid'], music['media_mid'])
        )
    return music_info_list
def get_purl(music_info_list):
    """Resolve each songmid to a playable media URL via the vkey service."""
    music_data = []
    for music_name, singer_name, songmid, _media_mid in music_info_list:
        # The uid can be omitted here; the guid/uin pair below is enough.
        url = 'https://u.y.qq.com/cgi-bin/musicu.fcg?data={"req":{"module":"CDN.SrfCdnDispatchServer","method":"GetCdnDispatch","param":{"guid":"703417739","calltype":0,"userip":""}},"req_0":{"module":"vkey.GetVkeyServer","method":"CgiGetVkey","param":{"guid":"703417739","songmid":["%s"],"songtype":[0],"uin":"1094013271","loginflag":1,"platform":"20"}},"comm":{"uin":"1094013271","format":"json","ct":24,"cv":0}}'%songmid
        response = requests.get(url,headers=headers).json()
        purl = response['req_0']['data']['midurlinfo'][0]['purl']
        music_data.append({
            'music_name': music_name,
            'singer_name': singer_name,
            'full_media_url': 'http://dl.stream.qqmusic.qq.com/' + purl,
        })
    return music_data
def save_music_mp3(music_data):
    """Download every resolved media URL into the 歌曲下载/ directory."""
    if not os.path.exists('歌曲下载'):
        os.mkdir('歌曲下载')
    for music in music_data:
        audio = requests.get(music['full_media_url'],headers=headers).content
        with open('歌曲下载/%s-%s.mp3'%(music['music_name'],music['singer_name']), 'wb') as fp:
            fp.write(audio)
        print('[%s]保存成功!'%music['music_name'])
if __name__ == '__main__':
    # Pipeline: search -> resolve stream URLs -> download the MP3 files.
    # (The last line of this block was fused with extraction junk that made
    # the file syntactically invalid; rewritten clean.)
    music_info_list = get_music_info()
    music_data = get_purl(music_info_list)
    save_music_mp3(music_data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.