seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
42984459241 | """ changes metadata for given path / file """
import sys
import argparse
from pathlib import Path
import os
from tools import img_file_info_xls as im

# metadata_information
# LENS_INFO: shortcut name -> EXIF attribute dict. NOTE(review): unused below --
# lens selection is driven interactively from im.EXIF_LENSES instead; confirm
# whether this constant is still needed.
LENS_INFO={"trio":im.EXIF_LENS_LENSBABY_TRIO}

# Interactive lens selection: show every known lens with a zero-padded index
# and build a map from index string -> EXIF attribute dict.
lens_dict ={}
print("Add EXIF: Choose Lens Info:")
for i,lens in enumerate(im.EXIF_LENSES):
    print(f"[{str(i).zfill(2)}]: {lens}")
    lens_dict[str(i)]=im.EXIF_LENSES[lens]
l_index=input("Enter Lens # (blank=0): ")
if not l_index:
    # empty input defaults to the first lens in the list
    l_index="0"
lens_info=lens_dict.get(l_index)
if not lens_info:
    # anything that is not a listed index aborts the run
    print("No valid lens info selected")
    sys.exit(1)
print(f"Lens Data: {lens_info}")

# Command line: start path, save / no-save, debug / no-debug, exiftool binary.
parser = argparse.ArgumentParser()
parser.add_argument("--path","-p",default=".",help="StartPath",metavar='File Path')
parser.add_argument('--save',"-s", dest='save', action='store_true',help="Save")
parser.add_argument('--no-save',"-nos", dest='save', action='store_false',help="Do not save")
parser.set_defaults(save=True)
parser.add_argument('--debug',"-c", dest='debug', action='store_true',help="Show debug Info")
parser.add_argument('--no-debug',"-nc", dest='debug', action='store_false',help="Do not show debug info")
parser.set_defaults(debug=False)
parser.add_argument("--exiftool","-et",default="exiftool.exe",help="Exiftool Executable",metavar='Exiftool Executable')
args = parser.parse_args()
print(f"Arguments {args}")
p=args.path
save=args.save
debug=args.debug
exiftool=args.exiftool
if os.path.isdir(p):
    # normalize to an absolute path before handing it to exiftool
    p=str(Path(p).absolute())
    print(f"Using Path {p}")
    pass
else:
    print(f"{p} is not a valid path")
    sys.exit()
# delegate the actual EXIF update for the whole path to the shared tools module
im.change_metadata(p,exif_attribute_dict=lens_info,save=save,exiftool=exiftool,debug=debug) | aiventures/tools_cmd | img_change_metadata.py | img_change_metadata.py | py | 1,682 | python | en | code | 0 | github-code | 90 |
16466667963 | import pytest
import betamax
import time
import string
import random
from datetime import date
from os import path
from moco_wrapper import moco
from moco_wrapper.util.requestor import NoRetryRequestor
from moco_wrapper.util.objector import NoErrorObjector, DefaultObjector
class IntegrationTest(object):
    """
    Base class shared by all integration tests.

    Integration tests verify that the requests we generate are actually sent
    out correctly and that the responses can be parsed back into real objects.
    """

    def setup(self):
        """Read pytest placeholder flags, then build the moco client and recorder."""
        # export mocotest_delay=1 to activate a pause between test methods
        self.delay_tests_enabled = pytest.placeholders.mocotest_delay == "1"
        self.use_proxy = pytest.placeholders.mocotest_useproxy == "1"
        self.setup_moco()
        self.setup_betamax()

    def setup_betamax(self):
        """Attach a betamax cassette recorder to the moco client's HTTP session."""
        self.recorder = betamax.Betamax(self._moco.session)

    def setup_moco(self):
        """Create the moco client from the pytest placeholder credentials."""
        self._moco = moco.Moco(
            auth={
                "api_key": pytest.placeholders.mocotest_apikey,
                "domain": pytest.placeholders.mocotest_domain
            },
            requestor=NoRetryRequestor(),
            objector=DefaultObjector(),
        )
        if self.use_proxy:
            self.enable_proxy()

    def enable_proxy(self):
        """Route all HTTPS traffic through a local debugging proxy."""
        self._moco.requestor.session.proxies = {
            "https": "127.0.0.1:8080"
        }
        # the proxy re-signs TLS, so certificate verification must be disabled
        self._moco.requestor.session.verify = False

    def id_generator(self, size=10, chars=string.ascii_uppercase + string.digits):
        """Return a random string of *size* characters drawn from *chars*."""
        picked = []
        for _ in range(size):
            picked.append(random.choice(chars))
        return ''.join(picked)

    def create_random_date(self):
        """
        Return a random date between 2010 and 2020.

        NOTE(review): the half-open ranges mean year 2010-2019, month 1-11
        (December never chosen) and day 1-27 -- confirm this is intended.
        """
        year = random.choice(range(2010, 2020, 1))
        month = random.choice(range(1, 12, 1))
        day = random.choice(range(1, 28))
        return date(year, month, day)

    @property
    def moco(self):
        """The configured moco client under test."""
        return self._moco

    def teardown_method(self, method):
        """
        Enable this if you want to wait between each method call (default is 5 seconds)
        """
        if self.delay_tests_enabled:
            time.sleep(5)
| sommalia/moco-wrapper | tests/integration/integration_base.py | integration_base.py | py | 2,209 | python | en | code | 2 | github-code | 90 |
36975061114 | import numpy as np
from viztools.tools.colors import get_gyor_color_gradient
from PIL import Image
from scipy.ndimage import zoom
import geopy.distance
import matplotlib as mpl
from matplotlib import cm
def _resize_mat(mat, size):
"""
size: new size - (rows, cols)
"""
assert mat.ndim == 2
rows, cols = mat.shape
rows_zm = size[0] / rows
cols_zm = size[1] / cols
return zoom(mat, [rows_zm, cols_zm])
def _var_to_alpha(var, opac95=3, opac05=12):
"""
var: the 2D variance matrix
opac95: threshold we want 95% opacity
opac05: threshold we want 05% opacity
Note: Using a reflected sigmoid to do the calculation
"""
assert opac95 < opac05
opac50 = opac95 + (opac05 - opac95) / 2
s1 = 2.944444 * 2
scale = s1 / (opac05 - opac95)
out = 1 - (1. / (1. + np.exp(-scale * (var - opac50))))
return (256 * out).clip(0, 255).astype(np.uint8)
def _snapshot_to_img(obj, size=None, format='png', scaling='epa', opac95=3, opac05=12, colormap='auto'):
    """
    Render an estimate-map snapshot as an RGBA PIL image.

    obj: the estimate map object (attributes used here: vals, vars)
    size: optional resize target as (width, height)
    format: png, jpg, etc. (NOTE(review): not used in this function)
    scaling: the color gradient scaling
        "epa": anchors at 12, 35, etc.
        "aqi": anchors at 50, 100, 150, ...
        "linear": smooth scaling from green to purple
    opac95: 95% opacity value for alpha
    opac05: 05% opacity value for alpha
    colormap: 'auto' for the EPA color gradient, else a matplotlib colormap name
    """
    pm = np.array(obj.vals)
    var = np.array(obj.vars)
    rgba = np.zeros((*pm.shape, 4), dtype=np.uint8)
    if colormap == 'auto':
        gradient = get_gyor_color_gradient('epa')
        # Convert the z-values to RGB by indexing the gradient table with the
        # truncated integer PM value (assumes pm values are valid gradient
        # indices -- TODO confirm upstream clipping)
        g = np.array(gradient)
        pmi = pm.astype(int)
        for i in range(3):
            rgba[:, :, i] = g[:, i][pmi]
    else:
        # NOTE(review): cm.get_cmap is deprecated in newer matplotlib
        # (matplotlib.colormaps[name] is the replacement)
        cmap = cm.get_cmap(colormap)
        norm = mpl.colors.Normalize(vmin=0, vmax=40)
        # NOTE(review): this rebinds rgba to a float array; if size is None it
        # is never cast back to uint8 before Image.fromarray -- verify
        rgba = cmap(norm(pm)) * 256
    # Alpha scaling: higher variance -> more transparent
    rgba[:, :, -1] = _var_to_alpha(var, opac95, opac05)
    if size:
        # PIL-style (width, height) -> numpy (rows, cols)
        sz = size[::-1]
        resized = np.zeros((*sz, 4))
        for i in range(4):
            resized[:, :, i] = _resize_mat(rgba[:, :, i], sz)
        rgba = resized.clip(0, 255).astype(np.uint8)
    # flip over x-axis for latitude getting bigger as we go up
    # rgba = rgba[::-1, :, :]
    img = Image.fromarray(rgba, "RGBA")
    return img
def _snapshot_to_img_dist_scaled(obj,
                                 largest_size=None,
                                 format='png',
                                 scaling='epa',
                                 opac95=3,
                                 opac05=12,
                                 colormap='auto'):
    """
    Render a snapshot with its pixel aspect ratio corrected for the real
    ground distance covered by the latitude/longitude extents.

    largest_size: the output won't be larger than this many pixels on either
        height or width. If None then we will scale UP from the grid size instead.
    Remaining parameters are forwarded to _snapshot_to_img.
    """
    lat_min = obj.lats.min()
    lat_max = obj.lats.max()
    lon_min = obj.lons.min()
    lon_max = obj.lons.max()
    # NOTE(review): geopy's .km is read, so despite the _m suffix these are
    # kilometres; only their ratio is used, which is unit-free.
    lat_dist_m = geopy.distance.distance(
        (lat_min, lon_max), (lat_max, lon_max)).km
    lon_dist_m = geopy.distance.distance(
        (lat_max, lon_min), (lat_max, lon_max)).km
    # ratio > 1 when the latitude (north-south) span is the longer side
    ratio = lat_dist_m / lon_dist_m
    if largest_size:
        if lat_dist_m > lon_dist_m:
            # this is image size, so we give it in (width x height)
            size = (int(largest_size / ratio), largest_size)
        else:
            # NOTE(review): largest_size / ratio > largest_size here, so the
            # height can exceed largest_size -- verify intended (ratio vs 1/ratio)
            size = (largest_size, int(largest_size / ratio))
    # Otherwise let's scale up instead of down
    else:
        lat_sz, lon_sz = len(obj.lats), len(obj.lons)
        # NOTE(review): width/height roles here look swapped relative to the
        # largest_size branch above -- confirm against rendered output
        if lat_dist_m > lon_dist_m:
            size = (lat_sz, int(lon_sz * ratio))
        else:
            size = (int(lat_sz * ratio), lon_sz)
    return _snapshot_to_img(obj,
                            size=size,
                            format=format,
                            scaling=scaling,
                            opac95=opac95,
                            opac05=opac05,
                            colormap=colormap)
| tetradsensors/tetrad-viz-toolkit | viztools/tools/generate_image.py | generate_image.py | py | 4,142 | python | en | code | 0 | github-code | 90 |
20702597445 | import jax
import jax.numpy as jnp
from flax import linen as nn
class Attention(nn.Module):
    """Multi-head self-attention block (flax linen implementation)."""
    num_heads: int = 8
    attn_bias: bool = True          # bias in the qkv projection
    attn_drop_rate: float = 0.0     # dropout rate on the attention weights
    proj_bias: bool = True          # bias in the output projection
    proj_drop_rate: float = 0.0     # dropout rate on the projected output
    embed_dim: int = 384            # expected channel dimension of the input

    @nn.compact
    def __call__(self, x, training: bool = False):
        # x: (batch B, tokens N, channels C); dropout is active only when training
        B, N, C = x.shape
        assert (
            C == self.embed_dim
        ), f"Input embedding dimension ({C}) should match layer embedding dimension ({self.embed_dim})."
        # One dense layer produces q, k and v stacked along the last axis
        qkv = nn.Dense(features=3 * C, use_bias=self.attn_bias, name="qkv")(x)
        # (B, N, 3, H, C//H) -> (3, B, H, N, C//H), then unpack into q, k, v
        qkv = jnp.reshape(qkv, (B, N, 3, self.num_heads, C // self.num_heads))
        qkv = jnp.transpose(qkv, (2, 0, 3, 1, 4))
        q, k, v = tuple(qkv)
        # Attention matrix: (B, H, N, N), scaled by sqrt(head dimension)
        attn = q @ k.transpose((0, 1, 3, 2)) / jnp.sqrt(C // self.num_heads)
        attn = nn.softmax(attn, axis=-1)
        attn = nn.Dropout(rate=self.attn_drop_rate, name="attn_drop")(
            attn, deterministic=not training
        )
        # Output: (B, N, H, C // H) merged back to (B, N, C)
        x = (attn @ v).transpose(0, 2, 1, 3).reshape(B, N, C)
        x = nn.Dense(features=C, use_bias=self.proj_bias, name="proj")(x)
        x = nn.Dropout(rate=self.proj_drop_rate, name="proj_drop")(
            x, deterministic=not training
        )
        return x
| kylestach/dinov2-jax | attention.py | attention.py | py | 1,344 | python | en | code | 0 | github-code | 90 |
70494196777 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 31 10:46:27 2022
@author: yunita
"""
# coding: utf-8
# In[41]
# PROBLEM NUMBER 1
import pandas as pd  # import the pandas library under the alias pd
kegiatan = {"Nama kegiatan" : ['Belanja', 'Memasak', 'Menyanyi', 'Berkuda']}  # dict of activity names used to fill the DataFrame
x = pd.DataFrame(kegiatan)  # build a DataFrame from the activities dict
print ('Nur ikhsani sedang:' + x)  # element-wise prepend the prefix string and print the result
# In[42]
# PROBLEM NUMBER 2
import numpy as np  # import the numpy library under the alias np
matrix_x = np.eye(10)  # create a 10x10 identity matrix with numpy's eye function
matrix_x  # notebook-style cell echo; has no effect when run as a script
print (matrix_x)  # print the 10x10 identity matrix
# In[43]
# PROBLEM NUMBER 3
import matplotlib.pyplot as mp  # import matplotlib.pyplot under the alias mp
mp.plot([1,1,8,7,0,9,9])  # plot the given y-values
mp.xlabel('Nur Ikhsani Suwandy Futri')  # add a label to the x axis
mp.ylabel('1194029')  # add a label to the y axis
mp.show()  # display the plot window
# In[44]:
#SOAL NOMOR 4
import pandas as pd #MELAKUKAN IMPORT PANDA DENGAN NAMA LAIN YAITU PD
# some lines have too many fields (?), so skip bad lines
imgatt = pd.read_csv("image_attribute_labels.txt",
sep='\s+', header=None, error_bad_lines=False, warn_bad_lines=False,
usecols=[0,1,2], names=['imgid', 'attid', 'present']) #MEMBUAT VARIABLE DENGAN IMGATT UNTUK MEMBACA CSV DARI DATA SET
# description from dataset README:
#
# The set of attribute labels as perceived by MTurkers for each image
# is contained in the file attributes/image_attribute_labels.txt, with
# each line corresponding to one image/attribute/worker triplet:
#
# <image_id> <attribute_id> <is_present> <certainty_id> <time>
#
# where <image_id>, <attribute_id>, <certainty_id> correspond to the IDs
# in images.txt, attributes/attributes.txt, and attributes/certainties.txt
# respectively. <is_present> is 0 or 1 (1 denotes that the attribute is
# present). <time> denotes the time spent by the MTurker in seconds.
# In[45]:
imgatt.head() #MENAMPILKAN DATA YANG TELAH DIBACA
# In[46]:
imgatt.shape#MENAMPILKAN JUMLAH SELURUH DATA
# In[47]:
# need to reorganize imgatt to have one row per imgid, and 312 columns (one column per attribute),
# with 1/0 in each cell representing if that imgid has that attribute or not
imgatt2 = imgatt.pivot(index='imgid', columns='attid', values='present')#MEMBUAT SEBUAH VARIABLE BARU DARI FUNGSI IMGATT DENGAN MENGGANTI INDEK MENJADI KOLOM DAN KOLOM MENJADI INDEX
# In[48]:
imgatt2.head()#MENAMPILKAN DATA YANG SUDAH DIBACA DENGAN 5 DATA TERATAS.
# In[49]:
imgatt2.shape# MENAMPILKAN JUMLAH SELURUH DATA
# In[50]:
# now we need to load the image true classes
imglabels = pd.read_csv("image_class_labels.txt",
sep=' ', header=None, names=['imgid', 'label'])#BACA DATA CSV DENGAN KETENTUAN YANG ADA
imglabels = imglabels.set_index('imgid')#VARIABLE IMGLABELS SEBAGAI SET INDEX IMGID
# In[51]:
imglabels.head()#MEMBACA DATA YANG DIMASUKKAN KE VARIABLE IMGLABELS
# In[52]:
imglabels.shape# MENAMPILKAN JUMLAH DATA SELURUH DATA SERTA KOLOMNYA
# In[53]:
# now we need to attach the labels to the attribute data set,
# and shuffle; then we'll separate a test set from a training set
df = imgatt2.join(imglabels)#VARIABLE DF DIMASUKKAN FUNGSI JOIN DARI DATA IMGATT2 KE VARIABLE IMGLABELS
df = df.sample(frac=1)#VARIABLE DF SEBAGAI SAMPLE DENGAN KETENTUAN FRAC=1
# In[54]:
df_att = df.iloc[:, :312]#MEMBUAT KOLOM DENGAN KETENTUAN 312
df_label = df.iloc[:, 312:]#MEMBUAT KOLOM DENGAN KETENTUAN 312
# In[55]:
df_att.head()# MENAMPILKAN DATA YANG SUDAH DIBACA TADI NAMUN HANYA DATA YANG TERATAS.
# In[56]:
df_label.head()#MENAMPILKAN DATA YANG SUDAH DIBACA TADI NAMUN HANYA DA
# In[57]:
df_train_att = df_att[:8000]#DATA AKAN DIBAGI DARI 8000 ROW PERTAMA MENJADI DATA TRANINING DAN SISINYA ADALAH DATA TESTING
df_train_label = df_label[:8000]#DATA AKAN DIBAGI DARI 8000 ROW PERTAMA MENJADI DATA TRAINING DAN SISANYA ADALAH DATA TESTING
df_test_att = df_att[8000:]#BERBALIK DARI SEBELUMNYA DTA AKAN DIBAGI MULAI DARI 8000 ROW PERTAMA MENJADI DATA TRAINING DAN SISINYA MENJADI DATA TESTING
df_test_label = df_label[8000:]#BERBALIK DARI SEBELUMNYA DTA AKAN DIBAGI
df_train_label = df_train_label['label']#MENAMBAHKAN LABEL
df_test_label = df_test_label['label']#NEMANBAHKAN LABEL
# In[58]:
from sklearn.ensemble import RandomForestClassifier #imort fungsi random forest
clf = RandomForestClassifier(max_features=50, random_state=0, n_estimators=100)#clf sebagai variable untuk klasifikasi random forest
# In[59]:
clf.fit(df_train_att, df_train_label)#vriable clf untuk fit yaitu menjadi data training
# In[60]:
print(clf.predict(df_train_att.head()))#print clf yang sudah prediksi dar training tetapi anya menampilkan data paling atas
# In[60]:
print(clf.predict(df_test_att.head()))
# In[61]:
clf.score(df_test_att, df_test_label)#memunculkan clf sebagai testing yang sudah di training
# In[62]:
#SOAL NOMOR 5
from sklearn.metrics import confusion_matrix#menginportkan matrix
pred_labels = clf.predict(df_test_att)#membuat variable pred label dari data testing
cm = confusion_matrix(df_test_label, pred_labels)#cd sebagai variable dari data label
# In[63]:
cm#memunculkan data label bentuk array
# In[64]:
# from http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
import matplotlib.pyplot as plt #menginportkan library matplotlib sebagai plt
import itertools #mengmportkan library itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Print and plot the confusion matrix *cm* with class tick labels.

    Normalization can be applied by setting `normalize=True` (each row is
    divided by its sum so cells become per-class proportions).
    Based on the scikit-learn plot_confusion_matrix example.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    #plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    # Annotate every cell with its value (this loop was missing in the
    # original, leaving fmt/thresh unused); white text on dark cells.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# In[65]:
birds = pd.read_csv("classes.txt",
sep='\s+', header=None, usecols=[1], names=['birdname'])
birds = birds['birdname']
birds
# In[66]:
import numpy as np
np.set_printoptions(precision=2)
plt.figure(figsize=(60,60), dpi=300)
plot_confusion_matrix(cm, classes=birds, normalize=True)
plt.show()
# In[67]:
#SOAL NOMOR 6
from sklearn import tree
clftree = tree.DecisionTreeClassifier()
clftree.fit(df_train_att, df_train_label)
clftree.score(df_test_att, df_test_label)
# In[68]:
from sklearn import svm
clfsvm = svm.SVC()
clfsvm.fit(df_train_att, df_train_label)
clfsvm.score(df_test_att, df_test_label)
# In[69]:
from sklearn.model_selection import cross_val_score
scores = cross_val_score(clf, df_train_att, df_train_label, cv=5)
# show average score and +/- two standard deviations away (covering 95% of scores)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# In[70]:
#SOAL NOMOR 7
scorestree = cross_val_score(clftree, df_train_att, df_train_label, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scorestree.mean(), scorestree.std() * 2))
# In[71]:
scoressvm = cross_val_score(clfsvm, df_train_att, df_train_label, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scoressvm.mean(), scoressvm.std() * 2))
# In[72]:
#SOAL NOMOR 8
max_features_opts = range(5, 50, 5)
n_estimators_opts = range(10, 200, 20)
rf_params = np.empty((len(max_features_opts)*len(n_estimators_opts),4), float)
i = 0
for max_features in max_features_opts:
for n_estimators in n_estimators_opts:
clf = RandomForestClassifier(max_features=max_features, n_estimators=n_estimators)
scores = cross_val_score(clf, df_train_att, df_train_label, cv=5)
rf_params[i,0] = max_features
rf_params[i,1] = n_estimators
rf_params[i,2] = scores.mean()
rf_params[i,3] = scores.std() * 2
i += 1
print("Max features: %d, num estimators: %d, accuracy: %0.2f (+/- %0.2f)" %
(max_features, n_estimators, scores.mean(), scores.std() * 2))
# In[90]:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
fig = plt.figure()
fig.clf()
ax = fig.gca(projection='3d')
x = rf_params[:,0]
y = rf_params[:,1]
z = rf_params[:,2]
ax.scatter(x, y, z)
ax.set_zlim(0.2, 0.5)
ax.set_xlabel('Max features')
ax.set_ylabel('Num estimators')
ax.set_zlabel('Avg accuracy')
plt.show()
| zakyyusuff/artificial-intelligence | Chapter02/chapter3.py | chapter3.py | py | 9,434 | python | id | code | 0 | github-code | 90 |
27308126551 | from gold.statistic.MagicStatFactory import MagicStatFactory
from gold.statistic.Statistic import Statistic
from gold.statistic.CountPointStat import CountPointStat
from quick.statistic.PointCountInsideSegsStat import PointCountInsideSegsStat
from gold.statistic.ProportionCountStat import ProportionCountStat
#from proto.RSetup import r
import math
from collections import OrderedDict
class PointCountInSegsPvalStat(MagicStatFactory):
    # Factory stub: presumably MagicStatFactory dispatches to the
    # *Unsplittable implementation below by naming convention -- see
    # MagicStatFactory for the exact mechanism.
    pass
#class PointCountInSegsPvalStatSplittable(StatisticSumResSplittable):
# pass
class PointCountInSegsPvalStatUnsplittable(Statistic):
    """
    Binomial test of whether points (track 1) fall inside segments (track 2)
    more or less often than expected from the segments' coverage proportion.

    tail: 'less', 'more', or 'different' (two-sided).
    assumptions: only 'poissonPoints' is supported.
    """
    def __init__(self, region, track, track2, assumptions='poissonPoints', tail='different', **kwArgs):
        assert( tail in ['less','more','different'])
        assert assumptions=='poissonPoints'
        self._tail = tail
        Statistic.__init__(self, region, track, track2, assumptions=assumptions, tail=tail, **kwArgs)

    def _compute(self):
        from proto.RSetup import r
        x = self._numPointsInside.getResult()            # observed points inside segments
        size = self._numPointsTotal.getResult()          # total number of points (binomial n)
        prob = self._segmentCoverProportion.getResult()  # segment coverage (binomial p)
        # Degenerate cases make the test meaningless. Checked before any
        # arithmetic on size/prob (the original computed an unused standard
        # error first, which raised ZeroDivisionError when size == 0).
        if size < 1 or prob in [0, 1]:
            return None
        if self._tail == 'less':
            pval = r.pbinom(x, size, prob)
        elif self._tail == 'more':
            pval = 1 - r.pbinom(x - 1, size, prob)
        elif self._tail == 'different':
            # two-sided p-value: double the smaller tail, capped at 1
            pval = min(1, 2 * min(r.pbinom(x, size, prob), 1 - r.pbinom(x - 1, size, prob)))
        return OrderedDict([('P-value', float(pval)), ('Test statistic: PointsInside', x),
                            ('E(Test statistic): ExpPointsInside', prob*size),
                            ('DiffFromExpected', x-prob*size), ('PointsTotal', size),
                            ('SegCoverage', prob)])

    def _createChildren(self):
        # child statistics supplying the three inputs used by _compute
        self._numPointsInside = self._addChild( PointCountInsideSegsStat(self._region, self._track, self._track2))
        self._numPointsTotal = self._addChild( CountPointStat(self._region, self._track))
        self._segmentCoverProportion = self._addChild( ProportionCountStat(self._region, self._track2))
| uio-bmi/track_rand | lib/hb/gold/statistic/PointCountInSegsPvalStat.py | PointCountInSegsPvalStat.py | py | 2,340 | python | en | code | 1 | github-code | 90 |
24238766534 | import cv2
# Play a video file frame by frame converted to grayscale; 'q' quits early.
cap = cv2.VideoCapture("image/Video.mp4")
while cap.isOpened():
    ok, frame = cap.read()  # grab the next frame from the stream
    if not ok:
        # end of stream (or read failure) -> stop playback
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Output", gray)
    # waitKey(1) pumps the GUI event loop; mask to 8 bits and compare to 'q'
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| keemmer/python-basic-opencv | basic_08_video-grayScale.py | basic_08_video-grayScale.py | py | 381 | python | en | code | 0 | github-code | 90 |
class LineScrapper:
    """Parsers for wpa_supplicant / scan-tool debug output."""

    @staticmethod
    def get_ap_list(lines):
        """
        Extract access points from scan-log output.

        Returns a list of single-entry dicts mapping SSID -> BSSID, keeping
        only the first occurrence of each BSSID. Exits the program when the
        log reports an interface or rfkill error.
        """
        access_points = []
        for raw in lines.split('\n'):
            entry = raw.strip()
            if 'Could not read interface' in entry:
                print(f'[!] {entry}\n')
                exit(1)
            elif 'rfkill: WLAN soft blocked' in entry:
                print(f'[!] {entry}\n')
                exit(1)
            elif 'BSS: Add new id' in entry:
                # SSID is quoted; BSSID is the 8th whitespace token before it
                ssid = entry.split("'")[1]
                bssid = entry.split("'")[0].split()[7]
                already_seen = any(mac == bssid
                                   for ap in access_points
                                   for mac in ap.values())
                if not already_seen:
                    access_points.append({ssid: bssid})
        return access_points

    @staticmethod
    def get_pmkid(phs):
        """Return the PMKID hex string from handshake output, or None if absent."""
        for raw in phs.split('\n'):
            line = raw.strip()
            if 'PMKID from' in line:
                # the PMKID value starts at a fixed column; drop inner spacing
                return line[49:].replace(" ", "")
12877688723 | #!/usr/bin/env python3
import gym
import ptan
import argparse
import torch
from torch import nn
import torch.optim as optim
import torch.multiprocessing as mp
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
from lib import dqn_model, common
from collections import namedtuple, deque
import csv
import numpy as np
import os
# number of experience steps the actor pushes per training iteration
PLAY_STEPS = 4
# one single experience step; 'env' records which environment id produced it
# so the training loop can pick the matching expert network
Experience_AM = namedtuple('Experience_AM', ['state', 'action', 'reward', 'done', 'env'])
class ExperienceSource_AM:
    """
    Simple n-step experience source using single or multiple environments.
    Every experience yielded is a tuple of up to steps_count Experience_AM
    entries, each tagged with the id of the environment that produced it.
    """
    def __init__(self, env, agent, steps_count=2, steps_delta=1, vectorized=False):
        """
        Create simple experience source
        :param env: environment or list of environments to be used
        :param agent: callable to convert batch of states into actions to take
        :param steps_count: count of steps to track for every experience chain
        :param steps_delta: how many steps to do between experience items
        :param vectorized: support of vectorized envs from OpenAI universe
        """
        assert isinstance(env, (gym.Env, list, tuple))
        assert isinstance(agent, ptan.agent.BaseAgent)
        assert isinstance(steps_count, int)
        assert steps_count >= 1
        assert isinstance(vectorized, bool)
        if isinstance(env, (list, tuple)):
            self.pool = env
        else:
            self.pool = [env]
        self.agent = agent
        self.steps_count = steps_count
        self.steps_delta = steps_delta
        self.total_rewards = []
        self.total_steps = []
        self.vectorized = vectorized

    def __iter__(self):
        # per-sub-environment bookkeeping, indexed in pool order
        states, agent_states, histories, cur_rewards, cur_steps = [], [], [], [], []
        env_lens = []
        for env in self.pool:
            obs = env.reset()
            # if the environment is vectorized, all it's output is lists of results.
            # Details are here: https://github.com/openai/universe/blob/master/doc/env_semantics.rst
            if self.vectorized:
                obs_len = len(obs)
                states.extend(obs)
            else:
                obs_len = 1
                states.append(obs)
            env_lens.append(obs_len)
            for _ in range(obs_len):
                histories.append(deque(maxlen=self.steps_count))
                cur_rewards.append(0.0)
                cur_steps.append(0)
                agent_states.append(self.agent.initial_state())
        iter_idx = 0
        while True:
            # Build the action list: random action for entries with no state
            # (just-reset vectorized sub-envs), agent actions for the rest.
            actions = [None] * len(states)
            states_input = []
            states_indices = []
            for idx, state in enumerate(states):
                if state is None:
                    actions[idx] = self.pool[0].action_space.sample()  # assume that all envs are from the same family
                else:
                    states_input.append(state)
                    states_indices.append(idx)
            if states_input:
                states_actions, new_agent_states = self.agent(states_input, agent_states)
                for idx, action in enumerate(states_actions):
                    g_idx = states_indices[idx]
                    actions[g_idx] = action
                    agent_states[g_idx] = new_agent_states[idx]
            grouped_actions = ptan.experience._group_list(actions, env_lens)
            global_ofs = 0
            for env_idx, (env, action_n) in enumerate(zip(self.pool, grouped_actions)):
                if self.vectorized:
                    next_state_n, r_n, is_done_n, _ = env.step(action_n)
                else:
                    next_state, r, is_done, _ = env.step(action_n[0])
                    next_state_n, r_n, is_done_n = [next_state], [r], [is_done]
                #### This is the addition: remember which game produced the step
                env_name = env.unwrapped.spec.id
                for ofs, (action, next_state, r, is_done) in enumerate(zip(action_n, next_state_n, r_n, is_done_n)):
                    idx = global_ofs + ofs
                    state = states[idx]
                    history = histories[idx]
                    cur_rewards[idx] += r
                    cur_steps[idx] += 1
                    if state is not None:
                        history.append(Experience_AM(state=state, action=action, reward=r, done=is_done, env=env_name))
                    # emit a full n-step window every steps_delta iterations
                    if len(history) == self.steps_count and iter_idx % self.steps_delta == 0:
                        yield tuple(history)
                    states[idx] = next_state
                    if is_done:
                        # generate tail of history (shrinking windows), then
                        # record the episode totals and reset the sub-env
                        while len(history) >= 1:
                            yield tuple(history)
                            history.popleft()
                        self.total_rewards.append(cur_rewards[idx])
                        self.total_steps.append(cur_steps[idx])
                        cur_rewards[idx] = 0.0
                        cur_steps[idx] = 0
                        # vectorized envs are reset automatically
                        states[idx] = env.reset() if not self.vectorized else None
                        agent_states[idx] = self.agent.initial_state()
                        history.clear()
                global_ofs += len(action_n)
            iter_idx += 1

    def pop_total_rewards(self):
        # drain and return per-episode rewards accumulated since the last call
        r = self.total_rewards
        if r:
            self.total_rewards = []
            self.total_steps = []
        return r

    def pop_rewards_steps(self):
        # drain and return (reward, steps) pairs accumulated since the last call
        res = list(zip(self.total_rewards, self.total_steps))
        if res:
            self.total_rewards, self.total_steps = [], []
        return res
# Entries emitted from ExperienceSourceFirstLast_AM. Reward is discounted over
# the trajectory piece; 'env' identifies the source environment id.
ExperienceFirstLast_AM = namedtuple('ExperienceFirstLast_AM', ('state', 'action', 'reward', 'last_state', 'env'))


class ExperienceSourceFirstLast_AM(ExperienceSource_AM):
    """
    This is a wrapper around ExperienceSource_AM to prevent storing the full
    trajectory in the replay buffer when we need only first and last states.
    For every trajectory piece it calculates the discounted reward and emits
    only the first state, the last state, and the action taken in the first state.
    If we have a partial trajectory at the end of an episode, last_state will be None.
    """
    def __init__(self, env, agent, gamma, steps_count=1, steps_delta=1, vectorized=False):
        assert isinstance(gamma, float)
        # request one extra step so that exp[-1].state can serve as last_state
        super(ExperienceSourceFirstLast_AM, self).__init__(env, agent, steps_count+1, steps_delta, vectorized=vectorized)
        self.gamma = gamma
        self.steps = steps_count

    def __iter__(self):
        for exp in super(ExperienceSourceFirstLast_AM, self).__iter__():
            if exp[-1].done and len(exp) <= self.steps:
                # episode ended inside the window: no last state available
                last_state = None
                elems = exp
            else:
                last_state = exp[-1].state
                elems = exp[:-1]
            # discounted reward over the piece, folded from the latest step back
            total_reward = 0.0
            for e in reversed(elems):
                total_reward *= self.gamma
                total_reward += e.reward
            yield ExperienceFirstLast_AM(state=exp[0].state, action=exp[0].action,
                                         reward=total_reward, last_state=last_state, env=exp[0].env)
def play_func(params, net, cuda, exp_queue, device_id):
    """
    Actor process: plays both games with the shared learner net and pushes
    transitions into exp_queue for the training process.

    The paper suggests sampling the actions from the learner net, so that
    requires little change from the multienv implementation.
    *** There is a reason that it reinitializes the envs in this function
    that has to do with parallelization ***
    """
    run_name = params['run_name']
    if 'max_games' not in params:
        max_games = 16000
    else:
        max_games = params['max_games']
    # environments are (re)created inside this subprocess on purpose
    envSI = gym.make('SpaceInvadersNoFrameskip-v4')
    envSI = ptan.common.wrappers.wrap_dqn(envSI)
    envDA = gym.make('DemonAttackNoFrameskip-v4')
    envDA = ptan.common.wrappers.wrap_dqn(envDA)
    device = torch.device("cuda:{}".format(device_id) if cuda else "cpu")
    writer = SummaryWriter(comment="-" + run_name + "-03_parallel")
    selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params['epsilon_start'])
    epsilon_tracker = common.EpsilonTracker(selector, params)
    agent = ptan.agent.DQNAgent(net, selector, device=device)
    exp_source = ExperienceSourceFirstLast_AM([envSI, envDA], agent, gamma=params['gamma'], steps_count=1)
    exp_source_iter = iter(exp_source)
    # CSV log of periodically saved model checkpoints
    fh = open('mimic_models/{}_metadata.csv'.format(run_name), 'w')
    out_csv = csv.writer(fh)
    frame_idx = 0
    game_idx = 1
    model_count = 0   # NOTE(review): unused below
    model_stats = []  # NOTE(review): unused below
    mean_rewards = []
    with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
        while True:
            frame_idx += 1
            exp = next(exp_source_iter)
            exp_queue.put(exp)
            epsilon_tracker.frame(frame_idx)
            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                # a game finished: log the reward, maybe checkpoint, maybe stop
                status, num_games, mean_reward, epsilon_str = reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon)
                mean_rewards.append(mean_reward)
                if status:
                    break
                if game_idx and (game_idx % 500 == 0):
                    # checkpoint every 500 games (net moved to CPU for a portable save)
                    print("Saving model...")
                    model_name = 'mimic_models/{}_{}.pth'.format(run_name, game_idx)
                    net.to(torch.device('cpu'))
                    torch.save(net, model_name)
                    net.to(device)
                    new_row = [model_name, num_games, mean_reward, epsilon_str]
                    out_csv.writerow(new_row)
                    np.savetxt('mimic_models/{}_reward.txt'.format(run_name), np.array(mean_rewards))
                if game_idx == max_games:
                    break
                game_idx += 1
    # final checkpoint + metadata row once the training loop ends
    print("Saving final model...")
    model_name = 'mimic_models/{}_{}.pth'.format(run_name, game_idx)
    net.to(torch.device('cpu'))
    torch.save(net, model_name)
    net.to(device)
    new_row = [model_name, num_games, mean_reward, epsilon_str]
    out_csv.writerow(new_row)
    np.savetxt('mimic_models/{}_reward.txt'.format(run_name), np.array(mean_rewards))
    # plt.figure(figsize=(16, 9))
    # plt.tight_layout()
    # plt.title('Reward vs time, {}'.format(run_name))
    # plt.xlabel('Iteration')
    # plt.ylabel('Reward')
    # ys = np.array(mean_rewards)
    # plt.plot(ys, c='r')
    # plt.savefig('mimic_models/{}_reward.png'.format(run_name))
    # plt.close()
    fh.close()
    # sentinel tells the training process to stop pulling from the queue
    exp_queue.put(None)
if __name__ == "__main__":
"""
This method attempts to build a generalized model not from the raw games but from two expert models trained on individual games.
It does so by training a new network to replicate the output Q values of the multiple expert models given the same input.
Therefore, the loss is given by the MSE of the softmax of the final activations + MSE of the final hidden layer.
See https://arxiv.org/pdf/1511.06342.pdf
Adds a slight edit to the Experience objects noting which game they are from so as to use the correct expert
"""
mp.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
parser.add_argument("--cuda_id", default=0, help="CUDA ID of device")
parser.add_argument("--si", help="Path to space invaders master model", required=True)
parser.add_argument('--da', help='Paths to demon attack master model', required=True)
parser.add_argument('--env', help='Environment to load', required=True)
parser.add_argument('--beta', help='Balance of Policy vs Hidden Loss', default=1)
args = parser.parse_args()
cuda_id = args.cuda_id
params = common.HYPERPARAMS[args.env]
params['batch_size'] *= PLAY_STEPS
device_str = "cuda:{}".format(cuda_id) if args.cuda else "cpu"
print("Using device: {}".format(device_str))
device = torch.device(device_str)
if not os.path.exists('mimic_models'):
os.makedirs('mimic_models')
envSI = gym.make('SpaceInvadersNoFrameskip-v4')
envSI = ptan.common.wrappers.wrap_dqn(envSI)
envDA = gym.make('DemonAttackNoFrameskip-v4')
envDA = ptan.common.wrappers.wrap_dqn(envDA)
assert envSI.action_space.n == envDA.action_space.n, "Different Action Space Lengths"
assert envSI.observation_space.shape == envDA.observation_space.shape, "Different Obs. Space Shapes"
print("Loaded Environments: {}l {}".format(envSI.unwrapped.spec.id, envDA.unwrapped.spec.id))
expertSI = dqn_model.DQN(envSI.observation_space.shape, envSI.action_space.n)
expertSI.load_state_dict(torch.load(args.si, map_location=device).state_dict())
expertSI_hidden = dqn_model.DQN_Hidden(envSI.observation_space.shape, envSI.action_space.n, expertSI).to(device)
expertSI = expertSI.to(device)
expertSI.eval()
expertSI_hidden.eval()
expertDA = dqn_model.DQN(envSI.observation_space.shape, envSI.action_space.n)
expertDA.load_state_dict(torch.load(args.da, map_location=device).state_dict())
expertDA_hidden = dqn_model.DQN_Hidden(envSI.observation_space.shape, envSI.action_space.n, expertDA).to(device)
expertDA = expertDA.to(device)
expertDA.eval()
expertDA_hidden.eval()
name_to_expert = {envSI.unwrapped.spec.id : [expertSI, expertSI_hidden] , envDA.unwrapped.spec.id : [expertDA, expertDA_hidden]}
# This net will attempt to learn the directly from the expert models, not the games. No target net needed
net = dqn_model.DQN_AM(envSI.observation_space.shape, envSI.action_space.n).to(device)
# After instantiating the model shape, we actually don't need these envs (will be recreated in the parallel function)
del envSI
del envDA
# Now we want two buffers, one for each game, to keep the frames separate so we can use the correct expert model
buffer = ptan.experience.ExperienceReplayBuffer(experience_source=None, buffer_size=params['replay_size'])
optimizer = optim.Adam(net.parameters(), lr=0.001)
exp_queue = mp.Queue(maxsize=PLAY_STEPS * 2)
play_proc = mp.Process(target=play_func, args=(params, net, args.cuda, exp_queue, cuda_id))
play_proc.start()
frame_idx = 0
while play_proc.is_alive():
frame_idx += PLAY_STEPS
for _ in range(PLAY_STEPS):
exp = exp_queue.get()
if exp is None:
play_proc.join()
break
buffer._add(exp)
if len(buffer) < 1000: #params['replay_initial']:
continue
# print("UPDATING GRAD")
optimizer.zero_grad()
batch = buffer.sample(params['batch_size'])
loss_v = common.calc_loss_actormimic(batch, net, name_to_expert, beta=args.beta, cuda=args.cuda, cuda_async=True, cuda_id=cuda_id)
loss_v.backward()
optimizer.step()
| tophatraptor/si-transfer | train_model_actormimic.py | train_model_actormimic.py | py | 15,052 | python | en | code | 1 | github-code | 90 |
def value(l):
    """Return a reversed copy of *l*; the input list is left unchanged.

    Fixes the original, which evaluated l[::-1] but discarded the result
    and returned the input list unreversed.
    """
    return l[::-1]
number=[2,3,4,5]
word=["world1","world2"]
print(value(number))
print(value(word))
def value2(l):
    """Return a new list with the elements of *l* in reverse order.

    Bug fix: the original popped every element off *l*, emptying the
    caller's list as a side effect; this version leaves *l* untouched.
    """
    return list(reversed(l))
number=[1,2,3,4]
print(value2(number)) | swapaaronn/Basicpython | list/exe.py | exe.py | py | 318 | python | en | code | 0 | github-code | 90 |
# For each test case, print the number of common points of two circles:
#  -1 -> same centre and same radius (infinitely many common points)
#   1 -> tangent, externally (r == r1 + r2) or internally (largest of the
#        three values equals the sum of the other two)
#   0 -> no common point (apart, or one strictly inside the other)
#   2 -> two intersection points
import sys
# NOTE(review): rebinds the builtin input() to sys.stdin.readline for speed.
input = sys.stdin.readline
num = int(input())
for _ in range(num):
    x1, y1, r1, x2, y2, r2 = map(int, input().strip().split())
    # r is the distance between the two centres.
    r = ((x2-x1) ** 2 + (y2 - y1) ** 2) ** (1/2)
    # m is the largest of {r1, r2, distance}; R keeps the other two.
    R = [r1, r2, r]
    m = max(R)
    R.remove(m)
    print(-1 if r == 0 and r1 == r2 else 1 if r ==
          r1 + r2 or m == sum(R) else 0 if m > sum(R) else 2)
| WonyJeong/algorithm-study | koalakid1/Math/bj-1002.py | bj-1002.py | py | 357 | python | en | code | 2 | github-code | 90 |
72665776298 | # Adopted from https://pythonprogramming.net/train-test-tensorflow-deep-learning-tutorial/
# from preProcessingText import create_feature_sets_and_labels
import tensorflow as tf
import pickle
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# train_x, train_y, test_x, test_y = create_feature_sets_and_labels('data/sentiment2/pos.txt', 'data/sentiment2/neg.txt')
from sklearn.externals import joblib
# pickle_in = open('pickles/test.pickle','rb')
# (train_x, train_y, test_x, test_y) = joblib.load('pickles/joblibtest.pkl')
train_x = joblib.load('pickles/NN_X_train.pkl')
train_y = joblib.load('pickles/NN_Y_train.pkl')
test_x = joblib.load('pickles/NN_X_Test.pkl')
test_y = joblib.load('pickles/NN_Y_Test.pkl')
print(len(train_x))
print(len(train_y))
print(len(test_x))
print(len(test_y))
n_nodes_hl1 = 1500
n_nodes_hl2 = 1500
n_nodes_hl3 = 1500
n_nodes_hl4 = 1500
n_classes = 2
batch_size = 100
hm_epochs = 5
x = tf.placeholder('float')
y = tf.placeholder('float')
# Construct the NN by creating individual layers. The first hidden layer is the input layer.
hidden_1_layer = {'f_fum': n_nodes_hl1,
'weight': tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'f_fum': n_nodes_hl2,
'weight': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'f_fum': n_nodes_hl3,
'weight': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl3]))}
hidden_4_layer = {'f_fum': n_nodes_hl4,
'weight': tf.Variable(tf.random_normal([n_nodes_hl3, n_nodes_hl4])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl4]))}
output_layer = {'f_fum': None,
'weight': tf.Variable(tf.random_normal([n_nodes_hl4, n_classes])),
'bias': tf.Variable(tf.random_normal([n_classes])), }
# Construct the model by summing the previous inputs and passing it through a nonlinear activation function
def neural_network_model(data):
    """Forward pass: four ReLU hidden layers followed by a linear output.

    Reads the module-level layer parameter dicts and returns unscaled
    logits (softmax is applied later inside the loss).
    """
    hidden_layers = (hidden_1_layer, hidden_2_layer, hidden_3_layer,
                     hidden_4_layer)
    activation = data
    for layer in hidden_layers:
        pre_activation = tf.add(tf.matmul(activation, layer['weight']),
                                layer['bias'])
        activation = tf.nn.relu(pre_activation)
    return tf.matmul(activation, output_layer['weight']) + output_layer['bias']
# Train the network by calculating the error and adjusting the weights hm_epochs number of times.
def train_neural_network(x):
    """Train the net on (train_x, train_y) and evaluate on (test_x, test_y).

    x is the input placeholder. Trains for hm_epochs epochs of batch_size
    mini-batches, prints per-epoch loss, then reports accuracy, a confusion
    matrix and a weighted F-score over the held-out set.
    """
    import datetime
    starttime = datetime.datetime.now()
    prediction = neural_network_model(x)
    # Softmax cross-entropy on the raw logits, averaged over the batch.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)
    with tf.Session() as sess:
        # sess.run(tf.initialize_all_variables())
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                              y: batch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        #print out accuracy
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy: ', accuracy.eval({x: test_x, y: test_y}))
        print("------------------ Evaluation ---------------------")
        print("Perfoming test...")
        pred = []
        count = 0;
        for data in test_x:
            if(count % 3000 == 0):
                print("Current Progress: ", count / len(test_x) * 100, "%")
            # NOTE(review): tf.argmax(...) here adds a new op to the graph on
            # every iteration, which grows the graph and slows this loop;
            # building the argmax op once outside the loop would be cheaper.
            result = (sess.run(tf.argmax(prediction.eval(feed_dict={x: [data]}), 1)))
            pred.append(result[0])
            count += 1
        print("Extract Actual Value...")
        # assumes each test label is a one-hot pair; element[1] is the
        # positive-class indicator — TODO confirm against the pickled data.
        actural = []
        for element in test_y:
            actural.append(element[1])
        from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score
        print(confusion_matrix(actural,pred))
        print(classification_report(actural,pred))
        print("F-Score: ",f1_score(actural,pred,average='weighted'))
        print("accuracy: ",accuracy_score(actural, pred))
        endtime = datetime.datetime.now()
        print("Time elapsed: ", endtime - starttime)
        print("------------------ END ---------------------")
        print("Please verify two accuracy to see if they matches each other.")
train_neural_network(x)
| daltonchen/DataMiningProject | neural.py | neural.py | py | 5,255 | python | en | code | 0 | github-code | 90 |
#!/usr/bin/env python
# Sign every Jupyter notebook in the current directory as trusted so their
# stored outputs render without Jupyter's "not trusted" prompt.
# NOTE(review): os is imported but unused here.
import os, glob, nbformat.sign
# Iterate over notebooks and sign each of them as trusted
for fname in glob.glob("*.ipynb"):
    with open(fname) as f:
        nb = nbformat.read(f, as_version=4)
    nbformat.sign.NotebookNotary().sign(nb)
| fastai/fastai_old | docs_src/trustnbs.py | trustnbs.py | py | 268 | python | en | code | 177 | github-code | 90 |
# Loops
# There two types of loops in python, for and while
# Loops iterate over a given sequence
# Loops can iterate through lists, strings, dictionaries and tuples
list_data = [1, 2, 3]
# Prints 1, 2, 3
for n in list_data:
    print(n)
# Prints out the numbers 0, 1, 2, 3, 4
for x in range(5):
    print(x)
# Combining with control flows
# (no element of list_data is > 4 or < 0, so every element is printed)
for data in list_data:
    if data > 4:
        break
    elif data < 0:
        print("Please enter number above 0 ")
    print(data)
# Create a string and loop through the string
# (strings are sequences: this prints one character per line)
city = "London"
for letter in city:
    print(letter)
# Print the string in one line
# Fix: the original printed inside the loop whenever the current letter
# equalled name[-1] — fragile (prints early/repeatedly if the last character
# occurs earlier) and O(n**2) from repeated concatenation.  Building the
# line once with str.join and printing after the loop is robust.
name = "Anaïs Tang"
one_line = " " + " ".join(name)
print(one_line)
# Looping through a dictionary
student_record = {
    "name": "Anais",
    "stream":"Technical Consultant",
    "completed_lesson": 5,
    "completed_lesson_names": ["strings", "tuples", "variables"]
}
# iterate through values
for record in student_record.values():
    print(record)
# iterate through keys
for results in student_record.keys():
    print(results)
# iterate through items (keys and values)
for record, results in student_record.items():
    print(record, results)
# Create a dictionary with employee records minimum 5 key value pairs
employee_records = {
    "name": "Anais",
    "age": 45,
    "employee_id": 12445,
    "department": "Technology",
    "building": "C45"
}
# Using loop iterate through the dictionary
# Will output keys by default
for record in employee_records:
    print(record)
# Display the values and keys of the dictionary
for record, results in employee_records.items():
    print(record, results)
# Syntax: while variable name with condition then:
# break exits the loop once x reaches 3, so only 1, 2 and 3 are printed.
x = 1
while x < 5:
    print(f"it is working -> {x}")
    if x == 3:
        break
    x += 1
| naistangz/Technical_Training | docs/Week3_Python /loops.py | loops.py | py | 1,841 | python | en | code | 1 | github-code | 90 |
43217345996 | from Cryptodome.Hash import SHA256
from Cryptodome.PublicKey import RSA
from Cryptodome.Signature import pkcs1_15
import os
def writeKeys(key, fname):
    """Persist the public half of *key* to '<fname>.key.pem' in PEM format."""
    pem_path = fname + '.key.pem'
    pem_bytes = key.publickey().export_key('PEM')
    with open(pem_path, "wb") as handle:
        handle.write(pem_bytes)
def readKey(fname):
    """Load an RSA key previously written to '<fname>.key.pem'."""
    with open(fname + '.key.pem', "rb") as handle:
        return RSA.import_key(handle.read())
if __name__ == '__main__':
    # Interactive entry point: 's' signs a file, 'c' verifies a signature.
    # (User-facing prompts are intentionally kept in Russian.)
    fname = input("Введите имя файла: ")
    action = input("Введите действие sing(s)/check(c):")
    if action == 's':
        # Generate a fresh 1024-bit RSA key pair and store the public key.
        print("Создание публичного и приватного ключей...")
        key = RSA.generate(1024, os.urandom)
        writeKeys(key, fname)
        print("Ключи созданы и записаны в файл.")
        # Compute the SHA-256 hash of the file, streamed in 4 KiB chunks.
        try:
            h = SHA256.new()
            with open(fname, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    h.update(chunk)
        except(FileNotFoundError):
            print("Файл не найден")
            exit(0)
        # Sign the hash with the private key (PKCS#1 v1.5) and store it.
        signature = pkcs1_15.new(key).sign(h)
        with open(fname + '.signature', "wb") as pub:
            pub.write(signature)
        print("Электронная подпись успешно создана.")
    if action == 'c':
        # Load the stored public key and the detached signature.
        try:
            key = readKey(fname)
            pubkey = key.publickey()
            print("Ключ прочитан из файла.")
            with open(fname + '.signature', "rb") as pub:
                signature = pub.read()
            print("Считана электронная подпись.")
        except(FileNotFoundError):
            print("Файл не найден. Проверьте наличие ключа и сигнатуры")
            exit(0)
        try:
            # Recompute the SHA-256 hash of the file, same chunked scheme.
            h = SHA256.new()
            with open(fname, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    h.update(chunk)
        except(FileNotFoundError):
            print("Файл не найден")
            exit(0)
        try:
            # verify() raises ValueError on mismatch, hence the try/except.
            pkcs1_15.new(pubkey).verify(h, signature)
            print("Подпись подтверждена. ")
        except(ValueError):
            print("Неверная подпись!")
| ansushina/sem7 | security/lab5/sign.py | sign.py | py | 2,497 | python | ru | code | 1 | github-code | 90 |
# Count distinct rating "colours" (appears to be an AtCoder task: ratings
# below 3200 map to a fixed colour bucket a // 400; ratings of 3200+,
# i.e. bucket >= 8, may pick any colour — TODO confirm problem statement).
# NOTE(review): N is read but unused; the length of A is implied.
N = int(input())
A = list(map(int,input().split()))
# cnt: players free to choose any colour; ALL: fixed colour buckets seen.
cnt = 0
ALL = set()
for a in A:
    c = a//400
    if c >= 8:
        cnt+= 1
    else:
        ALL.add(c)
# Print minimum then maximum number of distinct colours.
if len(ALL) == 0:
    # Only free-choice players: all can share one colour (min 1) or all
    # can differ (max cnt).
    print(1,cnt)
else:
    print(len(ALL),len(ALL)+cnt)
| Aasthaengg/IBMdataset | Python_codes/p03695/s969144269.py | s969144269.py | py | 235 | python | en | code | 0 | github-code | 90 |
13281132389 | """Reports management"""
from views.report_view import ReportView
from views.tournament_view import TournamentView
class ReportManagement:
    """Menu-driven reports over players, tournaments, rounds and games.

    Refactor: the duplicated "any tournaments? -> let the user pick one ->
    else warn" sequence that appeared in four methods is factored into the
    private helper _select_tournament(); all public method names, prompts
    and printed strings are unchanged.
    """

    def __init__(self, player_controller, tournament_controller):
        """Wire up the views and controllers the reports read from."""
        self.report_view = ReportView()
        self.player_controller = player_controller
        self.tournament_view = TournamentView()
        self.tournament_controller = tournament_controller
        self.tournaments_list = self.tournament_controller.tournaments_list

    def choose_a_report(self):
        """Run the reports options"""
        self.active = True
        menu = {
            "1": self.print_all_players_list_by_alpha_order,
            "2": self.print_all_players_list_by_ranking,
            "3": self.print_tournament_players_list_by_alpha_order,
            "4": self.print_tournament_players_list_by_ranking,
            "5": self.print_all_tournaments_list,
            "6": self.print_all_rounds_tournament_list,
            "7": self.print_all_games_tournament_list,
            "8": self.stop_report
        }
        while self.active:
            choice = self.report_view.prompt_report_choice()
            if choice in menu:
                menu[choice]()
            else:
                print("invalid option")

    def stop_report(self):
        """Leave the report menu loop."""
        self.active = False

    def _select_tournament(self):
        """Let the user pick a tournament; warn and return None if none exist."""
        if len(self.tournaments_list) != 0:
            return self.tournament_view.pick_up_tournament(
                self.tournaments_list
            )
        print("No tournament created.")
        return None

    def print_all_players_list_by_alpha_order(self):
        """Print every known player, sorted by name."""
        print("All players list by alphabetical order : ")
        sorted_players = sorted(self.player_controller.players,
                                key=lambda p: p.name)
        self.report_view.print_players_report(sorted_players)

    def print_all_players_list_by_ranking(self):
        """Print every known player, best ranking first."""
        print("All players list by ranking : ")
        sorted_players = sorted(self.player_controller.players,
                                key=lambda p: p.ranking, reverse=True)
        self.report_view.print_players_report(sorted_players)

    def print_tournament_players_list_by_alpha_order(self):
        """Print the players of a chosen tournament, sorted by name."""
        tournament = self._select_tournament()
        if tournament is None:
            return
        sorted_players = sorted(tournament.players, key=lambda p: p.name)
        print("Tournament players list by alphabetical order : ")
        self.report_view.print_players_report(sorted_players)

    def print_tournament_players_list_by_ranking(self):
        """Print the players of a chosen tournament, best ranking first."""
        tournament = self._select_tournament()
        if tournament is None:
            return
        sorted_players = sorted(tournament.players,
                                key=lambda p: p.ranking, reverse=True)
        print("Tournament players list by ranking : ")
        self.report_view.print_players_report(sorted_players)

    def print_all_tournaments_list(self):
        """Print every tournament, sorted by name."""
        sorted_tournaments = sorted(self.tournaments_list,
                                    key=lambda p: p.name)
        print("List of all the tournaments : ")
        self.report_view.print_tournament_report(sorted_tournaments)

    def print_all_rounds_tournament_list(self):
        """Print the rounds of a chosen tournament."""
        tournament = self._select_tournament()
        if tournament is None:
            return
        print("Tournament rounds list : ")
        self.report_view.print_rounds_report(tournament.rounds)

    def print_all_games_tournament_list(self):
        """Print the games of each round of a chosen tournament."""
        tournament = self._select_tournament()
        if tournament is None:
            return
        print("Tournament games list : ")
        for tournament_round in tournament.rounds:
            print(tournament_round.round_name)
            self.report_view.print_games_report(tournament_round.games)
| erikcaul/OC_Projet_4 | controllers/report_management.py | report_management.py | py | 4,813 | python | en | code | 0 | github-code | 90 |
5081173032 | from Stack.stack import Stack
# Implement a function called sort_stack() which takes a stack and sorts all of its elements in ascending
# order such that when they are popped and printed, they come out in ascending order.
# So the element that was pushed last to the stack has to be the smallest.
def sort_stack(stack: Stack):
    """Sort *stack* in place so popped elements come out in ascending order.

    Uses one auxiliary stack: elements are kept ascending from the helper's
    bottom, then poured back so the smallest ends up on top of *stack*.
    Returns the (same) stack object.
    """
    helper = Stack()
    while not stack.is_empty():
        current = stack.pop()
        # Move back any helper elements larger than the current value so
        # it can be inserted in its sorted position.
        while not helper.is_empty() and current < helper.peek():
            stack.push(helper.pop())
        helper.push(current)
    # Pour the sorted helper back: smallest value ends up on top.
    while not helper.is_empty():
        stack.push(helper.pop())
    return stack
# A string containing a postfix mathematic expression.
# Each digit is considered to be a separate number, i.e., there are no double digit numbers.
def evaluate_post_fix(exp):
    """Evaluate a postfix expression of single-digit numbers and + - * /.

    Security/clarity fix: the original ran eval() on a string assembled
    from the input, and named the first popped value "left" even though
    the first pop is the right-hand operand.  This version dispatches to
    explicit arithmetic and names the operands correctly; the returned
    int is unchanged.
    """
    ops = {
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "*": lambda a, b: a * b,
        "/": lambda a, b: a / b,
    }
    stack = Stack()
    for char in exp:
        if char.isdigit():
            stack.push(char)
        else:
            # First pop is the right-hand operand, second the left-hand one.
            rhs = float(stack.pop())
            lhs = float(stack.pop())
            stack.push(str(ops[char](lhs, rhs)))
    return int(float(stack.pop()))
if __name__ == "__main__":
    # Demo: sort a stack of mixed positive/negative integers and print it.
    stack = Stack()
    stack.push(2)
    stack.push(97)
    stack.push(4)
    stack.push(0)
    stack.push(-20)
    stack.push(-2)
    print("Sorted stack", sort_stack(stack))
print(evaluate_post_fix("921*-8-4+")) | PRINCEKK122/educative | app.py | app.py | py | 1,472 | python | en | code | 0 | github-code | 90 |
from math import atan, pi
# Appears to be the tilted-water-tank problem: a square-based tank
# (base a x a, height b) holds x units of water; compute the maximum tilt
# angle in degrees before water spills — TODO confirm problem statement.
a, b, x = map(int, input().split())
if x > a * a * b / 2:
    # More than half full: the water surface cuts the side walls.
    h = 2 * x / (a * a) - b
    ans = atan((b - h) / a)
else:
    # Half full or less: the water surface cuts the base.
    h = 2 * x / (a * b)
    ans = pi / 2 - atan(h/b)
# Convert radians to degrees (result printed by the following line).
ans *= 180 / pi
print("{:.12f}".format(ans)) | Aasthaengg/IBMdataset | Python_codes/p02882/s973609113.py | s973609113.py | py | 245 | python | en | code | 0 | github-code | 90 |
4964994542 | import os
import random
import numpy
import dismod_at
from matplotlib import pyplot
import matplotlib.backends.backend_pdf
# ----------------------------------------------------------------------------
def plot_data_fit(
# BEGIN syntax
# n_fit_dict = plot_data_fit(
database = None,
pdf_file = None,
plot_title = None,
max_plot = None,
integrand_list = None,
# )
# END syntax
) :
assert not database is None
assert not pdf_file is None
#
# connection
connection = dismod_at.create_connection(
database, new = False, readonly = True
)
#
# tables
tables = dict()
tables['option'] = dismod_at.get_table_dict(connection, 'option')
#
# other_connection, other_input_table_list
other_connection = None
other_database = None
other_input_table_list = list()
for row in tables['option'] :
if row['option_name'] == 'other_database' :
other_database = row['option_value']
if row['option_name'] == 'other_input_table' :
other_input_table = row['option_value']
other_input_table_list = other_input_table.split(' ')
if other_database != None :
if not os.path.isabs( other_database ) :
database_dir = os.path.dirname( database )
other_database = os.path.join( database_dir, other_database )
other_connection = dismod_at.create_connection(
other_database, new = False, readonly = True
)
#
# tables
for name in [
'data',
'data_subset',
'integrand',
'fit_data_subset',
] :
if name in other_input_table_list :
tables[name] = dismod_at.get_table_dict(other_connection, name)
else :
tables[name] = dismod_at.get_table_dict(connection, name)
connection.close()
if other_connection != None :
other_connection.close()
#
# pdf
pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_file)
#
# integrand_list
if integrand_list is None :
integrand_list = list()
for row in tables['integrand'] :
integrand_name = row['integrand_name']
if not integrand_name.startswith('mulcov_') :
integrand_list.append( integrand_name )
#
n_fit_dict = dict()
for integrand_name in integrand_list :
#
# integrand_id
integrand_id = None
for (row_id, row) in enumerate(tables['integrand']) :
if row['integrand_name'] == integrand_name :
integrand_id = row_id
if integrand_id is None :
msg = f'Cannot find {integrand_name} in integrand table for '
msg += database
assert False, msg
#
# info_list
info_list = list()
#
# subset_id, subset_row
for (subset_id, subset_row) in enumerate(tables['data_subset']) :
#
# data_row
data_id = subset_row['data_id']
data_row = tables['data'][data_id]
#
if data_row['integrand_id'] == integrand_id :
#
# meas_value
meas_value = data_row['meas_value']
#
# hold_out
hold_out = data_row['hold_out']
#
# age
age = (data_row['age_lower'] + data_row['age_upper']) / 2.0
#
# time
time = (data_row['time_lower'] + data_row['time_upper']) / 2.0
#
# avg_integreand, weighted_residual
row = tables['fit_data_subset'][subset_id]
avg_integrand = row['avg_integrand']
weighted_residual = row['weighted_residual']
#
# info_list
info = {
'meas_value' : meas_value ,
'model' : avg_integrand ,
'residual' : weighted_residual ,
'hold_out' : hold_out ,
'index' : len(info_list),
'age' : age,
'time' : time,
}
info_list.append( info )
#
# n_point
n_point = len( info_list )
#
if n_point == 0 :
#
# n_hold_out
n_hold_out = 0
else :
# numpy_info
keys = info_list[0].keys()
numpy_info = dict()
for key in keys :
vector = numpy.zeros(n_point, dtype=float)
for i in range( n_point ) :
vector[i] = info_list[i][key]
numpy_info[key] = vector
#
# hold_out, not_hold_out, n_hold_out
hold_out = (numpy_info['hold_out'] == 1)
not_hold_out = numpy.logical_not(hold_out)
n_hold_out = sum( hold_out )
#
# n_fit_dict
n_fit_dict[integrand_name] = n_point - n_hold_out
#
if n_point - n_hold_out > 1 :
#
#
# y_min, y_max
d_fit = numpy_info['meas_value'][not_hold_out]
d_median = numpy.median( d_fit )
d_max = d_median * 1e+3
d_min = d_median * 1e-3
assert d_min >= 0.0
#
# r_min, r_max
r_fit = numpy_info['residual'][not_hold_out]
r_norm = numpy.linalg.norm( r_fit )
r_avg_sq = r_norm * r_norm / (n_point - n_hold_out)
r_max = 4.0 * numpy.sqrt( r_avg_sq )
r_min = - r_max
#
# subplot_list
subplot_list = [ 'meas_value', 'model', 'residual' ]
#
# numpy_info
for name in [ 'meas_value', 'model' ] :
numpy_info[name] = numpy.maximum( numpy_info[name], d_min )
numpy_info[name] = numpy.minimum( numpy_info[name], d_max )
for name in [ 'residual' ] :
numpy_info[name] = numpy.maximum( numpy_info[name], r_min )
numpy_info[name] = numpy.minimum( numpy_info[name], r_max )
#
if max_plot is None or n_point <= max_plot :
#
# n_plot
n_plot = n_point
else :
#
# n_plot
n_plot = max_plot
#
# subsample
subsample = random.sample( range(n_point), max_plot )
subsample = sorted(subsample)
#
# numpy_info
for key in numpy_info :
numpy_info[key] = numpy_info[key][subsample]
#
# hold_out, not_hold_out
hold_out = (numpy_info['hold_out'] == 1)
not_hold_out = numpy.logical_not(hold_out)
#
#
# point_size, marker_size
point_size = numpy.array( n_plot * [ 1 ] )
marker_size = numpy.array( n_plot * [ 10 ] )
#
for x_name in [ 'index', 'age', 'time' ] :
#
# subplot setup
fig, axes = pyplot.subplots(3, 1, sharex=True)
fig.subplots_adjust(hspace=0)
#
# x
x = numpy_info[x_name]
#
for subplot_index in range(3) :
# sp
sp = pyplot.subplot(3, 1, subplot_index + 1)
#
# name, y
name = subplot_list[subplot_index]
y = numpy_info[name]
#
# ylabel
pyplot.ylabel(name)
#
# clip_list, limit_list
if name == 'residual' :
clip_list = [ r_min, r_max ]
limit_list = [ 1.1 * r_min, 1.1 * r_max ]
else :
pyplot.yscale('log')
clip_list = [ d_min, d_max ]
limit_list = [ 0.9 * d_min, 1.1 * d_max ]
#
# ylim
pyplot.ylim(limit_list[0], limit_list[1])
#
# clipped, not_clipped
clipped = (y == clip_list[0])
clipped = numpy.logical_or(clipped, (y == clip_list[1]) )
not_clipped = numpy.logical_not(clipped)
#
green_point = numpy.logical_and(hold_out, not_clipped)
green_marker = numpy.logical_and(hold_out, clipped)
black_point = numpy.logical_and(not_hold_out, not_clipped)
red_marker = numpy.logical_and(not_hold_out, clipped)
#
# plot green points
size = point_size[green_point]
pyplot.scatter(x[green_point], y[green_point],
marker='.', color='green', s=size
)
#
# plot green marker
size = marker_size[green_marker]
pyplot.scatter(x[green_marker], y[green_marker],
marker='+', color='green', s=size
)
#
# plot black points
size = point_size[black_point]
pyplot.scatter(x[black_point], y[black_point],
marker='.', color='black', s=size
)
#
# plot red marker
size = marker_size[red_marker]
pyplot.scatter(x[red_marker], y[red_marker],
marker='+', color='red', s=size
)
if name == 'residual' :
y = 0.0
pyplot.axhline(
y, linestyle='solid', color='black', alpha=0.3
)
if subplot_index == 0 :
if plot_title is None :
pyplot.title( integrand_name )
else :
pyplot.title( plot_title + ': ' + integrand_name )
# x-axis label
pyplot.xlabel(x_name)
#
# save plot
pdf.savefig( fig )
pyplot.close( fig )
# end of pages in pdf file
pdf.close()
return n_fit_dict
| bradbell/dismod_at | python/dismod_at/plot_data_fit.py | plot_data_fit.py | py | 9,930 | python | en | code | 6 | github-code | 90 |
def func(i, x, s, new):
    """Sum of all expressions formed by optionally inserting '+' between digits.

    i is len(s); x is the 1-based position currently being decided; new is
    the partial expression built so far.  At each position the digit is
    appended either followed by '+' or not, and the two subtree sums are
    added together.
    """
    if i == x:
        # Last digit reached: close the expression and evaluate it.
        expression = new + s[-1]
        return sum(int(term) for term in expression.split("+"))
    with_plus = func(i, x + 1, s, new + s[x - 1] + "+")
    without_plus = func(i, x + 1, s, new + s[x - 1])
    return with_plus + without_plus
# Read a digit string from stdin and print the total of all formulas
# obtained by inserting '+' between its digits (computed by func above).
new=""
S=input()
print(func(len(S),1,S,new))
| Aasthaengg/IBMdataset | Python_codes/p04001/s321509856.py | s321509856.py | py | 334 | python | en | code | 0 | github-code | 90 |
9062488624 | from game import Game
from player import BotPlayer
# Seed three bot players, each buying in with 1000 chips.
players = [("Alice", 1000), ("Bob", 1000), ("Cyril", 1000)]
game = Game()
for player, cash in players:
    # The loop variable is rebound from the (name, cash) tuple's name
    # to the constructed BotPlayer instance before registration.
    player = BotPlayer(player, cash)
    game.add_player(player)
# Run a fixed 100-hand session.
for hand in range(100):
game.play_hand() | Y0mingZhang/PokerRoom | main.py | main.py | py | 265 | python | en | code | 0 | github-code | 90 |
37650235130 | """Trains a deep Bayesian neural net to classify MNIST digits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
from absl import flags
import matplotlib
matplotlib.use("Agg")
from matplotlib import figure # pylint: disable=g-import-not-at-top
from matplotlib.backends import backend_agg
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import horovod.tensorflow as hvd
from tensorflow.contrib.learn.python.learn.datasets import mnist
# TODO(b/78137893): Integration tests currently fail with seaborn imports.
try:
import seaborn as sns # pylint: disable=g-import-not-at-top
HAS_SEABORN = True
except ImportError:
HAS_SEABORN = False
tfd = tf.contrib.distributions
IMAGE_SHAPE = [28, 28]
flags.DEFINE_float("learning_rate",
default=0.01,
help="Initial learning rate.")
flags.DEFINE_integer("max_steps",
default=6000,
help="Number of training steps to run.")
flags.DEFINE_list("layer_sizes",
default=["128", "128"],
help="Comma-separated list denoting hidden units per layer.")
flags.DEFINE_string("activation",
default="relu",
help="Activation function for all hidden layers.")
flags.DEFINE_integer("batch_size",
default=128,
help="Batch size.")
flags.DEFINE_string("data_dir",
default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"),
"bayesian_neural_network/data"),
help="Directory where data is stored (if using real data).")
flags.DEFINE_string(
"model_dir",
default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"),
"bayesian_neural_network/"),
help="Directory to put the model's fit.")
flags.DEFINE_integer("viz_steps",
default=400,
help="Frequency at which save visualizations.")
flags.DEFINE_integer("num_monte_carlo",
default=50,
help="Network draws to compute predictive probabilities.")
flags.DEFINE_bool("fake_data",
default=None,
help="If true, uses fake data. Defaults to real data.")
FLAGS = flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
def plot_weight_posteriors(names, qm_vals, qs_vals, fname):
"""Save a PNG plot with histograms of weight means and stddevs.
Args:
names: A Python `iterable` of `str` variable names.
qm_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior means of weight varibles.
qs_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior standard deviations of weight varibles.
fname: Python `str` filename to save the plot to.
"""
fig = figure.Figure(figsize=(6, 3))
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 2, 1)
for n, qm in zip(names, qm_vals):
sns.distplot(qm.flatten(), ax=ax, label=n)
ax.set_title("weight means")
ax.set_xlim([-1.5, 1.5])
ax.set_ylim([0, 4.])
ax.legend()
ax = fig.add_subplot(1, 2, 2)
for n, qs in zip(names, qs_vals):
sns.distplot(qs.flatten(), ax=ax)
ax.set_title("weight stddevs")
ax.set_xlim([0, 1.])
ax.set_ylim([0, 25.])
fig.tight_layout()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname))
def plot_heldout_prediction(input_vals, probs,
fname, n=10, title=""):
"""Save a PNG plot visualizing posterior uncertainty on heldout data.
Args:
input_vals: A `float`-like Numpy `array` of shape
`[num_heldout] + IMAGE_SHAPE`, containing heldout input images.
probs: A `float`-like Numpy array of shape `[num_monte_carlo,
num_heldout, num_classes]` containing Monte Carlo samples of
class probabilities for each heldout sample.
fname: Python `str` filename to save the plot to.
n: Python `int` number of datapoints to vizualize.
title: Python `str` title for the plot.
"""
fig = figure.Figure(figsize=(9, 3*n))
canvas = backend_agg.FigureCanvasAgg(fig)
for i in range(n):
ax = fig.add_subplot(n, 3, 3*i + 1)
ax.imshow(input_vals[i, :].reshape(IMAGE_SHAPE), interpolation="None")
ax = fig.add_subplot(n, 3, 3*i + 2)
for prob_sample in probs:
sns.barplot(np.arange(10), prob_sample[i, :], alpha=0.1, ax=ax)
ax.set_ylim([0, 1])
ax.set_title("posterior samples")
ax = fig.add_subplot(n, 3, 3*i + 3)
sns.barplot(np.arange(10), np.mean(probs[:, i, :], axis=0), ax=ax)
ax.set_ylim([0, 1])
ax.set_title("predictive probs")
fig.suptitle(title)
fig.tight_layout()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname))
def build_input_pipeline(mnist_data, batch_size, heldout_size):
  """Build an Iterator switching between train and heldout data.

  Returns (images, labels, handle, training_iterator, heldout_iterator):
  feed the string `handle` placeholder with either iterator's handle to
  choose which split the (images, labels) tensors draw from.
  """
  # Build an iterator over training batches.
  training_dataset = tf.data.Dataset.from_tensor_slices(
      (mnist_data.train.images, np.int32(mnist_data.train.labels)))
  training_batches = training_dataset.repeat().batch(batch_size)
  training_iterator = training_batches.make_one_shot_iterator()
  # Build a iterator over the heldout set with batch_size=heldout_size,
  # i.e., return the entire heldout set as a constant.
  heldout_dataset = tf.data.Dataset.from_tensor_slices(
      (mnist_data.validation.images,
       np.int32(mnist_data.validation.labels)))
  heldout_frozen = (heldout_dataset.take(heldout_size).
                    repeat().batch(heldout_size))
  heldout_iterator = heldout_frozen.make_one_shot_iterator()
  # Combine these into a feedable iterator that can switch between training
  # and validation inputs.
  handle = tf.placeholder(tf.string, shape=[])
  feedable_iterator = tf.data.Iterator.from_string_handle(
      handle, training_batches.output_types, training_batches.output_shapes)
  images, labels = feedable_iterator.get_next()
  return images, labels, handle, training_iterator, heldout_iterator
def build_fake_data(num_examples=10):
  """Build fake MNIST-style data for unit testing.

  Args:
    num_examples: Python `int`, number of fake examples per split.

  Returns:
    An object with `train` and `validation` attributes, each exposing
    `images`, `labels` and `num_examples` like the real MNIST loader.

  Bug fix: the original body unconditionally reassigned
  `num_examples = 10`, silently ignoring the caller's argument.
  """
  class Dummy(object):
    pass

  def _fake_split(n):
    # One split: random images and a permutation of 0..n-1 as labels,
    # matching the original construction.
    split = Dummy()
    split.images = np.float32(np.random.randn(n, np.prod(IMAGE_SHAPE)))
    split.labels = np.int32(np.random.permutation(np.arange(n)))
    split.num_examples = n
    return split

  mnist_data = Dummy()
  mnist_data.train = _fake_split(num_examples)
  mnist_data.validation = _fake_split(num_examples)
  return mnist_data
class DSHandleHook(tf.train.SessionRunHook):
    """Hook that resolves dataset string-handle tensors to concrete handles.

    Used with tf.train.MonitoredTrainingSession in this file: the session
    is created by the framework, so the handle tensors can only be
    evaluated afterwards; this hook runs them once and stores the values.
    """

    def __init__(self, train_str, valid_str):
        # String-handle tensors (or None); resolved values filled in later.
        self.train_str = train_str
        self.valid_str = valid_str
        self.train_handle = None
        self.valid_handle = None

    def after_create_session(self, session, coord):
        """Evaluate the handle tensors in the freshly created session."""
        del coord
        if self.train_str is not None:
            self.train_handle, self.valid_handle = session.run([self.train_str,
                                                                self.valid_str])
            print('session run ds string-handle done....')
def main(argv):
del argv # unused
hvd.init()
FLAGS.layer_sizes = [int(units) for units in FLAGS.layer_sizes]
FLAGS.activation = getattr(tf.nn, FLAGS.activation)
if tf.gfile.Exists(FLAGS.model_dir+ str(hvd.rank())):
tf.logging.warning(
"Warning: deleting old log directory at {}".format(FLAGS.model_dir))
tf.gfile.DeleteRecursively(FLAGS.model_dir)
tf.gfile.MakeDirs(FLAGS.model_dir)
if FLAGS.fake_data:
mnist_data = build_fake_data()
else:
mnist_data = mnist.read_data_sets(FLAGS.data_dir+str(hvd.rank()))
with tf.Graph().as_default():
(images, labels, handle,
training_iterator, heldout_iterator) = build_input_pipeline(
mnist_data, FLAGS.batch_size, mnist_data.validation.num_examples)
# Build a Bayesian neural net. We use the Flipout Monte Carlo estimator for
# each layer: this enables lower variance stochastic gradients than naive
# reparameterization.
with tf.name_scope("bayesian_neural_net", values=[images]):
neural_net = tf.keras.Sequential()
for units in FLAGS.layer_sizes:
layer = tfp.layers.DenseFlipout(
units,
activation=FLAGS.activation)
neural_net.add(layer)
neural_net.add(tfp.layers.DenseFlipout(10))
logits = neural_net(images)
labels_distribution = tfd.Categorical(logits=logits)
# Compute the -ELBO as the loss, averaged over the batch size.
neg_log_likelihood = -tf.reduce_mean(labels_distribution.log_prob(labels))
kl = sum(neural_net.losses) / mnist_data.train.num_examples
elbo_loss = neg_log_likelihood + kl
# Build metrics for evaluation. Predictions are formed from a single forward
# pass of the probabilistic layers. They are cheap but noisy predictions.
predictions = tf.argmax(logits, axis=1)
accuracy, accuracy_update_op = tf.metrics.accuracy(
labels=labels, predictions=predictions)
# Extract weight posterior statistics for later visualization.
names = []
qmeans = []
qstds = []
for i, layer in enumerate(neural_net.layers):
q = layer.kernel_posterior
names.append("Layer {}".format(i))
qmeans.append(q.mean())
qstds.append(q.stddev())
with tf.name_scope("train"):
opt = tf.train.AdamOptimizer(learning_rate=(FLAGS.learning_rate*hvd.size()))
opt = hvd.DistributedOptimizer(opt)
global_step = tf.train.get_or_create_global_step()
train_op = opt.minimize(elbo_loss, global_step=global_step)
# Run the training loop.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
checkpoint_dir = './checkpoints16' if hvd.rank() == 0 else None
train_str_handle = training_iterator.string_handle()
heldout_str_handle = heldout_iterator.string_handle()
ds_handle_hook = DSHandleHook(train_str_handle, heldout_str_handle)
hooks = [
# Horovod: BroadcastGlobalVariablesHook broadcasts initial variable states
# from rank 0 to all other processes. This is necessary to ensure consistent
# initialization of all workers when training is started with random weights
# or restored from a checkpoint.
hvd.BroadcastGlobalVariablesHook(0),
# Horovod: adjust number of steps based on number of GPUs.
tf.train.StopAtStepHook(last_step=FLAGS.max_steps // hvd.size()),
tf.train.LoggingTensorHook(tensors={'step': global_step, 'loss': elbo_loss},
every_n_iter=100),
tf.train.LoggingTensorHook(tensors={'step': global_step, 'accuracy': accuracy},
every_n_iter=100),
ds_handle_hook
]
with tf.train.MonitoredTrainingSession(checkpoint_dir=checkpoint_dir,
hooks=hooks,
config=config) as mon_sess:
while not mon_sess.should_stop():
_ = mon_sess.run([train_op, accuracy_update_op],
feed_dict={handle: ds_handle_hook.train_handle})
#Warning: This is an ad-hoc way of printing Held-out.
#Because of this you may see code failed message but that can be safely ignored.
#It doesn't affect any actual computation.
#
step = mon_sess.run(global_step)
if (step+1) % FLAGS.viz_steps == 0:
# Compute log prob of heldout set by averaging draws from the model:
# p(heldout | train) = int_model p(heldout|model) p(model|train)
# ~= 1/n * sum_{i=1}^n p(heldout | model_i)
# where model_i is a draw from the posterior p(model|train).
probs = np.asarray([mon_sess.run((labels_distribution.probs),
feed_dict={handle: ds_handle_hook.valid_handle})
for _ in range(FLAGS.num_monte_carlo)])
mean_probs = np.mean(probs, axis=0)
image_vals, label_vals = mon_sess.run((images, labels),
feed_dict={handle: ds_handle_hook.valid_handle})
heldout_lp = np.mean(np.log(mean_probs[np.arange(mean_probs.shape[0]),
label_vals.flatten()]))
print(" ... Held-out nats: {:.3f}".format(heldout_lp))
qm_vals, qs_vals = mon_sess.run((qmeans, qstds))
if __name__ == "__main__":
tf.app.run() | aritrasen/Tensorflow-Probabillity | dist_bayesian_nn.py | dist_bayesian_nn.py | py | 13,400 | python | en | code | 0 | github-code | 90 |
35627363040 | import pandas as pd
import numpy as np
import pdpipe as pdp
import pickle
import re
import string
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.feature_extraction.text import TfidfVectorizer
nltk.download('vader_lexicon')
stop_words = set(stopwords.words('english'))
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
analyzer = SentimentIntensityAnalyzer()
#########################
# Variables
#########################
# Lower-cased substrings whose presence marks a tweet as Covid-related
# (consumed by covid_mention(); partial stems such as 'contact trac' are
# deliberate so one entry matches several word forms).
covid_list = ['covid', 'virus', 'corona','ncov', 'sars',
              'super spread', 'super-spread', 'pandemic',
              'epidemic', 'outbreak', 'new case', 'new death',
              'active case', 'community spread', 'contact trac',
              'social distanc', 'self isolat', 'self-isolat', 'mask',
              'ppe', 'quarantine', 'lockdown', 'symptomatic', 'vaccine',
              'bonnie', 'new normal', 'ventilator', 'respirator', 'travel restrictions',
              'doyourpartbc']
rt_regex = '(?:^rt|^RT)' # leading retweet marker, lower- or upper-case only
bc_cov19_url = 'https://health-infobase.canada.ca/src/data/covidLive/covid19-download.csv'
#########################
# Data Collection
#########################
def get_covid_data(df_name='df_bc_covid', globe=False):
    """
    Download Canada's Covid-19 CSV, filter it to British Columbia and
    return a DataFrame indexed by date (NaNs filled with 0).

    Parameters
    ----------
    df_name : str
        Unused; kept for backward compatibility with existing callers.
    globe : bool
        When True, also bind the result to the module-level name
        ``df_bc_covid``.

    Raises
    ------
    RuntimeError
        If the CSV cannot be fetched/parsed.  (The original merely
        printed a message and then died later with a NameError because
        ``df_bc_covid`` was never assigned.)
    """
    if globe:
        global df_bc_covid
    try:
        df_bc_covid = pd.read_csv(bc_cov19_url)
    except Exception as err:
        # Fail loudly instead of falling through with an unbound name.
        raise RuntimeError('Reading CSV from URL failed') from err
    df_bc_covid = df_bc_covid[df_bc_covid.prname == 'British Columbia']
    df_bc_covid = df_bc_covid.set_index('date').fillna(0)
    df_bc_covid.drop(['pruid', 'prname', 'prnameFR'], axis=1, inplace=True)
    return df_bc_covid
#########################
# Preprocessing Functions
#########################
def col_filter(df, cols=None):
    """
    Reduce a full twitter DataFrame to the columns of interest.

    The DataFrame is re-indexed in place on ``id_str``; a frame holding
    only *cols* is returned.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw tweet DataFrame containing an ``id_str`` column.
    cols : list[str] | None
        Columns to keep; defaults to the standard tweet fields.
    """
    # Avoid the original mutable default argument; build the list per call.
    if cols is None:
        cols = ['created_at', 'user', 'full_text', 'retweet_count', 'retweeted_status']
    df.set_index('id_str', inplace=True)
    return df[cols]
def preprocess(text, hashtags=False, join=False, url=True, user=True, emo=False):
    """
    Strips out URLs, usernames, punctuation, and other unwanted text.
    Tweets are tokenzied and stop words removed. Removing hashtags, URLs,
    user mentions and joining tokens back into a string is optional.

    Relies on the module-level ``stop_words`` set and nltk's
    ``word_tokenize``.  Returns a token list, or one joined string when
    ``join=True``.

    Example - Creating a new column in DataFrame:
    df['new_col'] = df['full_text'].apply(lambda x: preprocess(x, url=True, join=True, emo=True))
    """
    text = text.lower()
    if hashtags:
        # Remove whole hashtags ('#' plus the tag text).
        text = ' '.join(re.sub(r'\#\w*[a-zA-Z]+\w*','',text).split())
    if url:
        # Remove www.* and http(s):// URLs.
        text = ' '.join(re.sub('((www\.[\S]+)|(https?://[\S]+))','',text).split())
    if user:
        # Remove @mentions (lookbehind keeps e-mail addresses intact).
        text = ' '.join(re.sub('(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9-_]+)','',text).split())
    if emo:
        # Positive Emoji - Smile, Laugh, Wink,Love
        text = ' '.join(re.sub('(:\s?\)|:-\)|:-\)\)|;\)|\(\s?:|\(-:|:\’\))',' emopos ',text).split())
        text = ' '.join(re.sub('(:\s?D|:-D|x-?D|X-?D)',' emopos ',text).split())
        text = ' '.join(re.sub('(<3|:\*)',' emopos ',text).split())
        # Negative Emoji - Sad, Cry
        text = ' '.join(re.sub('(:\s?\(|:-\(|:\||\)\s?:|\)-:)',' emoneg ',text).split())
        text = ' '.join(re.sub('(:,\(|:\’\(|:"\()',' emoneg ',text).split())
    # Drop a leading newline, HTML 'amp;' remnants and a leading retweet marker.
    text = ' '.join(re.sub('^\n','',text).split())
    text = ' '.join(re.sub('amp;',' ',text).split())
    text = ' '.join(re.sub('^rt','',text).split())
    # Strip punctuation character-by-character, then tokenize and drop stop words.
    punc = ''.join([char for char in text if char not in string.punctuation])
    tokens = word_tokenize(punc)
    stops = [word for word in tokens if word not in stop_words]
    if join:
        stops = (' ').join(stops)
    return stops
def tf_preprocess(text):
    """
    Light-weight tweet cleanup: drop @mentions and URLs, strip the HTML
    'amp;' remnant and a leading 'rt' marker, collapsing whitespace after
    each step.  (Scheduled for removal.)
    """
    cleanup_steps = (
        ('(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9-_]+)', ' '),
        ('((www\.[\S]+)|(https?://[\S]+))', ''),
        ('amp;', ' '),
        ('^rt', ' '),
    )
    for pattern, replacement in cleanup_steps:
        text = ' '.join(re.sub(pattern, replacement, text).split())
    return text
def vader_preprocess(text):
    """
    Minimal cleaning for VADER, which copes with punctuation and
    capitalisation on its own.  URLs carry no sentiment, so they are
    removed, along with leading newlines, 'amp;' remnants and a leading
    retweet marker; whitespace is collapsed after every substitution.
    """
    for pattern in ('((www\.[\S]+)|(https?://[\S]+))', '^\n', 'amp;', '^rt'):
        text = re.sub(pattern, '', text)
        text = ' '.join(text.split())
    return text
def extract_full_text(df):
    """
    Expand the ``retweeted_status`` dict column and attach the retweet's
    untruncated text to *df* as a new ``rt_full_text`` column.

    Usage:
    df = extract_full_text(df)
    """
    expanded = df['retweeted_status'].apply(pd.Series)
    expanded['rt_full_text'] = expanded['full_text']
    return df.join(expanded['rt_full_text'])
def replace_retweet_text(df,check_col='full_text',rt_col='rt_full_text'):
    """
    If retweet, extract the full_text from retweeted status and replaces the truncated version.

    Rows whose *check_col* matches the module-level ``rt_regex``
    (a leading 'rt'/'RT') get their text overwritten with *rt_col*;
    the column is then coerced to str.  Mutates and returns *df*.
    """
    df.loc[df[check_col].str.contains(rt_regex, regex=True), check_col] = df[rt_col]
    df[check_col] = df[check_col].astype('str')
    return df
def emoji_stringer(text):
    """
    Replace common ASCII emoticons with the sentiment tokens 'emopos'
    and 'emoneg', collapsing whitespace after each substitution.
    """
    # Positive Emoji - Smile, Laugh, Wink,Love
    positive_patterns = (
        r'(:\s?\)|:-\)|;\)|\(\s?:|\(-:|:\’\))',
        r'(:\s?D|:-D|x-?D|X-?D)',
        r'(<3|:\*)',
    )
    # Negative Emoji - Sad, Cry
    negative_patterns = (
        r'(:\s?\(|:-\(|:\||\)\s?:|\)-:)',
        r'(:,\(|:\’\(|:"\()',
    )
    for patterns, token in ((positive_patterns, 'emopos'), (negative_patterns, 'emoneg')):
        for pattern in patterns:
            text = ' '.join(re.sub(pattern, token, text).split())
    return text
def joiner(text):
    """
    Join a list of tokens into a single space-separated string.
    """
    # The original bound the result to a local named ``string``, shadowing
    # the imported ``string`` module inside this function; return directly.
    return (' ').join(text)
def lower_case(text):
    """
    Return *text* converted to lowercase.
    Kept as a named function so it can be plugged into pipelines.
    """
    lowered = text.lower()
    return lowered
def lemmatize_text(text):
    """
    Strip punctuation from *text* and lemmatize each word, i.e. reduce
    words to their root form. e.g. leaders -> leader
    Returns a list of lemmatized tokens.
    Usage:
    df['lemma'] = df[COL].apply(lambda x: lemmatize_text(x))
    """
    # Bug fix: the original joined the surviving characters with ' ',
    # which put a space between every letter, so the whitespace tokenizer
    # saw one-character "words".  Join with '' (as preprocess() does) so
    # the words stay intact.
    text = ''.join([char for char in text if char not in string.punctuation])
    text = [lemmatizer.lemmatize(w) for w in w_tokenizer.tokenize(text)]
    return text
#########################
# Feature Functions
#########################
def extract_username(df):
    """
    Add a ``user_name`` column extracted from the ``user`` dict column
    (Twitter's ``screen_name`` key).  Mutates and returns *df*.
    """
    df['user_name'] = df['user'].map(lambda record: record.get('screen_name'))
    return df
def covid_mention(text, synonyms=covid_list):
    """
    Checks tweet for presence of any word from the synonyms list.
    Returns 1 if any term is present, else 0.  *text* must be lowercase.
    Arguments:
    synonyms: A list object
    Example: df['covid_mention'] = df['full_text'].apply(covid_mention)
    """
    # The original loop ended each iteration with a redundant `continue`;
    # any() expresses the same substring scan directly.
    return 1 if any(term in text for term in synonyms) else 0
def is_retweet(text):
    """
    Checks if tweet is a retweet, i.e. starts with 'rt' or 'RT'
    (the two casings listed in the module-level ``rt_regex``; mixed
    casings such as 'Rt' are NOT matched).
    Returns a binary.
    Example:
    df['is_retweet'] = df['full_text'].apply(is_retweet)
    """
    if re.match(rt_regex, text) is not None:
        return 1
    return 0
def top_ngrams(df, n=2, ngrams=10):
    """
    * Not generalizable in this form *
    * This works well, but is very inefficient and should be optimized or rewritten *
    Takes a preprocessed, tokenized column and creates a large list.
    Returns most frequent ngrams
    Arguments:
    df = name of DataFrame with a 'lemma' column (this will be generalizable in a future commit)
    n = number of words per grouping eg. 1, 2 or 3
    ngrams = Number of ngrams to return
    """
    # Flatten the whole 'lemma' column to one token list via preprocess().
    # NOTE(review): str() of a list-of-lists is re-cleaned and re-tokenized
    # here, which is what makes this so inefficient.
    word_list = preprocess(''.join(str(df['lemma'].tolist())))
    return (pd.Series(nltk.ngrams(word_list, n)).value_counts())[:ngrams]
def vader_analyze(text):
    """
    Run VADER over *text* and return the full polarity-score dict
    (keys: 'neg', 'neu', 'pos', 'compound') — the whole dict, not just
    the compound value as the original docstring claimed.
    """
    score = analyzer.polarity_scores(text)
    return score
def vader_score_to_series(df):
    """
    Combines several functions to return pos, neg, neu and compound VADER scores as new columns
    Requires vader_analyze and categorize
    Usage:
    df = vader_score_to_series(df)
    """
    # Score every row of 'vader_text' (assumed prepared by
    # vader_preprocess — confirm upstream), fan the score dict out into
    # one column per key, then derive the 0/2/4 label from 'compound'.
    df['vader_scores'] = df['vader_text'].apply(vader_analyze)
    df = df.join(df['vader_scores'].apply(pd.Series))
    # axis=1 is redundant alongside columns=; pandas ignores it here.
    df.drop(columns='vader_scores', inplace=True, axis=1)
    df['vader_label'] = df['compound'].apply(lambda x: categorize(x)).astype('int8')
    return df
def categorize(x, upper=0.05, lower=-0.05):
    """
    Map a VADER compound score to a sentiment class:
    0 (negative), 2 (neutral) or 4 (positive).
    The upper and lower defaults are the standard VADER thresholds.
    """
    if x < lower:
        return 0
    elif x < upper:
        # Bug fix: the original tested ``x > lower + 0.0001``, which sent
        # scores in the tiny gap [lower, lower + 0.0001] to the positive
        # class.  Everything between the thresholds is neutral.
        return 2
    else:
        return 4
#########################
#########################
| lclarko/data-science-final-project | scripts/functions.py | functions.py | py | 9,570 | python | en | code | 0 | github-code | 90 |
74133688936 | from numpy import *
import pandas as pd
from matplotlib.pyplot import *
from Functions import *
import warnings
warnings.filterwarnings("ignore")
# Load the raw measurement table.  Column layout (0-based), inferred from
# the slicing below: col 1 = series id, cols 3/4 = x/y position,
# col 9 = time, col 15 = height.  TODO confirm against Data.csv.
dat = array(pd.read_csv('Data.csv',header=None))
dat.shape
ids_1 = sorted(set(dat[:,1]))
# Per-series outputs collected over the loop.
xPositions = []
yPositions = []
heights = []
# for each unique ID (time series)
for i in range(size(ids_1)):
    # append x position of time series to a list
    xPositions.append(dat[dat[:,1] == ids_1[i],3][0])
    # append y position of time series to a list
    yPositions.append(dat[dat[:,1] == ids_1[i],4][0])
    t = dat[dat[:,1] == ids_1[i],9] ## time instances corresponding to this id
    h = dat[dat[:,1] == ids_1[i],15] ## corresponding height measurements
    temp_dat = np.concatenate((t.reshape(-1,1),h.reshape(-1,1)),axis = 1)
    # Fit a degree-p P-spline with difference-penalty order q; knot count
    # and smoothing parameter come from a grid search (Functions module).
    # NOTE(review): ``np`` is presumably re-exported by Functions — verify.
    p = 4;q=2
    [n,lamb,sigmasq] = full_search_nk(temp_dat,p,q)
    c = n+p
    U = Kno_pspline_opt(temp_dat,p,n)
    B = Basis_Pspline(n,p,U,temp_dat[:,0])
    P = Penalty_p(q,c)
    # Penalized least-squares solve for the spline coefficients.
    theta = np.linalg.solve(B.T.dot(B) + lamb*P, B.T.dot(temp_dat[:,1].reshape(-1,1)))
    ### Getting mean of the prediction
    num = 200
    xpred = linspace(temp_dat[0,0],temp_dat[-1,0],num)
    Bpred = Basis_Pspline(n,p,U,xpred)
    ypred1 = Bpred.dot(theta)
    std_t1,std_n1 = Var_bounds(temp_dat,Bpred,B,theta,P,lamb)
    # Evaluate the fitted spline at year 2006 for this series.
    xpred_2006 = np.array([2006])
    Bpred_2006 = Basis_Pspline(n,p,U,xpred_2006)
    ypred_2006 = Bpred_2006.dot(theta)
    # append height at t = 2006 for this series' (x, y) point
    heights.append(ypred_2006[0][0])
    print((i+1), '/' , len(ids_1))
    #print('height at t = 2006: x = ', xPositions[len(xPositions)-1], ' y = ', yPositions[len(yPositions)-1], ' ' ,ypred_2006[0][0] , ' ', i+1 , '/' , len(ids_1))
# Persist one (x, y, height) row per series.
df = pd.DataFrame({'x':xPositions, 'y':yPositions, 'height':heights})
df.to_csv('dat_2006.csv', index=False)
print(df)
| DanielKhali1/ClimateRegressionLinearAlgebra | .ipynb_checkpoints/dat_2006_maker-checkpoint.py | dat_2006_maker-checkpoint.py | py | 1,839 | python | en | code | 0 | github-code | 90 |
40329181835 | #/usr/lib/python3.5
# -*- coding: utf-8 -*-
import configparser
# Read the existing configuration.  The path may not exist yet, which
# simply means a fresh configuration file will be created.
config = configparser.ConfigParser()
# NOTE(review): the file is read from an absolute path but written to the
# current directory below — confirm this asymmetry is intended.
config.read("/home/gswewf/有限状态机(FSM)/IpConfig.ini")
# Write the "School" network profile.
try:
    # add_section() raises DuplicateSectionError if the section exists.
    config.add_section("School")
    # set() adds individual key/value pairs under the section.
    config.set("School", "IP", "10.15.40.123")
    config.set("School", "Mask", "255.255.255.0")
    config.set("School", "Gateway", "10.15.40.1")
    config.set("School", "DNS", "211.82.96.1")
except configparser.DuplicateSectionError:
    print("Section 'School' already exists")
# Write the "Match" (competition) network profile.
try:
    config.add_section("Match")
    config.set("Match", "IP", "172.17.29.120")
    config.set("Match", "Mask", "255.255.255.0")
    config.set("Match", "Gateway", "172.17.29.1")
    config.set("Match", "DNS", "0.0.0.0")
except configparser.DuplicateSectionError:
    print("Section 'Match' already exists")
# Persist the configuration.  A context manager flushes and closes the
# handle deterministically (the original passed a bare open() to
# config.write() and leaked the file object).
with open("IpConfig.ini", "w") as config_file:
    config.write(config_file)
# Read the parameters back with config.get(); the in-memory parser
# already holds them, so no re-read of the file is needed.
ip = config.get("School", "IP")
mask = config.get("School", "mask")
gateway = config.get("School", "Gateway")
dns = config.get("School", "DNS")
print((ip, mask + "\n" + gateway, dns))
| gswyhq/hello-world | python相关/读取配置文件2.py | 读取配置文件2.py | py | 1,547 | python | zh | code | 9 | github-code | 90 |
71103220777 | # coding:utf-8
"""
@Author : Cong
@Time : 2021/6/28 16:00
"""
import socket
# Interactive TCP client: read a line from stdin, send it to the server,
# print the reply; typing 'exit' sends it and disconnects.
serveraddress = ('127.0.0.1', 7777)
sk = socket.socket()  # defaults: AF_INET / SOCK_STREAM
sk.connect(serveraddress)
while True:
    # Prompt (Chinese): "content to send".
    sss = input('发送内容:').strip()
    sk.sendall(sss.encode())
    if sss == 'exit':
        # "Client disconnecting."
        print('客户端退出连接。')
        break
    # NOTE(review): recv() returning b'' (server closed) is not handled
    # and would keep looping with empty replies — confirm acceptable.
    answer = sk.recv(1024).decode()
    # "Reply received from server: %s"
    print("收到服务器应答:%s" % answer)
sk.close()
22940976104 | from Qt import QtCore, QtGui, QtWidgets
class FileManagerToolbar(QtWidgets.QWidget):
    """Search toolbar: a search-type selector ('asset'/'tag') plus a
    debounced search box.  Emits ``assets_changed`` or ``tags_changed``
    with the current text at most once per 300 ms burst of typing."""
    tags_changed = QtCore.Signal(str)
    assets_changed = QtCore.Signal(str)
    def __init__(self, *args, **kwargs):
        super(FileManagerToolbar, self).__init__(*args, **kwargs)
        self._cmbo_search_type = FlatCombo(['asset', 'tag'])
        self._edit_search = QtWidgets.QLineEdit()
        # Single-shot debounce timer, restarted on every keystroke.
        self._timer_search = QtCore.QTimer()
        self._timer_search.setSingleShot(True)
        self._timer_search.setInterval(300)
        self._build_ui()
        self._build_connections()
        self._setup_ui()
    def _build_ui(self):
        """Lay out the selector and search box, centred horizontally."""
        self.setFixedHeight(40)
        self._edit_search.setFixedHeight(34)
        self._edit_search.setFixedWidth(400)
        lyt_main = QtWidgets.QHBoxLayout()
        lyt_main.setContentsMargins(0, 0, 0, 0)
        lyt_main.setSpacing(5)
        lyt_main.addStretch()
        lyt_main.addWidget(self._cmbo_search_type)
        lyt_main.addWidget(self._edit_search)
        lyt_main.addStretch()
        self.setLayout(lyt_main)
    def _build_connections(self):
        """Wire keystrokes and type switches to (re)start the debounce timer."""
        self._edit_search.textChanged.connect(self._timer_search.start)
        # Switching search type clears the box and re-triggers a search.
        self._cmbo_search_type.option_changed.connect(self._edit_search.clear)
        self._cmbo_search_type.option_changed.connect(self._timer_search.start)
        # TODO - emit regex
        self._timer_search.timeout.connect(self._search_changed)
    def _setup_ui(self):
        """Apply the rounded, centred styling to the search box."""
        self._edit_search.setStyleSheet("""
            QLineEdit {
                border-radius: 17px;
                font-size: 18px;
            }
        """)
        self._edit_search.setAlignment(QtCore.Qt.AlignCenter)
    def _search_type(self):
        # Currently selected search mode: 'asset' or 'tag'.
        return self._cmbo_search_type.current_text()
    def _search_changed(self):
        """Emit the signal matching the selected search type."""
        search_text = self._edit_search.text()
        if self._search_type() == 'asset':
            self.assets_changed.emit(search_text)
        elif self._search_type() == 'tag':
            self.tags_changed.emit(search_text)
class FlatCombo(QtWidgets.QWidget):
    """Flat combo-box lookalike: a label plus a drop-arrow button that
    pops a QMenu of the given options.  Emits ``option_changed(str)``
    whenever a new option is picked."""
    option_changed = QtCore.Signal(str)
    def __init__(self, options, *args, **kwargs):
        super(FlatCombo, self).__init__(*args, **kwargs)
        assert options, 'No options given.'
        # First option is the initial selection; keep a private copy.
        self._lbl = QtWidgets.QLabel(options[0])
        self._btn = QtWidgets.QPushButton(u'\u25BC')
        self._items = options[:]
        self._build_ui()
        self._build_connections()
        self._setup_ui()
    # noinspection PyPep8Naming
    def mouseReleaseEvent(self, evt):
        """Open the options menu on a left-click anywhere on the widget."""
        if evt.button() == QtCore.Qt.LeftButton:
            self._show_menu()
            # NOTE(review): Qt handlers normally return None / call
            # evt.accept(); returning True here is unusual but harmless.
            return True
        else:
            return super(FlatCombo, self).mouseReleaseEvent(evt)
    def current_text(self):
        """Return the currently selected option text."""
        return self._lbl.text()
    def _build_ui(self):
        # Label on the left, arrow button on the right.
        lyt = QtWidgets.QHBoxLayout()
        lyt.setContentsMargins(0, 0, 0, 0)
        lyt.setSpacing(2)
        lyt.addWidget(self._lbl)
        lyt.addWidget(self._btn)
        self.setLayout(lyt)
    def _build_connections(self):
        self._btn.clicked.connect(self._show_menu)
    def _setup_ui(self):
        # Flat bold styling so the pair reads as one widget.
        self._btn.setFixedSize(20, 20)
        self._btn.setFlat(True)
        _font = self.font()
        _font.setBold(True)
        self._btn.setFont(_font)
        self._lbl.setFont(_font)
        self._lbl.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self._lbl.setStyleSheet('font-size: 16px;')
        self._btn.setStyleSheet('border: none;')
    def _show_menu(self):
        """Pop the option menu at the cursor; update the label and emit on pick."""
        menu = QtWidgets.QMenu()
        for item in self._items:
            menu.addAction(item)
        a = menu.exec_(QtGui.QCursor().pos())
        if a is not None:
            value = a.text()
            self._lbl.setText(value)
            self.option_changed.emit(value)
| ccesareo/file_manager | file_manager/ui/toolbar.py | toolbar.py | py | 3,800 | python | en | code | 0 | github-code | 90 |
38437931611 | import datetime
import re
import lazylibrarian
import shlex
import time
import os
import string
import unicodedata
def bookSeries(bookname):
    """
    Extract a (series, seriesNum) pair from a book title, or (None, None).

    Multi-series titles, e.g.
    "The Shepherds Crown (Discworld, #41; Tiffany Aching, #5)",
    are checked first (the number must be terminated by ';' or ','), and
    the first listed series wins.  Otherwise a single-series form such as
    "Mrs Bradshaws Handbook (Discworld, #40.5)" is tried.  Trailing
    " novel"/" book" suffixes are stripped from the series name.
    """
    series = None
    seriesNum = None
    multi = re.search(r"\(([\S\s]+),? #?(\d+\.?-?\d{0,}[;,])", bookname)
    if multi:
        series, seriesNum = multi.group(1), multi.group(2)
        # The trailing separator is part of the match; drop it.
        if seriesNum[-1] in ';,':
            seriesNum = seriesNum[:-1]
    else:
        single = re.search(r"\(([\S\s]+),? #?(\d+\.?-?\d{0,})", bookname)
        if single:
            series, seriesNum = single.group(1), single.group(2)
    if series:
        if series[-1] == ',':
            series = series[:-1]
        if series.lower().endswith(' novel'):
            series = series[:-6]
        if series and series.lower().endswith(' book'):
            series = series[:-5]
    return series, seriesNum
def next_run(when_run):
    """
    Return a human-readable interval from now until *when_run*
    ('%Y-%m-%d %H:%M:%S', local time), e.g. "3 days" / "2 hours" /
    "5 minutes" / "42 seconds".
    NOTE(review): a *when_run* in the past yields negative values —
    confirm callers only pass future timestamps.
    """
    now = time.time()
    when_run = time.strptime(when_run, '%Y-%m-%d %H:%M:%S')
    when_run = time.mktime(when_run)
    diff = when_run - now  # time difference in seconds
    # calculate whole units, plus round up by adding 1(true) if remainder >= half
    days = int(diff / 86400) + (diff % 86400 >= 43200)
    hours = int(diff / 3600) + (diff % 3600 >= 1800)
    minutes = int(diff / 60) + (diff % 60 >= 30)
    seconds = int(diff)
    if days > 1:
        return "%i days" % days
    elif hours > 1:
        return "%i hours" % hours
    elif minutes > 1:
        return "%i minutes" % minutes
    else:
        return "%i seconds" % seconds
def now():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    current = datetime.datetime.now()
    return current.strftime("%Y-%m-%d %H:%M:%S")
def today():
    """
    Return todays date in ISO format yyyy-mm-dd.
    """
    return datetime.date.isoformat(datetime.date.today())
def age(histdate):
    """
    How many whole days have passed since *histdate* ('yyyy-mm-dd').
    Returns 0 for today, or for an invalid/unparseable histdate.
    """
    today_iso = datetime.date.isoformat(datetime.date.today())
    y1, m1, d1 = (int(x) for x in today_iso.split('-'))
    try:
        # Unpacking the generator raises ValueError on a wrong part count,
        # as do int() and datetime.date() on bad values.
        y2, m2, d2 = (int(x) for x in histdate.split('-'))
        return (datetime.date(y1, m1, d1) - datetime.date(y2, m2, d2)).days
    except ValueError:
        return 0
def nzbdate2format(nzbdate):
    """
    Convert an nzb date such as 'Mon, 02 Jan 2017 ...' (whitespace
    separated: weekday, day, month-name, year) into 'yyyy-mm-dd'.
    Falls back to January when the month name is unrecognised.
    """
    mmname = nzbdate.split()[2].zfill(2)
    day = nzbdate.split()[1]
    # nzbdates are mostly english short month names, but not always
    month = month2num(mmname)
    # Bug fix: month2num() signals an unknown month by returning "00",
    # never the string "Invalid" the original tested for, so the fallback
    # below could previously never trigger.
    if month in ("00", "Invalid"):
        month = "01"  # hopefully won't hit this; default rather than error
    year = nzbdate.split()[3]
    return year + '-' + month + '-' + day
def month2num(month):
    """
    Return the zero-padded month number ("01".."12") for a month name
    (long or short, in the locales configured in the module-level
    lazylibrarian.MONTHNAMES table) or an English season name.
    Returns "00" when the name is not recognised.
    """
    month = month.lower()
    for f in range(1, 13):
        if month in lazylibrarian.MONTHNAMES[f]:
            return str(f).zfill(2)
    # Map seasons to the first month of the (northern-hemisphere) season.
    if month == "winter":
        return "01"
    elif month == "spring":
        return "04"
    elif month == "summer":
        return "07"
    elif month == "fall":
        return "10"
    elif month == "autumn":
        return "10"
    else:
        return "00"
def datecompare(nzbdate, control_date):
    """
    Difference in days between two 'yyyy-mm-dd' date strings
    (positive when *nzbdate* is the later one).
    """
    parts_a = nzbdate.split('-')
    parts_b = control_date.split('-')
    date_a = datetime.date(int(parts_a[0]), int(parts_a[1]), int(parts_a[2]))
    date_b = datetime.date(int(parts_b[0]), int(parts_b[1]), int(parts_b[2]))
    return (date_a - date_b).days
def plural(var):
    """
    Convenience function for log messages, if var = 1 return ''
    if var is anything else return 's'
    so book -> books, seeder -> seeders etc
    (Non-numeric input counts as "not 1" via check_int's default of 0.)
    """
    if check_int(var, 0) == 1:
        return ''
    return 's'
def check_int(var, default):
    """
    Coerce *var* to int, falling back to *default* when it is not an
    integer-like value (None, non-numeric string, etc.).
    """
    try:
        value = int(var)
    except (ValueError, TypeError):
        return default
    return value
def is_valid_isbn(isbn):
    """
    Validate an ISBN-10 or ISBN-13 (hyphens and spaces are ignored).
    ISBN-13: thirteen digits.  ISBN-10: nine digits followed by a digit
    or the check character 'X'/'x'.
    """
    isbn = re.sub('[- ]', '', isbn)
    if len(isbn) == 13:
        return isbn.isdigit()
    if len(isbn) == 10:
        # Bug fixes vs the original: the tenth character is now actually
        # validated (any character after nine digits used to be accepted),
        # and the check-char test is against the string "Xx" rather than
        # the single-element list ["Xx"], which could never match 'X'.
        return isbn[:9].isdigit() and (isbn[9].isdigit() or isbn[9] in 'Xx')
    return False
def is_valid_booktype(filename, booktype=None):
    """
    Check whether *filename*'s extension is one of the configured types:
    magazine extensions when booktype == 'mag', ebook extensions
    otherwise.  Reads the module-level lazylibrarian config via getList().
    """
    if booktype == 'mag':  # default is book
        booktype_list = getList(lazylibrarian.MAG_TYPE)
    else:
        booktype_list = getList(lazylibrarian.EBOOK_TYPE)
    extn = os.path.splitext(filename)[1].lstrip('.')
    if extn and extn.lower() in booktype_list:
        return True
    return False
def getList(st):
    """
    Split a configuration string into a list of tokens.
    Commas and whitespace both act as separators; posix=False so
    apostrophes inside tokens do not upset the lexer.
    Returns [] for an empty/None input.
    """
    if not st:
        return []
    splitter = shlex.shlex(st, posix=False)
    splitter.whitespace += ','
    splitter.whitespace_split = True
    return list(splitter)
def safe_unicode(obj, *args):
    """ return the unicode representation of obj

    Python 2 only: ``unicode`` and the 'string_escape' codec do not
    exist on Python 3.
    """
    try:
        return unicode(obj, *args)
    except UnicodeDecodeError:
        # obj is byte string
        ascii_text = str(obj).encode('string_escape')
        return unicode(ascii_text)
def cleanName(name):
    """
    Strip accents and any character outside [-_.()A-Za-z0-9 and space]
    from *name*, returning the stripped result.
    """
    validNameChars = u"-_.() %s%s" % (string.ascii_letters, string.digits)
    try:
        cleanedName = unicodedata.normalize('NFKD', name).encode('ASCII', 'ignore')
    except TypeError:
        cleanedName = unicodedata.normalize('NFKD', name.decode(lazylibrarian.SYS_ENCODING)).encode('ASCII', 'ignore')
    # NOTE(review): on Python 3 iterating the encoded bytes yields ints,
    # so the membership test below would fail — this code assumes Python 2.
    cleaned = u''.join(c for c in cleanedName if c in validNameChars)
    return cleaned.strip()
def unaccented(str_or_unicode):
    # Accent-stripped text as unicode (Python 2: decodes the byte string
    # produced by unaccented_str using the configured system encoding).
    return unaccented_str(str_or_unicode).decode(lazylibrarian.SYS_ENCODING)
# returns unicode
def unaccented_str(str_or_unicode):
    """
    Strip accents and all remaining non-ASCII characters, after first
    mapping curly quotes/apostrophes to their ASCII equivalents.
    (Python 2: returns a byte str.)
    """
    try:
        nfkd_form = unicodedata.normalize('NFKD', str_or_unicode)
    except TypeError:
        nfkd_form = unicodedata.normalize('NFKD', str_or_unicode.decode(lazylibrarian.SYS_ENCODING, 'replace'))
    # turn accented chars into non-accented
    stripped = ''.join([c for c in nfkd_form if not unicodedata.combining(c)])
    # replace all non-ascii quotes/apostrophes with ascii ones eg "Collector's"
    dic = {u'\u2018': u"'", u'\u2019': u"'", u'\u201c': u'"', u'\u201d': u'"'}
    stripped = replace_all(stripped, dic)
    # now get rid of any other non-ascii
    return stripped.encode('ASCII', 'ignore')
# returns str
def replace_all(text, dic):
    """
    Return *text* with every key of *dic* replaced by its value.
    Replacements are applied sequentially in dict iteration order.
    """
    # dict.items() works on both Python 2 and 3; the original used the
    # Python-2-only iteritems().
    for old, new in dic.items():
        text = text.replace(old, new)
    return text
| nemccarthy/LazyLibrarian | lazylibrarian/formatter.py | formatter.py | py | 7,597 | python | en | code | null | github-code | 90 |
class Solution:
    """LeetCode 1144: minimum number of unit decreases needed to turn the
    array into a zigzag, evaluating both parities (even-indexed valleys
    vs odd-indexed valleys) and keeping the cheaper one."""

    def movesToMakeZigzag(self, nums: list) -> int:
        def cost_to_lower(value, limit):
            # Decreases needed so that value < limit.
            return 0 if value < limit else value - limit + 1

        even_cost = cost_to_lower(nums[0], nums[1])
        odd_cost = 0
        last_cost = cost_to_lower(nums[-1], nums[-2])
        if (len(nums) - 1) % 2 == 0:
            even_cost += last_cost
        else:
            odd_cost += last_cost
        for idx in range(1, len(nums) - 1):
            neighbour_min = min(nums[idx - 1], nums[idx + 1])
            if idx % 2 == 0:
                even_cost += cost_to_lower(nums[idx], neighbour_min)
            else:
                odd_cost += cost_to_lower(nums[idx], neighbour_min)
        return min(even_cost, odd_cost)
s = Solution()
print(s.movesToMakeZigzag([1,2,3])) | HandsomeLuoyang/Algorithm-books | 力扣/数组/1144. 递减元素使数组呈锯齿状.py | 1144. 递减元素使数组呈锯齿状.py | py | 785 | python | en | code | 1 | github-code | 90 |
9680720637 | import fitbit
from fitbit import gather_keys_oauth2 as Oauth2
import pandas as pd
import datetime
from datetime import date,timedelta
# CLIENT_ID = '22BFSK'
# CLIENT_SECRET = '27d1af955e153fa8765fb3acb2ccabce'
CLIENT_ID = input("Enter the Client id: ")
CLIENT_SECRET = input("Enter the Client secret: ")
server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
server.browser_authorize()
ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
auth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
print("authorization successful")
time_list = []
val_list = []
date_list=[]
# yesterday = str((datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y%m%d"))
# yesterday2 = str((datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y-%m-%d"))
# today = str(datetime.datetime.now().strftime("%Y-%m-%d"))
# for date in range(1,16):
# yesterday2 = str((datetime.datetime.now() - datetime.timedelta(days=date)).strftime("%Y-%m-%d"))
# today = str(datetime.datetime.now().strftime("%Y-%m-%d"))
# fit_statsHR = auth2_client.intraday_time_series('activities/steps',base_date=yesterday2, detail_level='1min')
#
# for i in fit_statsHR['activities-steps-intraday']['dataset']:
# val_list.append(i['value'])
# time_list.append(i['time'])
# date_list.append(yesterday2)
# stepsdf = pd.DataFrame({'Step Count':val_list,'Time':time_list,'Date':date_list})
# print(stepsdf)
h_time_list = []
h_val_list = []
h_date_list=[]
time_list = []
val_list = []
date_list=[]
stime_list = []
sval_list = []
sdate_list=[]
def daterange(start_date, end_date):
    """Yield each date from start_date (inclusive) to end_date (exclusive)."""
    current = start_date
    while current < end_date:
        yield current
        current += timedelta(days=1)
# Ask the user for the (inclusive) date range to download.
start_year = input("Enter start_date year: ")
start_month = input("Enter start_date month: ")
start_day = input("Enter start_date day: ")
end_year = input("Enter end_date year: ")
end_month = input("Enter end_date month: ")
end_day = input("Enter end_date day: ")
start_date = date(int(start_year), int(start_month), int(start_day))
# Bug fix: daterange() excludes its end, so extend by one day with
# timedelta.  The original added 1 to the day-of-month number, which
# raised ValueError whenever the end date was the last day of a month
# (e.g. day 31 -> day 32).
end_date = date(int(end_year), int(end_month), int(end_day)) + timedelta(days=1)
# start_date = date(2020,3,9)
# end_date = date(2020,3,11)
# For every day in the requested range, pull minute-level heart-rate,
# step and sleep data from the Fitbit API, accumulating rows in the
# module-level lists (the DataFrames are rebuilt each iteration from the
# full accumulated lists, so only the last build matters).
for single_date in daterange(start_date, end_date):
    date_new = single_date.strftime("%Y-%m-%d")
    #Heart Rate
    fit_statsHR = auth2_client.intraday_time_series('activities/heart', base_date=date_new, detail_level='1min')
    for i in fit_statsHR['activities-heart-intraday']['dataset']:
        h_val_list.append(i['value'])
        h_time_list.append(i['time'])
        h_date_list.append(date_new)
    heartdf = pd.DataFrame({'Heart Rate':h_val_list,'Time':h_time_list,'Date':h_date_list})
    # Step counts, same endpoint family and granularity.
    fit_statsHR = auth2_client.intraday_time_series('activities/steps', base_date=date_new, detail_level='1min')
    for i in fit_statsHR['activities-steps-intraday']['dataset']:
        val_list.append(i['value'])
        time_list.append(i['time'])
        date_list.append(date_new)
    stepsdf = pd.DataFrame({'Time':time_list,'Date':date_list,'Step Count':val_list})
    # Sleep states per minute.  NOTE(review): only the first sleep record
    # of the day is read, and a day with no sleep data would raise
    # IndexError — confirm acceptable for this export.
    fitbit_stats3 = auth2_client.sleep(date=date_new)
    for i in fitbit_stats3['sleep'][0]['minuteData']:
        stime_list.append(i['dateTime'])
        sval_list.append(i['value'])
        sdate_list.append(date_new)
    sleepdf = pd.DataFrame({'Sleep State':sval_list,'Time':stime_list,'Date':sdate_list})
new_datetime_heart = []
for i in range(0,len(heartdf['Date'])):
new_datetime_heart.append(datetime.datetime.strptime(heartdf['Date'][i]+" "+heartdf['Time'][i], '%Y-%m-%d %H:%M:%S'))
heartdf['Time'] = new_datetime_heart
heartdf = heartdf.set_index('Time')
heartdf = heartdf.resample('5T').mean()
heartdf = heartdf.reset_index()
# print(heartdf)
new_datetime_steps = []
for i in range(0,len(stepsdf['Date'])):
new_datetime_steps.append(datetime.datetime.strptime(stepsdf['Date'][i]+" "+stepsdf['Time'][i], '%Y-%m-%d %H:%M:%S'))
stepsdf['Time'] = new_datetime_steps
stepsdf = stepsdf.set_index('Time')
stepsdf = stepsdf.resample('5T').sum()
stepsdf = stepsdf.reset_index()
date_list=[]
time_list=[]
for x in stepsdf['Time']:
d=datetime.datetime.strptime(str(x),"%Y-%m-%d %H:%M:%S")
date_list.append(d.date())
time_list.append(d.time())
stepsdf['Time_data'] = time_list
stepsdf['Date_data'] = date_list
# print(stepsdf)
# """Sleep data on the night of ...."""
new_datetime_sleep = []
for i in range(0,len(sleepdf['Date'])):
new_datetime_sleep.append(datetime.datetime.strptime(sleepdf['Date'][i]+" "+sleepdf['Time'][i], '%Y-%m-%d %H:%M:%S'))
sleepdf['Time'] = new_datetime_sleep
sleepdf['Sleep State'] = pd.to_numeric(sleepdf['Sleep State'])
sleepdf = sleepdf.set_index('Time')
sleepdf = sleepdf.resample('5T').sum()
sleepdf = sleepdf.reset_index()
for x in range(0,len(sleepdf['Sleep State'])):
if(sleepdf['Sleep State'][x]>=5):
sleepdf['Sleep State'][x]=1
else:
sleepdf['Sleep State'][x]=0
# print(sleepdf)
#
data=pd.concat([stepsdf.set_index('Time'),heartdf.set_index('Time'),sleepdf.set_index('Time')],axis=1)
data.reset_index(level='Time',col_level=1,col_fill='Time_data')
data['Heart Rate'].fillna(-1,inplace=True)
data['Step Count'].fillna(-99,inplace=True)
data['Sleep State'].fillna(-1,inplace=True)
data['Time_data'].fillna('no inputs yet',inplace=True)
data['Date_data'].fillna('no inputs yet',inplace=True)
data_new = data[data['Time_data']!='no inputs yet']
# print(data_new)
data.to_csv(path_or_buf='/Users/hp/Desktop/'+'fitbit_data'+CLIENT_ID+'.csv',columns=['Date_data','Time_data','Heart Rate','Step Count','Sleep State'], header=True, index = False)
print('The fitbit_data'+CLIENT_ID+'.csv file has been downloaded on your desktop')
| Aaishu29/ubilab | fitbit_prac.py | fitbit_prac.py | py | 5,945 | python | en | code | 0 | github-code | 90 |
15992200391 | # Price Determination Algorithm
# Define a class for the package
class Package:
    """A service package whose price scales with trash volume.

    Valid names are 'pickup' (factor 1.2) and 'pickup and cleaning'
    (factor 1.5); anything else raises ValueError.
    """

    # Price multiplier applied on top of the base rate per package name.
    _NAME_FACTORS = {"pickup": 1.2, "pickup and cleaning": 1.5}

    def __init__(self, name):
        self.name = name
        if self.name not in self._NAME_FACTORS:
            raise ValueError(f"Invalid name: {self.name}. Please choose from pickup or pickup and cleaning.")
        self.factor = self._NAME_FACTORS[self.name]

    def calculate_price(self, volume):
        """Price in XAF for *volume* cubic metres, at the 0.05 XAF/m3
        base rate scaled by the package factor."""
        base_price = 0.05
        return base_price * volume * self.factor
# Define a class for the trash
class Trash:
    """A quantity of trash containers of a given size ('bucket',
    'trash bag' or 'wheelbarrow')."""

    # Container capacities in litres.
    _SIZE_LITRES = {"bucket": 10, "trash bag": 27, "wheelbarrow": 80}

    def __init__(self, size, quantity):
        self.size = size
        self.quantity = quantity

    def calculate_volume(self):
        """Total volume in cubic metres; raises ValueError for an
        unknown container size."""
        if self.size not in self._SIZE_LITRES:
            raise ValueError(f"Invalid size: {self.size}. Please choose from bucket, trash bag, or wheelbarrow.")
        litres = self._SIZE_LITRES[self.size]
        # Convert litres to cubic metres before scaling by quantity.
        return (litres / 1000) * self.quantity
# Define a class for the bid
class Bid:
    """A customer's counter-offer: accepted when it is no more than
    *minimum_percentage* percent below the computed price."""

    def __init__(self, bidding_price, minimum_percentage):
        self.bidding_price = bidding_price
        self.minimum_percentage = minimum_percentage

    def check_status(self, price):
        """Return a (status, message) pair; status is 'approved' or
        'rejected' depending on whether the bid clears the floor."""
        floor = price * (1 - self.minimum_percentage / 100)
        if self.bidding_price >= floor:
            return "approved", f"The bid is accepted. The price is {price} XAF."
        return "rejected", "Price too low."
# Demo wiring: a pickup package (pricing factor 1.2 per the Package class).
package = Package("pickup")
# Three trash bags; the volume is derived from the size name, not given in litres.
trash = Trash("trash bag", 3)
# A 0.4 XAF bid that tolerates at most 10% below the computed price.
bid = Bid(0.4, 10)
| PeterNjahmi/python_algorithms | price_prediction.py | price_prediction.py | py | 3,392 | python | en | code | 0 | github-code | 90 |
dict1 = {0:'zero', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven', 8:'eight', 9:'nine', 10:'ten', 11:'eleven', 12:'twelve', 13:'thirteen', 14:'fourteen', 15:'fifteen', 16:'sixteen', 17:'seventeen', 18:'eighteen', 19:'nineteen', 20:'twenty', 30:'thirty', 40:'forty', 50:'fifty', 60:'sixty', 70:'seventy', 80:'eighty',90: 'ninety', 100:'one hundred', 200:'two hundred', 300:'three hundred', 400:'four hundred', 500:'five hundred', 600:'six hundred', 700:'seven hundred', 800:'eight hundred', 900:'nine hundred'}


def num_to_words(num):
    """Return the English words for an integer num in [0, 999].

    Returns the string "Input not valid." for anything outside that range
    (the original raised KeyError on negative input).
    """
    if num < 0 or num > 999:
        return "Input not valid."
    if num < 20:
        return dict1[num]
    if num < 100:
        tens_dig = (num // 10) * 10
        ones_dig = num % 10
        if ones_dig == 0:
            return dict1[tens_dig]
        return dict1[tens_dig] + '-' + dict1[ones_dig]
    # 100..999: hundreds word, then the 0..99 remainder.
    hunds_dig = (num // 100) * 100
    tens_dig = ((num - hunds_dig) // 10) * 10
    ones_dig = num % 10
    under_20 = num - hunds_dig
    if under_20 == 0:
        return dict1[hunds_dig]
    if under_20 < 20:
        # Covers both "and five" and "and twelve"/"and ten" style remainders.
        return dict1[hunds_dig] + ' and ' + dict1[under_20]
    if ones_dig == 0:
        return dict1[hunds_dig] + ' and ' + dict1[tens_dig]
    # (The original also had an unreachable `tens_dig == 0` branch here: a
    # zero tens digit always implies under_20 < 20, handled above.)
    return dict1[hunds_dig] + ' and ' + dict1[tens_dig] + '-' + dict1[ones_dig]
for num in range(0,1000):
print(num_to_words(num)) | PdxCodeGuild/class_salmon | code/austin/python/lab03_v2.py | lab03_v2.py | py | 1,626 | python | en | code | 5 | github-code | 90 |
3179386145 | from subprocess import Popen, PIPE
from os import remove
from PIL import Image
import imageio.v2 as imageio
import glob
import os
import sys
def convert(orig_file, new_file_extension):
    """Convert an image file to another format (ppm, png or jpg).

    orig_file: path to the source image; its format is taken from the file's
        actual (final) extension.
    new_file_extension: string naming the target extension.
    Writes the converted file next to the original (same name, new
    extension); the original file is left in place. Exits via sys.exit on
    an unsupported extension.
    """
    supported = ('ppm', 'png', 'jpg')
    # Use the real file extension instead of a substring search over the
    # whole path, which misfired on names like "png_photos/pic.jpg".
    base, ext = os.path.splitext(orig_file)
    orig_extension = ext.lstrip('.').lower()
    if orig_extension not in supported:
        sys.exit("no file extension in given file")
    new_extension = next((e for e in supported if e in new_file_extension), '')
    if new_extension == '':
        sys.exit("Incorrect new file extension given")
    img = Image.open(orig_file)
    img = img.convert('RGB')
    # Replace only the trailing extension (str.replace would also rewrite a
    # matching substring earlier in the path).
    out_path = base + '.' + new_extension
    # Pillow registers the JPEG writer under "JPEG"; "jpg" is not a valid
    # format name for Image.save.
    img.save(out_path, 'JPEG' if new_extension == 'jpg' else new_extension)
def animate(name):
    """Assemble the frames matching anim/<name>* into <name>.gif at 60 fps."""
    frame_paths = sorted(glob.glob("anim/" + name + "*"))
    print(frame_paths)
    frames = [imageio.imread(path) for path in frame_paths]
    output_gif = name + ".gif"
    imageio.mimsave(output_gif, frames, fps=60)
    print(output_gif)
if __name__ == '__main__':
    # CLI entry point: `convert <src> <new-ext>` or `animate <name>`.
    if len(sys.argv) < 2:
        sys.exit("Too few arguments")
    try:
        if sys.argv[1] == 'convert':
            convert(sys.argv[2], sys.argv[3])
        elif sys.argv[1] == 'animate':
            animate(sys.argv[2])
    except Exception as e:
        # NOTE(review): this broad except also swallows errors raised inside
        # convert()/animate(), so the message below can be misleading.
        print(e)
        print("Incorrect number of arguments")
| Ca7Ac1/Graphics | convert.py | convert.py | py | 1,618 | python | en | code | 0 | github-code | 90 |
# Task 3. Write a bot script that looks up answers to phrases by key in a dictionary.
# The bot must at least answer the phrases "привет" ("hi") and "как тебя зовут" ("what is your name"); for unknown phrases it prints an appropriate fallback message.
import os
import re
from Exercises.ExerciseAbstract import ExerciseAbstract
from Exercises.Homework3.Balabot.Bot import Bot
class Exercise11(ExerciseAbstract):
    """Console chat-bot exercise: the Bot answers known phrases and learns
    replies for unknown ones interactively."""
    def __init__(self, description: str):
        super().__init__(description)

    @staticmethod
    def Body():
        """Run the interactive chat loop until the bot says goodbye."""
        bot = Bot()
        print('БОТ ЗАПУЩЕН\n')

        onDialog = True
        while onDialog:
            # Normalise the user's phrase before handing it to the bot.
            question = Exercise11._GetCleanText(input(' |> ').lower())
            answer = bot.ReadMessage(question)
            print(f'\n — {answer}\n')
            onDialog = not bot.isSaidBuy
            # When the bot does not know the phrase it enters "read mode"
            # and stores the user's next line as the new answer.
            if bot.onReadMode:
                answer = bot.SetToMemory(question, input(' |> '))
                print(f'\n — {answer}\n')

        input('Бот вышел из чата. Нажми Enter...')
        os.system('cls')  # Windows-only console clear
        return True

    @staticmethod
    def _GetCleanText(word: str) -> str:
        """Keep only Latin/Cyrillic letters and hyphens, then trim
        leading/trailing hyphens."""
        word = re.sub('[^a-zа-яё-]', '', word, flags=re.IGNORECASE)
        word = word.strip('-')
        return word
| DanGritsyuk/GB_Learn_PY | Exercises/Homework3/Exercise11.py | Exercise11.py | py | 1,427 | python | ru | code | 0 | github-code | 90 |
import sys

MOD = 10**9 + 7

# Sum of a[i]*a[j] over all pairs i < j equals ((sum a)^2 - sum a^2) / 2.
n = int(input())
a = list(map(int, input().split()))
square_of_sum = pow(sum(a), 2)
sum_of_squares = sum(x * x for x in a)
# Exact integer halving before the modulus. The original rebound the name
# `sum` here, shadowing the builtin after its last use.
ans = (square_of_sum - sum_of_squares) // 2 % MOD
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02572/s504440098.py | s504440098.py | py | 178 | python | en | code | 0 | github-code | 90 |
33549316522 | import random
import nltk
# Intent catalogue: per-intent trigger examples and candidate responses,
# plus fallback responses under 'not_found'.
BOT_CONFIG = {
    'intents': {
        'hello': {
            'examples': ['привет!', 'хай', 'Здравствуйте!!'],
            'responses': ['Добрый день', 'Здравия желаю!', 'Добрый вечер']
        },
        'bye': {
            'examples': ['пОкА!', 'до свидания', 'Увидимся!'],
            'responses': ['До связи', 'Саонара!!', 'Покеда']
        },
        'howdoyoudo': {
            'examples': ['как дела?', 'как жизнь?'],
            'responses': ['не жалуюсь!!']
        }
    },
    'not_found': {
        'responses': ['Извините, не удалось определить интент', 'Я пока еще глупый бот, ничего не понимаю!!']
    }
}
def clean(text):
    """Lower-case *text* and keep only Russian letters and spaces."""
    allowed = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя '
    return ''.join(ch for ch in text.lower() if ch in allowed)
def get_intent(text):
    """Return the best-matching intent key for *text*, or 'not_found'.

    An example matches when its normalised edit distance to the cleaned
    input is below 0.4.
    """
    # Clean the input once instead of once per example.
    cleaned_text = clean(text)
    for intent in BOT_CONFIG['intents'].keys():
        for example in BOT_CONFIG['intents'][intent]['examples']:
            cleaned_example = clean(example)
            longest = max(len(cleaned_example), len(cleaned_text))
            # Guard: two empty cleaned strings previously divided by zero.
            if longest == 0:
                continue
            if nltk.edit_distance(cleaned_example, cleaned_text) / longest < 0.4:
                return intent
    return 'not_found'
def bot(text):
    """Produce a reply for *text* from the configured responses."""
    intent = get_intent(text)
    if intent == 'not_found':
        pool = BOT_CONFIG['not_found']['responses']
    else:
        pool = BOT_CONFIG['intents'][intent]['responses']
    return random.choice(pool)
input_text = ''
# Simple REPL. Note the loop condition is checked *after* responding, so the
# bot replies to 'exit' once before terminating.
while input_text != 'exit':
    input_text = input()
    response = bot(input_text)
    print(response)
| m3t4mf3t4m1n/small-tasks | skillbox_bot_task_day1.py | skillbox_bot_task_day1.py | py | 1,787 | python | ru | code | 0 | github-code | 90 |
4998762266 |
# python 3.7.0
# Program that displays available PC COM Ports
# Using tkinter for a GUI
#
# Started Aug 24, 2018
# Uploaded to GIT Sept 17,2018
#
#
from tkinter import *
from tkinter import ttk
import serial
import serial.tools.list_ports
# -----
# -----Create main window - Called: window ----------------------
window = Tk()
window.minsize(450, 200)
window.maxsize(700, 500)
window.geometry("500x300+600+300")  # width x height + x-offset + y-offset
window.title("Port-ID")
# NOTE(review): raises TclError at startup if life.ico is missing.
window.iconbitmap("life.ico")
# ----- End main window - window -------------------------------------------------------
# -----
def blink_green():
    """Show the activity indicator as a raised green label at (150, 102)."""
    indicator = Label(window, text=" ", bg='green', relief=RAISED)
    indicator.place(x=150, y=102)
    window.label_text_output = indicator
def blink_red():
    """Show the activity indicator as a raised red label at (150, 102)."""
    indicator = Label(window, text=" ", bg='red', relief=RAISED)
    indicator.place(x=150, y=102)
    window.label_text_output = indicator
# ----- Method - Exit app- Exit button ---------------------------------------------------------
def button_quit_event():
    # quit here is the site-provided builtin; it raises SystemExit, ending
    # the app with exit code 33.
    quit(33) # <----- Quit and return 33 -------------------
# ----- Mehtod = Show COM ports - Refresh button -------------------
#
def port_id():
    """List the available serial COM ports in the window, then reschedule
    itself via window.after so the display refreshes continuously."""
    comlist = serial.tools.list_ports.comports()
    ports = []
    for element in comlist:
        ports.append(element.device)
    ports = str(ports)
    # ----- Create a frame to enclose the ports label
    # NOTE(review): a fresh Frame/Label pair is created on every poll and
    # the previous widgets are never destroyed, so they accumulate.
    frame = ttk.Frame(window)
    frame['width'] = 200
    frame['height'] = 50
    frame['borderwidth'] = 2
    frame['relief'] = 'sunken'
    frame.place(x=175, y=80)
    # ----- Create a label which displays the ports
    window.label_text_output = ttk.Label(window,text= ports) # <----- Display Ports ----
    window.label_text_output.place(x=180, y=100)
    # ----- Red/green blinking indicator shown while the app refreshes
    blink_red()
    window.after(500, blink_red)
    blink_green()
    window.after(1000, port_id) # <----- Loop again after 1 second -----------------
# ----- Create button - Refresh ---------------------------------------
#
# Manual refresh button (label text "Refreash" is a user-visible typo).
button_info = ttk.Button(window,text="Refreash", command=port_id)
button_info.place(x=50, y=100)
# Top-level menu bar with Exit and Refresh entries.
menubar = Menu(window)
menubar.add_command(label="Exit", command= button_quit_event)
menubar.add_command(label="Refresh", command= port_id)
window.config(menu=menubar) # <----- Display the menu ------------
port_id() # <----- Show ports when app starts ------------
mainloop()
| tpb409/Python-script-that-lists-Windows-serial-ports | tpb_port_id.py | tpb_port_id.py | py | 2,738 | python | en | code | 0 | github-code | 90 |
18458045459 | #15 B - KEYENCE String
# KEYENCE String: decide whether removing one contiguous substring
# (possibly empty) from S can produce exactly "keyence".
S = list(input())
result = 'NO'
target = list('keyence')
for i in range(len(S)):
    # j must run up to len(S) *inclusive* so a suffix containing the final
    # character can be removed; the original range(i, len(S)) never tried
    # j == len(S) and wrongly answered NO for inputs like "keyencea".
    for j in range(i, len(S) + 1):
        if S[:i] + S[j:] == target:
            result = 'YES'
            break
    else:
        continue
    break
print(result) | Aasthaengg/IBMdataset | Python_codes/p03150/s986260429.py | s986260429.py | py | 303 | python | en | code | 0 | github-code | 90 |
74486887975 | """Utils for calculations with bounding boxes
Mostly adopted from:
https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Object-Detection/blob/43fd8be9e82b351619a467373d211ee5bf73cef8/utils.py
"""
import torch
def xy_to_cxcy(xy):
    """
    Convert bounding boxes from boundary coordinates (x_min, y_min, x_max, y_max) to center-size coordinates (c_x, c_y, w, h).
    :param xy: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)
    :return: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)
    """
    mins = xy[:, :2]
    maxs = xy[:, 2:]
    centers = (mins + maxs) / 2
    sizes = maxs - mins
    return torch.cat([centers, sizes], 1)
def cxcy_to_xy(cxcy: torch.Tensor):
    """Convert boxes from center-size (c_x, c_y, w, h) back to boundary
    coordinates (x_min, y_min, x_max, y_max).

    :param cxcy: center-size boxes, a tensor of size (n_boxes, 4)
    :return: boundary-coordinate boxes, a tensor of size (n_boxes, 4)
    """
    centers = cxcy[:, :2]
    half_sizes = cxcy[:, 2:] / 2
    return torch.cat([centers - half_sizes, centers + half_sizes], 1)
def find_intersection(set_1, set_2):
    """
    Find the intersection of every box combination between two sets of boxes that are in boundary coordinates.
    :param set_1: set 1, a tensor of dimensions (n1, 4)
    :param set_2: set 2, a tensor of dimensions (n2, 4)
    :return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)
    """
    # Broadcast over the pair grid: overlap rectangle corners, shape (n1, n2, 2).
    top_left = torch.max(set_1[:, None, :2], set_2[None, :, :2])
    bottom_right = torch.min(set_1[:, None, 2:], set_2[None, :, 2:])
    # Negative extents mean no overlap; clamp them to zero.
    wh = torch.clamp(bottom_right - top_left, min=0)
    return wh[..., 0] * wh[..., 1]  # (n1, n2)
def find_jaccard_overlap(set_1, set_2):
    """
    Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates.
    :param set_1: set 1, a tensor of dimensions (n1, 4)
    :param set_2: set 2, a tensor of dimensions (n2, 4)
    :return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)
    """
    inter = find_intersection(set_1, set_2)  # (n1, n2)
    # Per-box areas: width * height.
    areas_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1])  # (n1,)
    areas_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1])  # (n2,)
    # Inclusion-exclusion, broadcast to (n1, n2).
    union = areas_1[:, None] + areas_2[None, :] - inter
    return inter / union
| dikvangenuchten/2AMM40-Project | object_detection/box_utils.py | box_utils.py | py | 2,700 | python | en | code | 0 | github-code | 90 |
# Count, for every 3x3 window fully inside the h x w grid, how many of the
# n marked cells it contains, then print how many windows hold 0..9 marks.
h, w, n = map(int, input().split())
dic = {}
for _ in range(n):
    a, b = map(int, input().split())
    a -= 1
    b -= 1
    # Each marked cell belongs to up to nine 3x3 windows, identified by
    # their centres; valid centres lie in [1, h-2] x [1, w-2].
    for da in (-1, 0, 1):
        for db in (-1, 0, 1):
            ca, cb = a + da, b + db
            if 1 <= ca <= h - 2 and 1 <= cb <= w - 2:
                dic[(ca, cb)] = dic.get((ca, cb), 0) + 1
count = [0] * 10
for v in dic.values():
    count[v] += 1
# Windows never touched by a mark make up the remainder of all (h-2)*(w-2).
count[0] = (h - 2) * (w - 2) - sum(count)
for i in range(10):
    print(count[i])
| Aasthaengg/IBMdataset | Python_codes/p04000/s247388476.py | s247388476.py | py | 541 | python | en | code | 0 | github-code | 90 |
70931222056 | from matplotlib import pyplot as plt
import numpy as np
from simpmeshfree_gui import jvm_utils as ju
from jpype import *
from simpmeshfree_gui.pixcell_painter import *
def ori_demo():
    """Plot the 16 base QuadPixcell atom samples, one subplot per smallbit
    value 0..15. NOTE: Python 2 code (xrange) driving a JPype-backed JVM."""
    fig = plt.figure()
    fig.show()
    for smallbit in xrange(16):
        ax = fig.add_subplot(4, 4, smallbit + 1, aspect='equal')
        samples = ju.QuadPixcellAtomSample.refineSamples(0.0, 0.0, 5.0, smallbit)
        QuadPixcells_plot(samples, ax)
    fig.canvas.draw()
    return fig
def refine_demo():
    """Like ori_demo, but also refines the first sample of each set and
    plots cells 1..3 of the refinement alongside the originals."""
    fig2 = plt.figure()
    fig2.show()
    for smallbit in xrange(16):
        ax2 = fig2.add_subplot(4, 4, smallbit + 1, aspect='equal')
        samples2 = ju.QuadPixcellAtomSample.refineSamples(0.0, 0.0, 5.0, smallbit)
        refined = samples2[0].refine()
        samples2 = [px for px in samples2]
        samples2.extend(refined[1:4])
        QuadPixcells_plot(samples2,ax2)
    fig2.canvas.draw()
    return fig2
def merge_after_ref_demo():
    """Refine the first sample, then merge it back; cells 1..3 returned by
    merge() are removed from the plotted set."""
    fig = plt.figure()
    fig.show()
    for smallbit in xrange(16):
        ax = fig.add_subplot(4, 4, smallbit + 1, aspect='equal')
        samples = ju.QuadPixcellAtomSample.refineSamples(0.0, 0.0, 5.0, smallbit)
        refined = samples[0].refine()
        samples = [px for px in samples]
        samples.extend(refined[1:4])
        merged = samples[0].merge()
        for px in merged[1:4]:
            samples.remove(px)
        QuadPixcells_plot(samples, ax)
    fig.canvas.draw()
    return fig
if __name__ == "__main__":
ju.start_jvm(debug_port=8998)
fig1 = ori_demo()
fig2 = refine_demo()
fig3 = merge_after_ref_demo()
| epsilony/simpmeshfree_gui | simpmeshfree_gui/demo/QuadPixcell_atom_demo.py | QuadPixcell_atom_demo.py | py | 1,627 | python | en | code | 1 | github-code | 90 |
15048050737 | a = """dong 1
dong 2
dong 3"""
b = """chuoi vua co ' va " """
c = 'chuoi nay cung co ca \' va \"'
print(c)
print(a)
print(b)
'''''''''''''''
try to comment sth
'''''''''''''''
def ham_vo_dung():
"""
"""
class ClassName(object):
"""docstring for ClassName"""
| trungpd1102/Python | howkteam/python_basic/2.string/part1.py | part1.py | py | 267 | python | en | code | 0 | github-code | 90 |
26553296491 | # -*- coding: utf-8 -*-
# @Time : 2021/2/19 0019
# @Author : yang
# @Email : 2635681517@qq.com
# @File : crue.py
from weibo.models import WeiboUser as User, Weibo, Comment
"""新增数据"""
# user_obj = User(username='杨华钟', password='123456', nickname='无限可能')
# user_obj.save()
#
# user_obj1 = User.objects.create(username='admin', password='password',nickname='未来世界')
# """查询"""
# try:
# user_obj = User.objects.get(pk=5)
# except Exception as e:
# print('error')
#
# user_all = User.objects.all()
# for user in user_all:
# print(user.username,end='|')
#
#
# user_obj = User.objects.get(pk=5)
# print(user_obj.nickname)
# user_obj.nickname = '世界'
# user_obj.save()
# """批量修改数据"""
# user_list = User.objects.all()
# user_list.update(password='aa123')
"""删除数据"""
# user_obj = User.objects.get(pk=7)
# user_obj.delete()
"""get_or_create有则返回,无则创建记录"""
# obj = User.objects.get_or_create(username='杨华钟',password='aa123',nickname='世界')
#
# obj1 = User.objects.get_or_create(username='杨',password='aa123',nickname='世界')
#
# user1 = User(username='user1', password=123, nickname='yan1')
# user2 = User(username='user2', password=123, nickname='yan2')
# user3 = User(username='user3', password=123, nickname='yan3')
# """插入多条数据"""
# User.objects.bulk_create([user1, user2, user3])
"""返回第一条/最后一条"""
# print(User.objects.first())
# print(User.objects.last())
"""返回数据库的记录数量"""
# print(User.objects.count())
"""结果集是否存在,存在则返回True,不存在则返回False"""
# print(User.objects.exists())
"""修改记录"""
# user_obj = User.objects.get(pk=18)
#
# user_obj.password = 'aaa'
# user_obj.save()
"""排除一些情况"""
# user = User.objects.all().exclude(username='杨华钟').count()
# print(user)
# print(User.objects.all().order_by('-id')[1].username)
# print(User.objects.using('students'))
"""筛选出满足条件的记录"""
# print(User.objects.all().filter(username='杨华钟'))
# list1 = [] for i in range(100): user = User(username='user{0}'.format(i),password='pass{0}'.format(i),nickname='用户{
# 0}'.format(i),status='2',remark='长江大学{}'.format(i)) User.objects.bulk_create([user])
"""分页(查询后再分片)"""
# user_list = User.objects.all()[80:90]
# print(len(user_list))
# for i in user_list:
# print(i.username)
# Live demo: fetch one user, then create a post (Weibo) and two comments
# attached to it. Everything above is commented-out ORM tutorial code.
user_list = User.objects.get(username='user11')
print(user_list)
# NOTE(review): variable is named user_list but holds a single WeiboUser.
weibo = Weibo.objects.create(user=user_list,content='长江大学')
# print(weibo.id)
comment = Comment.objects.create(user=user_list,weibo=weibo,content="内容1")
comment1 = Comment.objects.create(user=user_list,weibo=weibo,content="内容2")
| Futureword123456/django_1 | weibo/crue.py | crue.py | py | 2,732 | python | en | code | 0 | github-code | 90 |
11739323764 | # Create a generator to return an infinite sequence of odd numbers, starting at 1.
# Print the first 100 numbers, to check that the generator is working correctly.
# Note that this is just for testing. We are going to need far more than 100 numbers,
# and do not know in advance how many, so that is why we are creating our own generator,
# instead of just using range.
def generate_odd():
    """Yield the odd numbers 1, 3, 5, ... without end."""
    current = 1
    while True:
        yield current
        current += 2
genOdd = generate_odd()
# for i in range(100):
#     print(next(genOdd))
def pi_series():
    """Yield successive Leibniz-series approximations of pi.

    Each yielded value incorporates one more term of
    4/1 - 4/3 + 4/5 - 4/7 + ...
    """
    odds = generate_odd()
    approximation = 0
    while True:
        approximation += 4 / next(odds)
        yield approximation
        approximation -= 4 / next(odds)
        yield approximation
approx_pi = pi_series()
for x in range(10000000):
print(next(approx_pi)) | raunakpalit/myPythonLearning | venv/generators_comprehensions_lambda/pigen.py | pigen.py | py | 847 | python | en | code | 0 | github-code | 90 |
# Recorder-scheduling sweep (competitive-programming style): print the
# maximum number of simultaneously busy recorders over N programs on C
# channels — presumably AtCoder's "Recording" task; confirm against source.
N, C = [int(_) for _ in input().split()]
MT = (10 ** 5 + 10)  # upper bound of the time axis
# memo[t][ch] holds +1/-1 difference markers per time unit and channel.
memo = [[0] * C for _ in range(MT)]
# Sort programs by start time so same-channel adjacency is seen in order.
stc = sorted([[int(_) for _ in input().split()] for _ in range(N)])
for s, t, c in stc:
    if memo[s][c - 1] < 0:
        # Presumably: a program on this channel ends exactly at s, so the
        # recorder is reused — cancel the earlier -1 marker instead of
        # opening a new interval at s - 1.
        memo[s][c - 1] = 0
    else:
        # A recorder is busy from one time unit before the start.
        memo[s - 1][c - 1] += 1
    memo[t][c - 1] -= 1
ans = 0
cnt = 0
# Prefix-sum across time (all channels summed) = concurrent recorder count.
for i in range(MT):
    cnt += sum(memo[i])
    ans = max(ans,cnt)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03504/s519670780.py | s519670780.py | py | 402 | python | en | code | 0 | github-code | 90 |
8972952043 | from django.shortcuts import render, redirect
from .models import folder
# Create your views here.
def demo(request):
    """Render the upload form; on POST persist a folder record and redirect.

    NOTE(review): request.FILES['abc'] raises MultiValueDictKeyError when no
    file is attached — confirm the form always submits one.
    """
    if request.method == 'POST':
        record = folder(
            language=request.POST.get('language'),
            upload=request.FILES['abc'],
        )
        record.save()
        return redirect('demo')
    return render(request, 'new.html')
| mahimatiju/translation | pythonProject/demoproject/demoapp/views.py | views.py | py | 373 | python | en | code | 0 | github-code | 90 |
37142067230 | from flask import Flask, render_template, redirect
import requests
import json
app: Flask = Flask( __name__ )
@app.route( "/" )
def index():
cotacao = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
cotacao = cotacao.json()
cotacao_bit = cotacao['BTCBRL']['bid']
cotacao_euro = cotacao['EURBRL']['bid']
cotacao_real = cotacao['USDBRL']['bid']
return render_template('index.html', bit=cotacao_bit, euro=cotacao_euro,
real=cotacao_real)
if __name__ != "__name__":
app.run()
| camehu2022/cotacao_moeda | main.py | main.py | py | 565 | python | pt | code | 0 | github-code | 90 |
3794734411 | import os
import signal
import multiprocessing as mp
import time
PROMPT = '> '  # shell prompt shown to the user
MAIN_SLEEP_SEC = 0.1  # poll interval while a foreground job runs
JOIN_DELAY_SEC = 0.2  # grace period after SIGTERM/SIGKILL before giving up
DEBUG = True  # enables pid-tagged trace output via debug()
def debug(message):
    """Print *message* tagged with the current pid when DEBUG is enabled."""
    if not DEBUG:
        return
    print(f'{os.getpid()}: {message}', flush=True)
class Job:
    """One shell job wrapping a child multiprocessing.Process together with
    its run state (foreground / background / paused / killed)."""
    jobs = []  # registry of all jobs started this session

    # Job states; each indexes into STATE_SYMBOLS for display.
    RUNNING_FOREGROUND = 0
    RUNNING_BACKGROUND = 1
    RUNNING_PAUSED = 2
    KILLED = 3
    STATE_SYMBOLS = ['*', '+', '-', 'x']
    def __init__(self, line, function, args):
        """Start *function(*args)* in a daemon child and register the job.

        line: the command line that spawned this job (kept for display).
        """
        super().__init__()
        self.line = line
        self.process = mp.Process(target=function, args=args)
        self.process.daemon = True
        self.state = Job.RUNNING_FOREGROUND
        try:
            # Set signal handling for child: the child inherits the handler
            # table at start(), so adjust it just for the spawn window.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            signal.signal(signal.SIGTSTP, signal.SIG_DFL)
            self.process.start()
            Job.jobs.append(self)
            debug(f'started child process {self.process.pid}')
        finally:
            # Restore original signal handling for the shell itself.
            signal.signal(signal.SIGINT, ctrl_c_handler)
            signal.signal(signal.SIGTSTP, ctrl_z_handler)
    def __str__(self):
        return f'job({self.process.pid} ({Job.STATE_SYMBOLS[self.state]}): {self.line})'
    def kill(self):
        """Terminate the child: SIGTERM first, escalate to SIGKILL if it
        is still alive after JOIN_DELAY_SEC."""
        debug(f'kill {self}')
        self.state = Job.KILLED
        try:
            os.kill(self.process.pid, signal.SIGTERM)
            self.process.join(JOIN_DELAY_SEC)
            if self.process.is_alive():
                os.kill(self.process.pid, signal.SIGKILL)
                self.process.join(JOIN_DELAY_SEC)
                if self.process.is_alive():
                    debug(f'Unable to kill {self}')
        except ProcessLookupError:
            # Child already gone; nothing to clean up.
            pass
    # ctrl-z
    def pause(self):
        """Suspend the child with SIGTSTP (no-op if paused or killed)."""
        debug(f'pause {self}')
        if self.state not in (Job.RUNNING_PAUSED, Job.KILLED):
            os.kill(self.process.pid, signal.SIGTSTP)
            self.state = Job.RUNNING_PAUSED
    # bg
    def run_in_background(self):
        """Resume (SIGCONT) the child and mark it as a background job."""
        debug(f'run_in_background {self}')
        if self.state != Job.KILLED:
            os.kill(self.process.pid, signal.SIGCONT)
            self.state = Job.RUNNING_BACKGROUND
    # fg
    def run_in_foreground(self):
        """Resume (SIGCONT) the child and make it the foreground job."""
        debug(f'run_in_foreground {self}')
        if self.state == Job.KILLED:
            raise Exception('Cannot foreground killed job')
        if self.state != Job.RUNNING_FOREGROUND:
            os.kill(self.process.pid, signal.SIGCONT)
            self.state = Job.RUNNING_FOREGROUND
    @staticmethod
    def foreground():
        """Return the current foreground job, or None."""
        for job in Job.jobs:
            if job.state == Job.RUNNING_FOREGROUND:
                return job
        return None
    @staticmethod
    def foreground_is_alive():
        """True when a foreground job exists and its process is running."""
        foreground = Job.foreground()
        return foreground and foreground.process.is_alive()
    @staticmethod
    def remove_completed():
        """Drop finished jobs from the registry.

        NOTE(review): this renumbers the remaining jobs, so job ids shown
        by `jobs` change after completed jobs are pruned.
        """
        new_jobs = []
        for job in Job.jobs:
            if job.process.is_alive():
                new_jobs.append(job)
        Job.jobs = new_jobs
class Interact:
    """Minimal job-control REPL: read a command, dispatch it, then block
    the prompt while a foreground job runs."""
    @staticmethod
    def run():
        """Prompt/dispatch loop; ctrl-C at the prompt just reprints it."""
        while True:
            try:
                line = input(PROMPT)
                Interact.process_line(line)
                # Hold the prompt as long as a foreground job is alive.
                while Job.foreground_is_alive():
                    time.sleep(MAIN_SLEEP_SEC)
            except KeyboardInterrupt: # ctrl-C
                print()
    @staticmethod
    def process_line(line):
        """Dispatch one command: fg/bg/jobs/kill plus the sleep/timer/echo
        demo jobs. Job ids are indexes into Job.jobs."""
        if line.startswith('fg '):
            job_id = int(line.split()[-1])
            Job.jobs[job_id].run_in_foreground()
        elif line.startswith('bg '):
            job_id = int(line.split()[-1])
            Job.jobs[job_id].run_in_background()
        elif len(line) == 0:
            # Empty line still spawns a (do-nothing) job.
            def noop():
                pass
            Job(line, noop, tuple())
        elif line.startswith('sleep '):
            # `sleep <label> <seconds>`: announce the label after sleeping.
            def sleep():
                label, sleeptime = line.split()[1:]
                time.sleep(int(sleeptime))
                print(f'Wakey wakey {label}')
            Job(line, sleep, tuple())
        elif line.startswith('jobs'):
            Job.remove_completed()
            for i in range(len(Job.jobs)):
                print(f'{i}: {Job.jobs[i]}')
        elif line.startswith('kill '):
            job_id = int(line.split()[-1])
            Job.jobs[job_id].kill()
        elif line.startswith('timer '):
            # `timer <label> <interval>`: count forever at the given period.
            def timer(label, interval):
                debug(f'timer {label}, handler: {signal.getsignal(signal.SIGTSTP)}')
                try:
                    count = 0
                    while True:
                        debug(f'{os.getpid()} {label}: {count}')
                        time.sleep(interval)
                        count += 1
                except KeyboardInterrupt:
                    debug(f'process {os.getpid()} caught KeyboardInterrupt, exiting?')
            label, interval = line.split()[1:]
            interval = int(interval)
            Job(line, timer, (label, interval))
        else:
            # Anything else is echoed back from a child process.
            def echo():
                print(f'<<<{line}>>>')
            Job(line, echo, tuple())
def ctrl_z_handler(signum, frame):
    """SIGTSTP handler: pause the foreground job, keep background jobs alive."""
    assert signum == signal.SIGTSTP
    fg = Job.foreground()
    debug(f'ctrl_z_handler, pause foreground: {fg}')
    if fg:
        fg.pause()
    # ctrl-z propagates to the children and suspends them too; any job that
    # is supposed to be running in the background must be resumed.
    for job in Job.jobs:
        if job.state != Job.RUNNING_BACKGROUND:
            continue
        debug(f'ctrl_z_handler, revive background: {job}')
        job.run_in_background()
    print()
def ctrl_c_handler(signum, frame):
    """SIGINT handler: kill the current foreground job, if any."""
    assert signum == signal.SIGINT
    fg = Job.foreground()
    debug(f'ctrl_c_handler, kill foreground: {fg}')
    if fg:
        fg.kill()
    print()
def main():
    """Install the shell's signal handlers, run the REPL, and make sure no
    child process outlives it."""
    debug(f'main pid: {os.getpid()}')
    # The shell owns ctrl-C / ctrl-Z; children get their handlers adjusted
    # around spawn time inside Job.__init__.
    signal.signal(signal.SIGINT, ctrl_c_handler)
    signal.signal(signal.SIGTSTP, ctrl_z_handler)
    Interact.only = Interact()  # NOTE(review): stored but never read; run() is static
    try:
        Interact.run()
    except EOFError: # ctrl-D
        print()
    finally:
        # Reap every child before exiting.
        for job in Job.jobs:
            job.kill()
| geophile/marcel | experiments/jobcontrol.py | jobcontrol.py | py | 6,181 | python | en | code | 290 | github-code | 90 |
41833704147 | import os
from setuptools import setup
VERSION = "0.1"
def get_long_description():
    """Return the contents of the README.md that sits next to this file."""
    here = os.path.dirname(os.path.abspath(__file__))
    readme_path = os.path.join(here, "README.md")
    with open(readme_path, encoding="utf8") as fp:
        return fp.read()
setup(
name="imdb-to-sqlite",
description="Convert IMDB ratings CSV export to a SQLite database",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Andreas Madsack",
url="https://github.com/mfa/imdb-to-sqlite",
license="Apache License, Version 2.0",
version=VERSION,
packages=["imdb_to_sqlite"],
entry_points="""
[console_scripts]
imdb-to-sqlite=imdb_to_sqlite.cli:cli
""",
install_requires=["sqlite-utils~=3.2.1", "click"],
extras_require={"test": ["pytest"], "lint": ["black", "isort"]},
tests_require=["imdb-to-sqlite[test]"],
)
| mfa/imdb-to-sqlite | setup.py | setup.py | py | 896 | python | en | code | 0 | github-code | 90 |
39851787463 | import datetime
import json
import os
import time
from collections import Counter
import torch
from torch.utils.tensorboard import SummaryWriter
from src.tasks.early_stopping import EarlyStopping
from src.tasks.saver import Saver
from src.utils.pytorch_ir_metrics import rank_labels
from src.utils.utils import batch_sparse
"""
Trainer classes
disclaimer: strongly inspired from https://github.com/victoresque/pytorch-template
"""
class BaseTrainer:
    """base trainer class: owns the optimisation loop, checkpointing,
    tensorboard logging and text-file metric logs; subclasses implement
    train_epoch() and valid_epoch()."""
    def __init__(self, model, loss, optimizer, config, metrics):
        """
        model: model object
        loss: loss object
        optimizer: optimizer object
        config: dict of config parameters
        metrics: OrderedDict of (callable) metrics, e.g. {"map": map, ...
        """
        print("initialize trainer...")
        self.loss = loss
        self.optimizer = optimizer
        self.metrics = metrics
        # no multi-GPUs case:
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.model = model.to(self.device)
        self.model.train() # put model on train mode
        self.checkpoint_dir = config["checkpoint_dir"]
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)
        if not os.path.exists(os.path.join(self.checkpoint_dir, "saved")):
            os.makedirs(os.path.join(self.checkpoint_dir, "saved"))
        self.nb_epochs = config["nb_epochs"]
        # setup tensorboard writer instance (timestamped run sub-directory):
        self.writer_dir = os.path.join(config["checkpoint_dir"],
                                       "tensorboard",
                                       datetime.datetime.now().strftime('%m%d_%H%M%S'))
        self.writer = SummaryWriter(self.writer_dir)
        self.config = config
        print("trainer config:\n", self.config)
        self.config["model_init_dict"] = model.init_dict # NOTE: each model should have a dict attribute that contains
        # everything needed to instantiate it in this fashion: model(**init_dict)
        # (handy when loading a saved model for inference)
    def train(self):
        """
        full training logic: for each epoch, train, validate (and test when
        a test loader exists), log to tensorboard and text files, and hand
        the monitored value to the saver / early stopping.
        """
        # initialize early stopping or saver:
        if "early_stopping" in self.config:
            saver = EarlyStopping(self.config["patience"], self.config["early_stopping"])
        else:
            saver = Saver()
        t0 = time.time()
        training_res_handler = open(os.path.join(self.checkpoint_dir, "training_perf.txt"), "w")
        validation_res_handler = open(os.path.join(self.checkpoint_dir, "validation_perf.txt"), "w")
        training_res_handler.write("epoch,loss\n")
        validation_res_handler.write("epoch,loss,{}\n".format(",".join(self.metrics.keys())))
        # NOTE(review): test_res_handler (opened below) is never closed.
        try:
            if self.test_loader is not None:
                test_res_handler = open(os.path.join(self.checkpoint_dir, "test_perf.txt"), "w")
                test_res_handler.write("epoch,loss,{}\n".format(",".join(self.metrics.keys())))
        except AttributeError:
            # Subclass defined no test_loader attribute at all.
            print("no logging of test metrics")
        for epoch in range(1, self.nb_epochs + 1):
            print("==== BEGIN EPOCH {} ====".format(epoch))
            # == start training for one epoch ==
            self.model.train() # => train mode
            train_loss = self.train_epoch(epoch) # train_epoch() returns the training loss (on full training set)
            print("*train loss:{}".format(train_loss))
            self.writer.add_scalar(os.path.join(self.writer_dir, "full_train_loss"), train_loss, epoch)
            training_res_handler.write("{},{}\n".format(epoch, train_loss))
            # == start validation ==
            self.model.eval() # => eval mode
            with torch.no_grad():
                val_loss, val_metrics = self.valid_epoch()
                # add validation loss to tensorboard:
                self.writer.add_scalar(os.path.join(self.writer_dir, "full_validation_loss"), val_loss, epoch)
                # add validation metrics to tensorboard:
                for metric in val_metrics:
                    self.writer.add_scalar(os.path.join(self.writer_dir, "full_validation_{}".format(metric)),
                                           val_metrics[metric], epoch)
                # and write these values to validation text file:
                validation_res_handler.write("{},{:5f}".format(epoch, val_loss))
                for key_ in self.metrics.keys():
                    validation_res_handler.write(",{:5f}".format(val_metrics[key_]))
                validation_res_handler.write('\n')
                # same for test (if test loader):
                try:
                    test_loss, test_metrics = self.valid_epoch(data="test")
                    # add validation loss to tensorboard:
                    self.writer.add_scalar(os.path.join(self.writer_dir, "full_test_loss"), test_loss, epoch)
                    for metric in test_metrics:
                        self.writer.add_scalar(os.path.join(self.writer_dir, "test_{}".format(metric)),
                                               test_metrics[metric], epoch)
                    test_res_handler.write("{},{:5f}".format(epoch, test_loss))
                    for key_ in self.metrics.keys():
                        test_res_handler.write(",{:5f}".format(test_metrics[key_]))
                    test_res_handler.write('\n')
                except AssertionError:
                    # valid_epoch asserts a test loader exists; skip when absent.
                    pass
            print("=validation-loss:{}".format(val_loss))
            for key, val in val_metrics.items():
                print("+validation-{}:{}".format(key, val))
            if "early_stopping" in self.config:
                # Monitor either the validation loss or a named metric.
                if self.config["early_stopping"] == "loss":
                    saver(val_loss, self, epoch)
                else:
                    saver(val_metrics[self.config["early_stopping"]], self, epoch)
                if saver.stop: # meaning we reach the early stopping criterion
                    print("== EARLY STOPPING AT EPOCH {}".format(epoch))
                    self.config["stop_iter"] = epoch
                    break
            else:
                saver(val_metrics[self.config["monitoring_metric"]], self, epoch)
        self.writer.close() # closing tensorboard writer
        with open(os.path.join(self.checkpoint_dir, "config.json"), "w") as handler:
            json.dump(self.config, handler)
        training_res_handler.close()
        validation_res_handler.close()
        print("======= TRAINING DONE =======")
        print("took about {} hours".format((time.time() - t0) / 3600))
    def save_checkpoint(self, epoch, val_perf, is_best=False):
        """
        Persist model/optimizer/config state. When is_best, write
        saved/model_best-at_epoch_<epoch>.tar and delete the previous best
        file so at most one best checkpoint is kept.
        """
        state = {"epoch": epoch,
                 "val_perf": val_perf,
                 "model_state_dict": self.model.state_dict(),
                 "optimizer_state_dict": self.optimizer.state_dict(),
                 "config": self.config
                 }
        if is_best:
            listdir = os.listdir(os.path.join(self.checkpoint_dir, "saved"))
            update = False
            if len(listdir) == 1:
                previous_file_name = listdir[0]
                update = True
            torch.save(state, os.path.join(self.checkpoint_dir, "saved/model_best-at_epoch_{}.tar".format(epoch)))
            if update:
                os.remove(os.path.join(self.checkpoint_dir, "saved/{}".format(previous_file_name))) # because we
                # only want to keep the last best config
    def train_epoch(self, epoch):
        """
        epoch training logic (subclass hook); must return the epoch's
        training loss.
        """
        raise NotImplementedError
    def valid_epoch(self, **kwargs):
        """
        epoch validation logic (subclass hook); must return
        (loss, metrics dict).
        """
        raise NotImplementedError
class GraphRankingTrainer(BaseTrainer):
    """Trainer for graph-ranking models fed by torch_geometric-style loaders."""

    def __init__(self, model, loss, optimizer, config, metrics, train_loader, validation_loader, test_loader=None):
        """
        :param train_loader: loader yielding batched graphs for training.
        :param validation_loader: loader yielding batched graphs for validation.
        :param test_loader: optional loader for the test split (None if absent).
        """
        super().__init__(model, loss, optimizer, config, metrics)
        self.train_loader = train_loader
        self.validation_loader = validation_loader
        # the original if/else branches both reduced to this plain assignment
        self.test_loader = test_loader

    def forward(self, batch_graph):
        """
        batch_graph: batch of graphs (torch_geometric.data.Data object)
        return: scores, labels and batch vector for the batch of graphs
        """
        # move all the tensors in batch_graph to the computation device
        for k in batch_graph.keys:
            batch_graph[k] = batch_graph[k].to(self.device)
        scores = self.model(batch_graph)
        # returns 1-D tensors:
        return scores, batch_graph.y, batch_graph.batch

    def train_epoch(self, epoch):
        """Train on every batch once; return the mean batch loss."""
        # the model is already in train mode
        total_loss = 0
        for batch_id, batch_graph in enumerate(self.train_loader):
            self.optimizer.zero_grad()
            scores, labels, batch_vec = self.forward(batch_graph)
            loss = self.loss(scores, labels, batch_vec)
            loss.backward()
            self.optimizer.step()
            total_loss += loss.item()
        return total_loss / len(self.train_loader)

    def valid_epoch(self, data="validation"):
        """
        epoch validation logic
        return: validation loss and validation metrics (or test if data == "test", validation by default)
        """
        # the model is in eval mode (+ torch.no_grad())
        total_metrics = Counter({})
        total_loss = 0
        assert (data == "validation" or (data == "test" and self.test_loader is not None))
        loader = self.validation_loader if data == "validation" else self.test_loader
        for batch_id, batch_graph in enumerate(loader):
            scores, labels, batch_vec = self.forward(batch_graph)
            loss = self.loss(scores, labels, batch_vec)
            total_loss += loss.item()
            total_metrics += self.eval_metrics(scores, labels, batch_vec)
        # average every accumulated metric over the number of batches
        out = {key: value / len(loader) for key, value in total_metrics.items()}
        return total_loss / len(loader), out

    def eval_metrics(self, scores, labels, batch_vec):
        """
        computes a bunch of ranking metrics for a batch of graphs
        """
        res = {}
        # convert sparse batch encoding of scores and labels to dense tensors:
        batch_scores, batch_labels = batch_sparse(scores, labels, batch_vec)
        batch_scores = batch_scores.to(self.device)
        batch_labels = batch_labels.to(self.device)
        ranked_labels = rank_labels(batch_scores, batch_labels)
        for key, metric_fn in self.metrics.items():
            res[key] = metric_fn(ranked_labels).item()
        return Counter(res)
| naver/dcmm | src/tasks/trainer.py | trainer.py | py | 10,836 | python | en | code | 8 | github-code | 90 |
# ANSI colour escape sequences that may be embedded in the results file.
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Use a context manager so the file is always closed, even if a corrupted
# line raises below (the original opened/closed the handle manually).
with open("test_results.csv") as fh:
    for line in fh:
        # strip the colour codes before any further inspection
        line = line.replace(GREEN,'')
        line = line.replace(RED,'')
        line = line.replace(NC,'')
        if line.startswith("\0"):
            # line is corrupted with NUL bytes: dump per-character ordinals
            # as a diagnostic instead of printing it
            res = ''
            zero_ctr = 0
            for ch in line:
                if ch == '\0':
                    zero_ctr += 1
                res += str(ord(ch))
                res += ' '
            # NOTE(review): line[zero_ctr+1] can raise IndexError when the
            # NULs run to the end of the line -- confirm input shape.
            print("Bad Line! len=%s, zero_ctr=%s, [0]=--->%s<--- [1]=--->%s<---, @%s=%s, @%s=%s" % (len(line), zero_ctr, line[0], line[1], zero_ctr, line[zero_ctr], zero_ctr+1, line[zero_ctr+1]))
            print(res)
        else:
            print(line.strip())
| MycroftAI/mark-ii-hardware-testing | clean_test_results.py | clean_test_results.py | py | 732 | python | en | code | 0 | github-code | 90 |
33585074698 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2018/11/6 15:00
@Author : TianCi
@File : _single_link_list1.py
@Software: PyCharm
@desc:单链表实现
"""
class Node(object):
    """A single node of a singly linked list."""

    def __init__(self, element):
        # payload stored in this node
        self.element = element
        # reference to the successor node (None if this is the tail)
        self.next = None


class SingleLinkList(object):
    """A singly linked list supporting insertion, removal and search."""

    def __init__(self, node=None):
        # head of the list; None means the list is empty
        self.__head = node

    def is_empty(self):
        """Return True if the list holds no nodes."""
        return self.__head is None

    def length(self):
        """Return the number of nodes in the list (O(n))."""
        count = 0
        current = self.__head
        while current is not None:
            count += 1
            current = current.next
        return count

    def travel(self):
        """Print every element from head to tail."""
        current = self.__head
        while current is not None:
            print(current.element)
            current = current.next

    def add(self, item):
        """Prepend *item* at the head (O(1))."""
        node = Node(item)
        # new node points at the old head, then becomes the head itself
        node.next = self.__head
        self.__head = node

    def append(self, item):
        """Append *item* at the tail (O(n))."""
        node = Node(item)
        if self.__head is None:
            self.__head = node
            return
        # walk to the current tail, then hang the new node off it
        tail = self.__head
        while tail.next is not None:
            tail = tail.next
        tail.next = node

    def insert(self, pos, item):
        """Insert *item* before position *pos* (O(n)).

        Positions <= 0 prepend; positions >= length() append.
        """
        if pos <= 0:
            self.add(item)
        elif pos >= self.length():
            self.append(item)
        else:
            # walk to the node just before the insertion point
            prev = self.__head
            for _ in range(pos - 1):
                prev = prev.next
            node = Node(item)
            node.next = prev.next
            prev.next = node

    def remove(self, item):
        """Remove the first node holding *item*; return True if one was found."""
        prev = None
        current = self.__head
        while current is not None:
            if current.element == item:
                if current is self.__head:
                    # removing the head: advance the head pointer
                    self.__head = current.next
                else:
                    # unlink the node by bypassing it
                    prev.next = current.next
                return True
            prev = current
            current = current.next
        return False

    def search(self, item):
        """Return True if *item* occurs in the list (O(n))."""
        current = self.__head
        while current is not None:
            if current.element == item:
                return True
            current = current.next
        return False
if __name__ == '__main__':
    # quick manual demo of the list operations above
    link_list = SingleLinkList()
    link_list.append(1)
    link_list.add(2)
    link_list.append(1)
    link_list.append(1)
    link_list.insert(2, 100)
    link_list.remove(1)
    link_list.remove(100)
    # travel() prints the elements and returns None, so "None" is printed last
    print(link_list.travel())
| PandaHero/data_structure_algorithms | data_structure/_single_link_list1.py | _single_link_list1.py | py | 3,851 | python | en | code | 0 | github-code | 90 |
70668589416 | import pytorch_lightning as pl
from torch.utils.data import DataLoader
from sinc.data.tools.collate import collate_pairs_and_text, collate_datastruct_and_text, collate_datastruct_and_multi_text
from sinc.data.tools.collate import collate_text_and_body_parts
import torch
class BASEDataModule(pl.LightningDataModule):
    """Base Lightning data module that lazily builds train/val/test datasets.

    Subclasses need to provide:
      - self.Dataset      dataset class, called as Dataset(split=..., **hparams)
      - self._sample_set  small subset loader (see get_sample_set helper)
      - self.nfeats
      - self.transforms
    """

    def __init__(self,
                 batch_size: int,
                 num_workers: int,
                 datatype: str = None):
        super().__init__()
        # pick the collate function matching the dataset's sample layout
        # if '+' in datatype:
        #     collate_fn = collate_datastruct_and_multi_text
        if 'separate_pairs' == datatype:
            collate_fn = collate_pairs_and_text
        elif datatype == "text":
            collate_fn = collate_text_and_body_parts
        else:
            collate_fn = collate_datastruct_and_text

        def set_worker_sharing_strategy(worker_id: int) -> None:
            # workaround for "too many open files" with many dataloader workers
            sharing_strategy = "file_system"
            torch.multiprocessing.set_sharing_strategy(sharing_strategy)

        self.dataloader_options = {
            'batch_size': batch_size,
            'num_workers': num_workers,
            'collate_fn': collate_fn,
            'drop_last': False,
            # 'worker_init_fn': set_worker_sharing_strategy
            # 'pin_memory': True,
        }

        # datasets are built lazily on first property access
        self._train_dataset = None
        self._val_dataset = None
        self._test_dataset = None
        # Optional
        self._subset_dataset = None

    def get_sample_set(self, overrides=None):
        """Instantiate a dataset from self.hparams with optional overrides.

        `overrides` defaults to None instead of a shared mutable `{}`
        (mutable default arguments are a classic Python pitfall); passing a
        dict behaves exactly as before.
        """
        sample_params = self.hparams.copy()
        sample_params.update(overrides or {})
        return self.Dataset(**sample_params)

    @property
    def train_dataset(self):
        """Training split, built on first access."""
        if self._train_dataset is None:
            self._train_dataset = self.Dataset(split="train", **self.hparams)
        return self._train_dataset

    @property
    def val_dataset(self):
        """Validation split, built on first access."""
        if self._val_dataset is None:
            self._val_dataset = self.Dataset(split="val", **self.hparams)
        return self._val_dataset

    @property
    def test_dataset(self):
        """Test split, built on first access."""
        if self._test_dataset is None:
            self._test_dataset = self.Dataset(split="test", **self.hparams)
        return self._test_dataset

    @property
    def subset_dataset(self):
        """Optional small subset split, built on first access."""
        if self._subset_dataset is None:
            self._subset_dataset = self.Dataset(split="subset", **self.hparams)
        return self._subset_dataset

    def setup(self, stage=None):
        # Use the getters the first time to trigger loading of the data
        if stage in (None, "fit"):
            _ = self.train_dataset
            _ = self.val_dataset
        if stage in (None, "test"):
            _ = self.test_dataset

    def train_dataloader(self):
        """Shuffled loader over the training split."""
        return DataLoader(self.train_dataset,
                          shuffle=True,
                          **self.dataloader_options)

    def predict_dataloader(self):
        """Deterministic (unshuffled) loader over the training split."""
        return DataLoader(self.train_dataset,
                          shuffle=False,
                          **self.dataloader_options)

    def val_dataloader(self):
        """Loader over the validation split."""
        return DataLoader(self.val_dataset,
                          shuffle=False,
                          **self.dataloader_options)

    def test_dataloader(self):
        """Loader over the test split."""
        return DataLoader(self.test_dataset,
                          shuffle=False,
                          **self.dataloader_options)

    def subset_dataloader(self):
        """Loader over the optional subset split."""
        return DataLoader(self.subset_dataset,
                          shuffle=False,
                          **self.dataloader_options)
| athn-nik/sinc | sinc/data/base.py | base.py | py | 3,758 | python | en | code | 68 | github-code | 90 |
10129652046 | import unittest
import torch as T
from mltoolkit.mlmo.generation import Beamer
from mltoolkit.mlmo.utils.tools import DecState
import numpy as np
class TestBeamDecoder(unittest.TestCase):
    """
    Note that this test ignores the fact that word scores should be log
    probabilities.
    """

    def test_simple_output(self):
        """Hidden state independent test."""
        beam_size = 2
        max_steps = 3
        # toy vocabulary: ids 4/5 are the start/end tokens
        vocab = {0: "a", 1: "b", 2: "c", 3: "<pad>", 4: "<s>", 5: "<e>"}
        # two best sequences expected from the scripted scores in Dec
        exp_seqs = [[4, 2, 5], [4, 0, 0, 5]]
        init_hidden = T.tensor([[0., 0., 0.], [0., 0., 0.]], dtype=T.float32)
        init_dec_state = DecState(rec_vals={"hidden": init_hidden})
        dec = Dec()
        beam_decoder = Beamer(decoding_func=dec.dummy_dec_func, start_id=4,
                              beam_size=beam_size, end_id=5,
                              validate_dec_out=False)
        act_seqs, _ = beam_decoder(init_dec_state=init_dec_state,
                                   max_steps=max_steps)
        self.assertTrue((exp_seqs == act_seqs))

    def test_hidden_dependent_output(self):
        # decoder output depends on the evolving hidden state
        # (see Dec.hidden_dep_dec_func); only one best sequence is expected
        beam_size = 2
        max_steps = 3
        vocab = {0: "a", 1: "b", 2: "c", 3: "<pad>", 4: "<s>", 5: "<e>"}
        exp_seqs = [[4, 1, 0, 5]]
        init_hidden = T.tensor([[0.]], dtype=T.float32)
        dec = Dec()
        beam_decoder = Beamer(decoding_func=dec.hidden_dep_dec_func,
                              start_id=4, end_id=5, validate_dec_out=False,
                              n_best=beam_size, beam_size=beam_size)
        init_dec_state = DecState(rec_vals={"hidden": init_hidden})
        act_seqs, _ = beam_decoder(init_dec_state, max_steps=max_steps)
        self.assertTrue((exp_seqs == act_seqs))

    # def test_coll_vals(self):
    #     """
    #     Testing whether the decoder correctly collects additional artifacts
    #     produced by the decoder.
    #     """
    #     beam_size = 2
    #     max_steps = 3
    #     raise NotImplementedError
class Dec:
    """Stub decoder that replays scripted score tensors over three steps."""

    def __init__(self):
        # step counter; incremented on every decoding call (-1 = not started)
        self.state = -1

    def dummy_dec_func(self, prev_word_ids, hidden):
        """Return pre-scripted word scores, independent of `hidden`."""
        step_scores = [
            [
                [1.1, 0., 1., 0., 0., 0.],
                [0., 0., 0., 10., 10., 10.],  # this will be ignored
                [1.1, 1., 0., 0., 0., 0.],
                [0., 0., 0., 10., 0., 0.]  # this will be ignored
            ],
            [
                [0., 1., 2., 0., 0., 0.],
                [0., 0., 0., 0., 0., 6.],
                [1.1, 0., 0., 0., 0., 0.],
                [0., 1., 0., 0., 0., 0.]
            ],
            [
                [4., 9999., 3., 10., 133., 5.],
                [0., 0., 0., 0., 0., 1.],
                [0., 0., 0., 0., 0., 1.],
                [0., 0., 0., 0., 0., 1.]
            ],
        ]
        self.state += 1
        if self.state > 2:
            raise ValueError("The decoding func supports only 3 steps!")
        word_scores = T.tensor(step_scores[self.state])
        return DecState(word_scores=word_scores, rec_vals={"hidden": hidden})

    def hidden_dep_dec_func(self, prev_word_ids, hidden):
        """Return scores that depend on (and update) the hidden state."""
        self.state += 1
        if self.state == 0:
            scores = [
                [1., 1.1, 0., 0.0, 0.0, 0.0],
                [10., 0., 50., 0., 0., 0.]  # this will be ignored
            ]
            # mutate the incoming hidden state in place
            hidden[0, 0] += 1.
            return DecState(T.tensor(scores), rec_vals={"hidden": hidden})
        if self.state == 1:
            scores = T.tensor([
                [0.2, 0., 0.1, 0., 0., 0.],
                [0., 0., 0., 1.1, 0., 2.]
            ])
            scores[0, :] += hidden[0, 0]
            return DecState(scores, rec_vals={"hidden": T.tensor([[1.2], [2.]])})
        if self.state == 2:
            scores = T.tensor([
                [4., 99., 331., 10., 133., 53.],  # should be ignored
                [0., 0., 0., -3.0000, 0., 0.001]
            ])
            scores[0, :] += hidden[0, 0]
            scores[1, :] += hidden[1, 0]
            return DecState(scores, rec_vals={"hidden": T.tensor([[0.], [0.]])})
        raise ValueError("The decoding func supports only 3 steps!")

    def val_coll_dec_func(self, prev_word_ids, hidden):
        raise NotImplementedError
if __name__ == '__main__':
    # run the test suite when executed as a script
    unittest.main()
| abrazinskas/Copycat-abstractive-opinion-summarizer | mltoolkit/mlmo/tests/tools/test_beam_decoder.py | test_beam_decoder.py | py | 4,288 | python | en | code | 98 | github-code | 90 |
1249927002 | import re
from datetime import datetime
import scrapy
import tldextract
from tpdb.BaseSceneScraper import BaseSceneScraper
from tpdb.items import SceneItem
class VIP4KPagedSpider(BaseSceneScraper):
    """Scene scraper for the paged listing pages of the VIP 4K network.

    Every start URL is a sister site with its own HTML layout, which is why
    parse() branches on the response domain for each extracted field.
    """
    name = 'VIP4KPaged'
    network = 'VIP 4K'
    parent = 'VIP 4K'

    # one listing root per sister site
    start_urls = [
        'https://debt4k.com',
        'https://hunt4k.com',
        'https://law4k.com',
        'https://loan4k.com',
        'https://shame4k.com',
        'https://stuck4k.com',
        'https://tutor4k.com',
    ]

    # per-field selectors used by BaseSceneScraper; most are blank because
    # this spider extracts everything itself inside parse()
    selector_map = {
        'title': '',
        'description': '',
        'date': '',
        'image': '',
        'performers': '',
        'tags': "",
        'external_id': 'updates\\/(.*)\\.html$',
        'trailer': '//video/source/@src',
        'pagination': '/en/%s'
    }

    def parse(self, response, **kwargs):
        """Yield one SceneItem per scene card on a listing page.

        Each field (performer, title, description, image) is looked up with
        a site-specific XPath keyed off the response domain.
        """
        count = 0
        # pick the scene-card container matching the current site
        if "debt4k" in response.url:
            scenes = response.xpath('//div[@class="episode__body"]')
        if "hunt4k" in response.url:
            scenes = response.xpath('//div[@class="box-index"]')
        if "law4k" in response.url:
            scenes = response.xpath('//div[@class="content__block episode-block"]')
        if "loan4k" in response.url:
            scenes = response.xpath('//div[@class="holder"]')
        if "stuck4k" in response.url:
            scenes = response.xpath('//div[@class="content__block episode"]')
        if "shame4k" in response.url:
            scenes = response.xpath('//div[@class="content__block episode"]')
        if "tutor4k" in response.url:
            scenes = response.xpath('//div[@class="content__block episode"]')
        for scene in scenes:
            count = count + 1
            item = SceneItem()
            # defaults; the per-site branches below fill what they can
            item['performers'] = []
            item['trailer'] = ''
            item['description'] = ''
            item['parent'] = "VIP 4K"
            item['network'] = "VIP 4K"
            item['url'] = response.url
            item['tags'] = ''
            description = ''
            # listing pages carry no release date, so stamp with "now"
            item['date'] = datetime.now().isoformat()

            # --- performer ------------------------------------------------
            if "debt4k" in response.url:
                performer = scene.xpath('.//strong[contains(text(),"Name")]/../following-sibling::div/text()').get()
            if "hunt4k" in response.url:
                performer = ''
            if "law4k" in response.url:
                performer = scene.xpath('./following-sibling::div[contains(@class,"episode")][1]//div[contains(text(),"Alias")]/following-sibling::div/text()').get()
            if "loan4k" in response.url:
                performer = scene.xpath('.//li/span[contains(text(), "Name")]/following-sibling::strong/text()').get()
            if "shame4k" in response.url:
                performer = scene.xpath('.//div[@class="record__about"]/div/span[contains(text(),"Name:")]/following-sibling::text()').get()
            if "stuck4k" in response.url:
                performer = ''
            if "tutor4k" in response.url:
                performer = ''
            # NOTE(review): .strip() raises if the XPath returned None --
            # confirm the performer selectors always match on those sites
            performer = performer.strip()
            item['performers'].append(performer)

            # --- title ----------------------------------------------------
            if "debt4k" in response.url:
                title = scene.xpath('.//h2[contains(@class,"episode__title")]/text()').get()
            if "hunt4k" in response.url:
                title = scene.xpath('.//div[@class="title-embed"]/span/text()').get()
            if "law4k" in response.url:
                title = scene.xpath('.//h2[contains(@class,"title")]/text()').get()
            if "loan4k" in response.url:
                title = scene.xpath('.//div[@class="top_panel"]/span/text()').get()
            if "shame4k" in response.url:
                title = scene.xpath('./h2/text()').get()
            if "stuck4k" in response.url:
                title = scene.xpath('.//div[@class="record__title"]/text()').get()
            if "tutor4k" in response.url:
                title = scene.xpath('.//h2[contains(@class,"episode__title")]/text()').get()
            title = title.strip().title()
            # slug-style id derived from the title
            item['id'] = re.sub(r'[^a-zA-Z0-9\-]', '', title.replace(" ", "-").lower())
            # sites that show the performer inline get it appended to the title
            if performer and "law4k" not in response.url and "stuck4k" not in response.url and "shame4k" not in response.url and "tutor4k" not in response.url:
                title = title + ": " + performer
            item['title'] = title

            # --- description ----------------------------------------------
            if "debt4k" in response.url:
                description = scene.xpath('.//div[@class="player-item__text"]/text()').get()
            if "hunt4k" in response.url:
                description = scene.xpath('.//div[@class="descr-embed"]/text()').get()
            if "law4k" in response.url:
                description = scene.xpath('./following-sibling::div[@class="content__block episode"][1]//div[contains(@class,"debt-note__text")]/text()').get()
            if "loan4k" in response.url:
                # loan4k shows both a manager note and a description
                managernotes = scene.xpath('.//div[@class="hold_notes"]/p/text()').get()
                description = scene.xpath('//div[@class="post hide_block"]/p/text()').get()
                if managernotes:
                    description = "Managers Notes: " + managernotes + '\r\nDescription: ' + description
            if "shame4k" in response.url:
                description = scene.xpath('.//div[@class="episode__text"]/text()').get()
            if "stuck4k" in response.url:
                description = scene.xpath('.//div[@class="episode__text"]/span[@class="episode__text-area"]/text()').get()
            if "tutor4k" in response.url:
                description = scene.xpath('.//span[@class="episode-about__text text"]/text()').get()
            if description:
                item['description'] = description.strip()

            # --- image ----------------------------------------------------
            if "debt4k" in response.url:
                image = scene.xpath('.//div[@class="episode__player"]//img/@data-src').get()
            if "hunt4k" in response.url:
                image = scene.xpath('./div/div[@class="embed"]/a/img/@data-src').get()
            if "law4k" in response.url:
                # law4k embeds the image url in an inline style attribute
                image = scene.xpath('.//div/span[contains(text(),"Punishment")]/../../../a/@style').get()
                if image:
                    image = re.search(r'url\((.*\.jpg)\)', image).group(1)
            if "loan4k" in response.url:
                image = scene.xpath('.//div[@class="wrapper_player"]/img/@data-src').get()
            if "shame4k" in response.url:
                image = scene.xpath('./div/a/picture//source[@type="image/jpeg"]/@data-srcset').get()
            if "stuck4k" in response.url:
                image = scene.xpath('./div[@class="episode__img"]/a/img/@data-src').get()
            if "tutor4k" in response.url:
                image = scene.xpath('./div[@class="episode__img"]/a/img/@data-src').get()
            # NOTE(review): image may be None if no selector matched -- this
            # slice would raise TypeError; confirm every branch yields an image
            if image[:2] == "//":
                # protocol-relative URL: force https
                image = "https:" + image
            item['image'] = image.strip()
            # site name is the bare domain (e.g. "debt4k")
            item['site'] = tldextract.extract(response.url).domain
            yield item

        # NOTE(review): pagination only fires for hunt4k listings with more
        # than one scene -- confirm the other sites are intentionally
        # single-page here
        if count and ("hunt4k" in response.url and count > 1):
            if 'page' in response.meta and response.meta['page'] < self.limit_pages:
                meta = response.meta
                meta['page'] = meta['page'] + 1
                print('NEXT PAGE: ' + str(meta['page']))
                yield scrapy.Request(url=self.get_next_page_url(response.url, meta['page']),
                                     callback=self.parse,
                                     meta=meta,
                                     headers=self.headers,
                                     cookies=self.cookies)
| SFTEAM/scrapers | scenes/networkVIP4KPaged.py | networkVIP4KPaged.py | py | 7,531 | python | en | code | null | github-code | 90 |
6974930415 | import pygame
import pymysql
from gtts import gTTS
from time import sleep
from sense_hat import SenseHat
import subprocess
# Initialize SenseHat and pygame mixer
sense = SenseHat()  # global Sense HAT handle used by all sensor helpers below
pygame.mixer.init()  # must be initialised before any audio playback
def start_screen():
    """Show the start-up emoji on the Sense HAT via the dikke8 helper script."""
    subprocess.run(["python3", "emojis/dikke8.py"])
# Play a specific sound effect
def play_sound_effect(file_path):
    """Load *file_path* into the pygame mixer and start playback (non-blocking)."""
    pygame.mixer.music.load(file_path)
    pygame.mixer.music.play()
def get_message_and_emoji_from_db():
    """Fetch one random (message, emoji) pair from the magic8ball database.

    Returns (None, None) when the `answer` table is empty. The connection is
    closed in a finally block (the original leaked it if the query raised).
    """
    conn = pymysql.connect(host='localhost', user='root', password='', database='magic8ball')
    try:
        with conn.cursor() as cursor:
            cursor.execute("SELECT message, emoji FROM answer ORDER BY RAND() LIMIT 1")
            result = cursor.fetchone()
    finally:
        conn.close()
    if result:
        return result[0], result[1]
    return None, None
# Play an audio file
def play_audio(filename):
    """Play *filename* through pygame and block until playback finishes."""
    pygame.mixer.music.load(filename)
    pygame.mixer.music.play()
    # poll once a second until the mixer reports playback has ended
    while pygame.mixer.music.get_busy():
        sleep(1)
# Fetch an answer and play the corresponding TTS
def fetch_eightball_answer():
    """Pick a random answer, speak it via TTS and display its emoji.

    Returns the answer text, or "Error" when the database returned nothing.
    """
    message, emoji_file = get_message_and_emoji_from_db()
    if message:
        # synthesize the answer to an mp3 and play it back
        tts = gTTS(text=message, lang='en')
        print(str(message))
        tts.save("message.mp3")
        print("TTS MP3 generated.") #debug
        play_audio("message.mp3")
    if emoji_file:
        # Running the python file to display the emoji on SenseHat
        subprocess.run(["python3",f"emojis/{emoji_file}" ])
    return message if message else "Error"
def get_orientation():
    """Map the Sense HAT roll angle to a screen rotation of 0 or 180 degrees."""
    roll = sense.get_orientation_degrees()['roll']
    # Print the roll value for debugging
    print("Roll:", roll)
    return 0 if 0 <= roll < 180 else 180
def was_shaken(prev_values, threshold=1.5):
    """Detect a shake by comparing the accelerometer to the previous reading.

    `prev_values` is updated in place with the current reading; returns True
    when any axis changed by more than `threshold`.
    """
    accel = sense.get_accelerometer_raw()
    deltas = {}
    for axis in ('x', 'y', 'z'):
        reading = accel[axis]
        # difference against the previous sample, then remember the new one
        deltas[axis] = abs(prev_values[axis] - reading)
        prev_values[axis] = reading
    return any(delta > threshold for delta in deltas.values())
# module-level state: the last rotation applied to the Sense HAT display
last_orientation = 0

def smooth_rotation():
    """Rotate the display only on significant (>= 45 degree) changes.

    Prevents jittery re-rotation on small roll fluctuations.
    """
    global last_orientation
    current_orientation = get_orientation()
    # Determine the difference
    difference = current_orientation - last_orientation
    # Only adjust if the difference is significant
    if abs(difference) >= 45: # Adjust this threshold as needed
        sense.set_rotation(current_orientation)
        last_orientation = current_orientation
def main():
    """Main loop: wait for a shake, then speak and display a random answer."""
    start_screen()
    # Initial accelerometer values
    accel = sense.get_accelerometer_raw()
    prev_values = {'x': accel['x'], 'y': accel['y'], 'z': accel['z']}
    # Initial orientation
    # NOTE(review): this binds a *local* last_orientation; the module-level
    # last_orientation used by smooth_rotation() is not updated here --
    # confirm whether `global last_orientation` was intended.
    last_orientation = get_orientation()
    sense.set_rotation(last_orientation)
    while True:
        if was_shaken(prev_values):
            play_sound_effect('sound.mp3') # Play a sound effect when shaken
            sleep(5)
            answer = fetch_eightball_answer()
            smooth_rotation() # Adjust the orientation smoothly
            #sense.show_message(answer, scroll_speed=0.2)
            sleep(1) # delay to avoid repeated immediate shakes
if __name__ == "__main__":
main()
| alicia573/Magic8Ball | magic/Magic8Ball.py | Magic8Ball.py | py | 3,520 | python | en | code | 0 | github-code | 90 |
import time

# Morse code lookup: letter -> dots/dashes.
MORSE_CODE = {"a": ".-", "b": "-...", "c": "-.-.", "d": "-..", "e": ".",
              "f": "..-.", "g": "--.", "h": "....", "i": "..", "j": ".---", "k": "-.-",
              "l": ".-..", "m": "--", "n": "-.", "o": "---", "p": ".--.", "q": "--.-",
              "r": ".-.", "s": "...", "t": "-", "u": "..-", "v": "...-", "w": ".--",
              "x": "-..-", "y": "-.--", "z": "--.."}

# kept for backward compatibility with the original module-level name
morsecode = MORSE_CODE


def encode(text):
    """Translate *text* to a Morse string; characters without a code are skipped.

    Input is lower-cased first, generalizing the original (which only matched
    lowercase letters) without changing its behaviour on lowercase input.
    """
    return "".join(MORSE_CODE.get(ch, "") for ch in text.lower())


def transmit(code, port='COM3', baudrate=9800):
    """Blink *code* on the Arduino LED: b'H' turns it on, b'L' turns it off.

    A dash holds the LED for 2 seconds, a dot for 1. The serial port is
    opened once for the whole transmission (the original re-opened and
    closed it for every single symbol).

    :param port: change 'COM3' depending on the port your Arduino uses.
    """
    # local import so the module can be imported without pyserial installed
    import serial
    with serial.Serial(port, baudrate, timeout=1) as ser:
        for symbol in code:
            print(symbol)
            ser.write(b'H')
            time.sleep(2 if symbol == "-" else 1)
            ser.write(b'L')


if __name__ == "__main__":
    ourstring = ""  # any string that the user wants
    transmit(encode(ourstring))
| AndrewGordienko/Arduino | Morse Code/morsecode.py | morsecode.py | py | 1,001 | python | en | code | 0 | github-code | 90 |
# importing libraries
import cv2
import mediapipe as mp

# creating an object for video capture; 0 selects the built-in camera
cap = cv2.VideoCapture(0)

# mediapipe imports
# initialising the hand tracking model
mpHands = mp.solutions.hands
hands = mpHands.Hands()
# drawing_utils lets you visually track the hands
mpDraw = mp.solutions.drawing_utils

while True:
    # success: whether a frame was returned; image: the frame as a numpy array
    success, image = cap.read()
    if not success:
        # camera disconnected or stream ended (the original ignored this)
        break
    # mediapipe works with RGB while OpenCV delivers BGR
    imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # processing to detect the hands
    results = hands.process(imageRGB)
    # checking whether a hand is detected
    if results.multi_hand_landmarks:
        for handLms in results.multi_hand_landmarks:  # working with each hand
            print(handLms)
            for id, lm in enumerate(handLms.landmark):
                h, w, c = image.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                # highlight the tips of four fingers (landmark ids 8/12/16/20)
                if id == 20 or id == 8 or id == 12 or id == 16:
                    cv2.circle(image, (cx, cy), 25, (255, 0, 255), cv2.FILLED)
            mpDraw.draw_landmarks(image, handLms, mpHands.HAND_CONNECTIONS)

    cv2.imshow("Output", image)
    # press 'q' to quit (the original looped forever with no exit path)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the camera and close the display window on exit
cap.release()
cv2.destroyAllWindows()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 17 feb. 2020

@author: Robert G
'''

lista = []  # collected numbers

# ask for a number this many times
for x in range(3):
    # convert to int so the sort below is numeric, not lexicographic
    # (with strings, "10" would sort before "9")
    numero = int(input("Introduce un numero entero:"))
    lista.append(numero)

# sort the numbers ascending
lista.sort()

# print the sorted list; the original `print(...) , (lista)` built a
# throwaway tuple and never actually printed the list
print("Los numeros ordenados son: ", lista)
| RNutM/DAM2-SGE-U4 | Unidad4/ejercicios/Ejercicio06.py | Ejercicio06.py | py | 398 | python | es | code | 0 | github-code | 90 |
import cv2
import numpy as np
import tensorflow as tf
from keras.models import load_model

# Load YOLOv3 config and weights
net = cv2.dnn.readNetFromDarknet('cfg/yolov3-custom.cfg', 'backup/yolov3-custom_last.weights')

# Load emotion recognition model
model_h5 = "path/to/model"
emotion_model = load_model(model_h5)

# Define the classes and corresponding colors
classes = ['Anger', 'Happy', 'Sad', 'Surprise']
# classes = ['freezer', 'goku', 'vegeta']
num_classes = len(classes)
colors = np.random.uniform(0, 255, size=(num_classes, 4))

# Get the output layer names
layer_names = net.getLayerNames()
output_layers = [layer_names[i-1] for i in net.getUnconnectedOutLayers().tolist()]

# Load video file
video = r'data/DBZ-TEST/video1.mp4'
cap = cv2.VideoCapture(video)

# Loop through each frame
while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Detect objects in the frame
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    # Filter out low confidence detections
    class_ids = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:
                # Object detected: convert relative coords to a pixel box
                center_x = int(detection[0] * frame.shape[1])
                center_y = int(detection[1] * frame.shape[0])
                w = int(detection[2] * frame.shape[1])
                h = int(detection[3] * frame.shape[0])
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    # Process each face detected (non-max suppression drops overlapping boxes)
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            if w <= 0 or h <= 0:
                continue
            # Extract face region
            face = frame[y:y+h, x:x+w]
            # Check if face is not empty
            if face.size == 0:
                continue
            # Resize and preprocess image for the emotion recognition model
            face = cv2.resize(face, (448, 448))
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = np.expand_dims(face, axis=0)
            face = np.expand_dims(face, axis=-1)
            face = face / 255.0

            # Predict emotion using the model
            emotions = emotion_model.predict(face)
            emotion_label = classes[np.argmax(emotions)]

            # Draw bounding boxes and emotion labels
            label = "{}: {:.2f}".format(emotion_label, np.max(emotions))
            color = colors[class_ids[i]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.putText(frame, label, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    # Show the video frame with bounding boxes and emotion labels
    cv2.imshow('Object detection and Emotion recognition', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
# BUG FIX: the original referenced cv2.destroyAllWindows without calling it,
# so the display window was never closed
cv2.destroyAllWindows()
| gipinze/Computer-Vision-Thesis | Yolo/yolo_dbz_emotion_video.py | yolo_dbz_emotion_video.py | py | 3,276 | python | en | code | 0 | github-code | 90 |
import json

# Parse a flat quiz file: {class_name: {"answer": ..., "options": [...]}}
with open('D:\\QA COURSE\\API_Testing_Python\\Simple_Json2.txt') as file1:
    data1 = json.load(file1)
print(type(data1))
print(data1)

# Re-serialize the parsed data to a local file
with open('body.json', 'w') as f:
    json.dump(data1, f)

# Every question's answer must be one of its options
for content in data1.values():
    ans = content['answer']
    options = list(content['options'])
    assert ans in options

# Parse a nested quiz file: {"quiz": {class: {question_id: {...}}}}
with open('D:\\QA COURSE\\API_Testing_Python\\Simple_Json.txt') as file2:
    data2 = json.load(file2)
print(type(data2))

# iterate dict values directly instead of indexing through a list of keys
for class_data_content in data2['quiz'].values():
    for question_content in class_data_content.values():
        options1 = list(question_content['options'])
        answer1 = str(question_content['answer'])
        assert answer1 in options1

# Comparing json schemas of two json files
with open('D:\\QA COURSE\\API_Testing_Python\\Simple_Json_1.txt') as file3:
    data3 = json.load(file3)
assert data2 == data3
| sharifzodah/API_Python | Day5_JasonParsing/_02_Json_File_Parsing.py | _02_Json_File_Parsing.py | py | 1,261 | python | en | code | 0 | github-code | 90 |
9069458572 | import ast
import plotly.graph_objects as go
from matplotlib import pyplot as plt
import numpy as np
import plotly.express as px # interactive charts
import GCP_class
import requests
import dash
from dash import Dash, html, dcc, Input, Output, State, callback, ctx
import dash_bootstrap_components as dbc
from PIL import Image
from datetime import datetime
from google.cloud import storage
import gen_key
# module-level page setup and remote assets
url_secret=gen_key.url_secret
print("dashbaord.py url:",gen_key.url_secret[0])  # debug: base URL in use
# previously the cover images were loaded from local files:
#img1= Image.open("elderly_cover.jpg")
#img2= Image.open("lidar_cover_crop.jpg")
#img3= Image.open("pressure_cover_crop.jpg")
dash.register_page(__name__)
bucket_name = "ee4002d_bucket"
# authentication details
jsonfile="cred_details.json"
server=GCP_class.Server(bucket_name,jsonfile)
# the card cover images are fetched from the GCP bucket instead of local disk
img1= server.retrieve_img('template/elderly_cover.jpg')
img2= server.retrieve_img('template/lidar_cover.jpg')
img3= server.retrieve_img('template/pressure_cover.jpg')
# Card 1: profile bio summary with a link to the profile page; the
# placeholder texts are overwritten by the update_dashboard callback
card1=dbc.Card([
    dbc.CardImg(src=img1,title='Image by Lifestylememory on Freepik', top=True),
    dbc.CardBody([html.H5("Profile bio", className="card-title"),
                  html.P("Access both profile of the Elderly currently living in the unit as well as the caretaker."),
                  dbc.Row(children=
                          [
                              dbc.Col(children=[
                                  # html.P("test",id="key"),
                                  # html.P("test2",id="key2"),
                                  html.P("Postal Code ID:"),
                                  html.P("Elderly name:"),
                              ]),
                              dbc.Col(children=[
                                  html.P("Block unknown",id='dash-blk'),
                                  html.P("Name unknown",id='dash-name'),
                              ])
                          ]),  # values filled by the update_dashboard callback
                  dbc.Button("Access Profile bio",href=gen_key.DASH_URL_BASE+'profile', color="primary"),
                  ])
]#,style={"width": "30rem","height":"30rem"}
,class_name='h-100'
,style={"height":"100vh"}
)#card component

# Card 2: LIDAR sensor status and fall-detection results
card2 = dbc.Card([
    dbc.CardImg(src=img2, top=True),
    dbc.CardBody([html.H5("Lidar data", className="card-title"),
                  html.P("Check status of LIDAR sensor, scanning mode, and graph to verify if there is a detected fall."),
                  dbc.Row(children=
                          [
                              dbc.Col(children=[
                                  html.P("2D LIDAR result:"),
                                  html.P("Consciousness result:"),
                                  # html.Br(),
                                  # html.P("3D calibration:"),
                                  # html.P("Full calibration:"),
                              ]),
                              dbc.Col(children=[
                                  html.P([dbc.Badge("Unknown result", color="info",id="2d-result-dash", className="me-1")]),
                                  html.P([dbc.Badge("Unknown result",id="3d-result-dash", color="info",
                                                    className="me-1")]),
                                  # html.P(dbc.Button("Run",id="2d-run",size='sm')),
                                  # html.P(dbc.Button("Run",id="3d-run",size='sm')),
                                  # html.P(dbc.Button("Run",id="full-run",size='sm'))
                              ])
                          ]  # badge colors/texts set by update_dashboard
                          ),  # row for sensor transmission and on
                  dbc.Button("Access LIDAR readings",href=gen_key.DASH_URL_BASE+'lidar', color="primary"),
                  ])]
    #, style={"width": "30rem", "height": "30rem"}
    ,class_name='h-100'
    #,style={"height":"30rem"}
)  # card component

# Card 3: pressure tiles and microphone status
card3 = dbc.Card([
    dbc.CardImg(src=img3, top=True),
    dbc.CardBody([html.H5("Pressure tile & Mic readings", className="card-title"),
                  html.P("Check the status of the SMART pressure tiles, Graph or Microphone recordings for any anormalities or fall detection."),
                  dbc.Row(children=
                          [
                              dbc.Col(children=[
                                  html.P("Microphone status:"),
                                  html.P("Pressure Tile fall result:"),
                              ]),
                              dbc.Col(children=[
                                  html.P([dbc.Badge("Unknown result", id="mic-cmd-dash", color="info",
                                                    className="me-1")]),
                                  html.P([dbc.Badge("Unknown result", id="pressure-result-dash", color="info",
                                                    className="me-1")]),
                              ]),
                          ]  # badge colors/texts set by update_dashboard
                          ),  # row for sensor transmission and on
                  dbc.Button("Access Pressure graph & Mic recordings",href=gen_key.DASH_URL_BASE+'pressure', color="primary"),
                  ])
]#, style={"width": "30rem", "height": "30rem"}
,class_name='h-100'
# , style={"height": "30rem"}
)  # card component

# Page layout: title row, hint row, then the three cards side by side
layout = dbc.Container(children=[
    dcc.Store(id="elderly-name-storage",storage_type="session"),
    dbc.Row(children=[#dbc.Col("",width=1),
        dbc.Col(children=[dbc.Button("Back to Overview",href=gen_key.DASH_URL_BASE, color="primary",outline=True,size='sm',external_link=True),html.H1(children='Welcome user',id='title-name', className="display-3")])]),
    dbc.Row(children=[#dbc.Col("",width=1),
        dbc.Col(children=[html.P("Access the sensors on the Navigation bar for details")])])
    ,
    dbc.Container(dbc.Row(children=[
        #dbc.Col("",width=1),
        dbc.Col(card1,style={"height": "150%"}),
        dbc.Col(card2,style={"height": "150%"}),
        dbc.Col(card3,style={"height": "100%"}),
        #dbc.Col("",width=1),
    ],justify="center",align='center'),fluid=True)
],style = {'flexGrow': '1'},fluid=True)#end of layout
@callback(
    Output(component_id='title-name', component_property='children'),
    Output(component_id='dash-blk', component_property='children'),
    Output(component_id='dash-name', component_property='children'),
    Output(component_id="elderly-name-storage",component_property="data"),
    Output(component_id='2d-result-dash', component_property='children'),
    Output(component_id='2d-result-dash', component_property='color'),
    Output(component_id='3d-result-dash', component_property='children'),
    Output(component_id='3d-result-dash', component_property='color'),
    Output(component_id='pressure-result-dash', component_property='children'),
    Output(component_id='pressure-result-dash', component_property='color'),
    Output(component_id='mic-cmd-dash', component_property='children'),
    Output(component_id='mic-cmd-dash', component_property='color'),
    Input(component_id="main-storage",component_property="data"),
    Input(component_id="address_unit",component_property="data"),
    Input("2d-result-storage", "data"),
    Input("unconscious-result-storage", "data"),
    Input("pressure-result", "data"),
)
def update_dashboard(main,add,f2d,f3d,fps):
    """Refresh the dashboard title, stored name and sensor status badges.

    Inputs are the session storage key (``main``), the unit address (unused
    here but wired by the callback), and the latest 2D-fall, 3D-pose and
    pressure-tile results.  Returns one value per declared Output, in order.
    """
    # Load the elderly person's profile record from the backing store and
    # parse the Python-literal payload it holds.
    detail_direc = server.get_directory(main, "details")
    profile = ast.literal_eval(server.retrieve_file_string(detail_direc))
    elderly_name = profile['bio']['elderly']['Name']

    fall_detected = (f2d == "fall")
    pressure_detected = (fps == "True")

    # Badge colours: danger on a detected fall, warning for lying pose,
    # success otherwise.
    f2d_color = "danger" if fall_detected else "success"
    if f3d == "lying":
        f3d_color = "warning"
    elif f3d == "unconscious" and fall_detected:
        f3d_color = "danger"
    else:
        f3d_color = "success"
    fps_color = "danger" if pressure_detected else "success"

    # The microphone is armed whenever any fall indicator fired.
    if fall_detected or pressure_detected:
        mic_cmd_text, mic_cmd_color = 'Enabled', 'warning'
    else:
        mic_cmd_text, mic_cmd_color = 'Disabled', 'success'

    title = 'Welcome ' + gen_key.dash_session['given_name']
    return (title, main, elderly_name, elderly_name,
            f2d, f2d_color, f3d, f3d_color,
            fps, fps_color, mic_cmd_text, mic_cmd_color)
| splhadi/elderilyv2_deployment_file | pages/dashboard.py | dashboard.py | py | 8,825 | python | en | code | 0 | github-code | 90 |
18286154459 | import sys
import math
import itertools
import collections
from collections import deque
sys.setrecursionlimit(1000000)  # headroom for deeply recursive solutions
MOD = 10 ** 9 + 7  # common contest modulus (unused in this particular solution)
input = lambda: sys.stdin.readline().strip()  # fast reader; deliberately shadows builtin input
NI = lambda: int(input())  # read one integer
NMI = lambda: map(int, input().split())  # read a whitespace-separated line of ints (lazy)
NLI = lambda: list(NMI())  # read a whitespace-separated line of ints as a list
SI = lambda: input()  # read one raw line
def main():
    """Count the maximum number of mutually non-overlapping robot arms.

    Reads N, then N lines of ``X L`` (centre position, arm length) from
    stdin.  Robot i occupies the interval [X-L, X+L); intervals may touch
    at endpoints.  Prints the answer of the classic interval-scheduling
    greedy: sort by right endpoint and keep every interval whose left end
    is at or past the end of the last kept one.
    """
    N = NI()
    XL = [NLI() for _ in range(N)]
    # Turn each (position, arm length) pair into its occupied interval.
    arm_range = [[x - l, x + l] for x, l in XL]
    arm_range.sort(key=lambda iv: iv[1])  # greedy key: earliest right endpoint
    # The first interval is always kept; for N == 1 the loop below simply
    # does nothing, so no special case is needed.
    ans = 1
    arm_end = arm_range[0][1]
    for left, right in arm_range[1:]:
        if left >= arm_end:
            arm_end = right
            ans += 1
    print(ans)
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p02796/s998168721.py | s998168721.py | py | 943 | python | en | code | 0 | github-code | 90 |
18280177279 | n,d,a=map(int,input().split())
xh=[list(map(int, input().split())) for _ in range(n)]  # (position, health) per monster
xh.sort()  # sweep monsters left to right
cnt=0  # total number of bombs used (the answer)
import heapq # import of the heapq library
#heapq.heapify(a) # turn a list into a priority queue
#heapq.heappop(a) # pop the smallest element
#heapq.heappush(a, -2)
bomb=0  # total damage from bombs still covering the current position
he=[]  # min-heap of bomb centres (x+d), used for lazy expiry
heapq.heapify(he)
dic={}  # bomb centre -> damage dealt by the bombs placed there
# NOTE(review): if two monsters produce the same centre x+d, the plain
# assignments to dic[secchi] below overwrite the earlier entry while the heap
# holds a duplicate key -- looks incorrect for duplicate positions; confirm
# against the problem constraints (distinct X would make this safe).
for i in range(n):
    #print(xh[i],bomb,he)
    (x,h)=xh[i]
    if bomb==0:
        # No bombs active: place just enough bombs centred at x+d so the
        # blast [x, x+2d] reaches as far right as possible.
        secchi=x+d
        count=((-1*h)//a)*(-1)  # ceil(h / a) via negated floor division
        damage=count*a
        cnt+=count
        dic[secchi]=damage
        bomb+=damage
        heapq.heappush(he, secchi)
    else:
        # Expire bombs whose blast (centre tmp, radius d) ends before x.
        while a:  # a is a positive constant, so effectively "while True"
            tmp=heapq.heappop(he)
            if tmp+d<x:
                bomb-=dic[tmp]  # bomb no longer reaches x; drop its damage
            else:
                heapq.heappush(he, tmp)  # still covers x; put it back and stop
                break
            if bomb==0:
                break  # every active bomb has expired
        if bomb<h:
            # Residual health after active damage: top up with new bombs.
            h-=bomb
            secchi=x+d
            count=((-1*h)//a)*(-1)  # ceil(remaining health / a)
            damage=count*a
            cnt+=count
            dic[secchi]=damage
            bomb+=damage
            heapq.heappush(he, secchi)
print(cnt)
| Aasthaengg/IBMdataset | Python_codes/p02788/s576638145.py | s576638145.py | py | 1,076 | python | en | code | 0 | github-code | 90 |
73823327338 | from typing import TYPE_CHECKING
import requests
if TYPE_CHECKING:
from undergen.lib.data import Character
url = "https://api.15.ai/app/getAudioFile5"  # 15.ai TTS generation endpoint
cdn_url = "https://cdn.15.ai/audio/"  # CDN serving the generated WAV files
# Browser-like request headers; presumably required because the API rejects
# non-browser clients -- confirm before changing.
headers = {'authority': 'api.15.ai',
           'access-control-allow-origin': '*',
           'accept': 'application/json, text/plain, */*',
           'dnt': '1',
           'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36',
           'content-type': 'application/json;charset=UTF-8',
           'sec-gpc': '1',
           'origin': 'https://15.ai',
           'sec-fetch-site': 'same-site',
           'sec-fetch-mode': 'cors',
           'sec-fetch-dest': 'empty',
           'referer': 'https://15.ai/',
           'accept-language': 'en-US,en;q=0.9'}
def get_sound(character: "Character", text: str) -> bytes:
    """Synthesize *text* in *character*'s voice via the 15.ai API.

    Posts the generation request, then fetches the first generated take
    from the 15.ai CDN.

    Returns:
        The raw WAV file contents.

    Raises:
        RuntimeError: if either the API or the CDN responds with a
            non-200 status code.
        requests.Timeout: if either request exceeds the timeout.
    """
    character_name = character.sound_name
    emotion = "Normal"  # only the "Normal" emotion preset is used here
    print(f"Getting audio for {character_name} '{text}'...")
    # Explicit timeout: requests otherwise waits forever on a stalled server.
    response = requests.post(url, json = {
        "character": character_name,
        "emotion": emotion,
        "text": text
    }, headers = headers, timeout = 60)
    if response.status_code != 200:
        raise RuntimeError(f"15.ai responded with code {response.status_code}.")
    data_json = response.json()
    wav_name = data_json["wavNames"][0]  # first (and primary) generated take
    second_response = requests.get(cdn_url + wav_name, timeout = 60)
    if second_response.status_code != 200:
        raise RuntimeError(f"15.ai CDN responded with code {second_response.status_code}.")
    print("Audio success!")
    return second_response.content
| DigiDuncan/undergen | undergen/lib/audio.py | audio.py | py | 1,607 | python | en | code | 0 | github-code | 90 |
3370712317 | #####
# Class for preprocessing the data
#
import pandas as pd
class Preprocessor:
    """Extracts, resamples and merges smart-home telemetry into CSV files.

    Reads ``initial-data/TelemetryData.csv``, builds per-sensor tables,
    resamples them to hourly resolution, derives a master dataframe with
    daily-accumulated and per-hour absolute values, and writes everything
    under ``extracted-data/``.
    """
    # Maps sensor UUIDs (as found in the raw CSV) to readable column names.
    # The commented-out entries are sensors deliberately excluded here.
    json_id_dictionary = {
        # "af8313d0-22c2-49bc-a736-8cde9d6b6415": "Heat 0th floor (kWh)",
        # "e28aa982-3ab8-4be2-9954-8c0ee699dceb": "Water 0th floor (kWh)",
        # "5c10d36c-99e6-49b5-af06-0df5b9985b71": "Heat 1st floor (kWh)",
        # "5419cd17-8f71-4139-afa1-dec0bbfd21a4": "Water 1st floor (kWh)",
        # "481b48aa-5bba-44ae-a5ab-2c78c68bfcc5": "Heat 2nd floor (kWh)",
        # "00a46798-af26-49cd-84e9-fd64a69cbe24": "Water 2nd floor (kWh)",
        # "70ccaf33-1575-4bd5-ae4b-3cb6d857731a": "Electricity (kWh)",
        "bbc8c07b-b8e0-47e7-a4f3-7fb8d4768260": "Solar energy produced (Wh)",
        "b007fc66-9715-4e5a-b4dc-1540c92de99e": "Power imported from Grid (Wh)",
        "40299d14-bf30-4747-8f72-edfe7c26c15a": "Power exported to Grid (Wh)"
    }
    # Readable column/series names used throughout the pipeline.
    solar_id = "Solar energy produced (Wh)"
    power_imported_id = "Power imported from Grid (Wh)"
    power_exported_id = "Power exported to Grid (Wh)"
    def __init__(self, valid_to="2022-02-28"):
        """Run the full extract/resample/merge/export pipeline.

        valid_to: last date (inclusive) kept in the master dataframe.
        """
        pd.set_option('display.precision', 1)
        self.df_solar, self.df_power_imported, self.df_power_exported = self.create_tables_and_set_index(
            [self.solar_id, self.power_imported_id, self.power_exported_id])
        self.df_solar_resampled, self.df_power_imported_resampled, self.df_power_exported_resampled = \
            self.get_hourly_resolution([self.df_solar, self.df_power_imported, self.df_power_exported])
        self.master_df = self.create_master_df(valid_to)
        self.export([self.master_df, self.df_solar, self.df_power_imported, self.df_power_exported],
                    ["extracted-data/master-df.csv", "extracted-data/solar-produced.csv",
                     "extracted-data/power-imported.csv", "extracted-data/power-exported.csv"])
    def read_telemetry_data(self) -> pd.DataFrame:
        """Load the raw telemetry CSV and map sensor UUIDs to readable names."""
        df_telemetry = pd.read_csv("initial-data/TelemetryData.csv", names=["id", "timestamp", "value"],
                                   parse_dates=True)
        return df_telemetry.replace(self.json_id_dictionary)
    def create_tables_and_set_index(self, id_list: list) -> list:
        """Return one timestamp-indexed dataframe per sensor id in *id_list*."""
        df = self.read_telemetry_data()
        df_list = list()
        for id_ in id_list:
            table = df.loc[df["id"] == id_]
            table = table.rename(columns={"value": id_})
            table = table.set_index("timestamp")
            table.index = pd.to_datetime(table.index)  # set index to DateTimeIndex
            table = table.reindex(index=table.index[::-1])  # reverse df
            del table["id"]
            df_list.append(table)
        return df_list
    @staticmethod
    def get_abs_value_from_daily_acc(df: pd.DataFrame, old_column: str, new_column: str) \
            -> pd.DataFrame:
        """Derive per-row absolute values from a daily-accumulating counter.

        *old_column* resets to zero each day; the result in *new_column* is
        the row-to-row difference, corrected at day boundaries by adding the
        previous day's final total back.
        """
        df[new_column] = df[old_column].diff().fillna(0)  # get the difference of elements from previous elements
        # get date change locations where diff != 0 Days
        # (in df2["add"], True: whenever the day changed, False: whenever is a value from the same day)
        df["date"] = df.index.date
        df["add"] = df["date"].diff().ne("0D")
        # 3. add the previous total back
        df.loc[df["add"], new_column] += df[old_column].shift()[df["add"]]
        del df["date"]
        del df["add"]
        return df.fillna(0)
    @staticmethod
    def get_new_resolution_by_argument(df: pd.DataFrame, resolution: str) \
            -> pd.DataFrame:
        """Resample *df* to *resolution*, keeping the max per bucket, NaN -> 0."""
        return df.resample(resolution).max().fillna(value=0)
    def get_hourly_resolution(self, df_list: list) -> list:
        """Resample every dataframe in *df_list* to hourly resolution."""
        return_list = []
        for df in df_list:
            return_list.append(self.get_new_resolution_by_argument(df, 'H'))
        return return_list
    def create_master_df(self, valid_to: str) -> pd.DataFrame:
        """Merge the hourly sensor tables into one master dataframe.

        Columns: *_da are the raw daily-accumulating counters, *_absolute
        the per-hour values derived from them; demand is computed as
        imported + solar - exported.
        """
        # Column-name shorthands: *_da = daily accumulated, *_absolute = per hour.
        SDA = "solar_da"
        IDA = "imported_da"
        EDA = "exported_da"
        DDA = "demand_da"
        SA = "solar_absolute"
        IA = "imported_absolute"
        EA = "exported_absolute"
        DA = "demand_absolute"
        master_df = self.df_solar_resampled.copy()
        master_df = master_df.rename(columns={"Solar energy produced (Wh)": SDA})
        master_df[IDA] = self.df_power_imported_resampled.iloc[:, 0]
        master_df[EDA] = self.df_power_exported_resampled.iloc[:, 0]
        master_df[DDA] = master_df.apply(
            lambda row: self.calculate_demand(row[IDA], row[EDA], row[SDA]), axis=1)
        master_df = self.get_abs_value_from_daily_acc(master_df, SDA, SA)
        master_df = self.get_abs_value_from_daily_acc(master_df, IDA, IA)
        master_df = self.get_abs_value_from_daily_acc(master_df, EDA, EA)
        master_df = self.get_abs_value_from_daily_acc(master_df, DDA, DA)
        # master_df = self.del_lines(master_df, ["2020-01-01 00:00", "2020-03-29 02:00", "2021-03-28 02:00"])
        master_df = self.set_df_valid_date(master_df, valid_to)
        master_df.index = pd.to_datetime(master_df.index)
        # The two timestamps below are the DST spring-forward hours missing
        # from the data -- presumably; confirm against the raw CSV.
        master_df = self.fill_missing_values(master_df, ["2020-03-29 02:00", "2021-03-28 02:00"])
        return master_df
    @staticmethod
    def calculate_demand(imported: float, exported: float, solar: float) -> float:
        """Household demand = grid import + solar production - grid export."""
        return imported + solar - exported
    # TODO: instead of deleting completely, you could inject the value from an hour before
    @staticmethod
    def del_lines(df: pd.DataFrame, list_of_dates: list) -> pd.DataFrame:
        """Drop the rows whose index equals any timestamp in *list_of_dates*."""
        for date in list_of_dates:
            df = df[~(df.index == date)]
        return df
    # copy the value from the same time the day before (for absolute values)
    # zero for daily aggregated values
    def fill_missing_values(self, df: pd.DataFrame, timestamps: list) -> pd.DataFrame:
        """Patch the given timestamps: zero the *_da columns, copy the
        *_absolute columns from 24 rows (one day) earlier."""
        for timestamp in timestamps:
            observation_index = df.index.get_loc(timestamp)
            df.loc[timestamp, "solar_da":"demand_da"] = 0
            # assigns from a full row Series; pandas aligns by column label
            df.loc[timestamp, "solar_absolute":"demand_absolute"] = df.iloc[(observation_index-24)]
        return df
    @staticmethod
    def set_df_valid_date(df: pd.DataFrame, date: str) -> pd.DataFrame:
        """Keep only rows up to and including *date*."""
        return df[:date]  # eliminate rows after 2022-03-01
    @staticmethod
    def export(df_list: list, file_list: list):
        """Write each dataframe in *df_list* to the matching CSV path."""
        for df, filename in zip(df_list, file_list):
            df.to_csv(filename, index_label=False)
preprocessor = Preprocessor("2022-02-28")
| whyrutaken/thesis-repo | preprocessor.py | preprocessor.py | py | 6,390 | python | en | code | 0 | github-code | 90 |
3933793806 | import requests #首先导入库
import re
MaxSearchPage = 20 # number of result pages to crawl
CurrentPage = 0 # index of the page currently being crawled
DefaultPath = "/Users/caishilin/Desktop/pictures" # default download directory
NeedSave = 0 # whether downloaded images should be saved (0 = no)
def imageFiler(content):
    """Return every image URL captured from the "objURL" fields of *content*."""
    # Compile the extraction pattern, then collect all captured groups.
    obj_url_pattern = re.compile('"objURL":"(.*?)"', re.S)
    return obj_url_pattern.findall(content)
def nextSource(content): # 通过正则获取下一页的网址
next = re.findall('<div id="page">.*<a href="(.*?)" class="n">',content,re.S)[0]
print("---------" + "http://image.baidu.com" + next)
return next | burness/CV_bot | script/models/crawler.py | crawler.py | py | 584 | python | zh | code | 2 | github-code | 90 |
74873001575 | from django.urls import path
from . import views
app_name = 'cowshare'
urlpatterns = [
path('', views.index, name='index'),
path('send-email-verification/', views.send_email_verification, name='send_email_verification'),
path('profile/', views.profile, name='users-profile'),
path('products/', views.products, name='allproducts'),
path('products/<str:category>/', views.products, name='productscategory'),
path('productdetail/<int:pk>', views.product_detail, name='productdetail'),
path('userorder/', views.user_order, name='userorder'),
path('exchangeoffer/', views.exchangeoffer, name='exchangeoffer'),
path('usermessages/', views.usermessages, name='usermessages'),
path('search/', views.search, name='searchProducts'),
path('updateprofile', views.update_profile, name='update_profile'),
] | emmakodes/cowshare | cowshare/urls.py | urls.py | py | 838 | python | en | code | 0 | github-code | 90 |
22789911169 | import re
from pathlib import Path
from typing import Callable, Optional
import click
from pydantic_settings import BaseSettings
class GlobalSettings(BaseSettings):
    """Settings shared by every CLI command (currently empty placeholder)."""
    pass
class ModelInput(BaseSettings):
    """Mixin providing the --model-path option / ``model_path`` setting."""
    # click option descriptor paired with the field below; presumably
    # collected by the CLI wiring elsewhere in the package -- confirm.
    _model_path: Callable = click.option(
        "-m",
        "--model-path",
        type=click.Path(readable=True),
        required=True,
        help="Path of the model to be used",
    )
    model_path: Path
class PrecisionInput(BaseSettings):
    """Mixin providing the --n-bits-fractional-precision option (min 21)."""
    _n_bits_fractional_precision: Callable = click.option(
        "-f",
        "--n-bits-fractional-precision",
        type=click.IntRange(21),
        default=21,
        help=(
            "Lower bound for number of bits to represent fractional part of "
            "ciphertexts resulting from the key to be generated"
        ),
    )
    n_bits_fractional_precision: int
class KeyParamsOutput(BaseSettings):
    """Mixin: -o/--key-params-path, where key parameters are written."""
    _key_params_path: Callable = click.option(
        "-o",
        "--key-params-path",
        type=click.Path(writable=True),
        required=True,
        help="Path to store the key parameters",
    )
    key_params_path: Path
class KeyParamsInput(BaseSettings):
    """Mixin: -i/--key-params-path, where key parameters are read from."""
    _key_params_path: Callable = click.option(
        "-i",
        "--key-params-path",
        type=click.Path(readable=True),
        required=True,
        help="Path to load the key parameters from",
    )
    key_params_path: Path
class KeyOutput(BaseSettings):
    """Mixin: -o/--secret-key-path, where the generated secret key is written."""
    _secret_key_path: Callable = click.option(
        "-o",
        "--secret-key-path",
        type=click.Path(writable=True),
        required=True,
        help="Path to store the generated secret key",
    )
    secret_key_path: Path
class KeyInput(BaseSettings):
    """Mixin: -k/--key-path, where the key is read from."""
    _key_path: Callable = click.option(
        "-k",
        "--key-path",
        type=click.Path(readable=True),
        required=True,
        help="Path to load the key from",
    )
    key_path: Path
class PlaintextOutput(BaseSettings):
    """Mixin: -o/--plaintext-output-path for plaintext results."""
    _plaintext_output_path: Callable = click.option(
        "-o",
        "--plaintext-output-path",
        type=click.Path(writable=True),
        required=True,
        help="Path of the file to store the plaintext output",
    )
    plaintext_output_path: Path
class PlaintextInput(BaseSettings):
    """Mixin: -i/--plaintext-input-path for plaintext input."""
    _plaintext_input_path: Callable = click.option(
        "-i",
        "--plaintext-input-path",
        type=click.Path(readable=True),
        required=True,
        help="Path of the file containing the plaintext input",
    )
    plaintext_input_path: Path
class CiphertextOutput(BaseSettings):
    """Mixin: -o/--ciphertext-output-path for ciphertext results."""
    _ciphertext_output_path: Callable = click.option(
        "-o",
        "--ciphertext-output-path",
        type=click.Path(writable=True),
        required=True,
        help="Path of the file to store the ciphertext output",
    )
    ciphertext_output_path: Path
class CiphertextInput(BaseSettings):
    """Mixin: -i/--ciphertext-input-path for ciphertext input."""
    _ciphertext_input_path: Callable = click.option(
        "-i",
        "--ciphertext-input-path",
        type=click.Path(readable=True),
        required=True,
        help="Path of the file containing the ciphertext input",
    )
    ciphertext_input_path: Path
class CalibrationDataInput(BaseSettings):
    """Mixin: -c/--calibration-data-path for the calibration data zip."""
    _calibration_data_path: Callable = click.option(
        "-c",
        "--calibration-data-path",
        type=click.Path(readable=True),
        required=True,
        help="Path of the zip file containing the calibration data",
    )
    calibration_data_path: Path
def check_relu_mode(
    ctx: Optional[click.Context], param: Optional[click.Parameter], value: str
) -> str:
    """Click callback that validates and normalises a --relu-mode value.

    Accepted forms (case-insensitive): ``deg<N>`` or ``deg<N>_no_offset``,
    where <N> is a positive integer without a leading zero.

    Returns:
        The lower-cased value.

    Raises:
        click.BadParameter: if *value* does not match an accepted form.
    """
    value = value.lower()
    # fullmatch anchors both ends, replacing the manual ^...$ anchors;
    # raw string avoids escaping surprises in the pattern.
    if re.fullmatch(r"deg[1-9][0-9]*(_no_offset)?", value):
        return value
    raise click.BadParameter("Invalid relu_mode parameter.")
class ReluApproximationMode(BaseSettings):
    """Mixin: --relu-mode, validated/normalised by check_relu_mode."""
    _relu_mode: Callable = click.option(
        "--relu-mode",
        type=click.STRING,
        callback=check_relu_mode,
        default="deg3",
        required=False,
        help="Method to approximate ReLU using a polynomial",
    )
    relu_mode: str
class DomainCalibrationMode(BaseSettings):
    """Mixin: --domain-mode, either "min-max" or "mean-std"."""
    _domain_mode: Callable = click.option(
        "--domain-mode",
        type=click.Choice(["min-max", "mean-std"]),
        default="min-max",
        required=False,
        help="Method to calibrate the domain",
    )
    domain_mode: str
class KeyParamsConfig(
    GlobalSettings,
    ModelInput,
    PrecisionInput,
    CalibrationDataInput,
    ReluApproximationMode,
    DomainCalibrationMode,
    KeyParamsOutput,
):
    """Options for the key-parameter derivation command."""
    pass
class KeyGenConfig(GlobalSettings, KeyParamsInput, KeyOutput):
    """Options for the key generation command."""
    pass
class EncryptConfig(GlobalSettings, KeyInput, PlaintextInput, CiphertextOutput):
    """Options for the encryption command."""
    pass
class InferenceConfig(
    GlobalSettings, ModelInput, KeyInput, CiphertextInput, CiphertextOutput
):
    """Options for the encrypted inference command."""
    pass
class DecryptConfig(GlobalSettings, KeyInput, CiphertextInput, PlaintextOutput):
    """Options for the decryption command."""
    pass
| smile-ffg/he-man-tenseal | he_man_tenseal/config.py | config.py | py | 4,894 | python | en | code | 6 | github-code | 90 |
37383796011 |
'''
Abby Powell
apowell9@binghamton.edu
A52, Elizabeth Voroshylo
Simrohn Iftekhar
siftekh1@binghamton.edu
A53, Jonathan Cen
Final Project
Collaborated equally in designing and building the GameGUI
'''
import piece
import player
from tkinter import *
class GameGUI:
    """Tkinter GUI for a human-vs-computer tic-tac-toe game.

    Builds the 3x3 board out of Buttons, tracks both players' win counts,
    and wires up new-game / reset / settings controls.  The space ids laid
    out on the board (2,7,6 / 9,5,1 / 4,3,8) form a 3x3 magic square, so
    win detection in the player module presumably checks for triples
    summing to 15 -- confirm against player.Player.check_if_win.
    """
    # NOTE(review): START_LOC is never used inside this class -- presumably
    # the off-board start coordinate for pieces; confirm before removing.
    START_LOC = -200
    ONE_MORE = 1
    NONE = 0
    def __init__(self):
        #Creates the person and computer players
        self.__human = player.Player('X')
        self.__computer = player.ComputerPlayer('O')
        #Says if there is a winner yet
        self.__has_winner = False
        #Creates the main window
        self.__game_wn = Tk()
        #Renames title of the main window
        self.__game_wn.title('Tic Tac Toe')
        #Creates labels for Player1 to show who they are and their wins
        self.__player_1_label = Label(self.__game_wn, font=('Times 15 bold'),
            text = 'Player')
        self.__player_1_wins_label = Label(self.__game_wn, font=('Times 13'),
            text = 'Wins: ')
        #Creates labels for Player2 to show it is the computer
        # and its wins
        self.__player_2_label = Label(self.__game_wn, font=('Times 15 bold'),
            text = 'Computer')
        self.__player_2_wins_label = Label(self.__game_wn, font=('Times 13'),
            text = 'Wins: ')
        #Creates a label to show player1's number of wins
        self.__player_1_num_wins = IntVar()
        self.__player_1_num_wins.set(0)
        self.__player_1_wins_count = Label(self.__game_wn, font = 'Times 13',
            textvariable = self.__player_1_num_wins)
        #Creates a label to show player2's number of wins
        self.__player_2_num_wins = IntVar()
        self.__player_2_num_wins.set(0)
        self.__player_2_wins_count = Label(self.__game_wn, font = 'Times 13',
            textvariable = self.__player_2_num_wins)
        #Sets up the buttons on the tic-tac-toe board
        self.__space_1_2_str = StringVar()
        self.__space_1_2_str.set('\t\n\t')
        self.__space_1_2 = Button(self.__game_wn, font=('Times 15 bold'),
            fg = 'red', height=3, width=6,
            textvariable = self.__space_1_2_str, command = self.set_space_1_2)
        self.__space_1_7_str = StringVar()
        self.__space_1_7_str.set('\t\n\t')
        self.__space_1_7 = Button(self.__game_wn, font=('Times 15 bold'),
            fg = 'red', height=3, width=6,
            textvariable = self.__space_1_7_str, command = self.set_space_1_7)
        self.__space_1_6_str = StringVar()
        self.__space_1_6_str.set('\t\n\t')
        self.__space_1_6 = Button(self.__game_wn, font=('Times 15 bold'),
            fg = 'red', height=3, width=6,
            textvariable = self.__space_1_6_str, command = self.set_space_1_6)
        self.__space_2_9_str = StringVar()
        self.__space_2_9_str.set('\t\n\t')
        self.__space_2_9 = Button(self.__game_wn, font=('Times 15 bold'),
            fg = 'red', height=3, width=6,
            textvariable = self.__space_2_9_str, command = self.set_space_2_9)
        self.__space_2_5_str = StringVar()
        self.__space_2_5_str.set('\t\n\t')
        self.__space_2_5 = Button(self.__game_wn, font=('Times 15 bold'),
            fg = 'red', height=3, width=6,
            textvariable = self.__space_2_5_str, command = self.set_space_2_5)
        self.__space_2_1_str = StringVar()
        self.__space_2_1_str.set('\t\n\t')
        self.__space_2_1 = Button(self.__game_wn, font=('Times 15 bold'),
            fg = 'red', height=3, width=6,
            textvariable = self.__space_2_1_str, command = self.set_space_2_1)
        self.__space_3_4_str = StringVar()
        self.__space_3_4_str.set('\t\n\t')
        self.__space_3_4 = Button(self.__game_wn, font=('Times 15 bold'),
            fg = 'red', height=3, width=6,
            textvariable = self.__space_3_4_str, command = self.set_space_3_4)
        self.__space_3_3_str = StringVar()
        self.__space_3_3_str.set('\t\n\t')
        self.__space_3_3 = Button(self.__game_wn, font=('Times 15 bold'),
            fg = 'red', height=3, width=6,
            textvariable = self.__space_3_3_str, command = self.set_space_3_3)
        self.__space_3_8_str = StringVar()
        self.__space_3_8_str.set('\t\n\t')
        self.__space_3_8 = Button(self.__game_wn, font=('Times 15 bold'),
            fg = 'red', height=3, width=6,
            textvariable = self.__space_3_8_str, command = self.set_space_3_8)
        #Sets up a button that allows the user to change some of the game settings
        self.__gear_img = PhotoImage(file = 'smallGear.png')
        self.__settings_button = Button(image = self.__gear_img,
            command = self.open_settings)
        #Creates spacer labels for the game board
        self.__vertical_space_1 = Label(self.__game_wn,
            text = '|\n|\n|\n|\n|\n|\n|')
        self.__vertical_space_2 = Label(self.__game_wn,
            text = '|\n|\n|\n|\n|\n|\n|')
        self.__vertical_space_3 = Label(self.__game_wn,
            text = '|\n|\n|\n|\n|\n|\n|')
        self.__vertical_space_4 = Label(self.__game_wn,
            text = '|\n|\n|\n|\n|\n|\n|')
        self.__vertical_space_5 = Label(self.__game_wn,
            text = '|\n|\n|\n|\n|\n|\n|')
        self.__vertical_space_6 = Label(self.__game_wn,
            text = '|\n|\n|\n|\n|\n|\n|')
        self.__horizontal_space_1 = Label(self.__game_wn,
            text = '------------------')
        self.__horizontal_space_2 = Label(self.__game_wn,
            text = '------------------')
        self.__horizontal_space_3 = Label(self.__game_wn,
            text = '------------------')
        self.__horizontal_space_4 = Label(self.__game_wn,
            text = '------------------')
        self.__horizontal_space_5 = Label(self.__game_wn,
            text = '------------------')
        self.__horizontal_space_6 = Label(self.__game_wn,
            text = '------------------')
        #Creates a button if user wants to start a new game
        self.__new_game_button = Button(self.__game_wn, text = 'New\nGame', \
            font = 'Times 11', command = self.start_new_game)
        #Creates a button if user wants to reset the game information
        self.__reset_button = Button(self.__game_wn, text = 'Reset', \
            font = 'Times 11', command = self.reset_everything)
        #Creates a label to show who won the past game
        self.__winner_var = StringVar()
        self.__winner_var.set('')
        self.__winner_label = Label(self.__game_wn, font = 'Times 15 bold',
            textvariable = self.__winner_var)
        #Sets up widgets in the window
        self.__player_1_label.grid(row=0, column=0)
        self.__player_1_wins_label.grid(row=1, column=0)
        self.__player_2_label.grid(row=2, column=0)
        self.__player_2_wins_label.grid(row=3, column=0)
        self.__player_1_wins_count.grid(row=1, column=1)
        self.__player_2_wins_count.grid(row=3, column=1)
        self.__space_1_2.grid(row=0, column=2, sticky=S + N + E + W)
        self.__horizontal_space_1.grid(row=1, column=2)
        self.__space_2_9.grid(row=2, column=2, sticky=S + N + E + W)
        self.__horizontal_space_2.grid(row=3, column=2)
        self.__space_3_4.grid(row=4, column=2, sticky=S + N + E + W)
        self.__vertical_space_1.grid(row=0, column=3)
        self.__vertical_space_2.grid(row=2, column=3)
        self.__vertical_space_3.grid(row=4, column=3)
        self.__space_1_7.grid(row=0, column=4, sticky=S + N + E + W)
        self.__horizontal_space_3.grid(row=1, column=4)
        self.__space_2_5.grid(row=2, column=4, sticky=S + N + E + W)
        self.__horizontal_space_4.grid(row=3, column=4)
        self.__space_3_3.grid(row=4, column=4, sticky=S + N + E + W)
        self.__vertical_space_4.grid(row=0, column=5)
        self.__vertical_space_5.grid(row=2, column=5)
        self.__vertical_space_6.grid(row=4, column=5)
        self.__space_1_6.grid(row=0, column=6, sticky=S + N + E + W)
        self.__horizontal_space_5.grid(row=1, column=6)
        self.__space_2_1.grid(row=2, column=6, sticky=S + N + E + W)
        self.__horizontal_space_6.grid(row=3, column=6)
        self.__space_3_8.grid(row=4, column=6, sticky=S + N + E + W)
        self.__new_game_button.grid(row=0, column=7)
        self.__reset_button.grid(row=2, column=7)
        self.__winner_label.grid(row=4, column=0)
        self.__settings_button.grid(row=4, column=7)
        #Runs the mainloop
        mainloop()
    #The following methods say what to do if the user selects a space
    #Called when the Button self.__space_1_2 is pushed
    #invoke __can_make_move(loc), __set_space(loc), __update_round()
    def set_space_1_2(self):
        #Checks if user is allowed to select the location
        if self.__can_make_move(2):
            #Uses piece, updates information, allows computer to move next
            self.__set_space(2)
            #Places character on button to show that user already chose the space
            self.__space_1_2_str.set('X')
            #Checks whether their is a winner, a tie, or if the computer can move
            self.__update_round()
    #Same logic as set_space_1_2
    def set_space_1_7(self):
        if self.__can_make_move(7):
            self.__set_space(7)
            self.__space_1_7_str.set('X')
            self.__update_round()
    #Same logic as set_space_1_2
    def set_space_1_6(self):
        if self.__can_make_move(6):
            self.__set_space(6)
            self.__space_1_6_str.set('X')
            self.__update_round()
    #Same logic as set_space_1_2
    def set_space_2_9(self):
        if self.__can_make_move(9):
            self.__set_space(9)
            self.__space_2_9_str.set('X')
            self.__update_round()
    #Same logic as set_space_1_2
    def set_space_2_5(self):
        if self.__can_make_move(5):
            self.__set_space(5)
            self.__space_2_5_str.set('X')
            self.__update_round()
    #Same logic as set_space_1_2
    def set_space_2_1(self):
        if self.__can_make_move(1):
            self.__set_space(1)
            self.__space_2_1_str.set('X')
            self.__update_round()
    #Same logic as set_space_1_2
    def set_space_3_4(self):
        if self.__can_make_move(4):
            self.__set_space(4)
            self.__space_3_4_str.set('X')
            self.__update_round()
    #Same logic as set_space_1_2
    def set_space_3_3(self):
        if self.__can_make_move(3):
            self.__set_space(3)
            self.__space_3_3_str.set('X')
            self.__update_round()
    #Same logic as set_space_1_2
    def set_space_3_8(self):
        if self.__can_make_move(8):
            self.__set_space(8)
            self.__space_3_8_str.set('X')
            self.__update_round()
    #Says if user is allowed to make a move based on whether it is their
    # turn, there is no winner yet, and that the spot they chose is open
    #param - num (int) - assigned location of the selected space
    #return bool - whether or not user can choose the selected space
    def __can_make_move(self, num):
        return self.__human.get_status() and (not self.__has_winner) and \
               (str(num) in self.__human.get_spots_open())
    #After player has taken their turn says what to do next
    def __update_round(self):
        #Says what to do if person wins
        if self.__human.check_if_win():
            self.__has_winner = True
            num_wins = self.__player_1_num_wins.get() + GameGUI.ONE_MORE
            self.__player_1_num_wins.set(num_wins)
            self.__winner_var.set('You Win!')
            self.__winner_label['fg'] = 'green'
        #Says what to do if there is a tie
        elif self.__check_if_tie():
            self.__has_winner = True
            self.__winner_var.set('It\'s a tie!')
            self.__winner_label['fg'] = 'orange'
        #Allows the computer to move if the user neither wins nor ties
        else:
            self.__play_computer()
    #Computer takes a turn playing
    #invokes __set_computer_space, ComputerPlayer.make_move(),
    # Player.check_if_win(), Player.change_status()
    def __play_computer(self):
        #Computer selects open location
        computer_num = self.__computer.make_move()
        #Computer uses a piece in the selected location
        self.__set_computer_space(computer_num)
        #Says what to do if the computer won
        if self.__computer.check_if_win():
            self.__has_winner = True
            # NOTE(review): literal 1 here, GameGUI.ONE_MORE in the human
            # branch -- same value, inconsistent style.
            num_wins = self.__player_2_num_wins.get() + 1
            self.__player_2_num_wins.set(num_wins)
            self.__winner_var.set('You Lose')
            self.__winner_label['fg'] = 'red'
        #Allows user to move again if the computer did not win
        else:
            self.__human.change_status()
    #Reserves a selected space that is open
    #param num - selected location
    def __set_space(self, num):
        #Person uses a piece in the selected location
        self.__human.use_piece(num)
        #Updates spots available for computer to choose from
        self.__computer.set_spots_used(num)
        #Shows that it is the computer's turn to move
        self.__computer.change_status()
    #Updates information after computer chooses a location
    #param - num (int) - location that the computer selected
    def __set_computer_space(self, num):
        #For any location that the computer chose, marks the spots as used and
        # updates user's information to how that they cannot take the same spot
        if num == 1:
            self.__space_2_1_str.set('O')
            self.__human.set_spots_used(1)
        elif num == 2:
            self.__space_1_2_str.set('O')
            self.__human.set_spots_used(2)
        elif num == 3:
            self.__space_3_3_str.set('O')
            self.__human.set_spots_used(3)
        elif num == 4:
            self.__space_3_4_str.set('O')
            self.__human.set_spots_used(4)
        elif num == 5:
            self.__space_2_5_str.set('O')
            self.__human.set_spots_used(5)
        elif num == 6:
            self.__space_1_6_str.set('O')
            self.__human.set_spots_used(6)
        elif num == 7:
            self.__space_1_7_str.set('O')
            self.__human.set_spots_used(7)
        elif num == 8:
            self.__space_3_8_str.set('O')
            self.__human.set_spots_used(8)
        else:
            self.__space_2_9_str.set('O')
            self.__human.set_spots_used(9)
    #Checks if there is a tie by seeing if there are any more spots available
    #invokes Player.show_num_spots_open()
    #return bool - whether or not there is a tie
    def __check_if_tie(self):
        return (self.__human.show_num_spots_open() == GameGUI.NONE) or\
               (self.__computer.show_num_spots_open() == GameGUI.NONE)
    #Starts the game again
    #invokes __reset_spaces, Player.reset_pieces()
    #Called by the __new_game_button
    def start_new_game(self):
        #Resets the locations of both player's pieces to be off the board
        self.__human.reset_pieces()
        self.__computer.reset_pieces()
        #Says that there is no winner yet since the game is restarted
        self.__has_winner = False
        #Removes location markers from GUI
        self.__reset_spaces()
    #Resets all information
    #Called by the reset button
    def reset_everything(self):
        #Resets both players' information
        self.__human.reset_player()
        self.__player_1_num_wins.set(0)
        self.__computer.reset_player()
        self.__player_2_num_wins.set(0)
        #Shows that there is no winner yet since the game is restarted
        self.__has_winner = False
        #Removes the location markers from the GUI
        self.__reset_spaces()
    #Resets all tic-tac-toe spaces to be blank again
    def __reset_spaces(self):
        self.__space_1_2_str.set('\t\n\t')
        self.__space_1_7_str.set('\t\n\t')
        self.__space_1_6_str.set('\t\n\t')
        self.__space_2_9_str.set('\t\n\t')
        self.__space_2_5_str.set('\t\n\t')
        self.__space_2_1_str.set('\t\n\t')
        self.__space_3_4_str.set('\t\n\t')
        self.__space_3_3_str.set('\t\n\t')
        self.__space_3_8_str.set('\t\n\t')
        self.__winner_var.set('')
        #Says that it is the user's turn, not the computer's turn
        if not self.__human.get_status():
            self.__human.change_status()
        if self.__computer.get_status():
            self.__computer.change_status()
    #Opens another window to allow a user to change the game's color settings
    #Called by the __settings_button
    def open_settings(self):
        #Creates a new window
        settings_wn = Tk()
        #Changes the title of the window
        settings_wn.title('settings')
        piece_color_label = Label(settings_wn, text = 'Set Pieces Color:')
        #Creates button that allow the user to change the color of the pieces
        black_button = Button(settings_wn, text = 'Black', fg = 'black',
            font = 'Times 15 bold',
            command = self.set_black_color)
        red_button = Button(settings_wn, text = 'Red', fg = 'red',
            font = 'Times 15 bold', command = self.set_red_color)
        blue_button = Button(settings_wn, text = 'Blue', fg = 'blue',
            font = 'Times 15 bold',
            command = self.set_blue_color)
        green_button = Button(settings_wn, text = 'Green', fg = 'green',
            font = 'Times 15 bold',
            command = self.set_green_color)
        #Sets up the buttons and the label in the settings window
        piece_color_label.grid(sticky = S + N + E + W)
        black_button.grid(row = 1, sticky = S + N + E + W)
        red_button.grid(row = 2, sticky = S + N + E + W)
        blue_button.grid(row = 3, sticky = S + N + E + W)
        green_button.grid(row = 4, sticky = S + N + E + W)
        #Runs the mainloop of the settings window
        mainloop()
    #Sets the color of the pieces to black
    def set_black_color(self):
        self.__set_color('black')
    #Sets the color of the pieces to red
    def set_red_color(self):
        self.__set_color('red')
    #Sets the color of the pieces to blue
    def set_blue_color(self):
        self.__set_color('blue')
    #Sets the color of the pieces to green
    def set_green_color(self):
        self.__set_color('green')
    #Sets the color of the piece markers on the game board buttons to a
    # specified color
    #param - color_str (str) - color to change the pieces to
    def __set_color(self, color_str):
        self.__space_1_2['fg'] = color_str
        self.__space_1_7['fg'] = color_str
        self.__space_1_6['fg'] = color_str
        self.__space_2_9['fg'] = color_str
        self.__space_2_5['fg'] = color_str
        self.__space_2_1['fg'] = color_str
        self.__space_3_4['fg'] = color_str
        self.__space_3_3['fg'] = color_str
        self.__space_3_8['fg'] = color_str
# Launch the game immediately on import/run.
GameGUI()
| abigail-powell/Tic-Tac-Toe | game_GUI.py | game_GUI.py | py | 18,842 | python | en | code | 0 | github-code | 90 |
1814948126 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 21 14:13:51 2020
@author: Rajesh
"""
"""
Name:
Pallindromic Integer
Filename:
pallindromic.py
Problem Statement:
You are given a space separated list of integers.
If all the integers are positive and if any integer is a palindromic integer,
then you need to print True else print False.
(Take Input from User)
Data:
Not required
Extension:
Not Available
Hint:
A palindromic number or numeral palindrome is a number that remains the same
when its digits are reversed.
Like 16461, for example, it is "symmetrical"
"""
# Read numbers until an empty line; for each, report whether it is a
# palindrome (reads the same forwards and backwards).
while True:
    num = input('Enter any number :')
    # Stop on empty input *before* doing any work with it (the original
    # computed the reversed copy first, and also made a pointless slice copy).
    if not num:
        break
    if num == num[::-1]:
        print('True')
    else:
        print('False')
| Rajesh-sharma92/FTSP_2020 | Python_CD6/pallindromic.py | pallindromic.py | py | 881 | python | en | code | 3 | github-code | 90 |
26720415427 | # -- coding: utf-8 --
import logging
import time
import uuid
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
import os
from main import main
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'upload/'
if not os.path.exists(app.config['UPLOAD_FOLDER']):
os.mkdir(app.config['UPLOAD_FOLDER'])
def get_logger():
    """Configure the root logger to write INFO+ records to a timestamped
    file under ``log/`` and return it.

    Safe to call more than once: directory creation is idempotent and a
    second file handler is only attached if none is present yet (the
    original stacked a new handler on every call, duplicating log lines).
    """
    os.makedirs("log", exist_ok=True)
    logger = logging.getLogger()
    if not any(isinstance(h, logging.FileHandler) for h in logger.handlers):
        fh = logging.FileHandler(filename=f"log/{time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())}.log",
                                 encoding="utf-8", mode="a")
        fh.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s : %(message)s"))
        logger.addHandler(fh)
    logger.setLevel(logging.INFO)
    return logger
logger = get_logger()
@app.route('/uploader', methods=['GET', 'POST'])
def uploader():
    """Handle the upload form: on POST run OCR on the file, on GET show the form.

    The upload is saved under a fresh UUID-based name, processed via
    ``main``, and removed again before the extracted text is returned.
    """
    if request.method != 'POST':
        return render_template('fileupload.html')
    upload = request.files['file']
    # keep only the last three characters of the sanitized name as extension
    ext = secure_filename(upload.filename)[-3:]
    saved_path = os.path.join(app.config['UPLOAD_FOLDER'], uuid.uuid1().hex + "." + ext)
    upload.save(saved_path)
    text = main({"source": saved_path, "debug": False})
    if os.path.isfile(saved_path):
        os.remove(saved_path)
    return text
if __name__ == '__main__':
app.config['JSON_AS_ASCII'] = False
app.run(debug=True)
| NephrenCake/SealOcr | API.py | API.py | py | 1,350 | python | en | code | 33 | github-code | 90 |
3323266020 | import json
# separate sentences into words
import jieba
# Read the first 100k sentence pairs from the translation corpus.
# NOTE(review): the path is a hard-coded Windows location — parameterize
# before running elsewhere.
l=0
cn=[]
en=[]
with open('D:\\download\\translation2019zh\\translation2019zh_train.json', encoding='utf-8') as f:
    # each line of the corpus is a standalone JSON object with
    # "chinese" and "english" fields
    for i in range(100000):
        line=f.readline()
        j=json.loads(line)
        # print(j["chinese"])
        cn.append(j["chinese"])
        en.append(j["english"])
        # l counts lines read but is never used afterwards
        l+=1
# NOTE(review): redundant — the `with` block above already closed the file
f.close()
# Segment every Chinese sentence into words with jieba.
cn_words=[]
for s in cn:
    words=[]
    w=jieba.cut(s)
    for word in w:
        words.append(word)
    cn_words.append(words)
print("save")
# Write all segmented words, one per line, to 1.txt.
with open("1.txt","w",encoding="utf-8") as f:
    for words in cn_words:
        for word in words:
            f.write(word+"\n")
71774381738 | """
compute metrics on model predictions
"""
import argparse
import logging
import subprocess
from pathlib import Path
import pandas as pd
import pyhocon
from evaluate_summ import compute_summ_metrics
logger = logging.getLogger(__name__)
def load_data(file_path: Path):
    """Read a tab-separated file into a DataFrame, keeping empty cells as ``""``
    (``na_filter=False`` prevents pandas from turning them into NaN)."""
    return pd.read_csv(file_path, sep="\t", na_filter=False)
def main(config, no_ref: bool = False):
    """Collect gold/predicted background summaries for a split and score them.

    config: parsed experiment config; must provide "split" and either
        "model" == "gpt-3.5-turbo" with "output_path", or
        "model_name_or_path" (HF-style models).
    no_ref: when True, withhold references and run reference-free metrics.
    """
    # get events from the split
    split = "dev" if config["split"] == "validation" else config["split"]
    split_path = f"data/splits/{split}.txt"
    with open(split_path, "r") as rf:
        events = [line.strip() for line in rf.readlines()]
    logger.info(f"split: {split}")
    logger.info(f"events: {events}")
    gold_path = Path("data/events")
    if config.get("model", None) == "gpt-3.5-turbo":
        # GPT predictions are already laid out as per-annotator tsv files
        pred_path = config["output_path"] / "preds"
    else:
        # convert JSON-L file to per annotator tsv files
        # necessary for HF based models
        pred_jsonl_path = (
            Path(config["model_name_or_path"]) / f"{config['split']}_preds.jsonl"
        )
        pred_path = Path(config["model_name_or_path"]) / "preds"
        subprocess.run(
            [
                "python",
                "src/reconstruct_ann_pred.py",
                "--preds",
                str(pred_jsonl_path),
                "--data",
                "data",
                "--split",
                split,
                "--out",
                str(pred_path),
            ],
            check=True,
            capture_output=True,
        )
    logger.info(f"gold: {gold_path}")
    logger.info(f"pred: {pred_path}")
    src, tgt, pred = [], [], []
    for event in events:
        # three annotators per event, each with matching gold/pred tsv files
        anns = [f"annotator{idx}" for idx in range(1, 4)]
        for ann in anns:
            gold_df = load_data(gold_path / event / f"{ann}.tsv")
            pred_df = load_data(pred_path / event / f"{ann}.tsv")
            # curate source
            updates = [
                f"Date: {gold_df['Date'][idx]}, Article: {gold_df['Update'][idx]}"
                for idx in range(len(gold_df["Update"]))
            ]
            # the i-th example's source is the concatenation of updates 0..i-1
            src += [" ".join(updates[:idy]) for idy in range(1, len(gold_df["Update"]))]
            # tgt
            tgt += list(gold_df["Background"][1:])
            # pred
            pred += list(pred_df["Background"][1:])
    assert (
        len(src) == len(tgt) == len(pred)
    ), f"src: {len(src)}, tgt: {len(tgt)}, pred: {len(pred)}"
    logger.info(f"src: {len(src)}, tgt: {len(tgt)}, pred: {len(pred)}")
    if no_ref:
        tgt = None
        logger.info("running in reference-free mode")
    scores = compute_summ_metrics(src=src, tgt=tgt, pred=pred)
    logger.info(scores)
def init_config(config_path: Path, config_name: str):
    """Load one named HOCON config, namespace its paths, and wire up logging.

    ``output_path`` and ``log_path`` are converted to Path objects and
    suffixed with the config name; logging goes both to stderr and to
    ``<log_path>/log_summ_scores.txt``.
    """
    config = pyhocon.ConfigFactory.parse_file(config_path)[config_name]
    for key in ("output_path", "log_path"):
        config[key] = Path(config[key]) / f"{config_name}"
    log_handlers = [
        logging.StreamHandler(),
        logging.FileHandler(
            config["log_path"] / "log_summ_scores.txt",
            mode="w",
        ),
    ]
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
        handlers=log_handlers,
    )
    logger.info(pyhocon.HOCONConverter.convert(config, "hocon"))
    return config
def parse_args():
    """Parse the command-line options of the prediction-scoring script."""
    arg_parser = argparse.ArgumentParser(description="score predictions")
    arg_parser.add_argument("--config", type=Path, help="config path")
    arg_parser.add_argument("--config-name", type=str, help="config name")
    arg_parser.add_argument("--no-ref", action="store_true",
                            help="only run reference-free metrics")
    return arg_parser.parse_args()
if __name__ == "__main__":
args = parse_args()
config = init_config(args.config, args.config_name)
main(config, no_ref=args.no_ref)
| amazon-science/background-summaries | src/score.py | score.py | py | 3,932 | python | en | code | 0 | github-code | 90 |
38993053454 | from __future__ import annotations
import typing
import toolcli
import toolsql
from ctc import config
from ctc import spec
from .. import schema_utils
def create_tables(
    networks: typing.Sequence[spec.NetworkReference] | None = None,
    datatypes: typing.Sequence[str] | None = None,
    verbose: bool = True,
    confirm: bool = False,
) -> None:
    """Create database tables for the given networks and datatypes.

    networks / datatypes default to every used network and every known
    datatype.  Unless ``confirm`` is True, the user is asked interactively
    before anything is created.
    """
    # get networks and datatypes
    if networks is None:
        networks = config.get_used_networks()
    if datatypes is None:
        datatypes = schema_utils.get_all_datatypes()

    # print preamble
    if verbose or not confirm:
        print(
            'creating tables for',
            len(datatypes),
            'datatype(s) across',
            len(networks),
            'network(s)',
        )
        if len(datatypes) > 1:
            print(' - datatypes:')
            for datatype in datatypes:
                print(' -', datatype)
        elif datatypes:
            # bugfix: the original printed the loop variable `datatype` here,
            # which is undefined on this branch and raised a NameError
            print(' - datatype:', datatypes[0])
        if len(networks) > 1:
            print(' - networks:')
            for network in networks:
                print(' -', network)
        elif networks:
            # bugfix: same undefined-variable problem as above for `network`
            print(' - network:', networks[0])

    # get confirmation
    if not confirm:
        if not toolcli.input_yes_or_no('continue? '):
            raise Exception('aborted creation of tables')

    # create tables, one (network, datatype) pair at a time
    for network in networks:
        for datatype in datatypes:
            db_config = config.get_db_config(
                network=network,
                datatype=datatype,
            )
            db_schema = schema_utils.get_prepared_schema(
                datatype=datatype,
                network=network,
            )
            toolsql.create_tables(
                db_schema=db_schema,
                db_config=db_config,
            )
| 0xmzz/checkthechain | src/ctc/db/db_management/dba_utils.py | dba_utils.py | py | 1,822 | python | en | code | null | github-code | 90 |
39499610051 | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib import messages
from rest_framework import generics
from .models import PatientDetails
from .serializers import PatientDetailsSerializer
import io
import csv
# Create your views here.
class PatientDetailsListView(generics.ListAPIView):
    """List requesting ('R') patients with a given blood type, narrowed by location.

    URL kwargs ``abo``/``rh`` select the blood type; ``state``, ``district``
    and ``city`` progressively narrow the queryset.  A narrower filter is
    only attempted while the list is larger than MAX_PATIENT_COUNT_THRESHOLD
    and only kept if it still leaves more than MIN_PATIENT_COUNT_THRESHOLD
    matches.
    """
    serializer_class = PatientDetailsSerializer

    def get_queryset(self):
        MAX_PATIENT_COUNT_THRESHOLD = 100
        MIN_PATIENT_COUNT_THRESHOLD = 10
        abo_type = self.kwargs.get("abo")
        rh_type = self.kwargs.get("rh")
        cur_city = self.kwargs.get("city")
        cur_district = self.kwargs.get("district")
        cur_state = self.kwargs.get("state")
        patient_list = PatientDetails.objects.filter(
            status='R', blood_abo_type=abo_type, blood_rh_type=rh_type)
        if cur_state != "" and len(patient_list) > MAX_PATIENT_COUNT_THRESHOLD:
            modified_list = patient_list.filter(state=cur_state)
            if len(modified_list) > MIN_PATIENT_COUNT_THRESHOLD:
                patient_list = modified_list
        if cur_district != "" and len(patient_list) > MAX_PATIENT_COUNT_THRESHOLD:
            modified_list = patient_list.filter(district=cur_district)
            if len(modified_list) > MIN_PATIENT_COUNT_THRESHOLD:
                patient_list = modified_list
        if cur_city != "" and len(patient_list) > MAX_PATIENT_COUNT_THRESHOLD:
            # bugfix: the original overwrote patient_list unconditionally and
            # then tested the stale `modified_list` left over from the
            # district step; the city filter must be checked before keeping it
            modified_list = patient_list.filter(city=cur_city)
            if len(modified_list) > MIN_PATIENT_COUNT_THRESHOLD:
                patient_list = modified_list
        return patient_list
| aadhityasw/Plasm_Hospital_1 | patients/views.py | views.py | py | 2,013 | python | en | code | 0 | github-code | 90 |
32582164837 | import pygame
from GameEngine import App, Server
from general.Constants import Constants
from world.World import Map
from entities.Entities import Entities
from general.Net import Client
from gui.Messages import Messages
class Game(App):
    """Main application: builds the world, entities and message HUD, then
    defers the loop to the ``App`` base class."""

    # mapping from pygame key code to the name of the Constants flag it drives
    _KEY_FLAGS = {
        pygame.K_a: 'A',
        pygame.K_w: 'W',
        pygame.K_s: 'S',
        pygame.K_d: 'D',
        pygame.K_LSHIFT: 'SHIFT',
        pygame.K_LCTRL: 'CTRL',
    }

    def __init__(self, title, width_, height_, background_color=(255, 255, 255)):
        # build the world singletons before the base class starts the loop
        Map()
        Entities()
        Messages()
        # bugfix: super(self.__class__, ...) recurses infinitely if Game is
        # ever subclassed; name the class explicitly instead
        super(Game, self).__init__(title, width_, height_, background_color)

    def render(self, game_display):
        """Draw the map, then the entities, then the chat messages."""
        Map.render(game_display)
        Entities.render(game_display)
        Messages.render(game_display)

    def update(self):
        """Advance entity and map state by one tick."""
        Entities.update()
        Map.update()

    def handle_keys(self, event):
        """Forward the event to App, then mirror key state into Constants.

        Each tracked flag is True exactly while its key is held down;
        the table replaces the original twelve near-identical branches.
        """
        super(Game, self).handle_keys(event)
        if event.type in (pygame.KEYDOWN, pygame.KEYUP):
            flag = self._KEY_FLAGS.get(event.key)
            if flag is not None:
                setattr(Constants, flag, event.type == pygame.KEYDOWN)
if __name__ == "__main__":
pygame.init()
info = open("settings.txt")
connection_type = info.readline().replace("\n", "").replace(" ", "").split("=")[1]
ip = info.readline().replace("\n", "").replace(" ", "").split("=")[1]
port = int(info.readline().replace("\n", "").replace(" ", "").split("=")[1])
Constants.width = int(info.readline().replace("\n", "").replace(" ", "").split("=")[1])
Constants.height = int(info.readline().replace("\n", "").replace(" ", "").split("=")[1])
info.close()
Constants()
if connection_type == "server":
Constants.HOST = True
Server(port)
Client(None, port)
else:
Client(ip, port)
Game("Game", Constants.width, Constants.height)
pygame.quit()
quit()
| VladSzabo/Pixel-Game | general/Main.py | Main.py | py | 2,560 | python | en | code | 0 | github-code | 90 |
def main():
    """Print the K-th kept character of S.

    Characters are taken from S while they equal '1'; the first other
    character is also kept, then scanning stops.  If fewer than K
    characters survive, the last surviving one is printed instead.
    """
    S = input()
    K = int(input())

    kept = []
    for ch in S:
        kept.append(ch)
        if ch != '1':
            break

    # min() folds the "fewer than K" fallback into a single index
    print(kept[min(K, len(kept)) - 1])


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03282/s793758384.py | s793758384.py | py | 269 | python | en | code | 0 | github-code | 90 |
# Build an iterator over the numbers 1..10.
my_list = range(1, 11)
my_iter = iter(my_list)


def run():
    """Exhaust ``my_iter`` by hand, printing each element.

    Demonstrates the iterator protocol explicitly: ``next()`` is called
    until it raises ``StopIteration``, which signals the stream is done.
    """
    while True:
        try:
            print(next(my_iter))
        except StopIteration:
            break


if __name__ == "__main__":
    run()
| steven1174/Python_Projects | Python_3/Iterators1.py | Iterators1.py | py | 300 | python | en | code | 0 | github-code | 90 |
22357846055 | from abc import ABC, abstractmethod
from addition.factory_method_pattern.pizza import Pizza, CheesePizza, PepperoniPizza, NormalPizza
from addition.factory_method_pattern.pizza_ingredient_factory import ChicagoPizzaIngredientFactory, \
NYPizzaIngredientFactory
class PizzaStore(ABC):
    """Template-method base: subclasses decide *which* pizza gets built."""

    def order_pizza(self, type: str) -> Pizza:
        """Create the requested pizza and run it through the standard steps."""
        pizza = self.create_pizza(type)
        for step in (pizza.prepare, pizza.bake, pizza.cut, pizza.box):
            step()
        return pizza

    @abstractmethod
    def create_pizza(self, type: str) -> Pizza:
        """Build a pizza of the given type; implemented per store region."""
class ChicagoPizzaStore(PizzaStore):
    """Concrete store building pizzas from Chicago-style ingredients."""

    def create_pizza(self, type: str) -> Pizza:
        ingredients = ChicagoPizzaIngredientFactory()
        if type == 'CHEESE':
            return CheesePizza(ingredients)
        if type == 'PEPPERONI':
            return PepperoniPizza(ingredients)
        return NormalPizza(ingredients)
class NYPizzaStore(PizzaStore):
    """Concrete store building pizzas from New-York-style ingredients."""

    def create_pizza(self, type: str):
        ingredients = NYPizzaIngredientFactory()
        if type == 'CHEESE':
            return CheesePizza(ingredients)
        if type == 'PEPPERONI':
            return PepperoniPizza(ingredients)
        return NormalPizza(ingredients)
| JuJin1324/learning-design-pattern | addition/factory_method_pattern/pizza_store.py | pizza_store.py | py | 1,363 | python | en | code | 0 | github-code | 90 |
72681504617 | from db import db
class RisklayerPrognosis(db.Model):
    """ORM mapping for the ``risklayer_prognosis`` table: one value per timestamp."""
    __tablename__ = 'risklayer_prognosis'
    # timestamp of the data snapshot the value belongs to (primary key)
    datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False)
    # prognosis value; exact semantics/units are defined by whatever
    # writes this table — not visible here
    prognosis = db.Column(db.Float, nullable=False)
# class RisklayerPrognosisSchema(SQLAlchemyAutoSchema):
# class Meta:
# strict = True
# model = RisklayerPrognosis
#
# timestamp = fields.Timestamp(data_key="datenbestand")
# prognosis = fields.Number(data_key="prognosis")
| dbvis-ukon/coronavis | Backend/models/risklayerPrognosis.py | risklayerPrognosis.py | py | 476 | python | en | code | 16 | github-code | 90 |
6727711350 | import numpy as np
from sklearn.model_selection import KFold, GridSearchCV, StratifiedKFold
from sklearn.base import clone
def draw_smpls(n_obs, n_folds, n_rep=1, groups=None):
    """Draw shuffled train/test index splits for each repetition.

    Uses stratified folds when ``groups`` is given, plain K-fold otherwise.
    Returns a list of length ``n_rep``; each entry is a list of
    (train_indices, test_indices) tuples, one per fold.
    """
    dummy_x = np.zeros(n_obs)
    all_smpls = []
    for _ in range(n_rep):
        if groups is None:
            splitter = KFold(n_splits=n_folds, shuffle=True)
        else:
            splitter = StratifiedKFold(n_splits=n_folds, shuffle=True)
        all_smpls.append(list(splitter.split(X=dummy_x, y=groups)))
    return all_smpls
def fit_predict(y, x, ml_model, params, smpls, train_cond=None):
    """Cross-fit ``ml_model`` over the splits and predict on each test fold.

    params: per-fold parameter dicts for ``set_params`` (or None to keep
        the model's current parameters).
    train_cond: optional index set; training is restricted to its
        intersection with each train split.
    Returns a list with one prediction array per fold.
    """
    y_hat = []
    for idx, (train_index, test_index) in enumerate(smpls):
        if params is not None:
            ml_model.set_params(**params[idx])
        fit_index = (train_index if train_cond is None
                     else np.intersect1d(train_cond, train_index))
        fitted = ml_model.fit(x[fit_index], y[fit_index])
        y_hat.append(fitted.predict(x[test_index]))
    return y_hat
def fit_predict_proba(y, x, ml_model, params, smpls, trimming_threshold=0, train_cond=None):
    """Cross-fit a classifier and collect positive-class probabilities per fold.

    When ``trimming_threshold`` is positive, probabilities are clipped into
    [trimming_threshold, 1 - trimming_threshold].
    """
    y_hat = []
    for idx, (train_index, test_index) in enumerate(smpls):
        if params is not None:
            ml_model.set_params(**params[idx])
        fit_index = (train_index if train_cond is None
                     else np.intersect1d(train_cond, train_index))
        fitted = ml_model.fit(x[fit_index], y[fit_index])
        preds = fitted.predict_proba(x[test_index])[:, 1]
        if trimming_threshold > 0:
            preds = np.clip(preds, trimming_threshold, 1 - trimming_threshold)
        y_hat.append(preds)
    return y_hat
def tune_grid_search(y, x, ml_model, smpls, param_grid, n_folds_tune, train_cond=None):
    """Grid-search hyperparameters on the training part of every split.

    Returns a list with one fitted GridSearchCV object per split.
    """
    tune_res = []
    for train_index, _ in smpls:
        if train_cond is not None:
            train_index = np.intersect1d(train_cond, train_index)
        inner_cv = KFold(n_splits=n_folds_tune, shuffle=True)
        search = GridSearchCV(ml_model, param_grid, cv=inner_cv)
        tune_res.append(search.fit(x[train_index, :], y[train_index]))
    return tune_res
def _clone(learner):
if learner is None:
res = None
else:
res = clone(learner)
return res
| DoubleML/doubleml-for-py | doubleml/tests/_utils.py | _utils.py | py | 2,768 | python | en | code | 347 | github-code | 90 |
72555429098 | import os
import sys
import wx
from vtkmodules.vtkCommonMath import vtkMatrix4x4
from vtkmodules.vtkRenderingCore import (
vtkActor2D,
vtkCoordinate,
vtkTextActor,
vtkTextMapper,
vtkTextProperty,
)
from vtkmodules.vtkIOGeometry import vtkOBJReader, vtkSTLReader
from vtkmodules.vtkIOPLY import vtkPLYReader
from vtkmodules.vtkIOXML import vtkXMLPolyDataReader
from invesalius.pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.utils as utils
from invesalius import inv_paths
class ProgressDialog(object):
    """Modal wx progress dialog shown while DICOM files are loaded.

    When ``abort`` is True the dialog gets a cancel button, whose handler
    publishes 'Cancel DICOM load' so the loader can stop.
    """
    def __init__(self, parent, maximum, abort=False):
        self.title = "InVesalius 3"
        self.msg = _("Loading DICOM files")
        self.maximum = maximum
        self.current = 0
        self.style = wx.PD_APP_MODAL
        if abort:
            self.style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT
        self.dlg = wx.ProgressDialog(self.title,
                                     self.msg,
                                     maximum = self.maximum,
                                     parent = parent,
                                     style = self.style)
        self.dlg.Bind(wx.EVT_BUTTON, self.Cancel)
        self.dlg.SetSize(wx.Size(250,150))
    def Cancel(self, evt):
        # notify the loading pipeline that the user asked to abort
        Publisher.sendMessage("Cancel DICOM load")
    def Update(self, value, message):
        # Forward progress to the dialog; returns the dialog's "keep going"
        # flag, or False once the bar is full.
        if(int(value) != self.maximum):
            try:
                return self.dlg.Update(int(value),message)
            #TODO:
            #Exception in the Windows XP 64 Bits with wxPython 2.8.10
            except(wx._core.PyAssertionError):
                return True
        else:
            return False
    def Close(self):
        self.dlg.Destroy()
# Optional win32api import: only attempted on Windows, where it is later
# used to obtain short (8.3) path names before handing paths to VTK readers.
if sys.platform == 'win32':
    try:
        import win32api
        _has_win32api = True
    except ImportError:
        _has_win32api = False
else:
    _has_win32api = False
# If you are frightened by the code bellow, or think it must have been result of
# an identation error, lookup at:
# Closures in Python (pt)
# http://devlog.waltercruz.com/closures
# http://montegasppa.blogspot.com/2007/01/tampinhas.html
# Closures not only in Python (en)
# http://en.wikipedia.org/wiki/Closure_%28computer_science%29
# http://www.ibm.com/developerworks/library/l-prog2.html
# http://jjinux.blogspot.com/2006/10/python-modifying-counter-in-closure.html
def ShowProgress(number_of_filters = 1,
                 dialog_type="GaugeProgress"):
    """
    Return a closure that accumulates pipeline progress across filters.

    To use this closure, do something like this:
        UpdateProgress = ShowProgress(NUM_FILTERS)
        UpdateProgress(vtkObject)

    dialog_type selects the sink: "GaugeProgress" publishes to the GUI
    status bar, "ProgressDialog" drives a modal ProgressDialog instead.
    """
    # mutable one-element lists so the inner closure can rebind the values
    progress = [0]
    last_obj_progress = [0]
    if (dialog_type == "ProgressDialog"):
        try:
            dlg = ProgressDialog(wx.GetApp().GetTopWindow(), 100)
        except (wx._core.PyNoAppError, AttributeError):
            # no running wx App (e.g. headless use): return a no-op updater
            return lambda obj, label: 0
    # when the pipeline is larger than 1, we have to consider this object
    # percentage
    number_of_filters = max(number_of_filters, 1)
    ratio = (100.0 / number_of_filters)
    def UpdateProgress(obj, label=""):
        """
        Show progress on GUI according to pipeline execution.
        """
        # object progress is cummulative and is between 0.0 - 1.0
        # is necessary verify in case is sending the progress
        #represented by number in case multiprocess, not vtk object
        if isinstance(obj, float) or isinstance(obj, int):
            obj_progress = obj
        else:
            obj_progress = obj.GetProgress()
        # as it is cummulative, we need to compute the diference, to be
        # appended on the interface
        if obj_progress < last_obj_progress[0]: # current obj != previous obj
            difference = obj_progress # 0
        else: # current obj == previous obj
            difference = obj_progress - last_obj_progress[0]
        last_obj_progress[0] = obj_progress
        # final progress status value
        progress[0] = progress[0] + ratio*difference
        # Tell GUI to update progress status value
        if (dialog_type == "GaugeProgress"):
            Publisher.sendMessage('Update status in GUI', value=progress[0], label=label)
        else:
            if (progress[0] >= 99.999):
                progress[0] = 100
            if not(dlg.Update(progress[0],label)):
                dlg.Close()
        return progress[0]
    return UpdateProgress
class Text(object):
    """2-D text overlay: a vtkTextMapper/vtkActor2D pair positioned in
    normalized display coordinates (0..1)."""

    def __init__(self):
        self.layer = 99
        self.children = []
        property = vtkTextProperty()
        property.SetFontSize(const.TEXT_SIZE)
        property.SetFontFamilyToArial()
        property.BoldOff()
        property.ItalicOff()
        property.ShadowOn()
        property.SetJustificationToLeft()
        property.SetVerticalJustificationToTop()
        property.SetColor(const.TEXT_COLOUR)
        self.property = property

        mapper = vtkTextMapper()
        mapper.SetTextProperty(property)
        self.mapper = mapper

        actor = vtkActor2D()
        actor.SetMapper(mapper)
        # positions are fractions of the viewport, not pixels
        actor.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
        actor.PickableOff()
        self.actor = actor

        self.SetPosition(const.TEXT_POS_LEFT_UP)

    def SetColour(self, colour):
        """Set the text colour as an (r, g, b) tuple."""
        self.property.SetColor(colour)

    def ShadowOff(self):
        self.property.ShadowOff()

    def BoldOn(self):
        self.property.BoldOn()

    def SetSize(self, size):
        """Set the font size in points."""
        self.property.SetFontSize(size)

    def SetValue(self, value):
        """Set the displayed string; numeric values are converted first."""
        if isinstance(value, int) or isinstance(value, float):
            value = str(value)
            if sys.platform == 'win32':
                value += "" # Otherwise 0 is not shown under win32
        # With some encoding in some dicom fields (like name) raises a
        # UnicodeEncodeError because they have non-ascii characters. To avoid
        # that we encode in utf-8.
        if sys.platform == 'win32':
            self.mapper.SetInput(value.encode("utf-8", errors='replace'))
        else:
            try:
                self.mapper.SetInput(value.encode("latin-1"))
            except(UnicodeEncodeError):
                self.mapper.SetInput(value.encode("utf-8", errors='replace'))

    def GetValue(self):
        """Return the string currently held by the mapper."""
        return self.mapper.GetInput()

    def SetCoilDistanceValue(self, value):
        #TODO: Not being used anymore. Can be deleted.
        """Format *value* as a coil distance label ('Dist: 0xx.xx mm')."""
        if isinstance(value, int) or isinstance(value, float):
            value = 'Dist: ' + str("{:06.2f}".format(value)) + ' mm'
            if sys.platform == 'win32':
                value += "" # Otherwise 0 is not shown under win32
        if sys.platform == 'win32':
            self.mapper.SetInput(value.encode("utf-8"))
        else:
            try:
                self.mapper.SetInput(value.encode("latin-1"))
            except(UnicodeEncodeError):
                self.mapper.SetInput(value.encode("utf-8"))

    def SetPosition(self, position):
        """Set the (x, y) normalized-display position of the actor."""
        self.actor.GetPositionCoordinate().SetValue(position[0],
                                                    position[1])

    def GetPosition(self, position=None):
        """Return the actor's (x, y) normalized-display position.

        ``position`` is unused and now optional; it is kept only for
        backward compatibility.  Bugfix: the original queried the
        coordinate but never returned it — the value is now returned.
        """
        return self.actor.GetPositionCoordinate().GetValue()

    def SetJustificationToRight(self):
        self.property.SetJustificationToRight()

    def SetJustificationToCentered(self):
        self.property.SetJustificationToCentered()

    def SetVerticalJustificationToBottom(self):
        self.property.SetVerticalJustificationToBottom()

    def SetVerticalJustificationToCentered(self):
        self.property.SetVerticalJustificationToCentered()

    def Show(self, value=1):
        """Toggle actor visibility: truthy shows, falsy hides."""
        if value:
            self.actor.VisibilityOn()
        else:
            self.actor.VisibilityOff()

    def Hide(self):
        self.actor.VisibilityOff()
class TextZero(object):
    """2-D text overlay based on vtkTextActor, with optional canvas drawing.

    Unlike ``Text`` above, this variant also knows how to render itself
    onto a wx-backed canvas via ``draw_to_canvas``.
    """
    def __init__(self):
        self.layer = 99
        self.children = []
        property = vtkTextProperty()
        property.SetFontSize(const.TEXT_SIZE_LARGE)
        property.SetFontFamilyToArial()
        property.BoldOn()
        property.ItalicOff()
        #property.ShadowOn()
        property.SetJustificationToLeft()
        property.SetVerticalJustificationToTop()
        property.SetColor(const.TEXT_COLOUR)
        self.property = property
        actor = vtkTextActor()
        actor.GetTextProperty().ShallowCopy(property)
        # position is a fraction of the viewport, not pixels
        actor.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
        actor.PickableOff()
        self.actor = actor
        self.text = ''
        self.position = (0, 0)
        # note: attribute name has a historic typo ('syze'); kept for
        # compatibility with existing code
        self.symbolic_syze = wx.FONTSIZE_MEDIUM
        self.bottom_pos = False
        self.right_pos = False
    def SetColour(self, colour):
        """Set the text colour as an (r, g, b) tuple."""
        self.property.SetColor(colour)
    def ShadowOff(self):
        self.property.ShadowOff()
    def SetSize(self, size):
        """Set the font size in points and push it to the actor."""
        self.property.SetFontSize(size)
        self.actor.GetTextProperty().ShallowCopy(self.property)
    def SetSymbolicSize(self, size):
        """Store a wx symbolic font size for canvas drawing."""
        self.symbolic_syze = size
    def SetValue(self, value):
        """Set the displayed string; numeric values are converted first."""
        if isinstance(value, int) or isinstance(value, float):
            value = str(value)
            if sys.platform == 'win32':
                value += "" # Otherwise 0 is not shown under win32
        # With some encoding in some dicom fields (like name) raises a
        # UnicodeEncodeError because they have non-ascii characters. To avoid
        # that we encode in utf-8.
        try:
            self.actor.SetInput(value.encode("cp1252"))
        except(UnicodeEncodeError):
            self.actor.SetInput(value.encode("utf-8","surrogatepass"))
        self.text = value
    def SetPosition(self, position):
        """Set the (x, y) normalized-display position of the actor."""
        self.position = position
        self.actor.GetPositionCoordinate().SetValue(position[0],
                position[1])
    def GetPosition(self):
        """Return the actor's (x, y) normalized-display position."""
        return self.actor.GetPositionCoordinate().GetValue()
    def SetJustificationToRight(self):
        self.property.SetJustificationToRight()
    def SetJustificationToCentered(self):
        self.property.SetJustificationToCentered()
    def SetVerticalJustificationToBottom(self):
        self.property.SetVerticalJustificationToBottom()
    def SetVerticalJustificationToCentered(self):
        self.property.SetVerticalJustificationToCentered()
    def Show(self, value=1):
        """Toggle actor visibility: truthy shows, falsy hides."""
        if value:
            self.actor.VisibilityOn()
        else:
            self.actor.VisibilityOff()
    def Hide(self):
        self.actor.VisibilityOff()
    def draw_to_canvas(self, gc, canvas):
        """Draw the text onto a canvas at the stored normalized position.

        The normalized position is converted to display pixels via the
        canvas renderer; right/bottom anchoring shifts the text by its
        measured width/height.
        """
        coord = vtkCoordinate()
        coord.SetCoordinateSystemToNormalizedDisplay()
        coord.SetValue(*self.position)
        x, y = coord.GetComputedDisplayValue(canvas.evt_renderer)
        font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        font.SetSymbolicSize(self.symbolic_syze)
        font.Scale(canvas.viewer.GetContentScaleFactor())
        if self.bottom_pos or self.right_pos:
            w, h = canvas.calc_text_size(self.text, font)
            if self.right_pos:
                x -= w
            if self.bottom_pos:
                y += h
        canvas.draw_text(self.text, (x, y), font=font)
def numpy_to_vtkMatrix4x4(affine):
    """
    Convert a numpy 4x4 array to a vtk 4x4 matrix

    :param affine: 4x4 array
    :return: vtkMatrix4x4 object representing the affine
    """
    # only the shape is validated; elements are copied one by one
    assert affine.shape == (4, 4)
    vtk_matrix = vtkMatrix4x4()
    for r in range(4):
        for c in range(4):
            vtk_matrix.SetElement(r, c, affine[r, c])
    return vtk_matrix
# TODO: Use the SurfaceManager >> CreateSurfaceFromFile inside surface.py method instead of duplicating code
def CreateObjectPolyData(filename):
    """
    Read a surface mesh into vtkPolyData, e.g. the navigation coil shown
    in the volume viewer.

    Supported formats: STL, PLY, OBJ and VTP.  An empty *filename* falls
    back to the bundled magstim figure-8 coil model.  Returns None (after
    showing a message box) for unsupported formats or unreadable files.
    """
    filename = utils.decode(filename, const.FS_ENCODE)
    if filename:
        # pick the reader by file extension
        if filename.lower().endswith('.stl'):
            reader = vtkSTLReader()
        elif filename.lower().endswith('.ply'):
            reader = vtkPLYReader()
        elif filename.lower().endswith('.obj'):
            reader = vtkOBJReader()
        elif filename.lower().endswith('.vtp'):
            reader = vtkXMLPolyDataReader()
        else:
            wx.MessageBox(_("File format not reconized by InVesalius"), _("Import surface error"))
            return
    else:
        filename = os.path.join(inv_paths.OBJ_DIR, "magstim_fig8_coil.stl")
        reader = vtkSTLReader()
    # On Windows the short (8.3) path form is used — presumably to avoid
    # non-ASCII path issues with the VTK readers; confirm before changing.
    if _has_win32api:
        obj_name = win32api.GetShortPathName(filename).encode(const.FS_ENCODE)
    else:
        obj_name = filename.encode(const.FS_ENCODE)
    reader.SetFileName(obj_name)
    reader.Update()
    obj_polydata = reader.GetOutput()
    # an empty mesh means the reader failed to parse the file
    if obj_polydata.GetNumberOfPoints() == 0:
        wx.MessageBox(_("InVesalius was not able to import this surface"), _("Import surface error"))
        obj_polydata = None
    return obj_polydata
return obj_polydata | invesalius/invesalius3 | invesalius/data/vtk_utils.py | vtk_utils.py | py | 13,185 | python | en | code | 536 | github-code | 90 |
28982272260 | from django.shortcuts import render
from django.shortcuts import redirect
from django.contrib import messages
from appblog.models import blogm, categorias #importamos los modelos de la appblog
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login,logout
# from django.http import HttpResponseRedirect
# Create your views here.
# from proyecto_rein.forms import Inputimagen
def blog(request):
    """Render the blog index page with every entry and every category."""
    context = {
        'blogs': blogm.objects.all(),            # all blog entries
        'categorias': categorias.objects.all(),  # all categories
    }
    return render(request, "appblog/blog.html", context)
def registrarnuevaentrada(request):
    """Create a new blog entry from the submitted form.

    On POST: reads title, content, category name and image; reuses the
    category if it already exists (creating it otherwise), saves the new
    entry and redirects to the blog index with a success message.
    On GET (or if creation did not succeed): renders the empty form.
    """
    if request.method == 'POST':
        tituloe = request.POST['tituloe']
        contenidoe = request.POST['contenidoe']
        categoriae = request.POST['categoriae']
        imagene = request.FILES['imagene']
        # get_or_create collapses the original duplicated lookup/create
        # branches and drops the redundant re-save of an existing category
        categoria, _ = categorias.objects.get_or_create(nombre=categoriae)
        documento = blogm.objects.create(
            titulo=tituloe,
            descripcion=contenidoe,
            imagen=imagene,
            categoria=categoria,
        )
        documento.save()
        if documento:
            messages.success(request, 'Información Guardada')
            return redirect('blog')
    return render(request, 'appblog/nuevaentrada.html', {})
# if request.method == 'POST':
# usuario = request.POST.get('username')
# clave = request.POST.get('password')
# user = authenticate(username=usuario,password=clave)
# if user:
# login(request,user)
# nombreusuario=User.objects.all()
# for usr in nombreusuario:
# if usr.username == usuario:
# print(usr.first_name)
# 'registrarnuevaentrada': form_registrarnuevaentrada
# form_registrarnuevaentrada = FormRegistrarEntrada(request.POST or None)
# if request.method == 'POST' and form_registrarnuevaentrada.is_valid():
# tituloE=form_registrarnuevaentrada.cleaned_data.get('tituloEntrada')
# contenidoE=form_registrarnuevaentrada.cleaned_data.get('contenidoEntrada')
# # categoriaE=registrarnuevaentrada.cleaned_data.get('categoriasEntrada')
# # imagenE=form_registrarnuevaentrada.get('imagenDestacada')
# nuevo=blog.objects.create(titulo=tituloE,descripcion=contenidoE,imagen='autismo.jpg')
# if nuevo:
# nuevo.save()
# messages.success(request,'Entrada creada con exito')
# return redirect('blog')
# else:
# print('error al guardar la informacion')
# return render(request,'appblog/nuevaentrada.html',{
# 'registrarnuevaentrada': form_registrarnuevaentrada
# }) | jkaalexkei/rein | appBlog/views.py | views.py | py | 3,499 | python | es | code | 0 | github-code | 90 |
2726801719 | # To run this, you can install BeautifulSoup
# https://pypi.python.org/pypi/beautifulsoup4
# Or download the file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Interactive inputs: starting URL, number of hops, and the 1-based
# position of the link to follow on each page.
url = input('Enter - ')
count = input("Enter Count: ")
count = int(count)
position = input("Enter Position: ")
position = int(position)
# convert the 1-based user input to a 0-based list index
position = position - 1
linklist = list()
# Repeatedly fetch the current page, collect all anchor hrefs, and hop to
# the link at the requested position.
# NOTE(review): `while count >= 0` performs count + 1 iterations; if the
# intent is to follow exactly `count` links this is off by one — confirm
# against the assignment spec before changing.
while count >= 0:
    print(url)
    html = urllib.request.urlopen(url, context=ctx).read()
    soup = BeautifulSoup(html, 'html.parser')
    count = count - 1
    # NOTE(review): the del + rebind pair is redundant; the reassignment
    # below would suffice on its own.
    del linklist
    linklist = list()
    tags = soup('a')
    for tag in tags:
        linklist.append(tag.get("href", None))
    url = linklist[position]
# urllist = linklist
# url = urllist[2]
#print(url)
#print("SHANKBONE")
#while count >= 1:
# print(newurl)
# url = input('Enter - ')
# if newurl != None: url = newurl
# html = urllib.request.urlopen(url, context=ctx).read()
# soup = BeautifulSoup(html, 'html.parser')
# count = count - 1
# tags = soup('a')
# for tag in tags:
# linklist.append(tag.get("href", None))
# newurl = (linklist[0])
#print("SHANKBONE")
| zwarshavsky/introtopython | Exercises/ex_12.7.py | ex_12.7.py | py | 1,398 | python | en | code | 0 | github-code | 90 |
16509110865 | import sys
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras import layers
from tensorflow.keras.layers import Input, Dense, Embedding, Reshape, GRU, Concatenate, LSTM, Dropout, BatchNormalization, Activation, concatenate, multiply, MaxPooling1D, Conv1D, Flatten, GlobalAveragePooling1D
from tensorflow.compat.v1.keras.layers import Bidirectional, RepeatVector, Permute, TimeDistributed, dot
from tensorflow.keras.optimizers import RMSprop, Adamax
from tensorflow.keras import utils, metrics
from custom.qstransformer_layers import TransformerBlock, TokenAndPositionEmbedding, MultiHeadAttentionBlock
from custom.qs_loss import use_prep, attendgru_prep, custom_use_loss, custom_attendgru_loss, custom_cce_loss, custom_dist_cce_loss
class TransformerBase:
    """Single-block Transformer encoder/decoder for code summarization.

    Consumes tokenized source code ('tdat') plus a comment prefix ('com')
    and predicts the next comment token, with a configurable training loss.
    """

    def __init__(self, config):
        # Source sequences are clipped to a fixed length for this model.
        config['tdatlen'] = 50

        self.config = config
        self.tdatvocabsize = config['tdatvocabsize']
        self.comvocabsize = config['comvocabsize']
        self.datlen = config['tdatlen']
        self.comlen = config['comlen']

        self.embdims = 100
        self.attheads = 2  # number of attention heads
        self.recdims = 100
        self.ffdims = 100  # hidden size of the transformer feed-forward sublayer

        self.config['batch_config'] = [ ['tdat', 'com'], ['comout'] ]
        self.config['loss_type'] = config['loss_type']

        # Pre-compute whatever the chosen custom loss needs at build time.
        loss_type = self.config['loss_type']
        if loss_type == 'use':
            self.index_tensor, self.comwords_tensor = use_prep(self.config['comstok'])
        elif loss_type == 'attendgru':
            self.fmodel = attendgru_prep()
        elif loss_type == 'use-dist':
            self.dist = config['target_dist']

    def create_model(self):
        """Build and compile the Keras model; return (config, model)."""
        src_in = Input(shape=(self.datlen,))
        com_in = Input(shape=(self.comlen,))

        # Encoder: embed source tokens, then one self-attention transformer block.
        src_emb = TokenAndPositionEmbedding(self.datlen, self.tdatvocabsize, self.embdims)(src_in)
        enc_out = TransformerBlock(self.embdims, self.attheads, self.ffdims)(src_emb, src_emb)

        # Decoder: embed the comment prefix, self-attend, then attend over the encoder output.
        com_emb = TokenAndPositionEmbedding(self.comlen, self.comvocabsize, self.embdims)(com_in)
        com_self = MultiHeadAttentionBlock(self.embdims, self.attheads)(com_emb, com_emb)
        dec_out = TransformerBlock(self.embdims, self.attheads, self.ffdims)(com_self, enc_out)

        flat = Flatten()(dec_out)
        preds = Dense(self.comvocabsize, activation="softmax")(flat)

        model = Model(inputs=[src_in, com_in], outputs=preds)

        lossf = custom_cce_loss()  # default: plain categorical cross-entropy
        if self.config['loss_type'] == 'use':
            lossf = custom_use_loss(self.index_tensor, self.comwords_tensor)
        elif self.config['loss_type'] == 'attendgru':
            lossf = custom_attendgru_loss(self.fmodel)
        elif self.config['loss_type'] == 'use-dist':
            lossf = custom_dist_cce_loss(self.dist)

        model.compile(loss=lossf, optimizer='adam', metrics=['accuracy'])
        return self.config, model
| aakashba/callcon-public | models/transformer_base.py | transformer_base.py | py | 3,333 | python | en | code | 2 | github-code | 90 |
import sys

# Contest boilerplate: fast input alias and common constants.
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)


def main():
    """Read N, A, B and print max((B - A) * (N - 2) + 1, 0)."""
    n, a, b = map(int, readline().split())
    print(max((b - a) * (n - 2) + 1, 0))


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03705/s605759451.py | s605759451.py | py | 270 | python | en | code | 0 | github-code | 90 |
#coding:utf-8
"""Extract all-digit fields from hoge.csv and write them, comma-separated, to text.txt."""
import csv

# Context managers guarantee both files are flushed and closed: the original
# `d.close` was missing parentheses (a no-op attribute access), so the output
# file was never explicitly closed. csv.reader also requires a text-mode file
# in Python 3, not 'rb'.
with open('hoge.csv', 'r', newline='') as f, open('text.txt', 'w') as d:
    for row in csv.reader(f):
        for field in row:
            if field.isdigit():  # keep only purely numeric cells
                d.write(field)
                d.write(',')
#print(chars[i]) | narumoto65/python | csvhenkan.py | csvhenkan.py | py | 301 | python | en | code | 0 | github-code | 90 |
from math import sqrt, sin, cos, radians


def solve(a, b, deg):
    """Return (area, perimeter, height) of the triangle with sides a, b and included angle deg (degrees).

    The third side comes from the law of cosines; the height is measured onto side a.
    """
    rad = radians(deg)
    sin_c = sin(rad)
    area = a * b * sin_c / 2
    c = sqrt(a * a + b * b - 2 * a * b * cos(rad))  # law of cosines
    height = b * sin_c
    return area, a + b + c, height


if __name__ == '__main__':
    # Input: two sides and the angle between them, whitespace-separated.
    a, b, cc = map(int, input().split())
    s, perimeter, h = solve(a, b, cc)
    print(s)
    print(perimeter)
    print(h)
16169261564 | from django.views.generic import ListView, DetailView, View
from django.core.cache import cache
from django.shortcuts import get_object_or_404
from django.http import Http404, HttpResponse
from models import Image
from settings import PROJECT_ROOT
from gallery.tasks import download_image
import json
import os
import urllib
class GalleryView(ListView):
    """List all images; supports ?query=<title substring> filtering and ?format=json output."""
    template_name = "gallery/list.html"
    model = Image
    def get_query(self):
        """Return the title-filtered queryset (memoized in the cache), or None when no ?query= is given."""
        title = self.request.GET.get('query', None)
        if title:
            query = cache.get(title)
            if query is None:
                query = Image.objects.filter(title__contains=title)
                # NOTE(review): this stores a lazy, unevaluated QuerySet keyed by the
                # raw search string -- confirm the cache backend pickles/evaluates it
                # as intended and that stale results are acceptable (no timeout set).
                cache.set(title, query)
            return query
        else:
            return None
    def get_queryset(self):
        """Queryset for the ListView: search results when filtering, otherwise every image."""
        query = self.get_query()
        if query is None:
            query = Image.objects.all()
        return query
    def get(self, *args, **kwargs):
        """Render the HTML list, or a JSON array of search results when ?format=json is passed."""
        format = self.request.GET.get('format', None)
        query = self.get_query()
        if format == 'json' and query is None:
            # JSON output is only defined for searches; without ?query= respond 404.
            raise Http404
        elif format == 'json' and query:
            json_list = []
            for image in query:
                json_list.append({'title': image.title,
                                  'author': image.author.name,
                                  'timestamp': str(image.timestamp),
                                  'hash': image.hash,
                                  'ext': image.ext,
                                  'size': image.size,
                                  'url': image.url})
            return HttpResponse(json.dumps(json_list))
        else:
            return super(GalleryView, self).get(*args, **kwargs)
class GalleryDetailView(DetailView):
    """Detail page for a single Image (object resolved by DetailView from the URL)."""
    template_name = "gallery/detail.html"
    model = Image
class GalleryByAuthorView(ListView):
    """List the images belonging to the author named in the URL kwarg 'authorname'."""
    template_name = "gallery/list.html"
    def get_queryset(self):
        # NOTE(review): this filters the `author` relation directly with the URL
        # string; unless Author's primary key is its name, this should probably be
        # author__name=self.kwargs['authorname'] -- confirm against the model.
        return Image.objects.filter(author=self.kwargs['authorname'])
class CachedImage(View):
    """Serve an image from the local download cache, falling back to the remote URL.

    On a cache miss the actual download is delegated to a Celery task while the
    remote bytes are proxied directly for this request.
    """
    def get(self, *args, **kwargs):
        image = get_object_or_404(Image, pk=kwargs['pk'])
        downloads = os.path.join(PROJECT_ROOT, 'images')
        file_name = "%s/%s%s"%(downloads, image.hash, image.ext)
        if os.path.exists(file_name):
            # Binary mode is required for image data (text mode can corrupt it on
            # some platforms); the context manager closes the handle, which the
            # original code never did.
            with open(file_name, 'rb') as f:
                content = f.read()
        else:
            # Queue the background download, then proxy the remote file for this
            # response only.
            download_image.delay(image)
            remote = urllib.urlopen(image.url)
            try:
                content = remote.read()
            finally:
                remote.close()
        return HttpResponse(content, mimetype="image/jpg")
| ed-rahn/simplegallery | gallery/views.py | views.py | py | 2,485 | python | en | code | 0 | github-code | 90 |
73178891815 | #!/usr/bin/env python3
"""Load up a ledger and match against various parts of the entries in a way
that one can iterate with Emacs.
"""
__copyright__ = "Copyright (C) 2013-2018 Martin Blais"
__license__ = "GNU GPLv2"
import re
from beancount import load
from beancount import utils
from beancount.core import data
def main():
    """Command-line entry point: parse a ledger, filter entries, and print the matches."""
    import argparse
    optparser = argparse.ArgumentParser(description=__doc__)
    optparser.add_argument('filename', help='Filename.')
    optparser.add_argument('-q', '--quiet', action='store_true',
                           help="Don't print file or line numbers.")
    optparser.add_argument('-a', '--account', metavar='REGEXP', action='store',
                           help="Match against the account names.")
    optparser.add_argument('-t', '--tag', metavar='REGEXP', action='store',
                           help="Match against the tag names of Transactions.")
    optparser.add_argument('-i', '--invert', action='store_true',
                           help="Invert signs on all the postings.")
    opts = optparser.parse_args()

    # Parse the input file; load() returns the (entries, errors, options) triple.
    entries, errors, options = load(opts.filename)

    # Create a mapping from a unique value (we're using the fileloc) to the
    # entry objects.
    entry_map = {entry.fileloc: entry for entry in entries}

    # Initialized the reduced list to the entire set of keys.
    filtered_keys = set(entry_map.keys())

    # Filter by matching a regexp against the account name.
    if opts.account:
        account_keys = set()
        regexp = re.compile(opts.account)
        for entry in entries:
            # Iterate over all references to accounts.
            for account in utils.get_tuple_typed_values(entry, data.Account):
                if regexp.match(account.name):
                    account_keys.add(entry.fileloc)
        filtered_keys = filtered_keys.intersection(account_keys)

    # Filter by matching a regexp against the tags.
    if opts.tag:
        tag_keys = set()
        regexp = re.compile(opts.tag)
        for entry in utils.filter_type(entries, data.Transaction):
            if any(map(regexp.match, entry.tags)):
                tag_keys.add(entry.fileloc)
        filtered_keys = filtered_keys.intersection(tag_keys)

    # Print out the match results that have filtered through.
    filtered_entries = list(map(entry_map.get, filtered_keys))

    # Sort the entries by date, and then by location in the file.
    filtered_entries.sort(key=lambda entry: (entry.date, entry.fileloc))

    # Invert the signs if requested; this is convenient for undoing entries.
    # Bug fix: the original inverted unconditionally, ignoring the -i/--invert
    # flag it had just parsed.
    if opts.invert:
        for entry in utils.filter_type(filtered_entries, data.Transaction):
            for posting in entry.postings:
                # Note: modify in-place; there's nothing interesting going on there
                # after.
                posting.position.number = -posting.position.number

    # Print out something about each entry.
    for entry in filtered_entries:
        if not opts.quiet:
            print()
            print('{}'.format(data.render_fileloc(entry.fileloc)))
            print()
        print(data.format_entry(entry))

if __name__ == '__main__':
    main()
| beancount/beanlabs | beanlabs/scripts/grep.py | grep.py | py | 3,175 | python | en | code | 7 | github-code | 90 |
18402550759 | import sys
import itertools
# import numpy as np
import time
import math
import heapq
from collections import defaultdict
from collections import Counter
sys.setrecursionlimit(10 ** 7)
INF = 10 ** 18
MOD = 10 ** 9 + 7
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
# map(int, input().split())
N, M, K = map(int, input().split())
MAX = 2 * 10 ** 5 + 5  # table size; supports binomial arguments up to ~2*10^5
MOD = 10 ** 9 + 7  # note: rebinds the MOD already defined above
# Global factorial / inverse-factorial / modular-inverse tables, filled by comInit().
fac = [0 for i in range(MAX)]
finv = [0 for i in range(MAX)]
inv = [0 for i in range(MAX)]
def comInit(mod):
    """Populate the global tables fac, inv, finv (size MAX) with values mod `mod`."""
    fac[0] = fac[1] = 1
    finv[0] = finv[1] = 1
    inv[1] = 1
    # Standard linear-time recurrences for factorials and modular inverses.
    for k in range(2, MAX):
        fac[k] = fac[k - 1] * k % mod
        inv[k] = mod - inv[mod % k] * (mod // k) % mod
        finv[k] = finv[k - 1] * inv[k] % mod
def com(n, r, mod):
    """Return nCr mod `mod` using the precomputed tables; 0 for out-of-range arguments."""
    if r < 0 or n < 0 or n < r:
        return 0
    return fac[n] * (finv[r] * finv[n - r] % mod) % mod
def p(n, r, mod):
    # nPr mod `mod` (permutations). Assumes 0 <= r <= n: unlike com(), no bounds check.
    return fac[n] * finv[n - r] % mod
comInit(MOD)
# xs: horizontal-distance contribution -- sum over column gaps d of d * (M - d)
# column pairs, each combined with N^2 choices of rows.
xs = 0
for i in range(M):
    xs += i * (M - i)
    xs %= MOD
xs *= N ** 2  # NOTE(review): not reduced mod MOD here -- harmless with Python bigints
# ys: symmetric vertical-distance contribution.
ys = 0
for i in range(N):
    ys += i * (N - i)
    ys %= MOD
ys *= M ** 2
ys %= MOD
# Appears to compute (sum of pairwise Manhattan distances over the grid) times
# C(N*M - 2, K - 2): each cell pair is counted once per placement of the other
# K - 2 pieces -- TODO confirm against the problem statement.
print(((xs + ys) % MOD) * com(N * M - 2, K - 2, MOD) % MOD)
6319831947 | # Ultralytics YOLO ЁЯЪА, AGPL-3.0 license
import contextlib
import glob
import inspect
import math
import os
import platform
import re
import shutil
import subprocess
import sys
import time
from importlib import metadata
from pathlib import Path
from typing import Optional
import cv2
import numpy as np
import requests
import torch
from matplotlib import font_manager
from ultralytics.utils import (ASSETS, AUTOINSTALL, LINUX, LOGGER, ONLINE, ROOT, USER_CONFIG_DIR, SimpleNamespace,
ThreadingLocked, TryExcept, clean_url, colorstr, downloads, emojis, is_colab, is_docker,
is_jupyter, is_kaggle, is_online, is_pip_package, url2file)
def parse_requirements(file_path=ROOT.parent / 'requirements.txt', package=''):
"""
Parse a requirements.txt file, ignoring lines that start with '#' and any text after '#'.
Args:
file_path (Path): Path to the requirements.txt file.
package (str, optional): Python package to use instead of requirements.txt file, i.e. package='ultralytics'.
Returns:
(List[Dict[str, str]]): List of parsed requirements as dictionaries with `name` and `specifier` keys.
Example:
```python
from ultralytics.utils.checks import parse_requirements
parse_requirements(package='ultralytics')
```
"""
if package:
requires = [x for x in metadata.distribution(package).requires if 'extra == ' not in x]
else:
requires = Path(file_path).read_text().splitlines()
requirements = []
for line in requires:
line = line.strip()
if line and not line.startswith('#'):
line = line.split('#')[0].strip() # ignore inline comments
match = re.match(r'([a-zA-Z0-9-_]+)\s*([<>!=~]+.*)?', line)
if match:
requirements.append(SimpleNamespace(name=match[1], specifier=match[2].strip() if match[2] else ''))
return requirements
def parse_version(version='0.0.0') -> tuple:
    """
    Convert a version string to a tuple of integers, ignoring any extra non-numeric string attached to the version.

    Replaces the deprecated 'pkg_resources.parse_version(v)'.

    Args:
        version (str): Version string, i.e. '2.0.1+cpu'

    Returns:
        (tuple): Numeric components of the version, i.e. (2, 0, 1); (0, 0, 0) on failure.
    """
    try:
        numbers = re.findall(r'\d+', version)[:3]  # keep at most major/minor/patch
        return tuple(int(n) for n in numbers)
    except Exception as e:
        LOGGER.warning(f'WARNING тЪая╕П failure for parse_version({version}), returning (0, 0, 0): {e}')
        return 0, 0, 0
def is_ascii(s) -> bool:
    """
    Check if a string is composed of only ASCII characters.

    Args:
        s: Value to be checked; non-strings (list, tuple, None, ...) are converted with str() first.

    Returns:
        bool: True if the string representation is composed only of ASCII characters, False otherwise.
    """
    # str.isascii() is a single C-level scan, replacing the manual per-character ord() check.
    return str(s).isascii()
def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0):
"""
Verify image size is a multiple of the given stride in each dimension. If the image size is not a multiple of the
stride, update it to the nearest multiple of the stride that is greater than or equal to the given floor value.
Args:
imgsz (int | cList[int]): Image size.
stride (int): Stride value.
min_dim (int): Minimum number of dimensions.
max_dim (int): Maximum number of dimensions.
floor (int): Minimum allowed value for image size.
Returns:
(List[int]): Updated image size.
"""
# Convert stride to integer if it is a tensor
stride = int(stride.max() if isinstance(stride, torch.Tensor) else stride)
# Convert image size to list if it is an integer
if isinstance(imgsz, int):
imgsz = [imgsz]
elif isinstance(imgsz, (list, tuple)):
imgsz = list(imgsz)
else:
raise TypeError(f"'imgsz={imgsz}' is of invalid type {type(imgsz).__name__}. "
f"Valid imgsz types are int i.e. 'imgsz=640' or list i.e. 'imgsz=[640,640]'")
# Apply max_dim
if len(imgsz) > max_dim:
msg = "'train' and 'val' imgsz must be an integer, while 'predict' and 'export' imgsz may be a [h, w] list " \
"or an integer, i.e. 'yolo export imgsz=640,480' or 'yolo export imgsz=640'"
if max_dim != 1:
raise ValueError(f'imgsz={imgsz} is not a valid image size. {msg}')
LOGGER.warning(f"WARNING тЪая╕П updating to 'imgsz={max(imgsz)}'. {msg}")
imgsz = [max(imgsz)]
# Make image size a multiple of the stride
sz = [max(math.ceil(x / stride) * stride, floor) for x in imgsz]
# Print warning message if image size was updated
if sz != imgsz:
LOGGER.warning(f'WARNING тЪая╕П imgsz={imgsz} must be multiple of max stride {stride}, updating to {sz}')
# Add missing dimensions if necessary
sz = [sz[0], sz[0]] if min_dim == 2 and len(sz) == 1 else sz[0] if min_dim == 1 and len(sz) == 1 else sz
return sz
def check_version(current: str = '0.0.0',
required: str = '0.0.0',
name: str = 'version',
hard: bool = False,
verbose: bool = False) -> bool:
"""
Check current version against the required version or range.
Args:
current (str): Current version or package name to get version from.
required (str): Required version or range (in pip-style format).
name (str, optional): Name to be used in warning message.
hard (bool, optional): If True, raise an AssertionError if the requirement is not met.
verbose (bool, optional): If True, print warning message if requirement is not met.
Returns:
(bool): True if requirement is met, False otherwise.
Example:
```python
# Check if current version is exactly 22.04
check_version(current='22.04', required='==22.04')
# Check if current version is greater than or equal to 22.04
check_version(current='22.10', required='22.04') # assumes '>=' inequality if none passed
# Check if current version is less than or equal to 22.04
check_version(current='22.04', required='<=22.04')
# Check if current version is between 20.04 (inclusive) and 22.04 (exclusive)
check_version(current='21.10', required='>20.04,<22.04')
```
"""
if not current: # if current is '' or None
LOGGER.warning(f'WARNING тЪая╕П invalid check_version({current}, {required}) requested, please check values.')
return True
elif not current[0].isdigit(): # current is package name rather than version string, i.e. current='ultralytics'
try:
name = current # assigned package name to 'name' arg
current = metadata.version(current) # get version string from package name
except metadata.PackageNotFoundError:
if hard:
raise ModuleNotFoundError(emojis(f'WARNING тЪая╕П {current} package is required but not installed'))
else:
return False
if not required: # if required is '' or None
return True
result = True
c = parse_version(current) # '1.2.3' -> (1, 2, 3)
for r in required.strip(',').split(','):
op, v = re.match(r'([^0-9]*)([\d.]+)', r).groups() # split '>=22.04' -> ('>=', '22.04')
v = parse_version(v) # '1.2.3' -> (1, 2, 3)
if op == '==' and c != v:
result = False
elif op == '!=' and c == v:
result = False
elif op in ('>=', '') and not (c >= v): # if no constraint passed assume '>=required'
result = False
elif op == '<=' and not (c <= v):
result = False
elif op == '>' and not (c > v):
result = False
elif op == '<' and not (c < v):
result = False
if not result:
warning_message = f'WARNING тЪая╕П {name}{op}{required} is required, but {name}=={current} is currently installed'
if hard:
raise ModuleNotFoundError(emojis(warning_message)) # assert version requirements met
if verbose:
LOGGER.warning(warning_message)
return result
def check_latest_pypi_version(package_name='ultralytics'):
"""
Returns the latest version of a PyPI package without downloading or installing it.
Parameters:
package_name (str): The name of the package to find the latest version for.
Returns:
(str): The latest version of the package.
"""
with contextlib.suppress(Exception):
requests.packages.urllib3.disable_warnings() # Disable the InsecureRequestWarning
response = requests.get(f'https://pypi.org/pypi/{package_name}/json', timeout=3)
if response.status_code == 200:
return response.json()['info']['version']
def check_pip_update_available():
"""
Checks if a new version of the ultralytics package is available on PyPI.
Returns:
(bool): True if an update is available, False otherwise.
"""
if ONLINE and is_pip_package():
with contextlib.suppress(Exception):
from ultralytics import __version__
latest = check_latest_pypi_version()
if check_version(__version__, f'<{latest}'): # check if current version is < latest version
LOGGER.info(f'New https://pypi.org/project/ultralytics/{latest} available ЁЯШГ '
f"Update with 'pip install -U ultralytics'")
return True
return False
@ThreadingLocked()
def check_font(font='Arial.ttf'):
"""
Find font locally or download to user's configuration directory if it does not already exist.
Args:
font (str): Path or name of font.
Returns:
file (Path): Resolved font file path.
"""
name = Path(font).name
# Check USER_CONFIG_DIR
file = USER_CONFIG_DIR / name
if file.exists():
return file
# Check system fonts
matches = [s for s in font_manager.findSystemFonts() if font in s]
if any(matches):
return matches[0]
# Download to USER_CONFIG_DIR if missing
url = f'https://ultralytics.com/assets/{name}'
if downloads.is_url(url):
downloads.safe_download(url=url, file=file)
return file
def check_python(minimum: str = '3.8.0') -> bool:
    """
    Check current python version against the required minimum version.

    Args:
        minimum (str): Required minimum version of python.

    Returns:
        (bool): True if the requirement is met (check_version raises otherwise, since hard=True).
    """
    current = platform.python_version()
    return check_version(current, minimum, name='Python ', hard=True)
@TryExcept()
def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=(), install=True, cmds=''):
"""
Check if installed dependencies meet YOLOv8 requirements and attempt to auto-update if needed.
Args:
requirements (Union[Path, str, List[str]]): Path to a requirements.txt file, a single package requirement as a
string, or a list of package requirements as strings.
exclude (Tuple[str]): Tuple of package names to exclude from checking.
install (bool): If True, attempt to auto-update packages that don't meet requirements.
cmds (str): Additional commands to pass to the pip install command when auto-updating.
Example:
```python
from ultralytics.utils.checks import check_requirements
# Check a requirements.txt file
check_requirements('path/to/requirements.txt')
# Check a single package
check_requirements('ultralytics>=8.0.0')
# Check multiple packages
check_requirements(['numpy', 'ultralytics>=8.0.0'])
```
"""
prefix = colorstr('red', 'bold', 'requirements:')
check_python() # check python version
check_torchvision() # check torch-torchvision compatibility
if isinstance(requirements, Path): # requirements.txt file
file = requirements.resolve()
assert file.exists(), f'{prefix} {file} not found, check failed.'
requirements = [f'{x.name}{x.specifier}' for x in parse_requirements(file) if x.name not in exclude]
elif isinstance(requirements, str):
requirements = [requirements]
pkgs = []
for r in requirements:
r_stripped = r.split('/')[-1].replace('.git', '') # replace git+https://org/repo.git -> 'repo'
match = re.match(r'([a-zA-Z0-9-_]+)([<>!=~]+.*)?', r_stripped)
name, required = match[1], match[2].strip() if match[2] else ''
try:
assert check_version(metadata.version(name), required) # exception if requirements not met
except (AssertionError, metadata.PackageNotFoundError):
pkgs.append(r)
s = ' '.join(f'"{x}"' for x in pkgs) # console string
if s:
if install and AUTOINSTALL: # check environment variable
n = len(pkgs) # number of packages updates
LOGGER.info(f"{prefix} Ultralytics requirement{'s' * (n > 1)} {pkgs} not found, attempting AutoUpdate...")
try:
t = time.time()
assert is_online(), 'AutoUpdate skipped (offline)'
LOGGER.info(subprocess.check_output(f'pip install --no-cache {s} {cmds}', shell=True).decode())
dt = time.time() - t
LOGGER.info(
f"{prefix} AutoUpdate success тЬЕ {dt:.1f}s, installed {n} package{'s' * (n > 1)}: {pkgs}\n"
f"{prefix} тЪая╕П {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n")
except Exception as e:
LOGGER.warning(f'{prefix} тЭМ {e}')
return False
else:
return False
return True
def check_torchvision():
    """
    Checks the installed versions of PyTorch and Torchvision to ensure they're compatible.

    This function checks the installed versions of PyTorch and Torchvision, and warns if they're incompatible according
    to the provided compatibility table based on:
    https://github.com/pytorch/vision#installation.

    The compatibility table is a dictionary where the keys are PyTorch versions and the values are lists of compatible
    Torchvision versions.
    """
    import torchvision

    # Compatibility table
    compatibility_table = {'2.0': ['0.15'], '1.13': ['0.14'], '1.12': ['0.13']}

    # Extract only the major and minor versions
    v_torch = '.'.join(torch.__version__.split('+')[0].split('.')[:2])
    v_torchvision = '.'.join(torchvision.__version__.split('+')[0].split('.')[:2])

    if v_torch in compatibility_table:
        compatible_versions = compatibility_table[v_torch]
        if all(v_torchvision != v for v in compatible_versions):
            # Consistency fix: every other warning in this module goes through
            # LOGGER.warning; plain print() bypassed the configured logging handlers.
            LOGGER.warning(f'WARNING тЪая╕П torchvision=={v_torchvision} is incompatible with torch=={v_torch}.\n'
                           f"Run 'pip install torchvision=={compatible_versions[0]}' to fix torchvision or "
                           "'pip install -U torch torchvision' to update both.\n"
                           'For a full compatibility table see https://github.com/pytorch/vision#installation')
def check_suffix(file='yolov8n.pt', suffix='.pt', msg=''):
    """Check file(s) for acceptable suffix, raising AssertionError on a mismatch."""
    if not (file and suffix):
        return
    if isinstance(suffix, str):
        suffix = (suffix, )
    files = file if isinstance(file, (list, tuple)) else [file]
    for f in files:
        s = Path(f).suffix.lower().strip()  # normalized extension of this file
        if s:
            assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}, not {s}'
def check_yolov5u_filename(file: str, verbose: bool = True):
    """Replace legacy YOLOv5 filenames with updated YOLOv5u filenames."""
    if 'yolov3' not in file and 'yolov5' not in file:
        return file  # not a YOLOv3/YOLOv5 filename, nothing to rewrite
    if 'u.yaml' in file:
        # i.e. yolov5nu.yaml -> yolov5n.yaml (configs keep the non-'u' name)
        return file.replace('u.yaml', '.yaml')
    if '.pt' in file and 'u' not in file:
        original_file = file
        file = re.sub(r'(.*yolov5([nsmlx]))\.pt', '\\1u.pt', file)  # i.e. yolov5n.pt -> yolov5nu.pt
        file = re.sub(r'(.*yolov5([nsmlx])6)\.pt', '\\1u.pt', file)  # i.e. yolov5n6.pt -> yolov5n6u.pt
        file = re.sub(r'(.*yolov3(|-tiny|-spp))\.pt', '\\1u.pt', file)  # i.e. yolov3-spp.pt -> yolov3-sppu.pt
        if file != original_file and verbose:
            LOGGER.info(
                f"PRO TIP ЁЯТб Replace 'model={original_file}' with new 'model={file}'.\nYOLOv5 'u' models are "
                f'trained with https://github.com/ultralytics/ultralytics and feature improved performance vs '
                f'standard YOLOv5 models trained with https://github.com/ultralytics/yolov5.\n')
    return file
def check_file(file, suffix='', download=True, hard=True):
"""Search/download file (if necessary) and return path."""
check_suffix(file, suffix) # optional
file = str(file).strip() # convert to string and strip spaces
file = check_yolov5u_filename(file) # yolov5n -> yolov5nu
if not file or ('://' not in file and Path(file).exists()): # exists ('://' check required in Windows Python<3.10)
return file
elif download and file.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://', 'tcp://')): # download
url = file # warning: Pathlib turns :// -> :/
file = url2file(file) # '%2F' to '/', split https://url.com/file.txt?auth
if Path(file).exists():
LOGGER.info(f'Found {clean_url(url)} locally at {file}') # file already exists
else:
downloads.safe_download(url=url, file=file, unzip=False)
return file
else: # search
files = glob.glob(str(ROOT / 'cfg' / '**' / file), recursive=True) # find file
if not files and hard:
raise FileNotFoundError(f"'{file}' does not exist")
elif len(files) > 1 and hard:
raise FileNotFoundError(f"Multiple files match '{file}', specify exact path: {files}")
return files[0] if len(files) else [] # return file
def check_yaml(file, suffix=('.yaml', '.yml'), hard=True):
"""Search/download YAML file (if necessary) and return path, checking suffix."""
return check_file(file, suffix, hard=hard)
def check_imshow(warn=False):
"""Check if environment supports image displays."""
try:
if LINUX:
assert 'DISPLAY' in os.environ and not is_docker() and not is_colab() and not is_kaggle()
cv2.imshow('test', np.zeros((8, 8, 3), dtype=np.uint8)) # show a small 8-pixel image
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
if warn:
LOGGER.warning(f'WARNING тЪая╕П Environment does not support cv2.imshow() or PIL Image.show()\n{e}')
return False
def check_yolo(verbose=True, device=''):
"""Return a human-readable YOLO software and hardware summary."""
import psutil
from ultralytics.utils.torch_utils import select_device
if is_jupyter():
if check_requirements('wandb', install=False):
os.system('pip uninstall -y wandb') # uninstall wandb: unwanted account creation prompt with infinite hang
if is_colab():
shutil.rmtree('sample_data', ignore_errors=True) # remove colab /sample_data directory
if verbose:
# System info
gib = 1 << 30 # bytes per GiB
ram = psutil.virtual_memory().total
total, used, free = shutil.disk_usage('/')
s = f'({os.cpu_count()} CPUs, {ram / gib:.1f} GB RAM, {(total - free) / gib:.1f}/{total / gib:.1f} GB disk)'
with contextlib.suppress(Exception): # clear display if ipython is installed
from IPython import display
display.clear_output()
else:
s = ''
select_device(device=device, newline=False)
LOGGER.info(f'Setup complete тЬЕ {s}')
def collect_system_info():
"""Collect and print relevant system information including OS, Python, RAM, CPU, and CUDA."""
import psutil
from ultralytics.utils import ENVIRONMENT, is_git_dir
from ultralytics.utils.torch_utils import get_cpu_info
ram_info = psutil.virtual_memory().total / (1024 ** 3) # Convert bytes to GB
check_yolo()
LOGGER.info(f"\n{'OS':<20}{platform.platform()}\n"
f"{'Environment':<20}{ENVIRONMENT}\n"
f"{'Python':<20}{sys.version.split()[0]}\n"
f"{'Install':<20}{'git' if is_git_dir() else 'pip' if is_pip_package() else 'other'}\n"
f"{'RAM':<20}{ram_info:.2f} GB\n"
f"{'CPU':<20}{get_cpu_info()}\n"
f"{'CUDA':<20}{torch.version.cuda if torch and torch.cuda.is_available() else None}\n")
for r in parse_requirements(package='ultralytics'):
try:
current = metadata.version(r.name)
is_met = 'тЬЕ ' if check_version(current, str(r.specifier), hard=True) else 'тЭМ '
except metadata.PackageNotFoundError:
current = '(not installed)'
is_met = 'тЭМ '
LOGGER.info(f'{r.name:<20}{is_met}{current}{r.specifier}')
def check_amp(model):
"""
This function checks the PyTorch Automatic Mixed Precision (AMP) functionality of a YOLOv8 model. If the checks
fail, it means there are anomalies with AMP on the system that may cause NaN losses or zero-mAP results, so AMP will
be disabled during training.
Args:
model (nn.Module): A YOLOv8 model instance.
Example:
```python
from ultralytics import YOLO
from ultralytics.utils.checks import check_amp
model = YOLO('yolov8n.pt').model.cuda()
check_amp(model)
```
Returns:
(bool): Returns True if the AMP functionality works correctly with YOLOv8 model, else False.
"""
device = next(model.parameters()).device # get model device
if device.type in ('cpu', 'mps'):
return False # AMP only used on CUDA devices
def amp_allclose(m, im):
"""All close FP32 vs AMP results."""
a = m(im, device=device, verbose=False)[0].boxes.data # FP32 inference
with torch.cuda.amp.autocast(True):
b = m(im, device=device, verbose=False)[0].boxes.data # AMP inference
del m
return a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5) # close to 0.5 absolute tolerance
im = ASSETS / 'bus.jpg' # image to check
prefix = colorstr('AMP: ')
LOGGER.info(f'{prefix}running Automatic Mixed Precision (AMP) checks with YOLOv8n...')
warning_msg = "Setting 'amp=True'. If you experience zero-mAP or NaN losses you can disable AMP with amp=False."
try:
from ultralytics import YOLO
assert amp_allclose(YOLO('yolov8n.pt'), im)
LOGGER.info(f'{prefix}checks passed тЬЕ')
except ConnectionError:
LOGGER.warning(f'{prefix}checks skipped тЪая╕П, offline and unable to download YOLOv8n. {warning_msg}')
except (AttributeError, ModuleNotFoundError):
LOGGER.warning(f'{prefix}checks skipped тЪая╕П. '
f'Unable to load YOLOv8n due to possible Ultralytics package modifications. {warning_msg}')
except AssertionError:
LOGGER.warning(f'{prefix}checks failed тЭМ. Anomalies were detected with AMP on your system that may lead to '
f'NaN losses or zero-mAP results, so AMP will be disabled during training.')
return False
return True
def git_describe(path=ROOT):  # path must be a directory
    """Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe, or '' on failure."""
    try:
        out = subprocess.check_output(f'git -C {path} describe --tags --long --always', shell=True)
        return out.decode()[:-1]  # drop the trailing newline
    except Exception:
        return ''
def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
"""Print function arguments (optional args dict)."""
def strip_auth(v):
"""Clean longer Ultralytics HUB URLs by stripping potential authentication information."""
return clean_url(v) if (isinstance(v, str) and v.startswith('http') and len(v) > 100) else v
x = inspect.currentframe().f_back # previous frame
file, _, func, _, _ = inspect.getframeinfo(x)
if args is None: # get args automatically
args, _, _, frm = inspect.getargvalues(x)
args = {k: v for k, v in frm.items() if k in args}
try:
file = Path(file).resolve().relative_to(ROOT).with_suffix('')
except ValueError:
file = Path(file).stem
s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')
LOGGER.info(colorstr(s) + ', '.join(f'{k}={strip_auth(v)}' for k, v in args.items()))
def cuda_device_count() -> int:
    """
    Get the number of NVIDIA GPUs available in the environment.

    Returns:
        (int): The number of NVIDIA GPUs available (0 when nvidia-smi is missing, fails, or prints junk).
    """
    try:
        # Ask nvidia-smi for the GPU count as bare CSV.
        output = subprocess.check_output(['nvidia-smi', '--query-gpu=count', '--format=csv,noheader,nounits'],
                                         encoding='utf-8')
    except (subprocess.CalledProcessError, FileNotFoundError):
        return 0  # nvidia-smi not installed or command failed
    try:
        # The first line of the output holds the count.
        return int(output.strip().split('\n')[0])
    except ValueError:
        return 0  # unexpected output format
def cuda_is_available() -> bool:
    """
    Check if CUDA is available in the environment.

    Returns:
        (bool): True if one or more NVIDIA GPUs are available, False otherwise.
    """
    return bool(cuda_device_count())
| ultralytics/ultralytics | ultralytics/utils/checks.py | checks.py | py | 25,954 | python | en | code | 15,778 | github-code | 90 |
18154777549 | # -*- coding: utf-8 -*-
def input_int_array():
    """Read one line from stdin and return its whitespace-separated tokens as ints (lazy map object)."""
    return map(int, input().split())
def answer(n, x, m):
    """Print the sum of the first n terms of a_1 = x, a_{i+1} = a_i^2 mod m.

    The sequence is eventually periodic, so we simulate until a value repeats
    and then account for whole cycles arithmetically. Reads and mutates the
    module-level 1-indexed buffer ``a`` (a[0]/a[1] are pre-sized by the caller).
    """
    a1 = x
    ai = a1
    loop_start = -1  # index where the cycle begins; -1 while undetected
    loop_end = n
    a[1] = a1
    ans = a1
    for i in range(2, n + 1):
        ai = ai*ai % m
        a.append(ai)
        ans += ai
        # O(i) scan for an earlier occurrence of ai (cycle detection)
        for j in range(1, i):
            if a[j] == ai:
                loop_start = j
                loop_end = i
                break
        if loop_start >= 0:
            break
    if loop_start < 0 or ai == 0:
        # No cycle within n terms, or the sequence collapsed to 0: plain sum
        print(ans)
        return
    ans -= ai  # the repeated term was double-counted above
    loop_count, remaining = divmod(n - (loop_start - 1), loop_end - loop_start)
    if loop_count > loop_start:
        start_ans = 0
        for i in range(1, loop_start):
            start_ans += a[i]
        loop_sum = ans - start_ans
        ans += loop_sum * (loop_count-1)
        for i in range(loop_start, loop_start + remaining):
            ans += a[i]
    else:
        # BUG FIX: loop_sum was never initialized in this branch, raising
        # UnboundLocalError whenever loop_count <= loop_start.
        loop_sum = 0
        for i in range(loop_start, loop_end):
            # NOTE(review): compares the absolute index i against the offset
            # 'remaining' (the other branch treats it as an offset) — confirm intent.
            if i < remaining:
                ans += a[i]
                loop_sum += a[i]
        ans += loop_sum * (loop_count-1)
    print(ans)
# Read n, x, m from stdin, then print the sum of the first n sequence terms.
n, x, m = input_int_array()
a = [0, 0]  # shared 1-indexed buffer of sequence values, filled by answer()
answer(n, x, m)
| Aasthaengg/IBMdataset | Python_codes/p02550/s165321081.py | s165321081.py | py | 1,174 | python | en | code | 0 | github-code | 90 |
15713645588 | # 파이썬 웹 스크랭핑 강의 11
# https://nadocoding.tistory.com/10
# 동적 페이지에 대한 웹스크래핑, 구글 무비
# 백그라운드로 크롬 실행 : headless Chrome
# selenium을 더 공부하고 싶으면 : selenium with python 을 검색
from selenium import webdriver

options = webdriver.ChromeOptions()
options.headless = True
# BUG FIX: was 'winddow-size=1920x1080' — misspelled key and wrong separator,
# so Chrome silently ignored the option. Chrome expects 'window-size=W,H'.
options.add_argument('window-size=1920,1080')
# Override the user agent so pages do not see "HeadlessChrome".
# BUG FIX: the 'user-agent=' key was missing, so the argument had no effect
# (the page still reported HeadlessChrome).
options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36')

browser = webdriver.Chrome(options=options)
browser.maximize_window()

# This page echoes back the user agent the browser actually sent.
url = 'https://www.whatismybrowser.com/detect/what-is-my-user-agent'
browser.get(url)

detected_value = browser.find_element_by_id('detected_value')
print(detected_value.text)
18113542539 | # coding: utf-8
# 二分探索(応用)
def loading(pack_list, k, q):
    """Return True iff all packages (kept in order) fit on at most k trucks of capacity q."""
    trucks_used = 0
    load = 0
    for weight in pack_list:
        if weight > q:
            return False  # a single package exceeds the truck capacity
        if load + weight > q:
            # Dispatch the current truck and start loading a fresh one
            trucks_used += 1
            load = 0
        load += weight
    if load > 0:
        trucks_used += 1  # count the last, partially filled truck
    return trucks_used <= k
if __name__ == "__main__":
    # Binary search for the smallest truck capacity that still fits all n
    # packages on k trucks (ALDS1 "Allocation" style problem).
    n, k = [int(i) for i in input().split()]
    pack = []
    for _ in range(n):
        pack.append(int(input()))
    # NOTE: 'min' and 'max' shadow the builtins here; they are the open lower
    # and closed upper bound of the capacity search interval.
    min = 0
    max = int(1E20)
    # min = sum(pack) // n
    # max = sum(pack)
    q = (max + min) // 2
    res = loading(pack, k, q)
    while min + 1 != max:
        # Feasible at q -> answer is <= q; infeasible -> answer is > q
        min, max = [min, q] if res else [q, max]
        q = (max + min) // 2
        res = loading(pack, k, q)
    print(max)
| Aasthaengg/IBMdataset | Python_codes/p02270/s108470321.py | s108470321.py | py | 789 | python | en | code | 0 | github-code | 90 |
39891099127 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 6 20:46:33 2018
@author: Ricardo Nunes
"""
def budgeting2(budget, products, wishlist):
    """Trim a wishlist so its total cost fits within *budget*.

    products: {name: unit_price}; wishlist: {name: desired_quantity}.
    Returns {name: quantity} of what can be bought. Strategy: repeatedly drop
    one unit of the cheapest remaining product until affordable; if that
    empties the list, restart and drop units of the most expensive product
    instead. (Portuguese identifiers: preco=price, minimo/maximo=min/max.)
    """
    new_products=list(products.items())
    new_products=sorted(new_products,key=valores)  # sort by unit price, ascending
    new_products=dict(new_products)
    preco={}        # unit price of each wishlist product still in play
    total=0
    other_list={}   # total cost (price * quantity) per wishlist product
    o_minimo=0
    for produto in new_products:
        if produto in wishlist:
            other_list[produto]=new_products[produto]*wishlist[produto]
            preco[produto]=new_products[produto]
    total=sum(other_list.values())
    sim=False
    # Snapshots, so the second strategy can restart from the full wishlist
    para_max=other_list.copy()
    preco_max=preco.copy()
    while total>budget:
        # Drop one unit of the cheapest product each round
        o_minimo=minimo(preco)
        other_list[o_minimo]=other_list[o_minimo]-preco[o_minimo]
        (other_list,sim)=limpar_0(other_list)
        if sim:
            # Product fully removed: stop considering its price
            del preco[o_minimo]
        total=sum(other_list.values())
    if other_list=={}:
        # Cheapest-first removed everything: retry dropping the most expensive
        other_list=para_max.copy()
        total=sum(other_list.values())
        preco=preco_max.copy()
        while total>budget:
            o_maximo=maximo(preco)
            other_list[o_maximo]=other_list[o_maximo]-preco[o_maximo]
            (other_list,sim)=limpar_0(other_list)
            if sim:
                del preco[o_maximo]
            total=sum(other_list.values())
    # Convert remaining totals back into unit quantities
    for produto in other_list:
        other_list[produto]=int(other_list[produto]/new_products[produto])
    return other_list
def limpar_0(adict):
    """Drop zero-valued entries; return (cleaned_dict, True-if-anything-was-zero)."""
    cleaned = {key: value for key, value in adict.items() if value != 0}
    had_zero = len(cleaned) != len(adict)
    return (cleaned, had_zero)
def minimo(adict):
    """Return the key whose value is smallest (first one wins on ties).

    BUG FIX: the original used a `minimo == 0` sentinel, which restarted the
    comparison whenever a value of 0 (or the initial state) was seen and could
    return the wrong key for zero/negative values; it also raised an
    accidental UnboundLocalError on an empty dict (now a clear ValueError).
    """
    return min(adict, key=adict.get)
def valores(products):
    """Sort key helper: return the price from a (name, price) pair."""
    return products[1]
def maximo(adict):
    """Return the key whose value is largest (first one wins on ties).

    BUG FIX: the original used a `maximo == 0` sentinel, which breaks when all
    values are zero or negative; max() with a key function handles every case
    and raises a clear ValueError on an empty dict instead of an accidental
    UnboundLocalError.
    """
    return max(adict, key=adict.get)
# Quick manual smoke checks, executed whenever this script is run/imported.
print (maximo({'laptop': 2000, 'jeans': 50}))
print(budgeting2(1000, {'laptop': 2000, 'jeans': 50}, {'laptop': 2,'jeans': 3}))
33092164471 | def is_palindrome(text):
    """Return True if *text* reads the same backwards, ignoring case and spaces."""
    # Convert the text to lowercase and remove spaces
    text = text.lower().replace(" ", "")
    # Compare the text with its reverse
    if text == text[::-1]:
        return True
    else:
        return False
# Ask for user input
word = input("Enter a word or phrase: ")
# Report whether the entered text is a palindrome.
if is_palindrome(word):
    print("It's a palindrome.")
else:
    print("It's not a palindrome.")
| escorcia21/WebAvanzada | reto2.py | reto2.py | py | 405 | python | en | code | 0 | github-code | 90 |
9777514830 | from brickv.plugin_system.comcu_plugin_base import COMCUPluginBase
from brickv.plugin_system.plugins.performance_stepper.ui_performance_stepper import Ui_PerformanceStepper
from brickv.bindings.bricklet_performance_stepper import BrickletPerformanceStepper
from brickv.bindings import ip_connection
from brickv.async_call import async_call
from brickv.load_pixmap import load_masked_pixmap
from brickv.monoflop import Monoflop
from brickv.plugin_system.plugins.stepper.speedometer import SpeedoMeter
from brickv.slider_spin_syncer import SliderSpinSyncer
from PyQt5.QtWidgets import QTreeWidget, QTreeWidgetItem, QWidget
from PyQt5.QtGui import QLinearGradient, QPainter, QColor
from PyQt5.QtCore import Qt, QTimer
registers = {
'General': [
('GCONF', 0x00, 'RW'),
('GSTAT', 0x01, 'R+C'),
('IFCNT', 0x02, 'R'),
('SLAVECONF', 0x03, ' W'),
('IOIN', 0x04, 'R'),
('X_COMPARE', 0x05, ' W'),
('OTP_PROG', 0x06, ' W'),
('OTP_READ', 0x07, 'R'),
('FACTORY_CONF', 0x08, 'RW'),
('SHORT_CONF', 0x09, ' W'),
('DRV_CONF', 0x0A, ' W'),
('GLOBAL_SCALER', 0x0B, ' W'),
('OFFSET_READ', 0x0C, 'R'),
],
'Velocity': [
('IHOLD_IRUN', 0x10, ' W'),
('TPOWERDOWN', 0x11, ' W'),
('TSTEP', 0x12, 'R'),
('TPWMTHRS', 0x13, ' W'),
('TCOOLTHRS', 0x14, ' W'),
('THIGH', 0x15, ' W'),
],
'Ramp Generator Motion': [
('RAMPMODE', 0x20, 'RW'),
('XACTUAL', 0x21, 'RW'),
('VACTUAL', 0x22, 'R'),
('VSTART', 0x23, ' W'),
('A1', 0x24, ' W'),
('V1', 0x25, ' W'),
('AMAX', 0x26, ' W'),
('VMAX', 0x27, ' W'),
('DMAX', 0x28, ' W'),
('D1', 0x2A, ' W'),
('VSTOP', 0x2B, ' W'),
('TZEROWAIT', 0x2C, ' W'),
('XTARGET', 0x2D, 'RW'),
],
'Ramp Generator Driver': [
('VDCMIN', 0x33, ' W'),
('SW_MODE', 0x34, 'RW'),
('RAMP_STAT', 0x35, 'R+WC'),
('XLATCH', 0x36, 'R'),
],
'Encoder': [
('ENCMODE', 0x38, 'RW'),
('X_ENC', 0x39, 'RW'),
('ENC_CONST', 0x3A, ' W'),
('ENC_STATUS', 0x3B, 'R+WC'),
('ENC_LATCH', 0x3C, 'R'),
('ENC_DEVIATION', 0x3D, ' W'),
],
'Micro Stepping': [
('MSLUT0', 0x60, ' W'),
('MSLUT1', 0x61, ' W'),
('MSLUT2', 0x62, ' W'),
('MSLUT3', 0x63, ' W'),
('MSLUT4', 0x64, ' W'),
('MSLUT5', 0x65, ' W'),
('MSLUT6', 0x66, ' W'),
('MSLUT7', 0x67, ' W'),
('MSLUTSEL', 0x68, ' W'),
('MSLUTSTART', 0x69, ' W'),
('MSCNT', 0x6A, 'R'),
('MSCURACT', 0x6B, 'R'),
],
'Driver': [
('CHOPCONF', 0x6C, 'RW'),
('COOLCONF', 0x6D, ' W'),
('DCCTRL', 0x6E, ' W'),
('DRV_STATUS', 0x6F, 'R'),
('PWMCONF', 0x70, ' W'),
('PWM_SCALE', 0x71, 'R'),
('PWM_AUTO', 0x72, 'R'),
('LOST_STEPS', 0x73, 'R'),
],
}
class PerformanceStepper(COMCUPluginBase, Ui_PerformanceStepper):
def __init__(self, *args):
COMCUPluginBase.__init__(self, BrickletPerformanceStepper, *args)
self.setupUi(self)
self.ps = self.device
self.update_timer = QTimer(self)
self.update_timer.timeout.connect(self.update_data)
self.update_counter = 0
# Init register access elements
self.parent_items = {}
for register_name in registers.keys():
self.parent_items[register_name] = QTreeWidgetItem([register_name])
for pi in self.parent_items.values():
self.tree_widget.addTopLevelItem(pi)
self.child_items = []
for name, rows in registers.items():
for row in rows:
child = QTreeWidgetItem([row[0], hex(row[1]), row[2], '0', ''])
self.child_items.append(child)
self.parent_items[name].addChild(child)
for pi in self.parent_items.values():
pi.setExpanded(True)
self.tree_widget.itemClicked.connect(self.register_item_clicked)
self.button_read_registers.clicked.connect(self.read_registers_clicked)
self.button_write_register.clicked.connect(self.write_register_clicked)
self.enable_checkbox.stateChanged.connect(self.enable_state_changed)
self.maximum_motor_current_spinbox.valueChanged.connect(self.maximum_motor_current_changed)
self.to_button.clicked.connect(self.to_button_clicked)
self.steps_button.clicked.connect(self.steps_button_clicked)
# Motion Configuration
self.velocity_start_syncer = SliderSpinSyncer(self.velocity_start_slider, self.velocity_start_spinbox, self.motion_changed)
self.velocity_stop_syncer = SliderSpinSyncer(self.velocity_stop_slider, self.velocity_stop_spinbox, self.motion_changed)
self.velocity_1_syncer = SliderSpinSyncer(self.velocity_1_slider, self.velocity_1_spinbox, self.motion_changed)
self.velocity_max_syncer = SliderSpinSyncer(self.velocity_max_slider, self.velocity_max_spinbox, self.motion_changed)
self.acceleration_1_syncer = SliderSpinSyncer(self.acceleration_1_slider, self.acceleration_1_spinbox, self.motion_changed)
self.acceleration_max_syncer = SliderSpinSyncer(self.acceleration_max_slider, self.acceleration_max_spinbox, self.motion_changed)
self.deceleration_max_syncer = SliderSpinSyncer(self.deceleration_max_slider, self.deceleration_max_spinbox, self.motion_changed)
self.deceleration_1_syncer = SliderSpinSyncer(self.deceleration_1_slider, self.deceleration_1_spinbox, self.motion_changed)
self.ramp_zero_wait_syncer = SliderSpinSyncer(self.ramp_zero_wait_slider, self.ramp_zero_wait_spinbox, self.motion_changed)
# Step Configuration
self.step_resolution_dropbox.currentIndexChanged.connect(self.step_configuration_changed)
self.interpolate_checkbox.stateChanged.connect(self.step_configuration_changed)
# Basic Configuration
self.standstill_current_spin.valueChanged.connect(self.basic_configuration_changed)
self.motor_run_current_spin.valueChanged.connect(self.basic_configuration_changed)
self.standstill_delay_time_spin.valueChanged.connect(self.basic_configuration_changed)
self.power_down_time_spin.valueChanged.connect(self.basic_configuration_changed)
self.stealth_threshold_spin.valueChanged.connect(self.basic_configuration_changed)
self.coolstep_threashold_spin.valueChanged.connect(self.basic_configuration_changed)
self.classic_threshold_spin.valueChanged.connect(self.basic_configuration_changed)
self.high_velocity_chopper_mode_checkbox.stateChanged.connect(self.basic_configuration_changed)
# Spreadcycle Configuration
self.slow_decay_duration_spin.valueChanged.connect(self.spreadcycle_configuration_changed)
self.fast_decay_duration_spin.valueChanged.connect(self.spreadcycle_configuration_changed)
self.hysteresis_start_value_spin.valueChanged.connect(self.spreadcycle_configuration_changed)
self.hysteresis_end_value_spin.valueChanged.connect(self.spreadcycle_configuration_changed)
self.sine_wave_offset_spin.valueChanged.connect(self.spreadcycle_configuration_changed)
self.chopper_mode_combo.currentIndexChanged.connect(self.spreadcycle_configuration_changed)
self.comparator_blank_time_combo.currentIndexChanged.connect(self.spreadcycle_configuration_changed)
self.high_velocity_fullstep_checkbox.stateChanged.connect(self.spreadcycle_configuration_changed)
self.fast_decay_without_comparator_checkbox.stateChanged.connect(self.spreadcycle_configuration_changed)
# Stealth Configuration
self.enable_stealth_checkbox.stateChanged.connect(self.stealth_configuration_changed)
self.offset_spin.valueChanged.connect(self.stealth_configuration_changed)
self.gradient_spin.valueChanged.connect(self.stealth_configuration_changed)
self.enable_autoscale_checkbox.stateChanged.connect(self.stealth_configuration_changed)
self.enable_autogradient_checkbox.stateChanged.connect(self.stealth_configuration_changed)
self.freewheel_mode_combo.currentIndexChanged.connect(self.stealth_configuration_changed)
self.regulation_loop_gradient_spin.valueChanged.connect(self.stealth_configuration_changed)
self.amplitude_limit_spin.valueChanged.connect(self.stealth_configuration_changed)
# Coolstep Configuration
self.minimum_stallguard_value_spin.valueChanged.connect(self.coolstep_configuration_changed)
self.maximum_stallguard_value_spin.valueChanged.connect(self.coolstep_configuration_changed)
self.current_up_step_width_combo.currentIndexChanged.connect(self.coolstep_configuration_changed)
self.current_down_step_width_combo.currentIndexChanged.connect(self.coolstep_configuration_changed)
self.minimum_current_combo.currentIndexChanged.connect(self.coolstep_configuration_changed)
self.stallguard_threshold_value_spin.valueChanged.connect(self.coolstep_configuration_changed)
self.stallguard_mode_combo.currentIndexChanged.connect(self.coolstep_configuration_changed)
# Short Configuration
self.disable_short_to_voltage_protection_checkbox.stateChanged.connect(self.short_configuration_changed)
self.disable_short_to_ground_protection_checkbox.stateChanged.connect(self.short_configuration_changed)
self.short_to_voltage_level_spin.valueChanged.connect(self.short_configuration_changed)
self.short_to_ground_level_spin.valueChanged.connect(self.short_configuration_changed)
self.spike_filter_bandwidth_combo.currentIndexChanged.connect(self.short_configuration_changed)
self.short_detection_delay_checkbox.stateChanged.connect(self.short_configuration_changed)
self.filter_time_combo.currentIndexChanged.connect(self.short_configuration_changed)
    def motion_changed(self, _):
        """Collect all motion-widget values and push them to the Bricklet in one call."""
        ramping_mode = self.ramping_mode_combobox.currentIndex()
        velocity_start = self.velocity_start_slider.value()
        acceleration_1 = self.acceleration_1_slider.value()
        velocity_1 = self.velocity_1_slider.value()
        acceleration_max = self.acceleration_max_slider.value()
        velocity_max = self.velocity_max_slider.value()
        deceleration_max = self.deceleration_max_slider.value()
        deceleration_1 = self.deceleration_1_slider.value()
        velocity_stop = self.velocity_stop_slider.value()
        ramp_zero_wait = self.ramp_zero_wait_slider.value()
        # Argument order must match set_motion_configuration's signature
        motion_configuration = (ramping_mode, velocity_start, acceleration_1, velocity_1, acceleration_max, velocity_max, deceleration_max, deceleration_1, velocity_stop, ramp_zero_wait)
        self.ps.set_motion_configuration(*motion_configuration)
def register_item_clicked(self, item, column):
try:
reg = int(item.text(1), 16)
value = int(item.text(3))
self.spinbox_write_register.setValue(reg)
self.spinbox_write_value.setValue(value)
except:
pass
    def read_registers_clicked(self):
        """Read every listed register from the device and display its value in the tree."""
        for child in self.child_items:
            reg = int(child.text(1), 0)  # base 0: address column is written as hex ('0x..')
            ret = self.ps.read_register(reg)
            child.setText(3, str(ret.value))
def write_register_clicked(self):
reg = self.spinbox_write_register.value()
value = self.spinbox_write_value.value()
ret = self.ps.write_register(reg, value)
def start(self):
async_call(self.ps.get_motion_configuration, None, self.get_motion_configuration_async, self.increase_error_count)
async_call(self.ps.get_step_configuration, None, self.get_step_configuration_async, self.increase_error_count)
async_call(self.ps.get_motor_current, None, self.get_motor_current_async, self.increase_error_count)
async_call(self.ps.get_enabled, None, self.get_enabled_async, self.increase_error_count)
async_call(self.ps.get_step_configuration, None, self.get_step_configuration_async, self.increase_error_count)
async_call(self.ps.get_basic_configuration, None, self.get_basic_configuration_async, self.increase_error_count)
async_call(self.ps.get_spreadcycle_configuration, None, self.get_spreadcycle_configuration_async, self.increase_error_count)
async_call(self.ps.get_stealth_configuration, None, self.get_stealth_configuration_async, self.increase_error_count)
async_call(self.ps.get_coolstep_configuration, None, self.get_coolstep_configuration_async, self.increase_error_count)
async_call(self.ps.get_short_configuration, None, self.get_short_configuration_async, self.increase_error_count)
self.update_timer.start(100)
    def stop(self):
        """Stop the periodic update timer."""
        self.update_timer.stop()
    def destroy(self):
        """No extra resources to release."""
        pass
    @staticmethod
    def has_device_identifier(device_identifier):
        """Return True if *device_identifier* matches the Performance Stepper Bricklet."""
        return device_identifier == BrickletPerformanceStepper.DEVICE_IDENTIFIER
    def update_data(self):
        """Poll fast-changing values; every 10th tick also poll the slow-changing ones."""
        async_call(self.ps.get_remaining_steps, None, self.get_remaining_steps_async, self.increase_error_count)
        async_call(self.ps.get_current_position, None, self.get_current_position_async, self.increase_error_count)
        async_call(self.ps.get_current_velocity, None, self.get_current_velocity_async, self.increase_error_count)
        self.update_counter += 1
        if self.update_counter % 10 == 0:
            # Voltage/status/temperature change slowly; the timer fires every
            # 100 ms, so these are refreshed about once per second.
            async_call(self.ps.get_input_voltage, None, self.get_input_voltage_async, self.increase_error_count)
            async_call(self.ps.get_driver_status, None, self.get_driver_status_async, self.increase_error_count)
            async_call(self.ps.get_temperature, None, self.get_temperature_async, self.increase_error_count)
    def to_button_clicked(self):
        """Drive to the absolute position entered in the 'to' spinbox; ignore connection errors."""
        drive_to = self.to_spin.value()
        try:
            self.ps.set_target_position(drive_to)
        except ip_connection.Error:
            return
    def steps_button_clicked(self):
        """Drive the relative number of steps entered in the spinbox; ignore connection errors."""
        drive_steps = self.steps_spin.value()
        try:
            self.ps.set_steps(drive_steps)
        except ip_connection.Error:
            return
    def enable_state_changed(self, state):
        """Enable or disable the stepper driver to match the checkbox state."""
        self.ps.set_enabled(state == Qt.Checked)
    def maximum_motor_current_changed(self, value):
        """Forward the new maximum motor current setting to the device; ignore connection errors."""
        try:
            self.ps.set_motor_current(value)
        except ip_connection.Error:
            return
    def step_configuration_changed(self, _):
        """Send the step resolution and interpolation settings to the device; ignore connection errors."""
        step_resolution = self.step_resolution_dropbox.currentIndex()
        interpolation = self.interpolate_checkbox.isChecked()
        try:
            self.ps.set_step_configuration(step_resolution, interpolation)
        except ip_connection.Error:
            return
def basic_configuration_changed(self, _):
standstill_current = self.standstill_current_spin.value()
motor_run_current = self.motor_run_current_spin.value()
standstill_delay_time = self.standstill_delay_time_spin.value()
power_down_time = self.power_down_time_spin.value()
stealth_threshold = self.stealth_threshold_spin.value()
coolstep_threshold = self.coolstep_threashold_spin.value()
classic_threshold = self.classic_threshold_spin.value()
high_velocity_chopper_mode = self.high_velocity_chopper_mode_checkbox.isChecked()
try:
self.ps.set_basic_configuration(standstill_current, motor_run_current, standstill_delay_time, power_down_time, stealth_threshold, coolstep_threshold, classic_threshold, high_velocity_chopper_mode)
except ip_connection.Error:
return
def spreadcycle_configuration_changed(self, _):
slow_decay_duration = self.slow_decay_duration_spin.value()
fast_decay_duration = self.fast_decay_duration_spin.value()
hysteresis_start_value = self.hysteresis_start_value_spin.value()
hysteresis_end_value = self.hysteresis_end_value_spin.value()
sine_wave_offset = self.sine_wave_offset_spin.value()
chopper_mode = self.chopper_mode_combo.currentIndex()
comparator_blank_time = self.comparator_blank_time_combo.currentIndex()
high_velocity_fullstep = self.high_velocity_fullstep_checkbox.isChecked()
fast_decay_without_comparator = self.fast_decay_without_comparator_checkbox.isChecked()
try:
self.ps.set_spreadcycle_configuration(slow_decay_duration, high_velocity_fullstep, fast_decay_duration, hysteresis_start_value, hysteresis_end_value, sine_wave_offset, chopper_mode, comparator_blank_time, fast_decay_without_comparator)
except ip_connection.Error:
return
def stealth_configuration_changed(self, _):
enable_stealth = self.enable_stealth_checkbox.isChecked()
offset = self.offset_spin.value()
gradient = self.gradient_spin.value()
enable_autoscale = self.enable_autoscale_checkbox.isChecked()
enable_autogradient = self.enable_autogradient_checkbox.isChecked()
freewheel_mode = self.freewheel_mode_combo.currentIndex()
regulation_loop_gradient = self.regulation_loop_gradient_spin.value()
amplitude_limit = self.amplitude_limit_spin.value()
try:
self.ps.set_stealth_configuration(enable_stealth, offset, gradient, enable_autoscale, enable_autogradient, freewheel_mode, regulation_loop_gradient, amplitude_limit)
except ip_connection.Error:
return
def coolstep_configuration_changed(self, _):
minimum_stallguard_value = self.minimum_stallguard_value_spin.value()
maximum_stallguard_value = self.maximum_stallguard_value_spin.value()
current_up_step_width = self.current_up_step_width_combo.currentIndex()
current_down_step_width = self.current_down_step_width_combo.currentIndex()
minimum_current = self.minimum_current_combo.currentIndex()
stallguard_threshold_value = self.stallguard_threshold_value_spin.value()
stallguard_mode = self.stallguard_mode_combo.currentIndex()
try:
self.ps.set_coolstep_configuration(minimum_stallguard_value, maximum_stallguard_value, current_up_step_width, current_down_step_width, minimum_current, stallguard_threshold_value, stallguard_mode)
except ip_connection.Error:
return
def short_configuration_changed(self, _):
disable_short_to_voltage_protection = self.disable_short_to_voltage_protection_checkbox.isChecked()
disable_short_to_ground_protection = self.disable_short_to_ground_protection_checkbox.isChecked()
short_to_voltage_level = self.short_to_voltage_level_spin.value()
short_to_ground_level = self.short_to_ground_level_spin.value()
spike_filter_bandwidth = self.spike_filter_bandwidth_combo.currentIndex()
short_detection_delay = self.short_detection_delay_checkbox.isChecked()
filter_time = self.filter_time_combo.currentIndex()
try:
self.ps.set_short_configuration(disable_short_to_voltage_protection, disable_short_to_ground_protection, short_to_voltage_level, short_to_ground_level, spike_filter_bandwidth, short_detection_delay, filter_time)
except ip_connection.Error:
return
def get_remaining_steps_async(self, remaining_steps):
self.remaining_steps_label.setText(str(remaining_steps))
def get_current_position_async(self, current_position):
self.position_label.setText(str(current_position))
def get_current_velocity_async(self, current_velocity):
self.velocity_label.setText(str(current_velocity))
def get_input_voltage_async(self, input_voltage):
self.input_voltage_label.setText('{0:.1f}V'.format(input_voltage/1000))
def get_temperature_async(self, temperature):
self.status_temperature.setText('{0:.1f}°C'.format(temperature/10))
def get_driver_status_async(self, driver_status):
if driver_status.open_load == 0:
self.status_open_load.setText('No')
elif driver_status.open_load == 1:
self.status_open_load.setText('Phase A')
elif driver_status.open_load == 2:
self.status_open_load.setText('Phase B')
elif driver_status.open_load == 3:
self.status_open_load.setText('Phase A and B')
else:
self.status_open_load.setText('Unknown')
if driver_status.short_to_ground == 0:
self.status_short_to_ground.setText('No')
elif driver_status.short_to_ground == 1:
self.status_short_to_ground.setText('Phase A')
elif driver_status.short_to_ground == 2:
self.status_short_to_ground.setText('Phase B')
elif driver_status.short_to_ground == 3:
self.status_short_to_ground.setText('Phase A and B')
else:
self.status_short_to_ground.setText('Unknown')
if driver_status.over_temperature == 0:
self.status_over_temperature.setText('No')
elif driver_status.over_temperature == 1:
self.status_over_temperature.setText('<font color=yellow>Warning</font>')
elif driver_status.over_temperature == 2:
self.status_over_temperature.setText('<font color=red>Limit</font>')
if driver_status.motor_stalled:
self.status_motor_stalled.setText('Yes')
else:
self.status_motor_stalled.setText('No')
self.status_actual_motor_current.setText(str(driver_status.actual_motor_current))
if driver_status.full_step_active:
self.status_full_step_active.setText('Yes')
else:
self.status_full_step_active.setText('No')
self.status_stallguard_result.setText(str(driver_status.stallguard_result))
self.status_stealth_voltage_amplitude.setText(str(driver_status.stealth_voltage_amplitude))
def get_enabled_async(self, enabled):
old_state = self.enable_checkbox.blockSignals(True)
self.enable_checkbox.setChecked(enabled)
self.enable_checkbox.blockSignals(old_state)
def get_motor_current_async(self, value):
old_state = self.maximum_motor_current_spinbox.blockSignals(True)
self.maximum_motor_current_spinbox.setValue(value)
self.maximum_motor_current_spinbox.blockSignals(old_state)
def get_step_configuration_async(self, conf):
old_state = self.step_resolution_dropbox.blockSignals(True)
self.step_resolution_dropbox.setCurrentIndex(conf.step_resolution)
self.step_resolution_dropbox.blockSignals(old_state)
old_state = self.interpolate_checkbox.blockSignals(True)
self.interpolate_checkbox.setChecked(conf.interpolation)
self.interpolate_checkbox.blockSignals(old_state)
def get_motion_configuration_async(self, conf):
self.velocity_start_syncer.set_value(conf.velocity_start)
self.acceleration_1_syncer.set_value(conf.acceleration_1)
self.velocity_1_syncer.set_value(conf.velocity_1)
self.acceleration_max_syncer.set_value(conf.acceleration_max)
self.velocity_max_syncer.set_value(conf.velocity_max)
self.deceleration_max_syncer.set_value(conf.deceleration_max)
self.deceleration_1_syncer.set_value(conf.deceleration_1)
self.velocity_stop_syncer.set_value(conf.velocity_stop)
self.ramp_zero_wait_syncer.set_value(conf.ramp_zero_wait)
self.ramping_mode_combobox.setCurrentIndex(conf.ramping_mode)
def get_basic_configuration_async(self, conf):
old_state = self.standstill_current_spin.blockSignals(True)
self.standstill_current_spin.setValue(conf.standstill_current)
self.standstill_current_spin.blockSignals(old_state)
old_state = self.motor_run_current_spin.blockSignals(True)
self.motor_run_current_spin.setValue(conf.motor_run_current)
self.motor_run_current_spin.blockSignals(old_state)
old_state = self.standstill_delay_time_spin.blockSignals(True)
self.standstill_delay_time_spin.setValue(conf.standstill_delay_time)
self.standstill_delay_time_spin.blockSignals(old_state)
old_state = self.power_down_time_spin.blockSignals(True)
self.power_down_time_spin.setValue(conf.power_down_time)
self.power_down_time_spin.blockSignals(old_state)
old_state = self.stealth_threshold_spin.blockSignals(True)
self.stealth_threshold_spin.setValue(conf.stealth_threshold)
self.stealth_threshold_spin.blockSignals(old_state)
old_state = self.coolstep_threashold_spin.blockSignals(True)
self.coolstep_threashold_spin.setValue(conf.coolstep_threshold)
self.coolstep_threashold_spin.blockSignals(old_state)
old_state = self.classic_threshold_spin.blockSignals(True)
self.classic_threshold_spin.setValue(conf.classic_threshold)
self.classic_threshold_spin.blockSignals(old_state)
old_state = self.high_velocity_chopper_mode_checkbox.blockSignals(True)
self.high_velocity_chopper_mode_checkbox.setChecked(conf.high_velocity_chopper_mode)
self.high_velocity_chopper_mode_checkbox.blockSignals(old_state)
def get_spreadcycle_configuration_async(self, conf):
old_state = self.slow_decay_duration_spin.blockSignals(True)
self.slow_decay_duration_spin.setValue(conf.slow_decay_duration)
self.slow_decay_duration_spin.blockSignals(old_state)
old_state = self.fast_decay_duration_spin.blockSignals(True)
self.fast_decay_duration_spin.setValue(conf.fast_decay_duration)
self.fast_decay_duration_spin.blockSignals(old_state)
old_state = self.hysteresis_start_value_spin.blockSignals(True)
self.hysteresis_start_value_spin.setValue(conf.hysteresis_start_value)
self.hysteresis_start_value_spin.blockSignals(old_state)
old_state = self.hysteresis_end_value_spin.blockSignals(True)
self.hysteresis_end_value_spin.setValue(conf.hysteresis_end_value)
self.hysteresis_end_value_spin.blockSignals(old_state)
old_state = self.sine_wave_offset_spin.blockSignals(True)
self.sine_wave_offset_spin.setValue(conf.sine_wave_offset)
self.sine_wave_offset_spin.blockSignals(old_state)
old_state = self.chopper_mode_combo.blockSignals(True)
self.chopper_mode_combo.setCurrentIndex(conf.chopper_mode)
self.chopper_mode_combo.blockSignals(old_state)
old_state = self.standstill_current_spin.blockSignals(True)
self.comparator_blank_time_combo.setCurrentIndex(conf.comparator_blank_time)
self.standstill_current_spin.blockSignals(old_state)
old_state = self.high_velocity_fullstep_checkbox.blockSignals(True)
self.high_velocity_fullstep_checkbox.setChecked(conf.high_velocity_fullstep)
self.high_velocity_fullstep_checkbox.blockSignals(old_state)
old_state = self.fast_decay_without_comparator_checkbox.blockSignals(True)
self.fast_decay_without_comparator_checkbox.setChecked(conf.fast_decay_without_comparator)
self.fast_decay_without_comparator_checkbox.blockSignals(old_state)
def get_stealth_configuration_async(self, conf):
old_state = self.enable_stealth_checkbox.blockSignals(True)
self.enable_stealth_checkbox.setChecked(conf.enable_stealth)
self.enable_stealth_checkbox.blockSignals(old_state)
old_state = self.offset_spin.blockSignals(True)
self.offset_spin.setValue(conf.offset)
self.offset_spin.blockSignals(old_state)
old_state = self.gradient_spin.blockSignals(True)
self.gradient_spin.setValue(conf.gradient)
self.gradient_spin.blockSignals(old_state)
old_state = self.enable_autoscale_checkbox.blockSignals(True)
self.enable_autoscale_checkbox.setChecked(conf.enable_autoscale)
self.enable_autoscale_checkbox.blockSignals(old_state)
old_state = self.enable_autogradient_checkbox.blockSignals(True)
self.enable_autogradient_checkbox.setChecked(conf.enable_autogradient)
self.enable_autogradient_checkbox.blockSignals(old_state)
old_state = self.freewheel_mode_combo.blockSignals(True)
self.freewheel_mode_combo.setCurrentIndex(conf.freewheel_mode)
self.freewheel_mode_combo.blockSignals(old_state)
old_state = self.regulation_loop_gradient_spin.blockSignals(True)
self.regulation_loop_gradient_spin.setValue(conf.regulation_loop_gradient)
self.regulation_loop_gradient_spin.blockSignals(old_state)
old_state = self.amplitude_limit_spin.blockSignals(True)
self.amplitude_limit_spin.setValue(conf.amplitude_limit)
self.amplitude_limit_spin.blockSignals(old_state)
def get_coolstep_configuration_async(self, conf):
old_state = self.minimum_stallguard_value_spin.blockSignals(True)
self.minimum_stallguard_value_spin.setValue(conf.minimum_stallguard_value)
self.minimum_stallguard_value_spin.blockSignals(old_state)
old_state = self.maximum_stallguard_value_spin.blockSignals(True)
self.maximum_stallguard_value_spin.setValue(conf.maximum_stallguard_value)
self.maximum_stallguard_value_spin.blockSignals(old_state)
old_state = self.current_up_step_width_combo.blockSignals(True)
self.current_up_step_width_combo.setCurrentIndex(conf.current_up_step_width)
self.current_up_step_width_combo.blockSignals(old_state)
old_state = self.current_down_step_width_combo.blockSignals(True)
self.current_down_step_width_combo.setCurrentIndex(conf.current_down_step_width)
self.current_down_step_width_combo.blockSignals(old_state)
old_state = self.minimum_current_combo.blockSignals(True)
self.minimum_current_combo.setCurrentIndex(conf.minimum_current)
self.minimum_current_combo.blockSignals(old_state)
old_state = self.stallguard_threshold_value_spin.blockSignals(True)
self.stallguard_threshold_value_spin.setValue(conf.stallguard_threshold_value)
self.stallguard_threshold_value_spin.blockSignals(old_state)
old_state = self.stallguard_mode_combo.blockSignals(True)
self.stallguard_mode_combo.setCurrentIndex(conf.stallguard_mode)
self.stallguard_mode_combo.blockSignals(old_state)
def get_short_configuration_async(self, conf):
old_state = self.disable_short_to_voltage_protection_checkbox.blockSignals(True)
self.disable_short_to_voltage_protection_checkbox.setChecked(conf.disable_short_to_voltage_protection)
self.disable_short_to_voltage_protection_checkbox.blockSignals(old_state)
old_state = self.disable_short_to_ground_protection_checkbox.blockSignals(True)
self.disable_short_to_ground_protection_checkbox.setChecked(conf.disable_short_to_ground_protection)
self.disable_short_to_ground_protection_checkbox.blockSignals(old_state)
old_state = self.short_to_voltage_level_spin.blockSignals(True)
self.short_to_voltage_level_spin.setValue(conf.short_to_voltage_level)
self.short_to_voltage_level_spin.blockSignals(old_state)
old_state = self.short_to_ground_level_spin.blockSignals(True)
self.short_to_ground_level_spin.setValue(conf.short_to_ground_level)
self.short_to_ground_level_spin.blockSignals(old_state)
old_state = self.spike_filter_bandwidth_combo.blockSignals(True)
self.spike_filter_bandwidth_combo.setCurrentIndex(conf.spike_filter_bandwidth)
self.spike_filter_bandwidth_combo.blockSignals(old_state)
old_state = self.short_detection_delay_checkbox.blockSignals(True)
self.short_detection_delay_checkbox.setChecked(conf.short_detection_delay)
self.short_detection_delay_checkbox.blockSignals(old_state)
old_state = self.filter_time_combo.blockSignals(True)
self.filter_time_combo.setCurrentIndex(conf.filter_time)
self.filter_time_combo.blockSignals(old_state) | Tinkerforge/brickv | src/brickv/plugin_system/plugins/performance_stepper/performance_stepper.py | performance_stepper.py | py | 32,865 | python | en | code | 18 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.