blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f80820c5ba23c71fba0af71458c2634726d9952a | Python | daedalaus/practice | /Python高效开发实战——Django、Tornado、Flask、Twisted/src/chapter7/async_http_client.py | UTF-8 | 1,100 | 2.796875 | 3 | [] | no_license | from tornado import gen
from tornado.ioloop import IOLoop
from tornado.httpclient import HTTPClient, AsyncHTTPClient
def synchronous_visit():
    """Fetch baidu.com with the blocking HTTPClient and print the body."""
    http_client = HTTPClient()
    response = http_client.fetch('http://www.baidu.com')
    print(response.body)
def handle_response(response):
    """Callback used by the asynchronous fetch: print the response body."""
    print(response.body)
def asynchronous_visit():
    """Start a non-blocking fetch; handle_response runs when it completes.

    Requires a running IOLoop, so calling this directly does nothing useful.
    """
    http_client = AsyncHTTPClient()
    http_client.fetch('http://www.baidu.com', callback=handle_response)
@gen.coroutine
def coroutine_visit():
    """Coroutine-style fetch: `yield` suspends until the response arrives."""
    http_client = AsyncHTTPClient()
    response = yield http_client.fetch('http://www.baidu.com')
    print(response.body)
@gen.coroutine
def outer_coroutine():
    """Demonstrates that one coroutine can call another by yielding it."""
    print('start call another coroutine')
    yield coroutine_visit()
    print('end of outer_couroutine')
def func_normal():
    """Bridge from plain synchronous code into a coroutine via run_sync."""
    print('start to call a coroutine ')
    # run_sync spins an IOLoop until the coroutine finishes.
    IOLoop.current().run_sync(lambda: coroutine_visit())
    print('end of calling a coroutine')
if __name__ == '__main__':
    # Only the blocking client and the run_sync bridge work when invoked
    # directly; the purely async variants need an already-running IOLoop.
    # synchronous_visit() # yes
    # asynchronous_visit() # no
    # coroutine_visit() # no
    # outer_coroutine() # no
    func_normal() # yes
| true |
db056594a5ce52a74c01ad69d51350d38f5ff510 | Python | maldonadoangel/PythonPractice | /ejercicioLibreria/main.py | UTF-8 | 510 | 4.09375 | 4 | [] | no_license | #Solicite al usuario que ingrese la informacion de el libro, imprima todos los datos registrados al final
# Ask the user for the book's details, then echo everything back.
nombre = input('Ingrese el nombre del libro: ')
numeroIdentificacion = int(input('Ingrese el id del libro: '))
precio = float(input('Ingrese el precio del libro: '))
# NOTE(review): envio is kept as the raw string the user typed, not parsed
# into an actual bool — confirm that is intended.
envio = input('El envio es Gratuito? (True/False): ')
print()
print(f'El nombre del libro es: {nombre}')
print(f'El numero de id: {numeroIdentificacion}')
print(f'El precio del libro es: {precio}')
print(f'Tiene envio gratis? {envio}')
| true |
b8bad66956d931d3efa33e8b042681f669fff87a | Python | wangweihao/CloudBackup | /Server/BalanceServer/newbalanceServer.py | UTF-8 | 2,338 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
#coding=utf-8
import socket
import select
import time
import threading
# NOTE: Python 2 script (print statements). A non-blocking select()-based
# balance server that accepts "work server" connections and pings them.
# Create the server socket.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Non-blocking mode.
server.setblocking(False)
# Allow the port to be reused.
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Set the ip and port and bind.
server_address = ('192.168.20.184', 12345)
server.bind(server_address)
# Listen with a backlog of 10.
server.listen(10)
inputs = [server]
outputs = []
message_queues = {}
workServer_fd = []
timeout = 20
# Thread that periodically sends a signal to every connected work server.
def threadFunc():
    while 1:
        time.sleep(4)
        print "send signal"
        for fd in workServer_fd:
            fd.send("send signal")

tid = threading.Thread(target = threadFunc)
tid.start()
while 1:
    #print 'waiting for next event'
    # Get the three event sets returned by select: read, write, exceptional.
    readable, writable, exceptional = select.select(inputs, outputs, inputs, timeout)
    # The timeout expired and no event occurred.
    if not (readable or writable or exceptional):
        print 'time out'
        continue;
    # Handle the readable event set.
    for s in readable:
        # The listening socket is readable: a new connection arrived.
        if s is server:
            # Get the connection and the client address.
            connection, client_address = s.accept()
            # Add it to the work-server set so the timer thread signals it.
            workServer_fd.append(connection)
            #connection.send('send')
            print " connection from ", client_address
            # Non-blocking.
            connection.setblocking(False)
            inputs.append(connection)
        else:
            date = s.recv(1024)
            if date:
                print " received date from ", s.getpeername()
                print date
                if s not in outputs:
                    outputs.append(s)
            else:
                # Empty read: the peer closed the connection.
                print " closing", client_address
                if s in outputs:
                    outputs.remove(s)
                inputs.remove(s)
                s.close()
                # NOTE(review): message_queues is never populated, so this
                # del raises KeyError on the first disconnect — confirm.
                del message_queues[s]
    # Handle exceptional conditions.
    for s in exceptional:
        print "exception condition on", s.getpeername()
        inputs.remove(s)
        if s in outputs:
            outputs.remove(s)
        s.close()
        del message_queues[s]
# Unreachable: the loop above never exits.
tid.join()
| true |
97bb4b21c89d0d3664fe6825d0d156db0e2a1203 | Python | abelfp/radiative_transfer_amcvn | /amcvn_3d.py | UTF-8 | 4,013 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env python3
"""
amcvn_3d.py - Main script for running the radiative transfer solution to AM CVn.
15Jun18 - Abel Flores Prieto
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from time import perf_counter
from packages import general_formulas as gf
from packages import radiative_class as rc
def numbers():
    """Ask how many photon paths to compute; sets the global ``n_rays``.

    An empty input selects the default of 300; otherwise an integer >= 200
    is required, and the prompt repeats until a valid value is entered.
    """
    global n_rays
    while True:
        try:
            print("How many photon paths do you want to compute?")
            print("Or hit <Enter> for a default of 300.")
            n_rays = input("> ")
            if n_rays == '':
                n_rays = 300
                break
            n_rays = int(n_rays)
            if n_rays < 200:
                # Explicit check instead of the original `assert`, which
                # silently disappears under `python -O`.
                raise ValueError("n_rays below the minimum of 200")
            break
        except ValueError:
            # The original bare `except:` also swallowed KeyboardInterrupt,
            # making the prompt impossible to abort with Ctrl-C.
            print("Please only input an integer above 200.")
def inclination_angle():
    """Ask for the viewing inclination angle; sets the global ``i``.

    Accepts 0 < i < 180 (exclusive bounds) and re-prompts on anything else.
    """
    global i
    while True:
        try:
            print("What inclination angle do you want? (from 0 to 180 degrees)")
            i = float(input("> "))
            if not 0 < i < 180:
                # Explicit range check instead of `assert` (stripped by -O).
                raise ValueError("angle out of range")
            break
        except ValueError:
            # Narrowed from a bare `except:` so Ctrl-C still aborts.
            print("Angle is from 0 to 180 degrees non-inclusive.")
if __name__ == '__main__':
    # load the profile data
    txt_file = "2DRhoTVphi_AMCVNMdot_08732.txt"
    profile = np.loadtxt("data/" + txt_file)
    # load the line data
    hei = pd.read_csv("data/hei_lines.txt", delim_whitespace=True, skiprows=1)
    heii = pd.read_csv("data/heii_lines.txt", delim_whitespace=True, skiprows=1)
    # Create the frequency array
    nu_bb = []
    for freq1 in hei.nu_ik:
        nu_bb.append(gf.nu_peak(freq1))
    for freq2 in heii.nu_ik:
        nu_bb.append(gf.nu_peak(freq2))
    nu_bb = np.array(nu_bb).reshape(np.size(nu_bb))
    nu_gen = 10.**np.linspace(14, 16.3, num=200)
    nu = np.sort(np.append(nu_gen, nu_bb)) # fix frequencies
    # photo-ionization of HeI, which returns new frequency array as well
    sig_bf_I, nu = gf.sig_heI(nu)
    f1 = plt.figure(1)
    # NOTE(review): gca(projection=...) is deprecated in modern matplotlib;
    # add_subplot(projection='3d') is the current spelling — confirm version.
    ax1 = f1.gca(projection='3d')
    print("Using profile data in {}".format(txt_file))
    print("""To use another profile data from a txt file, simply place it on the
data directory and change the variable txt_file in this file.
""")
    print("""Each photon path has 50 points, if you want to change this, locate
the function parallel_lines3d() from the package general_functions in this file
and change the parameter num_path to the desired number of points.
""")
    numbers() # ask user for number of photon paths.
    inclination_angle() # ask user for inclination angle.
    # start radiative object
    amcvn_3d = rc.Radiative3D(profile, hei, heii, nu, sig_bf_I)
    t0 = perf_counter()
    # `i` is the global inclination angle set by inclination_angle().
    for x, y, z, n in gf.parallel_lines3d(n_rays, view=i, num_path=50):
        amcvn_3d.light_rays(x, y, z, n, bf_alpha=True, bb_alpha=True)
        print("\033[H\033[J") # clears screen
        print("{} photon paths computed!".format(amcvn_3d.count))
        ax1.plot(x, y, z)
    t1 = perf_counter()
    print("\033[H\033[J") # clears screen
    print("{} seconds to run {} photon paths.".format(t1 - t0, amcvn_3d.count))
    print("Average time for loop was {} s.".format((t1 - t0) / amcvn_3d.count))
    ax1.set_title("{} Photon Paths".format(amcvn_3d.count))
    ax1.set_xlabel(r"$x$")
    ax1.set_ylabel(r"$y$")
    ax1.set_zlabel(r"$z$")
    f1.show()
    # Full frequency spectrum.
    f2 = plt.figure(2)
    ax2 = f2.gca()
    ax2.plot(nu, amcvn_3d.I_nu)
    ax2.set_title(r'Frequency Spectrum - Inclination Angle $i = {:.1f}^\circ$'.format(i))
    ax2.set_xlabel(r'$\nu$ (Hz)')
    ax2.set_ylabel(r'$I_\nu$ (ergs cm$^{-2}$ s$^{-1}$ ster$^{-1}$ Hz$^{-1}$)')
    f2.show()
    # Same spectrum zoomed on the line region.
    f3 = plt.figure(3)
    ax3 = f3.gca()
    ax3.plot(nu, amcvn_3d.I_nu)
    ax3.set_title('Frequency Spectrum - Zoomed at Lines')
    ax3.set_xlabel(r'$\nu$ (Hz)')
    ax3.set_ylabel(r'$I_\nu$ (ergs cm$^{-2}$ s$^{-1}$ ster$^{-1}$ Hz$^{-1}$)')
    ax3.set_xlim([0.4e15, 0.8e15]) # lines
    f3.show()
    input("Press <Enter> to exit...")
| true |
ef9042e1818f96dba950086530661690d5d0d986 | Python | wattaihei/ProgrammingContest | /AtCoder/ABC-B/098probB.py | UTF-8 | 263 | 2.71875 | 3 | [] | no_license | from collections import Counter
# For every split point of the string, count the distinct letters that
# appear on both sides; print the best such count.
N = int(input())
S = list(input())
ans = 0
for split in range(N):
    common = set(S[:split]) & set(S[split:])
    ans = max(ans, len(common))
print(ans)
224946ef34d6584f16421cb9daff1704c9a26c53 | Python | MatthewAbugeja/lmbn | /optim/warmup_cosine_scheduler.py | UTF-8 | 4,449 | 2.59375 | 3 | [
"MIT"
] | permissive | # encoding: utf-8
import torch
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.optim.lr_scheduler as lrs
import math
class WarmupCosineAnnealingLR(_LRScheduler):
    """Warm-up followed by cosine annealing.

    For the first `warmup_epoch` epochs the learning rate ramps up
    (linearly when multiplier == 1, otherwise towards multiplier * base_lr);
    afterwards it follows a cosine decay from base_lr down to min_lr over
    the remaining `epochs - warmup_epoch` epochs.
    """

    def __init__(self, optimizer, multiplier, warmup_epoch, epochs, min_lr=3.5e-7, last_epoch=-1):
        self.multiplier = multiplier
        if self.multiplier < 1.:
            raise ValueError(
                'multiplier should be greater thant or equal to 1.')
        self.warmup_epoch = warmup_epoch
        self.last_epoch = last_epoch
        self.eta_min = min_lr
        self.T_max = float(epochs - warmup_epoch)
        self.after_scheduler = True
        super(WarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        if self.last_epoch > self.warmup_epoch - 1:
            # Cosine-decay phase.
            grow = 1 + math.cos(math.pi * (self.last_epoch -
                                           self.warmup_epoch) / (self.T_max - 1))
            return [self.eta_min + (base_lr - self.eta_min) * grow / 2
                    for base_lr in self.base_lrs]
        if self.multiplier == 1.0:
            # Linear ramp from base_lr / warmup_epoch up to base_lr.
            ramp = float(self.last_epoch + 1) / self.warmup_epoch
        else:
            # Ramp from base_lr towards multiplier * base_lr.
            ramp = (self.multiplier - 1.) * self.last_epoch / self.warmup_epoch + 1.
        return [base_lr * ramp for base_lr in self.base_lrs]
if __name__ == '__main__':
    # Demo: plot the learning-rate curve produced by the scheduler.
    v = torch.zeros(10)
    optim1 = torch.optim.SGD([v], lr=3.5e-4)
    scheduler2 = WarmupCosineAnnealingLR(
        optim1, multiplier=1, warmup_epoch=10, epochs=120, min_lr=3.5e-7,last_epoch=-1)
    a = []
    b = []
    for i in range(1, 121):
        print('kk1', scheduler2.get_last_lr())
        print('3333333', scheduler2.last_epoch+1)
        if scheduler2.last_epoch ==120:
            break
        a.append(scheduler2.last_epoch+1)
        b.append(optim1.param_groups[0]['lr'])
        print(i, optim1.param_groups[0]['lr'])
        # optim.step()
        scheduler2.step()
    # NOTE(review): `scheduler` is not defined until further below, so this
    # line raises NameError when the script is run as-is — confirm intent.
    print(dir(scheduler))
    tick_spacing = 5
    plt.figure(figsize=(20,10))
    plt.rcParams['figure.dpi'] = 300 # resolution
    plt.plot(a, b, "-", lw=2)
    plt.yticks([3.5e-5, 3.5e-4], ['3.5e-5', '3.5e-4'])
    plt.xlabel("Epoch")
    plt.ylabel("Learning rate")
    # Second run: stop the sweep after 70 epochs.
    optim = torch.optim.SGD([v], lr=3.5e-4)
    scheduler1 = WarmupCosineAnnealingLR(
        optim, multiplier=1, warmup_epoch=10, epochs=120, min_lr=3.5e-7,last_epoch=-1)
    a = []
    b = []
    for i in range(1, 71):
        print('kk1', scheduler1.get_last_lr())
        print('3333333', scheduler1.last_epoch+1)
        if scheduler1.last_epoch ==120:
            break
        a.append(scheduler1.last_epoch+1)
        b.append(optim.param_groups[0]['lr'])
        print(i, optim.param_groups[0]['lr'])
        # optim.step()
        scheduler1.step()
    # Resume a scheduler from epoch 69 on the same optimizer.
    scheduler = WarmupCosineAnnealingLR(
        optim, multiplier=1, warmup_epoch=10, epochs=120, min_lr=3.5e-7,last_epoch=69)
    print(dir(scheduler))
    tick_spacing = 5
    plt.plot(a, b, "-", lw=2)
    # plt.xticks(3.5e-4)
    # plt.plot(n, m1, 'r-.', n, m2, 'b')
    # plt.xlim((-2, 4))
    # plt.ylim((-5, 15))
    # x_ticks = np.linspace(-5, 4, 10)
    # plt.xticks(x_ticks)
    # Replace the numbers at the tick positions with the given strings;
    # other ticks are no longer shown.
    plt.yticks([3.5e-5, 3.5e-4], ['3.5e-5', '3.5e-4'])
    plt.xlabel("Epoch")
    plt.ylabel("Learning rate")
    a = []
    b = []
    for i in range(1, 120):
        print('kk', scheduler.get_last_lr())
        print('3333333', scheduler.last_epoch+1)
        if scheduler.last_epoch ==126:
            break
        a.append(scheduler.last_epoch+1)
        b.append(optim.param_groups[0]['lr'])
        print(i, optim.param_groups[0]['lr'])
        optim.step()
        scheduler.step()
    # plt.plot(t, s, "o-", lw=4.1)
    # plt.plot(t, s2, "o-", lw=4.1)
    tick_spacing = 10
    plt.plot(a, b, "-", lw=2)
    # plt.xticks(3.5e-4)
    # plt.plot(n, m1, 'r-.', n, m2, 'b')
    # plt.xlim((-2, 4))
    # plt.ylim((-5, 15))
    # x_ticks = np.linspace(-5, 4, 10)
    # plt.xticks(x_ticks)
    # Replace the numbers at the tick positions with the given strings;
    # other ticks are no longer shown.
    plt.yticks([3.5e-5, 3.5e-4], ['3.5e-5', '3.5e-4'])
    plt.xlabel("Epoch")
    plt.ylabel("Learning rate")
e080412c64825cc6e990a5a134a2116f8bf236a0 | Python | Anurodh437/Competetive_Programming | /bitwise tuples.py | UTF-8 | 283 | 2.765625 | 3 | [] | no_license | def Si(): return input()
# Competitive-programming input helpers (one per input shape):
def Ii(): return int(input())  # one integer
def Li(): return list(map(int, input().split()))  # list of integers
def Lsi(): return input().split()  # list of whitespace-separated tokens
def Mi(): return map(int, input().split())  # integer iterator (for unpacking)
# For each test case read n and m and print ((2^n - 1)^m) mod 1e9+7.
for _ in range(Ii()):
    n,m = Mi()
    res = pow(2,n,1000000007)-1
    print(pow(res,m,1000000007))
7a03af1920394484ca383e020cd10b2ab818afe6 | Python | ppb/pursuedpybear | /ppb/features/twophase.py | UTF-8 | 1,074 | 2.9375 | 3 | [
"Artistic-2.0"
] | permissive | """
A system for two phase updates: Update, and Commit.
"""
from dataclasses import dataclass
from ppb.systemslib import System
__all__ = 'Commit',
@dataclass
class Commit:
    """
    Fired after Update.

    Carries no payload; handlers apply their staged state in on_commit.
    """
class TwoPhaseSystem(System):
    """
    Produces the Commit event.
    """

    def on_update(self, event, signal):
        # After every Update, immediately signal the Commit phase.
        signal(Commit())
class TwoPhaseMixin:
    """
    Mixin giving objects two-phase updates: stage during Update, apply on Commit.
    """

    # Class-level default; a per-instance dict is created on first staging.
    __staged_changes = None

    def stage_changes(self, **kwargs):
        """
        Queue attribute values to apply at the next commit.

        Keyword arguments name the attributes to set on this object.
        """
        staged = self.__staged_changes
        if staged is None:
            staged = {}
            self.__staged_changes = staged
        staged.update(kwargs)

    def on_commit(self, event, signal):
        """
        Apply and clear any previously staged changes.
        """
        pending = self.__staged_changes
        self.__staged_changes = {}
        if not pending:
            return
        for attr, value in pending.items():
            setattr(self, attr, value)
| true |
babb0d9da9ff4e5d8ff83ab62069d04a98e0acb0 | Python | sihota/CS50W | /Python/variables.py | UTF-8 | 218 | 3.1875 | 3 | [] | no_license | number = 10
# Demonstrates basic value types and f-string interpolation.
# (`number` is assigned on the preceding, metadata-corrupted line.)
print(f"number is {number}")
total = 100.35
print(f"total is {total}")
name = "Amarpal"
print(f"name is {name}")
isrequire = True
print(f"isrequire is {isrequire}")
null = None
print(f"null is {null}")
| true |
0676d99b0e8f5895b269851d533456cbbc4c1c01 | Python | nachovazquez98/gw_inyection | /notebooks/open_hdf5.py | UTF-8 | 2,548 | 3.140625 | 3 | [] | no_license | #%%
"""
-meta: Meta-data for the file. This is basic information such as the GPS times covered, which instrument, etc.
-quality: Refers to data quality. The main item here is a 1 Hz time series describing the data quality for each
second of data. This is an important topic, and we'll devote a whole step of the tutorial to working with data quality information.
-strain: Strain data from the interferometer. In some sense, this is "the data", the main measurement performed by LIGO.
"""
# the x axis is meta>GPSstart
# the y axis is strain>Strain
import numpy as np
import pandas as pd
import h5py
import matplotlib.pyplot as plt
hdf5_path = fileName = 'L-L1_GWOSC_O2_4KHZ_R1-1185669120-4096.hdf5'
#%%
with h5py.File(hdf5_path, 'r') as hdf:
    ls = list(hdf.keys()) # the top-level keys of the file
    print('List of datasets in this file: \n', ls)
# contents of 'strain'
with h5py.File(hdf5_path, 'r') as hdf:
    key_strain = list(hdf.keys())[2]
    data_strain = list(hdf[key_strain])
    print("Data in strain: ", data_strain)
# contents of 'meta'
#gpsstart
# GPSstart is the time, x axis
with h5py.File(hdf5_path, 'r') as hdf:
    key_meta = list(hdf.keys())[0]
    data_meta = list(hdf[key_meta])
    print("Data in meta: ", data_meta)
# contents of 'quality'
with h5py.File(hdf5_path, 'r') as hdf:
    key_quality = list(hdf.keys())[1]
    data_quality = list(hdf[key_quality])
    print("Data in quality: ", data_quality)
#%%
##########################################################################
# contents
dataFile = h5py.File(fileName, 'r')
for key in dataFile.keys():
    print (key)
#%%
strain = dataFile['strain']['Strain'][()]
# time sample (sampling interval)
ts = dataFile['strain']['Strain'].attrs['Xspacing']
print ("\n\n")
metaKeys = dataFile['meta'].keys()
meta = dataFile['meta']
for key in metaKeys:
    # NOTE(review): Python-2 style `print x, y`; on Python 3 the trailing
    # tuple is evaluated but never printed — confirm intended output.
    print (key), (meta[key][()])
#%%
gpsStart = meta['GPSstart'][()]
duration = meta['Duration'][()]
gpsEnd = gpsStart + duration
print ("\n\n")
strainKeys = dataFile['strain'].keys()
strain = dataFile['strain']
## access the contents of 'strain'
for key in strainKeys:
    print ((key), (strain[key][()]))
    # store the strain array in strain1 (y vector)
    strain1 = (strain[key][()])
#print ("Strain: ",strain1)
#%%
# build the x vector
time = np.arange(gpsStart, gpsEnd, ts)
print("Time sample: ", ts)
print("\n\metaKey: ",metaKeys)
print("\n\meta: ",meta)
print ("\n\ngpsStart: ",gpsStart)
print ("\n\ngpsEnd: ",gpsEnd)
plt.plot(time, strain1)
plt.xlabel('GPS Time (s)')
plt.ylabel('H1 Strain')
plt.show()
#%%
#%%
# | true |
0d56036b1d97ba0250eb36331d0f3e312e754b1e | Python | JakobKallestad/Python-Kattis | /src/IRepeatMyself.py | UTF-8 | 436 | 2.875 | 3 | [] | no_license | n = int(input())
# For each test string, find the length of the shortest prefix whose
# repetition can reproduce the string (Kattis "I Repeat Myself...").
for _ in range(n):
    inp = list(input())
    f_c = inp[0]
    min_rep = 1
    # Candidate repeating unit and our position inside it.
    current_repeat = [f_c]
    cr_index = 0
    for i, c in enumerate(inp, 1):
        if c == current_repeat[cr_index]:
            # Still matching the candidate unit; advance cyclically.
            cr_index = (cr_index + 1) % len(current_repeat)
        else:
            # Mismatch: grow the candidate unit to the longest prefix that
            # is still consistent with everything seen so far.
            cr_index = (1 if c == f_c else 0)
            min_rep = i-cr_index
            current_repeat = inp[:i-cr_index]
    print(min_rep)
| true |
a6f4623267bf0c77f7bc8464d441cf2122021765 | Python | MuskanValmiki/Dictionary | /kavita.py | UTF-8 | 934 | 2.71875 | 3 | [] | no_license | # datas= [{"name":"komal","score":40,"school":"pyds"},{"name":"koma","score":40,"school":"pyd"},{"name":"jaya","score":60,"school":"pyds"},{"name":"Sonam","score":60,"school":"Union"},{"name":"Akshit","score":50,"school":"Summer Fileld school"}]
# for index in range(0,len(datas)):
# for key in datas[index]:
# if datas[index]["score"]>50:
# if datas[index]["school"]=="pyds":
# print(datas[index])
# break
# Student records; print the first one scoring above 50 at school "pyds".
datas= [{"name":"komal","score":40,"school":"pyds"},{"name":"koma","score":40,"school":"pyd"},{"name":"jaya","score":60,"school":"pyds"},{"name":"Sonam","score":60,"school":"Union"},{"name":"Akshit","score":50,"school":"Summer Fileld school"}]

for record in datas:
    if record["score"] > 50 and record["school"] == "pyds":
        # Same output as the original index/flag loop, without re-testing the
        # record once per key and without the one-shot counter hack.
        print(record)
        break
| true |
3d94497ce28400b69d010a28ddcb5fc7014d3847 | Python | Kdotseth7/DeepLearning | /CNN/CNN.py | UTF-8 | 2,477 | 3.1875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 18:46:33 2019
@author: Kushagra Seth

Binary image classifier: two Conv/MaxPool stages, one hidden dense layer,
trained from directory-organised image folders.
"""
# Convolutional Neural Network (CNN)
# PART-1 : Creating the CNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initializing the CNN
classifier = Sequential()
# Step-1: Convolution
classifier.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(64, 64, 3), activation="relu"))
# Step-2: Max Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding second convolution layer
classifier.add(Conv2D(filters=32, kernel_size=(3, 3), activation="relu"))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Step-3: Flattening
classifier.add(Flatten())
#Step-4: Full Connection
# NOTE(review): `output_dim` is the legacy Keras 1 name for `units`; modern
# Keras rejects it — confirm the pinned Keras version.
classifier.add(Dense(output_dim=128, activation="relu"))
classifier.add(Dense(output_dim=1, activation="sigmoid")) # sigmoid because o/p is binary otherwise use softmax
# Compiling CNN
classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]) # adam - stochastic gradient
# loss = "binary_crossentropy" bcoz o/p is binary
# Part-2 : Fitting CNN classifier to the Training set
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')
import scipy.ndimage
# 8000 training / 2000 validation images in batches of 32.
classifier.fit_generator( training_set,
                          steps_per_epoch=8000/32,
                          epochs=25,
                          validation_data=test_set,
                          validation_steps=2000/32)
| true |
045d6e44019af1fb0fb1218214670a0b4badd3d5 | Python | kojino/nlp100 | /ch1/p1.py | UTF-8 | 80 | 3.03125 | 3 | [] | no_license | # 0
def reverse_string(s):
    """Return the characters of s in reverse order."""
    return "".join(reversed(s))
# print reverse_string("stressed") | true |
0ed25063ea8eaadc19343793d265a1c2e5c0175c | Python | ciubotaruv/hackathon404-team | /sarcina6/main.py | UTF-8 | 5,409 | 3.1875 | 3 | [] | no_license | # Sarcina 2.1
class Lab():
    """Interpreter for a 2-D "maze" mini-language read from labirint.txt.

    The program is a character grid (self.matrix).  Execution starts just
    below each '$' and each cell's symbol moves the instruction pointer,
    reading decimal operands from the adjacent cells behind the arrow.
    '[' ']' '*' '.' push neighbouring cells onto a stack (self.stiva);
    '(' ')' '-' '+' '%' move/edit an output buffer (self.flag); '@' prints
    the buffer.
    """

    def __init__(self):
        # stiva: value stack; flag: output buffer; matrix: program grid.
        self.stiva = []
        self.flag = []
        self.matrix = []
        self.k = 0

    def up(self, i, j):
        """Read the number in the cells below, jump that many rows up."""
        # print('up')
        i1 = i
        number = ''
        while True:
            i1 = i1 + 1
            try:
                int(self.matrix[i1][j])
            # NOTE(review): bare except also ends the scan on IndexError at
            # the grid edge — relied upon here.
            except:
                break
            number += self.matrix[i1][j]
        number = int(number)
        self.action(i - number, j)

    def down(self, i, j):
        """Read the number in the cells above, jump that many rows down."""
        # print('down')
        i1 = i
        number = ''
        while True:
            i1 = i1 - 1
            try:
                int(self.matrix[i1][j])
            except:
                break
            number += self.matrix[i1][j]
        number = int(number)
        self.action(i + number, j)

    def right(self, i, j):
        """Read the number to the left, jump that many columns right."""
        # print('rihjt')
        j1 = j
        number = ''
        while True:
            j1 = j1 - 1
            try:
                int(self.matrix[i][j1])
            except:
                break
            number += self.matrix[i][j1]
        number = int(number)
        self.action(i, j + number)

    def left(self, i, j):
        """Read the number to the right, jump that many columns left."""
        # print('left')
        j1 = j
        number = ''
        while True:
            j1 = j1 + 1
            try:
                int(self.matrix[i][j1])
            except:
                break
            number += self.matrix[i][j1]
        number = int(number)
        self.action(i, j - number)

    def par_left(self, i, j):
        """Pop the stack onto the front of the buffer, then jump left."""
        j1 = j
        number = ''
        while True:
            j1 = j1 + 1
            try:
                int(self.matrix[i][j1])
            except:
                break
            number += self.matrix[i][j1]
        number = int(number)
        temp = self.stiva.pop()
        self.flag.insert(0, temp)
        self.action(i, j - number)

    def par_right(self, i, j):
        """Pop the stack onto the end of the buffer, then jump right."""
        j1 = j
        number = ''
        while True:
            j1 = j1 - 1
            try:
                int(self.matrix[i][j1])
            except:
                break
            number += self.matrix[i][j1]
        number = int(number)
        temp = self.stiva.pop()
        self.flag.append(temp)
        self.action(i, j + number)

    def minus(self, i, j):
        """Drop the first buffer element, then jump up."""
        i1 = i
        number = ''
        while True:
            i1 = i1 + 1
            try:
                int(self.matrix[i1][j])
            except:
                break
            number += self.matrix[i1][j]
        number = int(number)
        del self.flag[0]
        self.action(i - number, j)

    def plus(self, i, j):
        """Drop the last buffer element, then jump down."""
        i1 = i
        number = ''
        while True:
            i1 = i1 - 1
            try:
                int(self.matrix[i1][j])
            except:
                break
            number += self.matrix[i1][j]
        number = int(number)
        self.flag.pop()
        self.action(i + number, j)

    def procent(self, i, j):
        """Reverse the buffer and continue one row down."""
        self.flag = self.flag[::-1]
        self.action(i + 1, j)

    def pat_right(self, i, j):
        """Push the cell left of ']' and skip two columns left."""
        j1 = j - 1
        self.stiva.append(self.matrix[i][j1])
        self.action(i, j - 2)

    def pat_left(self, i, j):
        """Push the cell right of '[' and skip two columns right."""
        j1 = j + 1
        self.stiva.append(self.matrix[i][j1])
        self.action(i, j + 2)

    def inm(self, i, j):
        """Push the cell above '*' and skip two rows up."""
        i1 = i - 1
        self.stiva.append(self.matrix[i1][j])
        self.action(i - 2, j)

    def dot(self, i, j):
        """Push the cell below '.' and skip two rows down."""
        i1 = i + 1
        self.stiva.append(self.matrix[i1][j])
        self.action(i + 2, j)

    def action(self, i, j):
        """Dispatch on the symbol at (i, j); '@' halts and prints the buffer."""
        c = self.matrix[i][j]
        # print(c,end='')
        if c == '>':
            self.right(i, j)
        if c == '<':
            self.left(i, j)
        if c == '^':
            self.up(i, j)
        if c == 'v':
            self.down(i, j)
        if c == '(':
            self.par_left(i, j)
        if c == ')':
            self.par_right(i, j)
        if c == '-':
            self.minus(i, j)
        if c == '+':
            self.plus(i, j)
        if c == '%':
            self.procent(i, j)
        if c == ']':
            self.pat_right(i, j)
        if c == '[':
            self.pat_left(i, j)
        if c == '*':
            self.inm(i, j)
        if c == '.':
            self.dot(i, j)
        if c == '@':
            print(*self.flag, sep='')
            # print('Stop___________________')

    def start(self, i, j):
        """Reset the state and begin executing just below a '$' marker."""
        # print(*self.flag,sep='')
        self.k += 1
        self.flag = []
        self.stiva = []
        self.action(i + 1, j)

    def ececution(self):
        """Scan the grid and run every '$' entry point (sic: 'execution')."""
        i = -1
        for line in self.matrix:
            i += 1
            for j in range(len(line)):
                if self.matrix[i][j] == '$':
                    self.start(i, j)
                    break

    def read_file(self):
        """Load labirint.txt into self.matrix, one character list per line."""
        with open("labirint.txt", 'r') as file:
            while True:
                read_line = file.readline()
                if len(read_line) == 0:
                    break
                vector = []
                for read_c in read_line:
                    vector.append(read_c)
                # Drop the trailing newline character.
                self.matrix.append(vector[:-1])
if __name__ == '__main__':
    # Load the program grid and run every '$' entry point.
    a = Lab()
    a.read_file()
    a.ececution()
| true |
156699c3fdc64335a155d257c5c23b4b5ce06a9f | Python | kscharlund/kattis | /aaah/aaah.py | UTF-8 | 190 | 2.8125 | 3 | [] | no_license | import sys
if __name__ == '__main__':
a1 = sys.stdin.readline().strip()
a2 = sys.stdin.readline().strip()
if len(a1) < len(a2):
print('no')
else:
print('go')
| true |
30a294296fb657e31a4487fc8ae1392bfae47261 | Python | herndev/Good-old-mini-projects | /Python/Tkinter/kbank/main.py | UTF-8 | 3,526 | 2.609375 | 3 | [] | no_license | #TABBED LAYOUT
#By: Hernie Jabien
#Copyright @ Syntaxer 2019 all rights reserved.
from hern import*
from tkinter import*
from tkinter import messagebox
from tkinter.ttk import Combobox
# Demo account holders shown in the combo boxes, kept sorted.
usr = ["Tisoy", "Pogie", "Gwapo", "Beauty", "Tisay"]
usr.sort()
class MainView(Frame):
    """Tkinter bank demo: deposit/withdraw on the left, history on the right.

    Persists transactions through insertdata/selectdata from the project's
    `hern` module (storage "bank"); withdrawals record negative amounts.
    """

    def __init__(self, *arg, **args):
        Frame.__init__(self, *arg, **args)
        label = Label(self, text="")
        label.pack(side="top", fill="both", expand=True)
        # userr: account for deposits/withdrawals; userrr: account to search.
        self.userr = StringVar()
        self.userrr = StringVar()
        self.code = ""
        user = Combobox(self, width=18, font="Arial 20", values= usr, textvariable=self.userr)
        user.set("Select user")
        user.place(x=280, y=105)
        self.money = Entry(self, relief="ridge", bd=3, width=20, font="Arial 19")
        self.money.place(x=280, y=145)
        btn = Button(self, text="Deposit money", font="Arial 18", width=20, bd=3, relief="raised", background="orange", command=lambda:self.adder(self.money.get()))
        btn.place(x=280, y=190)
        btn1 = Button(self, text="Withdraw money", font="Arial 18", width=20, bd=3, relief="raised", background="orange", command=lambda:self.withdrawer(self.money.get()))
        btn1.place(x=280, y=240)
        # Left list box: transaction log / validation messages.
        self.lstView1 = Listbox(self, width=36, height=17, background="white", fg="#000")
        self.lstView1.place(x=280, y=300)
        user1 = Combobox(self, width=15, font="Arial 20", values= usr, textvariable=self.userrr)
        user1.set("Select user")
        user1.place(x=810, y=105)
        btn2 = Button(self, text="Search", font="Arial 17", width=6, background="orange", fg="white", bd=3, relief="raised", command=lambda:self.display(""))
        btn2.place(x=1060, y=105)
        # Right list box: per-user history shown by display().
        self.lstView = Listbox(self, width=44, height=25, background="white", fg="#000")
        self.lstView.place(x=810, y=155)

    def adder(self,money):
        """Record a deposit of `money` for the selected user."""
        # self.userr.get()
        if self.userr.get() == "Select user":
            self.lstView1.insert(0,"Error$ Please select valid username.")
        else:
            if money != "":
                messagebox.showinfo("Bank", "Money added successfully.")
                insertdata("bank",{"name":self.userr.get(),"money":money})
                self.lstView1.insert(0,"Info$ Verified successfully.")
                self.lstView1.insert(0,"Info$ P%s added to %s."%(money,self.userr.get()))
                self.userr.set("Select user")
                self.money.delete(0,END)
            else:
                self.lstView1.insert(0,"Info$ Money must not be empty.")

    def withdrawer(self,money):
        """Record a withdrawal plus a 10% fee (minimum fee of 1)."""
        if self.userr.get() == "Select user":
            self.lstView1.insert(0,"Error$ Please select valid username.")
        else:
            if money != "":
                rem = (int(money)*0.10)
                if rem <= 1:
                    rem = 1
                money = int(money) + rem
                money = str(money)
                messagebox.showinfo("Bank", "Money withdrawn successfully.")
                # Stored as a negative amount so balances sum correctly.
                insertdata("bank",{"name":self.userr.get(),"money":"-"+money})
                self.lstView1.insert(0,"Info$ Verified successfully.")
                self.lstView1.insert(0,"Info$ P%s withdrawn to %s."%(money,self.userr.get()))
                self.userr.set("Select user")
                self.money.delete(0,END)
            else:
                self.lstView1.insert(0,"Info$ Money must not be empty.")

    def display(self,arr):
        """Show the selected user's transactions and their running total."""
        if self.userrr.get() != "Select user":
            if selectdata("bank", {"name":self.userrr.get()}) is not False:
                sum = 0
                self.lstView.delete(0,END)
                for m in selectdata("bank", {"name":self.userrr.get()}):
                    self.lstView.insert(END, "%s"%str(m["money"]))
                    sum = sum + float(m["money"])
                self.lstView.insert(0, "Total money: %2d"%sum)
if __name__ == "__main__":
    # Build a fixed-size, non-resizable window hosting the bank view.
    root = Tk()
    main = MainView(root)
    main.pack(side="top", fill="both", expand=True)
    root.wm_geometry("1440x900")
    root.resizable(0,0)
    root.mainloop()
70a35a9bc2b4d289ac9c87816657c0edcce1eb1b | Python | benkeanna/pyladies | /05/ukol8.py | UTF-8 | 1,196 | 3.625 | 4 | [] | no_license | from random import randrange
from random import randrange


def roll_until_six():
    """Roll a d6 repeatedly, printing and summing rolls, until a 6 appears.

    The 6 ends the turn and is not counted.  Returns the accumulated score.
    """
    total = 0
    while True:
        roll = randrange(1, 7)
        if roll == 6:
            return total
        print(roll)
        total += roll


def play(pocet_hracu=4):
    """Play one round for `pocet_hracu` players; return their scores in order.

    Rewrite of the original script, which did not run at all (missing colon
    on the `for`, and hod1..hod4 / soucet1..soucet4 used before assignment).
    """
    skore = []
    for cislo in range(1, pocet_hracu + 1):
        vysledek = roll_until_six()
        print('Skóre hráče', cislo, 'je:', vysledek)
        skore.append(vysledek)
    return skore


if __name__ == '__main__':
    skore = play()
    # Ties go to the earlier player, matching the original >= comparisons.
    vitez = skore.index(max(skore)) + 1
    print('Hráč', vitez, 'vyhrál.')
| true |
2b49a19a3a717ccc8acb9ede90de106a6bf5f366 | Python | EduMeurer999/Algoritmos-Seg- | /10.py | UTF-8 | 158 | 3.71875 | 4 | [] | no_license | salarioHora = float(input('Informe o salario por hora: R$'))
horas = float(input('Informe horas trabalhadas: '))
# Gross pay = hourly rate (salarioHora, read on the preceding line) x hours.
print('Salario total: R$', salarioHora*horas)
9ad4a7cfa40a0393f7521134930848f14f067071 | Python | hulehuani/web-app0910 | /common/logs.py | UTF-8 | 2,605 | 2.875 | 3 | [] | no_license | #!/usr/bin/python3.5.1
# -*- coding: utf-8 -*-
# @Time : 2020/8/31 22:12
# @Author : yuzhenyu
# @File : logs.py hu
import time
import logging
import os
class Log:
    """
    Shared logging helper.

    Builds one log file name per day under <project>/logs; running several
    times on the same day appends to the same file.  Call ``save_log`` first:
    it configures the logger and formatter the other helpers rely on.
    No size-based rotation is configured.
    """

    @classmethod
    def get_instance(cls, *args, **kwargs):
        # Process-wide singleton, created lazily on first use.
        if not hasattr(Log, "_instance"):
            Log._instance = Log(*args, **kwargs)
        return Log._instance

    def save_log(self):
        """Compute the per-day log path and configure the root logger."""
        file = os.path.dirname(
            os.path.dirname(
                os.path.abspath(__file__)
            )
        )
        # Bug fix: the original "%Y_%M_%D" mixed minutes (%M) into the name
        # and %D expands with '/' characters, yielding an invalid file name.
        self.filename = os.path.join(file, "logs", "%s.logs" % time.strftime("%Y_%m_%d"))
        self.logger = logging.getLogger()
        # Set the log level.
        self.logger.setLevel(logging.DEBUG)
        # Set the record layout shared by all handlers.
        self.formater = logging.Formatter(
            "[%(asctime)s] %(name)s][%(filename)s:%(lineno)d] [%(levelname)s][%(message)s")

    def output_consle_logs(self):
        """
        Send log records to the console.
        """
        self.consle = logging.StreamHandler()
        self.consle.setLevel(logging.DEBUG)
        self.consle.setFormatter(self.formater)
        self.logger.addHandler(self.consle)

    def output_file_logs(self):
        """
        Send log records to the per-day file computed by save_log().
        """
        # Bug fix: FileHandler() was called without its mandatory filename
        # argument; also create the logs directory before opening the file.
        os.makedirs(os.path.dirname(self.filename), exist_ok=True)
        self.file_log = logging.FileHandler(self.filename, encoding="utf-8")
        self.file_log.setLevel(logging.DEBUG)
        self.file_log.setFormatter(self.formater)
        self.logger.addHandler(self.file_log)

    def judge_log(self, level, msg):
        """
        Route msg to the logger method matching the level name.

        :param level: one of debug/info/warning/error/critical
        :param msg: message text (unknown levels are silently ignored)
        """
        if level in ("debug", "info", "warning", "error", "critical"):
            getattr(self.logger, level)(msg)

    # Bug fix: these were @staticmethod yet declared (self, msg), so normal
    # instance calls like log.debug("x") raised TypeError.  As plain methods
    # both log.debug(msg) and the old Log.debug(log, msg) spelling work.
    def debug(self, msg):
        self.judge_log("debug", msg)

    def info(self, msg):
        self.judge_log("info", msg)

    def warning(self, msg):
        self.judge_log("warning", msg)

    def error(self, msg):
        self.judge_log("error", msg)

    def critical(self, msg):
        self.judge_log("critical", msg)
| true |
f42164331f4b89e21d854159d9e09d73480bb227 | Python | MattBUWM/WD | /cw6/1.py | UTF-8 | 47 | 2.578125 | 3 | [] | no_license | import numpy as np
# Even numbers from 2 up to (but not including) 42, printed as an ndarray.
a=np.arange(2,42,2)
print(a)
ed17c9e6e504ab4da9c23219df66c57d259d8394 | Python | bleuxr/News_Collections | /stop_words.py | UTF-8 | 2,232 | 2.984375 | 3 | [] | no_license | import re
import jieba
import mysql.connector
## Two helpers for removing stop words.
# Load the stop-word list.
def stopwordslist(filepath):
    """Return one stop word per line from a GBK-encoded file."""
    # Bug fix: the original left the file handle open; `with` closes it.
    with open(filepath, 'r', encoding='gbk') as fh:
        return [line.strip() for line in fh]


# Remove stop words from a tokenised sentence.
def movestopwords(sentence, stopwords_path='stop_words.txt'):
    """Drop stop words (and tab/newline tokens) from an iterable of tokens.

    Returns the surviving tokens joined by single spaces, with a trailing
    space, matching the original output shape.  `stopwords_path` replaces
    the previously hard-coded 'stop_words.txt' (same default, so existing
    callers are unaffected).
    """
    stopwords = stopwordslist(stopwords_path)
    outstr = ''
    for word in sentence:
        if word not in stopwords:
            # Bug fix: `word != '\t'and'\n'` parsed as
            # (word != '\t') and '\n', which only excluded tabs; the intent
            # was to drop both tab and newline tokens.
            if word not in ('\t', '\n'):
                outstr += word
                outstr += " "
    return outstr
# Clean every article title/abstract from the `information` table and write
# the segmented, stop-word-free text into the `fenci` table.
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="2019my03sql31",
    database="toutiao"
)
mycursor=mydb.cursor()
mycursor.execute("SELECT id,title,abstract FROM information")
myresult=mycursor.fetchall()
# This cannot filter \\ or \, Chinese parentheses, or ———— runs.
r1 = u'[a-zA-Z0-9’!"#$%&\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\]^_`{|}~]+'# users may add custom characters to filter here
# This rule is also incomplete.
r2 = "[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]+"
# \\\ filters back slashes (single and double), / filters forward slashes;
# the first bracket group holds ASCII symbols, the second Chinese symbols,
# and the | between them is required or filtering is incomplete.
r3 = "[.!//_,$&%^*()<>+\"'?@#-|:~{}]+|[——!\\\\,。=?、:“”‘’《》【】¥……()]+"
# Strip brackets and everything inside them.
r4 = "\\【.*?】+|\\《.*?》+|\\#.*?#+|[.!/_,$&%^*()<>+""'?@|:~{}#]+|[——!\\\,。=?、:“”‘’¥……()《》【】]"
for x in myresult:
    id=x[0]
    title=x[1]
    title=re.sub(r4,'',title)
    abstract=x[2]
    abstract=re.sub(r4,'',abstract)
    seg_list = jieba.cut(title, cut_all=False) # precise segmentation mode
    # title=" ".join(seg_list)
    # print(title)
    title=movestopwords(seg_list)
    # print(title)
    seg_list = jieba.cut(abstract, cut_all=False)
    abstract=movestopwords(seg_list)
    # abstract=" ".join(seg_list)
    sql="UPDATE fenci SET title = %s, abstract = %s WHERE id = %s"
    val=(title,abstract,id)
    mycursor.execute(sql,val)
    # break
mydb.commit()
b82d9b9fc9ea4974c3e97327ca486778f436d167 | Python | Yang-X-Y/LISA | /src/utils/FileViewer.py | UTF-8 | 2,207 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
import shutil
def convert_type(x, type):
    """Convert *x* to the type named by *type* ('int', 'long', 'float').

    Any other type name returns *x* unchanged.
    """
    if type == 'int':
        return int(x)
    if type == 'long':
        # Bug fix: Python 3 has no ``long``; its unbounded ``int`` is the
        # equivalent, so fall back to it when ``long`` is undefined.
        try:
            return long(x)
        except NameError:
            return int(x)
    if type == 'float':
        return float(x)
    return x
def list_files(filepath, suffix=None, isdepth=True):
    """Collect file paths under *filepath*.

    When *suffix* is given only matching file names are kept; when *isdepth*
    is False only the top-level directory is scanned.
    """
    collected = []
    for dirpath, _dirnames, filenames in os.walk(filepath):
        matching = [f for f in filenames if suffix is None or f.endswith(suffix)]
        collected.extend(os.path.join(dirpath, f) for f in matching)
        if not isdepth:
            break
    return collected
def get_filename_from_absolute_path(filepath, retain_suffix=True):
    """Return the final path component of *filepath*.

    Handles both '/' and '\\' separated paths. With retain_suffix=False,
    everything from the first '.' onwards is stripped. Returns None when the
    path contains no separator at all (behaviour kept from the original).
    """
    res = None
    if filepath.find('/') >= 0:
        items = filepath.split('/')
        res = items[-1]
    elif filepath.find('\\') >= 0:
        # Bug fix: this branch previously split on '/' as well, so Windows
        # style paths came back unchanged instead of yielding the filename.
        items = filepath.split('\\')
        res = items[-1]
    if res is not None:
        if not retain_suffix:
            idx = res.find('.')
            if idx >= 0:
                res = res[0:idx]
    return res
def load_map(path, key_type, value_type, split_tag='\t'):
    """Read ``key<split_tag>value`` lines from *path* into a dict, converting
    both fields with convert_type."""
    res = {}
    # ``with`` closes the handle deterministically; the bare open() leaked it.
    with open(path, 'r') as reader:
        for line in reader:
            items = line.strip().split(split_tag)
            key = convert_type(items[0], key_type)
            value = convert_type(items[1], value_type)
            res[key] = value
    return res
def load_reverse_map(path, key_type, value_type, split_tag='\t'):
    """Like load_map but with the columns swapped: the second field becomes
    the key and the first the value."""
    res = {}
    # ``with`` closes the handle deterministically; the bare open() leaked it.
    with open(path, 'r') as reader:
        for line in reader:
            items = line.strip().split(split_tag)
            value = convert_type(items[0], value_type)
            key = convert_type(items[1], key_type)
            res[key] = value
    return res
def load_list(path):
    """Read *path* and return its lines with surrounding whitespace stripped."""
    with open(path, 'r') as reader:
        return [line.strip() for line in reader.readlines()]
def dump_map(path, map, split_tag='\t'):
    """Write one ``key<split_tag>value`` line per mapping entry to *path*."""
    with open(path, 'w') as writer:
        writer.writelines(
            str(key) + split_tag + str(value) + '\n' for key, value in map.items()
        )
def detect_and_create_dir(dir):
    """Create *dir* (and any missing parents) if it does not already exist.

    Uses ``exist_ok=True`` so a concurrent creation between the old
    exists()-check and makedirs() call can no longer raise.
    """
    os.makedirs(dir, exist_ok=True)
def detect_and_delete_empty_dir(dir):
    """Remove *dir* if it exists.

    Note: os.removedirs also prunes ancestor directories that become empty.
    """
    if os.path.exists(dir):
        os.removedirs(dir)
def detect_and_delete_dir(dir):
    """Recursively delete *dir* and all of its contents if it exists."""
    if os.path.exists(dir):
        shutil.rmtree(dir)
| true |
ff1d1cbc8ec199eaa3094b1aaf1f1476dd42da6e | Python | erjan/coding_exercises | /valid_boomerang.py | UTF-8 | 423 | 3.59375 | 4 | [
"Apache-2.0"
] | permissive | '''
Given an array points where points[i] = [xi, yi] represents a point on the X-Y plane, return true if these points are a boomerang.
A boomerang is a set of three points that are all distinct and not in a straight line.
'''
class Solution:
    def isBoomerang(self, points: List[List[int]]) -> bool:
        """Return True when the three points are pairwise distinct and not
        collinear, i.e. they span a proper triangle."""
        (ax, ay), (bx, by), (cx, cy) = points
        # Cross product of the vectors B->A and B->C is zero iff the points
        # are collinear (which also covers any coincident points).
        lhs = (cy - by) * (ax - bx)
        rhs = (cx - bx) * (ay - by)
        return lhs != rhs
| true |
d8f70f2790e6d7fe9a18eedf40953488b8ed8196 | Python | PyQuake/earthquakemodels | /code/testingAlarmBased/molchanBased.py | UTF-8 | 2,897 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | import random
import models.mathUtil as mathUtil
def molchan(modelLambda, modelOmega):
    """
    Calculates the Molchan test of a model against a reference model.

    modelLambda carries the forecast value per bin, modelOmega the observed
    event count per bin. The result is a trajectory of length N+1 (N = total
    observed events) where trajectory[h] is the cumulative reference mass
    (tau) consumed when h hits have been captured, scanning bins from the
    highest forecast value down. Falls through (returns None) when the two
    models do not have the same number of bins.
    """
    if len(modelLambda.bins)==len(modelOmega.bins):
        # Reference weights: observed counts + 1, normalised to sum to 1.
        referenceValues=[]
        referenceValues[:]=[x+1 for x in modelOmega.bins]
        referenceValues=mathUtil.normalize(referenceValues)
        N = sum(modelOmega.bins)
        trajectory=[0]*(N+1)
        # Pair (forecast, weight) per bin, sorted by forecast descending so
        # bins are consumed from the highest alarm level down.
        fullData=[]
        for lam,value in zip(modelLambda.bins, referenceValues):
            fullData.append((lam, value))
        fullData.sort()
        fullData.reverse()
        testingValues=[]
        referenceValues=[]
        for data in fullData:
            testingValues.append(data[0])
            referenceValues.append(data[1])
        hits=0
        tau=0
        for i in range(len(modelOmega.bins)):
            tau+=referenceValues[i]
            hitsInThisBin=0
            # NOTE(review): modelOmega.bins is indexed by the *sorted* position
            # i here, while the sort reordered only (forecast, weight) pairs --
            # confirm the omega counts should not be carried through the sort.
            hitsInThisBin=modelOmega.bins[i]
            if hitsInThisBin>0:
                thresholdInThisBin=testingValues[i]
                # NOTE(review): fullData[...] is a (forecast, weight) tuple but
                # thresholdInThisBin is a scalar, so the while-condition below
                # can never be true on its first evaluation; testingValues[...]
                # looks like what was intended. Confirm before relying on the
                # tie-merging loop.
                thresholdInNextBin=fullData[min(i+1,len(testingValues)-1)]
                while thresholdInThisBin==thresholdInNextBin and i<=len(testingValues)-2:
                    # NOTE(review): incrementing ``i`` here does not affect the
                    # enclosing ``for`` loop, so merged bins are revisited on
                    # the next outer iteration -- verify that is intended.
                    i+=1
                    tau+=referenceValues[i]
                    hitsInThisBin+=modelOmega.bins[i]
                    if i<(len(testingValues)-1):
                        thresholdInNextBin=testingValues[i+1]
                    else:
                        thresholdInNextBin=float('-Infinity')
                # Every hit collected at this alarm level enters the trajectory
                # at the current cumulative tau.
                for j in range(hitsInThisBin):
                    trajectory[hits+j+1]=tau
                hits+=hitsInThisBin
                if hits==N:
                    break
        return trajectory
def whichLegAreWeOn(molchanTrajectory, tau):
    """
    Return the index of the last trajectory point strictly below *tau*
    (helper for areaUnderTrajectory). Returns -1 when even the first point
    reaches tau, and the last index when every point is below it.
    """
    last_below = 0
    for idx, point in enumerate(molchanTrajectory):
        if point >= tau:
            return idx - 1
        last_below = idx
    return last_below
def areaUnderTrajectory(molchanTrajectory, tau):
    """
    Area under the Molchan trajectory up to reference mass *tau*, normalised
    by tau (helper for the ASS test).
    """
    leg = whichLegAreWeOn(molchanTrajectory, tau)
    segments = len(molchanTrajectory) - 1
    # Step heights decrease linearly from 1 down to 0 across the trajectory.
    heights = [(segments - k) / segments for k in range(len(molchanTrajectory))]
    total = 0
    for k in range(leg):
        total += heights[k] * (molchanTrajectory[k + 1] - molchanTrajectory[k])
    # Partial final segment from the last full leg up to tau itself.
    total += heights[leg] * (tau - molchanTrajectory[leg])
    return total / tau
def assTest(modelLambda, modelOmega, tauSteps=0.01):
    """
    Calculates the ASS alarm test function defined by Zechar: an alarm-based
    test over regions, thresholds, miss rate and hits. Each model must expose
    ``bins`` dividing one region, every bin holding its earthquake count.
    Returns the ASS value (1 - normalised area) sampled at multiples of
    *tauSteps*.
    """
    trajectory = molchan(modelLambda, modelOmega)
    steps = int(1 / tauSteps)
    return [
        1 - areaUnderTrajectory(trajectory, (step + 1) * tauSteps)
        for step in range(steps)
    ]
| true |
1a39bf368d82a9175431e92ea811f38e9146ebc2 | Python | tripleKS/u-python | /challenging.py | UTF-8 | 997 | 4.40625 | 4 | [] | no_license | print('==== SPY GAME ====')
def spy_game(nums):
    """Return True if nums contains 0, 0, 7 in that order (not necessarily
    adjacent -- other values may appear between them)."""
    zeros_seen = 0
    for value in nums:
        if value == 0:
            zeros_seen += 1
        elif value == 7 and zeros_seen >= 2:
            return True
    return False
# Ad-hoc demo calls for spy_game.
print(spy_game([1,2,4,0,0,7,5]))
print(spy_game([1,0,2,4,0,5,7]))
print(spy_game([1,7,2,0,4,5,0,4,5,0,0,4,5,0,4,5,0,4,5,0,4,5,7]))
print(spy_game([1,0,7,2,4,5,0,7]))
print('\n==== COUNT PRIMES ====')
def is_prime(num):
    """Return True if *num* is a prime number.

    Bug fix: the old ``range(2, round(num/2))`` loop was empty for num <= 5,
    so 0, 1, 4 and every negative number were reported as prime. Trial
    division now runs up to sqrt(num) and small cases are handled explicitly.
    """
    if num < 2:
        return False
    if num % 2 == 0:
        return num == 2
    for candidate in range(3, int(num ** 0.5) + 1, 2):
        if num % candidate == 0:
            return False
    return True
def count_primes(num):
    """Return the number of primes less than or equal to *num*.

    Rewritten as a self-contained sieve of Eratosthenes: the old version
    hard-coded results up to 10 to work around is_prime's small-number bugs
    and ran trial division per candidate. Outputs are unchanged.
    """
    if num < 2:
        return 0
    is_composite = [False] * (num + 1)
    count = 0
    for candidate in range(2, num + 1):
        if not is_composite[candidate]:
            count += 1
            # Mark multiples starting at candidate^2; smaller multiples were
            # already marked by smaller prime factors.
            for multiple in range(candidate * candidate, num + 1, candidate):
                is_composite[multiple] = True
    return count
# Demo output: the number of primes up to 100 and up to 200.
print(count_primes(100))
print(count_primes(200))
print('\n==== PRINT BIG ====')
| true |
0b1eba9faa7abb7fd23b6ad3029d9e6d68172c14 | Python | OwenLiuzZ/tensorlayer | /tensorlayer/layers/convolution/depthwise_conv.py | UTF-8 | 5,396 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | #! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorlayer.layers.core import Layer
from tensorlayer.layers.core import LayersConfig
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
__all__ = [
'DepthwiseConv2d',
]
class DepthwiseConv2d(Layer):
    """Separable/Depthwise Convolutional 2D layer, see `tf.nn.depthwise_conv2d <https://www.tensorflow.org/versions/master/api_docs/python/tf/nn/depthwise_conv2d>`__.
    Input:
        4-D Tensor (batch, height, width, in_channels).
    Output:
        4-D Tensor (batch, new height, new width, in_channels * depth_multiplier).
    Parameters
    ------------
    prev_layer : :class:`Layer`
        Previous layer.
    filter_size : tuple of int
        The filter size (height, width).
    stride : tuple of int
        The stride step (height, width).
    act : activation function
        The activation function of this layer.
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    dilation_rate: tuple of 2 int
        The dilation rate in which we sample input values across the height and width dimensions in atrous convolution. If it is greater than 1, then all values of strides must be 1.
    depth_multiplier : int
        The number of channels to expand to.
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, skip bias.
    W_init_args : dictionary
        The arguments for the weight matrix initializer.
    b_init_args : dictionary
        The arguments for the bias vector initializer.
    name : str
        A unique layer name.
    Examples
    ---------
    >>> net = InputLayer(x, name='input')
    >>> net = Conv2d(net, 32, (3, 3), (2, 2), b_init=None, name='cin')
    >>> net = BatchNormLayer(net, act=tf.nn.relu, is_train=is_train, name='bnin')
    ...
    >>> net = DepthwiseConv2d(net, (3, 3), (1, 1), b_init=None, name='cdw1')
    >>> net = BatchNormLayer(net, act=tf.nn.relu, is_train=is_train, name='bn11')
    >>> net = Conv2d(net, 64, (1, 1), (1, 1), b_init=None, name='c1')
    >>> net = BatchNormLayer(net, act=tf.nn.relu, is_train=is_train, name='bn12')
    ...
    >>> net = DepthwiseConv2d(net, (3, 3), (2, 2), b_init=None, name='cdw2')
    >>> net = BatchNormLayer(net, act=tf.nn.relu, is_train=is_train, name='bn21')
    >>> net = Conv2d(net, 128, (1, 1), (1, 1), b_init=None, name='c2')
    >>> net = BatchNormLayer(net, act=tf.nn.relu, is_train=is_train, name='bn22')
    References
    -----------
    - tflearn's `grouped_conv_2d <https://github.com/tflearn/tflearn/blob/3e0c3298ff508394f3ef191bcd7d732eb8860b2e/tflearn/layers/conv.py>`__
    - keras's `separableconv2d <https://keras.io/layers/convolutional/#separableconv2d>`__
    """
    # https://zhuanlan.zhihu.com/p/31551004  https://github.com/xiaohu2015/DeepLearning_tutorials/blob/master/CNNs/MobileNet.py
    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self,
            prev_layer,
            shape=(3, 3),
            strides=(1, 1),
            act=None,
            padding='SAME',
            dilation_rate=(1, 1),
            depth_multiplier=1,
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            name='depthwise_conv2d',
    ):
        super(DepthwiseConv2d, self
             ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
        logging.info(
            "DepthwiseConv2d %s: shape: %s strides: %s pad: %s act: %s" % (
                self.name, str(shape), str(strides), padding, self.act.__name__
                if self.act is not None else 'No Activation'
            )
        )
        # Number of input channels; falls back to 1 when the static shape is
        # unknown (e.g. after a Spatial Transformer Net).
        try:
            pre_channel = int(prev_layer.outputs.get_shape()[-1])
        except Exception:  # if pre_channel is ?, it happens when using Spatial Transformer Net
            pre_channel = 1
            logging.info("[warnings] unknown input channels, set to 1")
        # Depthwise kernel shape expected by tf.nn.depthwise_conv2d.
        shape = [shape[0], shape[1], pre_channel, depth_multiplier]
        # Expand (h, w) strides to the NHWC form [1, h, w, 1].
        if len(strides) == 2:
            strides = [1, strides[0], strides[1], 1]
        if len(strides) != 4:
            raise AssertionError("len(strides) should be 4.")
        with tf.variable_scope(name):
            W = tf.get_variable(
                name='W_depthwise2d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **self.W_init_args
            )  # [filter_height, filter_width, in_channels, depth_multiplier]
            self.outputs = tf.nn.depthwise_conv2d(self.inputs, W, strides=strides, padding=padding, rate=dilation_rate)
            if b_init:
                # One bias per output channel (in_channels * depth_multiplier).
                b = tf.get_variable(
                    name='b_depthwise2d', shape=(pre_channel * depth_multiplier), initializer=b_init,
                    dtype=LayersConfig.tf_dtype, **self.b_init_args
                )
                self.outputs = tf.nn.bias_add(self.outputs, b, name='bias_add')
            self.outputs = self._apply_activation(self.outputs)
        self._add_layers(self.outputs)
        if b_init:
            self._add_params([W, b])
        else:
            self._add_params(W)
| true |
fba3d3e9f2aa528214c9b35b991969e65990ae78 | Python | seanv507/sagemaker | /criteo/analyse.py | UTF-8 | 4,113 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 23:41:03 2018
@author: sviolante
"""
import os
import re
import subprocess
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.pyplot as plt
import criteo
# Plotting style and input file locations for the Criteo click data.
sns.set(style="ticks")
fil ='data/input/train.txt'
fil_short = 'data/input/train_4000000.txt'
fil_short_vw = 'data/train/train_4000000.vw'
vw_path = 'data'
# criteo.gen_vw(fil_short, fil_short_vw, True)
# Criteo schema: 13 integer features (I00..) and 26 categorical ones (C00..).
nint=13
ncat=26
categs = ['C{:02d}'.format(c) for c in range(ncat)]
ints = ['I{:02d}'.format(c) for c in range(nint)]
dat = pd.read_csv(fil_short,sep='\t', #nrows=1000000,
                  header=None,
                  names = ['click'] + [f'I{i:02d}' for i in range(nint)]
                  + [f'C{i:02d}' for i in range(ncat)])
# Exploratory plots for one integer feature split by click label.
plt.subplot(3,5,1)
dat['I01'].hist(by=dat.click,bins=20)
dat[['click','I01']].assign(ln01=lambda x:np.log(x['I01'])).boxplot(by='click',sharex=False)
def to_apriori(line):
    """Convert one tab-separated Criteo row into apriori-style items.

    Fields after index 13 are the categoricals; each non-empty one becomes
    'Cnn:value'. Returns the items tab-joined, newline terminated.
    """
    fields = line.rstrip().split('\t')
    categorical = fields[14:]
    items = ['C{:02d}:{}'.format(idx, val) for idx, val in enumerate(categorical) if val]
    return '\t'.join(items) + '\n'
def calc_cats(ser_cat):
    """Frequency table for a categorical series: per-level counts (descending),
    running total, and cumulative share of the total."""
    table = ser_cat.value_counts().rename('counts').to_frame()
    table['cumsum'] = table['counts'].cumsum()
    table['cumfreq'] = table['cumsum'] / table['cumsum'].iloc[-1]
    return table
# Per-categorical-feature statistics: for each column, how many levels occur
# at least `counts` times.
b = {}
vcs = {}
cnts = {}
freqs = [.50,.75,.9,.95, 1]
counts = np.array([5, 10, 50, 100])
for c in categs:
    print(c)
    v = calc_cats(dat[c])
    #vcs[c] = v
    #b[c] = pd.Series(v.cumfreq.searchsorted(freqs),index=freqs,name=c)
    cn = pd.Series((-v.counts).searchsorted(-counts), index=counts, name=c)
    cnts[c] = cn
# NOTE(review): b is never populated (the line filling it is commented out),
# so this concat raises "No objects to concatenate" -- confirm intent.
res = pd.concat(b, axis=1)
res_cnt = pd.concat(cnts, axis=1)
dat[ints].describe()
# Boxplots of log-shifted integer features on a 4x4 grid.
import matplotlib.pyplot as plt
f, axes = plt.subplots(4, 4)
axes_flat = [a for ax in axes for a in ax]
for i, col in enumerate(ints):
    (dat[[col]]
     .apply(lambda x: np.log(x+3) )
     .boxplot(ax=axes_flat[i]))
    axes_flat[i].set_title('log(' + col + ' + 3)')
def create_vw_metrics_res():
    """Build a DataFrame of regexes that pull metrics out of vw's output.

    Index: metric name (the text before ' ='); columns: the raw pattern
    ('re_s') and its compiled form ('re').

    Fixes: patterns are now raw strings (plain '\\d' is an invalid escape
    warning), and ``n`` is passed by keyword because Series.str.split made it
    keyword-only in pandas 2.0.
    """
    metric_res = [
        r'passes used = (\d+)',
        r'number of examples per pass = (\d+)',
        r'average loss = ([-.0-9]+)',
        r'best constant = ([-.0-9]+)',
        r"best constant's loss = ([-.0-9]+)",
        r'total feature number = (\d+)'
    ]
    df = pd.DataFrame({'re_s': metric_res})
    df['re'] = df.re_s.apply(re.compile)
    df['col'] = df.re_s.str.split(' =', n=1).str[0]
    df = df.set_index('col')
    return df
df_metric_res = create_vw_metrics_res()
def extract_vw_results(df, results_col='results'):
    """Parse the raw vw output stored in df[results_col] into one numeric
    column per metric in the module-level df_metric_res table.

    NOTE(review): get_results_ext is not defined anywhere in this file --
    confirm it is provided elsewhere before calling this function.
    """
    ext = get_results_ext(results_col)
    for i in df.index:
        for i_re in df_metric_res.index:
            res = df_metric_res.loc[i_re, 're'].search(df.loc[i, results_col])
            if res:
                # Bug fix: np.float was removed in NumPy 1.24; the builtin
                # float is equivalent here.
                df.loc[i, i_re + ext] = float(res.group(1))
def create_cmd(dic):
    """Render a vw command line from an option dict,
    e.g. {'data': 'f.vw'} -> 'vw --data f.vw'.

    Bug fix: the options now come from the *dic* argument; previously the
    global ``vw_params`` was read and the argument was silently ignored.
    """
    return 'vw ' + ' '.join('--{} {}'.format(k, v) for k, v in dic.items())
# NOTE(review): vw_cmd is not defined at this point (create_cmd only returns
# a value), so this first check_output call raises NameError -- confirm
# whether this stray invocation should be removed.
op = subprocess.check_output(vw_cmd,shell=True, cwd=vw_path,
                             stderr=subprocess.STDOUT).decode('utf-8')
vw_params={
    'data': fil_short_vw,
    'cache': ' ',
    'holdout_after': 3000000,
    'passes': 1000,
    'early_terminate': 15,
    'l2': 0,
    'l1': 0,
    'loss_function': 'logistic'
}
results=[]
# Sweep the L2 regularisation strength and collect vw's reported metrics.
l2s = [0, 1e-2, 1e-4, 1e-6, 1e-8]
for var in l2s:
    vw_params={
        'data': fil_short_vw,
        'cache': ' ',
        'holdout_after': 3000000,
        'passes': 1000,
        'early_terminate': 15,
        'stage_poly': ' ',
        'l2': var,
        'l1': 0,
        'loss_function': 'logistic'
    }
    vw_cmd = create_cmd(vw_params)
    op = subprocess.check_output(vw_cmd,shell=True, cwd=vw_path,
                                 stderr=subprocess.STDOUT).decode('utf-8')
    res = vw_params.copy()
    res['results'] = op
    # Parse each known metric out of the captured vw output.
    for r in df_metric_res.index:
        res[r] = float(df_metric_res.loc[r, 're'].search(op).group(1))
    print(var, res['average loss'])
    results.append(res)
| true |
f484ab4805e3ba8145d3bc624e40d64b3d8a7973 | Python | 01090841589/solved_problem | /2020-01/python/18223_민준이와마산그리고건우.py | UTF-8 | 1,118 | 2.734375 | 3 | [] | no_license | import sys
# BOJ 18223: determine whether a shortest path from node 1 to node V can be
# routed through node P (then "SAVE HIM", otherwise "GOOD BYE").
sys.stdin = open("민준이와마산그리고건우.txt")
from collections import deque
# V: node count, E: edge count, P: the node we would like the path to visit.
V, E, P = map(int, input().split())
MAP = [[] for _ in range(V+1)]
for _ in range(E):
    a, b, c = map(int, input().split())
    MAP[a].append([b, c])
    MAP[b].append([a, c])
# Search states are [node, distance so far, has-visited-P flag].
que = deque()
que.append([1, 0, 0])
visited = [10000*V] * (V+1)  # upper bound on any path cost
visited[1] = 0
flag = 0
res = 10000*V  # best known distance to V
while que:
    nod, scr, konu = que.popleft()
    for arr in MAP[nod]:
        if arr[0] == V:
            # Reached the goal: keep the shortest distance and whether the
            # corresponding walk passed through P.
            if res > scr+arr[1]:
                res = scr+arr[1]
                flag = konu
            elif res == scr+arr[1] and konu == 1:
                flag = konu
            continue
        if visited[arr[0]] > scr + arr[1]:
            visited[arr[0]] = scr + arr[1]
            if arr[0] == P:
                que.append([arr[0], scr + arr[1], 1])
            else:
                que.append([arr[0], scr + arr[1], konu])
        elif visited[arr[0]] == scr + arr[1] and konu == 1:
            # Equal-cost alternative that has seen P: keep exploring it.
            que.append([arr[0], scr + arr[1], konu])
# Trivial cases: P lying on an endpoint is always on the path.
if P == 1 or P == V:
    flag = 1
if flag:
    print("SAVE HIM")
else:
print("GOOD BYE") | true |
6423c23f88dbb8538b5d7eeb3891a7b57242b719 | Python | Jing-jing-yin/tensorflow-exercise | /mnist_Adam.py | UTF-8 | 1,661 | 2.78125 | 3 | [] | no_license | import tensorflow as tf
# MNIST softmax classifier trained with Adam (TensorFlow 1.x API).
import random
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Hyper-parameters.
learning_rate=0.001
training_epochs=15
batch_size=100
# Placeholders: flattened 28x28 images and one-hot labels for 10 digits.
X=tf.placeholder(tf.float32,[None,784])
Y=tf.placeholder(tf.float32,[None,10])
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.random_normal([10]))
# Single linear layer producing class logits.
hypothesis=tf.matmul(X,W)+b
cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis,labels=Y))
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))
# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))
sess.close() | true |
fcb98b0f55908c445a73b5639d0ab3d022e58662 | Python | ryu19-1/atcoder_python | /joi2013yo/e/main.py | UTF-8 | 1,170 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
from collections import deque, Counter
from heapq import heappop, heappush
from bisect import bisect_right
from itertools import accumulate
sys.setrecursionlimit(10**6)
INF = 10**12
m = 10**9 + 7
def main():
    """Read N axis-aligned boxes and K from stdin; print the total volume of
    the region covered by at least K boxes, via coordinate compression over
    all box boundaries on each axis."""
    N, K = map(int, input().split())
    X1 = [None] * N
    X2 = [None] * N
    Y1 = [None] * N
    Y2 = [None] * N
    Z1 = [None] * N
    Z2 = [None] * N
    for i in range(N):
        X1[i], Y1[i], Z1[i], X2[i], Y2[i], Z2[i] = map(int, input().split())
    # Compressed coordinates: every box boundary along each axis.
    X = sorted(X1 + X2)
    Y = sorted(Y1 + Y2)
    Z = sorted(Z1 + Z2)
    # print(X, Y, Z)
    ans = 0
    # For each elementary cell, count how many boxes fully cover it.
    for i in range(2 * N - 1):
        for j in range(2 * N - 1):
            for k in range(2 * N - 1):
                cnt = 0
                for l in range(N):
                    if X1[l] <= X[i] and X[i + 1] <= X2[l] and Y1[l] <= Y[j] \
                            and Y[j + 1] <= Y2[l] and Z1[l] <= Z[k] and Z[k + 1] <= Z2[l]:
                        cnt += 1
                if cnt >= K:
                    ans += (X[i + 1] - X[i]) * \
                        (Y[j + 1] - Y[j]) * (Z[k + 1] - Z[k])
    print(ans)
if __name__ == "__main__":
main()
| true |
ce3cb103176d81e706b05257bbacd34f69b8e5d3 | Python | kenshinji/codingbat_python | /Warmup-2/array123.py | UTF-8 | 127 | 2.796875 | 3 | [] | no_license | def array123(nums):
    # Slide a window of three over nums looking for the exact run [1, 2, 3].
    for i in range(len(nums)-2):
        if nums[i:i+3]==[1,2,3]:
            return True
    return False
| true |
c5e379576e4168c7a7a8b102698ce7573324a12e | Python | coolmich/py-leetcode | /solu/90|Subsets II.py | UTF-8 | 554 | 2.890625 | 3 | [] | no_license | class Solution(object):
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if not len(nums): return []
if len(nums) == 1: return [nums, []]
nums, i, res = sorted(nums), 0, []
while i < len(nums) and nums[i] == nums[0]: i+=1
for item in self.subsetsWithDup(nums[1:]):
res.append([nums[0]]+item)
if i == len(nums):
res.append([])
else:
res += self.subsetsWithDup(nums[i:])
return res
| true |
a02bb8cc6743f803ba506015511e19e9e0d9496b | Python | andregama/rethink-backend | /database/mybaseclass.py | UTF-8 | 1,591 | 2.640625 | 3 | [] | no_license | import re
from logging import getLogger
from sqlalchemy import event
logger = getLogger()
class MyBase():
    """Mixin shared by the SQLAlchemy models: dict export, a readable
    multi-line repr, and automatic insert/delete audit logging."""
    def as_dict(self):
        """Return {column name: current value} for every mapped column."""
        return {col.name: getattr(self, col.name) for col in self.__table__.columns}
    def __repr__(self):
        """One attribute per line; private names skipped, long values pruned."""
        label = type(self).__name__
        pad = ' ' * (len(label) + 1)
        def shorten(value):
            text = '%s' % value
            if len(text) <= 80:
                return text
            return re.sub('\s+', ' ', text)[:80] + ' ...(pruned)'
        rows = [
            "{0}{1}='{2!s}'".format(pad, key, shorten(value))
            for key, value in self.__dict__.items()
            if not key.startswith('_')
        ]
        return '<{0}\n{1}>'.format(label, ',\n'.join(rows))
    @staticmethod
    def log_insert(_, __, object):
        logger.info(f'Inserted at DB {object!s}\n{object!r}')
    @staticmethod
    def log_delete(_, __, object):
        logger.info(f'Deleted at DB {object!s}')
    @classmethod
    def __declare_last__(cls):
        # Runs once SQLAlchemy mappings are complete; attach the audit hooks.
        # http://docs.sqlalchemy.org/en/rel_0_7/orm/extensions/declarative.html#declare-last
        event.listen(cls, 'after_insert', cls.log_insert)
        event.listen(cls, 'after_delete', cls.log_delete)
| true |
da69d8a5c07af5e93e954cf6779c4dece10a582c | Python | BastienLaby/badmintonScheduler | /findBestCombinaisons.py | UTF-8 | 4,722 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
import cProfile
combinaisonsFilepath = os.path.join(os.path.dirname(__file__), 'valid_combinaisons_nomultiplemeetings.txt')
def getCombinaisonRounds(combinaisonStr):
    '''
    Split a 56-character combination string into 7 rounds of 8 zero-based
    player indices (each input character is a 1-based player number).
    '''
    starts = range(0, 56, 8)
    return [[int(ch) - 1 for ch in combinaisonStr[s:s + 8]] for s in starts]
class Player(object):
    """A tournament player: display name plus ranking score."""
    def __init__(self, name, score):
        self.name = name
        self.score = score
class Pool(object):
    """A pool of 8 players; evaluates candidate schedules and remembers the
    most balanced one seen so far.

    Fix: the bare Python-2 ``print`` statements in printResults were a
    SyntaxError under Python 3; each is now a parenthesized single-argument
    print, which behaves identically on both interpreters.
    """
    def __init__(self, poolName, p1, p2, p3, p4, p5, p6, p7, p8):
        self.name = poolName
        self.players = (p1, p2, p3, p4, p5, p6, p7, p8)
        self.bestCombinaison = None
        self.bestCombinaisonScore = None
        self.averages = []
    def computeAverages(self):
        # averages[i][j] = mean score of the pair (player i, player j).
        for i in range(0, 8):
            self.averages.append([])
            for j in range(0, 8):
                self.averages[i].append((self.players[i].score + self.players[j].score) / 2.0)
    def getAverage(self, i, j):
        return self.averages[i][j]
    def considerCombinaison(self, combinaisonStr):
        '''
        Consider the given combinaison, and keep its results if this is a better combinaison than the one already stored.
        '''
        # Score = sum over rounds of the strength gap between the two pairs
        # of each match; lower means more balanced.
        score = 0
        for r in getCombinaisonRounds(combinaisonStr):
            score += abs(self.getAverage(r[0], r[1]) - self.getAverage(r[2], r[3])) + abs(self.getAverage(r[4], r[5]) - self.getAverage(r[6], r[7]))
        if not self.bestCombinaison or score < self.bestCombinaisonScore:
            self.bestCombinaison = combinaisonStr
            self.bestCombinaisonScore = score
    def printResults(self):
        print('Pool %s' % self.name)
        print('Best combinaison : %s (%s)' % (self.bestCombinaison, self.bestCombinaisonScore))
        for r, pairs in enumerate(getCombinaisonRounds(self.bestCombinaison)):
            print('Round %s' % r)
            print('\t%s / %s VS %s / %s' % (self.players[pairs[0]].name, self.players[pairs[1]].name, self.players[pairs[2]].name, self.players[pairs[3]].name))
            print('\t%s / %s VS %s / %s' % (self.players[pairs[4]].name, self.players[pairs[5]].name, self.players[pairs[6]].name, self.players[pairs[7]].name))
POOLS = (
Pool(
'HOMMES - Débutant - Poule A',
Player('Olivier HERBIN', 0),
Player('Enguerran BERNARD', 0),
Player('Cédric PETIT GALLEY', 0),
Player('Stevens BARGOT', 0),
Player('Denis TRIPIER', 0),
Player('Gérard LE GOUIL', 0),
Player('VIDE1', 0),
Player('VIDE2', 0)
),
Pool(
'HOMMES - Intermédiaire - Poule A',
Player('Bruno DE BASTIANI', 0),
Player('Jean-Marc BARBAGGIO', 0),
Player('Maxime RAGOT', 0),
Player('Thierry BRIEN', 0),
Player('Aurélien BRAULT', 0),
Player('Nouredine SALEH', 0),
Player('Guillaume LESPAGNOL', 0),
Player('Mickaël DHOURY', 0)
),
Pool(
'FEMMES - Intermédiaire - Poule A',
Player('Marie BOURE', 0),
Player('Corinne BERTHELOT', 0),
Player('Yilin ZHOU', 0),
Player('Noémie PAJOT', 0),
Player('Pierrette MILOT', 0),
Player('Isabelle LOREAL', 0),
Player('VIDE1', 0),
Player('VIDE2', 0)
),
Pool(
'HOMMES - Compétition - Poule A',
Player('Axel TRAN', 618.22),
Player('Nicola LUGNAGNI', 264.59),
Player('Renaud DANFLOUS', 83.45),
Player('Bastien LABY', 22.16),
Player('Jules BARBAGGIO', 18.72),
Player('Pierre BUSTINGORY', 13.78),
Player('Maxime PHILIPPON', 8),
Player('Julien LEBOIS', 7.48)
),
Pool(
'HOMMES - Compétition - Poule B',
Player('Daniel MARIN', 278.63),
Player('Renaud AGNASSE', 217.13),
Player('Bernard LAM VAN BA', 167.43),
Player('Théo DESAGNAT', 27.47),
Player('Emmanuel PATEYRON', 21.92),
Player('Vincent KAUFFMANN', 18.72),
Player('Pierre SIAUGE', 11.32),
Player('Samuel DURAND', 1.99)
),
Pool(
'FEMMES - Compétition - Poule A',
Player('Lucile PATEYRON', 528.57),
Player('Astrid GALY-DEJEAN', 395.84),
Player('Myriam DIEMER', 358.66),
Player('Mégane SIMON', 191.9),
Player('Aude MIGLIASSO', 73.03),
Player('Tiphaine CHOTEAU', 22.31),
Player('Margaux VERDIER', 12.58),
Player('VIDE1', 10) #TODO moyenne du tableau ici
),
)
def main():
    """Evaluate every pre-computed combination against every pool and print
    each pool's most balanced schedule."""
    with open(combinaisonsFilepath, 'r') as source:
        combinaison_lines = source.readlines()
    for pool in POOLS:
        pool.computeAverages()
        for line in combinaison_lines:
            pool.considerCombinaison(line)
        pool.printResults()
#TODO :
# - Build UI with inputs for PLAYERS and score
# - Add automatic score review ('https://badiste.fr/rechercher-joueur-badminton?todo=search&nom=laby&prenom=bastien&Submit=Rechercher')
if __name__ == '__main__':
cProfile.run('main()')
| true |
1520ead68500d8a91d38bc39c80ad4e2efe822fc | Python | cjm715/ml_scratch | /ml_scratch/NeuralNetworks.py | UTF-8 | 4,313 | 2.828125 | 3 | [] | no_license | import numpy as np
class NeuralNetwork:
    """Fully-connected feed-forward network trained with mini-batch SGD.

    Hidden layers use ReLU, the final layer softmax, and the loss is
    cross-entropy against one-hot labels. Weights live in ``self.W`` (one
    (out, in) matrix per layer) and biases in ``self.b`` (column vectors).
    """
    def __init__(self,
                 num_layers = 2,
                 input_size = 64,
                 num_nodes= [30, 10],
                 batch_size = 40,
                 learning_rate = 0.1):
        # NOTE(review): the mutable default ``num_nodes=[30, 10]`` is shared
        # across instances; it is only read here, but confirm callers never
        # mutate it.
        self.num_layers = num_layers
        self.input_size = input_size
        self.num_nodes = num_nodes
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        # Small random initial weights, layer by layer.
        self.W = [np.random.randn(num_nodes[0], input_size)*0.01]
        self.b = [np.random.randn(num_nodes[0], 1)*0.01]
        for i in range(1, num_layers):
            self.W.append(np.random.randn(num_nodes[i], num_nodes[i-1])*0.01)
            self.b.append(np.random.randn(num_nodes[i], 1)*0.01)
    def fit(self, X, y, X_val=None, y_val=None, num_iterations = 10000):
        """Train on (X, y); logs loss/accuracy every 100 iterations."""
        for itr in range(num_iterations):
            X_batch, y_batch = sample_batch(X, y, self.batch_size)
            a, z = self._forward(X_batch)
            dW, db = self._backward(X_batch, y_batch, a, z)
            # Vanilla gradient-descent parameter update.
            for layer_idx in range(self.num_layers):
                self.W[layer_idx] -= self.learning_rate*dW[layer_idx]
                self.b[layer_idx] -= self.learning_rate*db[layer_idx]
            if itr % 100 == 0:
                y_hat = self.predict(X)
                if y_val is not None:
                    y_val_hat = self.predict(X_val)
                    print(itr,
                          " Loss: ",
                          self._loss(y, y_hat),
                          " Train Accuracy: ",
                          self._accuracy(y, y_hat),
                          " Val Accuracy",
                          self._accuracy(y_val, y_val_hat))
                else:
                    print(itr,
                          " Loss: ",
                          self._loss(y, y_hat),
                          " Train Accuracy: ",
                          self._accuracy(y, y_hat))
    def predict(self, X):
        """Return the final-layer probabilities, one row per instance."""
        a, _ = self._forward(X)
        return a[-1].T
    def _loss(self, y, y_hat):
        # Cross-entropy against one-hot labels (mean over all entries).
        return - np.mean(y*np.log(y_hat))
    def _accuracy(self, y, y_hat):
        # Fraction of instances whose argmax prediction matches the label.
        is_correct = (np.argmax(y, axis = 1) == np.argmax(y_hat, axis = 1))
        #print(is_correct.shape)
        return sum(is_correct)/ len(is_correct)
    def _forward(self, X):
        """Forward pass; returns per-layer activations ``a`` and
        pre-activations ``z`` (columns are instances)."""
        num_instances = X.shape[0]
        a = [np.zeros((self.num_nodes[i], num_instances))
             for i in range(self.num_layers)]
        z = [np.zeros((self.num_nodes[i], num_instances))
             for i in range(self.num_layers)]
        for i in range(self.num_layers):
            if i == 0:
                z[i] = self.W[i].dot(X.T) + self.b[i]
            else:
                z[i] = self.W[i].dot(a[i-1]) + self.b[i]
            if i < (self.num_layers - 1):
                a[i] = ReLU(z[i])
            else: # layer i is the final layer
                a[i] = softmax(z[i])
        return a, z
    def _backward(self, X, y, a, z):
        """Backpropagation; returns the weight and bias gradients per layer."""
        num_instances = len(y)
        dW = [np.zeros(self.W[i].shape) for i in range(self.num_layers)]
        db = [np.zeros(self.b[i].shape) for i in range(self.num_layers)]
        # da = [np.zeros((self.num_nodes[i], num_instances))
        #      for i in range(num_layers)]
        dz = [np.zeros((self.num_nodes[i], num_instances))
              for i in range(self.num_layers)]
        # Softmax + cross-entropy yields this simple output-layer gradient.
        dz[-1] = a[-1] - y.T
        dW[-1] = (1/num_instances) *dz[-1].dot(a[-2].T)
        db[-1] = (1/num_instances) * np.sum(dz[-1], axis = 1, keepdims = True)
        for i in range(self.num_layers - 2, -1, -1):
            dz[i] = derivReLU(z[i]) * self.W[i+1].T.dot(dz[i+1])
            if i == 0:
                dW[i] = (1/num_instances) * dz[i].dot(X)
            else:
                dW[i] = (1/num_instances) * dz[i].dot(a[i-1].T)
            db[i] = (1/num_instances) * np.sum(dz[i], axis = 1, keepdims = True)
        return dW, db
def sample_batch(X, y, batch_size):
    """Draw batch_size distinct rows, uniformly without replacement, from X
    and the matching entries of y."""
    chosen = np.random.choice(X.shape[0], batch_size, replace=False)
    return X[chosen, :], y[chosen]
def derivReLU(z):
    """Elementwise ReLU derivative: 1.0 where z > 0, else 0.0."""
    return (z > 0).astype(float)
def ReLU(z):
    """Elementwise max(z, 0).

    Fixed to return a new array: the previous version zeroed *z* in place,
    silently clobbering the caller's pre-activation array.
    """
    return np.maximum(z, 0)
def softmax(z):
    """Column-wise softmax (classes along axis 0).

    Shifts by the per-column max before exponentiating: mathematically
    identical, but no longer overflows to nan/inf for large logits.
    """
    shifted = np.exp(z - np.max(z, axis=0, keepdims=True))
    return shifted / np.sum(shifted, axis=0)
| true |
7fe2d7d615784b43cdd33e453d6cf768b8a296f7 | Python | red1habibullah/DevTools | /Analyzer/python/utilities.py | UTF-8 | 403 | 2.625 | 3 | [] | no_license | # common utilities for analyzers
import ROOT
ZMASS = 91.1876
def deltaPhi(phi0,phi1):
    """Wrap the angular difference phi0 - phi1 into (-pi, pi]."""
    diff = phi0 - phi1
    two_pi = 2 * ROOT.TMath.Pi()
    while diff > ROOT.TMath.Pi():
        diff -= two_pi
    while diff <= -ROOT.TMath.Pi():
        diff += two_pi
    return diff
def deltaR(eta0,phi0,eta1,phi1):
    """Angular distance sqrt(dEta^2 + dPhi^2) between two (eta, phi)
    directions, with dPhi wrapped by deltaPhi."""
    dEta = eta0 - eta1
    dPhi = deltaPhi(phi0, phi1)
    return ROOT.TMath.Sqrt(dEta ** 2 + dPhi ** 2)
| true |
595a3a3396d15d17f840422b738f0dc930df7b53 | Python | FelipeGCosta/Introducao-a-Ciencia-da-Computacao-2018-2 | /Provas/Prova 2/Provas Tipo A/Questao A.7/gabarito.py | UTF-8 | 285 | 2.875 | 3 | [] | no_license | N = int(input())
# Read N products (one per line: name followed by its tags), then a tag
# query string; print every product having at least one tag whose text
# occurs in the query.
produtos = []
for i in range(N):
    temp = input().split()
    produtos.append((temp[0], temp[1:]))
tags = input()
requests = list(filter(lambda f: list(filter(lambda tag:
tag in tags, f[1])), produtos))
for r in requests:
    print(r[0])
| true |
fc3169d38109711dadac95856984bfe984bb14c6 | Python | mikehelmick/tek_transparency | /plot-oldies.py | UTF-8 | 5,280 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
# ie and ukni services sometimes serve stale zips - make a plot
# of those as they can affect my ie/ukni estimates
# Input is a CSV with: date,country, and a set of id,time_t+ms
import os,sys,argparse,csv,dateutil,math,statistics
import matplotlib
#matplotlib.use('Agg')
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import gif,datetime
from forecasting_metrics import *
# mainline processing
if __name__ == "__main__":
    # Plot download date (x) against the timestamp embedded in each zip
    # filename (y) for the Irish (ie) and Northern Irish (ukni) services,
    # to visualise stale zips.
    # command line arg handling
    parser=argparse.ArgumentParser(description='Plot daily TEK counts for a set of countries')
    parser.add_argument('-i','--input',
                        dest='infile',
                        help='File name (wildcards supported) containing country daily TEK count CSVs')
    parser.add_argument('-o','--output',
                        dest='outfile',
                        help='output for graph')
    parser.add_argument('-y','--yoffset',
                        action='store_true',
                        help='Y-offset for ireland')
    parser.add_argument('-c','--country',
                        dest='country',
                        help='country to graph')
    parser.add_argument('-s','--start',
                        dest='start',
                        help='start date')
    parser.add_argument('-e','--end',
                        dest='end',
                        help='end date')
    parser.add_argument('-v','--verbose',
                        help='additional output',
                        action='store_true')
    parser.add_argument('-n','--nolegend',
                        help='don\'t add legend to figure',
                        action='store_true')
    args=parser.parse_args()
    if args.verbose:
        if args.outfile is not None:
            print("Output will be in " + args.outfile)
    # Default plotting window, optionally overridden on the command line.
    mintime=dateutil.parser.parse("2020-01-01")
    maxtime=dateutil.parser.parse("2022-01-01")
    if args.start is not None:
        mintime=dateutil.parser.parse(args.start)
    if args.end is not None:
        maxtime=dateutil.parser.parse(args.end)
    if args.infile is None:
        print("Mising input file - exiting")
        sys.exit(1)
    ie_dates=[]
    ukni_dates=[]
    ie_tstamps=[]
    ukni_tstamps=[]
    rowind=1
    # Each CSV row: date, country, then (zip id, epoch-millis) pairs.
    with open(args.infile) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        for row in readCSV:
            print(rowind,row)
            rdate=dateutil.parser.parse(row[0])
            if rdate < mintime or rdate >= maxtime:
                print("Out of time range:",rdate,rowind)
                rowind+=1
                continue
            if len(row)<4:
                print("Too few cols:",rowind)
                rowind+=1
                continue
            if row[3]=='missing':
                print("Skipping missing:",rowind)
                rowind+=1
                continue
            c=row[1]
            ind=2
            # Walk the (id, millis) pairs, converting millis to datetimes.
            while ind <= len(row)-2:
                ms=int(row[ind+1])
                zt=datetime.datetime.fromtimestamp(ms//1000).replace(microsecond=ms%1000*1000)
                if c == 'ie' and (args.country is None or c == args.country):
                    ie_dates.append(rdate)
                    ie_tstamps.append(zt)
                    print("Adding",rdate,c,zt)
                elif c=='ukni' and (args.country is None or c == args.country):
                    ukni_dates.append(rdate)
                    ukni_tstamps.append(zt)
                    print("Adding",rdate,c,zt)
                else:
                    print("Odd country: ",c)
                ind+=2
            rowind+=1
    # Date axes on both x and y.
    fig, ax = plt.subplots(1)
    ax.xaxis_date()
    ax.yaxis_date()
    ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
    ax.tick_params(axis='x', which='major', labelsize=24, labelrotation=20)
    ax.tick_params(axis='y', which='major', labelsize=24)
    # X range from the observed data for the selected country/countries.
    if args.country is None:
        dmintime=min(ie_dates[0],ukni_dates[0])
        dmaxtime=max(ie_dates[-1],ukni_dates[-1])
    elif args.country == 'ie':
        dmintime=ie_dates[0]
        dmaxtime=ie_dates[-1]
    elif args.country == 'ukni':
        dmintime=ukni_dates[0]
        dmaxtime=ukni_dates[-1]
    else:
        print("Unsupported country")
        sys.exit(1)
    if args.start:
        dmintime=mintime
    if args.end:
        dmaxtime=maxtime
    ax.set_xlim(dmintime,dmaxtime)
    # Optional vertical offset so the two scatters do not overlap.
    yoffset=datetime.timedelta(days=0)
    if args.yoffset:
        yoffset=datetime.timedelta(days=3)
    plt.scatter(ie_dates,[y + yoffset for y in ie_tstamps],color='green')
    plt.scatter(ukni_dates,ukni_tstamps,marker='D',color='blue')
    if not args.nolegend:
        plt.suptitle("Irish and Northern Irish, download time vs. zip filename timestamp")
        if args.yoffset:
            plt.title("Irish y-values offset by 3 days (upwards)")
        patches=[]
        patches.append(mpatches.Patch(label="Ireland",color="green"))
        patches.append(mpatches.Patch(label="Northern Ireland",color="blue"))
        fig.legend(loc='lower center', fancybox=True, ncol=10, handles=patches)
    if args.outfile is not None:
        fig.set_size_inches(18.5, 11.5)
        plt.savefig(args.outfile,dpi=300)
    else:
plt.show()
| true |
1fc2e9c0be9dc5b9d1efb2c9cb72f4e285905111 | Python | aggieKevin/financial-analysis | /staModel.py | UTF-8 | 529 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 2 12:32:53 2018
@author: kevin he
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
df = sm.datasets.macrodata.load_pandas().data
df.head()
index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))
df.index = index
df['realgdp'].plot()
plt.ylabel("REAL GDP")
# get he cycle and trend of a cycle
gdp_cycle, gdp_trend = sm.tsa.filters.hpfilter(df.realgdp)
df["trend"] = gdp_trend
gdp_cycle.plot()
gdp_trend.plot()
| true |
f12dcad6fb2ce85b2beb79ee7c91be0d84b5be25 | Python | Bradysm/daily_coding_problems | /reversewords.py | UTF-8 | 1,941 | 4.40625 | 4 | [
"MIT"
] | permissive | # This problem is relatively easy to do not in place
# simply take the word and split the word at the whitespace
# you can then iterate over the words that were split in reverse order
# and then join the split words on a whitespace. bada boom bada bang
# O(s) time and space, where s is the length of the string
def reverse_words(s: str) -> str:
return ' '.join(reversed(s.split(' ')))
word = "hello world here"
print("reversed: \'{w}\'".format(w=reverse_words(word)))
# if we assume that the string is mutable (so this could be an instance where the characters are passed in
# an array) Then we can perform another algorithm to compute this result. The way I thought of this problem
# was to mentally replace all the characters for a word with a placeholder. If we then reverse the list with the
# placeholder, then the placeholder will be in the correct space i.e. the word is in the correct space in the lsit
# we just need to make sure the word is in the correct order. So if we reverse the whole list, then the words will
# be in the correct position. We then make a second pass with two pointers and reverse the words contained
# within the array themselves to get the words in the correct order.
# O(s) time and O(1) space
def reverse_words2(arr: list) -> list:
# implementation
reverse(arr, 0, len(arr)-1) # reverse the whole array
start = 0
for i in range(len(arr)):
if arr[i] == ' ' and i-1 >= 0: # check to see if we're on a delimiter
reverse(arr, start, i-1)
start = i+1 # place start after the delimiter
reverse(arr, start, len(arr)-1) # reverse the last word
return arr
def reverse(arr: list, start: int, finish: int):
while start < finish:
temp = arr[start]
arr[start] = arr[finish]
arr[finish] = temp
start += 1
finish -=1
word
word_list = [c for c in word]
print("".join(reverse_words2(word_list))) | true |
b41868e790b8b54ddb5f217560e5f99ff4e7f753 | Python | 742617000027/advent-of-code-2020 | /14/14.py | UTF-8 | 1,850 | 2.875 | 3 | [] | no_license | from time import time
import utils
def masking(v, m):
ret = ''
for x, y in zip(v, m):
ret += x if y == 'X' else y
return ret
def float_masking(v, m):
ret = ['']
for x, y in zip(v, m):
if y == '0':
for i in range(len(ret)):
ret[i] += x
elif y == '1':
for i in range(len(ret)):
ret[i] += y
else:
ret.extend(ret)
for i in range(len(ret)):
ret[i] += str((i < len(ret) / 2) * 1)
return ret
if __name__ == '__main__':
# Part 1
"""
tic = time()
sequence = utils.read_str_sequence()
mem = dict()
mask = sequence[0].replace('mask = ', '')
for line in sequence[1:]:
if 'mask' in line:
mask = line.replace('mask = ', '')
else:
pos, val = line.split(' = ')
pos = int(pos.replace('mem[', '').replace(']', ''))
val = bin(int(val))[2:].zfill(36)
val = int(masking(val, mask), 2)
mem[pos] = val
toc = time()
print(sum([val for val in mem.values()]))
print(f'finished in {1000 * (toc - tic):.2f}ms') # 3.03ms
"""
# Part 2
tic = time()
sequence = utils.read_str_sequence()
mem = dict()
mask = sequence[0].replace('mask = ', '')
for line in sequence[1:]:
if 'mask' in line:
mask = line.replace('mask = ', '')
else:
pos, val = line.split(' = ')
pos = pos.replace('mem[', '').replace(']', '')
pos = bin(int(pos))[2:].zfill(36)
addresses = float_masking(pos, mask)
for address in addresses:
mem[int(address, 2)] = int(val)
toc = time()
print(sum([val for val in mem.values()]))
print(f'finished in {1000 * (toc - tic):.2f}ms') # 211.31ms
| true |
99a2f630f3af218640221baf4a931e2a8ddb2c31 | Python | shankar7791/MI-10-DevOps | /Personel/AATIF/Python/Practice/09-MAR/prog3.py | UTF-8 | 111 | 3.453125 | 3 | [] | no_license | count = 0
for letter in 'Hello World' :
if(letter == 'l') :
count += 1
print(count,'letters found') | true |
e5087086b7bdd5373bb380c214e2c956d34640c2 | Python | emculber/CSI-5810 | /Project 1/reduced_neural.py | UTF-8 | 4,495 | 2.703125 | 3 | [] | no_license | import urllib
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report,confusion_matrix
def KNN(path_i, nn):
n_neighbors = nn
X = np.array(c1+c2)
y = [0, 0, 0, 1, 1, 1, 1]
h = .02
knn=neighbors.KNeighborsClassifier()
knn.fit(X, Y)
# x_min, x_max = X[:,0].min() - .5, X[:,0].max() + .5
# y_min, y_max = X[:,1].min() - .5, X[:,1].max() + .5
# xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
def generate_csv(path1, path2):
f = open(path1, "r")
fo = open(path2, "w")
fo.write("features")
for i in range(561):
fo.write(",%s"%(i+1))
fo.write("\n")
count = 0
for line in f:
tmp_line = ""
eles = line[:-1].split(" ")
count += 1
tmp_line += "%d"%count
for ele in eles:
if not ele=="":
tmp_line += ",%s"%ele
tmp_line += "\n"
fo.write("%s"%tmp_line)
f.close()
fo.close()
def feature_PCA(path_i, path_o, num):
tb_existing_url_csv = None
local_tb_existing_file = path_i
existing_df = pd.read_csv(
local_tb_existing_file,
index_col = 0,
thousands = ',')
existing_df.index.names = ['feature']
existing_df.columns.names = ['item']
existing_df.head()
print "Original data:"
print existing_df
print "\n--------------"
print "%d-Dim:"%num
pca = PCA(n_components=num)
pca.fit(existing_df)
existing_2d = pca.transform(existing_df)
print existing_2d
fo = open(path_o, "w")
fo.write("features")
for i in range(num):
fo.write(",%s"%(i+1))
fo.write("\n")
count = 0
for line in existing_2d:
count += 1
fo.write("%d"%count)
for ele in line:
# print ele
fo.write(",%f"%ele)
# print "\n"
fo.write("\n")
fo.close()
def pca(train, test, num):
print "Original data:"
print train
print "\n--------------"
print "%d-Dim:"%num
pca = PCA(n_components=num)
pca.fit(train)
train = pca.transform(train)
test = pca.transform(test)
return train, test
def load_data(path1, path2, path3, path4):
f = open(path1, "r")
count = 0
train_data_x = []
for line in f:
if count==0:
count += 1
continue
eles = line[:-1].split(",")[1:]
f_eles = []
for ele in eles:
f_eles.append(float(ele))
train_data_x.append(f_eles)
count += 1
f.close()
f = open(path2, "r")
train_data_y = []
for line in f:
ele = line[:-1].split(",")[0]
train_data_y.append(int(ele))
f.close()
f = open(path3, "r")
count = 0
test_data_x = []
for line in f:
if count==0:
count += 1
continue
eles = line[:-1].split(",")[1:]
f_eles = []
for ele in eles:
f_eles.append(float(ele))
test_data_x.append(f_eles)
count += 1
f.close()
f = open(path4, "r")
test_data_y = []
for line in f:
ele = line[:-1].split(",")[0]
test_data_y.append(int(ele))
f.close()
print train_data_y
print train_data_x[0]
return (train_data_x, train_data_y, test_data_x, test_data_y)
def neu(train_dataset, train_label_dataset, test_dataset, test_label_dataset):
scaler = StandardScaler()
scaler.fit(train_dataset)
train_dataset = scaler.transform(train_dataset)
test_dataset = scaler.transform(test_dataset)
mlp = MLPClassifier(hidden_layer_sizes=(400,400,300,200,100))
mlp.fit(train_dataset, train_label_dataset)
predictions = mlp.predict(test_dataset)
print(confusion_matrix(test_label_dataset,predictions))
print(classification_report(test_label_dataset,predictions))
print(len(mlp.coefs_))
print(len(mlp.coefs_[0]))
print(len(mlp.intercepts_[0]))
num = 10
path1 = "X_train.txt"
path1_test = "X_test.txt"
path2 = "features.csv"
path2_test = "features_test.csv"
path_y1 = "y_train.txt"
path_y2 = "y_test.txt"
path3 = "feature_%d.csv"%num
path4 = "test_%d.csv"%num
generate_csv(path1, path2)
generate_csv(path1_test, path2_test)
if num>0 and num<561:
feature_PCA(path2, path3, num)
feature_PCA(path2_test, path4, num)
train_d,train_l, test_d, test_l = load_data(path3, path_y1, path4, path_y2)
neu(train_d,train_l, test_d, test_l)
elif num==561:
## PCA
train_d,train_l, test_d, test_l = load_data(path2, path_y1, path2_test, path_y2)
print len(train_d), len(train_d[0]), len(train_l)
neu(train_d,train_l, test_d, test_l)
else:
print "input a PCA dim in range of [1,561]"
| true |
e02f9defa7c2b79639a4c35e544b36cdb6119eea | Python | gabihartobanu/grep_project | /grep/grep.py | UTF-8 | 4,799 | 2.5625 | 3 | [] | no_license | import re
import os
import argparse
out_file = "D:\\files\\test"
global got_mutiple_files
def find_in_file(location, find_what, ignore_case_option, not_option, count_option):
print("In functia de cautare")
global got_mutiple_files
count = 0
file_handle = open(location, "r")
if count_option:
if not_option:
if ignore_case_option:
for line in file_handle.readlines():
if not re.search(find_what, line, re.IGNORECASE):
count += 1
if got_mutiple_files:
print(location + ":" + count)
else:
print(count)
else:
for line in file_handle.readlines():
if not re.search(find_what, line):
count += 1
if got_mutiple_files:
print(location + ":" + count)
else:
print(count)
else:
if ignore_case_option:
for line in file_handle.readlines():
if re.search(find_what, line, re.IGNORECASE):
count += 1
if count >= 1:
if got_mutiple_files:
print(location + ":" + count)
else:
print(count)
else:
for line in file_handle.readlines():
if re.search(find_what, line):
count += 1
if count >= 1:
if got_mutiple_files:
print(location + ":" + count)
else:
print(count)
else:
if not_option:
if ignore_case_option:
for line in file_handle.readlines():
if not re.search(find_what, line, re.IGNORECASE):
if got_mutiple_files:
print(location + ":" + line[:-1])
else:
print(line[:-1])
else:
for line in file_handle.readlines():
if not re.search(find_what, line):
if got_mutiple_files:
print(location + ":" + line[:-1])
else:
print(line[:-1])
else:
if ignore_case_option:
for line in file_handle.readlines():
if re.search(find_what, line, re.IGNORECASE):
if got_mutiple_files:
print(location + ":" + line[:-1])
else:
print(line[:-1])
else:
for line in file_handle.readlines():
if re.search(find_what, line):
if got_mutiple_files:
print(location + ":" + line[:-1])
else:
print(line[:-1])
file_handle.close()
def find_in_location(location, find_what, ignore_case_option , not_option, count_option):
global got_mutiple_files
if os.path.exists(location):
print("fisierul/directorul exista")
if os.path.isdir(location):
print("locatia e director")
content = os.listdir(location)
for file in content:
new_location = os.path.join(location, file)
find_in_location(new_location)
elif os.path.isfile(location) and location[-4:] == ".txt":
print("locatia este fisier")
find_in_file(location, find_what, ignore_case_option, not_option, count_option)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Simulates some grep functionalities")
parser.add_argument("--not_option", "-n", action='store_true', required=False, help="Verifica daca nu face match")
parser.add_argument("--ignoreCase", "-i", action='store_true', required=False, help="Ignore case for search")
parser.add_argument("--count", "-c", action='store_true', required=False, help="Count number of apariton in file")
parser.add_argument("EXPRESION", metavar="e", type=str, help="Ce sa caute")
parser.add_argument("PATH", type=str, metavar="p", help="Unde sa caute")
args = parser.parse_args()
not_option = args.not_option
ignoreCase_option = args.ignoreCase
count_option = args.count
expresion = args.EXPRESION
path = args.PATH
global got_mutiple_files
if os.path.exists(path) and os.path.isdir(path):
got_mutiple_files = True
else:
got_mutiple_files = False
find_in_location(path, expresion, ignoreCase_option, not_option, count_option) | true |
a4647036e594a738d219b81046d003cc117527c1 | Python | NyanCat12/CrossinWeekly | /20170922/0922.py | UTF-8 | 219 | 3.265625 | 3 | [] | no_license | import math
def uniquePath(m,n):
return ((math.factorial(m+n-2))/((math.factorial(m-1))*(math.factorial(n-1))))
if __name__ == "__main__":
print (uniquePath(1,1))
print (uniquePath(3,3))
print (uniquePath(10,20))
| true |
7be0ea15b0c2b27cf4351bcc9afdb0f34b408115 | Python | meloun/py_ewitis | /libs/comm/serialprotocol.py | UTF-8 | 9,576 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
Created on 16.9.2009
@author: Luboš Melichar
desc: UART protocol for application-terminal communication
application counts as MASTER, terminal counts as SLAVE
communication session can be initiated only by MASTER
MASTER starts by sending command
------------------------------------------------------------
| STARTBYTE | SEQ | COMMAND | DATALENGTH | DATA | .. | XOR |
------------------------------------------------------------
STARTBYTE = 0x53 (byte), constant for start of frame
SEQ = 0-255(byte), control id of command in case of retransmit
COMMAND = 0-0x7F(byte), application command
= 0-0x7F+0x80(byte), terminal acknowledge for good received command
DATALENGTH = 0-255(byte), length of transmitted data
DATA = payload, contains useful info
XOR = 0-0xFF(byte), logic xor of STARTBYTE, SEQ, COMMAND, DATALENGTH and DATA (if any)
SLAVE executes command (using function comm_app_process()) and
responds with the equal structured frame, but the COMMAND MSb is
set to 1
Commands
CMD_GET_RUN_PAR_INDEX 0x30
CMD_GET_TIME_PAR_INDEX 0x32
'''
import serial, time
import binascii
import sqlite3
from struct import unpack
import _winreg as winreg
import re, itertools
#from ewitis.log import log
START_BYTE = "\x53"
FRAMELENGTH_NO_DATA = 5
#protocol states
(eNONE, eWAIT_FOR_STARTBIT, eWAIT_FOR_SEQ, eWAIT_FOR_COMMAND, eWAIT_FOR_DATALENGTH, eWAIT_FOR_DATA, eWAIT_FOR_XOR) = range(0,7)
#exceptions
class Receive_Error(Exception): pass
class SR_SeqNr_Error(Exception): pass
class SendReceiveError(Exception): pass
class SerialProtocol():
def __init__(self, xcallback, port = None, baudrate = 9600):
self.callback = xcallback
self.port = port
self.baudrate = baudrate
self.seq_id = 1
def xor(self, string):
xor = 0
for i in range(len(string)):
xor ^= ord(string[i])
return xor
def open_port(self):
self.ser = serial.Serial(self.port, self.baudrate)
'''otevreni portu'''
if self.ser.isOpen() == 1: #port jiz otevren
self.ser.close()
self.ser.open()
if self.ser.isOpen() == 0: #port se neotevrel
print "E: Can not open port:", self.ser.name
exit
else:
print "I: Port is succesfully open:", self.ser.name
def close_port(self):
'''zavreni portu'''
self.ser.close()
if self.ser.isOpen() == 1: #port se neotevrel
print "E: Can not close port:", self.ser.name
exit
else:
print "I: Port is succesfully close:", self.ser.name
#===========================================================================
# send 1 frame
#===========================================================================
def send_frame(self, cmd, data):
#self.frame_id = 0x53
aux_string = START_BYTE;
aux_string += (chr(self.seq_id))
aux_string += (chr(cmd))
aux_string += chr(len(data))
aux_string += data
aux_string += chr(self.xor(aux_string));
#print aux_string.encode('hex')
self.ser.write(aux_string)
#return self.seq_id
#===========================================================================
# wait for receiving the frame
# or comes timeout and set init state
#===========================================================================
def receive_frame(self):
frame = {}
state = eWAIT_FOR_STARTBIT
#print "buffer:", self.ser.inWaiting(),
#wait for the start bit
if state == eWAIT_FOR_STARTBIT:
znak = self.ser.read()
while (znak != START_BYTE):
znak = self.ser.read()
state = eWAIT_FOR_SEQ
#print "\n=>eWAIT_FOR_ID",
if state == eWAIT_FOR_SEQ:
znak = self.ser.read()
frame['seq_id'] = ord(znak)
state = eWAIT_FOR_COMMAND
#print "=>eWAIT_FOR_COMMAND",
if state == eWAIT_FOR_COMMAND:
znak = self.ser.read()
frame['cmd'] = ord(znak)
state = eWAIT_FOR_DATALENGTH
#print "=>eWAIT_FOR_DATALENGTH",
if state == eWAIT_FOR_DATALENGTH:
znak = self.ser.read()
frame['datalength'] = ord(znak)
state = eWAIT_FOR_DATA
#print "=>eWAIT_FOR_DATA",
if state == eWAIT_FOR_DATA:
#cnt = 0
if(self.ser.inWaiting()<frame['datalength']):
print"E:NEDOSTATEK DAT! (cekam..)"
#else:
frame['data'] = self.ser.read(frame['datalength'])
state = eWAIT_FOR_XOR
#print "=>eWAIT_FOR_XOR",
if state == eWAIT_FOR_XOR:
znak = self.ser.read()
#callback_return = self.callback(frame['cmd'], frame['data'])
state = eWAIT_FOR_STARTBIT
return frame
raise Receive_Error()
#=======================================================================
# - vyslani cmd + data
# - prijmuti odpovedi
# - zavolani callbacku(cmd, data) a vraceni jiz slovniku s konkretnimi daty
#=======================================================================
def send_receive_frame(self, cmd, data):
'''clear buffers'''
self.ser.flushInput()
self.ser.flushOutput()
for attempt in range(3):
'''increment sequence id'''
self.seq_id += 1
self.seq_id &= 0xFF
'''send frame'''
self.send_frame(cmd, data)
'''wait for enough data'''
for attempt_2 in range(5):
if(self.ser.inWaiting() >= len(data) + FRAMELENGTH_NO_DATA):
break
time.sleep(0.1)
else:
continue #no enough data, try send,receive again
'''receive answer'''
try:
aux_frame = self.receive_frame()
if(aux_frame['seq_id'] != self.seq_id):
raise SendReceiveError(1, "no match sequence ids")
'''ALL OK'''
break #end of for
except (Receive_Error, SendReceiveError) as (errno, strerror):
print "W:SendReceiveError - {1}({0}) , try again..".format(errno, strerror)
else:
raise SendReceiveError(100,"no valid response")
'''call user callback to parse data to dict structure'''
aux_dict = self.callback(aux_frame['cmd'], aux_frame['data'])
'''ADD COMMON data and errors'''
'''common errors'''
#aux_dict['common_errors'] = 0
return aux_dict
if __name__ == "__main__":
import struct
import libs.file.file as file
def funkce_callback(command, data):
print "\nCallback=> cmd:", hex(command), "data", data.encode('hex')
if(command == CMD_GET_RUN_PAR_INDEX):
return "get time"
elif(command == (CMD_GET_TIME_PAR_INDEX | 0x80)):
''' GET_TIME_PAR_IDNEX => RUN struct (16b) + 2b error
| error (2b) | state(1b) | id (4b) | run_id (2b) | user_id (4b) | cell (1b) | time(4b) |
'''
aux_run = {}
aux_run['error'], aux_run['state'], aux_run['id'], aux_run['run_id'], \
aux_run['user_id'], aux_run['cell'], aux_run['time'], = struct.unpack("<HBIHIBI", data)
return aux_run
return "error"
csv_export_file = file.File("export.csv")
protokol = SerialProtocol( funkce_callback, port='COM8', baudrate=38400)
try:
protokol.open_port()
except serial.SerialException:
print "Port se nepodařilo otevřít"
else:
index = 0x00
aux_csv_string = "state;index;id;time\n"
csv_export_file.add(aux_csv_string)
while(1):
time.sleep(1)
''' send request and receive run record '''
try:
run = protokol.send_receive_frame(CMD_GET_TIME_PAR_INDEX, chr(index)+"\x00")
except sqlite3.IntegrityError:
raise
if(run['error'] == 0):
aux_csv_string = str(run['state']) + ";" + str(index) + ";" + str(run['id']) + ";" + str(run['time'])
print "I:receive run: " + aux_csv_string
csv_export_file.add(aux_csv_string)
index += 1
else:
print "no new run"
| true |
adbf2c348611a49bf198b51f1d20eb2032abde70 | Python | cwza/leetcode | /python/35-Search Insert Position.py | UTF-8 | 789 | 3.453125 | 3 | [] | no_license | from typing import List
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
"Binary Search, Time: O(logn), Space: O(1)"
l, r = 0, len(nums)
while l < r:
m = l + (r-l)//2
if nums[m] >= target: r = m
else: l = m + 1
return l
nums = [1,3,5,6]
target = 5
result = Solution().searchInsert(nums, target)
assert result == 2
nums = [1,3,5,6]
target = 2
result = Solution().searchInsert(nums, target)
assert result == 1
nums = [1,3,5,6]
target = 7
result = Solution().searchInsert(nums, target)
assert result == 4
nums = [1,3,5,6]
target = 0
result = Solution().searchInsert(nums, target)
assert result == 0
nums = [1]
target = 0
result = Solution().searchInsert(nums, target)
assert result == 0 | true |
9cfca65da6e5db2d8824e8e8d8d87e5371c244f3 | Python | wanglethan/Games | /Minesweeper/settings.py | UTF-8 | 504 | 2.90625 | 3 | [] | no_license | import pygame
import math
import random
tiles = []
pygame.init()
pygame.display.set_caption("Minesweeper")
run = True
x = 20
y = 15
mines = 100
tile_size = 30
FPS = 1001
width = x * tile_size
height = y * tile_size
margin = 15
header = 50
screen = pygame.display.set_mode((width + margin*2, height + margin*2 + header))
mouseX = 0
mouseY = 0
press = False
first_move = False
# Colors
black = (0, 0, 0)
white = (255, 255, 255)
light_grey = (220, 220, 220)
grey = (180, 180, 180)
red = (255, 0, 0) | true |
dd6c04e320e3b45b6a661e10e535064438614ed4 | Python | sn8ke01/movieranker | /afi_top100.py | UTF-8 | 1,003 | 3.265625 | 3 | [] | no_license | import requests
import bs4
import re
import csv
import collections
def get_html():
url = 'https://www.afi.com/100Years/movies10.aspx'
response = requests.get(url)
return response.text
def get_movie_list(html):
movie_data = []
soup = bs4.BeautifulSoup(html, 'html.parser')
title = soup.find_all(class_='filmTitle')
for t in title:
movie_data.extend(t)
return movie_data
def generate_csv_data(movie_list):
for index, entry in enumerate(movie_list):
entry = entry.strip()
entry = entry.replace('(', '').replace(')', '').replace('.', '')
entry = re.sub(' ', ',', entry, 1)
entry = re.sub('\s(?=\d{4})', ',', entry)
print('{},{}'.format(entry, index + 1))
def main():
# print(response.status_code)
# print(response.text)
html = get_html()
movie_list = get_movie_list(html)
generate_csv_data(movie_list)
if __name__ == '__main__':
main()
| true |
7d7d5764f91a7db5ecd7cf6fae9c9fd0ddb88760 | Python | cww33/Shopping-Cart | /my_test.py | UTF-8 | 465 | 2.71875 | 3 | [] | no_license | from shopping_cart import to_usd
from shopping_cart import taxtotal
from shopping_cart import total
from shopping_cart import subtotal
from shopping_cart import taxpercentage
def test_to_usd():
result= to_usd(73498.82 )
assert result == " $73,498.82"
assert to_usd(9.9) == " $9.90"
def test_taxtotal():
result= taxtotal
assert result == subtotal*taxpercentage
def test_total():
result= subtotal+taxtotal
assert result == total
| true |
a8c705ac045d0d87d38b844be947aa3e1f9e064a | Python | BjarneKraak/Autonomous-Vehicles-Conquering-The-World-Group-4 | /Final lab/example code/python_sample/memesim.py | UTF-8 | 6,500 | 2.734375 | 3 | [] | no_license | import math
from time import sleep
# import code that is used
from lib.memegenome import MemeGenome
from lib.memesimcommand import MemeSimCommand
from lib.memesimresponse import MemeSimResponse
from lib.memesimclient import MemeSimClient
from lib.zigbee import Zigbee
# Global variables/constants that can be accessed from all functions should be defined below
x_pos = [None] * 3
y_pos = [None] * 3
angle = [None] * 3
#make different destination for every robot so a array with 3 variables
destination = [None] * 3
destination[0] = "C1"
#Location of all cities and lab
#Contintent 1:
C1 = [2550, 250]
C2 = [3250, 250]
C3 = [3250, 950]
C4 = [2550, 1250]
#continent 2:
C5 = [3250, 2550]
C6 = [3250, 3250]
C7 = [2550, 3250]
C8 = [2250, 2250]
#continent 3:
C9 = [950, 3250]
C10 = [250, 3250]
C11 = [250, 2250]
C12 = [1250, 2250]
#lab (4):
LAB = [175, 1025]
#middle of continents
M1 = [1750, 875]
M2 = [4375, 4375]
M3 = [875, 1750]
MLAB = [875, 875]
# Create a Zigbee object for communication with the Zigbee dongle
# Make sure to set the correct COM port and baud rate!
# You can find the com port and baud rate in the xctu program.
ZIGBEE = Zigbee('COM12', 9600)
# set the simulator IP address
MEMESIM_IP_ADDR = "131.155.124.132"
# set the team number here
TEAM_NUMBER = 4
# create a MemeSimClient object that takes car of all TCP communication with the simulator
MEMESIM_CLIENT = MemeSimClient(MEMESIM_IP_ADDR, TEAM_NUMBER)
# dictionary to hold a collection of memes
MY_MEMES = dict()
# the setup function is called once at startup
# you can put initialization code here
def setup():
# create a collection of random memes
for i in range(0, 10):
mg = MemeGenome.random_meme_genome()
mg[0] = 'A'
mg[99] = mg[0]
MY_MEMES['Meme'+str(i)] = mg
# connect to the simulator
MEMESIM_CLIENT.connect()
ZIGBEE.write(b'The program has started')
# the process_response function is called when a response is received from the simulator
def process_response(resp):
global x_pos
global y_pos
global angle
if resp.cmdtype() == 'rq':
if not resp.iserror():
robot_id = int(resp.cmdargs()[1])
#save positions of robot
x_pos[robot_id - 10] = float(resp.cmdargs()[2])
y_pos[robot_id - 10] = float(resp.cmdargs()[3])
angle[robot_id - 10] = ( float(resp.cmdargs()[4]) / (2*math.pi) )*360 #find angle and convert radians to degrees
#print("Received response: " + str(resp))
ZIGBEE.write(b'The program has started')
data = readZIGBEE()
if len(data) is not 0:
print(data)
#FUNCTIONS:_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#
#NEXT_FUNCTION:
def navigate_to(destination, robot_id):
update_position(robot_id)
continent = find_continent(robot_id)
if (continent == 'CON1'):
drive_to(M1[0],M1[1], robot_id)
if (continent == 'CON2'):
drive_to(M2[0],M2[1], robot_id)
if (continent == 'CON3'):
drive_to(M3[0],M3[1], robot_id)
if (continent == 'LAB'):
drive_to(LAB[0],LAB[1], robot_id)
#read_pos(10)
#update responses
def update_responses():
# get new responses
RESPONSES = MEMESIM_CLIENT.new_responses()
# process new responses
for r in RESPONSES:
process_response(r)
#find current continents
def find_continent(robot_id):
continent = None
if x_pos[robot_id - 10]<1450 and y_pos[robot_id - 10]<1450:
continent = "LAB"
if x_pos[robot_id - 10]>2100 and y_pos[robot_id - 10]<1450:
continent = "CON1"
if x_pos[robot_id - 10]>2100 and y_pos[robot_id - 10]>2100:
continent = "CON2"
if x_pos[robot_id - 10]<1450 and y_pos[robot_id - 10]>2100:
continent = "CON3"
#print('continent = ', continent) #debug message
return continent
def drive_to(x_goal, y_goal, robot_id):
#print("entered drive_to function") #debug message
update_position(robot_id)
angle_difference_vector = alignment_angle(x_goal, y_goal, robot_id)
if (abs(angle_difference_vector - angle[robot_id - 10]) > 8): # 8 is foutmarge
if (angle_difference_vector - angle[robot_id - 10] < 0):
while (angle_difference_vector - angle[robot_id - 10] < 0):
print('Angle of difference vector is', angle_difference_vector)
print('Angle of robot is', angle[robot_id - 10])
ZIGBEE.write(b'r') #send: turn to right
update_position(robot_id) #update position
print("turn to right")
sleep(0.3) #wait for stability
elif (angle_difference_vector - angle[robot_id - 10] > 0):
while (angle_difference_vector - angle[robot_id - 10] > 0):
print('Angle of difference vector is', angle_difference_vector)
print('Angle of robot is', angle[robot_id - 10])
ZIGBEE.write(b'l') #send: turn to left
update_position(robot_id) #update position
print("turn to left")
sleep(0.3) #wait for stability
ZIGBEE.write(b's')
print("stop turning")
#update position of robots: find x, y, and angle of robot
def update_position(robot_id):
RQ1 = MemeSimCommand.RQ(4, robot_id) #make a request
MEMESIM_CLIENT.send_command(RQ1) #send request
sleep(1.0) #wait a bit
update_responses() #find answers to responses: to x, y and angle
#find alginment angle of robot with goal
def alignment_angle(x_goal, y_goal, robot_id):
difference_vector = [None] * 2
difference_vector[0] = x_goal - x_pos[robot_id - 10]
difference_vector[1] = y_goal - y_pos[robot_id - 10]
angle_difference_vector = math.atan2(difference_vector[1], difference_vector[0]) * 180 / math.pi
return angle_difference_vector
#read info from zigbee module
def readZIGBEE():
data = str(ZIGBEE.read()) #read data as non string (dunno what it is) and convert to string
data = data[2:len(data)-1] # delete begin b' and '
return data #return
#read position of robot
def read_pos(robot_id):
global x_pos
global y_pos
global angle
print( x_pos[robot_id - 10] )
print( y_pos[robot_id - 10] )
print( angle[robot_id - 10] )
#END_OF_FUNCTIONS_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#
# call the setup function for initialization
setup()
while True:
navigate_to(destination[0], 10)
navigate_to(destination[1], 11)
navigate_to(destination[2], 12)
| true |
8fe1509e113e033eb3bcf173b3d6aa794c63dbb9 | Python | gffryclrk/ThinkPython2e | /ch7/ex7.1.py | UTF-8 | 1,187 | 3.546875 | 4 | [] | no_license | # py 3.7
import math
from decimal import Decimal
def mysqrt(a):
x = a / 2.0
epsilon = 0.0000001
while True:
# print(x)
y = (x + a/x) / 2
if math.fabs(y - x) < epsilon:
break
x = y
return x
# print('4: ', mysqrt(4))
# print('9: ', mysqrt(9))
# print('19: ', mysqrt(19))
# print('100: ', mysqrt(100))
def test_square_root(a):
col_headers = ['a','mysqrt(a)','math.sqrt(a)','diff']
print('\t'.join(col_headers))
line = ''
for header in col_headers:
line += '-'*len(header)
line += '\t'
print(line)
for i in a:
line = []
line.append(i)
line.append(mysqrt(i))
line.append(math.sqrt(i))
# print(line[1], " ", line[2], " ", line[1] - line[2])
line.append(line[1] - line[2])
line = list(map(str, line))
for c in range(1, len(col_headers)-1):
length = len(col_headers[c]) + 3
if len(line[c]) >= length: line[c] = line[c][0:length]
else: line[c] = line[c] + ' '*(length - len(line[c]))
# print(line)
print('\t'.join(line))
test_square_root([1,2,3,4,5,6,7,8, 9, 15, 20, 100])
| true |
8f3d8feaefab809054e2aed76d5cb7330c5450cf | Python | jcarlos46/golem-py | /remote.py | UTF-8 | 776 | 2.734375 | 3 | [] | no_license | import re
import os
import paramiko
from getpass import getpass
def info(path):
user = re.search('(.*)@',path).group(1)
host = re.search('@(.*):',path).group(1)
port = re.search(':(\d+)\/',path).group(1)
root = re.search(':\d+(\/.*)',path).group(1)
root = os.path.abspath(root)
return Info(user, host, port, root)
def sftpclient(path):
info = info(path)
password = getpass(info.host + "\'s password: ")
t = paramiko.Transport((info.host,info.port))
t.start_client()
t.auth_password(info.user,password)
sftp = t.open_session()
sftp = paramiko.SFTPClient.from_transport(t)
return sftp
class Info:
def __init__(self, user, host, port, root):
self.user = user
self.host = host
self.port = int(port)
self.root = root | true |
f4795e8a34ae8df2fb0d97182a16b0ee75085e8c | Python | lywc20/daily-programming | /Python/GeneratorExamples.py | UTF-8 | 470 | 3.859375 | 4 | [
"MIT"
] | permissive | def my_gen():
n = 1
print("This printed first")
yield n
n += 1
print("This printed second")
yield n
n += 2
print("This printed third")
yield n
def rev_str(string):
length = len(string)
for i in range(length - 1,-1,-1):
yield string[i]
##for char in rev_str("Hello"):
## print(char)
my_list = [2,3,4,5]
#print([x**2 for x in my_list])
a = (x**2 for x in my_list)
print(next(a))
print(next(a))
print(next(a))
| true |
ba1bc154fbda4e0582feeed0a6eb8bfe2b47c5c5 | Python | Ongakute/SI-2021-DALA-game | /main_ai.py | UTF-8 | 7,277 | 3.15625 | 3 | [] | no_license | from Board import Board
from mcts.nodes import *
from mcts.search import MonteCarloTreeSearch
from State import State
from Gui import Gui
import Gui_user
def init():
    """Build the opening position by letting MCTS play the first move.

    Starts from a fresh Board wrapped in a State (player 1, phase 0),
    runs a 50-simulation search and returns the resulting
    (state, board) pair.
    """
    start_state = State(Board(), 1, 0)
    search_root = MonteCarloTreeSearchNode(state=start_state, parent=None)
    searcher = MonteCarloTreeSearch(search_root)
    chosen = searcher.best_action(50)
    chosen_state = chosen.state
    return chosen_state, chosen_state.board
class Game:
    """Drives a full DALA game: AI vs AI or human (player 2) vs AI (player 1).

    ``Player`` holds whose turn it is; ``next_phase`` tracks the game phase
    (placement / movement / capture after forming a three-pawn line).
    """
    def __init__(self, type_of_game, count_of_simulation, window) -> None:
        # Player 2 moves first; init() already played the opening AI move.
        self.Player = 2;
        self.c_state, self.c_board = init()
        #c_board = State(Board(), 1, 0)
        self.c_board.Printing_board(self.c_board.turn)
        self.gameWindow = Gui(self.c_board, window)
        # graphics(c_board)
        self.next_move = 2
        self.next_phase = 0
        # number of MCTS simulations per AI move
        self.count_of_simulation = count_of_simulation
        # 0 = AI vs AI, 1 = user vs AI
        self.type_of_game = type_of_game
    def start(self):
        """Dispatch to the chosen game mode; returns the last player to move."""
        print("ilość symulacji: ", self.count_of_simulation)
        if(self.type_of_game == 0):
            self.ai_vs_ai()
        elif(self.type_of_game == 1):
            self.user_vs_ai()
        else:
            print("type of game problem!")
        return self.Player
    def ai_player_move(self):
        """Run MCTS for the current player and adopt the best resulting state."""
        print(self.c_board.phase)
        board_state = State(self.c_board, self.Player, self.next_phase)
        root = MonteCarloTreeSearchNode(state=board_state, parent=None)
        mcts = MonteCarloTreeSearch(root)
        best_node = mcts.best_action(self.count_of_simulation)
        self.c_state = best_node.state
        self.c_board = self.c_state.board
        self.c_board.Printing_board(self.Player)
        #self.gameWindow.setBoard(self.c_board, self.next_phase, self.Player)
    def user_player_move(self):
        """Let the human (player 2) move via the GUI; return the move's (x, y)."""
        self.c_board, user_last_x, user_last_y = self.gui_user.user_move(self.c_board, 2, self.next_phase)
        #self.gameWindow.setBoard(self.c_board)
        return user_last_x, user_last_y
    def user_vs_ai(self):
        """Main loop for human vs AI.  Forming a three-pawn line grants an
        extra (capture) turn: next_phase becomes 2 and the same player moves
        again; otherwise the turn passes to the other player."""
        self.gui_user = Gui_user.Gui_user(self.c_board, 2)
        self.gameWindow.setBoard(self.c_board, self.next_phase, self.Player)
        while True:
            #print("while phase: ", self.next_phase)
            #print("end: ", self.c_board.end())
            if self.Player == 1:
                self.ai_player_move()
                # graphics(c_board)
                #print("trojka1 " + str(c_board.If_three_pawns(c_state.current_move[0], c_state.current_move[1])))
                # In phase 1 the move's destination lives in current_move[2:4],
                # otherwise in current_move[0:2].
                if(self.next_phase == 1):
                    if(self.c_board.If_three_pawns(self.c_state.current_move[2],self.c_state.current_move[3])):
                        print("ok")
                        self.next_phase = 2
                        self.Player = 1
                    else:
                        self.next_phase = self.c_board.phase
                        self.Player = 2
                else:
                    if (self.c_board.If_three_pawns(self.c_state.current_move[0], self.c_state.current_move[1])):
                        print("ok")
                        self.next_phase = 2
                        self.Player = 1
                    else:
                        self.next_phase = self.c_board.phase
                        self.Player = 2
                if self.c_state.is_game_over():
                    break
            elif self.Player == 2:
                user_last_x, user_last_y = self.user_player_move()
                #print("user x/y:",user_last_x,": ", user_last_y)
                # graphics(c_board)
                #print("trojka2 " + str(c_board.If_three_pawns(c_state.current_move[0], c_state.current_move[1])))
                # NOTE: If_three_pawns takes (y, x) here — row before column.
                if(self.next_phase == 1):
                    if (self.c_board.If_three_pawns(user_last_y, user_last_x)):
                        print("ok")
                        self.next_phase = 2
                        self.Player = 2
                    else:
                        self.c_board.set_state_for_user_move()
                        self.next_phase = self.c_board.phase
                        self.Player = 1
                else:
                    if (self.c_board.If_three_pawns(user_last_y, user_last_x)):
                        print("ok")
                        self.next_phase = 2
                        self.Player = 2
                    else:
                        self.c_board.set_state_for_user_move()
                        self.next_phase = self.c_board.phase
                        self.Player = 1
                if self.c_board.end() == 1 or self.c_board.end() == 2:
                    break
            self.gameWindow.setBoard(self.c_board, self.next_phase, self.Player)
        print("Koniec")
        return self.Player
    def ai_vs_ai(self):
        """Main loop for AI vs AI: both players use ai_player_move(); the
        three-pawn bonus-turn rule is the same as in user_vs_ai()."""
        self.gameWindow.setBoard(self.c_board, self.next_phase, self.Player)
        while True:
            print("while phase: ", self.next_phase)
            if self.Player == 1:
                self.ai_player_move()
                # graphics(c_board)
                #print("trojka1 " + str(c_board.If_three_pawns(c_state.current_move[0], c_state.current_move[1])))
                if(self.next_phase == 1):
                    if(self.c_board.If_three_pawns(self.c_state.current_move[2],self.c_state.current_move[3])):
                        print("ok")
                        self.next_phase = 2
                        self.Player = 1
                    else:
                        self.next_phase = self.c_board.phase
                        self.Player = 2
                else:
                    if (self.c_board.If_three_pawns(self.c_state.current_move[0], self.c_state.current_move[1])):
                        print("ok")
                        self.next_phase = 2
                        self.Player = 1
                    else:
                        self.next_phase = self.c_board.phase
                        self.Player = 2
                if self.c_state.is_game_over():
                    break
            elif self.Player == 2:
                self.ai_player_move()
                # graphics(c_board)
                #print("trojka2 " + str(c_board.If_three_pawns(c_state.current_move[0], c_state.current_move[1])))
                if(self.next_phase == 1):
                    if (self.c_board.If_three_pawns(self.c_state.current_move[2], self.c_state.current_move[3])):
                        print("ok")
                        self.next_phase = 2
                        self.Player = 2
                    else:
                        self.next_phase = self.c_board.phase
                        self.Player = 1
                else:
                    if (self.c_board.If_three_pawns(self.c_state.current_move[0], self.c_state.current_move[1])):
                        print("ok")
                        self.next_phase = 2
                        self.Player = 2
                    else:
                        self.next_phase = self.c_board.phase
                        self.Player = 1
            # NOTE(review): unlike the player-1 branch, this end-of-game check
            # sits outside the elif, so it also runs after player-1 turns.
            if self.c_state.is_game_over():
                    break
            self.gameWindow.setBoard(self.c_board, self.next_phase, self.Player)
        print("Koniec")
        return self.Player
if __name__ == "__main__":
    # NOTE(review): Game.__init__ requires (type_of_game, count_of_simulation,
    # window) — calling Game(10) with a single argument raises TypeError.
    # Presumably something like Game(0, 10, window) was intended; confirm
    # against the Gui constructor before fixing.
    game = Game(10)
    game.start()
| true |
bd71a0ec99ac470bcfeae3bdf1ccd2707e0ac9b0 | Python | sspkumdp/doubanfilmspider | /analysis/情感分析.py | UTF-8 | 1,064 | 3.09375 | 3 | [] | no_license |
'''
# -*- coding: utf-8 -*-
from snownlp import SnowNLP
s1 = SnowNLP(u"这本书质量真不太好!")
print("SnowNLP:")
print(" ".join(s1.words))
import jieba
s2 = jieba.cut(u"这本书质量真不太好!", cut_all=False)
print("jieba:")
print(" ".join(s2))
'''
from snownlp import SnowNLP
import os
# Average the SnowNLP sentiment score of every .rtf comment file in rootdir.
rootdir = '/Users/yumiko/Desktop/comment'
list = os.listdir(rootdir) #list all entries under the folder (NOTE: shadows builtin `list`)
for i in list:
    # print(i)
    f = open(os.path.join(rootdir, i), 'r', encoding='UTF-8')
    file = os.path.join(rootdir, i)
    print(f)
    # NOTE(review): non-.rtf files are opened but never read or closed.
    if file.endswith('.rtf'):
        list = f.readlines()
        sentimentslist = []
        sum = 0
        count = 0
        # accumulate per-line sentiment in [0, 1] (NOTE: shadows builtin `sum`)
        for i in list:
            s = SnowNLP(i)
            # print s.sentiments
            #print(s.sentiments)
            sum+=(s.sentiments)
            count+=1
        # mean sentiment of the file; NOTE(review): ZeroDivisionError on an
        # empty .rtf file (count stays 0).
        print(sum/count)
'''
plt.hist(sentimentslist, bins=np.arange(0, 1, 0.01), facecolor='g')
plt.xlabel('Sentiments Probability')
plt.ylabel('Quantity')
plt.title('Analysis of Sentiments')
plt.show()
'''
| true |
f97aa864203fcb3cb715fd2fdc3d234daa710de6 | Python | VaishnaviRohatgi/opencvproject | /dominant color.py | UTF-8 | 1,501 | 3.203125 | 3 | [] | no_license | import cv2
import numpy as np
from sklearn.cluster import KMeans
from PIL import Image
import matplotlib.pyplot as plt
class DominantColors:
    """Compute the *clusters* most dominant RGB colors of an image via k-means."""

    CLUSTERS = None
    IMAGE = None
    COLORS = None
    LABELS = None

    def __init__(self, image, clusters=3):
        self.CLUSTERS = clusters
        self.IMAGE = image

    def dominantColors(self):
        """Cluster the image pixels; return the k-means cluster centers."""
        bgr = cv2.imread(self.IMAGE)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        # flatten (h, w, 3) into a list of pixels, one RGB triple per row
        pixels = rgb.reshape((-1, 3))
        # keep the flattened pixel array on the instance, as before
        self.IMAGE = pixels
        model = KMeans(n_clusters=self.CLUSTERS)
        model.fit(pixels)
        # cluster centers are the dominant colors (float arrays; the caller
        # converts to integers)
        self.COLORS = model.cluster_centers_
        self.LABELS = model.labels_
        return self.COLORS
# Run the extractor and render the dominant colors as a horizontal bar image.
img = 'colors.jpg'
clusters = 5
dc = DominantColors(img, clusters)
colors = dc.dominantColors()
print(colors)
# k-means centers are floats; convert to 8-bit RGB for image output
colors = (np.array(colors)).astype(np.uint8)
title = "p"
#creating bar image
cols = len(colors)
rows = max([1, int(cols/2.5)])
# Create color Array: repeat the color row to give the bar some height
barFullData = np.tile(colors, (rows,1)).reshape(rows, cols, 3)
# Create Image from Array
barImg = Image.fromarray(barFullData, 'RGB')
#saving image
barImg.save("{}_{}.png".format(title,"method"))
barImg.show()
e26242adc5c5606ce2c1ffeb4e9e7e7502de6d3c | Python | jincongho/Python-Machine-Learning-Cookbook | /12. Visualizing Data/moving_wave_variable.py | UTF-8 | 1,383 | 3.203125 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# In[2]:
# Generate the signal
def generate_data(length=2500, t=0, step_size=0.05):
    """Yield (t, value) samples of an exponentially damped sine wave."""
    for _ in range(length):
        t += step_size
        damping = np.exp(-t / 8.0)
        yield t, np.sin(2 * np.pi * t) * damping
# In[4]:
# Initializer function
def initializer():
    """Reset the plot: y-range +/-1.1 (10% headroom), x in [0, 10], empty data."""
    limit = 1.0 * (1 + 0.1)
    ax.set_ylim(-limit, limit)
    ax.set_xlim(0, 10)
    x_vals.clear()
    y_vals.clear()
    line.set_data(x_vals, y_vals)
    return line
# In[5]:
def draw(data):
    """Append one (time, value) sample and update the animated line.

    When the trace reaches the right edge, the x-window is doubled and the
    canvas redrawn so the new limits take effect.
    """
    x, y = data
    x_vals.append(x)
    y_vals.append(y)
    left, right = ax.get_xlim()
    if x >= right:
        ax.set_xlim(left, 2 * right)
        ax.figure.canvas.draw()
    line.set_data(x_vals, y_vals)
    return line
# In[6]:
# Create the figure
fig, ax = plt.subplots()
ax.grid()
# In[7]:
# Extract the line (empty for now; draw() fills it sample by sample)
line, = ax.plot([],[],lw=1.5)
# In[8]:
# Create the variables shared by initializer() and draw()
x_vals, y_vals = [], []
# In[9]:
# Define the animator object: generate_data feeds draw() every 10 ms
animator = animation.FuncAnimation(fig, draw, generate_data,
                                   blit=False, interval=10, repeat=False,
                                   init_func=initializer)
plt.show()
# In[ ]:
| true |
e3d585c8d1ecdf326d0bd3e80d24be80eabdc7f0 | Python | welchsoft/tip_calculator | /tipcalc.py | UTF-8 | 336 | 4.0625 | 4 | [] | no_license | #define function that multiplies total and tip percentage
def tipcalc(num1, num2):
    """Return the tip amount: bill total *num1* times tip rate *num2*."""
    tip = num1 * num2
    return tip
#take user input (floats: bill total and tip rate as a decimal, e.g. 0.2)
number1 = float(input('enter the total amount '))
number2 = float(input('enter the tip percentage as a decimal '))
#call the function and print result
print("the tip amount is $" + str(tipcalc(number1,number2)))
| true |
187b2c752887358542150cd1a020adac29aacbd6 | Python | manumonforte/agrs | /etl/graphs.py | UTF-8 | 1,481 | 2.578125 | 3 | [] | no_license | from agrs.etl.preprocess_data import *
from agrs.etl.utils import get_nodes_and_weights, get_edges, get_labels_and_colors, draw_graph
if __name__ == '__main__':
    # Build an artist collaboration graph from preprocessed data, draw it,
    # then annotate every node with centrality metrics for d3.js export.
    with open('../data/processed_data.json') as json_file:
        data = json.load(json_file)
    # print(data)
    data_d3js = {'nodes': [], 'links': []}
    g = nx.Graph()
    g = get_nodes_and_weights(g, data, data_d3js)
    g = get_edges(g, data, data_d3js)
    labels, colors = get_labels_and_colors(g)
    draw_graph(g, labels, colors)
    # per-node graph metrics (each maps node name -> score in [0, 1])
    centrality = nx.degree_centrality(g)
    closeness = nx.closeness_centrality(g)
    betweenness = nx.betweenness_centrality(g)
    eigenvector = nx.eigenvector_centrality(g)
    pagerank = nx.pagerank(g)
    for node in data_d3js['nodes']:
        # d3.js expects the identifier under 'id'; blank out 'name'
        current_name = node['name']
        node['id'] = current_name
        node['name'] = None
        # store metrics as percentages rounded to 2 decimals
        node['centrality'] = round(centrality[current_name] * 100, 2)
        node['closeness'] = round(closeness[current_name] * 100, 2)
        node['betweenness'] = round(betweenness[current_name] * 100, 2)
        node['eigenvector'] = round(eigenvector[current_name] * 100, 2)
        node['pagerank'] = round(pagerank[current_name] * 100, 2)
        # drop bulky fields not needed by the visualisation
        node.pop('partners', None)
        node.pop('tracks', None)
        node.pop('genres', None)
    with open('../data/data_d3js.json', 'w+', encoding='UTF-8') as outfile:
        json.dump(data_d3js, outfile, ensure_ascii=False)
91ce80a01190553923f115434709ec61e3c493ee | Python | Srivat04/Basic-Computer-Vision-Stuff | /sobel_and_laplacian.py | UTF-8 | 704 | 2.75 | 3 | [] | no_license | import numpy as np
import cv2 as cv
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i","--image",required = True,help = "HELP")
args = parser.parse_args()
image = cv.cvtColor(cv.imread(args.image),cv.COLOR_BGR2GRAY)
cv.imshow("Original",image)
lap = cv.Laplacian(image,cv.CV_64F)
lap = np.uint8(np.absolute(lap))
cv.imshow("Laplacian of the image",lap)
sobelX = cv.Sobel(image,cv.CV_64F,1,0)
sobelY = cv.Sobel(image,cv.CV_64F,0,1)
sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))
sobel_combined = cv.bitwise_or(sobelX,sobelY)
cv.imshow("Sobel X",sobelX)
cv.imshow("Sobel Y",sobelY)
cv.imshow("Combined Sobel",sobel_combined)
cv.waitKey(0)
| true |
bd4e00aa151c52d85147b26dd92ff9f9bf438c6f | Python | sinhalaBsc/python-Regular-Expressions-re | /readfiles.py | UTF-8 | 4,343 | 3.953125 | 4 | [] | no_license | # this lesson show how to read text skill file in python.
'''
for open the file from your computer you can use python build-in open command.let's open 'text.txt'
file that located on same directory have current python file.There are two mathod to open files.
1. nomal method.
f=open('text.txt') # pass the directory as parameter
# this command defaults opening to file for reading. but
# we can set that for reading,writing,appending or reading/writing
# let's specifies that we open this file for reading in for same command
f=open('text.txt','r') # read 'r'
# write 'w'
# append 'a'
# read/write 'r+'
# in case we opened file should colse when we no need more, to not for messive with others.
f.close()
# just print the file name that we opened.
print(f.name)
# >> text.txt
# just print which mode we opened the current file.
print(f.mode)
# >> r
eg:
'''
# Open for reading, show the file's name and mode, then close it explicitly.
f=open('text.txt','r')
print(f.name)
print(f.mode)
f.close()
'''
2. context manager method
with open('text.txt','r') as f: # 'f' is variable name of opened file.
print(f.name) # benefit of this method is it will automatically close
# when we exit from block context and clean exceptions which are thrown.
# method_1 : for read text file (load all text file data to 'f_contents' variable)
with open('text.txt','r') as f:
f_contents=f.read() # this method good for read small data text file.
print(f_contents)
# method_2 : for read text file (load all text file data to 'f_contents' by lines)
with open('text.txt','r') as f:
f_contents=f.readlines() # this method good for small data text file.
print(f_contents) # this will add '\n' to every end of lines data.
# method_3 : for read text file (load one data line to 'f_contents' variable from file)
with open('text.txt','r') as f:
f_contents=f.readline() # this method good for big data text file.
print(f_contents) # print only one line from text file.
f_contents=f.readline() # this will load only next line data from last read's line.
print(f_contents) # by every this command(method) will print next line data from file.
f_contents=f.readline()
print(f_contents,end='') # pass end='' to escape printing extra new line.
# for same purpose we can use print(f_contents[:-1])
# for read all of the content from an extremely large file with less memory using.
# method_4 : for read text file (use for loop for load line by line)
with open('text.txt','r') as f:
for line in f:
print(line,end='')
# method_5 : for read text file (use for loop for load line by line)
with open('english.txt','r') as f:
f_contents=f.readline()
while f_contents:
getdata(f_contents,f.tell())
f_contents=f.readline()
# ********* more control **********
# method_6 : for read text file (load frist 100 characters form the file)
with open('text.txt','r') as f:
f_contents=f.read(100) # use only frist 100 characters form the file.
print(f_contents,end='') # this will add '\n' to every end of lines data.
f_contents=f.read(100) # use only next 100 characters form the file.
print(f_contents,end='')
f_contents=f.read(100) # if there don't have any more characters then.
print(f_contents,end='') # print nothing.
# method_7 : for read text file (print all from loading by 100 characters at one time form the file)
with open('text.txt','r') as f:
size_to_read =100
f_contents=f.read(f_contents)
while len(f_contents)>0:
print(f_contents,end='')
f_contents=f.read(f_contents)
# to print current position on the text file
print(f.tell())
# to change flow of position when we want
f.seek(0) # file go to 0 chareater
eg:
'''
# Demonstrate seek(): read the first 10 characters twice from position 0.
with open('text.txt','r') as f:
    size_to_read =10
    f_contents=f.read(size_to_read)
    print(f_contents,end='')
    f.seek(0) # set current position back to character 0
    f_contents=f.read(size_to_read)
    print(f_contents,end='')
# >> 1) This is1) This is
| true |
72018acabaa71342a24826feac04054e997d5598 | Python | jd-webb/SparkAutoMapper | /spark_auto_mapper/data_types/lpad.py | UTF-8 | 1,142 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | from typing import Optional
from pyspark.sql import DataFrame, Column
from pyspark.sql.functions import lpad
from spark_auto_mapper.data_types.text_like_base import AutoMapperTextLikeBase
from spark_auto_mapper.type_definitions.wrapper_types import (
AutoMapperColumnOrColumnLikeType,
)
class AutoMapperLPadDataType(AutoMapperTextLikeBase):
    """
    Left-pads the column value with *pad* up to *length* characters.
    Values longer than *length* are truncated to *length* characters.
    """

    def __init__(self, column: AutoMapperColumnOrColumnLikeType, length: int, pad: str):
        super().__init__()
        # capture the padding spec; it is applied lazily in get_column_spec()
        self.column: AutoMapperColumnOrColumnLikeType = column
        self.length: int = length
        self.pad: str = pad

    def get_column_spec(
        self, source_df: Optional[DataFrame], current_column: Optional[Column]
    ) -> Column:
        """Resolve the wrapped column and wrap it in Spark's lpad()."""
        inner = self.column.get_column_spec(
            source_df=source_df, current_column=current_column
        )
        return lpad(col=inner, len=self.length, pad=self.pad)
| true |
253cbd195ae00a67dd456c0aff4fb58f69cbe284 | Python | jrinconada/examen-tipo-test | /probability.py | UTF-8 | 1,205 | 3.640625 | 4 | [
"MIT"
] | permissive |
answers = 4
questions = 1
# Initializes all the variables
def init(q, a):
    """Record the quiz shape: *q* questions, answers drawn from option list *a*."""
    global answers, questions
    questions, answers = q, len(a)
# Returns the probability as a percentage given a number of events, taking into account the possibilities computed before
def computeProbability(events, possibilities):
    """Return events out of possibilities expressed as a percentage."""
    fraction = events / possibilities
    return fraction * 100
# Returns how much is added or substracted given a number of correct answers taking into account the number of questions
# Assuming addition of one point for a right answers and substraction of 0.33 for a wrong answer
def score(correctAnswers):
    """Exam score for *correctAnswers* right answers out of ``questions``.

    +1 point per correct answer, minus 1/(answers - 1) per wrong one
    (with 4 options that is the classic -0.33 penalty).
    """
    penalty = 1 / (answers - 1)
    wrong = questions - correctAnswers
    return 1 * correctAnswers - (wrong * penalty)
# Returns the added probability for a given condition
# Possible conditions are: an positive number of points, a negative number or zero
def conditionalProbability(rightAnswers, condition, possibilities):
p = 0
for i in range(0, len(rightAnswers)):
if condition(score(i)):
p = p + computeProbability(rightAnswers[i], possibilities)
return p
def isPositive(score):
    """True when the score is strictly above zero."""
    return 0 < score

def isNegative(score):
    """True when the score is strictly below zero."""
    return 0 > score

def isZero(score):
    """True when the score is exactly zero."""
    return 0 == score
| true |
25264fb549cb50f7b226126c5add5da909171859 | Python | cryzis07/Strategy_1 | /fighter.py | UTF-8 | 735 | 3.015625 | 3 | [] | no_license | import style
class Fighter (object):
    """Base combatant: a name, hit points and a pluggable fighting style.

    attack()/defend() delegate to the strategy object in ``fighting_style``.
    """
    def __init__(self, name=None, health=100):
        self.name = name
        self.health = health
        self.style = style.Style()
        # BUG FIX: attack()/defend() dispatch on ``self.fighting_style``,
        # which the base class never set (only subclasses did), so a plain
        # Fighter crashed with AttributeError.  Default it to the base
        # style; subclasses overwrite it with their injected strategy.
        self.fighting_style = self.style
    def attack(self,attacker,defender):
        """Delegate the attack to the configured fighting style."""
        self.fighting_style.attack(attacker,defender)
    def defend(self,attacker,defender):
        """Delegate the defence to the configured fighting style."""
        self.fighting_style.defend(attacker,defender)
class Chelovek(Fighter):
    """Human fighter; combat behaviour comes from the injected fighting style."""
    def __init__(self, name=None,health=100,fighting_style=None):
        # Base init first, then install the strategy object it dispatches on.
        super(Chelovek, self).__init__(name,health)
        self.fighting_style = fighting_style
class Zmey(Fighter):
    """Serpent fighter; combat behaviour comes from the injected fighting style."""
    def __init__(self, name=None,health=100,fighting_style=None):
        # Base init first, then install the strategy object it dispatches on.
        super(Zmey, self).__init__(name,health)
        self.fighting_style = fighting_style
| true |
f264809fc1cbf9e859495b342a1040ba7055a3cc | Python | Ivaylo-Kirov/pyjs-ds-algos | /py/BST.py | UTF-8 | 1,846 | 3.796875 | 4 | [] | no_license |
class Node:
    """Single binary-search-tree node."""
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right


class BST:
    """Binary search tree; duplicates are stored in the right subtree."""

    def __init__(self):
        self.root = None
        # node count, kept in sync by addNode() (BUG FIX: it was never updated)
        self.size = 0

    def addNode(self, data):
        """Insert *data* into the tree and update ``size``."""
        if self.root is None:
            self.root = Node(data)
        else:
            self._addNode(self.root, data)
        self.size += 1

    def _addNode(self, startNode, data):
        # Recursive descent: smaller values go left, equal/larger go right.
        if data < startNode.data:
            if startNode.left is None:
                startNode.left = Node(data)
            else:
                self._addNode(startNode.left, data)
        else:
            if startNode.right is None:
                startNode.right = Node(data)
            else:
                self._addNode(startNode.right, data)

    def _countNodes(self, node):
        # Size of the subtree rooted at *node*.
        if node is None:
            return 0
        return 1 + self._countNodes(node.left) + self._countNodes(node.right)

    def countNodes(self):
        """Return the number of nodes by walking the whole tree."""
        return self._countNodes(self.root)

    def findMin(self):
        """Return the smallest stored value, or None for an empty tree.

        (BUG FIX: previously this raised AttributeError when root was None.)
        """
        if self.root is None:
            return None
        node = self.root
        while node.left is not None:
            node = node.left
        return node.data

    def checkValid(self):
        """Verify the BST ordering invariant over the whole tree.

        Uses +/-infinity bounds instead of the former arbitrary +/-200000
        limits, so arbitrarily large values validate correctly.
        """
        return self._checkValid(self.root, float("-inf"), float("inf"))

    def _checkValid(self, currentNode, minV, maxV):
        # Every node must lie within [minV, maxV]; bounds tighten on descent.
        if currentNode is None:
            return True
        return (
            minV <= currentNode.data <= maxV
            and self._checkValid(currentNode.left, minV, currentNode.data)
            and self._checkValid(currentNode.right, currentNode.data, maxV)
        )
# Small smoke test: build a tree, count nodes and track the minimum.
bst = BST()
bst.addNode(5)
bst.addNode(2)
bst.addNode(8)
bst.addNode(4)
bst.addNode(3)
result = bst.countNodes()
print(bst.findMin())
bst.addNode(1)
print(bst.findMin())
result = bst.checkValid()
print('hi')
| true |
1316061b7ad673366a7c2e7aaa7ab3b10eae1301 | Python | hamolicious/Tile-Set-Previewer | /tile_manager.py | UTF-8 | 1,048 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | import json
from hashlib import md5
import pygame
import os
from time import sleep
# settings.json holds a two-element array: [settings dict, tiles data].
with open('settings.json') as file:
    settings, tiles = json.load(file)
class Tile():
    """A tile image that hot-reloads from disk whenever the file changes."""
    def __init__(self, path_to_image, pos):
        # screen position where draw() blits the tile
        self.pos = pos
        self.path_to_image = path_to_image
        self.image = pygame.image.load(self.path_to_image)
        # sentinel: a fresh md5 object never equals a hexdigest string, so
        # the first check_update() always (re)loads and stores a real digest
        self.image_hash = md5()
    def check_update(self):
        """Reload the image if the file's md5 digest changed since last check."""
        with open(self.path_to_image, 'rb') as file:
            content = file.read()
        temp_hash = md5(content).hexdigest()
        try:
            if self.image_hash != temp_hash:
                self.image = pygame.image.load(self.path_to_image)
                self.image_hash = temp_hash
        except pygame.error:
            # NOTE(review): pygame.error presumably means the file was caught
            # mid-write; retrying recursively has no depth limit or delay and
            # could overflow the stack if the error persists — confirm.
            self.check_update()
    def draw(self, screen):
        """Refresh from disk if needed, then blit the tile scaled by the
        configured visual-increase factor."""
        self.check_update()
        screen.blit(pygame.transform.scale(self.image, (settings['tile-size'][0] * settings['visual-increase'], settings['tile-size'][1] * settings['visual-increase'])), self.pos)
| true |
327456bb0f2b0d805fed76a06a3cba33d19c6c5f | Python | icaroslb/alg_lin_comp | /Atividade_3/quest_9.py | UTF-8 | 898 | 2.9375 | 3 | [] | no_license | import numpy as np
import quest_7
def Q_T_rot_givens(A, m, n):
    """Factor the m x n matrix *A* by successive Givens rotations.

    Returns (Q, T) with Q the accumulated orthogonal factor and T the
    rotated matrix, so that A = Q @ T.
    """
    Q = np.eye(m)
    T = A
    limit = min(m, n)
    for col in range(limit - 1):
        for row in range(col + 1, limit):
            # rotation built from the current column, zeroing entry (row, col)
            column_vector = np.array([T[:, col]]).transpose()
            G = quest_7.vetor_matriz_rot_givens(column_vector, col, row)
            T = G @ T
            Q = Q @ G.transpose()
    return Q, T
if ( __name__ == "__main__" ):
    # Read an m x n matrix row by row from stdin (space-separated values),
    # factor it with Givens rotations and print Q and T.
    m, n = [ int( x ) for x in input( "Insira as ordens m e n separadas por espaço: " ).split( " " ) ]
    A = np.zeros( [ m, n ] )
    for i in range( m ):
        linha = [ float( x ) for x in input( "Insira a linha {} separadas por espaço: ".format( i ) ).split( " " ) ]
        A[ i, : ] = linha[ 0 : n ]
    Q, T = Q_T_rot_givens( A, m, n )
    print( "-------------------------------------------------------------------------------\nMatriz Q:\n{}\n\nMatriz T:\n{}\n".format( Q, T ) )
3a6e84043062bb7b6c3dac979e34b38fb037bb89 | Python | rongcuid/Pygame-RPG-Engine | /RNA/RNA_Info.py | UTF-8 | 2,156 | 3.359375 | 3 | [] | no_license | '''
Created on Aug 30, 2013
This file includes the InfoRNA class which stores basic information
of RNA structure. The class itself also stores ALL information
@author: carl
'''
class InfoRNA():
    '''
    This class stores the basic information of RNA structure.
    Eg. name, description, id
    The class itself also stores all ids
    '''
    # This stores the previous ID assigned
    prevID = 0
    # This stores all names and objects
    nameDict = {}
    # This stores a list of all names
    nameList = []
    def __init__(self,desc=""):
        '''
        Renew the ID, initializes an InfoRNA object, and record it
        to nameDict
        @type desc: String
        '''
        # Gives a new, non-repetitive ID
        InfoRNA.prevID += 1
        # Assign the new ID
        self.id = InfoRNA.prevID
        # Record name to nameDict
        # NOTE(review): the key is the LITERAL string "self.name", so every
        # instance overwrites the same entry; presumably self.name was meant,
        # but no ``name`` attribute is ever set on instances — confirm intent.
        InfoRNA.nameDict["self.name"] = self
        # Assign description
        self.description = desc
        # To tell that this InfoRNA is not used
        self.assigned = False
    def assign(self, rnaObj):
        '''
        Assign a RNA object to InfoRNA object
        (only PropRNA is accepted; StructRNA support is commented out)
        '''
        if not self.assigned and type(rnaObj) == PropRNA: #or type(rnaObj) == StructRNA:
            # Stores the RNA Object contain
            self.contain = rnaObj
            # To tell that this InfoRNA object is used
            self.assigned = True
        else:
            raise Exception("[InfoRNA]This InfoRNA ",self,"cannot assign object ",rnaObj,"!")
    def getDesc(self):
        # Accessor for the free-text description
        return self.description
    def getID(self):
        # Accessor for the unique numeric id
        return self.id
    @classmethod
    def checkNameUnique(cls,name):
        # NOTE(review): nameList is never appended to anywhere in this class,
        # so this check (and retrieve below) can never match — confirm whether
        # registration was meant to populate it.
        for n in cls.nameList:
            if name == n:
                raise Exception("[InfoRNA]The name ",name,"is not unique!")
    @classmethod
    def retrieve(cls,name):
        '''
        Retrieves an InfoRNA object from name
        '''
        for n in cls.nameList:
            if n == name:
                return cls.nameDict[name]
        raise Exception("[InfoRNA]The object with name ",name,"does not exist!")
from PropertyRNA import PropRNA
| true |
e3baca69c6ab14abc0a045a6ddb63f3ec0020e86 | Python | yutasrobot/RaspberryHome | /server.py | UTF-8 | 642 | 2.8125 | 3 | [] | no_license | # Raspberry Pi Uzaktan Kontrol Projesi Server Programi
import RPi.GPIO as GPIO #raspberry pi'nin pinlerini kontrol kutuphanesini ekle
import time
led1=18
led2=23
GPIO.setmode(GPIO.BCM) #pin numaralarini boarddaki siralamaya gore ayarla
GPIO.cleanup() #onceden kalmis olan pin ayarlarini temizle
GPIO.setup(18,GPIO.OUT) #led 1in bagli olacagi pini cikis olarak ayarla
for x in range(1,5):
GPIO.output(led1,GPIO.HIGH) #ledi yak
time.sleep(1) #1sn bekle
GPIO.output(led1,GPIO.LOW) #ledi sondur
time.sleep(1) #1sn bekle
| true |
902401b0d8e37abdd623205017938b6bb41181c6 | Python | j0h4x0r/InfoMiner | /main.py | UTF-8 | 7,006 | 2.609375 | 3 | [] | no_license | #!/usr/bin/python
import csv, sys, itertools, codecs
class AprioriExtractor:
	'''Apriori frequent-itemset miner over a CSV file (Python 2 code).

	Items are (column_index, value) pairs; numeric columns are discretized
	into buckets per discrete_granularity/discrete_start.  Writes frequent
	itemsets and high-confidence one-consequent rules to outfile.txt.
	'''
	def __init__(self, datafile, min_sup, min_conf):
		self.datafile = datafile
		self.min_sup = min_sup
		self.min_conf = min_conf
		# per-column bucket width; -1 marks a non-numeric column
		self.discrete_granularity = [-1, -1, -1, 50, 50, 50000, 1000000, 20, 5000000, 50]
		# per-column lower bound of the first bucket
		self.discrete_start = [0, 0, 0, 50, 1850, 50000, 1000000, 20, 5000000, 50]
	def run(self):
		'''Full pipeline: load, find large k-itemsets level by level, extract rules.'''
		# read data
		database, header = self.loadData()
		if not database:
			print 'Error reading data file'
			return
		# sort items in transactions (apriorGen relies on ordered tuples)
		for transac in database:
			transac.sort()
		# initialize data structures; index 0 is an unused placeholder so
		# L[k]/Supports[k] hold the size-k level
		L = [[] for i in range(2)]
		Supports = [{} for i in range(2)]
		# compute large 1-itemsets
		candidates = [(item,) for item in set(itertools.chain(*database))]
		Supports[1] = self.selectCandidates(candidates, database)
		L[1] = Supports[1].keys()
		# compute large k-itemsets until a level comes up empty
		i = 1
		while len(L[i]) != 0:
			i += 1
			candidates = self.apriorGen(L[i-1])
			Supports.append(self.selectCandidates(candidates, database))
			L.append(Supports[i].keys())
		# drop the final empty level
		Supports.pop()
		L.pop()
		# extract rules
		rules = self.extract1RRules(L, Supports)
		self.printdata(Supports,rules, header)
		# print rules
	def printdata(self, supports, rules, header):
		'''Write itemsets (sorted by support) and rules to outfile.txt.

		NOTE(review): the header lines and rule percentages use the GLOBAL
		min_sup/min_conf set in __main__, not self.min_sup/self.min_conf —
		fragile if the class is used outside this script.
		'''
		outfile = codecs.open('outfile.txt', encoding = 'utf-8', mode = 'w')
		#sort supports in decs order
		outfile.write('==Frequent itemsets (min_sup='+ "{0:.0f}%".format(min_sup * 100) +')\n')
		for i in xrange(1,len(supports)):
			sorted_supp = sorted(supports[i].items(), key=lambda x: x[1], reverse=True)
			for each in sorted_supp:
				# h = column name, f = value or discretized "<=hi&>=lo" range
				h = header[each[0][0][0]]
				if self.discrete_granularity[each[0][0][0]] < 0:
					f = str(each[0][0][1])
				else:
					f = str('<=' + str(each[0][0][1]) + '&>=' + str(each[0][0][1] - self.discrete_granularity[each[0][0][0]]))
				sup = each[1]
				if i==1:
					outfile.write('[' + h +': '+ f + '], ')
					outfile.write("{0:.0f}%".format(sup * 100))
					outfile.write('\n')
				else:
					outfile.write('[')
					for inner in xrange(len(each[0])):
						h = header[each[0][inner][0]]
						if self.discrete_granularity[each[0][inner][0]] < 0:
							f = str(each[0][inner][1])
						else:
							f = str('<=' + str(each[0][inner][1]) + '&>=' + str(each[0][inner][1] - self.discrete_granularity[each[0][inner][0]]))
						# f = str(each[0][inner][1])
						if inner == i-1:
							outfile.write( h +': '+ f + '], ')
						else:
							outfile.write( h +': '+ f +', ')
					outfile.write("{0:.0f}%".format(sup * 100))
					outfile.write('\n')
		outfile.write('\r\n\n')
		outfile.write('==High-confidence association rules (min_conf='+ "{0:.0f}%".format(min_conf * 100) +')\n')
		#sort rules in
		for rule in rules:
			sup = supports[len(rule[0])][rule[0]]
			for i in xrange(len(rule)):
				if i==0:
					# left-hand side: possibly several items in brackets
					for item in xrange(len(rule[i])):
						h = str(header[rule[i][item][0]])
						if self.discrete_granularity[rule[i][item][0]] < 0:
							f = str(rule[i][item][1])
						else:
							f = str('<=' + str(rule[i][item][1]) + '&>=' + str(rule[i][item][1] - self.discrete_granularity[rule[i][item][0]]))
						if len(rule[i]) == 1:
							outfile.write('[' + h +': '+ f + '] => ')
						elif item == 0:
							outfile.write('[' + h +': '+ f + ', ')
						elif item == len(rule[i]) - 1:
							outfile.write(h +': '+ f + '] => ')
						else:
							outfile.write(h +': '+ f + ', ')
				elif i==1:
					# right-hand side: exactly one consequent item
					h = str(header[rule[i][0][0]])
					if self.discrete_granularity[rule[i][0][0]] < 0:
						f = str(rule[i][0][1])
					else:
						f = str('<=' + str(rule[i][0][1]) + '&>=' + str(rule[i][0][1] - self.discrete_granularity[rule[i][0][0]]))
					# f = str(rule[i][0][1])
					# (Conf: 100.0%, Supp: 75%)
					outfile.write(h +': '+ f + ' (Conf: ' + "{0:.0f}%".format(min_conf * 100) + ', Supp: ' + "{0:.0f}%".format(min_sup * 100) + ')\n')
		return
	def loadData(self):
		'''Read the CSV into a list of transactions of (col_index, value)
		items, then discretize the numeric columns in place.'''
		database = header = None
		# read raw from file
		with open(self.datafile, 'r') as csvfile:
			csvreader = csv.reader(csvfile)
			header = csvreader.next()
			database = []
			for row in csvreader:
				database.append(map(lambda i: (i, row[i]), range(len(row))))
		# discretize numeric attributes
		for i in range(len(header)):
			self.discretizeAttribute(database, i)
		return database, header
	def discretizeAttribute(self, database, k):
		'''Replace column *k*'s values with their bucket's upper bound.'''
		# negative granularity means this is not a numeric attribute
		if self.discrete_granularity[k] < 0:
			return
		for row in database:
			bound = ((int(float(row[k][1])) - self.discrete_start[k]) / self.discrete_granularity[k] + 1) * self.discrete_granularity[k] + self.discrete_start[k]
			row[k] = (row[k][0], bound)
	# This function selects large itemsets and returns a dictionary with the keys large itemsets and the values supports
	def selectCandidates(self, candidates, database):
		support = dict.fromkeys(candidates, 0)
		for transac in database:
			transac_set = set(transac)
			for cand in candidates:
				if set(cand) <= transac_set:
					support[cand] += 1
		total = len(database)
		# keep only candidates whose relative support clears min_sup
		largeItemsetSupport = {key: val / float(total) for key, val in support.iteritems() if val / float(total) >= self.min_sup}
		return largeItemsetSupport
	# Return a list of candidates
	def apriorGen(self, l):
		'''Classic Apriori candidate generation from the size-(k-1) level.'''
		# join step: merge pairs sharing all but the last (sorted) item
		def largersets(l):
			for p in l:
				for q in l:
					if p == q:
						continue
					elif p[:-1] == q[:-1] and p[-1] < q[-1]:
						yield p + q[-1:]
		# prune step: drop candidates with any non-large (k-1)-subset
		candidates = []
		for itemset in largersets(l):
			qual = True
			for sub in itertools.combinations(itemset, len(itemset) - 1):
				if sub not in l:
					qual = False
					break
			if qual:
				candidates.append(itemset)
		return candidates
	# Extract rules in such format: ((item1, item2,...), (itema, itemb,...))
	def extractRules(self, L, Supports):
		'''General rule extraction (any consequent size); currently unused —
		run() calls extract1RRules instead.'''
		if len(L) <= 2:
			print 'No rule extracted'
			return
		rules = []
		for largesets in L:
			for lset in largesets:
				for lhs in itertools.chain.from_iterable(itertools.combinations(lset, i) for i in range(1, len(lset))):
					conf = Supports[len(lset)][lset] / Supports[len(lhs)][lhs]
					if conf >= self.min_conf:
						rhs = tuple(item for item in lset if item not in lhs)
						rules.append((lhs, rhs))
		return rules
	def extract1RRules(self, L, Supports):
		'''Extract rules with exactly ONE item on the right-hand side.'''
		if len(L) <= 2:
			print 'No rule extracted'
			return
		rules = []
		for largesets in L:
			for lset in largesets:
				if len(lset) < 2:
					continue
				for lhs in itertools.combinations(lset, len(lset) - 1):
					conf = Supports[len(lset)][lset] / Supports[len(lhs)][lhs]
					if conf >= self.min_conf:
						rhs = tuple(item for item in lset if item not in lhs)
						rules.append((lhs, rhs))
		return rules
if __name__ == '__main__':
	# Usage: main.py datafile min_sup min_conf  (Python 2 script)
	if len(sys.argv) != 4:
		print 'Usage: main.py datafile min_sup min_conf'
		sys.exit()
	try:
		# NOTE(review): ``global`` at module level is a no-op; printdata()
		# reads these module-level names directly.
		global min_sup
		global min_conf
		min_sup = float(sys.argv[2])
		min_conf = float(sys.argv[3])
	except:
		# bare except: any failure in float() is reported as bad parameters
		print 'Illegal parameters'
		sys.exit()
	extractor = AprioriExtractor(sys.argv[1], min_sup, min_conf)
	extractor.run()
9e568ab359c3fcd96459c55ecc0277e290ef3881 | Python | roxanaN/Producer-Consumer | /producer.py | UTF-8 | 2,567 | 3.4375 | 3 | [] | no_license | """
This module represents the Producer.
Computer Systems Architecture Course
Assignment 1
March 2020
"""
from threading import Thread
from time import sleep
class Producer(Thread):
    """
    Class that represents a producer.
    """

    def __init__(self, products, marketplace, republish_wait_time, **kwargs):
        """
        Constructor.

        @type products: List()
        @param products: a list of products that the producer will produce

        @type marketplace: Marketplace
        @param marketplace: a reference to the marketplace

        @type republish_wait_time: Time
        @param republish_wait_time: the number of seconds that a producer must
        wait until the marketplace becomes available

        @type kwargs:
        @param kwargs: other arguments that are passed to the Thread's __init__()
        """
        # Each producer runs on its own thread.
        # Store products, marketplace, republish_wait_time and kwargs
        # from the constructor arguments.
        Thread.__init__(self, **kwargs)
        self.products = products
        self.marketplace = marketplace
        self.republish_wait_time = republish_wait_time
        self.kwargs = kwargs

    def run(self):
        # Register this producer with the marketplace and get back an id.
        producer_id = self.marketplace.register_producer()

        # Keep publishing products forever.
        while True:
            # For every product this producer has to manufacture:
            for prod in self.products:
                # the product type,
                product = prod[0]
                # how many units of that type are needed,
                qty = prod[1]
                # and how long to wait before moving on to the next unit.
                time = prod[2]

                # While not enough units have been published yet:
                while qty:
                    # Retry while the product cannot be added because the
                    # marketplace "shelf" is full.
                    while not self.marketplace.publish(producer_id, product):
                        # Wait a bit, then try again.
                        sleep(self.republish_wait_time)

                    # The product was published; wait the per-product delay
                    # before producing the next unit.
                    sleep(time)
                    # One fewer unit left to publish.
                    qty -= 1
| true |
1ef51a99e341c54db68e4a8496a5b3ec13568261 | Python | Aasthaengg/IBMdataset | /Python_codes/p02613/s623977845.py | UTF-8 | 175 | 3.21875 | 3 | [] | no_license | from collections import defaultdict
# Tally the judge verdicts read from stdin and report the four categories
# ("AC", "WA", "TLE", "RE") in fixed order, even when a category never occurs.
verdicts = defaultdict(int)
total = int(input())
for _ in range(total):
    verdicts[input()] += 1
for label in ("AC", "WA", "TLE", "RE"):
    print(f"{label} x {verdicts[label]}")
007ee57059f04a669bfb274613858364de05ca7a | Python | hawksFTW/PythonProjects | /ASCII/CaesarCipher.py | UTF-8 | 880 | 4.25 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 2 22:05:36 2021
@author: dhruv
"""
#Desc: This time, our secret message will be encoded with letters. First, ask the user for a secret message and a key (a number).
#The program should print out the message with each letter shifted forward in the alphabet by the key amount.
#Keep in mind: what happens if the letter goes past the end of the alphabet? How can we make sure to start over at the beginning of the alphabet?
# main.py
i = input("Type in your message to encrypt: ")
key = int(input("Enter the key: "))
d = ""
for x in i:
w = ord(x)
d = chr(w + key)
print(d, end = " ")
print("\n")
g = input("Type in your message to decrypt: ")
dkey = int(input("Type in the decryption key: "))
o = ""
for f in g:
e = ord(f)
o = chr(e - key)
print(o, end = " ")
print("\n")
| true |
6af1a3b31c395ab7f7ecea070fa2033a151b8604 | Python | zsalec/Python-Full-Stack-Free | /Chapter3/3-3-while-function.py | UTF-8 | 5,716 | 4.1875 | 4 | [] | no_license | # while 循环
# - for loop  - use when the number of iterations is known
# - while loop - use when only the exit condition is known
# -- while <condition expression>:
# --     <statement block>
# Interest rate 6.7%: compute how many years it takes the capital to double
rate = 0.067
year = 0
capital = 10000
while capital < 20000:
    # NOTE(review): uses the literal 0.067 instead of `rate` -- keep in sync.
    capital *= 1 + 0.067
    year += 1
    print('No. {:} years, capital: {:}'.format(year, capital))
print('After {} years, capital will be doubled'.format(year))
'''
No. 1 years, capital: 10670.0
No. 2 years, capital: 11384.89
No. 3 years, capital: 12147.677629999998
No. 4 years, capital: 12961.572031209998
No. 5 years, capital: 13829.997357301068
No. 6 years, capital: 14756.607180240238
No. 7 years, capital: 15745.299861316334
No. 8 years, capital: 16800.23495202453
No. 9 years, capital: 17925.85069381017
No. 10 years, capital: 19126.88269029545
No. 11 years, capital: 20408.383830545245
After 11 years, capital will be doubled
'''
# Functions
# - a way of organizing code
# - a function usually performs one specific task
# - using a function
# -- a function must be defined first
# -- using a function is commonly called "calling" it
def func():
    print('This is a function')
    print('It can finish a special function')
func() # This is a function
# Function parameters and return values
# - formal parameters vs. actual arguments
# -- person is a formal parameter
# - return ends the function
def hello(person):
    print('{}, what\'s wrong'.format(person))
    print('Sir, 你不理我,我就走了!')
    s = '我已经跟{0}打过招呼了,{0}不理我'.format(person)
    # -- s is the return value
    return s
    # Code after return is never executed:
    print('不会被执行!!!')
t = 'Moon'
# -- t is the actual argument
result = hello(t)
'''
Moon, what's wrong
Sir, 你不理我,我就走了!
'''
print(result) # 我已经跟Moon打过招呼了,Moon不理我
# demo: nested helper that prints the 9x9 multiplication table
def print_table():
    # print_line prints one row: 1*no .. no*no, tab-separated
    def print_line(no):
        for i in range(1, no + 1):
            print('{} x {} = {}'.format(i, no, i * no), end='\t')
        print()
        return None
    for i in range(1, 10):
        print_line(i)
print_table()
'''
1 x 1 = 1
1 x 2 = 2 2 x 2 = 4
1 x 3 = 3 2 x 3 = 6 3 x 3 = 9
1 x 4 = 4 2 x 4 = 8 3 x 4 = 12 4 x 4 = 16
1 x 5 = 5 2 x 5 = 10 3 x 5 = 15 4 x 5 = 20 5 x 5 = 25
1 x 6 = 6 2 x 6 = 12 3 x 6 = 18 4 x 6 = 24 5 x 6 = 30 6 x 6 = 36
1 x 7 = 7 2 x 7 = 14 3 x 7 = 21 4 x 7 = 28 5 x 7 = 35 6 x 7 = 42 7 x 7 = 49
1 x 8 = 8 2 x 8 = 16 3 x 8 = 24 4 x 8 = 32 5 x 8 = 40 6 x 8 = 48 7 x 8 = 56 8 x 8 = 64
1 x 9 = 9 2 x 9 = 18 3 x 9 = 27 4 x 9 = 36 5 x 9 = 45 6 x 9 = 54 7 x 9 = 63 8 x 9 = 72 9 x 9 = 81
'''
def stu(**kwargs):
    # **kwargs collects every keyword argument into a dict.
    print('Arguments:')
    print(type(kwargs))
    for k, v in kwargs.items():
        print('{} = {}'.format(k, v))
    return None
stu(name='Tom', age=18, learn='Python')
'''
Arguments:
<class 'dict'>
name = Tom
age = 18
learn = Python
'''
# - ordering rules when mixing collecting parameters in a call
# -- order: positional params > keyword params > collecting params
def student(name, age, *args, hobby='None', **kwargs):
    print('Hello,大家好')
    print('My name is {}, is {} years old'.format(name, age))
    # NOTE(review): hobby defaults to the *string* 'None', so this branch
    # never fires for the default call -- `is None` tests the None object.
    if hobby is None:
        print('I have none hobby')
    else:
        print('My hobby is', hobby)
    print('*' * 30)
    for i in args:
        print(i)
    print('*' * 30)
    for k, v in kwargs.items():
        print(k, '--', v)
    return None
name = 'Tonny'
age = 19
student(name, age)
'''
Hello,大家好
My name is Tonny, is 19 years old
My hobby is None
******************************
******************************
'''
student(name, age, hobby='basketball')
'''
Hello,大家好
My name is Tonny, is 19 years old
My hobby is basketball
******************************
******************************
'''
student(name, age, 'swimming', v2='param1', v3='param2', v1='param3')
'''
Hello,大家好
My name is Tonny, is 19 years old
My hobby is basketball
******************************
******************************
Hello,大家好
My name is Tonny, is 19 years old
My hobby is None
******************************
swimming
******************************
v2 -- param1
v3 -- param2
v1 -- param3
'''
student(name, age, 'Python', 'C++', hobby='hiking', a1='p1', a2='p2', a3='p3')
'''
Hello,大家好
My name is Tonny, is 19 years old
My hobby is hiking
******************************
Python
C++
******************************
a1 -- p1
a2 -- p2
a3 -- p3
'''
# -- unpacking collected arguments
# --- put the arguments into a list/dict first
# demo
def stu1(*args):
    print('=' * 30)
    n = 0
    for i in args:
        n += 1
        print(n, type(i), i)
    return None
l1 = {'Tonny', 10, 2, 'Hello'}
# l1 is collected into args as a single variable
stu1(l1)
'''
==============================
1 <class 'set'> {'Hello', 10, 2, 'Tonny'}
'''
# - after unpacking with *, l1's elements are collected into args one by one
stu1(*l1)
'''
==============================
1 <class 'str'> Hello
2 <class 'int'> 10
3 <class 'int'> 2
4 <class 'str'> Tonny
'''
# Return values
def func1():
    print('有返回值')
    return 1
def func2():
    print('没有返回值')
f1 = func1()
print(f1)
'''
有返回值
1
'''
# - a function without an explicit return returns None by default
f2 = func2()
print(f2)
'''
没有返回值
None
'''
# Function documentation (docstrings)
# -- how to write one
# -- 1. the first statement uses triple-quote delimiters
# -- 2. usually follows a fixed format
# --
# - viewing the documentation
# NOTE: func3's docstring stays in Chinese because it is printed at runtime
# via help() / __doc__ and the expected outputs below quote it verbatim.
def func3(name, age, *args):
    '''
    这是文档演示
    :param name: 姓名
    :param age: 年龄
    :param args: 其他参数
    :return: None
    '''
    print('This is function func3')
help(func3)
'''
Help on function func3 in module __main__:
func3(name, age, *args)
    这是文档演示
    :param name: 姓名
    :param age: 年龄
    :param args: 其他参数
    :return: None
'''
print(func3.__doc__)
'''
    这是文档演示
    :param name: 姓名
    :param age: 年龄
    :param args: 其他参数
    :return: None
'''
| true |
2e5b840b33c51ab12ad727deffb61a08dc72c700 | Python | trishantpahwa/Python_Data_Structures | /Stack/Stack_Linked_List.py | UTF-8 | 1,272 | 4.1875 | 4 | [] | no_license | # This is to implement Stack using Linked List
class node:
    # Class-level defaults so `node.data` / `node.next` also resolve on the
    # class object itself, mirroring the original definition.
    data = None
    next = None
    def __init__(self):
        """Create a detached node with no payload and no successor."""
        self.data = self.next = None
    def add_data(self, data):
        """Store *data* as this node's payload."""
        self.data = data
    def add_next(self, node):
        """Link *node* as this node's successor."""
        self.next = node
class stack:
    # Class-level default so `stack.head` resolves before __init__ runs.
    head = None
    def __init__(self):
        """Create an empty stack (singly linked list; top is the tail)."""
        self.head = None
    def add_head(self, node):
        """Install *node* as the bottom (head) of the stack."""
        self.head = node
    def push(self, node):
        """Append *node* as the new top (tail of the list).

        BUG FIX: previously raised AttributeError when the stack was empty;
        an empty stack now simply adopts *node* as its head.
        """
        if self.head is None:
            self.head = node
            return
        temp = self.head
        while(temp.next != None):
            temp = temp.next
        temp.next = node
    def pop(self):
        """Remove the top node.

        BUG FIX: previously raised AttributeError when the stack held zero
        or one nodes; both cases are now handled explicitly.
        """
        if self.head is None:
            return
        if self.head.next is None:
            self.head = None
            return
        temp = self.head
        while(temp.next.next != None):
            temp = temp.next
        temp.next = None
    def print_stack(self):
        """Print the payloads bottom-to-top as a Python list.

        BUG FIX: previously crashed on an empty stack; now prints [].
        """
        temp = self.head
        temp_list = []
        while(temp != None):
            temp_list.append(temp.data)
            temp = temp.next
        print(temp_list)
# Demo: build nodes 1..5, stack them, print, pop the top, push a new node.
n1 = node()
n1.add_data(1)
n2 = node()
n2.add_data(2)
n3 = node()
n3.add_data(3)
n4 = node()
n4.add_data(4)
n5 = node()
n5.add_data(5)
s = stack()
s.add_head(n1)
s.push(n2)
s.push(n3)
s.push(n4)
s.push(n5)
s.print_stack()
# Remove the top element (5), then show the remaining stack.
s.pop()
s.print_stack()
n6 = node()
n6.add_data(6)
s.push(n6)
s.print_stack()
| true |
3ea00355f47f271641a6580f8ff59e9c49604a77 | Python | jasper12112/discordbot | /cogs/trivia.py | UTF-8 | 1,294 | 2.921875 | 3 | [] | no_license | import os
import random
import asyncio
import json
import praw
import discord
from discord.ext import commands
from discord import utils
from discord.utils import get
# Reddit client -- credentials are taken from environment variables.
reddit = praw.Reddit(client_id=os.environ.get("REDDIT_ID"),
                     client_secret=os.environ.get("REDDIT_SECRET"),
                     user_agent="discordBot")
class Trivia(commands.Cog):
    # Store the bot reference so commands can use bot-level helpers.
    def __init__(self, bot):
        self.bot = bot
    # Log once the cog has been attached and the bot is ready.
    @commands.Cog.listener()
    async def on_ready(self):
        print('Cog has been loaded!')
    # Guessing game: the invoking user has 5 seconds to guess 1..5.
    @commands.command(help='trivia question')
    async def testtrivia(self, ctx):
        await ctx.send('Guess a number between 1 and 5!')
        # Only accept digit-only messages from the user who invoked the command.
        def is_correct(m):
            return m.author == ctx.author and m.content.isdigit()
        answer = random.randint(1, 5)
        try:
            guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0)
        except asyncio.TimeoutError:
            return await ctx.channel.send('Sorry, you took too long it was {}.'.format(answer))
        if int(guess.content) == answer:
            await ctx.send('You are right!')
        else:
            await ctx.send('Oops. It is actually {}.'.format(answer))
# Entry point used by discord.py to register this cog on the bot.
def setup(bot):
    bot.add_cog(Trivia(bot))
338b0a701c1b538fba5447210079bf5d74591244 | Python | naokityokoyama/adlabs | /python-projects/coffeetime/coffeetime.py | UTF-8 | 1,409 | 2.609375 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
#!/usr/bin/python
##
# CoffeeTime
# Copyright (C) 2016, Augusto Damasceno
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import time

import serial  # pyserial: provides the PARITY_NONE / EIGHTBITS / STOPBITS_ONE constants used below

import myserial
import serialPython
ports = serialPython.serialDiscover()
port = ports[int(input("Choose the port (number, the first is 0): "))]

# BUG FIX: input() returns strings while datetime gives ints, so the old
# comparisons (d.hour == hour) could never be true and the alarm never fired.
hour = int(input('Hour to alarm: '))
minute = int(input('Minute to alarm: '))

alarmWait = True
while alarmWait:
    d = datetime.datetime.now()
    # Fire once when the wall clock reaches hour:minute.
    if d.hour == hour and d.minute == minute:
        serialPython.serialSend(port, 9600, 'O', 1, serial.PARITY_NONE, serial.EIGHTBITS, serial.STOPBITS_ONE)
        alarmWait = False
    time.sleep(3)  # poll every 3 seconds instead of busy-waiting

# Wait 10 minutes and turn off
time.sleep(600)
serialPython.serialSend(port, 9600, 'F', 1, serial.PARITY_NONE, serial.EIGHTBITS, serial.STOPBITS_ONE)
| true |
dc329b53ce34fbea7a547f4a21a051bd79de5cf3 | Python | sysofwan/zapfeeds | /app/util/get_data_reddit.py | UTF-8 | 3,170 | 2.53125 | 3 | [] | no_license | import feedparser
import requests
import time
from app import db
from app.models.Content import Content
from get_data import url_content
# Feeds polled by get_data_reddit(); alternative feed sets are kept below
# inside the (inactive) triple-quoted string.
REDDIT_RSS = ['http://www.reddit.com/r/news/new/.rss?limit=100',
              'http://www.reddit.com/r/worldnews/new/.rss?limit=100']
'''
REDDIT_RSS = ['http://www.reddit.com/r/news/.rss?limit=100',
              'http://www.reddit.com/r/news/new/.rss?limit=100',
              'http://www.reddit.com/r/worldnews/.rss?limit=100',
              'http://www.reddit.com/r/worldnews/new/.rss?limit=100']
REDDIT_RSS += ['http://www.reddit.com/r/technology/.rss?limit=100',
              'http://www.reddit.com/r/business/.rss?limit=100']
REDDIT_RSS += ['http://www.reddit.com/r/videos/.rss?limit=100',
              'http://www.reddit.com/.rss?limit=100']
'''
def rss_data(url):
    # Fetch one subreddit RSS feed and return a list of dicts, one per entry,
    # combining feed metadata, reddit upvote counts and scraped page content.
    # Returns None if the feed itself cannot be parsed.
    data = []
    #try parse rss feed
    try:
        content = feedparser.parse(url)
    except:
        print 'Problem parsing url: ' + url
        return
    #extract data
    for i in content.entries:
        dictData = {}
        dictData['title'] = i.title
        dictData['timestamp'] = i.published_parsed
        #reddit comment section url
        dictData['raw_url'] = i.link
        url_reddit_comment = i.link
        #ensure the comment url ends with a '/' before appending '.json'
        if url_reddit_comment[-1] != '/':
            url_reddit_comment += '/'
        #fetch reddit's JSON representation of the comment page
        try:
            page = requests.get(url_reddit_comment+'.json').json()
        except:
            print 'this URL:' + url_reddit_comment + ' cannot be oppened'
            continue
        #pull the linked article url and its upvote count from the JSON
        url_reddit_content = page[0]['data']['children'][0]['data']['url']
        dictData['upvotes'] = page[0]['data']['children'][0]['data']['ups']
        #scrape the linked article; skip the entry if nothing useful came back
        urlContentData = url_content(url_reddit_content)
        if urlContentData:
            dictData = dict(dictData.items() + urlContentData.items())
        else:
            print 'No content for url:' + url_reddit_content
            continue
        '''
        #get social count from both reddit comment url and reddit url content
        social1 = social_count(url_reddit_comment,reddit=False)
        social2 = social_count(url_reddit_content,reddit=False)
        #adding dict social1 and social2
        social = dict( (n, social1.get(n, 0)+social2.get(n, 0)) for n in set(social1)|set(social2) )
        dictData = dict(dictData.items() + social.items())
        '''
        #store dict in list
        data.append(dictData)
        # be polite to reddit: one request per second
        time.sleep(1)
    return data
def get_data_reddit():
    # Walk every configured feed, scrape its entries and persist each one,
    # committing the DB session once at the end.
    # NOTE(review): rss_data() returns None when feed parsing fails, which
    # would make the inner loop raise -- confirm feeds are assumed reliable.
    for url in REDDIT_RSS:
        data = rss_data(url)
        for content in data:
            print 'Storing data from ' + url
            print 'TITLE:' + content['title'] + ' URL: ' + content['url']
            print '------------------------------------------------------'
            time.sleep(0.5)
            Content.create_or_update_content(db.session,**content)
    db.session.commit()
| true |
cd4550d18ceae26ab8567b32726b0b80860aee60 | Python | avcordaro/animated-reinforcement-learning | /model/environment_taxi_driver.py | UTF-8 | 4,974 | 3.34375 | 3 | [] | no_license | from model.environment import Environment
import random
class TaxiDriver(Environment):
    """
    TaxiDriver is a 5x5 grid world environment, featuring a taxi, a passenger and their destination.
    Both the passenger spawn state and their destination always belong to one of four locations in
    the grid. The taxi can spawn anywhere in the grid, and the agent must move the taxi towards
    the passenger, pick them up, move to their destination, and drop them off.
    """
    def __init__(self):
        self.name = "Taxi Driver"
        self.MAX_REWARD = 20
        self.MIN_REWARD = -250
        self.REWARD_THRESHOLD = 5
        self.MAX_EPISODE_STEPS = 1000
        self.GRID_ROWS = 5
        self.GRID_COLUMNS = 5
        # Visual map: '|' marks walls between columns, ':' passable gaps.
        self.GRID_MAP = [" : | : : ",
                         " : | : : ",
                         " : : : : ",
                         " | : | : ",
                         " | : | : "
                         ]
        self.action_space = ["Left", "Up", "Right", "Down", "Pickup", "Dropoff"]
        self.NUM_ACTIONS = 6
        # (row, col, action) triples blocked by walls in GRID_MAP.
        self.illegal_actions = [(0, 1, "Right"), (0, 2, "Left"), (1, 1, "Right"),
                                (1, 2, "Left"), (3, 0, "Right"), (3, 1, "Left"),
                                (3, 2, "Right"), (3, 3, "Left"), (4, 0, "Right"),
                                (4, 1, "Left"), (4, 2, "Right"), (4, 3, "Left")
                                ]
        # The four pickup/dropoff landmarks.
        self.locations = [(0, 0), (0, 4), (4, 0), (4, 3)]
        self.state_space = []
        self.NUM_STATE_FEATURES = 6
        # Passenger can be at any landmark or inside the taxi.
        self.passenger_locations = self.locations + ["In Taxi"]
        # Enumerate every (taxi cell, passenger location, destination) state.
        for row in range(self.GRID_ROWS):
            for col in range(self.GRID_COLUMNS):
                for passenger_location in self.passenger_locations:
                    for destination in self.locations:
                        self.state_space.append(((row, col), passenger_location, destination))
        # Random initial episode: passenger/destination at landmarks, taxi anywhere.
        self.passenger_state = random.choice(self.locations)
        self.passenger_in_taxi = False
        self.destination_state = random.choice(self.locations)
        self.taxi_state = (random.randrange(0, 5), random.randrange(0, 5))
        self.start_state = (self.taxi_state, self.passenger_state, self.destination_state)
        self.current_state = self.start_state
    def execute_action(self, action):
        """
        Updates the current state based on the given action. Dropping off a passenger at the correct
        destination gives a reward of 20. Incorrect pickup and dropoff actions give a reward of -10.
        All other steps give a reward of -1.
        @param action: the action chosen by the agent
        @return: the observation to the agent, including the new state and reward
        """
        row, col = self.taxi_state
        # Movement: ignored when it hits a wall or the grid edge.
        if action in ["Left", "Up", "Right", "Down"]:
            if (row, col, action) not in self.illegal_actions:
                if action == "Up" and not row == 0:
                    self.taxi_state = (row - 1, col)
                elif action == "Left" and not col == 0:
                    self.taxi_state = (row, col - 1)
                elif action == "Right" and not col == 4:
                    self.taxi_state = (row, col + 1)
                elif action == "Down" and not row == 4:
                    self.taxi_state = (row + 1, col)
        reward = -1
        episode_done = False
        if action == "Pickup":
            # Valid only when the taxi is at the passenger's cell and empty.
            if self.passenger_state == self.taxi_state and not self.passenger_in_taxi:
                self.passenger_in_taxi = True
                self.passenger_state = "In Taxi"
            else:
                reward = -10
        if action == "Dropoff":
            if self.passenger_in_taxi and self.taxi_state == self.destination_state:
                # Correct dropoff ends the episode with the max reward.
                reward = 20
                episode_done = True
            elif self.taxi_state in self.locations and self.passenger_in_taxi:
                # Dropping at a wrong landmark just relocates the passenger.
                self.passenger_in_taxi = False
                self.passenger_state = self.taxi_state
            else:
                reward = -10
        self.current_state = (self.taxi_state, self.passenger_state, self.destination_state)
        return self.current_state, reward, episode_done
    def random_action(self):
        """
        Chooses a random action from the environment's action space
        @return: a random action
        """
        return random.choice(self.action_space)
    def restart_environment(self):
        """
        Resets the various environment state variables, by randomly generating a new spawn location
        for the passenger, destination and taxi.
        """
        self.passenger_state = random.choice(self.locations)
        self.passenger_in_taxi = False
        self.destination_state = random.choice(self.locations)
        self.taxi_state = (random.randrange(0, 5), random.randrange(0, 5))
        self.start_state = (self.taxi_state, self.passenger_state, self.destination_state)
        self.current_state = self.start_state
| true |
9a581c928c6f68c7c5daf44f195b26adaf10d537 | Python | mamaker/eupy | /label-eg3.py | UTF-8 | 544 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
label-eg3.py
Created on Tue May 14 14:12:58 2019
@author: madhu
"""
import tkinter as tk
root = tk.Tk()
logo = tk.PhotoImage(file="python-image.gif")
explanation = """At present, only GIF and PPM/PGM
formats are supported, but an interface
exists to allow additional image file
formats to be added easily."""
# BUG FIX: pack() returns None, so chaining it onto the constructor left `w`
# bound to None instead of the Label widget.  Create the widget first, keep
# the reference, then lay it out.
w = tk.Label(root,
             text=explanation,
             font="Helvetica 16 bold",
             compound=tk.CENTER,
             image=logo)
w.pack(side="right")
root.mainloop()
| true |
4e9af5df3a40d9c45d4caaf6c948bb02ed5a2855 | Python | brentclark/udemy-python-for-penetration-testers-course | /floodz.py | UTF-8 | 290 | 2.515625 | 3 | [] | no_license | from scapy.all import *
def floodz(source,target):
    # Send 50 TCP packets (source ports 100..149, destination port 600)
    # with a spoofed source address, printing each crafted packet.
    # NOTE(review): sends raw packets on the wire -- lab/teaching use only.
    for source_p in range(100,150):
        IPlayer = IP(src=source,dst=target)
        TCPlayer = TCP(sport=source_p,dport=600)
        # scapy's '/' operator stacks the layers into one packet
        pkt = IPlayer/TCPlayer
        print(pkt)
        send(pkt)
floodz('127.0.0.1', '10.0.0.104')
7712d9e765712e4a161bb029eb64a73e9798b6f4 | Python | pberezow/Kryptografia2020 | /lista2/zad1.py | UTF-8 | 3,412 | 3.28125 | 3 | [] | no_license | import sys
from getpass import getpass
from aes_adapter import AESAdapter, AdapterError, get_oracle, get_challenger, get_decoder
def read_file(file_path, mode):
    """
    Read one input file for the given program mode.

    'oracle' / 'decode': return the file's raw bytes ('decode' input is
    expected to begin with a 16-byte initialization vector).
    'challenge': return a list of exactly two non-empty byte strings, one
    per line; any other line count aborts the program.
    """
    with open(file_path, 'rb') as handle:
        raw = handle.read()
    if mode in ('oracle', 'decode'):
        return raw
    if mode == 'challenge':
        lines = [chunk for chunk in raw.split(b'\n') if chunk != b'']
        if len(lines) != 2:
            print(f'In challenge mode expected 2 messages in file. Got {len(lines)}.')
            exit(1)
        return lines
    print('Incorrect mode, try oracle, challenge or decode')
    exit(1)
def write_file(file_path, messages):
    """Write *messages* to *file_path*: raw bytes are written as-is,
    anything else is treated as an iterable of byte strings, each written
    with a trailing newline."""
    with open(file_path, 'wb') as handle:
        if type(messages) is bytes:
            handle.write(messages)
        else:
            handle.writelines(msg + b'\n' for msg in messages)
def run_aes(aes_adapter, mode, message):
    """Dispatch *message* to the oracle/challenge/decode routine for *mode*.

    Returns whatever the selected routine produces; an unknown mode falls
    through and returns None, as before.
    """
    if mode == 'oracle':
        return get_oracle(aes_adapter)(message)
    if mode == 'challenge':
        return get_challenger(aes_adapter)(message[0], message[1])
    if mode == 'decode':
        return get_decoder(aes_adapter)(message)
def run():
    """
    RUN:
    python3 zad1.py <mode_of_encryption> <path_to_keystore> <key_id> <program's_mode> <file_1> ... <file_n>
    ARGS:
    mode_of_encryption - OFB, CTR or CBC
    path_to_keystore - path to keystore. Example keystore - store.jck
    contains 3 keys with ids: id1, id2 and id3. Password to store.jck - 'password'
    keystore can be created with script `gen_key.sh` (./gen_key.sh KEYSTORE PASSWORD IDENT)
    key_id - id of key from keystore
    program's_mode - oracle, challenge or decode
    """
    # TODO: add parse_args
    # Parse positional CLI arguments; missing ones abort with a message.
    try:
        produce_output_file = True
        enc_mode = sys.argv[1]
        store_path = sys.argv[2]
        key_id = sys.argv[3]
        mode = sys.argv[4]
        files = sys.argv[5:]
    except IndexError:
        print('Not enough arguments.')
        exit(1)
    # NOTE(review): keystore password is hard-coded for convenience; the
    # getpass prompt is kept commented out above it.
    # store_pass = getpass('Keystore password:')
    store_pass = 'password'
    # init aes
    try:
        aes_adapter = AESAdapter(enc_mode, store_path, store_pass, key_id)
    except AdapterError as ex:
        print('Error in AES initialization: ', ex)
        exit(1)
    # run aes on each file
    for file in files:
        print(f"Processing file '{file}'...")
        message = read_file(file, mode)
        if mode != 'challenge':
            print(message, '\n --->')
        result = run_aes(aes_adapter, mode, message)
        print(result)
        # Persist results next to the input: '_enc' for encrypting modes,
        # '_dec' for decode.
        if produce_output_file:
            if mode == 'oracle' or mode == 'challenge':
                write_file(file + '_enc', result)
                print(f'Output file: {file + "_enc"}')
            else:
                write_file(file + '_dec', result)
                print(f'Output file: {file + "_dec"}')
| true |
7f30106bdae6d4abe60369c0d66b105a235c6a38 | Python | lragnarsson/interrail-optimizer | /InterrailOptimizer.py | UTF-8 | 1,236 | 2.90625 | 3 | [] | no_license | import logging
from InputHandler import InputHandler
from CandidateGenerator import CandidateGenerator
from CandidateRanker import get_top_n_trips
import StationData
def run(trip_path="trips/cities-1.json"):
    # End-to-end pipeline: read the trip request, generate candidate trips,
    # score each candidate's route, then print the top 5.
    input_handler = InputHandler()
    input_handler.read_input_file(trip_path)
    candidate_generator = CandidateGenerator(input_handler.requested_cities,
                                             input_handler.trip_days,
                                             input_handler.avg_city_stay)
    # Keep only the 10 most popular candidate city subsets.
    trip_candidates = candidate_generator.get_n_most_popular_candidates(10)
    for trip in trip_candidates:
        trip.calculate_trip_distances(input_handler.requested_cities,
                                      input_handler.starting_station,
                                      StationData.StationData.time_between_stations)
        trip.find_optimal_route()
        trip.calculate_route_score(input_handler.requested_cities, input_handler.all_travellers)
    winning_trips = get_top_n_trips(trip_candidates, 5)
    print("\n".join([str(t) for t in winning_trips]))
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    run()
| true |
e4c3a33145b49cb109a5c406b32c003b368e18d2 | Python | klauer/pyepwing | /eb/encodings/make_encodings.py | UTF-8 | 782 | 2.796875 | 3 | [] | no_license | from __future__ import print_function
import os
import sys
def make_encoding(fn, enc_col, utf8_col, out_f=sys.stdout, name=None):
    """Emit a Python dict literal mapping encoding code points to unicode
    escapes, read from the tab-separated table *fn*.

    enc_col / utf8_col are 0-based column indexes; the utf8 column is
    expected to look like '0xNNNN' (the '0x' prefix is stripped).  Output
    goes to *out_f*; *name* defaults to the file's basename sans extension.
    """
    if name is None:
        name = os.path.split(fn)[1].lower()
        name = os.path.splitext(name)[0]
    print('{} = {{'.format(name), file=out_f)
    with open(fn, 'rt') as f:
        # Iterate the file lazily instead of materializing readlines().
        for line in f:
            line = line.strip()
            # Skip comments and blank lines.
            if line.startswith('#') or not line:
                continue
            cols = line.split('\t')
            # BUG FIX: removed the dead no-op expression `cols[enc_col]`
            # that evaluated and discarded the column.
            print('    {}: u"\\u{}",'.format(cols[enc_col], cols[utf8_col][2:]),
                  file=out_f)
    print('}', file=out_f)
if __name__ == '__main__':
    # Generate the JIS X 0208 mapping module from its source table.
    with open('jisx0208.py', 'wt') as f:
        make_encoding('jisx0208.txt', 1, 2, out_f=f)
| true |
a1144779de32aa1e0d43d60a211d167ade3277ab | Python | ibby360/python-crash-course | /chapter4/looping_slice.py | UTF-8 | 131 | 3.375 | 3 | [] | no_license | players = ['aslam','juma','amin','john','robert']
# Print the first three players, title-cased, using a list slice.
print('First three players')
# FIX: renamed the misspelled loop variable `plyaer` to `player`.
for player in players[:3]:
    print(player.title())
b804f1fba63ec2770b3865109c27af4d71bca18f | Python | parfx/ds_python | /1.syntax/10.classes.py | UTF-8 | 6,770 | 4.28125 | 4 | [] | no_license | # *** Основы объектно-ориентированного программирования (ООП) ***
# Objects have properties (attributes) and methods
# Every object belongs to a particular class (type)
# A class is the "blueprint" of an object
# A concrete object built from a class is called an instance of that class
# Defining a class.  By convention the name starts with a capital letter
class Cat:
    # constructor method
    def __init__(self):
        # properties (attributes, fields)
        self.name = None
    # a method is a function that belongs to the class
    def mur(self):
        return self.name
# Create an object from class Cat (i.e. an instance of Cat)
cat_1 = Cat()
# read an attribute
var = cat_1.name
# print("Значение var ДО изменения: ", var)
# write to an attribute
cat_1.name = 100
# print(cat_1.name)
# print("Значение var ПОСЛЕ изменения: ", var)
# var = 10
# print(cat_1.name)
# call an instance method
res = cat_1.mur()
# print("Результат: ", res)
# Every object (class instance) is independent
# Create a second instance of class Cat
cat_2 = Cat()
cat_2.name = 200
# call the method on both objects
# print(cat_1.mur())
# print(cat_2.mur())
# *** Inheritance -- an OOP principle ***
# Define the parent (ancestor) class
class Animal:
    def __init__(self):
        self.num_legs = 0
# Define the child classes
class Dog(Animal):
    def __init__(self, name):
        # NOTE(review): super().__init__() is not chained, so instances get
        # no num_legs attribute until one is assigned externally (see below).
        self.name = name
    def info(self):
        print(f"My name is {self.name}. Legs: {self.num_legs}")
class Insect(Animal):
    """
    docstring
    """
    def __init__(self, name):
        self.name = name
    def info(self):
        print(f"My name is {self.name}. Legs: {self.num_legs}")
# Create instances of the child classes
dog_1 = Dog("Мурзик")
dog_1.num_legs = 4
bug = Insect('Bug')
bug.num_legs = 8
# call the child classes' method
# dog_1.info()
# bug.info()
class Human(object):
    """
    docstring
    """
    def __init__(self, name, age, weight):
        self.name = name
        self.age = age
        self.weight = weight
    def info(self):
        print(f"Name: {self.name}, Age: {self.age}, Weight: {self.weight}")
class Pilot(Human):
    def skill(self):
        print("я умею летать")
class Medic(Human):
    def skill(self):
        print("я умею лечить")
    def therapy(self, obj):
        print(f"Я вылечил {obj.name}")
class Simple_human(Human):
    pass
# john = Pilot("John", 45, 82.4)
# katrin = Medic("Katrin", 35, 67.5)
# petya = Simple_human("Petya", 5, 23.1)
# Call the method shared by all (inherited from the parent class)
# john.info()
# katrin.info()
# petya.info()
# Call the method that every class except Simple_human has
# john.skill()
# katrin.skill()
# Call the method that only class Medic has
# katrin.therapy(john)
# try:
#     petya.skill()
# except AttributeError:
#     print("у него нету метода skill")
# petya.info()
# *** Polymorphism ***
# poly + morph = many forms of one thing
# Methods are overridden in different classes:
# same method name, different behaviour
# parent class
class B:
    """
    docstring
    """
    def func(self, arg):
        """
        docstring
        """
        res = arg * 2
        print(f"Данные: {res}")
# child class with the method overridden
class B_1(B):
    """
    docstring
    """
    def func(self, arg):
        res = arg ** 3
        print(f"Result: {res}")
# class instances
b = B()
b_1 = B_1()
# call the identically named methods, observing different behaviour
# b.func(10)
# b_1.func(10)
# 2nd kind of polymorphism: "magic" (dunder) methods
# __call__ makes a class instance usable like a function
class Sum(object):
    """
    docstring
    """
    def __init__(self, param):
        self.coeff = param
    def __call__(self,a,b):
        res = (a + b) * self.coeff
        print(f"Result: {res}")
    def __str__(self):
        return f"Sum {self.coeff}"
s_1 = Sum(0.5)
s_2 = Sum(3.14)
# the object behaves like a function
# s_1(10, 20)
# s_2(10, 20)
# when passed to print(), the object supplies its __str__ string
# print(s_1)
# *** Encapsulation ***
# no encapsulation
# class B:
#     def __init__(self, arg):
#         self._attr = arg
#     def _method(self):
#         print("Hello!")
# b = B(100)
# print(b.attr)
# b.method()
# strict encapsulation (double underscore triggers name mangling)
class C:
    def __init__(self,arg):
        self.__attr = arg
    def method_2(self):
        # public accessor for the name-mangled private attribute
        return self.__attr
    def __method(self):
        print("Hello!")
c = C(200)
# c._C__method()
# print(c.method_2())
# *** Composition (aggregation) ***
# using instances of one class inside another
class D:
    def __call__(self, a):
        return a ** 2
class E:
    def m(self, b):
        d = D() # an object of class D is created
        res = b + 2
        return d(res) # the D instance is used as a function
e = E()
res = e.m(10)
# print(res)
# static method, class method
class Person:
    # class-level (static) variable shared by all instances
    counter = 0
    def __init__(self, name, age):
        self.__n = name
        self.__a = age
        # each new instance bumps the shared counter and takes it as its id
        Person.counter += 1
        self.id = Person.counter
    # instance method
    def into(self):
        print(f"Id: {self.id}, Name: {self.__n}, Age: {self.__a}")
    # class method
    @classmethod
    def count_control(cls):
        cls.counter += 1
    # static method
    @staticmethod
    def method(x, y):
        print(f"Res: {x + y}")
john = Person("John", 20)
john.into()
# john.count_control()
bob = Person('Bob', 35)
bob.into()
bob.method(10, 20)
Person.method(10, 20)
| true |
33ab6f22506cfef9ef3e54d957d6d767f84c44ac | Python | Woocheck/Python_excercise | /excercises/wykresyNBP.py | UTF-8 | 1,735 | 2.84375 | 3 | [] | no_license | import currencyNBP as nbp
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.ticker as plticker
def wykresJednaWaluta( row, column, notowania, nazwaWaluty, axes ):
    """Draw one currency sub-chart (line plot of the 'mid' rate) into the
    axes cell at (row, column)."""
    notowania.plot(ax=axes[row,column], y = 'mid', kind = 'line', title = nazwaWaluty, grid = True, fontsize = 6, figsize = ( 8, 8.66 ) )
def wykresCzteryWaluty( dataPoczatek, dataKoniec, waluty ):
    """Draw a 2x2 grid of charts, one per currency, for the given year range,
    and save it to the file 'czteryWaluty'."""
    fig, axes = plt.subplots(nrows=2, ncols=2)
    x = 0
    y = 0
    for waluta in waluty:
        # BUG FIX: the year range was hard-coded to 2019-2020, ignoring the
        # dataPoczatek/dataKoniec parameters; use the caller's range instead.
        notowania = nbp.notowaniaLata( dataPoczatek, dataKoniec, waluta )
        # x%2 / y%2 walk the four cells (0,0), (1,1), (0,1), (1,0).
        wykresJednaWaluta( x%2, y%2, notowania, waluta, axes )
        x+=1
        if x%2:
            y+=1
    plt.savefig('czteryWaluty', dpi=None, facecolor='w', edgecolor='w',\
        orientation='portrait', papertype='a4', format=None,\
        transparent=False, bbox_inches=None, pad_inches=0.1,\
        frameon=None, metadata=None)
def wykresCzteryNaJeden( dataPoczatek, dataKoniec, waluty ):
    """Draw all four currencies on a single chart for the given year range,
    and save it to the file 'czteryWalutyJedenWykres'."""
    fig, ax = plt.subplots()
    # Place an x tick every 60 data points to keep the axis readable.
    loc = plticker.MultipleLocator(base=60)
    ax.xaxis.set_major_locator(loc)
    ax.grid()
    for waluta in waluty:
        notowanie = nbp.notowaniaLata( dataPoczatek, dataKoniec, waluta )
        ax.plot( notowanie.index, notowanie['mid'], label ='linear')
    fig.savefig('czteryWalutyJedenWykres', dpi=None, facecolor='w', edgecolor='w',\
        orientation='portrait', papertype='a4', format=None,\
        transparent=False, bbox_inches=None, pad_inches=0.1,\
        frameon=None, metadata=None)
| true |
47f2a156fc59e928274376af042a71a53809b429 | Python | xistadi/Python-practice | /test.py | UTF-8 | 511 | 3.84375 | 4 | [] | no_license | number = 5
fnumber = 5.7
name = "abraham"
age = 21
status = True
# printing with escaped quotes
print( "вывод \"хы\" епта" )
# newline escape
print("привет \nмир")
# string concatenation (non-strings must be converted with str())
print("привет еня зовут " + name )
print("мне " + str(age) + " года")
###########################
# read user input and echo it back
name = input ("Введи чета ")
print("ну и че ты написал? " + name + " че эта?")
# simple integer arithmetic
a=5
b=10
c=a+b
print( c )
| true |
46d90227032c5e36b7a9dab024e58fe2af29a100 | Python | JJayeee/CodingPractice | /BaekJoon/단계별로 풀어보기/DFS & BFS/7576_토마토.py | UTF-8 | 1,187 | 3.171875 | 3 | [] | no_license | """
6 4
0 0 0 0 0 0
0 0 0 0 0 0
0 0 0 0 0 0
0 0 0 0 0 1
6 4
0 -1 0 0 0 0
-1 0 0 0 0 0
0 0 0 0 0 0
0 0 0 0 0 1
6 4
1 -1 0 0 0 0
0 -1 0 0 0 0
0 0 0 0 -1 0
0 0 0 0 -1 1
5 5
-1 1 0 0 0
0 -1 -1 -1 0
0 -1 -1 -1 0
0 -1 -1 -1 0
0 0 0 0 0
2 2
1 -1
-1 1
8 -1 6 14 0
"""
import collections
m, n = map(int, input().split()) # m: y, n: x
arr = [list(map(int, input().split())) for _ in range(n)]
tomato = collections.deque()
tomato_cnt = 0
for x in range(n):
for y in range(m):
if arr[x][y] == 1:
tomato.append((x, y))
elif arr[x][y] == 0:
tomato_cnt += 1
if tomato_cnt == 0:
print(0)
else:
total = n*m
time = 0
while tomato:
for i in range(len(tomato)):
kx, ky = tomato.popleft()
for dx, dy in (1, 0), (-1, 0), (0, 1), (0, -1):
nx = kx + dx
ny = ky + dy
if 0 <= nx < n and 0 <= ny < m and arr[nx][ny] == 0:
arr[nx][ny] = 1
tomato_cnt -= 1
tomato.append((nx, ny))
time += 1
if not tomato_cnt:
break
if tomato_cnt:
print(-1)
else:
print(time) | true |
da3b0ec23b9d23deff90012820e5e2b28de3b663 | Python | candyer/exercises | /rotate.py | UTF-8 | 1,291 | 3.796875 | 4 | [] | no_license | def rotate1(l, n):
"""create a function that return a rotated list. l is a list; n is an int
"""
if len(l) == 0 or len(l) == 1:
return l
if n <= 0:
for i in range(abs(n)):
l.append(l[0])
l.pop(0)
return l
else:
for i in range(len(l) - n%len(l)):
l.append(l[0])
l.pop(0)
return l
# Smoke checks (Python 2 print statements); each comment shows the expected output.
print rotate1([], 3) #[]
print rotate1([1], 2) #[1]
print rotate1(range(8), 0) #[0, 1, 2, 3, 4, 5, 6, 7]
print rotate1(range(8), -2) #[2, 3, 4, 5, 6, 7, 0, 1]
print rotate1(range(8), -10) #[2, 3, 4, 5, 6, 7, 0, 1]
print rotate1(range(8), 2) #[6, 7, 0, 1, 2, 3, 4, 5]
print rotate1(range(8), 10) #[6, 7, 0, 1, 2, 3, 4, 5]
print rotate1(range(8), 8) #[0, 1, 2, 3, 4, 5, 6, 7] (8 is a multiple of len, so no rotation)
# Better solution: rotate via slice reversals; O(n) time, no repeated pops.
def reverse(l):
    """Return a reversed copy of the sequence l (slicing preserves the type)."""
    return l[::-1]
def rotate2(l, n):
    """Return a new list equal to l rotated right by n (left for negative n).

    Runs in O(len(l)).  The original computed
    reverse(reverse(first) + reverse(second)); since
    rev(rev(A) + rev(B)) == B + A, the identical result is a single
    concatenation of two slices, avoiding three intermediate copies.
    """
    if not l:
        return l
    k = -n % len(l)  # split point: right-rotation by n == left-rotation by k
    return l[k:] + l[:k]
# Smoke checks for rotate2 (Python 2 print statements); comments show expected output.
print rotate2([], 3) #[]
print rotate2([1], 2) #[1]
print rotate2(range(8), 0) #[0, 1, 2, 3, 4, 5, 6, 7]
print rotate2(range(8), -2) #[2, 3, 4, 5, 6, 7, 0, 1]
print rotate2(range(8), -10) #[2, 3, 4, 5, 6, 7, 0, 1]
print rotate2(range(8), 2) #[6, 7, 0, 1, 2, 3, 4, 5]
print rotate2(range(8), 10) #[6, 7, 0, 1, 2, 3, 4, 5]
print rotate2(range(8), 8) #[0, 1, 2, 3, 4, 5, 6, 7]
| true |
b7453f60f4dbf9cd35e1253365a4b464d19054ad | Python | ksomemo/Competitive-programming | /atcoder/abc/060/C.py | UTF-8 | 271 | 2.828125 | 3 | [] | no_license | def main():
N, T = map(int, input().split())
ts = list(map(int, input().split()))
ans = 0
for i in range(N - 1):
t1 = ts[i]
t2 = ts[i + 1]
ans += min(t2 - t1, T)
ans += T
print(ans)
if __name__ == '__main__':
main()
| true |
2ff150facd54ec0a05b19f777df41653073cdecb | Python | hoyttyoh/xray-bragg-optics | /conical_bragg_optic.py | UTF-8 | 1,863 | 3.328125 | 3 | [] | no_license | # class for conical bragg optic
import numpy as np
class ConicalBraggOptic(object):
    """Parametric model of a conical Bragg x-ray optic surface.

    The surface spans radii [rmin, rmax] with a vertical opening of
    `height`, parameterised by radius r and polar angle h.
    """

    def __init__(self, r1, r2, height, material=None, mode=None):
        """
        :param r1: inner radius (rmin)
        :param r2: outer radius (rmax)
        :param height: vertical extent of the optic
        :param material: optional material descriptor (stored, not used here)
        :param mode: optional mode descriptor (stored, not used here)
        """
        self.rmin = r1
        self.rmax = r2
        self.height = height
        self.material = material
        self.mode = mode
        # Angular half-range of the theta parameter.
        self.theta_max = np.arcsin(self.height / 2.0 / self.rmin)
        # Prime normal-vector reference used to establish the initial
        # position and rotation angle relative to an initial k from the
        # source.  (Previously these were dead local variables; store them
        # on the instance as the original comment implies.)
        self._prime_theta = 0.0
        self._prime_rad = self.rmin + (self.rmax - self.rmin) / 2.0

    def surface_point(self, r, h):
        """Return the Cartesian point(s) [x, y, z] at parameters (r, h).

        Accepts scalars or broadcastable NumPy arrays; with (k, k) grids the
        result has shape (3, k, k).
        """
        return np.array([r, r * np.sin(h), r * np.cos(h)])

    def surface_normal(self, r, h):
        """Return the unit surface normal(s) at parameters (r, h).

        Same scalar/array broadcasting behaviour as surface_point.
        """
        N = np.array([r,
                      -r * np.sin(h - np.pi / 2.0),
                      -r * np.cos(h - np.pi / 2.0)])
        # Normalise element-wise so array-shaped parameters keep working.
        return N / np.sqrt(N[0] * N[0] + N[1] * N[1] + N[2] * N[2])

    def get_parametric_rep(self):
        """Return (points, normals) sampled on a 4x4 (r, phi) grid.

        Both returned arrays have shape (3, 4, 4).
        """
        r = np.linspace(self.rmin, self.rmax, 4)
        phi = np.linspace(-self.theta_max, self.theta_max, 4)
        H, V = np.meshgrid(r, phi)
        return self.surface_point(H, V), self.surface_normal(H, V)
if __name__ == "__main__":
S = ConicalBraggOptic(100.,200.,20.0)
from mayavi import mlab
P,N = S.get_parametric_rep()
# plot the optical surface
mlab.mesh(P[0],P[1],P[2],color=(1,1,1))
mlab.quiver3d(P[0],P[1],P[2],N[0],N[1],N[2])
mlab.points3d(0.,0.,0.,color=(1,0,0))
mlab.show()
| true |
9fad426db5558f728780694f5bfa0ac0fa92cc1a | Python | BensonRen/catalyst_project | /cut_video.py | UTF-8 | 5,245 | 2.90625 | 3 | [
"MIT"
] | permissive | # This script / function calls ffmpeg in the linux system to cut frames
import os
import sys
import numpy as np
import shutil
save_img_big_dir = '/scratch/sr365/Catalyst_data/test_video_cut'  # destination root for extracted frames
video_big_dir = '/scratch/sr365/Catalyst_data/BW'  # root directory containing per-flight video folders
post_fix = '.MP4'  # video file extension to process (read as a global by cut_video_to_dest)
# Function that gets the list of videos
def get_video_list(video_big_dir, post_fix):
    """Collect the full paths of all videos one directory level below video_big_dir.

    Only immediate sub-directories are scanned; a file counts as a video
    when its full path ends with `post_fix` (e.g. '.MP4').

    :param video_big_dir: directory whose sub-folders are searched for videos
    :param post_fix: filename suffix that identifies a video file
    :return: list of absolute/relative paths, one per matching video
    """
    subdirs = (os.path.join(video_big_dir, entry)
               for entry in os.listdir(video_big_dir))
    return [os.path.join(subdir, fname)
            for subdir in subdirs
            if os.path.isdir(subdir)
            for fname in os.listdir(subdir)
            if os.path.join(subdir, fname).endswith(post_fix)]
def cut_video_to_dest(video_list, save_img_big_dir, video_big_dir):
    """
    Extract frames from each video in video_list into save_img_big_dir
    by shelling out to ffmpeg.  One output folder is created per video,
    named after the video's path relative to video_big_dir with '/'
    replaced by '_'.  Videos whose output folder already exists are
    skipped, so the function is safe to re-run.

    NOTE(review): reads the module-level global `post_fix` (not a
    parameter) to strip the extension -- keep it in sync with the caller.
    """
    for video in video_list:
        # Derive the output folder name from the video path relative to the root.
        video_name = video.split(video_big_dir)[-1].split(post_fix)[0]
        # Strip the leading '/'
        if video_name.startswith('/'):
            video_name = video_name[1:]
        video_name = video_name.replace('/','_')
        print('cutting :', video_name)
        # Create the save_dir if not exist
        save_dir = os.path.join(save_img_big_dir,video_name)
        # ONLY ffmpeg if this folder does not exist, which means this video has not been cut before
        if not os.path.isdir(save_dir):
            # This means the video was never cut, make dir and cut here!
            os.makedirs(save_dir)
        else:
            # This means the video has been cut, ignore and continue!!
            continue
        # Prepare the ffmpeg command and execute it.
        # NOTE(review): '-r 24' is placed after the output pattern here;
        # presumably a 24 fps output rate was intended -- verify ffmpeg
        # applies it as expected in this position.
        command = 'ffmpeg -i {} {}/%04d.png -r 24 -hide_banner'.format(video, save_dir )
        os.system(command)
        # Example: ffmpeg -i ../2021_03_10_10_D_90/DJI_0009__height_50m_N.mp4 DJI_0009_height_50m_N%04d.jpg -hide_banner
# ffmpeg -i ../2021_03_10_10_D_90/DJI_0009__height_50m_N.mp4 DJI_0009_height_50m_N%04d.jpg -hide_banner
def label_imgs_with_folder_name(mother_dir):
    """Prefix every image inside each sub-folder with that folder's name.

    example: /video_cut/2021_03_10_10_D_90_DJI_0052_DJI_0051_height_100m_S/001.jpg
    is renamed (in place, same folder) to
    .../2021_03_10_10_D_90_DJI_0052_DJI_0051_height_100m_S_001.jpg

    Fixes vs the original: the '_' separator shown in the example above was
    missing (names came out as '...S001.jpg'), and re-running the function
    used to stack the prefix again; files already carrying the prefix are
    now skipped, making the operation idempotent.
    """
    for folder in os.listdir(mother_dir):
        cur_folder = os.path.join(mother_dir, folder)
        if not os.path.isdir(cur_folder):
            continue
        prefix = os.path.basename(cur_folder) + '_'
        for img in os.listdir(cur_folder):
            if img.startswith(prefix):
                continue  # already renamed on a previous run
            cur_img = os.path.join(cur_folder, img)
            new_name = os.path.join(cur_folder, prefix + img)
            print('original name {}, new name {}'.format(cur_img, new_name))
            os.rename(cur_img, new_name)
def sample_from_video_cuts(mother_dir, save_dir, exclude_pre=0.1, exclude_post=0.2, sample_num=3):
    """
    Sample a subset of frames from every video-cut folder into one dataset dir.

    :param mother_dir: source dir; each sub-folder holds the frames of one video
    :param save_dir: destination directory for sampled frames (created if missing)
    :param exclude_pre: fraction of frames skipped at the start of each video
    :param exclude_post: fraction of frames skipped at the end of each video
    :param sample_num: frames drawn (without replacement) from each video

    Fixes vs the original: the permutation used to span a range of size
    num_img*(exclude_pre + exclude_post) -- the size of the *excluded*
    region -- so samples never reached the later frames; it now spans the
    kept middle region [num*pre, num*(1-post)).  Also, total_samples_got
    was never incremented, so the summary always reported 0 samples.
    """
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    total_samples_got = 0
    for folder in os.listdir(mother_dir):
        cur_folder = os.path.join(mother_dir, folder)
        # Only go through folders
        if not os.path.isdir(cur_folder):
            continue
        img_list = sorted(os.listdir(cur_folder))  # sorted frame file names
        num_img = len(img_list)
        # Draw sample_num distinct indices from the kept middle region.
        region_size = int(num_img * (1 - exclude_pre - exclude_post))
        offset = int(num_img * exclude_pre)
        sample_index_list = np.random.permutation(region_size)[:sample_num] + offset
        # Copy the sampled frames into the big dataset dir.
        for sample_index in sample_index_list:
            shutil.copyfile(os.path.join(cur_folder, img_list[sample_index]),
                            os.path.join(save_dir, img_list[sample_index]))
            total_samples_got += 1
    print('out of {} folders, we got {} samples and saving them in {}'.format(
        len(os.listdir(mother_dir)), total_samples_got, save_dir))
if __name__ == '__main__':
    # Three-stage pipeline; uncomment the stage(s) to run.
    # Stage 1: cut every video under video_big_dir into per-video frame folders.
    #video_list = get_video_list(video_big_dir, post_fix)
    #cut_video_to_dest(video_list, save_img_big_dir, video_big_dir)
    # Stage 2: prefix each extracted frame with its folder (video) name.
    #label_imgs_with_folder_name(save_img_big_dir)
    # Stage 3: sample a subset of frames into a flat dataset directory.
    sample_from_video_cuts(save_img_big_dir, save_dir='/scratch/sr365/Catalyst_data/test_moving_imgs')
| true |
6517fec4d77570a3b3058385a4b8b69313b8a877 | Python | python-practicing/Aizu_Online_Judge | /ALDS1_9_A.py | UTF-8 | 860 | 3.25 | 3 | [] | no_license | import math
# AOJ ALDS1_9_A: print each 1-based heap node's key with its parent's and
# children's keys.  Output format: every field ends with ", ", including a
# trailing space at the end of each line.
n = int(input())
heap_elements = list(map(int, input().split()))
for i in range(1, n + 1):
    # Build the line field by field; a field is emitted only when the
    # corresponding node exists.  (The original printed left/right for the
    # root unconditionally, which raised IndexError for n < 3 and was wrong
    # for a root with a single child.)
    line = f'node {i}: key = {heap_elements[i - 1]}, '
    if i > 1:
        # Parent of 1-based node i is node i // 2.
        line += f'parent key = {heap_elements[i // 2 - 1]}, '
    if 2 * i <= n:
        line += f'left key = {heap_elements[2 * i - 1]}, '
    if 2 * i + 1 <= n:
        line += f'right key = {heap_elements[2 * i]}, '
    print(line)
| true |
73e256eb063823efd4ece5ef93413a65b8885d62 | Python | leekh730/Develop | /learn_webscraping/makingtheSoup.py | UTF-8 | 702 | 2.75 | 3 | [] | no_license | from bs4 import BeautifulSoup
path = 'datas/sample01.html' # from File
with open(path) as fp: # Safe Return Resource
soup = BeautifulSoup(fp, features='lxml')
print(type(soup),soup)
# <class 'bs4.BeautifulSoup'><html><body><p>a web page</p></body></html>
import requests # from URL
res = requests.get('https://google.com/')
print(res.status_code, res._content)
soup = BeautifulSoup(res.content, features='lxml')
print(type(soup), soup.prettify())
# 200b'<!doctype html><html itemscope=""itemtype="http://schema.org/..."
# <class 'bs4.BeautifulSoup'><html><body><p>a web page</p></body></html>
# Create instance From URL (at least 3site) and share source with google Doc
| true |
f3c1d129b43ed2c8a72e2858efe490b37d505b2b | Python | xiemingke/lesson3-hw | /homework5.1.py | UTF-8 | 194 | 3.328125 | 3 | [] | no_license | score = []
z = 0
x = int(input('student in class'))
for i in range(x):
y = int(input("student score"))
score.append(int(y))
z = z+y
print(score)
print(z//x)
| true |
0f6c1ccc0bdc15cecd6ef46d12b4ab30ed270954 | Python | AndreyQQQQ/amis_python | /km73/Dashchik_Andrey/1.py | UTF-8 | 120 | 3.03125 | 3 | [] | no_license | a=float(input('First value:'))+float(input('Second value:'))+float(input('Third value:'))
print("Sum:"+str(a))
input() | true |
fcf74c47a0b914db7526e9f670ad8410c4c3b33d | Python | PeterG75/proxy6-1 | /proxy6/errors.py | UTF-8 | 1,055 | 2.8125 | 3 | [
"MIT"
] | permissive | class Proxy6Error(Exception):
"""Proxy6 API error"""
def __init__(self, *, code: int = None, description: str = None):
if code is not None:
self.code = code
if description is not None:
self.description = description
if self.__class__ != Proxy6Error:
super().__init__(self.__class__.__doc__)
else:
super().__init__(f"{description} (code {code})")
class CountError(Proxy6Error):
    """Wrong proxies quantity, wrong amount or no quantity input"""

    code = 200  # error_id value returned by the Proxy6 API for this condition
    description = "Error count"  # exact `error` string from the API response
class NoMoneyError(Proxy6Error):
    """Balance error. Zero or low balance on your account"""

    code = 400  # error_id value returned by the Proxy6 API for this condition
    description = "Error no money"  # exact `error` string from the API response
def select(data: dict) -> Proxy6Error:
    """Build the appropriate Proxy6Error from an API error payload.

    Pops 'error_id' and 'error' from `data` (the dict is mutated) and
    returns an exception *instance*: a specific subclass when the code is
    known, otherwise a generic Proxy6Error carrying the raw values.

    Fix: the original returned the subclass *object* (not an instance) on
    a match, contradicting the return annotation; it now instantiates it.
    """
    code = data.pop('error_id')
    description = data.pop('error')

    for error_cls in (CountError, NoMoneyError):
        if code == error_cls.code:
            # A mismatch here would mean the API's wording drifted from the
            # documented description for this code.
            assert description == error_cls.description
            return error_cls()
    return Proxy6Error(code=code, description=description)
| true |