blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f3fd6cf2971a7fe8f9a0ca27e4fe235348f09138 | Python | ramksharma1674/pyprojold | /temp.py | UTF-8 | 413 | 3.90625 | 4 | [] | no_license | def to_celcius(f):
# Convert a Fahrenheit temperature `f` to Celsius: C = (F - 32) * 5/9.
celcius = (f - 32) * 5/9
return celcius
def to_frnh(c):
    """Convert a Celsius temperature to Fahrenheit."""
    return c * 9 / 5 + 32
def main():
    """Print a small Fahrenheit->Celsius table, then Celsius->Fahrenheit."""
    for f_temp in range(0, 212, 40):
        print(f_temp, "Fahrenheit = ", round(to_celcius(f_temp)), "Celcius")
    for c_temp in range(0, 100, 20):
        print(c_temp, "Celcius = ", round(to_frnh(c_temp)), "Farenheit")


if __name__ == "__main__":
    main()
| true |
f723365c01be763a1cfd9a43beb7cdaad446df4d | Python | Vandewaetere/py_test_interface | /bk1697.py | UTF-8 | 3,175 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
import time
import serial
def get_serial():
    """Open the supply's serial port (PL2303 USB adapter) at 9600 baud."""
    return serial.Serial('/dev/ttyPROLIFIC', baudrate=9600)
# Command lines are terminated by a bare carriage return (no newline).
eol = '\r'


class TimeoutException(Exception):
    """Raised when a serial read does not complete within its deadline."""
class BK1697(object):
    """Driver for a B&K Precision 1697 programmable DC power supply.

    Commands are generated dynamically: any all-uppercase attribute
    (e.g. ``bk.GETD``) becomes a callable that sends that command and
    returns the response lines.
    """

    def __init__(self, sp=None):
        # `sp` may be any object with the pyserial interface; a real
        # port is only opened when none is supplied.
        self.sp = sp or get_serial()
        self.channel = 0

    def readline(self, timeout=1.):
        """Read characters until the CR terminator and return them.

        Raises TimeoutException if no byte arrives within `timeout`
        seconds (one second covers all observed transactions).
        """
        saved_timeout = self.sp.getTimeout()
        self.sp.setTimeout(timeout)
        buf = ''
        while 1:
            ch = self.sp.read(1)
            if ch == '':
                raise TimeoutException("Timeout after " + repr(buf))
            if ch == eol:
                self.sp.setTimeout(saved_timeout)
                return buf
            buf += ch

    def txrx(self, send, arg=''):
        """Send `send` + channel + `arg`, yielding response lines until 'OK'."""
        cmd = send + ('%02d' % self.channel) + arg + eol
        self.sp.write(cmd)
        while 1:
            resp = self.readline()
            if resp == 'OK':
                return
            yield resp

    def cmd(self, c, c2=''):
        """Run one command and collect all of its response lines."""
        return list(self.txrx(c, c2))

    def __getattr__(self, attr):
        # All-caps names become supply commands; everything else is a
        # genuine missing attribute.
        if attr.isupper():
            return lambda *args: self.cmd(attr, *args)
        raise AttributeError('Dunno about ' + repr(attr))

    def begin_session(self):
        """Lock the control panel.

        Not required for control, but useful for long-running tests where
        curious fingers might disrupt things. Pair with end_session() in a
        try/finally so the supply isn't left locked.
        """
        self.SESS()

    def end_session(self):
        """Unlock the control panel again."""
        self.ENDS()

    def init_serial(self):
        """Replay the command sequence observed at power-up.

        Doesn't appear to be necessary for control of the device.
        """
        self.GMAX()
        self.GOVP()
        self.SESS()
        self.GETP()
        self.GETM()
        self.GEEP('004')
        self.GPAL()
        self.GETS()
        self.GETD()
        self.GETD()
        self.GETD()

    def set_volts(self, volts):
        """Set the output voltage (0.1 V resolution)."""
        self.VOLT('%03d' % int(volts * 10.))
        time.sleep(0.001)

    def set_amps(self, amps):
        """Set the current limit (10 mA resolution)."""
        self.CURR('%03d' % int(amps * 100.))
        time.sleep(0.001)

    def get_volts_amps(self):
        """Return the measured (volts, amps) pair from a GETD query."""
        raw = self.GETD()[0]
        return int(raw[:4]) * 0.010, int(raw[4:]) * 0.0001

    def get_volts(self):
        return self.get_volts_amps()[0]

    def get_amps(self):
        return self.get_volts_amps()[1]

    def output_on(self, on=True):
        # SOUT argument: '10'[True] == '0' turns the output on,
        # '10'[False] == '1' turns it off.
        self.SOUT('10'[bool(on)])

    def output_off(self):
        return self.output_on(False)
if __name__=="__main__":
bk = BK1697()
bk.begin_session()
try:
bk.output_on()
for v in range(300):
bk.set_volts(v/10.)
print bk.get_volts_amps()
bk.set_volts(2.7)
for a in range(1000):
bk.set_amps(a/1000.)
print bk.get_amps()
bk.output_off()
finally:
try:
bk.end_session()
except:
pass
| true |
51f6dd909ae689255bd5adabc7226ed4e1ba3286 | Python | mwangimaina/pythonpostgres | /PycharmProjects/untitled/lesson3a.py | UTF-8 | 104 | 3.1875 | 3 | [] | no_license | # LOOPS
# Print a 9x9 multiplication table, one row per line, tab-separated.
for row in range(1, 10):
    for col in range(1, 10):
        print(row * col, end="\t")
    print()
| true |
2616e31df92c90f5b8103517e5e5a8c524f9283f | Python | mmoosstt/diponaut | /gui/GlobalVariables.py | UTF-8 | 2,837 | 2.5625 | 3 | [] | no_license | import PySide
import PySide.QtGui
import PySide.QtCore
import logic.TradeGlobals
import utils.Interfaces
class myLineEdit(PySide.QtGui.QLineEdit):
    """QLineEdit that re-emits edits as (field name, new value) pairs."""

    # Emitted for every user edit; carries the field name and its value.
    valueChanged = PySide.QtCore.Signal(str, utils.Interfaces.IVariable)

    def __init__(self, parent, name):
        PySide.QtGui.QLineEdit.__init__(self, parent)
        self.name = name
        self.textChanged.connect(self.myTextChanged)

    def myTextChanged(self, value):
        """Relay a text change, muting textChanged to avoid feedback loops."""
        self.value = value
        self.textChanged.disconnect(self.myTextChanged)
        self.valueChanged.emit(self.name, self.value)
        self.textChanged.connect(self.myTextChanged)
class GlobalVariables(PySide.QtGui.QWidget):
    """Grid of labelled line edits mirroring the global trading variables.

    For each non-protected IVariable in GloVar, an editor widget
    ("Q<name>") and a write-back callback ("C<name>") are stored
    dynamically on the instance.
    """

    def __init__(self, parent=None):
        PySide.QtGui.QWidget.__init__(self, parent)
        self.data_state = None
        self.data_prediction = None
        self.GloVar = logic.TradeGlobals.GloVar
        self.GloVar.signal_set.connect(self.SetGloValues)

        layout = PySide.QtGui.QGridLayout(self)
        row = 0
        col = 0
        for _name in sorted(self.GloVar.__dict__.keys()):
            _obj = self.GloVar.__dict__[_name]
            if isinstance(_obj, utils.Interfaces.IVariable):
                print(_obj.type, _name)
                if _obj.protected == False:
                    layout.addWidget(PySide.QtGui.QLabel(_name, self), row, col)
                    self.__dict__["Q{0}".format(_name)] = myLineEdit(self, _name)
                    layout.addWidget(self.__dict__["Q{0}".format(_name)], row, col + 1)
                    # Callback pushing edits back into the global store.
                    self.__dict__["C{0}".format(_name)] = lambda name, value: self.GloVar.set(name, value)
                    self.__dict__["Q{0}".format(_name)].valueChanged.connect(self.__dict__["C{0}".format(_name)])
                    row += 1
        self.setLayout(layout)

    def SetGloValues(self, name, instance):
        """Reflect a GloVar change into the matching editor without re-emitting."""
        editor_key = "Q{0}".format(name)
        callback_key = "C{0}".format(name)
        if editor_key in self.__dict__.keys():
            editor = self.__dict__[editor_key]
            callback = self.__dict__[callback_key]
            if isinstance(editor, PySide.QtGui.QLineEdit):
                editor.valueChanged.disconnect(callback)
                editor.setText(str(instance.value))
                editor.valueChanged.connect(callback)

    def SetDataStates(self, Data):
        # NOTE(review): `DataApi` is never imported in this module, so this
        # isinstance check would raise NameError if called — verify.
        if isinstance(Data, DataApi.TradingStates):
            self.data_state = Data

    def SetDataPrediction(self, Data):
        # NOTE(review): same missing `DataApi` import as SetDataStates.
        if isinstance(Data, DataApi.TradingPrediction):
            self.dataPrediction = Data
if __name__ == "__main__":
    app = PySide.QtGui.QApplication([])
    # Bug fix: the original instantiated `TraidingInterface`, a name that
    # is not defined or imported anywhere in this module (NameError at
    # runtime). This module's widget is GlobalVariables.
    MainWidget = GlobalVariables()
    MainWidget.resize(800, 800)
    MainWidget.show()
    app.exec_()
| true |
550847d06aa2e90d68725ab4c65797be8abc89b6 | Python | boonwj/adventofcode2019 | /day16/fft.py | UTF-8 | 1,975 | 3.75 | 4 | [] | no_license | """
Flawed frequency transmission
"""
import sys
def right_values(element_pos, max_size):
    """Yield `max_size` FFT pattern coefficients for 1-based `element_pos`.

    The base pattern [0, 1, 0, -1] is stretched so each value repeats
    `element_pos` times, and the very first value is skipped once (the
    puzzle's offset rule).

    Fixes vs. the original: no longer shadows the builtin `next`, and the
    quadrant is derived with exact integer arithmetic instead of
    float-threshold comparisons (0.25 / 0.5 / 0.75).
    """
    base_pattern = [0, 1, 0, -1]
    period = len(base_pattern) * element_pos
    for step in range(1, max_size + 1):
        # Which quarter of the stretched period this position falls in.
        quadrant = (step % period) * len(base_pattern) // period
        yield base_pattern[quadrant]
def fft(in_data, num_phases):
    """Run `num_phases` of the Flawed Frequency Transmission.

    `in_data` may be an int or a digit string; the resulting digit
    sequence is returned as a single int (leading zeros vanish).
    """
    digits = [int(ch) for ch in str(in_data)]
    size = len(digits)
    for _ in range(num_phases):
        # In-place update is safe here: element i's pattern is zero for
        # every position before i-1, so earlier writes in the same phase
        # never feed into later sums.
        for i in range(1, size + 1):
            total = sum(d * coeff for d, coeff in zip(digits, right_values(i, size)))
            digits[i - 1] = abs(total) % 10
    value = 0
    for d in digits:
        value = value * 10 + d
    return value
def part2_calculation(offset, in_data, num_phases):
    """FFT phases via the suffix-sum shortcut; return 8 digits at `offset`.

    Each output digit from `offset` onward is computed as the running
    suffix sum mod 10 (assumes the offset lies in the back half of the
    signal, where every pattern coefficient is 1 — TODO confirm at call
    sites).
    """
    digits = [int(ch) for ch in str(in_data)]
    for phase in range(num_phases):
        print(f"Phase {phase+1}")
        running = sum(digits[offset:])
        for i in range(offset, len(digits)):
            current = running
            running -= digits[i]
            digits[i] = abs(current) % 10
    return digits[offset:offset + 8]
if __name__ == "__main__":
    if len(sys.argv) < 2:
        sys.exit(f"To use: {sys.argv[0]} <input>")
    with open(sys.argv[1], "r") as in_f:
        in_data = in_f.read().strip()
    # The real signal is the input repeated 10000 times; the message
    # offset is encoded in its first seven digits.
    in_data = in_data * 10000
    offset = int(in_data[:7])
    print(offset)
    print(len(in_data))
print(part2_calculation(offset, in_data, 100)) | true |
35c256dfc25657de4439e7cff8a3c9479f593423 | Python | Paradiss/lesson2 | /if2.py | UTF-8 | 486 | 3.515625 | 4 | [] | no_license |
def input_2_str(str1, str2):
    """Classify a pair of values.

    Returns 0 if either argument is not a str, 1 if they are equal,
    2 if the first is longer, 3 if the second is 'learn', otherwise
    the placeholder string 'хмм...'.
    """
    if type(str1) is not str or type(str2) is not str:
        return 0
    if str1 == str2:
        return 1
    if len(str1) > len(str2):
        return 2
    if str2 == 'learn':
        return 3
    return 'хмм...'
# Demonstrate each classification branch, one result per line.
for first, second in (('asd', 'add'), (3, 'add'), ('EFf', 5.4),
                      ('EFf', 'EFf'), ('EFf', 'Eff'), ('EFfc', 'EFf'),
                      ('EFf', 'learn')):
    print(input_2_str(first, second))
c93b5830a29dc968c229931044c6e6b60341bdca | Python | kwoneyng/beakjoon | /1613 역사.py | UTF-8 | 488 | 2.625 | 3 | [] | no_license | import sys
# Shadow input() with a raw stdin reader for speed (competitive-judge idiom).
input = sys.stdin.readline

n, k = map(int, input().split())
# bd[a][b] == -1 when a is known to precede b (from the given pairs),
# +1 for the reverse, 0 when the order is unknown.
bd = [[0] * (n + 1) for _ in range(n + 1)]
for _ in range(k):
    a, b = map(int, input().split())
    bd[a][b] = -1
    bd[b][a] = 1

# Floyd-Warshall-style transitive closure of the precedence relation.
for k in range(1, n + 1):
    for i in range(1, n + 1):
        for j in range(i + 1, n + 1):
            if bd[i][k] and bd[i][k] == bd[k][j]:
                bd[i][j] = bd[i][k]
                bd[j][i] = -bd[i][k]

for _ in range(int(input())):
    a, b = map(int, input().split())
    print(bd[a][b])
| true |
0cd1f3f9a9782bd8281207a0c42146c5d24714e9 | Python | VCBE123/combo_nas | /combo_nas/arch_space/predefined/mobilenetv2.py | UTF-8 | 5,223 | 2.578125 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
from ...arch_space.constructor import Slot
from collections import OrderedDict
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def MobileInvertedConv(chn_in, chn_out, C, stride, activation):
    """Inverted bottleneck: 1x1 expand -> 3x3 depthwise (stride) -> 1x1 project."""
    expand = [
        nn.Conv2d(chn_in, C, kernel_size=1, bias=False),
        nn.BatchNorm2d(C),
        activation(inplace=True),
    ]
    depthwise = [
        nn.Conv2d(C, C, kernel_size=3, stride=stride, padding=1, bias=False, groups=C),
        nn.BatchNorm2d(C),
        activation(inplace=True),
    ]
    project = [
        nn.Conv2d(C, chn_out, kernel_size=1, bias=False),
        nn.BatchNorm2d(chn_out),
    ]
    return nn.Sequential(*(expand + depthwise + project))
class MobileInvertedResidualBlock(nn.Module):
    """Inverted residual block with a searchable inner convolution (Slot).

    The identity skip connection is applied only when stride == 1 and the
    input/output channel counts match. Expansion width C = chn_in * t.
    """

    def __init__(self, chn_in, chn_out, stride=1, t=6, activation=nn.ReLU6):
        super(MobileInvertedResidualBlock, self).__init__()
        self.stride = stride
        self.t = t
        self.chn_in = chn_in
        self.chn_out = chn_out
        self.conv = Slot(chn_in, chn_out, stride, C=chn_in * t, activation=activation)

    def forward(self, x):
        residual = x
        out = self.conv(x)
        if self.stride == 1 and self.chn_in == self.chn_out:
            out += residual
        return out
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone built from searchable inverted-residual blocks.

    Channel counts are scaled by `scale` and rounded to multiples of 8;
    `t` is the default expansion ratio of the bottlenecks.
    """

    def __init__(self, chn_in=3, scale=1.0, t=6, n_classes=1000, activation=nn.ReLU6):
        super(MobileNetV2, self).__init__()
        self.scale = scale
        self.t = t
        self.activation_type = activation
        self.activation = activation(inplace=True)
        self.n_classes = n_classes

        # Per-stage output channels / block repeats / strides (paper table).
        self.num_of_channels = [32, 16, 24, 32, 64, 96, 160, 320]
        self.c = [_make_divisible(ch * self.scale, 8) for ch in self.num_of_channels]
        self.n = [1, 1, 2, 3, 4, 3, 3, 1]
        self.s = [2, 1, 2, 2, 2, 1, 2, 1]

        self.conv1 = nn.Conv2d(chn_in, self.c[0], kernel_size=3, bias=False, stride=self.s[0], padding=1)
        self.bn1 = nn.BatchNorm2d(self.c[0])
        self.bottlenecks = self._make_bottlenecks()

        # Last convolution has 1280 output channels for scale <= 1.
        self.last_conv_out_ch = 1280 if self.scale <= 1 else _make_divisible(1280 * self.scale, 8)
        self.conv_last = nn.Conv2d(self.c[-1], self.last_conv_out_ch, kernel_size=1, bias=False)
        self.bn_last = nn.BatchNorm2d(self.last_conv_out_ch)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.dropout = nn.Dropout(p=0.2, inplace=True)  # rate confirmed by paper authors
        self.fc = nn.Linear(self.last_conv_out_ch, self.n_classes)

    def _make_stage(self, chn_in, chn_out, n, stride, t, stage):
        """Stack `n` inverted-residual blocks; only the first uses `stride`."""
        blocks = OrderedDict()
        stage_name = "MobileInvertedResidualBlock_{}".format(stage)
        blocks[stage_name + "_0"] = MobileInvertedResidualBlock(
            chn_in=chn_in, chn_out=chn_out, stride=stride, t=t,
            activation=self.activation_type)
        for i in range(n - 1):
            blocks[stage_name + "_{}".format(i + 1)] = MobileInvertedResidualBlock(
                chn_in=chn_out, chn_out=chn_out, stride=1, t=6,
                activation=self.activation_type)
        return nn.Sequential(blocks)

    def _make_bottlenecks(self):
        """Assemble all bottleneck stages; the first stage uses t=1."""
        stages = OrderedDict()
        stage_name = "Bottlenecks"
        stages[stage_name + "_0"] = self._make_stage(
            chn_in=self.c[0], chn_out=self.c[1], n=self.n[1], stride=self.s[1],
            t=1, stage=0)
        for i in range(1, len(self.c) - 1):
            stages[stage_name + "_{}".format(i)] = self._make_stage(
                chn_in=self.c[i], chn_out=self.c[i + 1], n=self.n[i + 1],
                stride=self.s[i + 1], t=self.t, stage=i)
        return nn.Sequential(stages)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.activation(x)
        x = self.bottlenecks(x)
        x = self.conv_last(x)
        x = self.bn_last(x)
        x = self.activation(x)
        x = self.avgpool(x)
        x = self.dropout(x)
        # Flatten for the fully-connected classifier head.
        x = x.view(x.size(0), -1)
        return self.fc(x)

    def get_default_converter(self):
        """Default Slot -> concrete module converter for the search space."""
        return lambda slot: MobileInvertedConv(slot.chn_in, slot.chn_out, stride=slot.stride, **slot.kwargs)
def mobilenetv2(config):
    """Build a MobileNetV2 from a config exposing `channel_in` and `classes`."""
    return MobileNetV2(chn_in=config.channel_in, n_classes=config.classes)
| true |
c235175b41a857df4d88cb1e20229bbb164ca3ff | Python | fallpeindafall/SGBD_JSON | /backend.py | UTF-8 | 21,743 | 2.703125 | 3 | [] | no_license | #This is the class where all methods the API use are going to be put
import os
from flask import json
class Backend:
    """JSON-file-backed mini-DBMS behind the Flask API."""

    # Name of the database selected via `use` ('' means none selected).
    DATABASE_IN_USE = ''
    # Directory holding every database JSON file plus config/users metadata.
    DATABASE_DIRECTORY_PATH = "Databases/"
    # Login of the authenticated user ('' until `authenticate` succeeds).
    CURRENT_USER = ''
# LDD -----------------------------------------------------------------------------------
#Creating a database
def create_database(self, db_name):
    """Create `<db_name>.json` and log the creation in config.json.

    Returns a human-readable status string; existence is probed by
    attempting to open the file for reading.
    """
    db_path = self.DATABASE_DIRECTORY_PATH + db_name + ".json"
    try:
        f = open(db_path, "r")
        return "Database " + db_name + " already exists !"
    except:
        f = open(db_path, "w+")
        config = open(self.DATABASE_DIRECTORY_PATH + "config.json", "a+")
        config.write('\n')
        # config line format: "<user>-<database>-CREATION"
        config.write(self.CURRENT_USER + '-' + db_name + '-' + 'CREATION')
        config.close()
        return "database '" + db_name + "' created successfully !"
    finally:
        f.close()
# Creating a table
def create_table(self, table_data):
    """Register a table (name + fields) in config.json and add an empty
    object for it in the current database file."""
    if self.DATABASE_IN_USE == '':
        return "No database selected !"
    table_data = json.loads(table_data)

    # Append "user-db-table-field1-field2-..." to the config file.
    config_file = open(self.DATABASE_DIRECTORY_PATH + "config.json", "a+")
    data = self.CURRENT_USER + '-' + self.DATABASE_IN_USE + '-' + table_data['table_name']
    for field in table_data['fields']:
        data += '-'
        data += field
    print(data)
    config_file.write('\n')
    config_file.write(data)
    config_file.close()

    # Add the new (empty) table to the database structure itself.
    with open(self.DATABASE_DIRECTORY_PATH + self.DATABASE_IN_USE + ".json", "r") as database_file:
        try:
            db_data = json.load(database_file)
        except:
            # Empty/invalid database file: start from a fresh object.
            db_data = {}
        finally:
            db_data[table_data['table_name']] = {}
    with open(self.DATABASE_DIRECTORY_PATH + self.DATABASE_IN_USE + ".json", "w") as updated_database_file:
        json.dump(db_data, updated_database_file)
    return "Table " + table_data['table_name'] + " has been created successfully !"
#Create a user
def create_user(self, user_credentials):
    """Append a "login-password" line to users.txt (stored in plain text)."""
    creds = json.loads(user_credentials)
    with open(self.DATABASE_DIRECTORY_PATH + 'users.txt', 'a') as user_file:
        user_file.write('\n')
        user_file.write(creds['login'] + "-" + creds['password'])
    return "The user has been recorded successfully"
# Dropping a database
def drop_database(self, db_name):
    """Delete the database file if it exists; return a status string."""
    db_path = self.DATABASE_DIRECTORY_PATH + db_name + ".json"
    if not os.path.exists(db_path):
        return "database " + db_name + " doesn't exist"
    os.remove(db_path)
    return "database " + db_name + " deleted successfully"
#Dropping a table
def drop_table(self, table_name):
    """Placeholder: only checks that a database is currently selected."""
    if self.DATABASE_IN_USE == '':
        return "No database selected !"
    return "dropping table " + table_name + "..."
#Use a database
def use(self, db_name):
    """Select `db_name` as the active database.

    The database must both exist on disk and be owned by the current user.
    """
    # Pre-bind `f` to a throwaway file so the `finally: f.close()` below is
    # safe even when opening the database itself raises.
    f = open(self.DATABASE_DIRECTORY_PATH + "unecessary")
    try:
        f = open(self.DATABASE_DIRECTORY_PATH + db_name + ".json", "r")
        if db_name not in self.get_databases_of_user(self.CURRENT_USER):
            return "database '" + db_name + "' doesn't exist !"
        self.DATABASE_IN_USE = db_name
        return "database '" + db_name + "' in use!"
    except:
        return "database '" + db_name + "' doesn't exists !"
    finally:
        f.close()
# END LDD -----------------------------------------------------------------------------------
#LMD ----------------------------------------------------------------------------------------
#Insert data into database
def insert(self, request_data):
    """Insert one record; fields not supplied are stored as the string "Null".

    Record ids are 1-based strings derived from the current row count.
    """
    if self.DATABASE_IN_USE == '':
        return "No database selected !"
    request_data = json.loads(request_data)
    table_name = request_data['table']
    fields = request_data['fields']
    values = request_data['values']

    # The target table must exist in the selected database.
    if table_name not in self.get_tables_of(self.DATABASE_IN_USE):
        return table_name + ' doesn\'t exists in the current database !'

    # Every supplied field must be declared for the table.
    existing_fields = self.get_fields_of(table_name)
    for field in fields:
        if field not in existing_fields:
            return 'A field named ' + field + " doesn\'t exist in the table !"

    # Pad out any declared field the caller omitted.
    for field in existing_fields:
        if field not in fields:
            fields.append(field)
            values.append("Null")

    with open(self.DATABASE_DIRECTORY_PATH + self.DATABASE_IN_USE + '.json', 'r') as database_file:
        database = json.load(database_file)
    print(database[table_name])
    id = len(database[table_name])

    if len(fields) != len(values):
        return 'There must be as much fields as values !'

    database[table_name][str(id + 1)] = dict(zip(fields, values))

    with open(self.DATABASE_DIRECTORY_PATH + self.DATABASE_IN_USE + '.json', 'w') as updated_database:
        json.dump(database, updated_database)
    return "The record has been inserted successfully"
#Updating items
def update(self, request_data):
    """Set one field to a new value.

    `id` >= 1 targets that single record; `id` == -1 (or any other
    negative) updates every record; an id beyond the row count only logs
    "Cannot be update".
    """
    if self.DATABASE_IN_USE == '':
        return "No database selected !"
    loaded_data = json.loads(request_data)
    field_to_update, new_value = loaded_data['field_value']
    print(loaded_data)
    given_id = int(loaded_data['id'])

    with open(self.DATABASE_DIRECTORY_PATH + self.DATABASE_IN_USE + '.json', 'r') as file_data:
        loaded_database = json.load(file_data)
    table_name = loaded_data['table']
    table_data = loaded_database[table_name]

    if given_id != -1 and given_id <= len(table_data):
        table_data[str(given_id)][field_to_update] = new_value
    elif given_id > len(table_data):
        print("Cannot be update")
    else:
        # Apply the update to every record of the table.
        for i in range(1, len(table_data) + 1):
            table_data[str(i)][field_to_update] = new_value

    with open(self.DATABASE_DIRECTORY_PATH + self.DATABASE_IN_USE + '.json', 'w') as updated_database:
        json.dump(loaded_database, updated_database)
    return "Record updated successfully..."
#Deleting items
def delete(self, request_data):
    """Delete one record (`id` >= 1) or clear the whole table (`id` == -1)."""
    if self.DATABASE_IN_USE == '':
        return "No database selected !"
    loaded_data = json.loads(request_data)
    print(loaded_data)
    given_id = loaded_data['id']
    print(given_id)
    given_id = int(given_id)

    with open(self.DATABASE_DIRECTORY_PATH + self.DATABASE_IN_USE + '.json', 'r') as file_data:
        loaded_database = json.load(file_data)
    table_name = loaded_data['table']
    table_data = loaded_database[table_name]

    if given_id != -1 and given_id <= len(table_data):
        # Clear the record's data, then remove its id entry.
        table_data[str(given_id)].clear()
        del table_data[str(given_id)]
    elif given_id == -1:
        loaded_database[table_name] = {}
    else:
        print("Cannot be deleted")

    with open(self.DATABASE_DIRECTORY_PATH + self.DATABASE_IN_USE + '.json', 'w') as updated_database:
        json.dump(loaded_database, updated_database)
    return "Data successfully deleted!"
#END LMD ------------------------------------------------------------------------------------
# LED -----------------------------------------------------------------------------------
#Fetch data from database
def select(self, request_data):
    """Return the requested fields of every record as a formatted string."""
    if self.DATABASE_IN_USE == '':
        return "No database selected !"
    request_data = json.loads(request_data)
    f = open(self.DATABASE_DIRECTORY_PATH + self.DATABASE_IN_USE + ".json", 'r')
    data = json.load(f)
    table_name = request_data['table']
    fields = request_data['fields']

    # The table must exist in the current database.
    if table_name not in self.get_tables_of(self.DATABASE_IN_USE):
        return 'table ' + table_name + 'doesn\'t exist in the current database !'

    # Every requested field must be declared for the table.
    existing_fields = self.get_fields_of(table_name)
    for field in fields:
        if field not in existing_fields:
            return field + " doesn\'t exist in the table " + table_name

    data = data[table_name]
    if data == {}:
        return "Table " + table_name + " has no record !"

    response = ""
    for id in data:
        response += 'id: ' + id + '\n'
        record = data[id]
        for key in record:
            if key in fields:
                response += str(key).upper() + ' : ' + record[key]
        response += "\n"
        response += "-----------------\n"
    return response
#END LED -----------------------------------------------------------------------------------
#Other -------------------------------------------------------------------------------------
#GET USER DATABASES
def get_databases_of_user(self, user_name):
    """List the database names recorded for `user_name` in config.json."""
    config_file = open(self.DATABASE_DIRECTORY_PATH + 'config.json', 'r')
    databases = []
    # config lines look like "<user>-<database>-...".
    for line in config_file.readlines():
        parts = line.split('-')
        if parts[0] == user_name:
            databases.append(parts[1])
    return databases
#GET DATABASE FIELDS
def get_tables_of(self, database_name):
    """List the table names recorded for `database_name` in config.json."""
    config_file = open(self.DATABASE_DIRECTORY_PATH + 'config.json', 'r')
    tables = []
    # config lines look like "<user>-<database>-<table>-...".
    for line in config_file.readlines():
        parts = line.split('-')
        if parts[1] == database_name:
            tables.append(parts[2])
    return tables
#GET TABLE FIELDS
def get_fields_of(self, table):
    """Return the field names of `table` for the current user/database.

    Reads config.json lines of the form "user-db-table-f1-f2-...";
    returns None when no matching entry exists.
    """
    if self.DATABASE_IN_USE == '':
        return "No database selected !"
    config_file = open(self.DATABASE_DIRECTORY_PATH + 'config.json', 'r')
    for line in config_file.readlines():
        parts = line.split('-')
        if parts[0] == self.CURRENT_USER and parts[1] == self.DATABASE_IN_USE and parts[2] == table:
            # Drop user, database and table name; strip the trailing
            # newline from the final field.
            fields = parts[3:]
            fields[-1] = fields[-1].strip()
            config_file.close()
            return fields
    return None
#SYNTAX CHECKER
def validate(self, query):
    """Placeholder for the SQL syntax checker; echoes the query back."""
    return f"this is where the presumed SQL query : '{query}' will be validated ! "
#AUTHENTICATION
def authenticate(self, login, password):
    """Check credentials against users.txt ("login-password" per line).

    On success, remember the user and return "true"; otherwise "false"
    (string results, as the HTTP API expects).
    """
    user_file = open(self.DATABASE_DIRECTORY_PATH + 'users.txt', 'r')
    for entry in user_file.readlines():
        parts = entry.split('-')
        if parts[0] == login and parts[1].strip() == password:
            self.CURRENT_USER = login
            return "true"
    return "false"
#SEMANTIC ANALYSER
def dispatcher(self , user_input):
# Semantic analyser: tokenise a pseudo-SQL string and map it to a dict
# describing the operation ('nature' key) plus its arguments.
# NOTE(review): indentation was lost in this dump, so the nesting level the
# `else` below (token flush) attaches to cannot be confirmed — verify
# against the original file before relying on the tokeniser's behaviour.
requete = user_input
# working dicts for each statement kind
database ={}
table = {}
select ={}
insert = {}
update = {}
show = {}
user = {}
value = []
requete=requete.lower()
tabRequet=[]
champ = []
mot= ""
j=0
k=0
long = len(requete)
# Tokenise the request into tabRequet, dropping separator characters
# (space, '=', parentheses, quotes, commas).
for i in range (0,long):
if((requete[i] != " ") and (requete[i] != "=")):
if(requete[i] != "("):
if(requete[i] != ")"):
if(requete[i] != "'" ):
if(requete[i] !="," ):
mot = mot + requete[i]
else:
tabRequet.append(mot)
mot = ""
j=j+1
# AUTHENTICATION: "credentials <login> <password>"
if(tabRequet[0] == "credentials"):
authentication = {
'nature': "authentication",
'login': tabRequet[1],
'password': tabRequet[2]
}
return authentication
# CREATE statements (database / user / table)
elif(tabRequet[0]=="create"):
if(tabRequet[1]=="database"):
database = {
'nature': "create_database",
'database_name': tabRequet[2]
}
return database
elif(tabRequet[1]=="user"):
user = {
'nature': "create_user",
'login': tabRequet[2],
'password': tabRequet[5]
}
return user
elif(tabRequet[1]=="table"):
# tokens after the table name are its field names
for i in range (3,len(tabRequet)):
champ.append(tabRequet[i])
table = {
'nature': "create_table",
'table_name': tabRequet[2],
'fields': champ
}
return table
else:
return {
'nature':'error',
'error_msg':'Check create command syntax'
}
# DROP statements
elif(tabRequet[0]=="drop"):
if(tabRequet[1]=="database"):
database = {
'nature': "drop_database",
'database_name': tabRequet[2]
}
return database
elif(tabRequet[1]=="table"):
table = {
'nature': "drop_table",
'table_name': tabRequet[2]
}
return table
else:
return {
'nature':'error',
'error_msg':'Check drop command syntax'
}
# SELECT statements ("select * from t" expands * via get_fields_of)
elif(tabRequet[0]=="select"):
table_name = tabRequet[(len(tabRequet)-1)]
if(tabRequet[1]=="*"):
# Get all fields of the table
fields = self.get_fields_of(table_name)
if fields == None:
return {
'nature':'error',
'error_msg': 'table ' + table_name + ' doesn\'t exists in the currently selected database'
}
select = {
'nature': "select",
'table':table_name,
'fields': fields
}
else:
# explicit field list: every token between SELECT and FROM
for i in range (1,(len(tabRequet)-1)):
if(tabRequet[i] != "from"):
champ.append(tabRequet[i])
select = {
'nature': "select",
'table': table_name,
'fields':champ
}
return select
# INSERT statements: fields before the "values" keyword, values after
elif(tabRequet[0] == "insert"):
for i in range (3,(len(tabRequet)-1)):
if(tabRequet[i] == "values"):
break
else:
k=k+1
champ.append(tabRequet[i])
for i in range ( ( 4 + k ),( len(tabRequet) )):
value.append(tabRequet[i])
insert = {
'nature': "insert",
'table': tabRequet[2],
'fields':champ,
'values': value
}
return insert
# UPDATE statements: id -1 when there is no WHERE clause
elif (tabRequet[0]=="update"):
for i in range (3,len(tabRequet)):
if(tabRequet[i] == "where"):
break
else:
k = k + 1
champ.append(tabRequet[i])
if((k+3) == len(tabRequet)):
update = {
'nature': "update",
'table': tabRequet[1],
'field_value': champ,
'id': -1
}
else:
update = {
'nature': "update",
'table': tabRequet[1],
'field_value':champ,
'id': tabRequet[(len(tabRequet)-1)],
}
return update
# DELETE statements: id -1 when no WHERE clause is present
elif(tabRequet[0]=="delete"):
for i in range (3,(len(tabRequet))):
if((tabRequet[i]!="and") and (tabRequet[i]!="where")):
champ.append(tabRequet[i])
if(len(champ)!=0):
delete = {
'nature': "delete",
'table': tabRequet[2],
'id':champ[1]
}
else:
delete = {
'nature': "delete",
'table': tabRequet[2],
'id':-1
}
return delete
# USE statements
elif(tabRequet[0] == "use"):
data = {
'nature' : "use",
'database_name':tabRequet[1]
}
return data
# SHOW statements
elif(tabRequet[0] == "show"):
show = {
'nature' : "show",
'table':"tables"
}
return show
else:
return {
'nature':'error',
'error_msg':'Command doesn\'t exist'
}
| true |
93876f263dec9f8fcc67da099dee855a6658ca47 | Python | abdulkadirkarakus/pythonOdevler | /tam.py | UTF-8 | 354 | 3.546875 | 4 | [] | no_license | #burası ödev degil
def tambolenleri(sayi):
    """Return the proper divisors of `sayi` (excluding 1 and the number itself)."""
    return [i for i in range(2, sayi) if sayi % i == 0]
while True:
    # Bug fix: the original did int(input(...)) up front, so typing "q"
    # raised ValueError and the quit branch (int == "q") could never match;
    # it also never actually left the loop (no break).
    sayi = input("sayi:")
    if sayi == "q":
        print("program sonlandırıldı")
        break
    print("Tam bölenler:", tambolenleri(int(sayi)))
| true |
ab07b840458dd3029d0b8d2ad576a10468524b26 | Python | dymx101/InvestigateTextsAndCalls-Udacity | /Task4.py | UTF-8 | 1,363 | 3.3125 | 3 | [] | no_license | """
下面的文件将会从csv文件中读取读取短信与电话记录,
你将在以后的课程中了解更多有关读取文件的知识。
"""
import csv
with open('texts.csv', 'r') as f:
    texts = list(csv.reader(f))

with open('calls.csv', 'r') as f:
    calls = list(csv.reader(f))

text_senders, text_receivers, text_times = zip(*texts)
call_makers, call_receivers, call_times, call_durations = zip(*calls)

# A telemarketer only ever places calls: it never sends or receives a
# text and never receives a call. Performance fix: the original tested
# membership against lists inside a loop (O(n) per lookup, O(n^2)
# overall); sets make each lookup O(1).
non_telemarketers = set(text_senders) | set(text_receivers) | set(call_receivers)
telemarketers = sorted(set(call_makers) - non_telemarketers)

print("These numbers could be telemarketers: ")
for telemarketer in telemarketers:
    print(telemarketer)

"""
任务4:
电话公司希望辨认出可能正在用于进行电话推销的电话号码。
找出所有可能的电话推销员:
这样的电话总是向其他人拨出电话,
但从来不发短信、接收短信或是收到来电
请输出如下内容
"These numbers could be telemarketers: "
<list of numbers>
电话号码不能重复,每行打印一条,按字典顺序排序后输出。
"""
| true |
c06740d5feeb8aa11131e359cdc8f932f1eb302a | Python | shubhransujana19/Python | /love_counting.py | UTF-8 | 1,087 | 3.671875 | 4 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 1 17:50:20 2021
@author: shubhransu
"""
boy_name = input("Enter your boyfriend name")
girl_name = input("Enter your girlfriend name")

# Count how often each letter of "true love" occurs across both names.
both_names = boy_name + girl_name
t = both_names.count("t")
r = both_names.count("r")
u = both_names.count("u")
e = both_names.count("e")
l = both_names.count("l")
o = both_names.count("o")
v = both_names.count("v")

n1 = (t + r + u + e) * 10
n2 = (l + o + v + e)
love_percentage = n1 + n2
print("Your love percentage is:", love_percentage)

# Bug fix: the original used the bitwise `&` operator, which binds tighter
# than comparisons — `a < 10 & a > 90` parses as the chained comparison
# `a < (10 & a) > 90` — so both special-message branches were unreachable.
# Intent (per the messages) appears to be "extreme score" / "40..50 range".
if love_percentage < 10 or love_percentage > 90:
    print(f"Your score is{love_percentage},you go together like coke and mentos ")
elif 40 < love_percentage < 50:
    print(f"Your score is {love_percentage}, you are alright together.")
else:
    print(f"Your score is{love_percentage}")
| true |
0e454e08fadfe4631280316f39a98400a37a3e93 | Python | Rockrs/Algorithm_DS | /rearrange_array_gfs.py | UTF-8 | 564 | 3.71875 | 4 | [] | no_license |
##Complete this code
def arrange(arr, n):
    """Rearrange `arr` in place so arr[i] becomes arr[arr[i]], O(1) extra space.

    Encoding trick: each slot temporarily stores old + new*n; `% n`
    recovers the original value while scanning, and a final `// n` pass
    extracts the new values. Assumes every element is in range [0, n).
    """
    for i in range(n):
        arr[i] += (arr[arr[i]] % n) * n
    for i in range(n):
        arr[i] //= n
#{
# Driver Code Starts
#Initial Template for Python 3
import math
def main():
    """Read T test cases (size, then the array), rearrange each, and
    print the result space-separated, one case per line."""
    T = int(input())
    while T > 0:
        n = int(input())
        arr = [int(x) for x in input().strip().split()]
        arrange(arr, n)
        for val in arr:
            print(val, end=" ")
        print()
        T -= 1


if __name__ == "__main__":
    main()
# } Driver Code Ends
| true |
9df02f0500dc0072ad0743ea31ea553c28f464bd | Python | andrewparkermorgan/snoop | /io.py | UTF-8 | 2,760 | 2.59375 | 3 | [] | no_license | #! /usr/bin/env python
## --- snoop/io.py --- ##
## Date: 21 Feb 2014
## Updated: 7 Aug 2014
## Purpose: miscellaneous utility functions for checking, reading, writing files from command-line args
import os
import sys
import argparse
import csv
import subprocess
## functions for command-line argument validation; intended to work with argparse module
def expand_all(path):
    """Expand '~' and environment variables in `path`."""
    return os.path.expanduser(os.path.expandvars(path))
def readable_dir(indir):
    """argparse ``type=`` validator: *indir* must be a readable directory.

    Returns the expanded path on success.

    BUG FIX: the original raised ``argparse.ArgumentError(msg)``, but that
    class requires (argument, message) and would itself crash with a
    TypeError; ``ArgumentTypeError`` is what argparse expects from
    ``type=`` callables.
    """
    indir = expand_all(indir)
    if not os.path.isdir(indir):
        raise argparse.ArgumentTypeError("readable_dir:{0} is not a valid path".format(indir))
    if os.access(indir, os.R_OK):
        return indir
    raise argparse.ArgumentTypeError("readable_dir:{0} is not a readable dir".format(indir))
def writeable_dir(indir):
    """argparse ``type=`` validator: *indir* must be a writeable directory.

    BUG FIX: uses ``ArgumentTypeError`` -- ``ArgumentError(msg)`` needs two
    arguments and would raise a TypeError instead of reporting the problem.
    """
    indir = expand_all(indir)
    if not os.path.isdir(indir):
        raise argparse.ArgumentTypeError("writeable_dir:{0} is not a valid path".format(indir))
    if os.access(indir, os.W_OK):
        return indir
    raise argparse.ArgumentTypeError("writeable_dir:{0} is not a writeable dir".format(indir))
def readable_file(infile):
    """argparse ``type=`` validator: *infile* must be a readable file.

    BUG FIX: uses ``ArgumentTypeError`` -- ``ArgumentError(msg)`` needs two
    arguments and would raise a TypeError instead of reporting the problem.
    """
    infile = expand_all(infile)
    if not os.path.isfile(infile):
        raise argparse.ArgumentTypeError("readable_file:{0} is not a valid file path".format(infile))
    if os.access(infile, os.R_OK):
        return infile
    raise argparse.ArgumentTypeError("readable_file:{0} is not a readable file".format(infile))
def writeable_file(infile):
    """argparse ``type=`` validator: *infile* must be a writeable file.

    BUG FIX: uses ``ArgumentTypeError`` -- ``ArgumentError(msg)`` needs two
    arguments and would raise a TypeError instead of reporting the problem.
    """
    infile = expand_all(infile)
    if not os.path.isfile(infile):
        raise argparse.ArgumentTypeError("writeable_file:{0} is not a valid file path".format(infile))
    if os.access(infile, os.W_OK):
        return infile
    raise argparse.ArgumentTypeError("writeable_file:{0} is not a writeable file".format(infile))
def readable_or_stdin(infile):
    """Validate *infile* as a readable path; "-" (stdin) passes through."""
    if infile == "-":
        return infile
    return readable_file(infile)
def readable_or_stdin_handle(infile):
    """Open *infile* for reading, or return ``sys.stdin`` when it is "-".

    FIX: the original used mode "rU"; the "U" flag was deprecated and
    removed in Python 3.11, and in Python 3 text mode already applies
    universal newlines, so plain "r" is the equivalent replacement.
    """
    if infile == "-":
        return sys.stdin
    return argparse.FileType("r")(infile)
def writeable_or_stdout_handle(infile):
    """Open *infile* for writing, or return ``sys.stdout`` when it is "-"."""
    return sys.stdout if infile == "-" else argparse.FileType("w")(infile)
def comma_list(value):
    """Split a comma-separated argument string into its parts."""
    parts = value.split(",")
    return parts
def list_from_file(infile):
    """Read the first CSV column of *infile* into a list of strings.

    Raises ``argparse.ArgumentTypeError`` when the file is missing or
    unreadable.

    FIXES: (1) the original raised ``argparse.ArgumentError(msg)``, which
    needs (argument, message) and would crash with a TypeError;
    (2) open mode "rU" was removed in Python 3.11 -- plain "r" is
    equivalent in Python 3.
    """
    if not (os.path.isfile(infile) and os.access(infile, os.R_OK)):
        raise argparse.ArgumentTypeError("list_from_file:{0} is not a readable file".format(infile))
    values = []
    with open(infile, "r") as ff:
        for row in csv.reader(ff, delimiter=","):
            # First column only; a blank line would raise IndexError
            # (unchanged from the original behaviour).
            values.append(row[0])
    return values
def count_lines(fname):
    """Count the lines in *fname* by shelling out to ``wc -l``.

    Raises IOError (carrying wc's stderr) when the command fails.
    """
    proc = subprocess.Popen(["wc", "-l", fname],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise IOError(err)
    first_field = out.strip().split()[0]
    return int(first_field)
| true |
055869dc64842f3bc738c82caea21244d61725d8 | Python | JetLaggedCode/Sample-Projects | /def_dfs.py | UTF-8 | 841 | 2.671875 | 3 | [] | no_license | start = [problem.getStartState()]
# NOTE(review): this file is a fragment of a (Python 2) depth-first search --
# `problem` is never defined here, `return` appears at module level, and
# `print Open` is Python 2 syntax, so it cannot run as-is.  The comments
# below describe the apparent intent only; TODO: wrap in a function.
for item in start:
    Open=[item]      # frontier, used as a LIFO stack -> depth-first order
    Closed=[]        # states already expanded
    Path=[]          # NOTE(review): never appended to -- always returned empty
    if problem.isGoalState(Open[0]) is True:
        return
    else:
        count=0
        while Open:
            if count==0:
                visit=Open.pop()        # first pop: a bare state
            else:
                temp=Open.pop()         # later pops: (state, action) tuples
                visit=temp[0]
            Closed.append(visit)
            if problem.isGoalState(visit) is True:
                return Path
            else:
                Successors= problem.getSuccessors(visit)
                for index in Successors:
                    # Skip states already expanded (graph search).
                    if index[0] not in Closed :
                        Open.append((index[0],index[1]))
                print Open              # Python 2 debug print
            count=count+1
| true |
9b0c288eb3cf29b43f57f6f7d7343c727e318e7a | Python | Youngseok0001/ML | /tensorflow_tuts/carvana-challenge/src/nn/train_callbacks.py | UTF-8 | 5,578 | 2.71875 | 3 | [
"MIT"
] | permissive | import cv2
import torch
import numpy as np
import scipy.misc as scipy
from tensorboardX import SummaryWriter
class Callback:
    """Abstract training callback; subclasses must implement ``__call__``."""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError
class TensorboardVisualizerCallback(Callback):
    def __init__(self, path_to_files):
        """
        Callback intended to be executed at each epoch
        of the training which goal is to display the result
        of the last validation batch in Tensorboard
        Args:
            path_to_files (str): The path where to store the log files
        """
        self.path_to_files = path_to_files
    def _apply_mask_overlay(self, image, mask, color=(0, 255, 0)):
        # Broadcast the 1-channel mask to 3 channels, tint it, then blend
        # 50/50 with the image.
        mask = np.dstack((mask, mask, mask)) * np.array(color)
        mask = mask.astype(np.uint8)
        return cv2.addWeighted(mask, 0.5, image, 0.5, 0.) # image * α + mask * β + λ
    def _get_mask_representation(self, image, mask):
        """
        Given a mask and an image this method returns
        one image representing 3 patches of the same image.
        These patches represent:
            - The original image
            - The original mask
            - The mask applied to the original image
        Args:
            image (np.ndarray): The original image
            mask (np.ndarray): The predicted mask
        Returns (np.ndarray):
            An image of size (original_image_height, (original_image_width * 3))
            showing 3 patches of the original image
        """
        H, W, C = image.shape
        results = np.zeros((H, 3 * W, 3), np.uint8)
        p = np.zeros((H * W, 3), np.uint8)
        # NOTE(review): m stays all-zero, so a == 2*l below and only the
        # "miss" bucket can ever be non-empty; m looks like a placeholder
        # for a second (ground-truth) mask -- confirm against original repo.
        m = np.zeros((H * W), np.uint8)
        l = mask.reshape(-1)
        masked_img = self._apply_mask_overlay(image, mask)
        a = (2 * l + m)
        miss = np.where(a == 2)[0]
        hit = np.where(a == 3)[0]
        fp = np.where(a == 1)[0]
        p[miss] = np.array([0, 0, 255])
        p[hit] = np.array([64, 64, 64])
        p[fp] = np.array([0, 255, 0])
        p = p.reshape(H, W, 3)
        # Three side-by-side patches: original | colour-coded mask | overlay.
        results[:, 0:W] = image
        results[:, W:2 * W] = p
        results[:, 2 * W:3 * W] = masked_img
        return results
    def __call__(self, *args, **kwargs):
        # Only act at epoch boundaries.
        if kwargs['step_name'] != "epoch":
            return
        epoch_id = kwargs['epoch_id']
        last_images, last_targets, last_preds = kwargs['last_val_batch']
        writer = SummaryWriter(self.path_to_files)
        for i, (image, target_mask, pred_mask) in enumerate(zip(last_images, last_targets, last_preds)):
            # Tensors -> uint8 numpy arrays, channel-first -> channel-last.
            image = image.data.float().cpu().numpy().astype(np.uint8)
            image = np.transpose(image, (1, 2, 0)) # Invert c, h, w to h, w, c
            target_mask = target_mask.float().data.cpu().numpy().astype(np.uint8)
            pred_mask = pred_mask.float().data.cpu().numpy().astype(np.uint8)
            if image.shape[0] > 256: # We don't want the images on tensorboard to be too large
                # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3;
                # this code requires an old SciPy (or a port to PIL/cv2).
                image = scipy.imresize(image, (256, 256))
                target_mask = scipy.imresize(target_mask, (256, 256))
                pred_mask = scipy.imresize(pred_mask, (256, 256))
            expected_result = self._get_mask_representation(image, target_mask)
            pred_result = self._get_mask_representation(image, pred_mask)
            writer.add_image("Epoch_" + str(epoch_id) + '-Image_' + str(i + 1) + '-Expected', expected_result, epoch_id)
            writer.add_image("Epoch_" + str(epoch_id) + '-Image_' + str(i + 1) + '-Predicted', pred_result, epoch_id)
            if i == 1: # 2 Images are sufficient
                break
        writer.close()
class TensorboardLoggerCallback(Callback):
    def __init__(self, path_to_files):
        """
        Epoch-level callback that writes the train/validation losses and
        accuracies to Tensorboard.

        Args:
            path_to_files (str): The path where to store the log files
        """
        self.path_to_files = path_to_files

    def __call__(self, *args, **kwargs):
        # Only act at epoch boundaries.
        if kwargs['step_name'] != "epoch":
            return
        epoch_id = kwargs['epoch_id']
        writer = SummaryWriter(self.path_to_files)
        for metric in ("train_loss", "train_acc", "val_loss", "val_acc"):
            writer.add_scalar("data/" + metric, kwargs[metric], epoch_id)
        writer.close()
class ModelSaverCallback(Callback):
    def __init__(self, path_to_model, verbose=False):
        """
        Callback executed each time a whole train pass finishes; saves the
        model's state dict to the given path (plus an optional suffix).

        Args:
            verbose (bool): True or False to make the callback verbose
            path_to_model (str): The path where to store the model
        """
        self.verbose = verbose
        self.path_to_model = path_to_model
        self.suffix = ""

    def set_suffix(self, suffix):
        """
        Args:
            suffix (str): The suffix to append to the model file name
        """
        self.suffix = suffix

    def __call__(self, *args, **kwargs):
        # Only act when a full training pass has completed.
        if kwargs['step_name'] != "train":
            return
        destination = self.path_to_model + self.suffix
        torch.save(kwargs['net'].state_dict(), destination)
        if self.verbose:
            print("Model saved in {}".format(destination))
aed85de04458c6bb62ea51c62d8dc0ccb0f8f07c | Python | strikeraryu/multi-crop | /crop.py | UTF-8 | 1,200 | 2.9375 | 3 | [] | no_license | from PIL import Image
import time
import os
import sys
fnd = False
# Keep prompting until an image can actually be opened.
while not fnd:
    img_path = input("enter image name/path :- ")
    try:
        img = Image.open(img_path)
    except Exception:
        print("image not found")
    else:
        fnd = True
width, height = img.size
w_len = int(input("enter the width of cropped images :- "))
h_len = int(input("enter the height of cropped images :- "))
folder = False
# Keep prompting until a fresh output folder can be created.
while not folder:
    title = input("enter the base name :- ")
    try:
        os.mkdir(title)
    except FileExistsError:
        print("Error 001 enter new file name :- ")
    else:
        folder = True
n = 0
os.system('cls')
spinner = ('|', '/', '-', '\\')
# Tile the image into w_len x h_len crops, saving each one.
for i in range(0, width, w_len):
    for j in range(0, height, h_len):
        n += 1
        crp_img = img.crop((i, j, i + w_len, j + h_len))
        crp_img.save(title + "/" + title + "_" + str(n) + ".png")
        sys.stdout.write('\rloading ' + spinner[n % 4])
        time.sleep(0.1)
sys.stdout.write('\r!! Done !!')
time.sleep(5)
| true |
536dbc05c692d368032ffacc930e252b91542b4d | Python | Zach41/LeetCode | /123_best_time_to_buy_and_sell_stock_iii/solve.py | UTF-8 | 1,094 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding : utf-8 -*-
class Solution(object):
    def maxProfit(self, prices):
        """
        Maximum profit from at most two non-overlapping buy/sell
        transactions.

        :type prices: List[int]
        :rtype: int
        """
        count = len(prices)
        if count <= 1:
            return 0
        # best_before[i]: best single-transaction profit within prices[0..i]
        best_before = [0] * count
        lowest = prices[0]
        for i in range(1, count):
            best_before[i] = max(best_before[i - 1], prices[i] - lowest)
            lowest = min(lowest, prices[i])
        # best_after[i]: best single-transaction profit within prices[i..]
        best_after = [0] * count
        highest = prices[-1]
        for i in range(count - 2, -1, -1):
            best_after[i] = max(best_after[i + 1], highest - prices[i])
            highest = max(highest, prices[i])
        # Try every split point: one transaction ends by i, the other
        # starts at i.
        return max(best_before[i] + best_after[i] for i in range(count))
s = Solution()
print s.maxProfit([1, 2, 3, 4, 5, 6, 7])
import pdb
pdb.set_trace()
print s.maxProfit([0, 2, 1, 2, 7, 0, 8])
print s.maxProfit([1, 1, 1, 1])
print s.maxProfit([4, 3, 2, 1])
| true |
4ebd6397808e72063441368e18620b5a3dc43407 | Python | gamesdaco18/sincronizado | /fut_prom.py | UTF-8 | 1,824 | 3.234375 | 3 | [] | no_license | from tkinter import *
# Window configuration
root= Tk()
root.title("Fut Players %")
root.geometry("400x300")
root.config(bg="Gray17")
root.iconbitmap("python.ico")
root.resizable(0,0)
opcion = IntVar()  # selected markup option (1=10%, 2=15%, 3=20%; 0 = none)
num = IntVar()     # purchase price entered by the user
# Callback: compute the markup and display the resulting sale price.
def operacion():
    numero = num.get()
    if opcion.get()==1:
        total = (numero*10)/100
    elif opcion.get()==2:
        total = (numero*15)/100
    elif opcion.get()==3:
        total = (numero*20)/100
    else:
        # NOTE(review): with no radio button selected this makes the
        # increment equal to the price itself (a 100% markup) -- confirm
        # that is the intended fallback.
        total = numero + numero
    etiqueta3=Label(root,text= f"Precio de venta: {str(total + numero)}",bg="Gray17",font="Console 10 bold ", fg="Green2")
    etiqueta3.place(x=20, y=180)
etiqueta1 = Label(root, text="Valor compra: ",bg="Gray17", bd=5, font="Console 10 bold ", fg="Snow")
etiqueta1.place(x=20, y=20)
entrada1 = Entry(root,textvariable=num,bg="snow", bd=4, font="Console 10 bold ")
entrada1.place(x=150, y=20)
etiqueta2 = Label(root, text="Incrementar en un : ",bg="Gray17", bd=5, font="Console 10 bold ", fg="Snow")
etiqueta2.place(x=20, y=50)
x10 = Radiobutton(root,text="10%", value=1, bg="Gray17", bd=5, font="Console 10 bold ", fg="Snow",activeforeground="gray16",activebackground="grey",selectcolor="grey", variable=opcion)
x10.place(x=20, y=80)
x15 = Radiobutton(root,text="15%", value=2,bg="Gray17", bd=5, font="Console 10 bold", fg="Snow",activeforeground="gray16",activebackground="grey",selectcolor="grey", variable=opcion)
x15.place(x=70, y=80)
x20 = Radiobutton(root,text="20%", value=3,bg="Gray17", bd=5, font="Console 10 bold", fg="Snow",activeforeground="gray16",activebackground="grey",selectcolor="grey", variable=opcion)
x20.place(x=120, y=80)
boton1 = Button(root, text="Realizar operacion", font="Console 10 bold", bg="Snow",fg="Gray7", bd=4, command=operacion)
boton1.place(x=20, y=140)
root.mainloop()
27d18e2c1d75a70f68cd3ae40cf9d5fdbd60956d | Python | Krirati/jib-project | /safety/workers/tests/test_models.py | UTF-8 | 1,575 | 2.59375 | 3 | [] | no_license | import os
from unittest.mock import MagicMock
from django.core.files import File
from django.test import TestCase
from ..models import Worker
class TestWorker(TestCase):
    def test_worker_should_have_definded_field(self):
        """Creating a Worker should persist every field as given."""
        # Given: field values for a new Worker (file field mocked so no
        # real image is needed).
        first_name = 'Keng'
        last_name = 'Mak'
        is_availble = True
        primary_phone = '081-689-777x'
        secondary_phone = '081-687-778x'
        address = 'Geeky Base All Start'
        image_mock = MagicMock(spec=File)
        image_mock.name = 'nine.png'
        # When: the worker is created through the ORM.
        worker = Worker.objects.create(
            first_name = first_name,
            last_name = last_name,
            image_profile = image_mock,
            is_availble = is_availble,
            primary_phone = primary_phone,
            secondary_phone = secondary_phone,
            address = address,
        )
        # Then: reading the fields back returns the same values.
        assert worker.first_name == first_name
        assert worker.is_availble is True
        self.assertEqual(worker.first_name, first_name)
        self.assertEqual(worker.last_name, last_name)
        self.assertEqual(worker.image_profile.name, image_mock.name)
        # BUG FIX: assertTrue(x, y)'s second argument is the failure
        # *message*, not a comparison operand -- the original
        # `assertTrue(worker.is_availble, is_availble)` never compared
        # the two values.
        self.assertTrue(worker.is_availble)
        self.assertEqual(worker.primary_phone, primary_phone)
        self.assertEqual(worker.secondary_phone, secondary_phone)
        self.assertEqual(worker.address, address)
        # Clean up the file Django wrote for the mocked image.
        # NOTE(review): this won't run if an assertion above fails;
        # consider self.addCleanup instead.
        os.remove('media/nine.png')
| true |
c09cef4135ec05268acf2459bb144920ed3d781e | Python | rebekahjennifer/HackerRank_30DaysofCode | /Day10_Binarynumbers.py | UTF-8 | 387 | 3.25 | 3 | [] | no_license | import math
import os
import random
import re
import sys
def maxOnes(n):
    """Length of the longest run of consecutive 1-bits in n's binary form."""
    run_length = 0
    # ANDing n with itself shifted by one keeps a bit only where two
    # adjacent 1s existed, so each pass shortens every run by one.
    while n:
        n &= n << 1
        run_length += 1
    return run_length
if __name__ == '__main__':
    # Read one integer and print its longest run of consecutive 1-bits.
    value = int(input())
    print(maxOnes(value))
#Print a single base-10 integer denoting the maximum number of consecutive 1's in the binary representation of n .
| true |
41f0375c823adef2bb977e9f4a72d12fed913ce1 | Python | dhana2552/linreg-assignment | /app.py | UTF-8 | 669 | 2.734375 | 3 | [] | no_license | import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)

# Load the trained regression model once at startup.
# FIX: the original `pickle.load(open('model.pkl', 'rb'))` leaked the file
# handle; a `with` block closes it deterministically.
with open('model.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
@app.route('/')
def index():
    """Serve the landing page with the input form."""
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Read the posted form fields and render the model's price prediction."""
    features = np.array([int(value) for value in request.form.values()])
    prediction = model.predict([features])
    return render_template(
        'index.html',
        prediction_text='The Predicted Median value of owner-occupied homes in $1000\'s is {}'.format(prediction[0]))
if __name__=="__main__":
    # Development server only; debug=True must not be enabled in production.
    app.run(debug=True)
1308b90b509289869d6b37c983a76511764e3d8e | Python | anusha-sss/-TRAINING | /operators_assignment.py | UTF-8 | 413 | 4.34375 | 4 | [] | no_license | # character = 51
#
# if character < 3:
# print("name must be atleast three character")
# elif character > 50:
# print("name can be maximum of 50 character")
# else:
# print("name looks good")
# Sample input: two characters, so the "too short" branch fires.
name = "an"
if len(name) < 3 :
    # FIX: corrected the typos in the user-facing message
    # ("miust" -> "must", "atleast" -> "at least").
    print("name must be at least three characters ")
elif len(name) > 50:
    print("name can be maximum of 50 characters")
else:
    print("name looks good")
| true |
15377b183d8af73f4c1ffce0634b62f33511f8e7 | Python | pankaj307/Data_Structures_and_Algorithms | /LinkedList/printNthNodeFromLast.py | UTF-8 | 575 | 3.65625 | 4 | [] | no_license | import my_LinkedList
def NthLast(l1, n):
    """Print the data of the n-th node from the end of linked list *l1*.

    Classic two-pointer gap technique: advance a lead pointer n steps,
    then move both pointers until the lead falls off the tail.
    """
    if l1.head is None:
        print('Linked List is empty.')
        return
    lead = l1.head
    for _ in range(n):
        if lead is None:
            print('Linked List is smaller than given n.')
            return
        lead = lead.next
    trail = l1.head
    while lead is not None:
        lead = lead.next
        trail = trail.next
    print(trail.data)
# Build a demo list 10..60 and print its 3rd node from the end.
l1 = my_LinkedList.LinkedList()
for value in (10, 20, 30, 40, 50, 60):
    l1.insert(value)
n = 3
NthLast(l1, n)
9fef6a44f9cedc3366c2daa5493efb3101abf903 | Python | obabawale/my_python_codes | /guessgame.py | UTF-8 | 300 | 4.09375 | 4 | [] | no_license | import random
# Secret number (randint is inclusive on both ends).
comGuess = random.randint(0,100)
# Keep prompting until the player hits the secret number.
while True:
    userGuess = int(input("Enter a guess between 0 and 100: "))
    if userGuess == comGuess:
        print("Congratulations you've guessed right")
        break
    print("Guess lower" if userGuess > comGuess else "Guess higher")
9b60f8bb59161b8cab573b1f661df435de110948 | Python | 1094432175/ApiAutoTest | /day01/05.代理.py | UTF-8 | 772 | 2.53125 | 3 | [] | no_license | '''
设置代理
1.如果想抓包分析自动化发出去的报文,可以通过设置代理抓包
2.用一台电脑频繁访问某个网站,被网站认为是供给,将IP地址禁止,设置代理,换一个ip地址去访问
'''
import requests
proxy = {
"http":"http://127.0.0.1:8888", #http 协议,使用xxx代理
"https":"http://127.0.0.1:8888" #https 协议,使用xxx代理
}
proxy ={
"http":None,
"https":None
}
url = "http://192.168.150.54:8089/futureloan/mvc/api/member/list"
r = requests.get(url,proxies=proxy) #给需要抓包的接口设置代理
print(r.json())
url = "http://192.168.150.54:8089/futureloan/mvc/api/member/login?mobilephone=13821111111&pwd=123456"
r = requests.get(url)
print(r.json())
| true |
b17fd8c93f64f699cd784f6253dcb34d3750944d | Python | cjredmond/number_guesser | /very_hard_guesser.py | UTF-8 | 703 | 3.984375 | 4 | [] | no_license | import random
answer = int(input("Pick a number between 1 and 100: "))
# Re-prompt until the answer is in range.
# FIX: the original only rejected values >= 101; values below 1 were
# accepted even though the prompt promises a 1-100 range.
while answer < 1 or answer >= 101:
    print("You need to chose a number between 1 and 100")
    answer = int(input("Pick a number between 1 and 100: "))
print("You chose a valid number")
# Binary search: 7 halvings are enough for 1..100 (2**7 = 128 >= 100).
count = -1
min_guess = 1
max_guess = 100
while count < 7:
    comp_guess = int((min_guess + max_guess) / 2)
    print(comp_guess)
    if comp_guess == answer:
        print("Computer Wins!")
        break
    elif comp_guess < answer:
        print("Too Low")
        min_guess = (comp_guess + 1)
        count = count + 1
    else:
        # FIX: removed a duplicate print(comp_guess) that existed only in
        # this branch, echoing the guess twice.
        print("Too High")
        max_guess = (comp_guess - 1)
        count = count + 1
| true |
0a0751ab3abee0a95a08d633b0590237a438feb5 | Python | workprinond/DS_-_Algo_TechInterview_Practise | /Beginning/2ns.py | UTF-8 | 334 | 3.109375 | 3 | [] | no_license | def twons(array,targetsum):
nums ={}
for num in array:
potentialmatch = targetsum - num
if potentialmatch in nums:
return [potentialmatch,nums]
else:
nums[num]= True
return []
def main():
    """Demo driver: look for a pair summing to 10 in a fixed sample."""
    sample = [8, 2, -16, 23, 4]
    twons(sample, 10)

if __name__== "__main__":
    main()
| true |
c5b636dcc59f5122fd2022ec8967e43964f4025c | Python | leodegeus7/DeepLearning | /Volume 1 - Supervised Deep Learning/Part 1 - Artificial Neural Networks (ANN)/Section 4 - Building an ANN/ann.py | UTF-8 | 2,251 | 3.125 | 3 | [] | no_license | # Artificial Neural Network
# Installing Theano
# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git
# Installing Tensorflow
# pip install tensorflow
# Installing Keras
# pip install --upgrade keras
# Part 1 - Data Preprocessing
# Classification template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
# Load the churn dataset: columns 3-12 are features, column 13 the label.
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
# Encoding categorical data (country at column 1, gender at column 2).
# NOTE(review): OneHotEncoder(categorical_features=...) only exists in old
# scikit-learn releases; newer versions need ColumnTransformer.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
labelencoder_X_2 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
# Drop one dummy column to avoid the dummy-variable trap.
X = X[:,1:]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling: fit on the training set only, reuse for everything else.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Making the ANN: 11 inputs -> two hidden ReLU layers -> sigmoid.
import keras
from keras.models import Sequential
from keras.layers import Dense
classifier = Sequential()
classifier.add(Dense(units = 6, kernel_initializer='uniform',activation='relu',input_dim=11))
classifier.add(Dense(units = 6, kernel_initializer='uniform',activation='relu'))
classifier.add(Dense(units = 1, kernel_initializer='uniform',activation='sigmoid'))
classifier.compile(optimizer = 'adam',loss = 'binary_crossentropy',metrics=['accuracy'])
classifier.fit(X_train,y_train,batch_size=10,epochs=100)
# Predicting the Test set results (threshold the sigmoid output at 0.5).
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# BUG FIX: the original called sc.fit_transform() here, re-fitting the
# scaler on this single row (its own mean/std) instead of applying the
# training-set scaling; transform() uses the fitted statistics.
newPrediction = classifier.predict(sc.transform(np.array([[0,0,600,1,40,3,60000,2,1,1,50000]])))
newPrediction = (newPrediction > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
095ef8e2aac199b02463c4f2c9bc873e560b7c74 | Python | haverford-cs/meta-net | /multi_scale_conv.py | UTF-8 | 2,036 | 3.0625 | 3 | [] | no_license | """
Convolutional neural network architecture which uses multi-scale features.
Authors: Gareth Nicholas + Emile Givental
Date: December 9th, 2019
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Activation, \
BatchNormalization, MaxPooling2D, Dropout, concatenate
from tensorflow.keras import Model, regularizers, Input
class multi_scale_conv(Model):
    def __init__(self):
        # Functional API because sequential design is not possible for
        # non-sequential model.
        super(multi_scale_conv, self).__init__()
        self.model_name = "multi_scale_conv"
        # 32x32 RGB inputs (e.g. traffic-sign images); 43 output classes.
        img_shape = (32, 32, 3)
        in_layer = Input(shape = img_shape)
        # Stage 1: 5x5 conv -> ReLU -> batch norm -> 2x2 pool (32x32 -> 16x16).
        conv1 = Conv2D(32, (5, 5), kernel_regularizer=regularizers.l2(1e-4),
            padding = "same")(in_layer)
        act1 = Activation("relu")(conv1)
        batch1 = BatchNormalization()(act1)
        pool1 = MaxPooling2D((2,2))(batch1)
        # Stage 2: 4x4 conv block (16x16 -> 8x8).
        conv2 = Conv2D(64, (4, 4), kernel_regularizer=regularizers.l2(1e-4),
            padding = "same")(pool1)
        act2 = Activation("relu")(conv2)
        batch2 = BatchNormalization()(act2)
        pool2 = MaxPooling2D((2,2))(batch2)
        # Stage 3: 4x4 conv block (8x8 -> 4x4).
        conv3 = Conv2D(128, (4, 4), kernel_regularizer=regularizers.l2(1e-4),
            padding = "same")(pool2)
        act3 = Activation("relu")(conv3)
        batch3 = BatchNormalization()(act3)
        pool3 = MaxPooling2D((2,2))(batch3)
        # Scale down features from earlier layers
        # (16x16 and 8x8 maps are pooled to 4x4 so all three stages can be
        # flattened and concatenated into one multi-scale feature vector).
        scale_pool1 = MaxPooling2D((4,4))(pool1)
        scale_pool2 = MaxPooling2D((2,2))(pool2)
        flatten1 = Flatten()(scale_pool1)
        flatten2 = Flatten()(scale_pool2)
        flatten3 = Flatten()(pool3)
        combined = concatenate([flatten1, flatten2, flatten3])
        # Classification head: dense -> dropout -> 43-way softmax.
        dense1 = Dense(1024, activation = tf.nn.relu)(combined)
        drop1 = Dropout(0.3)(dense1)
        dense2 = Dense(43, activation = tf.nn.softmax)(drop1)
        self.model = Model(inputs=[in_layer], outputs=[dense2])
    def call(self, x):
        # Delegate the forward pass to the wrapped functional model.
        return self.model(x)
| true |
cf491841ea48b737689d2e054bd6f1a9607d189b | Python | maiconfriedel/ExerciciosPython | /Exercícios/Estrutura Sequencial/2.py | UTF-8 | 259 | 4.03125 | 4 | [] | no_license | #https://wiki.python.org.br/EstruturaSequencial
from number import Number
num = Number()
numero = input("Digite um número")
if num.isnumber(numero):
print("O número informado foi: " + numero)
else:
print("Não foi informado um número válido")
| true |
ac397ee3fbd7df5022985a468516c39c43f33f07 | Python | jakehoare/leetcode | /python_1_to_1000/962_Maximum_Width_Ramp.py | UTF-8 | 1,408 | 3.65625 | 4 | [] | no_license | _author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/maximum-width-ramp/
# Given an array A of integers, a ramp is a tuple (i, j) for which i < j and A[i] <= A[j].
# The width of such a ramp is j - i.
# Find the maximum width of a ramp in A. If one doesn't exist, return 0.
# If a later element of A is greater than or equal to an earlier element then the earlier element makes a wider ramp.
# Hence we find the indices of strictly decreasing elements of A, which are candidates for the left edges of ramps.
# Then iterate over A again from the last index to the first, considering each element as the right edge of a ramp.
# When an element is greater than the top of stack, update max_ramp and pop off top of stack since lower index
# elements of A cannot make wider ramps.
# Time - O(n)
# Space - O(n)
class Solution(object):
    def maxWidthRamp(self, A):
        """
        :type A: List[int]
        :rtype: int

        Collect indices of strictly decreasing values (the only useful
        left edges), then sweep right-to-left treating each index as a
        right edge and popping every left edge it can pair with.
        """
        candidates = []  # indices with strictly decreasing values
        for idx, value in enumerate(A):
            if not candidates or value < A[candidates[-1]]:
                candidates.append(idx)
        best = 0
        for right in reversed(range(len(A))):
            while candidates and A[right] >= A[candidates[-1]]:
                best = max(best, right - candidates.pop())
        return best
a08f731fa6950c7fdc369afe7bf28b874a264a9e | Python | sujitha-puthana/CS5560__LabSubmission | /Lab1A/source/PythonProject/ProjectSrc.py | UTF-8 | 98 | 3.03125 | 3 | [] | no_license | import math
# FIX: the braces made this a set literal built from the prints' None
# return values (a C/Java-style "block" that Python doesn't have); plain
# statements print the same output without constructing a throwaway set.
print("Ceil - ", math.ceil(3.2344))
print("floor - ", math.floor(3.2344))
1221b6824d8085e03a71632defcc3bd544ffc7ed | Python | IntelligentQuadruped/Implementation | /main/vision/file_support.py | UTF-8 | 2,325 | 3.40625 | 3 | [
"MIT"
] | permissive | """
Author: Jan Bernhard
Last updated: 02/04/18
Purpose: Managing file directories.
"""
import os, shutil, json
from time import time
def ensureDir(file_path):
    '''
    Creates folder for images if necessary.

    Args:
        file_path: Intended destination of images
    Output:
        Directory path (unchanged, for chaining)
    '''
    already_there = os.path.exists(file_path)
    if not already_there:
        os.makedirs(file_path)
        print('created: {}'.format(file_path))
    return file_path
def getRelativePath(src_path, dst_path):
    '''
    Returns the relative path between two absolute paths (thin wrapper
    around os.path.relpath).

    Args:
        src_path: Starting directory for relative path
        dst_path: Destination directory for relative path
    Output:
        The relative path between the specified directories.
    '''
    return os.path.relpath(src_path, dst_path)
def moveFile(src_path, dst_path, file_name):
    '''
    Intended to MOVE a file into the corresponding folder.

    Args:
        src_path: directory of file origin
        dst_path: directory of file destination
        file_name: name of the file that is being moved
    Output:
        None
    '''
    origin = str(os.path.join(src_path, file_name))
    target = str(os.path.join(dst_path, file_name))
    shutil.move(origin, target)
def copyFile(src_path, dst_path, file_name):
    '''
    Intended to COPY a file (contents only) into the corresponding folder.

    Args:
        src_path: directory of file origin
        dst_path: directory of file destination
        file_name: name of the file that is being copied
    Output:
        None
    '''
    origin = str(os.path.join(src_path, file_name))
    target = str(os.path.join(dst_path, file_name))
    shutil.copyfile(origin, target)
def remove(path):
    '''
    Removes the folder or file at the specified path.

    Args:
        path: directory of the folder or file that will be deleted
    Raises:
        ValueError: when *path* is neither a file nor a directory.
    '''
    if os.path.isdir(path):
        shutil.rmtree(path)
        return
    if not os.path.isfile(path):
        raise ValueError("file {} is not a file or dir.".format(path))
    os.remove(path)
def saveToJson(obj, name = 'output', file_path = './'):
    '''
    Saves input object to .json in the output folder.

    Args:
        obj: python object to be saved to .json.
        name: name of saved .json-file; the default 'output' gets a
            Unix-timestamp suffix so repeated calls don't collide.
        file_path: directory in which the .json file will be saved.
    '''
    if name == 'output':
        name = name + '_' + str(int(time()))
    if not name.endswith('.json'):
        name = name + '.json'
    # BUG FIX: the original ignored file_path entirely and always wrote
    # into the current working directory.
    target = os.path.join(file_path, name)
    with open(target, 'w') as file:
        json.dump(obj, file, indent=4)
| true |
dc9495ec2f69d18e85720477f9b2cc5b9c7b87ab | Python | mittmannv8/desafio-programacao-1 | /challenge/apps/sales/views.py | UTF-8 | 2,507 | 2.578125 | 3 | [] | no_license | import operator
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views import View
from functools import reduce
from challenge.apps.sales.models import Document
from challenge.apps.sales.models import Sale
class IndexSales(View):
def get(self, request):
"""
Return and template containing the last document sales (if exist)
and sum of all sales.
"""
sales = Sale.objects.all()
last_document = Document.objects.last() or None
if not last_document:
last_gross_sales = 0
gross_sales = 0
else:
last_sales = sales.filter(document=last_document)
last_gross_sales = reduce(
operator.add,
[s.total_price for s in last_sales]
)
gross_sales = reduce(operator.add, [s.total_price for s in sales])
return render(request, 'sales/index.html', {
'gross_sales': gross_sales,
'last_gross_sales': last_gross_sales,
'last_document': last_document,
})
class NewSalesFile(View):
def post(self, request, *args, **kwargs):
"""
Receive a file, parse and save the data on DB.
"""
try:
file = request.FILES['sales_file']
document = Document.objects.create()
for index, line in enumerate(file.readlines()):
line = line.decode('utf-8')
values = line.split('\t')
if index > 0:
Sale.objects.create(
purchaser_name=values[0],
item_description=values[1],
item_price=float(values[2]),
purchase_count=int(values[3]),
merchant_address=values[4],
merchant_name=values[5],
document=document
)
document.parse_complete = True
document.save()
messages.add_message(
request,
messages.SUCCESS,
'Documento inserido com sucesso'
)
except:
document.delete()
messages.add_message(
request,
messages.ERROR,
'Houve um erro ao inserir o documento. Tente novamente mais tarde.'
)
return HttpResponseRedirect('/')
| true |
9f417ad0a2ee44b0bfdc4b7ba4f53b9e108e827d | Python | huggins9000211/holbertonschool-higher_level_programming | /0x07-python-test_driven_development/tests/6-max_integer_test.py | UTF-8 | 681 | 3.46875 | 3 | [] | no_license | #!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
def test_func(self):
self.assertEqual(max_integer([]), None)
self.assertEqual(max_integer([4, 5, 6]), 6)
self.assertEqual(max_integer([-5, 5, 6]), 6)
self.assertEqual(max_integer([0]), 0)
self.assertEqual(max_integer([5.5]), 5.5)
self.assertEqual(max_integer("test"), 't')
self.assertEqual(max_integer(["test"]), 'test')
with self.assertRaises(TypeError):
max_integer(5)
print(max_integer(["test", 0])) | true |
04444b718f311c9310920240fff023e379948796 | Python | KnightApu/Leetcode-30days-challenge | /week-1/groupAnagram.py | UTF-8 | 542 | 3.796875 | 4 | [] | no_license | from typing import List
from collections import defaultdict
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
print("The original list : " + str(strs))
temp = defaultdict(list)
print(temp)
for ele in strs:
temp[str(sorted(ele))].append(ele)
res = list(temp.values())
print("The grouped Anagrams : " + str(res))
print(temp)
return res
sol = Solution()
arr = ['lump', 'eat', 'me', 'tea', 'em', 'plum']
print(sol.groupAnagrams(arr))
| true |
a8a6a2e5c699204c1531ae8c01dc03844c4db3d4 | Python | INYEONGKIM/BOJ | /BOJ17216.py | UTF-8 | 212 | 2.703125 | 3 | [
"MIT"
] | permissive | n=int(input());a=list(map(int,input().split()));d=[0]*n;r=1
for i in range(n):
d[i]=a[i]
for j in range(n):
if a[i]<a[j] and d[i]<=d[j]+a[i]:
d[i]=d[j]+a[i]
r=max(r,d[i])
print(r)
| true |
c3f3c0cb8f1a04a2ccfb565825f9ceb4b6ae9b98 | Python | taogeanton2/autogbt-alt | /example/boston.py | UTF-8 | 712 | 2.734375 | 3 | [
"MIT"
] | permissive | import argparse
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from autogbt import AutoGBTRegressor
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--n-trials', type=int)
args = parser.parse_args()
X, y = load_boston(return_X_y=True)
train_X, valid_X, train_y, valid_y = train_test_split(X, y, test_size=0.1)
model = AutoGBTRegressor(n_trials=args.n_trials)
model.fit(train_X, train_y)
print('valid MSE: %.3f' % (
mean_squared_error(valid_y, model.predict(valid_X))))
print('CV MSE: %.3f' % (model.best_score))
if __name__ == '__main__':
main()
| true |
2d5358409e795a6bf98caf36248f63406c9a69b1 | Python | shinenazeer/automating_excel_with_python | /09_pandas/csv_to_excel_pandas.py | UTF-8 | 285 | 3.0625 | 3 | [
"MIT"
] | permissive | # csv_to_excel_pandas.py
import pandas as pd
def csv_to_excel(csv_file, excel_file, sheet_name):
df = pd.read_csv(csv_file)
df.to_excel(excel_file, sheet_name=sheet_name)
if __name__ == "__main__":
csv_to_excel("books.csv", "pandas_csv_to_excel.xlsx", "Books") | true |
d8b88c7aa432a32122979b67c4104326eeedd6ad | Python | GarnetSquadron4901/2016-Sandstorm-III-PythonPort | /Devices/RevRobotics/AnalogPressureSensor.py | UTF-8 | 788 | 2.78125 | 3 | [] | no_license | import wpilib
class AnalogPressureSensor(wpilib.AnalogInput):
DEFAULT_GAIN = 150.0
DEFAULT_OFFSET = -25.0
def __init__(self, channel):
super().__init__(channel=channel)
self.controller = wpilib.ControllerPower()
self.gain = self.DEFAULT_GAIN
self.offset = self.DEFAULT_OFFSET
def get_instantaneous_pressure_psi(self):
return self.gain * (super().getVoltage() / self.controller.getVoltage5V()) + self.offset
def get_pressure_psi(self):
raise NotImplementedError
def get_gain(self):
return self.gain
def get_offset(self):
return self.offset
def set_gain(self, gain):
self.gain = gain
def set_offset(self, offset):
self.offset = offset
| true |
4926ed7249f834444fedb33d392ba59e34a3f2c5 | Python | Gas-Helio/Projeto---Engenheiro-de-dados | /modules/DatabaseCon.py | UTF-8 | 3,990 | 2.546875 | 3 | [] | no_license | import pyodbc
class Database:
def __init__(self, server, database, uid, pwd):
try:
str_connec = r'DRIVER={ODBC Driver 17 for SQL Server};' +\
'SERVER={};'.format(server) +\
'DATABASE={};PWD={};'.format(database, pwd)
if uid:
str_connec = str_connec + 'UID={};Trusted_Connection=no;'.format(uid)
else:
str_connec = str_connec + 'Trusted_Connection=yes;'
self.connection = pyodbc.connect(str_connec)
print('Conectado ao SQL Server')
self.success = True
if self.connection is not None:
self.connection.autocommit = True
self.cur = self.connection.cursor()
self.cur.execute("SELECT table_name FROM information_schema.tables;")
tab = self.cur.fetchall()
tab = [t[0] for t in tab]
if not ('atracacao_fato' in tab):
self.cur.execute(create_atracacao_fato)
if not ('carga_fato' in tab):
self.cur.execute(create_carga_fato)
except :
print('Conexão com SQL Server falhou')
self.success = False
def insert_values(self, table, data_df, batch_size=500):
if self.success:
columns = data_df.columns.values
str_insert = "INSERT INTO {} ({}) values({})".\
format(table, ', '.join(['['+c+']' for c in columns]), ('?,'*len(columns))[:-1])
for i in range(0, data_df.shape[0], batch_size):
print(f'[{i}/{data_df.shape[0]}]')
self.cur.executemany(str_insert, list(map(tuple, data_df.iloc[i:i + batch_size][columns].values)))
print('Concluído')
else:
print('Sem Conexão com SQL Server')
create_atracacao_fato = '''
CREATE TABLE atracacao_fato (
IDAtracacao int NOT NULL PRIMARY KEY,
CDTUP VARCHAR(255),
IDBerco VARCHAR(255),
Berço VARCHAR(255),
[Porto Atracação] VARCHAR(255),
[Apelido Instalação Portuária] VARCHAR(255),
[Complexo Portuário] VARCHAR(255),
[Tipo da Autoridade Portuária] VARCHAR(255),
[Data Atracação] DATETIME,
[Data Chegada] DATETIME,
[Data Desatracação] DATETIME,
[Data Início Operação] DATETIME,
[Data Término Operação] DATETIME,
[Ano da data de início da operação] SMALLINT,
[Mês da data de início da operação] TINYINT,
[Tipo de Operação] VARCHAR(255),
[Tipo de Navegação da Atracação] VARCHAR(255),
[Nacionalidade do Armador] VARCHAR(255),
[FlagMCOperacaoAtracacao] VARCHAR(255),
Terminal VARCHAR(255),
Município VARCHAR(255),
UF VARCHAR(255),
SGUF VARCHAR(255),
[Região Geográfica] VARCHAR(255),
[Nº da Capitania] VARCHAR(255),
[Nº do IMO] VARCHAR(255),
TEsperaAtracacao FLOAT(20),
TEsperaInicioOp FLOAT(20),
TOperacao FLOAT(20),
TEsperaDesatracacao FLOAT(20),
TAtracado FLOAT(20),
TEstadia FLOAT(20)
);
'''
create_carga_fato = '''
CREATE TABLE carga_fato (
IDCarga INT NOT NULL,
IDAtracacao INT FOREIGN KEY REFERENCES atracacao_fato(IDAtracacao),
Origem VARCHAR(7),
Destino VARCHAR(7),
CDMercadoria VARCHAR(4),
[Tipo Operação da Carga] VARCHAR(255),
[Carga Geral Acondicionamento] VARCHAR(20),
ConteinerEstado VARCHAR(5),
[Tipo Navegação] VARCHAR(20),
FlagAutorizacao VARCHAR(1),
FlagCabotagem TINYINT,
FlagCabotagemMovimentacao TINYINT,
FlagConteinerTamanho VARCHAR(10),
FlagLongoCurso TINYINT,
FlagMCOperacaoCarga TINYINT,
FlagOffshore TINYINT,
FlagTransporteViaInterioir TINYINT,
[Percurso Transporte em vias Interiores] VARCHAR(40),
[Percurso Transporte Interiores] VARCHAR(40),
STNaturezaCarga VARCHAR(20),
STSH2 VARCHAR(20),
STSH4 VARCHAR(20),
[Natureza da Carga] VARCHAR(30),
Sentido VARCHAR(20),
TEU FLOAT(20),
QTCarga INT,
VLPesoCargaBruta FLOAT(20),
[Ano da data de início da operação da atracação] SMALLINT,
[Mês da data de início da operação da atracação] TINYINT,
[Porto Atracação] VARCHAR(255),
SGUF VARCHAR(2),
[Peso líquido da carga] FLOAT(20)
);
''' | true |
06acbf6408d1d548aeb559147eeaaee4b9ecfeb8 | Python | NicholasLYang/WhoDat | /search.py | UTF-8 | 987 | 2.71875 | 3 | [] | no_license | from urllib2 import urlopen, Request
from urllib import urlencode
import json
from bs4 import BeautifulSoup
import regex
import re
def urls(userquery):
'''Returns the top four URLs for any Google query.
Takes a string as a search query.
Uses the Google API to find results for the search query.
Adapted from a StackOverflow answer posted by Alex Martelli.
'''
query = urlencode({'q': userquery})
#print query
url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s' % query
search_response = urlopen(url)
search_results = search_response.read()
results = json.loads(search_results)
data = results['responseData']
#print 'Total results: %s' % data['cursor']['estimatedResultCount']
if data: #if the Google API works fine and returns at least one result
hits = data['results']
return hits
else: #if the Google API did not return any results (most likely due to too many requests)
return False
| true |
aee2cd8278ecbfc118e8340f52b9ee3c95c865bb | Python | wooloba/LeetCode861Challenge | /326. Power of Three.py | UTF-8 | 473 | 3.234375 | 3 | [] | no_license | ####################
# Yaozhi Lu #
# Aug 24 2018 #
####################
#Origin: https://leetcode.com/problems/power-of-three/description/
import math
class Solution(object):
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
if n <= 0:
return False
return 1162261467%n == 0
def main():
so = Solution()
print so.isPowerOfThree(27)
if __name__ == '__main__':
main() | true |
ff6cbe95f4788f949840d43a1f66d04e22315b97 | Python | andrewyoung1991/supriya | /supriya/tools/ugentools/LatoocarfianC.py | UTF-8 | 6,152 | 2.9375 | 3 | [
"MIT"
] | permissive | # -*- encoding: utf-8 -*-
from supriya.tools.ugentools.UGen import UGen
class LatoocarfianC(UGen):
r'''A cubic-interpolating Latoocarfian chaotic generator.
::
>>> latoocarfian_c = ugentools.LatoocarfianC.ar(
... a=1,
... b=3,
... c=0.5,
... d=0.5,
... frequency=22050,
... xi=0.5,
... yi=0.5,
... )
>>> latoocarfian_c
LatoocarfianC.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Chaos UGens'
__slots__ = ()
_ordered_input_names = (
'frequency',
'a',
'b',
'c',
'd',
'xi',
'yi',
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
a=1,
b=3,
c=0.5,
d=0.5,
frequency=22050,
xi=0.5,
yi=0.5,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
a=a,
b=b,
c=c,
d=d,
frequency=frequency,
xi=xi,
yi=yi,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
a=1,
b=3,
c=0.5,
d=0.5,
frequency=22050,
xi=0.5,
yi=0.5,
):
r'''Constructs an audio-rate LatoocarfianC.
::
>>> latoocarfian_c = ugentools.LatoocarfianC.ar(
... a=1,
... b=3,
... c=0.5,
... d=0.5,
... frequency=22050,
... xi=0.5,
... yi=0.5,
... )
>>> latoocarfian_c
LatoocarfianC.ar()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
a=a,
b=b,
c=c,
d=d,
frequency=frequency,
xi=xi,
yi=yi,
)
return ugen
# def equation(): ...
### PUBLIC PROPERTIES ###
@property
def a(self):
r'''Gets `a` input of LatoocarfianC.
::
>>> latoocarfian_c = ugentools.LatoocarfianC.ar(
... a=1,
... b=3,
... c=0.5,
... d=0.5,
... frequency=22050,
... xi=0.5,
... yi=0.5,
... )
>>> latoocarfian_c.a
1.0
Returns ugen input.
'''
index = self._ordered_input_names.index('a')
return self._inputs[index]
@property
def b(self):
r'''Gets `b` input of LatoocarfianC.
::
>>> latoocarfian_c = ugentools.LatoocarfianC.ar(
... a=1,
... b=3,
... c=0.5,
... d=0.5,
... frequency=22050,
... xi=0.5,
... yi=0.5,
... )
>>> latoocarfian_c.b
3.0
Returns ugen input.
'''
index = self._ordered_input_names.index('b')
return self._inputs[index]
@property
def c(self):
r'''Gets `c` input of LatoocarfianC.
::
>>> latoocarfian_c = ugentools.LatoocarfianC.ar(
... a=1,
... b=3,
... c=0.5,
... d=0.5,
... frequency=22050,
... xi=0.5,
... yi=0.5,
... )
>>> latoocarfian_c.c
0.5
Returns ugen input.
'''
index = self._ordered_input_names.index('c')
return self._inputs[index]
@property
def d(self):
r'''Gets `d` input of LatoocarfianC.
::
>>> latoocarfian_c = ugentools.LatoocarfianC.ar(
... a=1,
... b=3,
... c=0.5,
... d=0.5,
... frequency=22050,
... xi=0.5,
... yi=0.5,
... )
>>> latoocarfian_c.d
0.5
Returns ugen input.
'''
index = self._ordered_input_names.index('d')
return self._inputs[index]
@property
def frequency(self):
r'''Gets `frequency` input of LatoocarfianC.
::
>>> latoocarfian_c = ugentools.LatoocarfianC.ar(
... a=1,
... b=3,
... c=0.5,
... d=0.5,
... frequency=22050,
... xi=0.5,
... yi=0.5,
... )
>>> latoocarfian_c.frequency
22050.0
Returns ugen input.
'''
index = self._ordered_input_names.index('frequency')
return self._inputs[index]
@property
def xi(self):
r'''Gets `xi` input of LatoocarfianC.
::
>>> latoocarfian_c = ugentools.LatoocarfianC.ar(
... a=1,
... b=3,
... c=0.5,
... d=0.5,
... frequency=22050,
... xi=0.5,
... yi=0.5,
... )
>>> latoocarfian_c.xi
0.5
Returns ugen input.
'''
index = self._ordered_input_names.index('xi')
return self._inputs[index]
@property
def yi(self):
r'''Gets `yi` input of LatoocarfianC.
::
>>> latoocarfian_c = ugentools.LatoocarfianC.ar(
... a=1,
... b=3,
... c=0.5,
... d=0.5,
... frequency=22050,
... xi=0.5,
... yi=0.5,
... )
>>> latoocarfian_c.yi
0.5
Returns ugen input.
'''
index = self._ordered_input_names.index('yi')
return self._inputs[index] | true |
f80604ca88c18095a27de6051623594ae1c49bc3 | Python | standbyside/crossin-weekly-practice | /solutions/双色球选号器.py | UTF-8 | 350 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
【基础题】:写一个小程序,随机挑选一组或几组双色球彩票的号码
【附加题】:
1. 模拟开奖结果,用你自己手选的号码,去计算中奖的概率
2. 加入购买费用(2元一注)和奖金返还,算算看玩一百年彩票能赚多少钱
""" | true |
394b05ca31be4cca90601b4202aec8d90fa2e965 | Python | Luis-VD/CS5242-Assignment1 | /code/Question_1.py | UTF-8 | 6,198 | 2.859375 | 3 | [] | no_license | import csv
from itertools import islice
import numpy as np
#Constants, configure here for tuning of input
network_input = [1, 2, 3, 4, 5]
def read_data(file_name):
data_set = []
with open(file_name, newline='') as csvfile:
data_file = csv.reader(csvfile)
for row in data_file:
data_set.append(list(float(x) for x in islice(list(row), 1, None)))
return np.array(data_set)
def first_network_iterate(weights, biases):
layer_one = []
layer_two = []
layer_three = []
for row in range(0, 5):
z_number = 0
for column in range(0, 5):
z_number += network_input[column]*weights[row][column]
layer_one.append(z_number+biases[0][row])
for row in range(5, 10):
z_number = 0
for column in range(0, 5):
z_number += layer_one[column]*weights[row][column]
layer_two.append(z_number+biases[1][row-5])
for row in range(10, 15):
z_number = 0
for column in range(0, 5):
z_number += layer_two[column]*weights[row][column]
layer_three.append(z_number+biases[2][row-10])
#print(layer_three)
return layer_three
def init_weights(weights):
comprised_weights = []
for row in range(0, 5):
weight_row = []
for column in range(0, 5):
weight_row.append(weights[row][column]*weights[row+5][column]*weights[row+5][column])
comprised_weights.append(weight_row)
#print(comprised_weights)
return comprised_weights
def init_biases (biases):
total_bias = []
for column in range(0, 5):
total_bias.append((biases[0][column]+biases[1][column]+biases[2][column])/3)
#print(total_bias)
return total_bias
def get_new_network_output(weights, biases):
new_output = []
for row in range(0, 5):
z_number = 0
for column in range(0, 5):
z_number += network_input[column]*weights[row][column]
new_output.append(z_number+biases[row])
return new_output
def get_cost (new, initial):
cost = np.sum(np.power(np.subtract(new, initial), 2))
return cost
def refine_weights_biases (weights, biases, initial_output, name):
refined_weights = weights
refined_biases = biases
new_network_output = get_new_network_output(weights, biases)
original_cost = get_cost(new_network_output, initial_output)
print(original_cost)
for row in range(0, 5):
for column in range (0,5):
refined_weights[row][column] += 0.001
while True:
new_network_output = get_new_network_output(refined_weights, refined_biases)
new_cost = get_cost(new_network_output, initial_output)
if new_cost <= original_cost:
original_cost = new_cost
refined_weights[row][column] += 0.001
#print(new_cost)
else:
refined_weights[row][column] -= 0.002
#print('subtracting to weight')
break
while True:
new_network_output = get_new_network_output(refined_weights, refined_biases)
new_cost = get_cost(new_network_output, initial_output)
if new_cost <= original_cost:
original_cost = new_cost
refined_weights[row][column] -= 0.001
else:
break
np.savetxt("../"+name+"-w.csv", refined_weights, delimiter=",")
for row in range(0, 5):
refined_biases[row] += 0.001
while True:
new_network_output = get_new_network_output(refined_weights, refined_biases)
new_cost = get_cost(new_network_output, initial_output)
if new_cost <= original_cost:
original_cost = new_cost
refined_biases[row] += 0.001
else:
refined_biases[row] -= 0.002
break
while True:
new_network_output = get_new_network_output(refined_weights, refined_biases)
new_cost = get_cost(new_network_output, initial_output)
if new_cost <= original_cost:
original_cost = new_cost
refined_biases[row] -= 0.001
else:
break
print(new_cost)
np.savetxt("../"+name+"-b.csv", refined_biases, delimiter=",")
print(refined_weights, refined_biases)
if __name__ == '__main__':
a_weights = read_data('../Question_1/a/a_w.csv')
b_weights = read_data('../Question_1/b/b_w.csv')
c_weights = read_data('../Question_1/c/c_w.csv')
d_weights = read_data('../Question_1/d/d_w.csv')
e_weights = read_data('../Question_1/e/e_w.csv')
a_bias = read_data('../Question_1/a/a_b.csv')
b_bias = read_data('../Question_1/b/b_b.csv')
c_bias = read_data('../Question_1/c/c_b.csv')
d_bias = read_data('../Question_1/d/d_b.csv')
e_bias = read_data('../Question_1/e/e_b.csv')
first_network_output = first_network_iterate(a_weights, a_bias)
initial_weights = init_weights(a_weights)
initial_biases = init_biases(a_bias)
refine_weights_biases(initial_weights, initial_biases, first_network_output, 'a')
first_network_output = first_network_iterate(b_weights, b_bias)
initial_weights = init_weights(b_weights)
initial_biases = init_biases(b_bias)
refine_weights_biases(initial_weights, initial_biases, first_network_output, 'b')
first_network_output = first_network_iterate(c_weights, c_bias)
initial_weights = init_weights(c_weights)
initial_biases = init_biases(c_bias)
refine_weights_biases(initial_weights, initial_biases, first_network_output, 'c')
first_network_output = first_network_iterate(d_weights, d_bias)
initial_weights = init_weights(d_weights)
initial_biases = init_biases(d_bias)
refine_weights_biases(initial_weights, initial_biases, first_network_output, 'd')
first_network_output = first_network_iterate(e_weights, e_bias)
initial_weights = init_weights(e_weights)
initial_biases = init_biases(e_bias)
refine_weights_biases(initial_weights, initial_biases, first_network_output, 'e')
| true |
30c03d4e8002dd2429f3d95be82bb7bcb1e94de6 | Python | ektamadhani/pythonautomation | /PythonBasics/del from cons.py | UTF-8 | 313 | 3.0625 | 3 | [] | no_license | class Emp:
def __init__(self,eid,ename):
self.eid=eid
self.ename=ename
#del self.ename
print(self.eid)
print(self.ename)
def dispInfo(self):
self.sal=100
#del self.sal
print(self.sal)
e1=Emp(1,'A')
e1.dispInfo()
del e1.ename
print(e1.ename)
| true |
cada6923215c6605641ed11342841a20e3aaf981 | Python | visrinivasan/Man-v-s-Bot | /scrabble.py | UTF-8 | 506 | 3.09375 | 3 | [] | no_license | import math
import enchant
import itertools
d = enchant.Dict("en_US")
v=""
w=""
def scrabble(v,m):
for i in reversed(range(2,len(v)+1)):
z=list(itertools.permutations(v,i))
z=set(z)
z=list(z)
for j in range(0,len(z)):
w=''.join(z[j])
if(d.check(w)):
if m in w:
return(w)
print "Input string was: blbearsc and word formed must contain e"
print " "
print "Word formed is: "+scrabble("blbearsc","e")
| true |
7f823e1446267688edd9506808931830edc89413 | Python | WarwickTabletop/tgrsite | /timetable/models.py | UTF-8 | 3,687 | 2.59375 | 3 | [
"ISC"
] | permissive | from datetime import date
from django.core import validators
from django.db import models
from django.shortcuts import reverse
class GoogleCalender(models.Model):
url = models.CharField(max_length=120,
help_text="Please ensure that it starts at the // (i.e. without the https: or webcal: part)")
name = models.CharField(max_length=30)
sort = models.IntegerField()
def __str__(self):
return self.name
# Create your models here.
class Week(models.Model):
startDate = models.CharField(max_length=10)
number = models.SmallIntegerField()
year = models.PositiveSmallIntegerField(default=date.today().year,
validators=[validators.MinValueValidator(
2000, message="Invalid year")],
help_text="Academic year (use greater year, i.e. 18/19 is 2019)")
def __str__(self):
return str(self.year) + " week " + str(self.number)
class Meta:
ordering = ['-year', 'number']
def get_absolute_url(self):
return reverse("timetable")
class Event(models.Model):
description = models.CharField(max_length=20)
date_time_line = models.CharField(max_length=20)
sort_key = models.SmallIntegerField()
def __str__(self):
return str(self.description) + " : " + str(self.date_time_line)
class Meta:
ordering = ['sort_key']
class Booking(models.Model):
event = models.ForeignKey(Event, on_delete=models.CASCADE)
week = models.ForeignKey(Week, on_delete=models.CASCADE)
room = models.CharField(max_length=100)
def __str__(self):
return str(self.week) + ": " + str(self.event)
class ColourScheme(models.Model):
name = models.CharField(
max_length=20, help_text="A description to help you identify it")
html_code = models.CharField(
max_length=7, help_text="Enter hexcode of colour to be used (include #)")
light_text = models.BooleanField(default=False,
help_text="Should the text used be a light colour (for dark colours)")
def __str__(self):
return str(self.name) + " (" + str(self.html_code) + ")"
class Timetable(models.Model):
title = models.CharField(max_length=30)
events = models.ManyToManyField(Event)
weeks = models.ManyToManyField(Week)
notes = models.TextField(blank=True)
active = models.BooleanField(default=False)
colour = models.ForeignKey(
ColourScheme, on_delete=models.SET_NULL, null=True)
def __str__(self):
return str(self.title)
def get_absolute_url(self):
return reverse("timetable:single_timetable", kwargs={"pk": self.pk})
class RoomLink(models.Model):
url = models.CharField(max_length=120,
help_text="Link provided by interactive map")
room = models.CharField(max_length=100, unique=True)
def __str__(self):
return self.room
class SpecialEvent(models.Model):
title = models.CharField(max_length=128)
url = models.URLField(blank=True, max_length=200)
room = models.CharField(blank=True, max_length=30)
week = models.SmallIntegerField()
display_date = models.CharField(
max_length=60, help_text="The description of date and time to display")
sort_date = models.DateField(
help_text="The date to sort by, usually start date")
hide_date = models.DateField(help_text="The date to hide this event after")
poster = models.ImageField(blank=True, upload_to='posters/%Y/%m/%d/')
def __str__(self):
return self.title + ": " + self.display_date
| true |
2e236695cf960e0b94b34bf559dc54eaa6e22d02 | Python | DukeLearningInnovation/coursera-labs-vscode-grader | /autograde/tests/test_assign1.py | UTF-8 | 995 | 3.5 | 4 | [] | no_license | import assign1
import pytest
@pytest.mark.parametrize("input, expected",[
("World", "Hello World!"),
("Drew", "Hello Drew!"),
("🤖", "Hello 🤖!")
])
def test_greet(input, expected, mocker):
# Using a pytest-mock spy to make sure the helper function
# is used and has the expected output
spy = mocker.spy(assign1, "helper")
assert expected == assign1.greet(input), "greet() does not return the expected greeting"
assert spy.call_count == 1, "you didn't call your helper function within the greet function"
assert spy.spy_return == f"{input}!", ("check your helper function implementation and "
"be sure it passes the included test")
def test_greet_no_args():
try:
result = assign1.greet()
except TypeError:
result = ''
assert result != '', "greet() does not handle missing argument"
assert "Hello World!" == result, "greet() does not have the expected default parameter value"
| true |
2e076d06bafa899e25a4489b7cebcd90f854cd8d | Python | game99world/binance-triangle-arbitrage | /main.py | UTF-8 | 4,939 | 2.9375 | 3 | [
"MIT"
] | permissive | from collections import defaultdict
from operator import itemgetter
from time import time
import os
from binance.client import Client
import binance_api as api
API_PUBLIC = os.environ.get("PUBLIC_KEY")
API_SECRET = os.environ.get("SECRET_KEY")
FEE = 0.0005
PERCENTAGE = 5 # percentage of the primary coin budget to use for arbitrage.
STARTING_COIN = 'BTC'
with open('primary.txt') as f:
PRIMARY = [line.rstrip() for line in f]
client = Client(API_PUBLIC, API_SECRET)
def execute_triangular_arbitrage(coins, percentage, starting_coin):
free = client.get_asset_balance(starting_coin)['free']
budget = (float(free) / 100) * percentage
print("The " + str(percentage) + "% of your total " + str(free) + " " +
starting_coin + " is: " + f"{budget:.9f}" + " " + starting_coin +
".")
# TODO:
# - Buy coins[1] with coins[0] with a budget of budget.
# - Sell all coins[1] in (coins[1] + coins[2]) market.
# - Buy coins[3] in (coins[2] + coins[3]) market.
# not sure about how to determine to the price parameter.
# not sure which method to use, LIMIT or MARKET
# note that this is not an actual order but a test order.
# TODO:
# - you need to figure out a relationship between budget and price.
# - you need to make sure that coins[i] + coins[j] market exist(
# BNB->COMP) does not exist.
#
# buy_order_limit = client.create_test_order(symbol=coins[1] + coins[0],
# side='BUY',
# type='LIMIT',
# timeInForce='GTC',
# quantity=0.5,
# price=0.00001)
# note that this is an actual order
# buy_order_limit = client.order_limit_buy(symbol=coins[1] + coins[0],
# quantity=budget,
# price=200)
# amount = client.get_asset_balance(coins[1])['free']
# sell_order_limit = client.create_test_order(symbol=coins[0] + coins[1],
# side='SELL',
# type='LIMIT',
# timeInForce='GTC',
# quantity=api._format(amount),
# price=200)
def main():
start_time = time()
prices = get_prices()
prices_time = time()
print(f"Downloaded in: {prices_time - start_time:.4f}s")
triangles = list(find_triangles(prices))
print(f"Computed in: {time() - prices_time:.4f}s")
if triangles:
for triangle in sorted(triangles, key=itemgetter('profit'), reverse=True):
describe_triangle(prices, triangle)
else:
print("No triangles found, trying again!")
main()
def get_prices():
prices = client.get_orderbook_tickers()
prepared = defaultdict(dict)
for ticker in prices:
pair = ticker['symbol']
ask = float(ticker['askPrice'])
bid = float(ticker['bidPrice'])
if ask == 0.0:
continue
for primary in PRIMARY:
if pair.endswith(primary):
secondary = pair[:-len(primary)]
prepared[primary][secondary] = 1 / ask
prepared[secondary][primary] = bid
return prepared
def find_triangles(prices):
triangles = []
starting_coin = STARTING_COIN
for triangle in recurse_triangle(prices, starting_coin, starting_coin):
coins = set(triangle['coins'])
if not any(prev_triangle == coins for prev_triangle in triangles):
yield triangle
triangles.append(coins)
def recurse_triangle(prices, current_coin, starting_coin, depth_left=3, amount=1.0):
if depth_left > 0:
pairs = prices[current_coin]
for coin, price in pairs.items():
new_price = (amount * price) * (1.0 - FEE)
for triangle in recurse_triangle(prices, coin, starting_coin, depth_left - 1, new_price):
triangle['coins'] = triangle['coins'] + [current_coin]
yield triangle
elif current_coin == starting_coin and amount > 1.0:
yield {
'coins': [current_coin],
'profit': amount
}
def describe_triangle(prices, triangle):
coins = triangle['coins']
price_percentage = (triangle['profit'] - 1.0) * 100
execute_triangular_arbitrage(coins, PERCENTAGE, STARTING_COIN)
print(f"{'->'.join(coins):26} {round(price_percentage, 4):-7}% <- profit!")
for i in range(len(coins) - 1):
first = coins[i]
second = coins[i + 1]
print(f" {second:4} / {first:4}: {prices[first][second]:-17.8f}")
print('')
if __name__ == '__main__':
main()
| true |
eb0980a18f417b030df7bc7920a908eef15b1cc2 | Python | ddank0/Python-ex | /ex21.py | UTF-8 | 389 | 3.703125 | 4 | [] | no_license | vet = []
n = input('A = adicionar / R = remover / I = imprimir / F = sair:')
while n.upper() != 'F':
if n.upper() == 'A':
any = input("elemento:")
vet.append(any)
elif n.upper() == 'R':
vet.pop()
elif n.upper() == 'I':
print(vet)
else:
print('opção invalida')
n = input('A = adicionar / R = remover / I = imprimir / F = sair:') | true |
b9244aad766b95d08175ad7fd7a994809abe5645 | Python | dibdas/python | /pre3.py | UTF-8 | 58 | 3.0625 | 3 | [] | no_license | n=int(input())
for j in range(1,10+1):
print(n*j)
| true |
4dcdbcb232536b2e653e8dd0a7fd548fbaaa1904 | Python | pugzillo/kpop_song_analyses | /src/web_scrape_wikipedia_lists.py | UTF-8 | 1,275 | 3.25 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import re
'''
Get the end of the urls for kpop artists on the two wikipedia list pages with Beautiful Soup!!!
Sept 10
'''
# websites I want to scrap
website_urls = ['https://en.wikipedia.org/wiki/List_of_South_Korean_idol_groups_(2000s)', 'https://en.wikipedia.org/wiki/List_of_South_Korean_idol_groups_(2010s)']
for url in website_urls:
website_url = requests.get(url).text
soup = BeautifulSoup(website_url,'lxml')
url_list = []
# headers of the sections I want to scrape from
years = ['2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
for year in years:
for headline in soup.findAll('span', {'class':'mw-headline', 'id':year}):
# print(headline)
links = headline.find_next('ul').find_all('a')
for link in links:
# print(link.get('href'))
if link.get('href').startswith('/wiki/'):
url_list.append(link.get('href'))
# print(url_list)
# Save scraped URLS to file
with open('kpop_wiki_urls.csv', 'w') as filehandle:
for item in url_list:
filehandle.write('%s\n' % item)
| true |
b39abf2feaf5046a1ae9313d5b7ddeecc7e2f7d8 | Python | igrek51/trimmer | /tests/test_trimming_silence.py | UTF-8 | 559 | 2.546875 | 3 | [
"MIT"
] | permissive | from pydub import AudioSegment
from trimmer.normalizer import detect_leading_silence
def test_trim_down_the_silence():
song = AudioSegment.from_mp3("./tests/tubular_ex.mp3")
start_trim = detect_leading_silence(song)
end_trim = detect_leading_silence(song.reverse())
trimmed_song = song[start_trim:-end_trim]
trimmed_song.export("./tests/result_tubular_trimmed.mp3", format="mp3")
trimmed_song = AudioSegment.from_mp3("./tests/result_tubular_trimmed.mp3")
duration_s = len(trimmed_song) / 1000
assert 76 < duration_s < 77
| true |
4247a34529f5501cd8d623158deb0f2e1a28d784 | Python | JetBrains/intellij-community | /python/testData/inspections/PyTypeCheckerInspection/AsyncForIterable.py | UTF-8 | 816 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | import asyncio
from random import randint
import collections
class Cls(collections.AsyncIterable):
async def __aiter__(self):
return self
async def __anext__(self):
data = await Cls.fetch_data()
if data:
return data
else:
print('iteration stopped')
raise StopAsyncIteration
@staticmethod
async def fetch_data():
r = randint(1, 100)
return r if r < 92 else False
async def coro():
a = Cls()
async for i in a: # OK
await asyncio.sleep(0.2)
print(i)
else:
print('end')
async for i in <warning descr="Expected type 'collections.AsyncIterable', got 'list' instead">[]</warning>:
pass
loop = asyncio.get_event_loop()
loop.run_until_complete(coro())
loop.close()
| true |
14e17ce7f87ffe16ae953c48b7216965504cc80c | Python | natsume-qwerty/sta141c | /170420problem3.py | UTF-8 | 2,446 | 2.703125 | 3 | [] | no_license | # sta 141c python problem3
# cd C:/Users/toshiya/Desktop/sta141c/hw1_data
# python
######### homework 1 prob3
import sys
import numpy as np
import pandas as pd
import pickle
# reading data using sys
df_training = pd.read_csv(sys.argv[1], header=None)
df_training = df_training.dropna()
df_training = df_training.reset_index(drop=True)
df_training.columns = ['id','qid1', 'qid2', 'question1', 'question2', 'is_duplicate']
df_training_qs = df_training[['question1','question2']]
# preprocessing data
def preprocess( str_in ):
numcols = len(str_in.columns)
str_out = pd.DataFrame()
for i in range(numcols):
str_out_i = pd.Series(str_in.iloc[:,i]).str.lower()
str_out_i = pd.Series(str_in.iloc[:,i]).str.replace('?'," ")
str_out_i = pd.Series(str_out_i).str.replace('!'," ")
str_out_i = pd.Series(str_out_i).str.replace(':'," ")
str_out_i = pd.Series(str_out_i).str.replace(','," ")
str_out_i = pd.Series(str_out_i).str.replace('.'," ")
str_out_i = pd.Series(str_out_i).str.replace('('," ")
str_out_i = pd.Series(str_out_i).str.replace(')'," ")
str_out_i = pd.Series(str_out_i).str.replace('’'," ")
str_out_i = pd.Series(str_out_i).str.replace('"'," ")
str_out_i = pd.Series(str_out_i).str.replace("'"," ")
str_out_i = pd.Series(str_out_i).str.replace("-","")
str_out_i = pd.Series(str_out_i).str.lower()
str_out = pd.concat([str_out,str_out_i],axis=1)
return str_out
df_training_qs = preprocess(df_training_qs)
# compute score
score_list = []
for k in range(len(df_training_qs)):
a = df_training_qs['question1'][k].split()
b = df_training_qs['question2'][k].split()
c = 0
for j in range(len(a)):
if a[j] in b:
c += 1
for i in range(len(b)):
if b[i] in a:
c += 1
score = c/(len(a)+len(b))
score_list = score_list + list([score]) # make score_list
### problem 3
# compute accuracy with thrsh
df_training['score'] = score_list
df_training['sign'] = df_training['score'] - float(sys.argv[2])
sign_list = []
for h in range(len(df_training)):
if df_training['sign'][h] > 0:
sign_list.append(1)
else:
sign_list.append(0)
df_training['sign_list'] = sign_list
d = 0
for i in range(len(df_training)):
if df_training['sign_list'][i] == df_training['is_duplicate'][i]:
d += 1
score_acc = (d/len(df_training)) # calculate accuracy
print(score_acc) # return the result
| true |
3c7339edcfccb646954c5d28d33d4ad1e5c4f187 | Python | FreyaXH/Homotopy_Continuation | /HomotopyContinuationSpyder.py | UTF-8 | 18,915 | 3.265625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 15:24:37 2020
@author: sr917
"""
#import functions
import numpy as np
import sympy as sy
import scipy.integrate as spi
from sympy.abc import symbols
from sympy.utilities.lambdify import lambdify
import itertools as it
import time
import iminuit as im
import pandas as pd
import copy as cp
#import according to how many variables are needed - Ex: for 1D import x, a, b
# Symbolic variables: t is the homotopy parameter; the rest are available as
# problem unknowns and placeholder coefficients.
# NOTE(review): 'h' appears twice in both the target tuple and the symbols
# string; the second binding overwrites the first (same Symbol('h'), so
# harmless but redundant) — confirm and deduplicate.
t,x,y, z, w, h, a,b,c,d, e, f, g,h, l, m,n = symbols('t,x,y, z, w, h, a,b,c,d, e,f,g,h,l,m,n', real = True)
def define_4by4_matrix_inv_and_determinant(file_name):
    """
    Build a fully symbolic 4 x 4 matrix and compute its determinant and inverse.

    The matrix, determinant and inverse are written to ``<file_name>.csv``
    (one row, three columns) and returned as ``(matrix, determinant, inverse)``.
    """
    matrix = sy.Matrix(4, 4, symbols('A:4:4'))
    determinant = matrix.det()
    inverse = matrix.inv()
    # Persist the symbolic results so they can be reloaded without recomputation.
    records = pd.DataFrame({'A': [matrix], 'Determinant' : [determinant], 'Inverse': [inverse]})
    records.to_csv(file_name + '.csv', index=True)
    return matrix, determinant, inverse
def define_6by6_matrix_inv_and_determinant(file_name):
    """
    Constructs a 6 x 6 symbolic matrix and calculates its determinant and inverse.

    Results are saved to ``<file_name>.csv`` and returned as
    ``(matrix, determinant, inverse)``.  The elapsed wall-clock time of the
    symbolic computation is printed, since a symbolic 6x6 inverse is expensive.
    """
    time_start = time.time()
    A = sy.Matrix(6, 6, symbols('A:6:6'))
    A_inv = A.inv()
    A_det = A.det()
    time_end = time.time()
    df = pd.DataFrame({'A': [A], 'Determinant' : [A_det], 'Inverse': [A_inv]})
    df.to_csv(file_name + '.csv', index=True)
    # Bug fix: elapsed time is end - start (the original printed start - end,
    # i.e. a negative duration).
    print('Time taken to invert and calculate determinant : {}'.format(time_end - time_start))
    return A, A_det, A_inv
def define_3by3_matrix_inv_and_determinant(file_name):
    """
    Build a fully symbolic 3 x 3 matrix and return it with its determinant
    and inverse.

    The three symbolic objects are also saved to ``<file_name>.csv``.
    Returns ``(matrix, determinant, inverse)``.
    """
    mat = sy.Matrix(3, 3, symbols('A:3:3'))
    det = mat.det()
    inv = mat.inv()
    # One-row frame: each cell holds a whole symbolic expression.
    columns = {'A': [mat], 'Determinant' : [det], 'Inverse': [inv]}
    pd.DataFrame(columns).to_csv(file_name + '.csv', index=True)
    return mat, det, inv
# Precompute the symbolic inverse/determinant used by Homotopy_Continuation when
# matrix_substitution=True. Only the 3x3 case is active; 4x4 and 6x6 are kept for reference.
#A4, det_4by4_matrix, inverse_4by4_matrix = define_4by4_matrix_inv_and_determinant('A4')
#A, det_6by6_matrix, inverse_6by6_matrix = define_6by6_matrix_inv_and_determinant('A6')
A3, det_3by3_matrix, inverse_3by3_matrix = define_3by3_matrix_inv_and_determinant('A3')
#construct homotopy
def Homotopy(t, G, F, gamma):
    """
    Blend the easy start system G with the target system F into one system.

    At t = 0 the result equals G; at t = 1 it equals gamma * F.
    gamma must be a complex number with absolute value 1.
    """
    blended = []
    for idx in range(len(G)):
        blended.append((1 - t) * G[idx] + gamma * t * F[idx])
    return blended
#construct starting polynomial
def G(input_variables):
    """
    Build the easy starting system with known roots: one equation v**3 - 1 = 0
    per input variable (roots are the cube roots of unity).

    Parameters:
        input_variables : list/array of variables, e.g. [x, y].
    """
    polynomials = []
    for var in input_variables:
        polynomials.append(var ** 3 - 1)
    return polynomials
#generate gamma
def Gamma_Generator():
    """
    Draw a random complex number of absolute value 1 with non-negative
    real and imaginary parts (real part uniform in [0, 1)).
    """
    real_part = np.random.rand()
    imag_part = np.sqrt(1 - real_part**2)
    return real_part + imag_part * 1j
#roots of startin function
def G_Roots(n):
    """
    Return the known roots of the starting system G for n dimensions.

    For n == 1 this is the list of cube roots of unity; otherwise it is the
    n-fold Cartesian product of that list (one tuple per combined root).
    """
    unity = [1, np.exp(1j*2*np.pi/3), np.exp(1j*2*np.pi*2/3)]
    if n != 1:
        return list(it.product(unity, repeat=n))
    return unity
def Homotopy_Continuation(t, input_variables, input_functions, number_of_steps = 5, Newtons_method = True, expanded_functions = None, expansion_variables = None,\
                          matrix_substitution = False, matrix_A = None, det_matrix = None, inverse_matrix = None, remainder_tolerance = 1e-3, tolerance_zero = 1e-6, \
                          decimal_places = 5, newton_ratio_accuracy = 1e-10, max_newton_step = 100, debug = False, \
                          save_file = True, save_path = False, file_name = 'Homotopy_Roots'):
    """
    Perfroms the Homotopy Continuation to determine the roots of a given function F, within a certain accuracy
    using the RK4 method during the predictor step and either Newton's method of Minuit for the root-finding step.
    For dimensions more than 4, setting matric_substitution to True and inputting ax externally calculated form of the
    determinant and inverse of the matrix will speed up the calculation.
    If function takes too long to run (for very complicated functions) increasing the number of Homotopy steps
    Parameters:
        t : Just given as a variable, the time step.
        input_variables : Symbols to use as variables. Must be given as an array or list. Length determines the
                          the number of dimensions to consider.
                          Example: [x,y] for 2 dimension, where the symbols used must first be imported above.
                          Must not contain t.
        input_functions : Function to be determined. Should be given as a list or array of variables.
                          Example: F = [x**2 , y**2]
        number_of_steps : Number of steps for the Homotopy Continuation. Default : 5
        Newtons_method : Default True else use Minuit
        expanded_functions : expansion into complex, Ex: [a + 1j*b, c + 1j*d]
        (only for Minuit)    Variables must first be imported above, and cannot contain those in input_variables or t
                             Only needed when Minuit is used
        expansion_variables = Array of variables for expansion to complex numbers, Ex for 2D : [a,b,c,d]
        (only for Minuit)     Only needed when Minuit is used
        matrix_substitution = Default False. If True, calculated determinant form and inverse form must be given
                              Useful for 4 dimensions and above.
        matrix_A : The intial matrix for which the determinant and inverse are calculated (only if matrix_substitution is True)
        det_matrix : form of determinant of the matrix (only if matrix_substitution is True)
        inverse_matrix : form of the inverse (only if matrix_substitution is True)
        decimal_places : precision of roots found to determine unique roots
        remainder_tolerance : Tolerance for roots to be considered, how far is the function from zero.
        tolerance_zero : below this tolerance, the number is assumed to be zero
        newton_ratio_accuracy : Convergence criteria for Newton's
        max_newton_step = Max number of steps for Newton's method
        save_file : Saves the soutions into a csv file
        save_path : Tracks and saves how roots evolve
        file_name : Save roots in file
    Returns:
        solutions_real: The Real Roots
    """
    time_start = time.time()
    #convert F to a function
    F = lambdify([input_variables], input_functions)
    #store the least accurate root
    max_remainder_value = 0
    #count the number of roots found
    number_of_count = 0
    #step size
    delta_t = 1/number_of_steps
    #determine the number of dimensions considered
    dimension = len(input_variables)
    #generate gamma
    gamma = Gamma_Generator()
    #print(gamma)
    #gamma = 0.1890852662170326+0.9819606723793137j
    #determine roots of easy polynomial
    G_roots = G_Roots(dimension)
    #construct homotopy
    H = Homotopy(t, G(input_variables), F(input_variables), gamma)
    #first derivative of H wrt to all the x variables (the Jacobian of H in the x variables)
    derivative_H_wrt_x = sy.Matrix([[H[i].diff(input_variables[j]) for j in range(len(input_variables))] for i in range(len(input_variables))])
    if matrix_substitution is False:
        time1 = time.time()
        determinant_H = derivative_H_wrt_x.det(method='lu')
        #invert the matrix of the derivatives of H wrt to x variables
        inverse_derivative_H_wrt_x = derivative_H_wrt_x**-1
        time2 = time.time()
        if debug: print('Time for calculation : {}'.format(time2 - time1))
    else:
        # Reuse the pre-computed symbolic determinant/inverse by substituting the
        # Jacobian entries for the generic matrix entries (faster for dim >= 4).
        time3 = time.time()
        determinant_H = det_matrix.subs(zip(list(matrix_A), list(derivative_H_wrt_x)))
        inverse_derivative_H_wrt_x = inverse_matrix.subs(list(zip(matrix_A, derivative_H_wrt_x)))
        time4 = time.time()
        if debug: print('Time for sub : {}'.format(time4 - time3))
    #check the determinant does not go to zero so can invert
    if determinant_H == 0:
        return np.NaN
    #function of determinant H
    determinant_H_func = lambdify((t, input_variables), determinant_H)
    #derivative of H with respect to t
    derivative_H_wrt_t = sy.Matrix([H[i].diff(t) for i in range(len(input_variables))])
    #differentiate of x wrt to t
    x_derivative_t = -inverse_derivative_H_wrt_x*derivative_H_wrt_t
    x_derivative_t_func = lambdify((t, input_variables), [x_derivative_t[i] for i in range(len(x_derivative_t))])
    x_derivative_t_func_1d = lambdify((t,input_variables), H[0].diff(t)/H[0].diff(x))
    #determine H/H' to use in Newton's method
    H_over_derivative_H_wrt_x = inverse_derivative_H_wrt_x*sy.Matrix(H)
    H_over_derivative_H_wrt_x_func = lambdify((t, input_variables), [H_over_derivative_H_wrt_x[i] for i in range(len(H_over_derivative_H_wrt_x))])
    #track paths of roots
    paths = []
    #track roots
    solutions = []
    #track accuracy of each root
    accuracies = []
    #track real rots
    solutions_real = []
    #run for all roots in the starting system
    for x_old in G_roots:
        #path of each root
        trace = []
        #root number being found
        number_of_count += 1
        #set homotopy to inital system
        t_new = 0
        #convert 1D to an array
        if dimension == 1:
            x_old = np.array([x_old])
        #run for all steps starting at t=0 ending at t=1
        while round(t_new,5) < 1:
            trace.append(x_old)
            t_old = t_new
            #increment time by step size
            t_new += delta_t
            if dimension == 1:
                #perform RK4 for 1 D
                predictor = spi.solve_ivp(x_derivative_t_func_1d, (t_old, t_new), x_old)
                predicted_solution = np.array([predictor.y[-1][-1]])
            if dimension != 1:
                #check determinant to make sure does not go to zero
                if abs(determinant_H_func(t_new, x_old)) < tolerance_zero:
                    return np.NaN
                #perform RK4 method for n dimensions
                predictor = spi.solve_ivp(x_derivative_t_func, (t_old, t_new), x_old)
                predicted_solution = predictor.y[:,-1]
            x_old = predicted_solution
            #newton's method
            #track how root changes and the number of steps used
            ratio = np.full(dimension, 1)
            number_of_newton_steps = 0
            change_in_x = np.full(dimension, newton_ratio_accuracy)
            if Newtons_method is True:
                method_used = 'Newton-Raphson with ' + str(max_newton_step) + ' steps.'
                #track amount of time newton uses for debugging
                time_newtons_start = time.time()
                #convergence criteria for step size in Newton's Method
                while max(ratio) > newton_ratio_accuracy and number_of_newton_steps < max_newton_step:
                    if debug: print("Before Newton", x_old)
                    #check determinant to ensure can invert
                    if dimension != 1:
                        if abs(determinant_H_func(t_new, x_old)) < tolerance_zero:
                            return np.NaN
                    #find new position of root
                    x_old_intermediate = x_old - H_over_derivative_H_wrt_x_func(t_new, x_old)
                    change_in_x_old = change_in_x
                    change_in_x = abs(x_old_intermediate - x_old)
                    #calculate change in position of root (1e-10 guards against division by zero)
                    ratio = [change_in_x[j]/(change_in_x_old[j] + 1e-10) for j in range(dimension)]
                    x_old = x_old_intermediate
                    number_of_newton_steps += 1
                time_newtons_end = time.time()
                if debug: print("After Newton", x_old)
                if debug:
                    print('Time for Newton: {}'.format(time_newtons_end - time_newtons_start))
            #Minuit
            else:
                method_used = 'Minuit'
                #Minuit only runs for more than 1 dimension
                if dimension == 1:
                    raise TypeError('Minuit only runs for more than 1 dimension!')
                #track time for debugging
                time_minuit_start = time.time()
                #substitute time t at each step into Homotopy equation
                H_at_fixed_t = Homotopy(t_new, G(expanded_functions), F(expanded_functions), gamma)
                if debug: print("Homotopy at current step: ", H_at_fixed_t)
                #split real and imaginary and sum absolute value of expressions
                H_im_real = sum([abs(sy.re(i_re)) for i_re in H_at_fixed_t] + [abs(sy.im(i_im)) for i_im in H_at_fixed_t])
                if debug: print("Homotopy Absolute value at current step: ", H_im_real)
                #convert into function
                H_im_real_func = lambdify([expansion_variables], H_im_real)
                x_old_re_im = []
                #split x_old to real and imaginary
                for i in range(dimension):
                    x_old_re_im.append(np.real(x_old[i]))
                    x_old_re_im.append(np.imag(x_old[i]))
                #convert variables to strings for input into Minuit
                string_variables = [str(j) for j in expansion_variables]
                #call iminuit function
                if debug: print("Before Minuit we start at", x_old_re_im)
                printlevel = 10 if debug else 0
                #find roots using Minuit
                m = im.Minuit.from_array_func(H_im_real_func, x_old_re_im, forced_parameters= string_variables,print_level=printlevel)
                m.migrad(resume=False)
                x_old_im_re_vals = m.values
                #reconstruct roots from real and imaginary parts
                x_old = [x_old_im_re_vals[j] + 1j*x_old_im_re_vals[j+1] for j in range(0, 2*dimension, 2)]
                if debug: print("After Minuit we got", x_old)
                time_minuit_end = time.time()
                if debug:
                    print('Time for Minuit: {}'.format(time_minuit_end - time_minuit_start))
        trace.append(x_old)
        #check root is found by ensuring roots found is within the tolerance
        if dimension == 1 :
            remainder = list(map(abs, F([x_old])))
        # NOTE(review): the line below unconditionally overwrites the 1-D branch above,
        # making it dead code — presumably fine since x_old is already an array in 1-D,
        # but confirm which form was intended.
        remainder = list(map(abs, F(x_old)))
        if max(remainder) < remainder_tolerance:
            #make root real if imaginary part is below the zero tolerance
            x_old = list(x_old)
            #store the maximum remainder
            max_rem = max(remainder)
            if max_remainder_value < max_rem:
                max_remainder_value = max_rem
            solutions.append(x_old)
            #if paths are wanted
            if save_path is True:
                paths.append(trace)
            accuracies.append(remainder)
    time_end = time.time()
    if save_path is False:
        paths = np.full(len(solutions),'-')
    num_of_roots_found = len(solutions)
    #only keep all the unique roots
    solutions_unique = cp.deepcopy(solutions)
    solutions_rounded = np.around(solutions_unique, decimal_places)
    solutions_unique, unique_index = np.unique(solutions_rounded, axis=0, return_index=True)
    #keep only the values associated to unique roots
    accuracies = [accuracies[i] for i in unique_index]
    paths = [paths[i] for i in unique_index]
    num_of_unique_roots = len(solutions_unique)
    #make root real if imaginary part is below the zero tolerance
    solutions_real = [[solutions[j][i].real for i in range(len(solutions[j])) if abs(solutions[j][i].imag) < tolerance_zero] for j in range(len(solutions))]
    solutions_real = [solutions_real_j for solutions_real_j in (solutions_real) if len(solutions_real_j) == dimension]
    solutions_real = [[0 if abs(i) < tolerance_zero else i for i in j] for j in solutions_real]
    solutions_real = list(np.unique(np.around(solutions_real, decimal_places), axis=0))
    if save_file is True:
        #save information into csv file
        # NOTE(review): method_used is only bound inside the stepping loop above; if that
        # loop body never runs, the line below would raise NameError — confirm intended.
        other_info = ['Function Used'] + input_functions + [''] + ['Time Taken'] + [time_end - time_start] + [''] + \
        ['Root Finding Method Used'] + [method_used] + [''] + ['Worst Accuracy'] + [max_remainder_value] + \
        [''] + ['Number of Homotopy Steps'] + [number_of_steps] + [''] + ['Number of Roots Found'] + [num_of_roots_found] \
        + [''] + ['Number of Unique Roots'] + [num_of_unique_roots]
        total_length = max(len(other_info), num_of_roots_found)
        other_info = other_info + list(np.full(total_length - len(other_info), ''))
        solutions_unique_s = list(solutions_unique) + list(np.full(total_length - num_of_unique_roots, ''))
        solutions_real_s = solutions_real + list(np.full(total_length - len(solutions_real), ''))
        accuracies_s = accuracies + list(np.full(total_length - num_of_unique_roots, ''))
        paths_s = list(paths) + list(np.full(total_length - num_of_unique_roots, ''))
        solutions_s = solutions + list(np.full(total_length - num_of_roots_found, ''))
        df = pd.DataFrame({'Roots' : solutions_s, 'Unique Roots': solutions_unique_s, 'Real Roots' : solutions_real_s, 'Accuracy' : accuracies_s, 'Paths' : paths_s, 'Other Info' : other_info})
        df.to_csv(file_name + '.csv', index=True)
    return solutions_real
| true |
273cf8f40fd829c73dc92954383137756cac771d | Python | yashcholera3074/python-practical | /exp12_usingListComprehensions.py | UTF-8 | 392 | 3.875 | 4 | [] | no_license | num=int(input("enter a number to check number is prime or not:"))
prime_list=[i for i in range (2,(num//2)+1) if num%i==0]
def isPrime(num):
if num>1:
if len(prime_list)!=0:
print("{} is not prime number".format(num))
else:
print("{} is prime number".format(num))
else:
print("{} is not prime number".format(num))
isPrime(num) | true |
ffd1d1f1a2691119a6cf75191b315351f54eb79b | Python | ginapaal/series | /data/data_inserter.py | UTF-8 | 5,530 | 2.8125 | 3 | [] | no_license | import requests
from data_manager import *
from init_db import *
import datetime
import os
# Trakt API request headers; the API key is read from the environment (TRAKT_API_KEY).
headers = {
    'Content-Type': 'application/json',
    'trakt-api-version': '2',
    'trakt-api-key': os.environ.get('TRAKT_API_KEY')
}
# Base URL for all Trakt API requests.
trakt_api_url = 'https://api.trakt.tv'
def get_show_entity(show):
    """Flatten a Trakt show payload into the row dict used by insert_shows.

    'year' is stored as a date (Jan 1 of the show's first-air year).
    """
    entity = {
        'id': show['ids']['imdb'],
        'title': show['title'],
        'year': datetime.date(show['year'], 1, 1),
    }
    for field in ('overview', 'runtime', 'trailer', 'homepage', 'rating'):
        entity[field] = show[field]
    return entity
def get_genre_ids(genre_list):
    """Look up database ids for the given genre names (title-cased to match the genres table)."""
    names = tuple(name.title() for name in genre_list)
    rows = execute_select("SELECT id FROM genres WHERE name IN %s;", (names,))
    return [row[0] for row in rows]
def insert_show_genres(genre_ids, show_entity):
    """Link a show to each of its genres via the show_genres join table."""
    statement = """INSERT INTO show_genres (show_id, genre_id) VALUES (%(show_id)s, %(genre_id)s);"""
    for genre_id in genre_ids:
        params = {'show_id': show_entity['id'], 'genre_id': genre_id}
        execute_dml_statement(statement, params)
def insert_shows(limit=20):
    """Fetch the `limit` most popular shows from Trakt, insert each into the
    shows table (plus its genre links), and return the inserted IMDb ids."""
    url = trakt_api_url + '/shows/popular?limit={limit}&extended=full'.format(limit=limit)
    shows_request = requests.get(url, headers=headers)
    inserted_ids = []
    for show in shows_request.json():
        show_entity = get_show_entity(show)
        inserted_ids.append(show_entity['id'])
        statement = """INSERT INTO shows (id,
                                        title,
                                        year,
                                        overview,
                                        runtime,
                                        trailer,
                                        homepage,
                                        rating)
                        VALUES (%(id)s,
                                %(title)s,
                                %(year)s,
                                %(overview)s,
                                %(runtime)s,
                                %(trailer)s,
                                %(homepage)s,
                                %(rating)s);"""
        execute_dml_statement(statement, show_entity)
        # Genres come back as a list of names on the show payload.
        genre_ids = get_genre_ids(show['genres'])
        insert_show_genres(genre_ids, show_entity)
    return inserted_ids
def get_season_entity(season, show_id):
    """Map a Trakt season payload (plus the owning show id) onto the seasons-table row dict."""
    entity = {
        'season_number': season['number'],
        'title': season['title'],
        'overview': season['overview'],
        'episode_count': season['episode_count'],
    }
    entity['show_id'] = show_id
    return entity
def get_season_id(show_id, season_number):
    """Return the primary key of the given season of a show.

    Matches show_id as a LIKE prefix (show_id + '%'). Assumes at least one
    matching row exists; raises IndexError otherwise.
    """
    stmt = """
        SELECT id
        FROM seasons
        WHERE show_id LIKE %(show_id)s
        AND season_number = %(season_number)s;
    """
    params = {
        'show_id': show_id + '%',
        'season_number': season_number
    }
    result = execute_select(stmt, params)
    # First column of the first row is the season id.
    return result[0][0]
def insert_episodes(show_id):
    """Fetch every season's episode list for a show from Trakt and insert each
    episode into the episodes table (untitled episodes are stored as '-')."""
    url = trakt_api_url + '/shows/{show_id}/seasons?extended=episodes'.format(show_id=show_id)
    episode_request = requests.get(url, headers=headers)
    for season in episode_request.json():
        season_id = get_season_id(show_id, season['number'])
        for episode in season['episodes']:
            # COALESCE substitutes '-' when the episode title is NULL.
            stmt = """
                INSERT INTO episodes (title, episode_number, season_id)
                SELECT COALESCE(%(title)s, '-'),
                       %(episode_number)s,
                       %(season_id)s;
            """
            params = {
                'title': episode['title'],
                'episode_number': episode['number'],
                'season_id': season_id
            }
            execute_dml_statement(stmt, params)
def insert_seasons(show_ids):
    """For each show id, fetch its seasons from Trakt, insert them into the
    seasons table, and then insert the show's episodes.

    NOTE(review): show_seasons is never populated, so this always returns an
    empty dict — presumably vestigial; confirm callers do not rely on it.
    """
    show_seasons = {}
    for show_id in show_ids:
        url = trakt_api_url + '/shows/{show_id}/seasons?extended=full'.format(show_id=show_id)
        season_request = requests.get(url, headers=headers)
        for season in season_request.json():
            stmt = """INSERT INTO seasons (season_number,
                                            title,
                                            overview,
                                            show_id)
                        VALUES (%(season_number)s,
                                %(title)s,
                                %(overview)s,
                                %(show_id)s);"""
            season_entity = get_season_entity(season, show_id)
            execute_dml_statement(stmt, season_entity)
        insert_episodes(show_id)
    return show_seasons
def insert_genres():
    """Fetch the Trakt movie-genre list and insert each genre name into the genres table."""
    url = trakt_api_url + '/genres/movies'
    genre_request = requests.get(url, headers=headers)
    statement = "INSERT INTO genres (name) VALUES (%(name)s);"
    for genre in genre_request.json():
        execute_dml_statement(statement, {'name': genre['name']})
def main():
    """Initialize the database and load genres, shows, and seasons/episodes in order."""
    init_db()
    create_schema()
    insert_genres()
    print("genres data inserted")
    inserted_show_ids = insert_shows(limit=20)
    print("show data inserted")
    show_seasons = insert_seasons(inserted_show_ids)
    print('season data inserted')
if __name__ == '__main__':
    main()
| true |
94707923ad8945e4675d9bcb0033aa73f7902cb4 | Python | andutzu7/Lucrare-Licenta-MusicRecognizer | /NN/NNModule/Metrics/ActivationSoftmaxCategoricalCrossentropy.py | UTF-8 | 661 | 2.90625 | 3 | [] | no_license | import numpy as np
# Softmax classifier - combined Softmax activation
# and cross-entropy loss for faster backward step
class Activation_Softmax_Loss_CategoricalCrossentropy():
    """Combined Softmax activation and categorical cross-entropy loss.

    Combining the two yields the simple gradient (softmax_output - y_true),
    which backward() computes directly for a faster backward step.
    """
    def backward(self, derivated_values, y_true):
        """Compute dL/dz into self.derivated_inputs.

        derivated_values: softmax outputs, shape (samples, classes).
        y_true: sparse class indices (1-D) or one-hot rows (2-D).
        """
        n_samples = len(derivated_values)
        labels = y_true
        # One-hot targets are converted to sparse class indices.
        if len(labels.shape) == 2:
            labels = np.argmax(labels, axis=1)
        gradients = derivated_values.copy()
        # Subtract 1 at each sample's true class: softmax - one_hot(y).
        # NOTE(review): many references also divide by n_samples here; confirm
        # that batch-size scaling is handled elsewhere (e.g. in the optimizer).
        gradients[range(n_samples), labels] -= 1
        self.derivated_inputs = gradients
5d52a6806a9fafdec35f94f347f0a798b41156c5 | Python | caervs/pivot | /pivot/lexicon/expression.py | UTF-8 | 4,146 | 3.234375 | 3 | [] | no_license | """
Models for symbols and symbolic expressions
"""
import importlib
from fractions import Fraction
from replicate.replicable import Replicable, preprocessor
PRIMITIVE_EXPRESSION_TYPES = (int, float, Fraction)
class Expression(Replicable):
    """
    A mathematical expression. May be operationally composed with other expressions
    """
    # Arithmetic operators build OperationalExpression AST nodes instead of
    # evaluating; the reflected (r-) variants reverse the argument order.
    __add__ = lambda *args: OperationalExpression('+', *args)
    __sub__ = lambda *args: OperationalExpression('-', *args)
    __mul__ = lambda *args: OperationalExpression('*', *args)
    __truediv__ = lambda *args: OperationalExpression('/', *args)
    __radd__ = lambda *args: OperationalExpression('+', *reversed(args))
    __rsub__ = lambda *args: OperationalExpression('-', *reversed(args))
    __rmul__ = lambda *args: OperationalExpression('*', *reversed(args))
    __rtruediv__ = lambda *args: OperationalExpression('/', *reversed(args))
    def __hash__(self):
        # Hash on the Replicable-provided attribute mapping (self.parts).
        return hash(frozenset(self.parts.items()))
    def __eq__(self, other):
        # Note: `==` does NOT return a bool — it builds an Equation object,
        # carrying structural equality in its `reflexive` flag.
        # Imported lazily, presumably to avoid a circular import with this module.
        equation = importlib.import_module("pivot.lexicon.equation")
        same_exp = super().__eq__(other)
        return equation.Equation(self, other, reflexive=same_exp)
    @property
    def variables(self):
        """
        Return all variables in the Expression
        Must be implemented by individual subclasses
        """
        raise NotImplementedError
class Variable(Expression):
    """
    A single variable
    """
    @preprocessor
    def preprocess(name):
        """
        Preprocess Variable attributes
        """
        pass
    def __repr__(self):
        return self.name
    def __getattr__(self, attr_name):
        # Dunder/private lookups fall through to the normal mechanism; any other
        # attribute access produces a symbolic VariableAttribute node (e.g. v.x).
        if attr_name.startswith("_"):
            return getattr(super(), attr_name)
        return VariableAttribute(self, attr_name)
    @property
    def variables(self):
        """
        Return all variables in the Variable (namely a set with itself)
        """
        return {self}
    @property
    def attr_chain(self):
        """
        Return a tuple of variable names starting with the
        root variable name and appending each successive
        attribute name
        """
        return (self.name, )
class VariableAttribute(Variable):
    """
    The attribute of a variable (which is also a variable)
    """
    @property
    def attr_chain(self):
        """
        Return a tuple of variable names starting with the
        root variable name and appending each successive
        attribute name
        """
        # Recurses through the parent variable, so chained accesses like
        # a.b.c yield ('a', 'b', 'c').
        return self.variable.attr_chain + (self.attr_name, )
    @preprocessor
    def preprocess(variable, attr_name):
        """
        Preprocess VariableAttribute attributes
        """
        pass
    def __repr__(self):
        return "{}.{}".format(self.variable, self.attr_name)
class OperationalExpression(Expression):
    """
    An operational composition of two expressions
    """
    @preprocessor
    def preprocess(operator, *arguments):
        """
        Preprocess OperationalExpression attributes
        """
        pass
    def __repr__(self):
        # Render infix-style, e.g. "a + b + c" for operator '+'.
        delimiter = " {} ".format(self.operator)
        return delimiter.join(map(repr, self.arguments))
    @property
    def variables(self):
        """
        Return all variables in the expression
        """
        # Non-Expression arguments (e.g. numeric literals) carry no variables.
        isexpression = lambda arg: isinstance(arg, Expression)
        expressions = filter(isexpression, self.arguments)
        return set().union(*(arg.variables for arg in expressions))
class Vector(Expression):
    """
    An expression denoting an ontological Vector (i.e. an expression that is an
    enumeration of subexpressions)
    """
    @preprocessor
    def preprocess(*items):
        """
        Preprocess Vector attributes
        """
        pass
    def __repr__(self):
        return "V({})".format(", ".join(map(repr, self.items)))
    @property
    def variables(self):
        """
        Return all variables in the expression
        """
        # Non-Expression items (e.g. numeric literals) carry no variables.
        isexpression = lambda arg: isinstance(arg, Expression)
        expressions = filter(isexpression, self.items)
        return set().union(*(item.variables for item in expressions))
| true |
371533ff6e1248b357bca701b99d62cda9461db8 | Python | piyal-source/Python-programs | /Graph/DFS for undirected graph.py | UTF-8 | 1,010 | 3.65625 | 4 | [] | no_license | class Graph:
def __init__(self, vertices):
self.n = vertices
self.graph = [[False for _ in range(self.n)] for _ in range(self.n)]
def add_edge(self,start,end):
self.graph[start][end] = True
self.graph[end][start] = True
def print_graph(self):
for i in self.graph:
print(i)
print()
def dfs(self,source):
visited = {source}
stack = [source]
while stack:
top = stack.pop()
print(top, end=" ")
if len(visited) < self.n:
for i in range(self.n-1,-1,-1):
if i not in visited and self.graph[top][i] == True:
stack.append(i)
visited.add(i)
print()
# Demo: build a 6-vertex undirected graph, show its matrix, then DFS from vertex 0.
vertices = 6
g = Graph(vertices)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 3)
g.add_edge(1, 4)
g.add_edge(2, 4)
g.add_edge(3, 4)
g.add_edge(3, 5)
g.add_edge(4, 5)
g.print_graph()
g.dfs(0)
| true |
6e323a6b2aed06efba8e65573d98bca1e81843c6 | Python | keenajiao/Python | /20_DataStructureAlgorithm/2061_bubble_sort.py | UTF-8 | 654 | 3.53125 | 4 | [] | no_license | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 2061_bubble_sort.py
@Time : 2019/10/24 12:57
@Author : Crisimple
@Github : https://crisimple.github.io/
@Contact : Crisimple@foxmail.com
@License : (C)Copyright 2017-2019, Micro-Circle
@Desc : None
"""
"""冒泡排序
最优时间复杂度:O(n)
最坏时间复杂度:O(n**2)
"""
def bubble_sort(alist):
    """Sort alist in place (ascending) with bubble sort.

    Adds an early-exit flag: if a full pass makes no swap the list is already
    sorted, giving the O(n) best case stated in the module docstring (the
    original always ran all passes, so its best case was O(n**2)).
    """
    for i in range(len(alist)-1, 0, -1):
        swapped = False
        for j in range(i):
            if alist[j] > alist[j+1]:
                alist[j], alist[j+1] = alist[j+1], alist[j]
                swapped = True
        if not swapped:
            return
if __name__ == "__main__":
li = [54, 26, 93, 17, 77, 31, 44, 55, 20]
bubble_sort(li)
print(li)
| true |
9be437d9aa83a67a9b5ee55f058596aed8fdcc8e | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_157/607.py | UTF-8 | 1,249 | 2.796875 | 3 | [] | no_license | infile = 'C-small-attempt0.in'
outfile = 'C-small-out.txt'
import math
# Quaternion multiplication table: quat[a+b] = [sign, unit] for basis units i, j, k
# (e.g. i*j = +k, j*i = -k, i*i = -1).
quat = {'ii': [-1, '1'],
        'ij': [1, 'k'],
        'ik' : [-1,'j'],
        'ji': [-1, 'k'],
        'jj': [-1, '1'],
        'jk': [1, 'i'],
        'ki': [1, 'j'],
        'kj': [-1, 'i'],
        'kk': [-1, '1']}
def prod(x, letter):
    """Multiply a signed quaternion unit x = [sign, unit] by a basis letter.

    Returns the product in the same [sign, unit] form, using the module-level
    quat multiplication table.
    """
    sign, unit = x
    if unit == '1':
        return [sign, letter]
    table_sign, table_unit = quat[unit + letter]
    return [sign * table_sign, table_unit]
def check(ph, times):
    """Return 'YES' if the phrase repeated `times` times can be partitioned into
    three consecutive segments whose quaternion products are i, j, k (in that
    order), else 'NO'. (Python 2: uses xrange.)"""
    phrase = ph*times
    if len(phrase) < 3:
        return 'NO'
    target = 'ijk'
    k = 0                # how many of the targets i, j (then k) have been matched
    reset = True         # True when a new segment's running product must be started
    frag = ''            # running product of the current segment, as [sign, unit]
    for i in xrange(len(phrase)):
        if reset:
            if frag == '':
                # Start the new segment with this letter.
                frag = [1, phrase[i]]
            else:
                reset = False
        if not reset:
            frag = prod(frag, phrase[i])
        if k <= 1:
            # Segment product equals the next target unit with sign +1: cut here.
            if frag[0] == 1 and frag[1] == target[k]:
                reset = True
                k += 1
                frag = ''
        #print i, frag, reset
    # After i and j matched, the remaining product must be exactly +k.
    if k == 2 and frag[0] == 1 and frag[1] == target[k]:
        return 'YES'
    else:
        return 'NO'
def main():
    """Read the Code Jam input file and write one 'Case #n: YES/NO' line per case.

    NOTE(review): neither file handle is explicitly closed. (Python 2: xrange.)
    """
    out = open(outfile, 'w')
    f = open(infile)
    N = int(f.readline())
    for n in xrange(N):
        # Each case: a line "L X" (only X, the repeat count, is used) then the phrase.
        times = int(f.readline().split()[1])
        ph = f.readline().strip()
        #print times, ph
        out.write("Case #"+str(n+1)+": "+check(ph, times)+"\n")
main()
| true |
32cb4644d264d4606cbf94a34c502b427101a302 | Python | monksevillair/monksevillair.github.io | /src/check_email.py | UTF-8 | 3,495 | 2.578125 | 3 | [] | no_license | '''
sudo apt install python3-pip
pip3 install imap_tools
pip3 install genanki
'''
from imap_tools import MailBox, AND
import random
import time
import sys
from datetime import date
today = date.today()
import os
import smtplib
from pathlib import Path
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import encoders
DIR = "./decks/"
class parseMessage:
    """Fetch unseen Gmail messages and archive backtick-tagged ones to disk.

    Credentials come from argv[1] (email) and argv[2] (password).
    """
    def __init__(self):
        self.email = sys.argv[1]
        self.password = sys.argv[2]
        mailbox = MailBox('imap.gmail.com')
        mailbox.login(self.email, self.password, initial_folder='INBOX') # or mailbox.folder.set instead 3d arg
        msgs = [msg for msg in mailbox.fetch(AND(seen=False))]
        for msg in msgs:
            # Subjects of the form `topic` title are archived under
            # topic/YYYY-mm-dd-title/ with the body as main.md plus attachments.
            if "`" in msg.subject:
                topic = msg.subject.split("`")[1]
                # NOTE(review): str.strip(topic) strips *characters* of `topic`,
                # not the substring — confirm this produces the intended names.
                topic_dir = topic+'/'+today.strftime("%Y-%m-%d")+"-"+msg.subject.strip(topic).strip("`").replace(" ","-")
                if not(os.path.exists(topic_dir) and os.path.isdir(topic_dir)):
                    os.makedirs(topic_dir)
                with open(topic_dir+'/main.md', 'w') as f:
                    f.write(msg.text)
                for att in msg.attachments:
                    with open(topic_dir+'/{}'.format(att.filename), 'wb') as f:
                        f.write(att.payload)
        '''import json
        with open('study_list.json', 'r') as f:
            json_data = json.load(f)
        json_data[msg.subject.strip("`youtube`")] = {'date':today.strftime("%m-%d-%y"), 't':msg.text.strip("\r\n")}
        with open('study_list.json', 'w') as f:
            f.write(json.dumps(json_data))'''
        mailbox.logout()
    def send_mail(self, send_from, send_to, subject, message, username, password, files=[],server="smtp.gmail.com", port=587, use_tls=True):
        """Compose and send email with provided info and attachments.

        NOTE(review): files=[] is a mutable default argument — harmless here since
        it is only iterated, but worth replacing with None.

        Args:
            send_from (str): from name
            send_to (list[str]): to name(s)
            subject (str): message title
            message (str): message body
            files (list[str]): list of file paths to be attached to email
            server (str): mail server host name
            port (int): port number
            username (str): server auth username
            password (str): server auth password
            use_tls (bool): use TLS mode
        """
        msg = MIMEMultipart()
        msg['From'] = send_from
        msg['To'] = send_to #COMMASPACE.join(send_to)
        msg['Date'] = formatdate(localtime=True)
        msg['Subject'] = subject
        msg.attach(MIMEText(message))
        for path in files:
            # Each attachment is base64-encoded as a generic octet-stream part.
            part = MIMEBase('application', "octet-stream")
            print(path)
            with open(path, 'rb') as file:
                part.set_payload(file.read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename={}'.format(Path(path).name))
            msg.attach(part)
        smtp = smtplib.SMTP(server, port)
        if use_tls:
            smtp.starttls()
        smtp.login(username, password)
        smtp.sendmail(send_from, send_to, msg.as_string())
        smtp.quit()
if __name__ == '__main__':
    # Single fetch per invocation; the commented loop/sleep would poll continuously.
    #while True:
    p = parseMessage()
    #time.sleep(20)
| true |
27f2f742bab156f323d9f827f0dcbc558f132f2d | Python | kchhero/suker_python_project | /CodeJams/EulerProject/my_problem_12.py | UTF-8 | 2,047 | 2.96875 | 3 | [] | no_license | import math
def suker_factorization(n) :
    """Return (exponent + 1) for each prime factor of n.

    The product of the returned list is the divisor count of n.
    (Python 2: relies on integer `/` division.)
    """
    remaining = n
    exponent = 0
    limit = n/2
    counts = []
    for candidate in range(2, limit + 1):
        if suker_isPrimeNum(candidate) == 1:
            while remaining % candidate == 0:
                exponent += 1
                remaining = remaining/candidate
            if exponent != 0:
                counts.append(exponent + 1)
                exponent = 0
        if remaining == 1:
            break
    return counts
def suker_isPrimeNum(n) :
    """Return 1 if n is prime, else 0.

    2, 3, 5, 7 are special-cased; other even numbers are rejected; the rest
    are trial-divided up to sqrt(n). (Note: n == 1 also yields 1, matching
    the original behaviour.)
    """
    if n in (2, 3, 5, 7):
        return 1
    if n % 2 == 0:
        return 0
    for candidate in range(3, int(math.sqrt(n)) + 1):
        if n % candidate == 0:
            return 0
    return 1
# Project Euler 12: find the first triangular number with over 500 divisors.
# triDigitNum accumulates the i-th triangular number; tempNum is its divisor
# count (product of exponent+1 over its prime factorization). (Python 2 prints.)
maxNum = 0
triDigitNum = 0
for i in range(1,100000) :
    triDigitNum += i
    tempNum = 1
    if triDigitNum>1000 :
        for j in suker_factorization(triDigitNum) :
            tempNum *= j
        if maxNum < tempNum :
            maxNum = tempNum
    if maxNum >=500 :
        print "tri Num : ",triDigitNum," step : ",i, "cnt : ",maxNum
        break
    print "tri Num : ",triDigitNum," step : ",i, "cnt : ",maxNum
"""
tri Num : 1035 step : 45 cnt : 12
tri Num : 1128 step : 47 cnt : 16
tri Num : 1176 step : 48 cnt : 24
tri Num : 2016 step : 63 cnt : 36
tri Num : 3240 step : 80 cnt : 40
tri Num : 5460 step : 104 cnt : 48
tri Num : 25200 step : 224 cnt : 90
tri Num : 73920 step : 384 cnt : 112
tri Num : 157080 step : 560 cnt : 128
tri Num : 437580 step : 935 cnt : 144
tri Num : 749700 step : 1224 cnt : 162
tri Num : 1385280 step : 1664 cnt : 168
tri Num : 1493856 step : 1728 cnt : 192
tri Num : 2031120 step : 2015 cnt : 240
tri Num : 2162160 step : 2079 cnt : 320
tri Num : 17907120 step : 5984 cnt : 480
tri Num : 76576500 step : 12375 cnt : 576
"""
| true |
d1df36c75f81e14d51a7017f1e5daf608650ebb7 | Python | mas178/Fragments | /sample_blockchane/simplest/verifier.py | UTF-8 | 2,578 | 2.53125 | 3 | [
"MIT"
] | permissive | from simplest.transaction import Transaction, FeeTransaction, SignedTransaction
class Verifier:
    """A network node that collects signed transactions and verifies blocks.

    open() and sign() are stubs here (they return None); since
    receive_signed_trx treats a None result of open() as invalid, this base
    class rejects everything — presumably these are overridden or patched
    elsewhere; confirm.
    """
    def __init__(self):
        self.__last_block = None          # most recent validated block
        self.__unconfirmed_trxs = []      # signed transactions awaiting a block
        self.name = None
        self.network = None
    def open(self, signed_trx: SignedTransaction) -> Transaction:
        # Stub: decrypt/verify a signed transaction; returns None here.
        pass
    def sign(self, tx: Transaction) -> SignedTransaction:
        # Stub: sign a transaction; returns None here.
        pass
    def receive_signed_trx(self, signed_trx: SignedTransaction) -> None:
        """Queue a signed transaction for verification, unless invalid or self-involved."""
        trx = self.open(signed_trx)
        if trx is None:
            print('[Verifier.receive_signed_trx] {name}: received invalid transaction {signed_trx}'.format(name=self.name, signed_trx=signed_trx))
            return
        if signed_trx.signer == self or trx.counter_party == self:
            print('[Verifier.receive_signed_trx] {name}: not going to verify this transaction as I\'m involved'.format(name=self.name))
            return
        self.__unconfirmed_trxs.append(signed_trx)
        print('[Verifier.receive_signed_trx] {name}: unconfirmed transactions {unconfirmed_trxs}'.format(
            name=self.name, unconfirmed_trxs=self.__unconfirmed_trxs))
    def receive_block(self, block: 'Block') -> None:
        """Validate an announced block; on success adopt it and drop its transactions
        from the unconfirmed queue."""
        # validate block
        if not block.validate():
            print('[Verifier.receive_block] {name}: !!! invalid {block} is found !!!'.format(name=self.name, block=block))
            return
        print('[Verifier.receive_block] {name}: validated {block} is valid'.format(name=self.name, block=block))
        self.__last_block = block
        # if a transaction is already confirmed in a given block, remove it from unconfirmedTxs
        self.__unconfirmed_trxs = list(set(self.__unconfirmed_trxs).difference(set(block.trxs)))
        for signed_trx in [trx for trx in block.trxs if trx.signer == self]:
            print('[Verifier.receive_block] {name}: my trx "{trx}" is validated by network!'.format(name=self.name, trx=self.open(signed_trx)))
    def verify_message_trxs(self) -> None:
        """Mint a fee transaction for all queued transactions, bundle everything
        into a new block, and announce it to the network."""
        # TODO: verify no double spend
        total_fee = sum([self.open(trx).fee for trx in self.__unconfirmed_trxs])
        fee_trx = FeeTransaction(self, total_fee)
        encrypted = self.sign(fee_trx)
        print('\n[Verifier.verify_message_trxs] {name}: created {encrypted}'.format(name=self.name, encrypted=encrypted))
        # Imported here rather than at module level (Block also references this module).
        from simplest.block import Block
        self.__unconfirmed_trxs.append(encrypted)
        block = Block(self.__unconfirmed_trxs, self.__last_block)
        self.__unconfirmed_trxs = []
        self.network.announce_block(block)
| true |
5824bc424b213e111d990b6babd42295a8c24ed6 | Python | AotY/Play_Interview | /Sentiment_Bayes/sentiment.py | UTF-8 | 1,289 | 2.796875 | 3 | [] | no_license | from bayes import Bayes
from seg import Seg
import os
# Location of the stop-word list, next to this module.
stop_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         'stopwords.txt')
# One stop word per line; leading/trailing whitespace is stripped.
with open(stop_path, 'r', encoding='utf-8') as fr:
    stop_words = {line.strip() for line in fr}
class Sentiment:
    """Naive-Bayes sentiment classifier over segmented, stop-word-filtered text."""

    def __init__(self):
        self.classifier = Bayes()
        self.seg = Seg()
        self.seg.load('seg.pickle')

    def save(self, fname):
        """Persist the trained classifier to disk."""
        self.classifier.save(fname)

    def load(self, fname):
        """Replace the classifier with one loaded from disk."""
        self.classifier = self.classifier.load(fname)

    def handle(self, doc):
        """Segment a document and drop stop words."""
        return self.filter_stop(self.seg.seg(doc))

    def train(self, neg_docs, pos_docs):
        """Train on negative documents followed by positive ones."""
        datas = [[self.handle(doc), 'neg'] for doc in neg_docs]
        datas += [[self.handle(doc), 'pos'] for doc in pos_docs]
        self.classifier.train(datas)

    def classify(self, doc):
        """Return the probability that *doc* is positive."""
        ret, prob = self.classifier.classify(self.handle(doc))
        return prob if ret == 'pos' else 1 - prob

    @staticmethod
    def filter_stop(words):
        """Remove module-level stop words from a token list."""
        return [w for w in words if w not in stop_words]
| true |
c55e220d1abd2a88b7ee6dd1ba6273b81af142e6 | Python | Ferrari1996gk/NLPOffensEval | /data_process.py | UTF-8 | 5,429 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/15 17:36
# @Author : Kang
# @Site :
# @File : data_process.py
# @Software: PyCharm
import pandas as pd
import numpy as np
from nltk.stem import PorterStemmer
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')  # keep runs of word characters, drop punctuation
stemmer = PorterStemmer()            # Porter stemmer to collapse inflected forms
def get_word2idx():
    """Build the shared token->index vocabulary over the train and all test sets.

    Every tweet is tokenized (word characters only), lowercased and Porter-stemmed.
    Index 0 is reserved for the '<pad>' token.

    :returns: (word2idx mapping, tokenized corpus as list of token lists)
    """
    print('Getting word2idx with train and test set------')
    train_path = 'OffensEval_task_data/start-kit/training-v1/offenseval-training-v1.tsv'
    testa_path = 'OffensEval_task_data/Test A Release/testset-taska.tsv'
    testb_path = 'OffensEval_task_data/Test B Release/testset-taskb.tsv'
    testc_path = 'OffensEval_task_data/Test C Release/test_set_taskc.tsv'
    train = pd.read_csv(train_path, sep='\t', index_col=False)
    testa = pd.read_csv(testa_path, sep='\t', index_col=False)
    testb = pd.read_csv(testb_path, sep='\t', index_col=False)
    testc = pd.read_csv(testc_path, sep='\t', index_col=False)
    raw_data = pd.concat([train[['id', 'tweet']], testa, testb, testc])
    corpus = list(raw_data.tweet)
    tokenized_corpus = []
    for sentence in corpus:
        tmp_tokens = tokenizer.tokenize(sentence)
        tokenized_corpus.append([stemmer.stem(tok.lower()) for tok in tmp_tokens])
    # First-occurrence-ordered vocabulary.  A membership set makes this O(n)
    # instead of the original quadratic `token not in vocabulary` list scan.
    seen = set()
    vocabulary = []
    for sentence in tokenized_corpus:
        for token in sentence:
            if token not in seen:
                seen.add(token)
                vocabulary.append(token)
    word2idx = {w: idx + 1 for (idx, w) in enumerate(vocabulary)}
    word2idx['<pad>'] = 0  # index 0 reserved for padding
    return word2idx, tokenized_corpus
class DataHandle:
    """Load one OffensEval TSV file, tokenize/stem its tweets and expose the
    resulting corpus, vocabulary and (optionally shared) word2idx mapping."""

    def __init__(self, path='OffensEval_task_data/start-kit/training-v1/offenseval-training-v1.tsv', word2idx=None):
        self.data_path = path
        self.raw_data = self.read_data()
        self.corpus = self.get_corpus()
        self.tokenized_corpus = self.tokenize()
        self.vocabulary = self.get_vocabulary()
        if word2idx is not None:
            self.word2idx = word2idx
        else:
            # get_word2idx() returns (mapping, tokenized_corpus).  The original
            # code stored the whole tuple here, breaking every later lookup;
            # keep only the mapping.
            self.word2idx, _ = get_word2idx()

    def read_data(self):
        """Read the TSV file into a DataFrame."""
        return pd.read_csv(self.data_path, sep='\t', index_col=False)

    def get_corpus(self):
        """Return the tweets as a plain list of strings."""
        print('------------Begin to get corpus-----------')
        return list(self.raw_data.tweet)

    def tokenize(self):
        """Tokenize, lowercase and Porter-stem every tweet."""
        print('------------Begin to tokenize corpus--------------')
        tokenized_corpus = []
        for sentence in self.corpus:
            tmp_tokens = tokenizer.tokenize(sentence)
            tokenized_corpus.append([stemmer.stem(tok.lower()) for tok in tmp_tokens])
        return tokenized_corpus

    def get_vocabulary(self):
        """Collect unique tokens in first-occurrence order.

        Uses a membership set for O(n) total work instead of the original
        quadratic `token not in vocabulary` list scan.
        """
        print('------------Begin to get vocabulary--------------')
        seen = set()
        vocabulary = []
        for sentence in self.tokenized_corpus:
            for token in sentence:
                if token not in seen:
                    seen.add(token)
                    vocabulary.append(token)
        return vocabulary
def get_task_data(train=True, task='a', word2idx=None):
    """
    Get the data for train/test and task a/b/c.
    For training in task c, labels are integer classes (see onehot_encode).

    :param train: if True, training data; if False, testing data
    :param task: 'a' / 'b' / 'c'
    :param word2idx: the shared vocabulary mapping (token -> index)
    :return: training: (tokenized corpus, labels); test: (tokenized corpus, None)
    """
    print('---------------Prepare data for task '+task+'---------------')
    if train:
        print('---------You are requiring train data!---------')
        obj = DataHandle(word2idx=word2idx)
        all_text = obj.tokenized_corpus
        col = 'subtask_' + task
        # Map each subtask's raw string labels to integer classes.
        if task == 'a':
            initial_labels = obj.raw_data[col].dropna().apply(lambda x: 1 if x == 'OFF' else 0)
        elif task == 'b':
            initial_labels = obj.raw_data[col].dropna().apply(lambda x: 1 if x == 'TIN' else 0)
        else:
            initial_labels = obj.raw_data[col].dropna().apply(lambda x: 0 if x == 'IND' else 1 if x == 'GRP' else 2)
        # Keep the tweets whose label survived dropna().  Indexing the list
        # directly avoids the original round-trip through a ragged np.array.
        # (Valid because read_csv(index_col=False) gives a positional RangeIndex.)
        text = [all_text[i] for i in initial_labels.index]
        train_labels = list(initial_labels)
        return text, train_labels
    else:
        print('---------You are requiring test data!---------')
        if task == 'a':
            test_path = 'OffensEval_task_data/Test A Release/testset-taska.tsv'
        elif task == 'b':
            test_path = 'OffensEval_task_data/Test B Release/testset-taskb.tsv'
        else:
            test_path = 'OffensEval_task_data/Test C Release/test_set_taskc.tsv'
        obj = DataHandle(path=test_path, word2idx=word2idx)
        return obj.tokenized_corpus, None
def onehot_encode(label):
    """One-hot encode class ids: 0 -> [1,0,0], 1 -> [0,1,0], anything else -> [0,0,1]."""
    encoded = []
    for x in label:
        vec = [0, 0, 0]
        vec[x if x in (0, 1) else 2] = 1
        encoded.append(vec)
    return encoded
if __name__ == '__main__':
    # Smoke test: build the shared vocabulary, persist it as JSON, then load
    # the task-c test split.
    import json
    word2idx, _ = get_word2idx()
    print(len(word2idx))
    with open('word2idx.json', 'w') as f:
        json.dump(word2idx, f)
    f.close()  # redundant: the with-statement has already closed the file
    # ex = DataHandle(word2idx=word2idx)
    # print(len(ex.word2idx))
    data, label = get_task_data(word2idx=word2idx, task='c', train=False)
    print(data[20:25])
    print(label)
    # print(onehot_encode(label))
    # print(ex.vocabulary)
    # print(ex.word2idx)
| true |
2aa8c12348354a22902cc6f44b882ab4b4b40496 | Python | RyanSaxe/CubeCobraRecommender | /src/scripts/similarity.py | UTF-8 | 815 | 2.515625 | 3 | [] | no_license | import json
from tensorflow.keras.models import load_model
from tensorflow.keras.losses import CosineSimilarity
import sys
import numpy as np
# CLI: <card_name_with_underscores> <N>
args = sys.argv[1:]
name = args[0].replace('_',' ')  # underscores stand in for spaces on the command line
N = int(args[1])                 # how many nearest neighbours to print
# id <-> card-name maps saved alongside the trained recommender
int_to_card = json.load(open('ml_files/recommender_id_map.json','r'))
int_to_card = {int(k):v for k,v in int_to_card.items()}
card_to_int = {v:k for k,v in int_to_card.items()}
num_cards = len(int_to_card)
model = load_model('ml_files/high_req')
# identity matrix: row i is the one-hot input vector for card i
cards = np.zeros((num_cards,num_cards))
np.fill_diagonal(cards,1)
dist_f = CosineSimilarity()
# embed all cards at once through the model's encoder
embs = model.encoder(cards)
idx = card_to_int[name]
# NOTE(review): Keras CosineSimilarity is a *loss* (negated similarity), so
# smaller values mean "more similar"; the ascending argsort below therefore
# ranks the closest cards first.  Confirm this matches the intent.
dists = np.array([
    dist_f(embs[idx],x).numpy() for x in embs
])
ranked = dists.argsort()
for i in range(N):
    card_idx = ranked[i]
    print(str(i + 1) + ":",int_to_card[card_idx],dists[card_idx])
f792c5469bbf29d8b612d27abcb61f950ae9b35f | Python | LeVanTien126/test2 | /bai2.py | UTF-8 | 487 | 3.28125 | 3 | [] | no_license | import numpy as np
# 100 random integers in [-100, 100)
li = np.random.randint(-100, 100, size=100)
print('Original list', li)
#Q.a
# absolute value of every element
posli = [abs(x) for x in li]
print('Positive list', posli)
def is_prime(n):
    """Return True when n is prime, False otherwise.

    Numbers below 2 (including negatives) are not prime.  Trial division
    only runs while i*i <= n, which is much faster than the original scan
    over every value up to n, and returns the same answers.
    """
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
#.b
# keep only the primes among the absolute values
primes = [x for x in posli if is_prime(x)]
print('Primes:', primes)
#.c
# for each prime, list every element it divides exactly
for p in primes:
    divisible = [x for x in posli if x % p == 0]
    print(p, ':', divisible)
53567aaaad99386eaa664d4892db70567624da12 | Python | heenashree/HRCodes | /find-a-string.py | UTF-8 | 230 | 3.109375 | 3 | [] | no_license | stringA = input()
stringB = input()
X = len(stringA)
Y = len(stringB)
count=0
i=0
A1=0
while i < X+Y and A1>=0:
A1 = stringA.find(stringB,i)
i = A1+Y-1
#print(A1)
count=count+1
print(count-1)
| true |
c17d632f3e40cd7bbb653c1271d83833d56940c6 | Python | Maxnotwell/reinforcement-Learing | /A2C/Policy_cartpole.py | UTF-8 | 613 | 2.5625 | 3 | [] | no_license | import torch.nn as nn
import torch.nn.functional as F
class Policy(nn.Module):
    """Actor-critic network for CartPole: a shared hidden layer feeding a
    softmax policy head (2 actions) and a scalar value head."""

    def __init__(self):
        super(Policy, self).__init__()
        # shared trunk over the 4-dimensional observation
        self.fc1 = nn.Linear(4, 128)
        # actor head: scores over the two discrete actions
        self.action_head = nn.Linear(128, 2)
        # critic head: scalar state-value estimate
        self.value_head = nn.Linear(128, 1)
        # per-episode buffers filled by the training loop
        self.saved_actions = []
        self.saved_rewards = []

    def forward(self, input):
        """Return (action probabilities, state values) for a batch of states."""
        hidden = F.relu(self.fc1(input))
        state_values = self.value_head(hidden)
        action_scores = F.softmax(self.action_head(hidden), dim=-1)
        return action_scores, state_values
| true |
fc8c51da61a82de3385151770a268e16c15256da | Python | YanMiaoW/python-tools | /postman2markdown.py | UTF-8 | 1,439 | 2.859375 | 3 | [] | no_license | import json
import sys
import os
# usage: script <postman_collection.json> [output.md]
if (len(sys.argv) < 2):
    print("请输入文件路径")  # "please provide a file path"
else:
    filePath = sys.argv[1]
    if (not os.path.exists(filePath)):
        print("文件不存在")  # "file does not exist"
    else:
        # load the exported Postman collection
        data = None
        with open(filePath) as f:
            data = json.load(f)
        # assemble the whole markdown document in memory
        md = ""
        md += f"# {data['info']['name']}\n\n"
        md += f"> {data['info']['description']}\n\n"
        md += f"## 所有接口\n"
        for item in data['item']:
            if item['request']['method'] == 'GET':
                md += f"### {item['request']['method']} {item['name']}\n"
                md += f"```\n{item['request']['url']['raw']}\n```\n"
                md += f"#### 说明\n"
                md += f"{item['request']['description']}\n\n"
                md += f"#### 参数列表\n"
                md += "| 参数名 | 值 | 说明 |\n"
                md += "| ----- | ----- | ------ |\n"
                # query entries may lack 'value'/'description'; default to empty
                for query in item['request']['url']['query']:
                    value = query['value'] if 'value' in query else ""
                    description = query['description'] if 'description' in query else ""
                    md += f"| {query['key']}| {value}|{description}|\n"
            else:
                # only GET requests are rendered for now
                md += "post not support\n"
        # second CLI argument overrides the output file name
        outName = sys.argv[2] if len(sys.argv) > 2 else f"{data['info']['name']}.md"
        with open(outName, 'w') as f:
            f.write(md)
68a18bd6c597876c313047891dee54d467fbf891 | Python | jmuth/ML_course | /labs/ex03/template/helpers_muth.py | UTF-8 | 1,593 | 3.15625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""a function of ploting figures."""
import numpy as np
def compute_loss(y, tx, w):
"""Calculate the loss.
You can calculate the loss using mse or mae.
"""
# ***************************************************
# INSERT YOUR CODE HERE
# TODO: compute loss by MSE / MAE
# ***************************************************
# vector e
e = compute_e(y, tx, w)
N = compute_N(e)
L_MSE = np.dot(np.matrix.transpose(e), e)
L_MSE = L_MSE / (2 * N)
return L_MSE
def compute_e(y, tx, w):
return (y - np.dot(tx,w))
def compute_N(e):
return e.shape[0]
def grid_search(y, tx, w0, w1):
"""Algorithm for grid search."""
losses = np.zeros((len(w0), len(w1)))
# ***************************************************
# INSERT YOUR CODE HERE
# TODO: compute loss for each combination of w0 and w1.
# ***************************************************
for i in range(len(w0)):
for j in range(len(w1)):
w = np.array([w0[i], w1[j]])
losses[i, j] = compute_cost(y, tx, w)
return losses
def compute_cost(y, tx, w):
"""calculate the cost.
you can calculate the cost by mse or mae.
"""
# ***************************************************
# INSERT YOUR CODE HERE
# TODO: compute loss by MSE / MAE
# ***************************************************
# vector e
e = compute_e(y, tx, w)
N = compute_N(e)
L_MSE = np.dot(np.matrix.transpose(e), e)
L_MSE = L_MSE / (2 * N)
return L_MSE | true |
86dff860653a140cadd6a79cd48755232cf6a9ac | Python | RoyMachineLearning/nltk-2 | /resources/notebook-source-files/nltk-session-3.py | UTF-8 | 15,452 | 3.484375 | 3 | [] | no_license | # <markdowncell>
# <br>
# <img style="float:left" src="http://ipython.org/_static/IPy_header.png" />
# <br>
# <headingcell level=1>
# Session 3: The Fraser Speech Corpus
# <markdowncell>
# **Welcome back!**
# So, what did we learn yesterday? A brief recap:
# * The **IPython** Notebook
# * **Python**: syntax, variables, functions, etc.
# * **NLTK**: manipulating linguistic data
# * **Corpus linguistic tasks**: tokenisation, keywords, collocation, stemming, concordances
# Today's focus will be on **developing more advanced NLTK skills** and using these skills to **investigate the Fraser Speeches Corpus**. In the final session, we will discuss **how to use what you have learned here in your own research**.
# *Any questions or anything before we dive in?*
# <headingcell level=2>
# Malcolm Fraser and his speeches
# <markdowncell>
# For much of this session, we are going to be working with a corpus of speeches made by Malcolm Fraser.
# <codecell>
# this code allows us to display images and webpages in our notebook
from IPython.display import display
from IPython.display import display_pretty, display_html, display_jpeg, display_png, display_svg
from IPython.display import Image
from IPython.display import HTML
import nltk
# <codecell>
Image(url='http://www.unimelb.edu.au/malcolmfraser/photographs/family/105~36fam6p9.jpg')
# <markdowncell>
# Because our project here is *corpus driven*, we don't necessarily need to know about Malcolm Fraser and his speeches in order to analyse the data: we may be happy to let things emerge from the data themselves. Even so, it's nice to know a bit about him.
# Malcolm Fraser was a member of Australian parliament between 1955 and 1983, holding the seat of Wannon in western Victoria. He held a number of ministries, including Education and Science, and Defence.
# He became leader of the Liberal Party in March 1975 and Prime Minister of Australia in December 1975, following the dismissal of the Whitlam government in November 1975.
# He retired from parliament following the defeat of the Liberal party at the 1983 election and in 2009 resigned from the Liberal party after becoming increasingly critical of some of its policies.
# He can now be found on Twitter as `@MalcolmFraser12`
# <codecell>
HTML('<iframe src=http://en.wikipedia.org/wiki/Malcolm_Fraser width=700 height=350></iframe>')
# <markdowncell>
# In 2004, Malcolm Fraser made the University of Melbourne the official custodian of his personal papers. The collection consists of a large number of photographs, speeches and personal papers, including Neville Fraser's WWI diaries and materials relating to CARE Australia, which Mr Fraser helped to found in 1987.
# <codecell>
HTML('<iframe src=http://www.unimelb.edu.au/malcolmfraser/ width=700 height=350></iframe>')
# <markdowncell>
# Every week, between 1954 until 1983, Malcolm Fraser made a talk to his electorate that was broadcast on Sunday evening on local radio.
# The speeches were transcribed years ago. *Optical Character Recognition* (OCR) was used to digitise the transcripts. This means that the texts are not of perfect quality.
# Some have been manually corrected, which has removed extraneous characters and mangled words, but even so there are still some quirks in the formatting.
# For much of this session, we are going to manipulate the corpus data, and use the data to restructure the corpus.
# <headingcell level=2>
# Cleaning the corpus
# <markdowncell>
# A common part of corpus building is corpus cleaning. Reasons for cleaning include:
# 1. Not break the code with unexpected input
# 2. Ensure that searches match as many examples as possible
# 3. Increasing readability, the accuracy of taggers, stemmers, parsers, etc.
# The level of kind of cleaning depends on your data and the aims of your project. In the case of very clean data (lucky you!), there may be little that needs to be done. With messy data, you may need to go as far as to correct variant spellings (online conversation, very old books).
# <headingcell level=3>
# Discussion
# <markdowncell>
# *What are the characteristics of clean and messy data? Any personal experiences?*
# It will be important to bear these characteristics in mind once you start building your own datasets and corpora.
# <headingcell level=3>
# OK, let's code!
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <markdowncell>
# # Charting change in Fraser's speeches
# Before we get started, we have to install Java, as some of our tools rely on some Java code. You'll very likely have Java installed on your local machine, but we need it on the cloud:
# <codecell>
! yum -y install java
# ! pip install corpkit
# <markdowncell>
# And now, let's import the functions we need from `corpkit`:
# <codecell>
import corpkit
from corpkit import (
interrogator, plotter, table, quickview,
tally, surgeon, merger, conc, keywords,
collocates, quicktree, searchtree
)
from resources.scripts import plot
# <markdowncell>
# Here's an overview of each function's purpose:
# | **Function name** | Purpose | |
# | ----------------- | ---------------------------------- | |
# | *quicktree()* | draw a syntax tree | |
# | *searchtree()* | find things in a parse tree | |
# | *interrogator()* | interrogate parsed corpora | |
# | *plot()* | visualise *interrogator()* results | |
# | *quickview()* | view *interrogator()* results | |
# | *tally()* | get total frequencies for *interrogator()* results | |
# | *surgeon()* | edit *interrogator()* results | |
# | *merger()* | merge *interrogator()* results | |
# | *conc()* | complex concordancing of subcopora | |
# <headingcell level=3>
# Interrogating the Fraser corpus
# <markdowncell>
# To interrogate the corpus, we need a crash course in **syntax trees** and **Tregex queries**. Let's define a tree (from the Fraser Corpus, 1956), and have a look at its visual representation.
# Melbourne has been transformed over the let 18 months in preparation for the visitors.
# <codecell>
# Stanford-parser bracketing of the 1956 sample sentence; the OCR error
# ("let" for what was presumably "last") has been tagged as VBN.
melbtree = (r'(ROOT (S (NP (NNP Melbourne)) (VP (VBZ has) (VP (VBN been) (VP (VBN transformed) '
            r'(PP (IN over) (NP (NP (DT the) (VBN let) (CD 18) (NNS months)) (PP (IN in) (NP (NP (NN preparation)) '
            r'(PP (IN for) (NP (DT the) (NNS visitors)))))))))) (. .)))')
# <markdowncell>
# Notice that an OCR error caused a parsing error. Oh well. Here's a visual representation, drawn with NLTK:
# <br>
# <img style="float:left" src="https://raw.githubusercontent.com/resbaz/nltk/master/resources/images/melbtree.png" />
# <br>
# <markdowncell>
# The data is annotated at word, phrase and clause level. Embedded here is an elaboration of the meanings of tags *(ask Daniel if you need some clarification!)*:
# <codecell>
HTML('<iframe src=http://www.surdeanu.info/mihai/teaching/ista555-fall13/readings/PennTreebankConstituents.html width=700 height=350></iframe>')
# <markdowncell>
# There are a number of different parsers, with some better than others:
# <codecell>
quicktree("Melbourne has been transformed over the let 18 months in preparation for the visitors")
# <markdowncell>
# Neither parse is perfect, but the one we just generated has a major flaw: *Melbourne* is parsed as an adverb! Stanford CoreNLP correctly identifies it as a proper noun, and also, did a better job of handling the 'let' mistake.
# <markdowncell>
# *searchtree()* is a tiny function that searches a syntax tree. We'll use the sample sentence and *searchtree()* to practice our Tregex queries. We can feed it either *tags* (S, NP, VBZ, DT, etc.) or *tokens* enclosed in forward slashes.
# <codecell>
# any plural noun
query = r'NNS'
searchtree(melbtree, query)
# <markdowncell>
# Here's some more documentation about Tregex queries:
# <codecell>
HTML('<iframe src=http://nlp.stanford.edu/~manning/courses/ling289/Tregex.html width=700 height=350></iframe>')
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <markdowncell>
# A very complicated example:
# <codecell>
# particle verb in verb phrase with np sister headed by Melb.
# the particle verb must also be in a verb phrase with a child preposition phrase
# and this child preposition phrase must be headed by the preposition 'over'.
query = r'VBN >> (VP $ (NP <<# /Melb.?/)) > (VP < (PP <<# (IN < /over/)))'
searchtree(melbtree, query)
# <markdowncell>
# Here are two more trees for you to query, from 1969 and 1973.
# We continue to place a high value on economic aid through the Colombo Plan, involving considerable aid to Asian students in Australia.
# <markdowncell>
# <br>
# <img style="float:left" src="https://raw.githubusercontent.com/resbaz/nltk/master/resources/images/colombotree.png" />
# <br>
# <codecell>
colombotree = ( r'(ROOT (S (NP (PRP We)) (VP (VBP continue) (S (VP (TO to) (VP (VB place) (NP (NP (DT a) (JJ high) '
r'(NN value)) (PP (IN on) (NP (JJ economic) (NN aid)))) (PP (IN through) (NP (DT the) (NNP Colombo) (NNP Plan))) '
r'(, ,) (S (VP (VBG involving) (NP (JJ considerable) (NN aid)) (PP (TO to) (NP (NP (JJ Asian) (NNS students)) '
r'(PP (IN in) (NP (NNP Australia))))))))))) (. .)))' )
# <markdowncell>
# As a result, wool industry and the research bodies are in a state of wonder and doubt about the future.
# <markdowncell>
# <br>
# <img style="float:left" src="https://raw.githubusercontent.com/resbaz/nltk/master/resources/images/wooltree.png" />
# <br>
# <codecell>
wooltree = ( r'(ROOT (S (PP (IN As) (NP (DT a) (NN result))) (, ,) (NP (NP (NN wool) (NN industry)) (CC and) '
r'(NP (DT the) (NN research) (NNS bodies))) (VP (VBP are) (PP (IN in) (NP (NP (DT a) (NN state)) '
r'(PP (IN of) (NP (NN wonder) (CC and) (NN doubt))))) (PP (IN about) (NP (DT the) (NN future)))) (. .)))' )
# <markdowncell>
# Try a few queries using `searchtree()` in the cells below.
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <markdowncell>
# # Some linguistics...
# <markdowncell>
# *Functional linguistics* is a research area concerned with how *realised language* (lexis and grammar) work to achieve meaningful social functions.
# One functional linguistic theory is *Systemic Functional Linguistics*, developed by Michael Halliday (Prof. Emeritus at University of Sydney).
# Central to the theory is a division between **experiential meanings** and **interpersonal meanings**.
# * Experiential meanings communicate what happened to whom, under what circumstances.
# * Interpersonal meanings negotiate identities and role relationships between speakers
# Halliday argues that these two kinds of meaning are realised **simultaneously** through different parts of English grammar.
# * Experiential meanings are made through **transitivity choices**.
# * Interpersonal meanings are made through **mood choices**
# Here's one visualisation of it. We're concerned with the two left-hand columns. Each level is an abstraction of the one below it.
# <br>
# <img style="float:left" src="https://raw.githubusercontent.com/resbaz/nltk/master/resources/images/egginsfixed.jpg" />
# <br>
# Transitivity choices include fitting together configurations of:
# * Participants (*a man, green bikes*)
# * Processes (*sleep, has always been, is considering*)
# * Circumstances (*on the weekend*, *in Australia*)
# Mood features of a language include:
# * Mood types (*declarative, interrogative, imperative*)
# * Modality (*would, can, might*)
# * Lexical density---the number of words per clause, the number of content to non-content words, etc.
# Lexical density is usually a good indicator of the general tone of texts. The language of academia, for example, often has a huge number of nouns to verbs. We can approximate an academic tone simply by making nominally dense clauses:
# The consideration of interest is the potential for a participant of a certain demographic to be in Group A or Group B.
# Notice how not only are there many nouns (*consideration*, *interest*, *potential*, etc.), but that the verbs are very simple (*is*, *to be*).
# In comparison, informal speech is characterised by smaller clauses, and thus more verbs.
# A: Did you feel like dropping by?
# B: I thought I did, but now I don't think I want to
# Here, we have only a few, simple nouns (*you*, *I*), with more expressive verbs (*feel*, *dropping by*, *think*, *want*)
# > **Note**: SFL argues that through *grammatical metaphor*, one linguistic feature can stand in for another. *Would you please shut the door?* is an interrogative, but it functions as a command. *invitation* is a nominalisation of a process, *invite*. We don't have time to deal with these kinds of realisations, unfortunately.
# With this in mind, let's search the corpus for *interpersonal* and *experiential* change in Fraser's language.
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <codecell>
#,,,
# <markdowncell>
# # Cheatsheet
# <markdowncell>
# ### Some possible queries:
# <codecell>
head_of_np = r'/NN.?/ >># NP'                            # noun heading a noun phrase
processes = r'/VB.?/ >># VP >+(VP) VP'                   # verb heading the innermost VP of a VP chain
proper_np = r'NP <# NNP' # use titlefilter!
open_classes = r'/\b(JJ|NN|VB|RB)+.?\b/'                 # adjectives, nouns, verbs, adverbs
closed_classes = r'/\b(DT|IN|CC|EX|W|MD|TO|PRP)+.?\b/'   # function-word tags
clauses = r'/^(S|SBAR|SINV|SQ|SBARQ)$/'                  # any clause-level node
firstperson = r'/PRP.?/ < /(?i)^(i|me|my)$/'             # first-person pronouns
thirdperson = r'/PRP.?/ < /(?i)^(he|she|it|they|them|him|her)$/'  # third-person pronouns
questions = r'ROOT <<- /.?\?.?/'                         # sentences whose last token contains '?'
# <markdowncell>
# ### `plot()` arguments:
# <br>
#
# | plot() argument | Mandatory/default? | Use | Type |
# | :------|:------- |:-------------|:-----|
# | *title* | **mandatory** | A title for your plot | string |
# | *results* | **mandatory** | the results you want to plot | *interrogator()* total |
# | *fract_of* | None | results for plotting relative frequencies/ratios etc. | list (interrogator('c') form) |
# | *num_to_plot* | 7 | number of top results to display | integer |
# | *multiplier* | 100 | result * multiplier / total: use 1 for ratios | integer |
# | *x_label*, *y_label* | False | custom label for axes | string |
# | *yearspan* | False | plot a span of years | a list of two int years |
# | *justyears* | False | plot specific years | a list of int years |
# | *csvmake* | False | make csvmake the title of csv output file | string |
# <markdowncell>
# | true |
e710a52417e0e9900fec8572c1d5acd6db0c5f99 | Python | gabriellaec/desoft-analise-exercicios | /backup/user_360/ch22_2020_03_11_19_42_13_626189.py | UTF-8 | 139 | 3.171875 | 3 | [] | no_license | fuma = int(input("Quantos cigarros você fuma por dia?"))
anos = int(input("Há quantos anos você fuma?"))
# cigarettes/day * 10 minutes each * years * 365 days, divided by 1440
# minutes per day => estimated whole days of life lost.
# NOTE(review): relies on `fuma` (cigarettes per day) read just above.
print((fuma*10*anos*365)/1440)
8afd5c05377c6d16df75714f704b13e6423776db | Python | ccpro/server.projects | /movie-collection/update_dates.py | UTF-8 | 887 | 2.6875 | 3 | [] | no_license | #!/usr/local/bin/python3.6
import psycopg2
# One shared connection for the whole script.
psql_conn = psycopg2.connect(host="10.1.1.1", database="ccpro_noip_org", user="ccpro")

# 1) collect every movie_review reference
cur = psql_conn.cursor()
cur.execute("SELECT reference FROM movie_review")
data = [{'ref': row[0]} for row in cur.fetchall()]
cur.close()

# 2) find the journal date of the entry mentioning each reference
cur = psql_conn.cursor()
for d in data:
    # Parameterised LIKE instead of string concatenation: avoids SQL
    # injection and quoting bugs when a reference contains a quote.
    cur.execute("select date(eventtime) from live_journal where event like %s",
                ('%' + d['ref'] + '%',))
    rows = cur.fetchall()
    if rows:
        d['date'] = str(rows[0][0])
cur.close()

# 3) write the found dates back, again with bound parameters
cur = psql_conn.cursor()
for d in data:
    if 'date' in d:
        print("updating {ref} -> {date}".format(ref=d['ref'], date=d['date']))
        cur.execute("update movie_review set date = %s where reference = %s",
                    (d['date'], d['ref']))
        psql_conn.commit()
cur.close()
#print(data)
89f826d73930e2b94f6073d9acea9ef5b4eb1642 | Python | PROFX8008/Python-for-Geeks | /Chapter06/mypandas/advance/pandastrick2.py | UTF-8 | 550 | 3.171875 | 3 | [
"MIT"
] | permissive | # pandastrick2.py
import pandas as pd
# A week of weather observations: day name, temperature (Celsius) and a
# condition label polluted with stray punctuation/underscores.
weekly_data = {'day': ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                       'Friday', 'Saturday', 'Sunday'],
               'temp': [40, 33, 42, 31, 41, 40, 30],
               'condition': ['Sunny,', '_Cloudy ', 'Sunny', 'Rainy',
                             '--Sunny.', 'Cloudy.', 'Rainy']
               }
df = pd.DataFrame(weekly_data)
print(df)
# strip the stray leading '_-'/spaces and trailing ',.'/spaces from the labels
df["condition"] = df["condition"].str.lstrip('_- ').str.rstrip(',. ')
# Celsius -> Fahrenheit, vectorized
df["temp_F"] = 9/5 * df["temp"] + 32
print(df)
5f397798faebe50badcb7d6b12ce7eef57fcc5c9 | Python | ranjitkumar518/AWS-boto3 | /db_available.py | UTF-8 | 938 | 2.578125 | 3 | [] | no_license | #!/usr/local/bin/python
import boto3
import sys
import os
import json
import time
def usage():
print("Usage: "+__file__+" cluster_identifier current_region eg: "+__file__+" database-jan10 us-west-2")
exit(1)
if len(sys.argv) != 2 :
usage()
sys.exit(0)
cluster_identifier = sys.argv[1]
current_region = sys.argv[2]
def db_available(cluster_identifier, current_region):
print "\n ###### Checking status of Database: "+cluster_identifier+" in "+current_region+" ######\n"
count = 0
while(1) :
client = boto3.client('rds', region_name=current_region)
response = client.describe_db_instances(DBInstanceIdentifier=cluster_identifier)
status = response['DBInstances'][0]['DBInstanceStatus']
if status == 'available':
print "DB status: "+cluster_identifier+" "+status+" "
break
count = count + 1
if count <= 20 :
time.sleep(10)
continue
break
return status
db_available(cluster_identifier, current_region)
| true |
1fee231f8f407c92d757ddb733e34064821b5ef0 | Python | CooperMetts/comp110-21f-workspace | /lessons/sum_test.py | UTF-8 | 548 | 4.03125 | 4 | [] | no_license | """Tests for the sum function."""
# this imports the function sum from sum module (or file) in the lessons folder
from lessons.sum import sum
def test_sum_empty() -> None:
    """Summing an empty list yields the additive identity, 0.0."""
    # nothing to accumulate, so the result must be exactly zero
    assert sum([]) == 0.0
def test_sum_single_item() -> None:
    """Summing a single-element list yields that element."""
    # the original `assert sum([110.0])` only checked truthiness and could
    # never detect a wrong (non-zero) result; compare against the value
    assert sum([110.0]) == 110.0
def test_sum_many_items() -> None:
    """The sum of several floats equals their arithmetic total."""
    values: list[float] = [1.0, 2.0, 3.0]
    assert sum(values) == 6.0
| true |
bf24ce8aa42db30dd0b3386c4552baa3b672f081 | Python | Marzona/gqrx-remote | /modules/disk_io.py | UTF-8 | 2,610 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
Remote application that interacts with gqrx using rigctl protocol.
Gqrx partially implements rigctl since version 2.3.
Please refer to:
http://gqrx.dk/
http://gqrx.dk/doc/remote-control
http://sourceforge.net/apps/mediawiki/hamlib/index.php?title=Documentation
Author: Rafael Marmelo <rafael@defying.me>
License: MIT License
Copyright (c) 2014 Rafael Marmelo
"""
import csv
import logging
import os.path
from modules.exceptions import InvalidPathError
# logging configuration
logger = logging.getLogger(__name__)
class IO(object):
    """Wrapper for reading and writing bookmark rows as CSV files.

    Rows accumulate in ``row_list``; ``csv_save`` writes that same list back
    to disk.  Note that repeated ``csv_load`` calls append to the existing
    rows rather than replacing them.
    """

    def __init__(self):
        # rows read from / destined for disk; each entry is a list of fields
        self.row_list = []

    def _path_check(self, csv_file):
        """Helper function that checks if the path is valid.

        :param csv_file: path
        :type csv_file: string
        :raises InvalidPathError: if the path is invalid
        :returns: none
        """
        if not os.path.exists(csv_file):
            logger.warning("Invalid path provided:{}".format(csv_file))
            raise InvalidPathError

    def csv_load(self, csv_file, delimiter):
        """Read the frequency bookmarks file and append its rows to row_list.

        :param csv_file: path of the file to be read
        :type csv_file: string
        :param delimiter: delimiter char
        :type delimiter: string
        :raises InvalidPathError: if the path does not exist
        :returns: none; csv parse and IO errors are caught and logged
        """
        self._path_check(csv_file)
        try:
            with open(csv_file, 'r') as data_file:
                reader = csv.reader(data_file, delimiter=delimiter)
                for line in reader:
                    self.row_list.append(line)
        except csv.Error:
            logger.error("The file provided({})"\
                         " is not a file with values "\
                         "separated by {}.".format(csv_file, delimiter))
        except (IOError, OSError):
            logger.error("Error while trying to read the file: "\
                         "{}".format(csv_file))

    def csv_save(self, csv_file, delimiter):
        """Save the rows currently held in row_list to disk.

        :param csv_file: path of the file to be written
        :type csv_file: string
        :param delimiter: delimiter char used in the csv
        :type delimiter: string
        :returns: none; IO errors are caught and logged
        """
        try:
            with open(csv_file, 'w') as data_file:
                writer = csv.writer(data_file, delimiter=delimiter)
                for row in self.row_list:
                    writer.writerow(row)
        except (IOError, OSError):
            logger.error("Error while trying to write the file: "\
                         "{}".format(csv_file))
| true |
86c4b5d7b5acbeda1e9bf787018f7c4a53b31b4a | Python | mcoshiro/picohanabi | /hanabi.py | UTF-8 | 5,189 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python3
import socket
import os.path
import subprocess
#GUI via web server
hanabi_port = 125
debug_mode = False
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#server_socket.bind((socket.gethostname(),125)) <- use this (if ports are forwarded) to be viewable externally (not recommended, probably a security concern)
server_socket.bind(('localhost',hanabi_port))
print('Welcome to Picohanabi v 0.2.')
print('Connect by going to localhost:'+str(hanabi_port)+' on a web browser. Picohanabi supports only 1 connection at a time.')
if (debug_mode):
print('DEBUG: debug mode is ON')
server_socket.listen(1)
#main loop
file_to_read = 'example'
path_to_read = './'
event_to_read = '0'
while True:
#accept connections and respond- only 1 connection at a time
(client_socket, address) = server_socket.accept()
msg = b''
broken_connection = False
while True:
chunk = client_socket.recv(2048)
msg = msg + chunk
if chunk == b'':
if (debug_mode):
print('DEBUG: ',end='')
print(msg)
print("Error:connection broken")
broken_connection = True
break
if (msg[-4:]==b'\r\n\r\n'):
#end of message?
if (debug_mode):
print('DEBUG: ',end='')
print(msg)
break
if (not broken_connection):
#process browser request
valid_url = False
is_favicon = False
user_request = msg.decode('utf-8')
request_string = user_request.split()[1]
if (request_string == '/'):
path_to_read = './'
file_to_read = 'example'
event_to_read = '0'
valid_url = True
elif (request_string == '/favicon.ico'):
is_favicon = True
valid_url = True
elif (request_string.split('?')[0] == '/get_event'):
file_to_read = request_string.split('?')[1].split('&')[0].split('=')[1]
if (file_to_read[-5:] == '.root'):
file_to_read = file_to_read[:-5]
event_to_read = request_string.split('?')[1].split('&')[1].split('=')[1]
path_to_read = '/'.join(file_to_read.split('%2F')[:-1])
if (path_to_read == ''):
path_to_read = './'
file_to_read = file_to_read.split('%2F')[-1]
valid_url = True
elif (request_string == '/previous_event'):
event_to_read = str(int(event_to_read)-1)
valid_url = True
elif (request_string == '/next_event'):
event_to_read = str(int(event_to_read)+1)
valid_url = True
#check if file exists, generate if possible
if (file_to_read == 'example'):
event_to_read = '0'
if (not os.path.isfile('data/'+file_to_read+'_'+event_to_read+'.js')):
generate_display_return_value = subprocess.call(['bin/generate_display',path_to_read,file_to_read,event_to_read])
if (generate_display_return_value != 0):
#failed to generate
valid_url = False
#return appropriate webpage
if (valid_url):
if (is_favicon):
client_socket.send(b'')
#check if file exists
else:
html_file = open('site/index_1.html','r')
html_string = html_file.read()
#replace button text
html_string = html_string.replace('%file_to_read%',file_to_read+'.root')
html_string = html_string.replace('%event_to_read%',event_to_read)
html_file.close()
html_file = open('data/'+file_to_read+'_'+event_to_read+'.js','r')
html_string += html_file.read()
html_file.close()
html_file = open('site/index_2.html','r')
html_string += html_file.read()
html_file.close()
reply_string = 'HTTP/1.1 200 OK\r\nDate: Fri, 27 Mar 2020 23:29:05 GMT\r\nServer: hanabi (CERN CentOS 7)\r\nX-Frame-Options: SAMEORIGIN\r\nLast-Modified: Fri, 27 Mar 2020 23:29:05 GMT\r\nETag: "190056-1dd9-594e2e50e6980"\r\nAccept-Ranges: bytes\r\nContent-Length: ' + str(len(html_string)+425) + '\r\nConnection: close\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
client_socket.send(reply_string.encode('utf-8'))
client_socket.send(html_string.encode('utf-8'))
else:
#not found
client_socket.send(b'HTTP/1.1 200 OK\r\nDate: Fri, 27 Mar 2020 23:29:05 GMT\r\nServer: hanabi (CERN CentOS 7)\r\nX-Frame-Options: SAMEORIGIN\r\nLast-Modified: Fri, 27 Mar 2020 23:29:05 GMT\r\nETag: "190056-1dd9-594e2e50e6980"\r\nAccept-Ranges: bytes\r\nContent-Length: 7641\r\nConnection: close\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n')
html_file = open("site/404.html","r")
html_string = html_file.read()
client_socket.send(html_string.encode('utf-8'))
html_file.close()
| true |
556637e93f38eec4fb79c42507dd55ba481a40df | Python | vietxb0911/BigData-Lab2 | /code3.py | UTF-8 | 1,506 | 3.109375 | 3 | [] | no_license | from mrjob.job import MRJob
from mrjob.step import MRStep
import re
WORD_REGEX = re.compile(r"([a-zA-Z]+[-'][a-zA-Z]+)|([a-zA-z]+)")
class MaxFrequencyWord(MRJob):
def steps(self): # phương thức định nghĩa chương trình mapreduce này gồm 2 step như bên dưới
return [
MRStep(mapper=self.mapper_step1, reducer=self.reducer_step1), # step này để tính word_count
MRStep(mapper=self.mapper_step2, reducer=self.reducer_step2) # step này để tìm max_frequency_word
]
def mapper_step1(self, _, line):
for word in WORD_REGEX.findall(line):
yield(word[1].lower(), 1)
def reducer_step1(self, word, count):
yield(word, sum(count))
# Phương thức này khác với phương thức mapper ở trên là key ứng với None, còn value là cặp giá trị (word, word_count)
# Vì nếu key là None thì tất cả các value ở bước shuffle sẽ được gom thành một list
# và từ đó ta sẽ tìm ra max_frequency từ list đó
def mapper_step2(self, word, word_count):
yield(None, (word, word_count))
# Phương thức reduce này lấy ra cặp giá trị (word, word_count) có giá trị word_count lớn nhất
# Dùng hàm max() với key là phần tử thứ 2 trong cặp giá trị (word, word_count)
def reducer_step2(self, _, pairs):
yield(max(pairs, key=lambda x: x[1]))
if __name__ == "__main__":
MaxFrequencyWord().run() | true |
eaa2fad7d4ac28d2a4bac6e81e3801683ae5373b | Python | OmkarPawaskar/Currency-Exchange-App | /app/currency_exchange/currency_exchange.py | UTF-8 | 3,616 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | """
This is currency exchange module that contains the Currency Exchange class which calls ExchangeRate-APIs
"""
import requests
from app.logger import log
from app import config
class Currency_Exchange():
"""
This class is used to call ExchangeRate-APIs
"""
def __init__(self):
self.api_key = config.API_KEY
self.query_response_standard = {}
self.query_response_pair_conversion = "Please check if target currency code and Amount was entered "
self.query_response_pair_conversion_rate = "Please check if target currency code was entered"
self.query_response_enriched_rate = "Please check if target currency code was entered"
self.query_response_enriched = "Please check if target currency code was entered"
self.query_response_history = {}
# Call Supported Codes ExchangeRate-API endpoint.
self.supported_codes = self.__call_api('https://v6.exchangerate-api.com/v6/'+self.api_key+'/codes')
def execute_search(self, currency_code=str, target_currency_code=None, amount=None, year=None, month=None, day=None):
'''
This function takes in currency code,target currency code, year, month and day as user input and retrieves
information from apis accordingly
'''
log('Currency Exchange search : ')
# Call Standard ExchangeRate-API endpoint
query_response_standard = self.__call_api('https://v6.exchangerate-api.com/v6/'+self.api_key+'/latest/'+currency_code)
self.query_response_standard = query_response_standard.get('conversion_rates')
if target_currency_code is not None:
# Call Pair ExchangeRate-API endpoint.
query_response_pair_conversion_rate = self.__call_api('https://v6.exchangerate-api.com/v6/'+self.api_key+'/pair/'+currency_code+'/'+target_currency_code)
self.query_response_pair_conversion_rate = query_response_pair_conversion_rate.get('conversion_rate')
if amount is not None:
query_response_pair_conversion = self.__call_api('https://v6.exchangerate-api.com/v6/'+self.api_key+'/pair/'+currency_code+'/'+target_currency_code+'/'+amount)
self.query_response_pair_conversion = query_response_pair_conversion.get('conversion_result')
# Call Enriched ExchangeRate-API endpoint
query_response_enriched = self.__call_api('https://v6.exchangerate-api.com/v6/'+self.api_key+'/enriched/'+currency_code+'/'+target_currency_code)
self.query_response_enriched_rate = query_response_enriched.get('conversion_rate')
self.query_response_enriched = query_response_enriched.get("target_data")
if year and month and day is not None:
# Call Historical Data ExchangeRate-API endpoint
query_response_history = self.__call_api('https://v6.exchangerate-api.com/v6/'+self.api_key+'/history/'+currency_code+'/'+year+'/'+month+'/'+day)
self.query_response_history = query_response_history.get('conversion_rates')
return {'response' : 'Success'}
def __call_api(self, url) :
"""
This function is used to call get requests for different urls
url : str -> link to pass get requests.
"""
headers = {}
headers['Content-Type'] = 'application/json'
response = requests.get(url, headers= headers)
if response.json()['result']=="success" and response.status_code == 200:
return response.json()
else:
return {"error" : "Invalid argument. Please try again."}
| true |
384b6991e6072744e03ade60ef5aa985f693209e | Python | SkafteNicki/unsuper | /unsuper/data/mnist_data.py | UTF-8 | 8,863 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 09:31:42 2018
@author: nsde
"""
#%%
from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import os
import os.path
import gzip
import numpy as np
import torch
import codecs
import errno
import hashlib
from tqdm import tqdm
#%%
def gen_bar_updater(pbar):
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
#%%
def check_integrity(fpath, md5=None):
if md5 is None:
return True
if not os.path.isfile(fpath):
return False
md5o = hashlib.md5()
with open(fpath, 'rb') as f:
# read in 1MB chunks
for chunk in iter(lambda: f.read(1024 * 1024), b''):
md5o.update(chunk)
md5c = md5o.hexdigest()
if md5c != md5:
return False
return True
#%%
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
#%%
def download_url(url, root, filename, md5):
from six.moves import urllib
root = os.path.expanduser(root)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True))
)
except:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True)))
#%%
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
#%%
def read_image_file(path):
with open(path, 'rb') as f:
data = f.read()
assert get_int(data[:4]) == 2051
length = get_int(data[4:8])
num_rows = get_int(data[8:12])
num_cols = get_int(data[12:16])
parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
return torch.from_numpy(parsed).view(length, num_rows, num_cols)
#%%
def read_label_file(path):
with open(path, 'rb') as f:
data = f.read()
assert get_int(data[:4]) == 2049
length = get_int(data[4:8])
parsed = np.frombuffer(data, dtype=np.uint8, offset=8)
return torch.from_numpy(parsed).view(length).long()
#%%
class MNIST(data.Dataset):
""" Specialized version of the torchvision.datasets.MNIST class that takes
one additional argument "classes". This is a list of the classes that
should be included in the dataset.
"""
urls = [
'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
]
training_file = 'training.pt'
test_file = 'test.pt'
def __init__(self, root, train=True, transform=None, target_transform=None,
download=False, classes=[0,1,2,3,4,5,6,7,8,9], num_points = 20000):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
# Extract only the wanted classes
n = sum([self.targets==c for c in classes]).sum().item()
newdata = torch.zeros(n, *self.data.shape[1:], dtype=self.data.dtype)
newtargets = torch.zeros(n, dtype=self.targets.dtype)
for i, idx in enumerate(np.where(sum([self.targets==c for c in classes]).numpy())[0]):
newdata[i] = self.data[idx]
newtargets[i] = self.targets[idx]
self.data = newdata
self.targets = newtargets
# Get only the wanted number of points
newdata, newtargets = [ ], [ ]
counter = 10 * [0]
for x, y in zip(self.data, self.targets):
if counter[y] < num_points:
newdata.append(x)
newtargets.append(y)
counter[y] += 1
self.data = torch.stack(newdata, dim=0)
self.targets = torch.stack(newtargets, dim=0)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.data)
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
def _check_exists(self):
return os.path.exists(os.path.join(self.processed_folder, self.training_file)) and \
os.path.exists(os.path.join(self.processed_folder, self.test_file))
@staticmethod
def extract_gzip(gzip_path, remove_finished=False):
print('Extracting {}'.format(gzip_path))
with open(gzip_path.replace('.gz', ''), 'wb') as out_f, \
gzip.GzipFile(gzip_path) as zip_f:
out_f.write(zip_f.read())
if remove_finished:
os.unlink(gzip_path)
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
for url in self.urls:
filename = url.rpartition('/')[2]
file_path = os.path.join(self.raw_folder, filename)
download_url(url, root=self.raw_folder, filename=filename, md5=None)
self.extract_gzip(gzip_path=file_path, remove_finished=True)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
#%%
if __name__ == '__main__':
dataset = MNIST(root='', train=True, download=True, num_points=10, classes=[1]) | true |
41fdbc3446ef312a654eb3220265c6e03e663730 | Python | nkdevelopment122317/gset-smart-cars | /remove_unused_training_data.py | UTF-8 | 1,285 | 2.703125 | 3 | [] | no_license | import os
from time import sleep
img_dir_str = "C:\\Users\\micro\\GSET OpenCV Scripts\\gset_smart_cars_training_data\\images\\training_video_2\\"
ann_dir_str = "C:\\Users\\micro\\GSET OpenCV Scripts\\gset_smart_cars_training_data\\annotations\\training_video_2\\"
imgs = []
anns = []
img_dir = os.fsencode(img_dir_str)
ann_dir = os.fsencode(ann_dir_str)
def populate_file_list(dir, ending, list_to_populate):
for file in os.listdir(dir):
filename = os.fsencode(file)
if filename.endswith(b"." + str.encode(ending)):
print("[INFO] " + filename.decode("utf-8").replace("." + ending, ""))
list_to_populate.append(filename.decode("utf-8").replace("." + ending, ""))
else:
continue
populate_file_list(img_dir_str, "jpg", imgs)
sleep(1)
populate_file_list(ann_dir_str, "xml", anns)
print("[INFO] Done. Length of imgs list: " + str(len(imgs)))
print("[INFO] Done. Length of anns list: " + str(len(anns)))
imgs_to_delete = [i + ".jpg" for i in imgs if i not in anns]
sleep(1)
for img in imgs_to_delete:
os.remove(img_dir_str + img)
print("[INFO] Deleted image: " + img)
print("[INFO] Deleted unused images. Length of imgs_to_delete list: " + str(len(imgs_to_delete)))
| true |
15f13546ae672464567a94927bf0d25637585be9 | Python | heyfavour/code_sniippet | /crypto_demo.py | UTF-8 | 7,609 | 2.65625 | 3 | [] | no_license | """
1.电码本模式(Electronic Codebook Book(ECB))
明文消息被分成固定大小的块(分组),并且每个块被单独加密
2.密码分组链接模式(Cipher Block Chaining(CBC))
每一个分组要先和前一个分组加密后的数据进行XOR异或操作
不利于并行计算 误差传递 初始化向量IV
3.计算器模式(Counter (CTR))
4.密码反馈模式(Cipher FeedBack (CFB))
进阶版CBC
5.输出反馈模式(Output FeedBack (OFB))
进阶版CFB
padding
RSA:
pkcs1(最基本)----pkcs5(对密钥加密)----pkcs8(在以上基础上安全存储移植等)
证书:
pkcs7(基本语法)----pkcs12(安全传输)
"""
import threading
import base64
import random
from typing import Union
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import hashes, asymmetric
from cryptography.exceptions import InvalidSignature
# len(IV) = 16
IV = "8b991e525526bc73"
PRI_KEY = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAMlLYcRK6Q0XMszy
GbNyiuGWxrfcXPUhUc5caRjMQF4hSf2uWdJ91xS9j3h3kg4ciYw53IvOu2MXh0/p
ycs3eVcoOv829X577r1eTZR6Z+3PM21ZH5LfJtuYE9BUW0kqR8VGCU/UjzaoRIoj
zm9bdt1vsFuBrrYK2AcQQgHgsxGLAgMBAAECgYBIPm7DRWNpGFdaKNXCiqx/lF6T
pFoUfDXhC1eI192OKwJkMov4OMPVpMb2JGvd9q4DDs0xvCuSv+IHc0/CSJGabFrK
RBSQMgfnduLSytIzHvrdmq4YN0txglP2JWulT4WrS7j5RGCNOSc0LkBQDpz+4Q7v
Bvzl5GU2CANKpeBUWQJBAOOSU6/w1E8H2GMJF90RDiIRH0pGKUveyje0W0O4Utzf
HN6QRblaB2RXq2hcwPQug9mE1R6yGPo9aQj2GQfZ2Z8CQQDicLhW04KVj3Kozttw
XgDZM/lXvfFN2JNPkuwLJHjzZjX/1V4dfs7ADSiu7BbKqbCrA8PhqkoBtrQ347uO
r5iVAkB2hwIbgx2xQ+7KNjQ9qeJoj+5yKvTbVWCRftiB/wD5lSNeMFqAXYm4E4lt
Q9Ij3A5EPtEZub0UqOOKDVOgKTEVAkEAur9dt/XN70yTslaPMVfFeVxc2hkDRkFE
FE9GLlZRDeOQy0IL0WWAW3E+ySxaC5/w3MlJJfZL/KfSb3l4eE+nFQJAOPAV2MPR
CT2KPWFXUYwQV6tgPYSqBpTJp5Averfobc2LqNgCUGwghJaB2/76pQISkYD/Emvb
9PLmxpoxxzT+nQ==
-----END PRIVATE KEY-----
"""
PUB_KEY = """
-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDJS2HESukNFzLM8hmzcorhlsa3
3Fz1IVHOXGkYzEBeIUn9rlnSfdcUvY94d5IOHImMOdyLzrtjF4dP6cnLN3lXKDr/
NvV+e+69Xk2UemftzzNtWR+S3ybbmBPQVFtJKkfFRglP1I82qESKI85vW3bdb7Bb
ga62CtgHEEIB4LMRiwIDAQAB
-----END PUBLIC KEY-----
"""
class Crypto(object):
# _instance_lock = threading.Lock()
#
# def __new__(cls, *args, **kwargs):
# if not hasattr(cls, "_instance"):
# with cls._instance_lock:
# if not hasattr(cls, "_instance"):
# cls._instance = super().__new__(cls)
# return cls._instance
def __init__(self,
PRI_KEY=None,
PUB_KEY=None,
rsa_crypt_padding_type="PKCS1V15",
rsa_sign_padding_type="PKCS1V15",
sign_hash=hashes.SHA1(),
):
self.CHARSET = 'utf-8'
self.iv = IV
self.PRI_KEY = serialization.load_pem_private_key(PRI_KEY.encode(), password=None, backend=default_backend())
self.PUB_KEY = serialization.load_pem_public_key(PUB_KEY.encode(), backend=default_backend())
"""
with open("f{pri_key_path}}", "rb") as pri_key:
self.PRI_KEY = serialization.load_pem_private_key(key_file.read(),password = None,backend = default_backend())
with open("f{pub_key_path}}", "rb") as pub_key:
self.PUB_KEY = serialization.load_pem_public_key(key_file.read(),backend = default_backend())
"""
self.rsa_crypt_padding_type = rsa_crypt_padding_type
self.rsa_sign_padding_type = rsa_sign_padding_type
self.sign_hash = sign_hash
@property
def rsa_crypt_padding_dict(self):
# PKCS1V15 = 固定位 + 随机数 + 明文消息
# OAEP = 原文Hash + 随机数 + 分隔符 + 原文 #PKCS1V20
_dict = {
"PKCS1V15": asymmetric.padding.PKCS1v15(),
"OAEP": asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
),
}
return _dict
@property
def rsa_sign_padding_dict(self):
_dict = {
"PKCS1V15": asymmetric.padding.PKCS1v15(),
"PSS": asymmetric.padding.PSS(
mgf=asymmetric.padding.MGF1(hashes.SHA256()),
salt_length=asymmetric.padding.PSS.MAX_LENGTH
),
}
return _dict
def random_key(self):
key = "".join([str(random.randint(0, 9)) for _ in range(16)])
return key
def aes_encrypt(self, data: str, key: str) -> str:
# AES的要求的分块长度固定为128比特
padder = padding.PKCS7(algorithms.AES.block_size).padder()
padding_data = padder.update(data.encode(self.CHARSET)) + padder.finalize()
cipher = Cipher(algorithms.AES(key.encode()), modes.ECB(), backend=default_backend())
# cipher = Cipher(algorithms.AES(key.encode()), modes.CBC(self.iv.encode()), backend=default_backend())
encrypt_data = cipher.encryptor().update(padding_data)
# return base64.b64encode(encrypt_data)#->bytes
return str(base64.b64encode(encrypt_data), encoding=self.CHARSET)
def aes_decrypt(self, data: Union[bytes, str], key: str) -> str:
bytes_data = base64.b64decode(data)
cipher = Cipher(algorithms.AES(key.encode()), modes.ECB(), backend=default_backend())
# cipher = Cipher(algorithms.AES(key.encode()), modes.CBC(self.iv.encode()), backend=default_backend())
unpdding_data = cipher.decryptor().update(bytes_data)
unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
decrypt_data = unpadder.update(unpdding_data) + unpadder.finalize()
return decrypt_data.decode(self.CHARSET)
@property
def _rsa_padding(self):
return self.rsa_crypt_padding_dict[self.rsa_crypt_padding_type]
def rsa_encrypt(self, aes_key: str):
encrypted_data = self.PUB_KEY.encrypt(aes_key.encode(), self._rsa_padding)
# RSA加密出来是base64decode,需要转码
return base64.b64encode(encrypted_data).decode()
def rsa_decrypt(self, encrypted_data: Union[bytes, str]):
if isinstance(encrypted_data, str): encrypted_data = base64.b64decode(encrypted_data)
decrypted_data = self.PRI_KEY.decrypt(encrypted_data, self._rsa_padding)
return decrypted_data.decode()
@property
def _rsa_sign_padding(self):
return self.rsa_sign_padding_dict[self.rsa_sign_padding_type]
def sign(self, data: Union[bytes, str]):
# 主流的RSA签名包括 RSA-PSS RSA-PKCS1v15
# PSS更安全
if isinstance(data, str): data = bytes(data, encoding=self.CHARSET)
signature = self.PRI_KEY.sign(data, self._rsa_sign_padding, self.sign_hash)
return base64.b64encode(signature).decode()
def verify(self, data: Union[bytes, str], signature: [bytes, str]):
if isinstance(data, str): data = bytes(data, encoding=self.CHARSET)
if isinstance(signature, str): signature = base64.b64decode(signature.encode())
try:
self.PUB_KEY.verify(signature, data, self._rsa_sign_padding, self.sign_hash)
return True
except InvalidSignature:
return False
def encrypt(self, data):
aes_key = self.random_key() # random_keu
aes_data = self.aes_encrypt(data, aes_key) # AES data->aes_data
# data = self.aes_decrypt(aes_data, aes_key)
rsa_key = self.rsa_encrypt(aes_key) # pub_prim SHA1withRSA aes_key->rsa_key
signature = self.sign(data)
return aes_data, rsa_key, signature
def decrypt(self, aes_data, rsa_key, signature):
key = self.rsa_decrypt(rsa_key)
data = self.aes_decrypt(aes_data, key)
verify = self.verify(data, signature)
return data, verify
#改善 多渠道时 渠道单例
class Channel_A(Crypto):
_instance_lock = threading.Lock()
def __new__(cls, *args, **kwargs):
if not hasattr(cls, "_instance"):
with cls._instance_lock:
if not hasattr(cls, "_instance"):
cls._instance = super().__new__(cls)
return cls._instance
def __init__(self):
super().__init__(PRI_KEY, PUB_KEY, "PKCS1V15", "PKCS1V15")
if __name__ == '__main__':
import json
data = json.dumps({"name": "test"})
crypt = Channel_A()
aes_data, rsa_key, signature = crypt.encrypt(data)
data, verify = crypt.decrypt(aes_data, rsa_key, signature)
print(data, verify)
| true |
a06ba8eeddf8a576ad5f41e4146fb595b735db64 | Python | gkarumbi/tech-pitch | /app/main/forms.py | UTF-8 | 809 | 2.5625 | 3 | [
"MIT"
] | permissive | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField,SubmitField,SelectField
from wtforms.validators import Required
class PostPitch(FlaskForm):
#category = SelectField('Categories', choices = [Category.agritech, Category.cloud,Category.fintech,Category.aiml,Category.block,Category.robotics], default=1)
pitch = TextAreaField('Tell us your idea!')
submit = SubmitField('Submit')
class CommentForm(FlaskForm):
'''
A class to create a comment form using wtf forms
'''
comments = TextAreaField('Leave a comment!')
submit = SubmitField('Submit')
class CategoryForm(FlaskForm):
'''
A class to create categories using wtf forms
'''
name = StringField('category name', validators=[Required()])
submit = SubmitField('Create') | true |
c52a6b2725dde1ee4a0d20f6f6ee86cbc33689f7 | Python | ajolson89/SQLite-Database | /area.py | UTF-8 | 376 | 2.921875 | 3 | [] | no_license | import sqlite3
import pandas
conn = sqlite3.connect("factbook.db")
query = 'SELECT SUM(area_land) FROM facts WHERE area_land != "" ;'
query2 = 'SELECT SUM(area_water) FROM facts WHERE area_water != "" ;'
area_land = pandas.read_sql_query(query, conn)
area_water = pandas.read_sql_query(query2, conn)
print(area_land['SUM(area_land)'][0] / area_water['SUM(area_water)'][0]) | true |
acc4ad4e6ce122e879915af54cf1f0854db3bce7 | Python | aslomoi/compliance-trestle | /trestle/core/models/elements.py | UTF-8 | 10,693 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Element wrapper of an OSCAL model element."""
from typing import List, Optional

import yaml
from pydantic import Field, create_model
from pydantic.error_wrappers import ValidationError

import trestle.core.utils as utils
from trestle.core.base_model import OscalBaseModel
from trestle.core.err import TrestleError
class ElementPath:
    """Element path wrapper of an element.

    A path is a '.'-separated chain of attribute names, e.g. 'catalog.metadata.parties'.
    Only a single wildcard '*' is allowed, and only at the end, to denote the elements
    of an array or dict.
    """

    PATH_SEPARATOR: str = '.'

    WILDCARD: str = '*'

    def __init__(self, element_path: str, parent_path=None):
        """Initialize an element wrapper.

        Args:
            element_path: the '.'-separated path string to parse.
            parent_path: optional path to the parent element; a str is converted
                to an ElementPath.

        Raises:
            TrestleError: if the path string is invalid (see _parse).
        """
        self._path: List[str] = self._parse(element_path)

        # Lazily computed and cached by get_element_name() / get_parent_path().
        self._element_name = None
        self._parent_element_path = None

        if isinstance(parent_path, str):
            parent_path = ElementPath(parent_path)
        self._parent_path = parent_path

    def _parse(self, element_path) -> List[str]:
        """Parse the element path and validate.

        Raises:
            TrestleError: if any part is empty, if the wildcard appears anywhere
                but the last position, or if the path is only a wildcard.
        """
        parts: List[str] = element_path.split(self.PATH_SEPARATOR)

        for i, part in enumerate(parts):
            if part == '':
                raise TrestleError(
                    f'Invalid path "{element_path}" because having empty path parts between "{self.PATH_SEPARATOR}" or in the beginning'
                )
            elif part == self.WILDCARD and i != len(parts) - 1:
                raise TrestleError(f'Invalid path. Wildcard "{self.WILDCARD}" can only be at the end')

        # a bare wildcard with no preceding element name is meaningless
        if parts[-1] == self.WILDCARD and len(parts) == 1:
            raise TrestleError(f'Invalid path {element_path}')

        return parts

    def get(self) -> List[str]:
        """Return the path components as a list."""
        return self._path

    def get_parent(self):
        """Return the parent path passed at construction time.

        It can be None or ElementPath
        """
        return self._parent_path

    def get_first(self) -> str:
        """Return the first part of the path."""
        return self._path[0]

    def get_last(self) -> str:
        """Return the last part of the path."""
        return self._path[-1]

    def get_element_name(self):
        """Return the element name from the path.

        For a wildcard path this is the part just before the '*'.
        """
        # compute on first access, then serve from the cache
        if self._element_name is None:
            element_name = self.get_last()
            if element_name == self.WILDCARD:
                element_name = self._path[-2]

            self._element_name = element_name

        return self._element_name

    def get_parent_path(self):
        """Return the path to the parent element, or None for a single-part path."""
        # compute on first access, then serve from the cache
        if self._parent_element_path is None:
            if len(self._path) > 1:
                parent_path_parts = self._path[:-1]
                self._parent_element_path = ElementPath(self.PATH_SEPARATOR.join(parent_path_parts))

        return self._parent_element_path

    def __str__(self):
        """Return string representation of element path."""
        return self.PATH_SEPARATOR.join(self._path)

    def __eq__(self, other):
        """Compare by path components; non-ElementPath objects never compare equal."""
        if not isinstance(other, ElementPath):
            return False
        return self.get() == other.get()

    def __hash__(self):
        """Return a hash consistent with __eq__ so paths can be set members or dict keys.

        Defining __eq__ alone would set __hash__ to None and make instances unhashable.
        """
        return hash(tuple(self._path))
class Element:
    """Element wrapper of an OSCAL model."""

    # NOTE(review): `X.__class__` of a class object yields its *metaclass* (e.g. `type`,
    # pydantic's ModelMetaclass), not the class itself — so this list holds metaclasses.
    # Presumably consumed by is_allowed_sub_element_type() for type checks on candidate
    # sub-elements; confirm the intent before changing.
    _allowed_sub_element_types = [OscalBaseModel.__class__, list.__class__, None.__class__]
    def __init__(self, elem: OscalBaseModel):
        """Initialize an element wrapper around the given OSCAL model object."""
        # keep a reference to the wrapped model; all accessors operate on it
        self._elem: OscalBaseModel = elem
    def get(self) -> OscalBaseModel:
        """Return the wrapped model object."""
        return self._elem
    def get_at(self, element_path: Optional[ElementPath] = None):
        """Get the sub-model object at the specified element path.

        Returns the wrapped model itself when element_path is None. The returned
        sub-model can be an OscalBaseModel, a list (e.g. for a trailing wildcard),
        or None when a path part does not resolve.
        """
        if element_path is None:
            return self._elem

        # TODO process element_path.get_parent()

        # walk the attribute chain starting from the wrapped root model
        elm = self._elem
        for attr in element_path.get():
            # process for wildcard and array indexes
            if attr == ElementPath.WILDCARD:
                # a wildcard returns whatever collection/model is at this point
                break
            elif attr.isnumeric():
                # numeric parts index into lists only; any other container yields None
                if isinstance(elm, list):
                    elm = elm[int(attr)]
                else:
                    elm = None
                # NOTE(review): the walk always stops after a numeric index, so any
                # path parts after the index are ignored — confirm this is intended
                break
            else:
                # missing attributes resolve to None rather than raising
                elm = getattr(elm, attr, None)

        return elm
def get_parent(self, element_path: ElementPath):
"""Get the parent element of the element specified by the path."""
# get the parent element
parent_path = element_path.get_parent_path()
if parent_path is None:
parent_elm = self.get()
else:
parent_elm = self.get_at(parent_path)
return parent_elm
def _get_sub_element_obj(self, sub_element):
"""Convert sub element into allowed model obj."""
if not self.is_allowed_sub_element_type(sub_element):
raise TrestleError(
f'Sub element must be one of "{self.get_allowed_sub_element_types()}", found "{sub_element.__class__}"'
)
model_obj = sub_element
if isinstance(sub_element, Element):
model_obj = sub_element.get()
return model_obj
def set_at(self, element_path, sub_element):
"""Set a sub_element at the path in the current element.
Sub element can be Element, OscalBaseModel, list or None type
It returns the element itself so that chaining operation can be done such as
`element.set_at(path, sub-element).get()`.
"""
# convert the element_path to ElementPath if needed
if isinstance(element_path, str):
element_path = ElementPath(element_path)
# convert sub-element to OscalBaseModel if needed
model_obj = self._get_sub_element_obj(sub_element)
# TODO process element_path.get_parent()
# If wildcard is present, check the input type and determine the parent element
if element_path.get_last() == ElementPath.WILDCARD:
# validate the type is either list or OscalBaseModel
if not isinstance(model_obj, list) and not isinstance(model_obj, OscalBaseModel):
raise TrestleError(
f'The model object needs to be a List or OscalBaseModel for path with "{ElementPath.WILDCARD}"'
)
# since wildcard * is there, we need to go one level up for parent element
parent_elm = self.get_parent(element_path.get_parent_path())
else:
# get the parent element
parent_elm = self.get_parent(element_path)
if parent_elm is None:
raise TrestleError(f'Invalid sub element path {element_path} with no parent element')
# check if it can be a valid sub_element of the parent
sub_element_name = element_path.get_element_name()
if hasattr(parent_elm, sub_element_name) is False:
raise TrestleError(
f'Element "{parent_elm.__class__}" does not have the attribute "{sub_element_name}" \
of type "{model_obj.__class__}"'
)
# set the sub-element
try:
setattr(parent_elm, sub_element_name, model_obj)
except ValidationError:
sub_element_class = self.get_sub_element_class(parent_elm, sub_element_name)
raise TrestleError(
f'Validation error: {sub_element_name} is expected to be "{sub_element_class}", \
but found "{model_obj.__class__}"'
)
# returning self will allow to do 'chaining' of commands after set
return self
def to_yaml(self):
"""Convert into YAML string."""
wrapped_model = self.oscal_wrapper()
return yaml.dump(yaml.safe_load(wrapped_model.json(exclude_none=True, by_alias=True)))
def to_json(self):
"""Convert into JSON string."""
wrapped_model = self.oscal_wrapper()
json_data = wrapped_model.json(exclude_none=True, by_alias=True, indent=4)
return json_data
def oscal_wrapper(self):
"""Create OSCAL wrapper model for read and write."""
class_name = self._elem.__class__.__name__
# It would be nice to pass through the description but I can't seem to and
# it does not affect the output
dynamic_passer = {}
dynamic_passer[utils.class_to_oscal(class_name, 'field')] = (
self._elem.__class__,
Field(
self, title=utils.class_to_oscal(class_name, 'field'), alias=utils.class_to_oscal(class_name, 'json')
)
)
wrapper_model = create_model(class_name, __base__=OscalBaseModel, **dynamic_passer)
# Default behaviour is strange here.
wrapped_model = wrapper_model(**{utils.class_to_oscal(class_name, 'json'): self._elem})
return wrapped_model
@classmethod
def get_sub_element_class(cls, parent_elm: OscalBaseModel, sub_element_name: str):
"""Get the class of the sub-element."""
sub_element_class = parent_elm.__fields__.get(sub_element_name).outer_type_
return sub_element_class
@classmethod
def get_allowed_sub_element_types(cls) -> List[str]:
"""Get the list of allowed sub element types."""
return cls._allowed_sub_element_types.append(Element.__class__)
@classmethod
def is_allowed_sub_element_type(cls, elm) -> bool:
"""Check if is of allowed sub element type."""
if (isinstance(elm, Element) or isinstance(elm, OscalBaseModel) or isinstance(elm, list) or elm is None):
return True
return False
def __str__(self):
"""Return string representation of element."""
return type(self._elem).__name__
| true |
43c780a18eee13e9f9a2cadb044abbf8da0f4358 | Python | arifkhan1990/LeetCode-solution | /189-rotate-array/189-rotate-array.py | UTF-8 | 571 | 2.765625 | 3 | [] | no_license | class Solution:
def rotate(self, nums: List[int], k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
s , e = 0 , len(nums)-1
k = k%(len(nums))
while s < e:
nums[s], nums[e] = nums[e], nums[s]
s, e = s+1, e-1
s,e = 0, k-1
while s < e:
nums[s], nums[e] = nums[e],nums[s]
s, e = s+1, e-1
s,e = k, len(nums)-1
while s < e:
nums[s], nums[e] = nums[e], nums[s]
s,e = s+1, e-1 | true |
ff5dffe54de7f36260980d9588c053366460c9f5 | Python | JoeFannie/Polynomial-Regression | /src/main.py | UTF-8 | 1,084 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python
import matplotlib.pyplot as plt
import math
import numpy as np
from Regression import Regression
def main():
    """Fit a degree-9 polynomial to noisy sine samples and plot the result."""
    reg = Regression()
    # NOTE(review): these assignments create attributes literally named
    # `set_max_iter`, `set_lr`, ... on the instance. They look like they were
    # meant to be setter calls or plain attribute names (e.g. reg.max_iter);
    # confirm against the Regression class API.
    reg.set_max_iter = 20000
    reg.set_lr = 0.01
    reg.set_l2_penalty = 0.002
    reg.set_tolerance = 1e-5

    deg = 9
    num_sample = 10

    # Sample x uniformly in [0, 1) and add Gaussian noise to sin(2*pi*x).
    x = np.arange(0, 1, 1.0 / num_sample).reshape(num_sample, 1)
    y_list = [math.sin(2 * math.pi * e) for e in x] + np.random.normal(0, 0.3, num_sample)
    y = np.array(y_list).reshape(num_sample, 1)

    # polynomial_fit returns the fitted parameters plus diagnostics
    theta, loss, repeat = reg.polynomial_fit(x, y, deg)

    # Evaluate the fitted polynomial on a dense grid for a smooth curve.
    z = np.linspace(0, 1, 100)
    prediction = reg.predict(z)

    fig = plt.figure()
    plt.plot(x, y, 'o', label='Input data')
    plt.plot(z, prediction, 'r-', label='Prediction')
    plt.plot(z, [math.sin(2 * math.pi * e) for e in z], label='Sine Function')
    # Fix: the original called pylab.xlim/pylab.ylim but never imported
    # pylab (NameError at runtime); use the pyplot API already imported.
    plt.xlim([0, 1])
    plt.ylim([-1.5, 1.5])
    plt.legend(loc=3)
    fig.suptitle('Polynomial Regression, N=10,Dgree=3,Lamda=0.002')
    plt.xlabel('Input')
    plt.ylabel('Output(prediction)')
    plt.show()
# Fix: the original used a single '=' (assignment) in the guard, which is a
# SyntaxError; the comparison operator '==' is required.
if __name__ == '__main__':
    main()
bbfd9718682f73609d3b088504109749753236ef | Python | tanvir-tech/PythonPractice | /2_function/21_Basic_Functions/3_lambda_function.py | UTF-8 | 721 | 4.03125 | 4 | [] | no_license | # ( lambda *args : singleExpression ) (Actual_Inputs)
r=(lambda x:x*x)(5) # lambda called with 5
print("Simple lambda call => Square is =",r)
def lambda_Variable_Multiplier(n):
    """Return a one-argument function that multiplies its input by n.

    The returned closure captures n, so each call to this factory produces
    an independent multiplier.
    """
    def multiply(a):
        return a * n
    return multiply
# Bind n=2 inside the closure: doubler multiplies its argument by 2.
doubler = lambda_Variable_Multiplier(2) # (n=2) => completing lambda function in doubler variable
result = doubler(5) # doubler(5) => inner function called with a=5
print("Double is =",result)

# Bind n=3: tripler multiplies its argument by 3.
tripler = lambda_Variable_Multiplier(3) # (n=3) => completing lambda function in tripler variable
result = tripler(5) # tripler(5) => inner function called with a=5
print("Triple is =",result)
c04d260c7ec183cca2050bccceff6a35b6c2c5b3 | Python | Blokyt/Python | /NSI/NSI LOGISIM Cours/NSI/IEEE 754 32bits invert.py | UTF-8 | 3,576 | 3 | 3 | [] | no_license | def CodeVirguleFlottante():
NbDecimal = float(input("\nNombre decimal : "))
if NbDecimal > 0:
BinSigne = "0"
elif NbDecimal < 0:
BinSigne = "1"
NbDecimal = -NbDecimal
else:
print("\nBinaire : 0")
return
NbBits=int(input("Nombre de bits 32/64 : "))
if NbBits == 32:
longExposant = 8
longMantisse = 23
facteurExposant = 127
elif NbBits == 64:
longExposant = 11
longMantisse = 52
facteurExposant = 1023
PartieEntiere = int(NbDecimal)
BinEntier = PartieEntiereToBin(PartieEntiere)
PartieDecimale = NbDecimal - int(NbDecimal)
BinDecimal = PartieDecimalToBin(PartieDecimale)
NbBinaire = BinEntier+","+BinDecimal
#Exposant
Exposant = CalcExposant(NbBinaire)
BinExposant = PartieEntiereToBin(Exposant+facteurExposant)
#compléte les zéros manquants
BinExposant = "0"*(longExposant-len(BinExposant))+BinExposant
#print("Exposant = "+str(Exposant))
#print("BinExposant : "+BinExposant)
#Mantisse
Mantisse = CalcMantisse(NbBinaire, longMantisse)
#Affichage des 32 bits
print("\nNombre Binaire IEEE754 : "+BinSigne, BinExposant, Mantisse)
CodeVirguleFlottante()
def CalcExposant(NbBinaire):
    """Return the base-2 exponent of a binary string of the form 'int,frac'.

    For values < 1 (string starting with the comma) the exponent is the
    negative position of the first '1' after the comma; otherwise it is
    the number of integer bits before the comma minus one.
    """
    if NbBinaire.startswith(","):
        # e.g. ",01" -> first '1' at offset 2 -> exponent -2
        for position, character in enumerate(NbBinaire):
            if character == "1":
                return -position
    else:
        # e.g. "101,11" -> comma at offset 3 -> exponent 2
        for position, character in enumerate(NbBinaire):
            if character == ",":
                return position - 1
def CalcMantisse(NbBinaire, longMantisse):
    """Extract the mantissa bits from a binary string of the form 'int,frac'.

    Drops everything up to and including the first significant '1' (the
    implicit leading bit of the IEEE mantissa), removes the comma,
    truncates to longMantisse bits and pads with trailing zeros.
    """
    # convert to a list so individual characters can be blanked / removed
    NbBinaire = list(NbBinaire)
    # blank out (set to "") every character up to and including the
    # first significant '1'
    i = 0
    for bin in NbBinaire:
        if bin == "1":
            NbBinaire[i] = ""
            break
        else:
            NbBinaire[i] = ""
        i = i+1
    # NOTE: Mantisse aliases NbBinaire -- both names refer to the same list
    Mantisse = NbBinaire
    # remove the comma (there is at most one, so the mutate-while-iterating
    # loop only ever deletes a single entry)
    i = 0
    for bin in Mantisse:
        if bin == ",":
            del Mantisse[i]
        i = i+1
    # drop all the blanked-out ("") entries
    while "" in NbBinaire:
        NbBinaire.remove("")
    # truncate bits that exceed the mantissa field width
    while len(Mantisse) > longMantisse:
        del Mantisse[len(Mantisse)-1]
    # join the list back into a string
    Mantisse = "".join(Mantisse)
    # pad with trailing zeros up to the field width
    Mantisse = Mantisse+"0"*(longMantisse-len(Mantisse))
    return Mantisse
def PartieDecimalToBin(Partie_Décimal):
    """Convert the fractional part of a number to a binary digit string.

    Repeated doubling: each step doubles the remaining fraction and emits
    its integer part (0 or 1). Stops when the fraction is exhausted,
    reaches exactly 1, or after 25 digits. Returns "" for 0.
    """
    bits = []
    carry = 0
    count = 0  # number of digits emitted so far (capped at 25)
    while 0 < Partie_Décimal and not Partie_Décimal == 1 and count < 25:
        count += 1
        # strip the integer part produced last step, then double
        Partie_Décimal = (Partie_Décimal - carry) * 2
        carry = int(Partie_Décimal)
        bits.append(str(carry))
    return "".join(bits)
def PartieEntiereToBin(Partie_Entière):
    """Convert a non-negative integer to its binary string (MSB first).

    Returns the empty string for 0, which the callers rely on.
    """
    digits = []
    remaining = Partie_Entière
    while remaining > 0:
        # peel off the least-significant bit each iteration
        remaining, bit = divmod(remaining, 2)
        digits.append(str(bit))
    # digits were produced LSB-first; reverse for conventional order
    return "".join(reversed(digits))
CodeVirguleFlottante()
| true |
caa47d79b96322ff23ece103dac135decf47dc6e | Python | clhchtcjj/Algorithm | /Tree/leetcode 450 删除BST中的节点.py | UTF-8 | 1,538 | 3.734375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
__author__ = 'CLH'
# Approach: replace the deleted node's value with the largest value in its
# left subtree or (as implemented below) the smallest value in its right
# subtree, then recursively delete that successor node.
class Solution(object):
    """Delete the node with the given key from a binary search tree."""

    def deleteNode(self, root, key):
        """
        :type root: TreeNode
        :type key: int
        :rtype: TreeNode
        """
        return self._deleteNode(root, key) if root else root

    def _deleteNode(self, node, key):
        # Recursive worker: returns the (possibly new) root of the subtree.
        # The BST property prunes the search to a single branch.
        if node.val > key:
            if node.left:
                node.left = self._deleteNode(node.left, key)
            return node
        if node.val < key:
            if node.right:
                node.right = self._deleteNode(node.right, key)
            return node
        # node.val == key: splice this node out.
        if not node.left:
            # covers both the leaf case (returns None) and right-child-only
            return node.right
        if not node.right:
            return node.left
        # Two children: copy in the in-order successor (minimum of the
        # right subtree), then delete that successor from the right side.
        successor = node.right
        while successor.left:
            successor = successor.left
        node.val = successor.val
        node.right = self._deleteNode(node.right, successor.val)
        return node
| true |
86cbf3f6f01f09b2d92463f64d94128c28ca6591 | Python | siddhiparkar151992/Online-Book-Store | /bookstore/src/shipment/dao/ShipmentDao.py | UTF-8 | 2,026 | 2.578125 | 3 | [] | no_license | '''
Created on Mar 30, 2016
@author: Dell
'''
from bookstore.src.dao.DataAccessor import DataAccessor
import datetime
from datetime import timedelta
from bookstore.config import userid
class ShipmentDao(DataAccessor):
    """Data-access object for customer addresses and shipment records.

    NOTE(review): every query below is built with str.format() on
    user-supplied values -- any quote character breaks the SQL and this is
    open to SQL injection. Prefer parameterized queries if the underlying
    DataAccessor supports them.
    """
    def __init__(self):
        """Initialize the DAO; address_id stays 0 until add_user_addr()."""
        super(ShipmentDao,self).__init__()
        self.address_id = 0
    def add_user_addr(self, address):
        # Insert an address row, capture its auto-increment id, and point
        # the customer record at it. `address` is presumably a dict with
        # country/state/city/zipcode/street/building/room_no keys -- TODO
        # confirm against callers.
        try:
            qry = ("insert into address(country, state, city, zipcode, street, building, room_no) "
        "values('{}', '{}', '{}', {}, '{}', '{}', {})").format(address['country'], address['state'], address['city'], int(address['zipcode']), address['street'], address['building'], int(address['room_no']))
            qry_c = """select LAST_INSERT_ID() as id from address"""
            super(ShipmentDao,self).read(query= qry)
            result= super(ShipmentDao,self).read(query= qry_c)
            self.address_id = result[0]['id']
            # NOTE(review): this UPDATE has no WHERE clause -- it rewrites the
            # address column for every customer row; verify intent.
            qry_update = ("""update customer set address = {}""").format(self.address_id)
            super(ShipmentDao,self).read(query=qry_update)
            return self.address_id
        except Exception as e:
            # NOTE(review): swallowing the exception makes the method return
            # None to the caller with only a console message as evidence.
            print("exception in address",e)
    def add_user_shipment(self, type="Home delivery", promised_date=None, delivery_date = None):
        # NOTE(review): the promised_date/delivery_date parameters are
        # ignored -- both are always overwritten with now+24h (naive UTC).
        dt=(datetime.datetime.utcnow() + timedelta(hours = 24))
        delivery_date = promised_date = dt.strftime("%y-%m-%d %H:%M:%S")
        query=("insert into shipment"
               " (address_id, type, promised_date, delivery_date, user_id)"
               " values({}, '{}', '{}', '{}', '{}')").format(self.address_id, type, delivery_date,promised_date, userid)
        super(ShipmentDao,self).read(query=query)
        # fetch the id of the shipment row just inserted for this user
        query =("select LAST_INSERT_ID() as id from shipment where user_id = '{}'").format(userid)
        result= super(ShipmentDao,self).read(query=query)
        return [result[0]['id'], delivery_date]
| true |
5ab5c958853323b31806390b0b8ce0aeb254f258 | Python | SamBaRufus/aeios | /aeios/config.py | UTF-8 | 11,499 | 2.921875 | 3 | [
"MIT"
] | permissive | import os
import plistlib
# import filelock
import logging
import fcntl
import threading
import time
import xml.parsers.expat
"""
Persistant Configuration
"""
__author__ = 'Sam Forester'
__email__ = 'sam.forester@utah.edu'
__copyright__ = 'Copyright (c) 2019 University of Utah, Marriott Library'
__license__ = 'MIT'
__version__ = "1.3.1"
# suppress "No handlers could be found" message
logging.getLogger(__name__).addHandler(logging.NullHandler())
__all__ = [
'Manager',
'FileLock',
'TimeoutError',
'ConfigError'
]
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class ConfigError(Error):
    """Raised when a configuration file cannot be read or is corrupted."""
    pass
class Missing(Error):
    """Raised when a configuration file does not exist on disk."""
    pass
# NOTE(review): this shadows the builtin TimeoutError (Python 3); code doing
# `from config import *` should be aware.
class TimeoutError(Error):
    """
    Raised when lock could not be acquired before timeout
    """
    def __init__(self, lockfile):
        # path of the lockfile that could not be acquired
        self.file = lockfile
    def __str__(self):
        return "{0}: lock could not be acquired".format(self.file)
class ReturnProxy(object):
    """
    Wrap the lock to make sure __enter__ is not called twice
    when entering the with statement.
    If we would simply return *self*, the lock would be acquired
    again in the *__enter__* method of the BaseFileLock,
    but not released again automatically.
    (Not sure if this is pertinant, but it definitely breaks without it)
    """
    def __init__(self, lock):
        # the already-acquired lock object this proxy manages
        self.lock = lock
    def __enter__(self):
        # the lock is already held; just hand it to the `with` body
        return self.lock
    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
class FileLock(object):
    """
    Unix filelocking
    Adapted from py-filelock, by Benedikt Schmitt
    https://github.com/benediktschmitt/py-filelock

    Re-entrant within a process: nested acquire() calls bump a counter and
    the OS-level lock is only dropped once the counter returns to 0.
    """

    def __init__(self, file, timeout=-1):
        """
        :param file:    path of the lockfile
        :param timeout: seconds to wait for the lock (negative disables
                        the timeout, i.e. wait forever)
        """
        self._file = file
        self._fd = None                       # open fd while the lock is held
        self._timeout = timeout
        self._thread_lock = threading.Lock()  # guards _fd and _counter
        self._counter = 0                     # re-entrancy count

    @property
    def file(self):
        """
        :returns: lockfile path
        """
        return self._file

    @property
    def timeout(self):
        """
        :returns: value (in seconds) of the timeout
        """
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        """
        Seconds to wait before raising TimeoutError()
        a negative timeout will disable the timeout
        a timeout of 0 will allow for one attempt acquire the lock
        """
        self._timeout = float(value)

    @property
    def locked(self):
        """
        :returns: True, if the object holds the file lock, else False
        """
        return self._fd is not None

    def _acquire(self):
        """
        Unix based locking using fcntl.flock(LOCK_EX | LOCK_NB)

        On failure the fd is closed and self._fd stays None, which
        acquire() treats as "try again later".
        """
        flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        # 0o644 (was the py2-only literal 0644; same permission bits)
        fd = os.open(self._file, flags, 0o644)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            self._fd = fd
        except (IOError, OSError):
            # somebody else holds the lock; close our fd and poll again
            os.close(fd)

    def _release(self):
        """
        Unix based unlocking using fcntl.flock(LOCK_UN)
        """
        fcntl.flock(self._fd, fcntl.LOCK_UN)
        os.close(self._fd)
        self._fd = None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """
        Acquire the lock, polling every *poll_intervall* seconds.

        :param timeout: seconds before TimeoutError is raised; None falls
            back to self.timeout. Fix: the original tested `if not timeout`,
            which silently replaced an explicit timeout of 0 with the
            instance default, even though 0 is documented to mean
            "one attempt".
        :raises TimeoutError: if the lock cannot be acquired in time
        :returns: a ReturnProxy usable in a `with` statement
        """
        if timeout is None:
            timeout = self.timeout

        with self._thread_lock:
            self._counter += 1

        start = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.locked:
                        self._acquire()
                if self.locked:
                    break
                elif timeout >= 0 and (time.time() - start) > timeout:
                    raise TimeoutError(self._file)
                else:
                    time.sleep(poll_intervall)
        except:
            # undo the counter bump on any failure (including
            # KeyboardInterrupt), then re-raise unchanged
            with self._thread_lock:
                self._counter = max(0, self._counter - 1)
            raise
        return ReturnProxy(lock=self)

    def release(self, force=False):
        """
        Release the lock.
        Note, that the lock is only completly released, if the
        lock counter is 0
        lockfile is not automatically deleted.
        :arg bool force:
            If true, the lock counter is ignored and the lock is
            released in every case.
        """
        with self._thread_lock:
            if self.locked:
                self._counter -= 1
                if self._counter == 0 or force:
                    self._release()
                    self._counter = 0

    def __enter__(self):
        self.acquire()
        # Fix: the original returned None, so `with FileLock(p) as lk:`
        # bound lk to None; return self per the context-manager convention.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()

    def __del__(self):
        # best effort: drop the OS-level lock when the object is collected
        self.release(force=True)
class Manager(object):
    """
    This class is meant to allow scripts to read and serialize
    configuration files.
    The configuration files themselves are modified via filelocking to
    prevent them from being mangled when being accessed by multiple
    scripts.
    :param id: the configuration identifier
    :type id: str
    EXAMPLE:
        conf = config.Manager("foo")   # initializes the config manager
        try:
            settings = conf.read()     # read the config file
        except config.Error:
            settings = {}
        settings['foo'] = 'bar'
        conf.write(settings)           # serialize the modified settings
    All serialization files will be written to:
        /user/specified/directory (path specified at instantiation)
        /Library/Management/Configuration
        ~/Library/Management/Configuration
    """
    # directory holding the lockfiles (not the config plists themselves)
    TMP = '/tmp/config'
    def __init__(self, id, path=None, logger=None, **kwargs):
        """
        Setup the configuration manager. Checks to make sure a
        configuration directory exists (creates directory if not)

        :param id:     configuration identifier; becomes "<id>.plist"
        :param path:   optional explicit config directory
        :param logger: optional logger (a null-handler logger is used if
                       omitted)
        :param kwargs: forwarded to FileLock (e.g. timeout)
        """
        if not logger:
            logger = logging.getLogger(__name__)
            logger.addHandler(logging.NullHandler())
        self.log = logger
        lockdir = self.__class__.TMP
        if not os.path.exists(lockdir):
            os.mkdir(lockdir)
        management = 'Library/Management/Configuration'
        homefolder = os.path.expanduser('~')
        # default candidates: system-wide first, then per-user
        directories = [os.path.join('/', management),
                       os.path.join(homefolder, management)]
        if path:
            if os.path.isfile(path):
                raise TypeError("not a directory: {0}".format(path))
            try:
                dir = check_and_create_directories([path])
            except ConfigError as e:
                # fall back to a read-only (but existing) directory
                if os.path.isdir(path) and os.access(path, os.R_OK):
                    dir = path
                else:
                    raise e
        else:
            # create the config directory if it doesn't exist
            dir = check_and_create_directories(directories)
        self.file = os.path.join(dir, "{0}.plist".format(id))
        ## create a lockfile to block race conditions
        self.lockfile = "{0}/{1}.lockfile".format(lockdir, id)
        # self.lock = filelock.FileLock(self.lockfile, **kwargs)
        self.lock = FileLock(self.lockfile, **kwargs)
    def write(self, data):
        """
        Serializes specified settings to file
        """
        with self.lock.acquire():
            plistlib.writePlist(data, self.file)
    def read(self):
        """
        :returns: data structure (list|dict) as read from disk
        :raises: Missing if the file does not exist
        :raises: ConfigError if the plist cannot be parsed
        """
        if not os.path.exists(self.file):
            raise Missing("file missing: {0}".format(self.file))
        try:
            with self.lock.acquire():
                return plistlib.readPlist(self.file)
        except xml.parsers.expat.ExpatError:
            raise ConfigError("corrupted plist: {0}".format(self.file))
    # TYPE SPECIFIC FUNCTIONS
    # NOTE: the nested read()/write() calls below re-acquire self.lock;
    # this works because FileLock is re-entrant (counter-based).
    def get(self, key, default=None):
        # dict-style get: value for key, or default when absent
        with self.lock.acquire():
            data = self.read()
            return data.get(key, default)
    def update(self, value):
        """
        read data from file, update data, and write back to file
        """
        with self.lock.acquire():
            data = self.read()
            data.update(value)
            self.write(data)
            return data
    def delete(self, key):
        """
        read data from file, remove the key, and write back to file;
        returns the removed value (raises KeyError if absent)
        """
        with self.lock.acquire():
            data = self.read()
            v = data.pop(key)
            self.write(data)
            return v
    def deletekeys(self, keys):
        """
        remove specified keys from file (if they exist)
        returns old values as dictionary
        """
        with self.lock.acquire():
            data = self.read()
            _old = {}
            for key in keys:
                try:
                    _old[key] = data.pop(key)
                except KeyError:
                    # missing keys are silently skipped by design
                    pass
            self.write(data)
            return _old
    # EXPERIMENTAL
    def reset(self, key, value):
        """
        this is poor design, but I'm going to leave it for now
        overwrites existing key with value
        returns previous value (raises KeyError if the key is absent)
        """
        with self.lock.acquire():
            data = self.read()
            previous = data[key]
            data[key] = value
            self.write(data)
            return previous
    def append(self, value):
        # only meaningful when the serialized root object is a list
        with self.lock.acquire():
            data = self.read()
            data.append(value)
            self.write(data)
            return data
    def remove(self, key, value=None):
        # NOTE(review): the branches below are inconsistent -- when value is
        # falsy-but-not-None (0, "", []) the code operates on `data` itself
        # with `value`, not on data[key]; confirm intended semantics before
        # relying on that path.
        with self.lock.acquire():
            data = self.read()
            if value:
                if isinstance(data[key], list):
                    data[key].remove(value)
                elif isinstance(data[key], dict):
                    data[key].pop(value)
            elif value is None:
                # no value given: drop the whole key
                del(data[key])
            else:
                if isinstance(data, list):
                    data.remove(value)
                elif isinstance(data, dict):
                    data.pop(value)
            self.write(data)
    def add(self, key, value):
        # append value(s) to the list at data[key], skipping duplicates
        with self.lock.acquire():
            data = self.read()
            try:
                # treat value as an iterable of items to merge in
                for i in value:
                    if i not in data[key]:
                        data[key].append(i)
            # TO-DO: Is there a reason I'm catching KeyError specifically?
            except:
                # non-iterable value (or missing key): append it as a single
                # item -- NOTE(review): the bare except hides real errors
                data[key].append(value)
            self.write(data)
    def setdefault(self, key, default=None):
        # dict.setdefault semantics, but only persists non-None defaults
        with self.lock.acquire():
            data = self.read()
            try:
                return data[key]
            except KeyError:
                data[key] = default
                if default is not None:
                    self.write(data)
            return default
def check_and_create_directories(dirs, mode=0o755):
    """
    checks list of directories to see what would be a suitable place
    to write the configuration file

    Tries each candidate in order; the first one that can be created (or
    that already exists and is writable) is returned.

    :param dirs: list of candidate directory paths
    :param mode: permission bits used when creating directories
                 (was the py2-only literal 0755; same value)
    :returns: first usable directory path
    :raises ConfigError: if no candidate directory is usable
    """
    for path in dirs:
        try:
            os.makedirs(path, mode)
            return path
        except OSError as e:
            # EEXIST (was the magic number 17): directory already exists,
            # which is fine as long as it is writable
            if e.errno == errno.EEXIST and os.access(path, os.W_OK):
                return path
    ## exhausted all options
    raise ConfigError("no suitable directory was found for config")
if __name__ == '__main__':
    # library-only module: nothing to do when executed directly
    pass
| true |