blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c1b5d507c0b0ac0f58dd8fdf7fbc24cb3edb7b01 | Python | meads58/python | /LearningResources/LearnPythonTheHardWay/Exercises/Exercise_1/ex_1.py | UTF-8 | 153 | 3.796875 | 4 | [] | no_license | #exercise 1 printing
print 'this is some text '
print "using double quites"
print 'seeing what "this" does '
print "will this do 'something' different" | true |
6ca923f73c80e0d27735b8adcd903ea3cfaf5afb | Python | Jlobblet/apollo | /utils/utils.py | UTF-8 | 797 | 2.625 | 3 | [
"MIT"
] | permissive | from decimal import Decimal, InvalidOperation
from typing import Iterable
from config import CONFIG
def user_is_irc_bot(ctx):
    """Return True if the context's author is the configured Discord<->IRC bridge bot."""
    return ctx.author.id == CONFIG.UWCS_DISCORD_BRIDGE_BOT_ID
def get_name_string(message):
    """Return a display name for the author of *message*.

    For messages relayed by the IRC bridge bot the nick is embedded in the
    message text (the old test hook checked for a ``"**<"`` prefix), so it
    is sliced out of the first word; native Discord users are identified by
    their mention string.
    """
    if not user_is_irc_bot(message):
        return f"{message.author.mention}"
    first_word = message.clean_content.split(" ")[0]
    return first_word[3:-3]
def is_decimal(num):
    """Return True if *num* can be converted to a ``decimal.Decimal``."""
    try:
        Decimal(num)
    except (InvalidOperation, TypeError):
        return False
    return True
def pluralise(l, word, single="", plural="s"):
    """Return *word* with the suffix appropriate for the size of *l*.

    The singular suffix is used only when *l* has exactly one element.
    Fix: the previous ``len(l) > 1`` test treated an empty collection as
    singular ("0 item"); English uses the plural for zero ("0 items").
    """
    if len(l) == 1:
        return word + single
    return word + plural
def filter_out_none(iterable: Iterable):
    """Return a list of the items in *iterable* that are not None.

    Falsy-but-present values (0, '', False) are kept.
    """
    return list(filter(lambda item: item is not None, iterable))
| true |
f01b60fb476cdbd81578f3f8ab6ba8ac574ebe07 | Python | maojingyi/PETCTomics | /aglio_y_olio/machine_specific_paths.py | UTF-8 | 2,039 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 22:34:00 2015
@author: rharnish
"""
import os
import socket
#%%
class MachineSpecificPaths:
    """Expose db/test/data/incoming directory paths for the current machine.

    The correct path set is chosen from the hostname, so the same analysis
    code can run on a developer Mac or on the lab's linux hosts.
    (Python 2 module -- uses ``print`` statements.)
    """
    def __init__(self,*args):
        # Paths default to None; unrecognised hosts leave them unset.
        self._db_dir = None
        self._test_dir = None
        self._data_dir = None
        self._incoming_dir = None
        self.determine_paths_for_machine()
    # ---------------------------------------------------------------------
    # Properties (read-only accessors over the hostname-derived paths)
    # ---------------------------------------------------------------------
    @property
    def db_dir(self):
        return self._db_dir
    @property
    def test_dir(self):
        return self._test_dir
    @property
    def data_dir(self):
        return self._data_dir
    @property
    def incoming_dir(self):
        return self._incoming_dir
    def determine_paths_for_machine(self):
        """Fill in the path attributes based on socket.gethostname()."""
        hostName = socket.gethostname()
        print 'host: {}'.format(hostName)
        # Roy's Mac
        RoysMac = 'cbl-mbp-3369'
        if hostName == RoysMac:
            print 'RoysMac'
            self._test_dir = os.path.join('/Users/rharnish/Projects/Franc/PETCT','TEST')
            self._db_dir = '/data/db'
            self._data_dir = '/Volumes/Untitled 1/data'
            self._incoming_dir = '/Volumes/Untitled 1/data'
        # Lassen
        # Lassen = 'lassen.radiology.ucsf.edu'
        # if hostName == Lassen:
        # Any host in the radiology.ucsf.edu domain gets the RRCS linux paths.
        if hostName.endswith('radiology.ucsf.edu'):
            print '{} -- using RRCS linux paths'.format(hostName)
            self._test_dir = os.path.join('/data/francgrp1/PETCT','TEST')
            self._db_dir = '/data/francgrp1/PETCT/DB'
            # self._data_dir = '/data/francgrp1/incoming_breast_v2_processing'
            self._data_dir = '/data/francgrp1/breast_radiomics/her2/PETCT'
            self._incoming_dir = '/data/francgrp1/incoming'
#%%
if __name__ == "__main__":
paths = MachineSpecificPaths()
| true |
7b73fd5b0e44f9176e8900e32d12d1305ecc5edf | Python | mukund-23/Programming-in-Python | /re1.py | UTF-8 | 257 | 3.109375 | 3 | [] | no_license |
import re
# Demo of the ``re`` module: findall with an anchor, and sub with two patterns.
sample_text = "guru99,education is fun"
leading_word = re.findall(r"^\w+", sample_text)
print(leading_word)
phone_entry = "2004-959-559 # This is Phone Number"
# Strip the trailing "# ..." comment from the entry.
cleaned = re.sub(r'#.*$', "", phone_entry)
print("Phone Num : ", cleaned)
# Keep only the digits of the original entry.
cleaned = re.sub(r'\D', "", phone_entry)
print("Phone Num : ", cleaned)
| true |
731d9eb85af1f1269765492dc6da4ca7db78cf61 | Python | DemocracyClub/EveryElection | /every_election/apps/organisations/management/commands/copy_divisions.py | UTF-8 | 2,235 | 2.75 | 3 | [] | permissive | from django.core.management.base import BaseCommand
from django.db import transaction
from organisations.models import OrganisationDivision, OrganisationDivisionSet
class Command(BaseCommand):
    """Management command that clones all divisions (and their geographies)
    from one DivisionSet into another, empty, DivisionSet."""
    help = "Copy all of the division and geography objects from one DivisionSet to another"
    def add_arguments(self, parser):
        # Both arguments are primary keys of OrganisationDivisionSet rows.
        parser.add_argument(
            "src_id", action="store", help="PK for the source DivisionSet"
        )
        parser.add_argument(
            "dst_id", action="store", help="PK for the destination DivisionSet"
        )
    @transaction.atomic
    def copy_divsions(self, old_divset_id, new_divset_id):
        # NOTE(review): method name is misspelled ("divsions") but is kept,
        # since handle() below calls it under this name.
        """Copy every division and its geography from the source set to the
        (required-empty) destination set, in a single transaction."""
        try:
            old_divset = OrganisationDivisionSet.objects.get(pk=old_divset_id)
        except OrganisationDivisionSet.DoesNotExist:
            raise Exception("Invalid Source DivisionSet")
        try:
            new_divset = OrganisationDivisionSet.objects.get(pk=new_divset_id)
        except OrganisationDivisionSet.DoesNotExist:
            raise Exception("Invalid Destination DivisionSet")
        if len(new_divset.divisions.all()) > 0:
            raise Exception("Target DivisionSet must be empty")
        self.stdout.write(
            f"Copying all divisions from {str(old_divset)} to {str(new_divset)}..."
        )
        # copy the divisions: clearing pk before save() makes Django INSERT
        # a fresh row attached to the new divisionset
        for div in old_divset.divisions.all():
            div.pk = None
            div.divisionset = new_divset
            div.save()
        # copy the geographies, pairing each with its division's identifier
        geographies = [
            (div.official_identifier, div.geography)
            for div in old_divset.divisions.all()
        ]
        for gss, geog in geographies:
            # look up the freshly-copied division in the destination set
            div = OrganisationDivision.objects.get(
                official_identifier=gss, divisionset=new_divset
            )
            geog.pk = None
            geog.division_id = div.id
            geog.save()
            # attach it to the target division
            div.geography = geog
            div.save()
        # sanity check: source and destination now hold the same number of divisions
        assert len(old_divset.divisions.all()) == len(
            new_divset.divisions.all()
        )
        self.stdout.write("...done!")
    def handle(self, *args, **options):
        """Django entry point: delegate to copy_divsions with the two PKs."""
        self.copy_divsions(options["src_id"], options["dst_id"])
| true |
a2840cc3b6e0c40a33983ad9b4f528fdffa53e76 | Python | mousepad01/RSA-python-implementation | /rsa_oaep_encryption.py | UTF-8 | 2,634 | 2.890625 | 3 | [] | no_license | import time
import sys
import random
from oaep_prototype import oaep_encode
from MGF1_prototype import sha256
sys.setrecursionlimit(100000)
def lenght(x):
    """Return the number of decimal digits of a non-negative integer.

    (Name typo kept for existing callers.  NOTE: the loop below does not
    terminate for negative input -- behaviour is only defined for x >= 0.)
    """
    if x == 0:
        return 1
    digits = 0
    while x:
        x //= 10
        digits += 1
    return digits
def logpow(exp, base, mod):
    """Modular exponentiation: return (base ** exp) % mod.

    Fix: the previous recursive square-and-multiply needed a frame per bit
    of the exponent, which is why the module raised sys.setrecursionlimit;
    the builtin three-argument pow() computes the same value iteratively.
    The parameter order (exp first) is preserved for existing callers.
    """
    if exp == 0:
        # Preserve the original edge case: a zero exponent always yields 1,
        # even when mod == 1 (builtin pow(base, 0, 1) would return 0).
        return 1
    return pow(base, exp, mod)
# Script body: read a plaintext, sign it with the private key and encrypt it
# package-by-package with the public key, writing the result to cfile.txt.
# NOTE(review): none of the opened files are explicitly closed.
print('reading message...')
msgfile = open('mfile.txt')
message = msgfile.read()
lm = len(message)
# Key files: private_keys.txt holds p, q, d (one per line);
# public_key.txt holds n then e.
pubk = open("public_key.txt")
privk = open("private_keys.txt")
auxp = privk.readline()
auxq = privk.readline()
auxd = privk.readline()
d = int(auxd[:len(auxd)])
t = time.time()
auxn = pubk.readline()
n = int(auxn[:len(auxn) - 1])
auxe = pubk.readline()
e = int(auxe[:len(auxe)])
# digital signature generator: sign the SHA-256 hash with the private exponent
message_hash = sha256(message)
signature = logpow(d, message_hash, n)
# ---------------------------------
# Encode each character as a fixed-width 3-digit ordinal (zero-padded).
# NOTE(review): characters with ord() >= 1000 would break the 3-digit width.
converted_message = ''
for i in range(lm):
    if 0 <= ord(message[i]) <= 9:
        converted_message = converted_message + '00'
    if 10 <= ord(message[i]) <= 99:
        converted_message = converted_message + '0'
    converted_message = converted_message + str(ord(message[i]))
# Split the digit string into packages of a random length (20-30 digits).
message_packages = []
lmessage = len(converted_message)
nlenght = lenght(n)
lenpackage = random.randint(20, 30)
if lenpackage >= lmessage:
    message_packages.append(converted_message)
else:
    for i in range(0, lmessage, lenpackage):
        message_packages.append(converted_message[i:min(lmessage, i + lenpackage)])
npackages = len(message_packages) - 1
# OAEP padding of every package before encryption
for i in range(len(message_packages)):
    message_packages[i] = oaep_encode(message_packages[i], 110)
# ----------------------------------------
# Encrypt each padded package with the public exponent.
crypted_packages = []
for i in range(npackages + 1):
    crypted_packages.append(logpow(e, message_packages[i], n) % n)
# Output format: package count, then one ciphertext per line, then the signature.
crypted_file = open("cfile.txt", 'w')
crypted_file.write(str(npackages))
crypted_file.write('\n')
for i in range(npackages + 1):
    crypted_file.write(str(crypted_packages[i]))
    crypted_file.write('\n')
print('message encrypted ---> found in cfile.txt starting with line 1 (', time.time() - t, ' seconds )')
crypted_file.write(str(signature))
print('message signed ---> signature found in last line of cfile.txt')
input('done. Press any KEY to continue')
| true |
3425696c1e67135ac79b19088adf100cfbc36015 | Python | ballcarsen/ML-Project-3 | /Project3/src/EvoStrategy.py | UTF-8 | 2,166 | 3.1875 | 3 | [] | no_license | from src.GeneticAlg import GeneticAlg
from src.Tester import Tester
import random
import math
#Evolutionary Strategy
class EvoStrat(GeneticAlg):
    """Evolution Strategy: GeneticAlg variant with self-adaptive Gaussian mutation."""
    # Self-adaptation step: scale sigma by exp(u / sqrt(length)),
    # where u is drawn uniformly from [0, sigma].
    def updateVar(self, length, sigma):
        u = random.uniform(0,sigma)
        if length == 0:
            length = 1
        s = sigma * math.exp(u/math.sqrt(length))
        return s
    # Mutation: perturb every weight (except the last of each node) by an
    # amount derived from that node's weight variance.  Mutates in place.
    # NOTE(review): ``child[i][j].weights`` indexes with the weight index j
    # while the mutated node is child[i][k] -- possibly intended child[i][k];
    # confirm before changing.
    def gaussMuatate(self, child):
        for i in range(len(child) - 1):
            for k in range(len(child[i])):
                for j in range(len(child[i][k].weights) - 1):
                    l = child[i][j].weights
                    sigma = self.var(l)
                    child[i][k].weights[j] = child[i][k].weights[j] + self.updateVar(len(l),sigma)
    # Standard deviation of the weight list, via the Tester helper's variance.
    def var(self, l):
        T1 = Tester(l)
        return math.sqrt(T1.get_variance())
    # Train the networks: tournament selection, pairwise crossover,
    # Gaussian mutation, then fitness-based replacement, per generation.
    def train(self, maxIterations):
        # for each generation
        genCount = 0
        while(genCount <= maxIterations):
            self.children = [] # reset children array
            genCount += 1 # increment generation count
            parents = self.select() # select parents using tournament selection
            # populate children via crossover
            for i in range(len(parents)): # for every parent
                # by steps of two
                if (i % 2 == 0):
                    # if we have reached the end of the array, just select last element
                    if (i + 2 > len(parents)):
                        self.children.append(parents[i])
                    # otherwise cross parents and add children to children array
                    else:
                        childArr = self.crossover(parents[i],parents[i+1])
                        for child in childArr:
                            self.children.append(child)
            # mutate children
            # NOTE(review): gaussMuatate returns None, so this assignment just
            # rebinds the loop variable; mutation itself happens in place.
            for child in self.children:
                child = self.gaussMuatate(child)
            # replace members of self.population with self.children if the children are more fit
            self.replaceAll()
        print(self.evalFitness(self.getBestIndiv()), "performance")
d7ed4b2ef0933c64868a3317dab4b78dbe5dea66 | Python | ranigera/HAS_ReinforcementSchedule | /create_subject_keycodes_and_manifests.py | UTF-8 | 4,058 | 2.515625 | 3 | [] | no_license | import os
from shutil import copyfile
import csv
import json
import time
import string
import random
mainAdress = 'https://experiments.schonberglab.org/static/rani/Space_Gold_App_RS/'
commonStartAdress = mainAdress + 'index.html?subId='
commonIconsAdress = mainAdress + 'icons/'
# FUNCTIONS:
def get_random_string(length):
    """Return a random alphanumeric (a-z, A-Z, 0-9) string of the given length."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def createSubNumDict(ranges=[(1101, 1200), (1201, 1300), (1501, 1600), (1601, 1700)], key_code_length=20):
    """Map a fresh random key code to every subject id in the given ranges.

    A candidate key is rejected and redrawn whenever its last 3 characters
    match (case-insensitively) the suffix of any already-issued key, so the
    key's suffix alone can identify a subject.
    NOTE(review): the mutable default for ``ranges`` is shared across calls;
    it is never mutated here so this is safe, but worth confirming.
    """
    sub_key_dict = {}
    for i in ranges:
        for j in range(i[0], i[1]):
            # Redraw until the new key's 3-character suffix is unique
            # (case-insensitively) among all keys issued so far.
            get_code = True
            while get_code:
                new_key = get_random_string(key_code_length)
                get_code = False
                for key in sub_key_dict.keys():
                    if new_key[-3:].lower() == key[-3:].lower():
                        get_code = True
                        break
            sub_key_dict[new_key] = j
    return sub_key_dict
# MANIFEST TEMPLATE:
myDynamicManifest = {
"name": "Space Gold",
"short_name": "Space Gold",
"start_url": "",
"display": "standalone",
# "orientation": "portrait",
"background_color": "#666666ff",
"theme_color": "#000000",
"icons": [
{
"src": "android-icon-36x36.png",
"sizes": "36x36",
"type": "image/png",
"density": "0.75"
},
{
"src": "android-icon-48x48.png",
"sizes": "48x48",
"type": "image/png",
"density": "1.0"
},
{
"src": "android-icon-72x72.png",
"sizes": "72x72",
"type": "image/png",
"density": "1.5"
},
{
"src": "android-icon-96x96.png",
"sizes": "96x96",
"type": "image/png",
"density": "2.0"
},
{
"src": "android-icon-144x144.png",
"sizes": "144x144",
"type": "image/png",
"density": "3.0"
},
{
"src": "android-icon-192x192.png",
"sizes": "192x192",
"type": "image/png",
"density": "4.0"
},
{
"src": "android-icon-512x512.png",
"sizes": "512x512",
"type": "image/png",
"density": "1.0"
}
]
}
# set the icons full path:
for icon in myDynamicManifest['icons']:
icon['src'] = commonIconsAdress + icon['src']
# RUN THE CODE:
sub_key_dict = createSubNumDict()
if not os.path.exists('./mapping_key_to_subId.js'):
# create the js file:
with open('mapping_key_to_subId.js', 'w') as f:
f.write('var key2subId_mapping = ')
json.dump(sub_key_dict, f, indent=4)
print('The file mapping_key_to_subId.js was saved')
# backup a copy with a timestamp:
copyfile('mapping_key_to_subId.js', 'backup/mapping_key_to_subId' + str(time.time()) + '.js')
# saving a csv file with url's:
with open('mapping_key_to_subId.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["Sub_ID", "URL", "key_code"])
for key, val in sub_key_dict.items():
writer.writerow([val, commonStartAdress + key, key])
print('The file mapping_key_to_subId.csv was saved')
# backup a copy with a timestamp:
copyfile('mapping_key_to_subId.csv', 'backup/mapping_key_to_subId.' + str(time.time()) + '.csv')
# creating manifest files:
if not os.path.exists('manifests'):
os.makedirs('manifests')
for key, val in sub_key_dict.items():
myDynamicManifest["start_url"] = commonStartAdress + key
with open('manifests/manifest_' + key + '.json', 'w') as f:
json.dump(myDynamicManifest, f, indent=4)
print('The manifest files were saved')
else:
print('STOPPING! *** The files already exists ***')
| true |
52c9eac6c591d9f45e29be6443b0fced5c12febb | Python | doungin/python | /三角形.py | UTF-8 | 355 | 3.703125 | 4 | [] | no_license |
# a=input("请输入数字")
# b=a.split(',')
# b.sort()
# c=int(b[0])
# d=int(b[1])
# f=int(b[2])
# if c+d>f:
# if c**2+d**2>f**2:
# print('钝角三角形')
# elif c**2+d**2<f**2:
# print('锐角三角形')
# elif c**2+d**2==f**2:
# print('直角三角形')
# else:
# print('不是三角形')
| true |
bcb57b9c8158f117c3a5afa0ef8783bb37741777 | Python | kongfuchen/rpi_github | /UploadTmp.py | UTF-8 | 871 | 2.59375 | 3 | [] | no_license | import urllib2
import json
import time
import datetime
APIKEY = '你的APIKey'
def http_put():
    """Read the latest temperature sample and upload it to the OneNET device API.

    Despite the name, the request is sent with the POST method (see the
    get_method override below).  Returns the raw HTTP response body.
    (Python 2 module: uses urllib2.)
    NOTE(review): the data file is opened but never closed.
    """
    file = open("/home/pi/dht11/tmp_data.txt")
    temperature = float(file.read())
    CurTime = datetime.datetime.now()
    url = 'http://api.heclouds.com/devices/你的设备ID/datapoints'
    # OneNET datapoint payload: one sample on the "temp" datastream,
    # timestamped with the current local time.
    values = {'datastreams': [{"id": "temp", "datapoints": [{"at": CurTime.isoformat(), "value": temperature}]}]}
    print("the time is: %s" % CurTime.isoformat())
    print("The upload temperature value is: %.3f" % temperature)
    jdata = json.dumps(values)
    print(jdata)
    request = urllib2.Request(url, jdata)
    request.add_header('api-key', APIKEY)
    # Force POST (urllib2 would default to GET/PUT depending on data).
    request.get_method = lambda: 'POST'
    request = urllib2.urlopen(request)
    return request.read()
while True:
time.sleep(5)
resp = http_put()
print("OneNET result:\n %s" % resp)
time.sleep(5)
| true |
d2970541685ba8fcc2a4e48354668ce1223b0a61 | Python | Salacho96/ADA_HomeWorks | /Tarea 4/equidivisions.py | UTF-8 | 1,331 | 3.15625 | 3 | [] | no_license | from sys import stdin
#Nombre: Juan Sebastian Rivera
#Codigo de estudiante 5498445
#Codigo DFS tomado de las notas de clase del profesor Camilo Rocha
delta = [(-1,0),(0,-1),(0,1),(1,0)]
matrix,tam = None,None
def dfs(visited, row, col):
    """Iterative flood fill from (row, col) over equal-valued cells of ``matrix``.

    Marks every reached cell in ``visited`` with 1 as it is queued.  Uses the
    module-level ``matrix``, ``tam`` (board size) and ``delta`` (4-neighbour
    offsets).  After the search, the last popped cell is marked 2 -- that
    value appears unused by solve(), which only tests for 0.
    """
    stack = [ (row, col) ] ; visited[row][col] = 1
    while len(stack)!=0:
        r,c = stack.pop()
        for dr,dc in delta:
            # stay on the board, and only spread to unvisited cells
            # holding the same region value
            if 0<=r+dr<tam and 0<=c+dc<tam and visited[r+dr][c+dc]==0:
                if(matrix[r][c]==matrix[r+dr][c+dc] and visited[r+dr][c+dc]==0):
                    stack.append((r+dr,c+dc)) ; visited[r+dr][c+dc] = 1
    visited[r][c] = 2
def solve():
    """Count connected equal-valued regions in the global ``matrix``.

    Starts a DFS from every not-yet-visited cell; each DFS accounts for
    exactly one region, so the number of starts is the answer.
    """
    global matrix,tam
    visited = [ [ 0 for x in range(tam) ] for y in range(tam) ]
    ans = 0
    for r in range(tam):
        for c in range(tam):
            if visited[r][c]== 0:
                ans = ans +1
                dfs(visited,r,c)
    return ans
def main():
global matrix,tam
tam = -1
while tam != 0:
tam = stdin.readline().strip()
tam = int(tam)
if tam !=0:
list1=[]
matrix = [ [ tam for x in range(tam) ] for y in range(tam) ]
for i in range(tam-1):
line = stdin.readline().strip()
list1=([int(x) for x in line.split() ])
cont = 0
while cont < len(list1)-1:
x = list1[cont]
y = list1[cont+1]
matrix[int(x)-1][int(y)-1] = i+1
cont = cont + 2
ans = solve()
if ans == tam:
print("good")
else:
print("wrong")
main() | true |
3daedb8bc9ed415ed8d8c2bc45f009da598c80c5 | Python | Squalexy/AED-complexity-analysis | /solB.py | UTF-8 | 360 | 2.984375 | 3 | [] | no_license | from sys import stdin
import time
num_inputs = int(input())
def readln():
return stdin.readline().rstrip()
num = [int(i) for i in readln().split()]
def solucao_B(array_num):
    """Return the sum of the two largest values in ``array_num``.

    Fix: the previous implementation sorted the caller's list in place just
    to read the top two values; heapq.nlargest finds them in O(n) without
    mutating the argument.  Requires at least two elements.
    """
    import heapq  # local import keeps this fix self-contained in the block
    largest, second = heapq.nlargest(2, array_num)
    return largest + second
tic = time.time()
print(solucao_B(num))
toc = time.time()
tempo = toc - tic
print(f"Tempo: {tempo}")
| true |
8134a237445b90e04f40f2eb18e14b445740fc1e | Python | XACT-RobA/Modelling-Stocks | /code/trading/Heikin_Ashi.py | UTF-8 | 1,219 | 2.640625 | 3 | [] | no_license | import sys
sys.path.append('../tools')
import getanalysis
import tradesim
import csv
trade_filepath = '../../data/tradedata/Heikenashispintopsbuyorsell.csv'
with open(trade_filepath, 'rb') as trade_file:
trade_array = []
trade_file_data = csv.reader(trade_file, delimiter=',')
for row in trade_file_data:
trade_array.append(int(row[0]))
data_filepath = '../../data/hacandles.csv'
data = getanalysis.import_j_candles(data_filepath)
[profit, profit_array] = tradesim.sim_trade(data, trade_array)
percent_profit = (profit - 1) * 100
print('Heikin Ashi spinning tops')
print('Profit: ' + str(percent_profit) + '%')
print('Max profit: ' + str((max(profit_array)-1)*100) + '%\n')
trade_filepath = '../../data/tradedata/ha-candles-bear-and-bull-dojis.csv'
with open(trade_filepath, 'rb') as trade_file:
trade_array = []
trade_file_data = csv.reader(trade_file, delimiter=',')
for row in trade_file_data:
trade_array.append(int(row[0]))
[profit, profit_array] = tradesim.sim_trade(data, trade_array)
percent_profit = (profit - 1) * 100
print('Heikin Ashi dojis')
print('Profit: ' + str(percent_profit) + '%')
print('Max profit: ' + str((max(profit_array)-1)*100) + '%\n')
| true |
6b85183c0e3edd2c855de8da721291fcb8ae593b | Python | Kose-i/machine_learning_tutorial | /DeepLearning_math/9step.py | UTF-8 | 813 | 3.09375 | 3 | [] | no_license | """
Sarsa
"""
import numpy as np
# Status is 4
S = np.array([0,1,2,3])
# Action is 2
A = np.array([0,1])
# Reward
R = np.array([[1,-20],[4,-1],[0,25],[0,0]])
# Status after Action on Status_t-1
S1 = np.array([[1,2],[3,0],[0,3],[None,None]])
# Probably forward
p = 0.5
# Learning rate
alpha = 0.01
# Discount rate
gamma = 0.8
# Trial Count
n = 3000
# Initialize table
Q = np.zeros(R.shape)
# Define Moving Direction with Probably
def pi(p):
    """Behaviour policy: with probability p return 0 (forward), else 1 (back)."""
    draw = np.random.uniform(0, 1)
    return 0 if draw <= p else 1
def sarsa():
    """Run one SARSA episode from state S[0], updating the global Q table in place.

    Uses the module-level S, S1 (transitions), R (rewards), alpha, gamma and
    the policy pi; an episode ends when the successor state is None.
    Prints Q[0,0] and Q[0,1] after the episode.
    """
    s = S[0]
    a = pi(p)
    while S1[s,a] != None:
        # On-policy TD update: pick the next action first, then move
        # Q(s, a) towards R(s, a) + gamma * Q(s', a').
        a_next = pi(p)
        td = R[s,a] + gamma*Q[S1[s,a], a_next] - Q[s,a]
        Q[s,a] += alpha*td
        s = S1[s,a]
        a = a_next
    print(Q[0,0], Q[0,1])
for i in range(n):
sarsa()
| true |
4478b487c730800ab992c55d32028435c8df8ce3 | Python | elemaryo/Leetcode-problems | /SmallestPostiveNumberNotInArray.py | UTF-8 | 959 | 3.4375 | 3 | [] | no_license | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
class Solution:
    def firstMissingPositive(self, nums: List[int]) -> int:
        """Return the smallest positive integer absent from ``nums``.

        Sorts ``nums`` in place, then walks it while tracking the next
        positive value that has not yet been seen.
        """
        nums.sort()
        expected = 1
        for value in nums:
            if value < 0:
                continue
            if expected < value:
                # gap before this value: 'expected' is missing
                return expected
            expected = value + 1
        return expected
# def solution(A):
# # write your code in Python 3.6
# maximumNumber = 0
# for i in range(len(A)):
# maximumNumber = max(maximumNumber,A[i])
# # if minimumNumber < 0:
# if maximumNumber < 0:
# return 1
# elif (maximumNumber - 1) in A:
# return int(maximumNumber + 1)
# else:
# return int(maximumNumber - 1) | true |
4754f318a3948e06b3b1bd518539f9db7b1eb62e | Python | Bhaal22/gomspace | /src/cardinals.py | UTF-8 | 547 | 3.03125 | 3 | [] | no_license | from src.geometry import Vector
class WindRose:
    """Compass directions as unit vectors, with 90-degree rotation helpers."""
    NORTH = Vector(0, 1)
    NORTH_INDEX = 0
    EAST = Vector(1, 0)
    EAST_INDEX = 1
    SOUTH = Vector(0, -1)
    SOUTH_INDEX = 2
    WEST = Vector(-1, 0)
    WEST_INDEX = 3
    # Clockwise order N -> E -> S -> W; positions match the *_INDEX constants.
    ORIENTATIONS = [ NORTH, EAST, SOUTH, WEST ]
    @staticmethod
    def clockwise_rotate(index):
        """Return (orientation, index) one 90-degree step clockwise of ``index``."""
        next = (index + 1) % 4
        return WindRose.ORIENTATIONS[next], next
    @staticmethod
    def anti_clockwise_rotate(index):
        """Return (orientation, index) one 90-degree step anti-clockwise of ``index``."""
        next = (index - 1) % 4
        return WindRose.ORIENTATIONS[next], next
6fce3f0ce9de6d0309f15f763c5e747b87a0d6c4 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2430/60792/287280.py | UTF-8 | 393 | 2.75 | 3 | [] | no_license | num=int(input())
for i in range(0,num):
n=int(input())
list1=list(map(int,input().split(" ")))
k=int(input())
count=0
for j in range(0,n):
for m in range(j+1,n):
if list1[j]+list1[m]==k:
print(list1[j],end=" ")
print(list1[m],end=" ")
print(k)
count+=1
if count==0:
print("-1") | true |
d167557871a903b8dc1979c2d4011e9512bb9cb2 | Python | ramon4rj/ED1 | /lista_dinamica_python.py | UTF-8 | 2,875 | 4.40625 | 4 | [] | no_license | import math
# no, lista encadeada
class Node:
    """A singly-linked-list node holding a value and a link to the next node."""
    def __init__(self, data):
        self.data = data
        self.next = None  # next node in the chain; None marks the tail
class LinkedList:
    """A simple singly linked list with push/insert/append/remove/print operations.

    (Original comments were in Portuguese; translated here.  User-facing
    print messages are kept in Portuguese, as they are runtime output.)
    """
    def __init__(self):
        self.head = None
    # OPERATIONS
    # insert a node at the head of the list
    def push(self, new_data):
        # allocate a node holding new_data
        new_node = Node(new_data)
        # the new node's next field points at the current head
        new_node.next = self.head
        # head now points at the new node
        self.head = new_node
    def insertAfter(self, ant, new_data):
        """Insert new_data right after the existing node ``ant``."""
        # check that the anchor node exists
        if ant is None:
            print(" ")
            print ("No anterior não está na lista")
            return
        # create the new node holding new_data
        new_node = Node(new_data)
        # splice: new node inherits the anchor's successor...
        new_node.next = ant.next
        # ...and the anchor now links to the new node
        ant.next = new_node
    def append(self, new_data):
        """Insert new_data at the tail of the list."""
        # allocate a node holding new_data
        new_node = Node(new_data)
        # empty list: the new node becomes the head
        if self.head is None:
            self.head = new_node
            return
        else: # otherwise walk to the last node and link after it
            tail = self.head
            while(tail.next is not None):
                tail = tail.next
            tail.next = new_node
    def remove(self, value):
        """Remove the first node whose data equals ``value``, if present."""
        aux = self.head
        # special case: the head itself holds the value to delete
        if aux is not None:
            if aux.data == value:
                self.head = aux.next
                aux = None
                return
        # search for the node, remembering its predecessor
        while aux is not None:
            if aux.data == value:
                break
            prev = aux
            aux = aux.next
        # if aux walked off the end (aux == None) the value is not in the list
        if aux == None:
            print(" ")
            print("Valor não está na lista")
            return
        # unlink the found node by bridging its predecessor to its successor
        prev.next = aux.next
        aux = None
    # print the list starting from ``node``, space-separated on one line
    def printList(self, node):
        while (node != None):
            print(node.data, end = " ")
            node = node.next
    def show_list(self):
        """Print the whole list from the head."""
        self.printList(self.head)
l = LinkedList()
node = l.push(1)
node = l.push(5)
node = l.push(8)
node = l.insertAfter(l.head, 2)
#node = l.insertAfter(l.head.next, 2)
print("Pós inserção: ")
l.show_list()
#l.remove(2)
#l.remove(8)
l.remove(7)
print(" ")
print("Pós remoção: ")
l.show_list()
| true |
7424b0db8503d32c91671d7f799194e62d8fe173 | Python | CharlesGodwin/pymagnum | /magnum/magparser.py | UTF-8 | 1,734 | 2.609375 | 3 | [
"BSD-3-Clause"
] | permissive |
import argparse
import shlex
import os
class MagnumArgumentParser(argparse.ArgumentParser):
    """ArgumentParser subclass with magnum-specific post-processing.

    Adds '#' comment support to @-argument files and normalises the
    device / timeout / packets / cleanpackets options after parsing.
    """
    # shlex runs in POSIX mode everywhere except Windows ('nt')
    isPosix = os.name != 'nt'
    def convert_arg_line_to_args(self, arg_line):
        # Split argument-file lines shell-style, treating '#' as a comment.
        return shlex.split(arg_line, '#', self.isPosix)
    # This method cleans up device
    def magnum_parse_args(self):
        """Parse known args, then normalise and validate magnum options.

        Returns the parsed Namespace; unknown arguments are ignored.
        """
        args = self.parse_known_args()[0]
        if hasattr(args, 'device'):
            if not isinstance(args.device, list):
                args.device = [args.device]
            else:
                # Flatten the nested device entries, allowing comma- or
                # space-separated names inside each item.
                # NOTE(review): assumes each entry is itself a list/iterable
                # of strings (e.g. from nargs) -- a bare string here would be
                # iterated character by character; confirm.
                devices = []
                for dev in args.device:
                    for subdev in dev:
                        subdev = subdev.replace(",", " ")
                        for item in subdev.split():
                            devices.append(item)
                devices = list(dict.fromkeys(devices)) # strips duplicates
                # Give untagged '!' entries a generated "!fileN" tag prefix.
                file_no = 1
                for ix, dev in enumerate(devices):
                    if dev[0:1] == '!': # check for a tag
                        if dev.find('!', 1) < 0:
                            devices[ix] = f"!file{file_no}{dev}"
                            file_no = file_no + 1
                args.device = devices
                # fall back to the default serial device when nothing was given
                if len(args.device) == 0:
                    args.device = ['/dev/ttyUSB0']
        if hasattr(args, 'timeout'):
            # timeout is a fraction of a second
            if args.timeout < 0 or args.timeout > 1.0:
                self.error(
                    "option --timeout: Must be a number (float) between 0 and 1 second. i.e. 0.005")
        if hasattr(args, 'packets'):
            if args.packets < 1:
                self.error("option --packets: Must be greater than 0.")
        if hasattr(args, 'cleanpackets'):
            # the stored value is the negation of the parsed flag
            args.cleanpackets = not args.cleanpackets
        return args
| true |
f1dcb55549aeb75823245f5eba4d72f3995d2976 | Python | sulei1324/Algorithm | /exhaustion.py | UTF-8 | 1,295 | 3.359375 | 3 | [] | no_license | __author__ = 'Su Lei'
def equationWith9Nums():
    """Print every equation ABC + DEF = GHI using each digit 1-9 exactly once.

    Fix: the previous implementation brute-forced all 9**9 (~387 million)
    digit combinations and filtered out repeated digits with a bookkeeping
    array; iterating permutations directly visits only 9! = 362880
    candidates and prints exactly the same solutions in the same order
    (permutations of an ascending range are emitted lexicographically,
    matching the old nested ascending loops).
    """
    from itertools import permutations  # local import keeps the fix self-contained
    for d in permutations(range(1, 10)):
        lhs1 = d[0] * 100 + d[1] * 10 + d[2]
        lhs2 = d[3] * 100 + d[4] * 10 + d[5]
        rhs = d[6] * 100 + d[7] * 10 + d[8]
        if lhs1 + lhs2 == rhs:
            # print() with a single argument behaves identically on Python 2 and 3
            print("%d%d%d + %d%d%d = %d%d%d" % (d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8]))
equationWith9Nums()
| true |
53a7318bb2a414d0bb003c6728f607c2bb431bc4 | Python | thcborges/estrutura-de-dados-com-python3 | /ComecandoComPython/code_skulptor.py | UTF-8 | 1,466 | 3.671875 | 4 | [] | no_license | # para executar, copiar o código e colar em: http://www.codeskulptor.org/
from math import sqrt
import random
import simplegui
center_point = [50, 50]
window_width = 600 # Largura da janela
window_height = 400 # Altura da janela
radius = 20
score = 0
# desenha o canvas
def draw(canvas):
    """simplegui draw handler: paint the target circle at its current position."""
    canvas.draw_circle(center_point, radius, 1, 'Red', 'Red')
# temporizador
def timer_handler():
    """Timer handler: teleport the circle to a random point inside the window.

    Fix: the x coordinate was drawn from [0, window_height] (400), which
    confined the circle to the left portion of the 600px-wide window; it
    now spans the full window width.
    """
    center_point[0] = random.randint(0, window_width)
    center_point[1] = random.randint(0, window_height)
def mouse_handler(pos):
    """Mouse handler: +1 score for a click inside the circle, -1 (never below 0) outside."""
    global score
    # Euclidean distance from the click position to the circle centre
    dist = sqrt(((pos[0] - center_point[0]) ** 2) +
                ((pos[1] - center_point[1]) ** 2))
    # did the user click inside the circle?
    if dist < radius:
        score += 1  # increment the score
    elif score > 0:
        score -= 1  # decrement the score
    # refresh the score label text
    label.set_text('Score: ' + str(score))
# cria uma janela passando o título largura e altura
frame = simplegui.create_frame(
'Clique na bolinha',
window_width,
window_height)
# cria um temporizador passando o intervalo e o manipulador
timer = simplegui.create_timer(1000, timer_handler)
# seta os manipuladores de eventos
frame.set_draw_handler(draw)
frame.set_mouseclick_handler(mouse_handler)
# adiciona um label
label = frame.add_label('Score: ' + str(score))
timer.start() # inicia o temporizador
frame.start() # loop principal da aplicação
| true |
5ba50154be39b2888438df27e79f49cbe64d7462 | Python | higorsantana-omega/Python_Aprendizado | /banco_de_dados/selecionar_com_filtro.py | UTF-8 | 222 | 2.65625 | 3 | [
"MIT"
] | permissive | from bd import nova_conexao
sql = "SELECT id, nome, tel FROM contatos WHERE tel = '963367427'"
with nova_conexao() as conexao:
cursor = conexao.cursor()
cursor.execute(sql)
for x in cursor:
print(x)
| true |
b94a72004dd49da0688d81a8645f5295a4b54dc0 | Python | PhilBug/Vessels-recognition | /main.py | UTF-8 | 2,717 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import glob
import cv2 as cv
import pickle
import random as rd
def find_vessels(image):
    """Adaptive-threshold a colour image to highlight vessel-like structures and display it."""
    cpy = image.copy()
    gray = cv.cvtColor(cpy, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (5, 5), 0)
    # Inverted Gaussian adaptive threshold on the blurred grayscale image.
    # NOTE(review): maxValue of 256 exceeds the usual 8-bit maximum of 255 --
    # confirm intended.
    thresh = cv.adaptiveThreshold(blurred, 256, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 11, 2)
    display_image(thresh)
def drawEdges(path):
    """Detect large contours in the image at ``path`` and draw them on a copy.

    Pipeline: blur -> Otsu-derived Canny edges -> keep only contours with a
    large mu02 moment -> re-edge the filled mask -> draw each final contour
    in a random colour with a white dot at its centroid.  Returns the
    annotated image.
    NOTE(review): the 3-value cv.findContours unpacking matches OpenCV 3.x;
    OpenCV 4.x returns only (contours, hierarchy) -- confirm the pinned version.
    """
    img = cv.imread(path)
    height, width, channels = img.shape
    imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    imgGray = cv.GaussianBlur(imgGray, (3, 3), 0)
    imgGray = cv.medianBlur(imgGray, 3)
    # Otsu picks the high Canny threshold; the low one is 30% of it.
    highThresh, thresh_img = cv.threshold(imgGray, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    lowThresh = 0.3 * highThresh
    edges = cv.Canny(imgGray, lowThresh, highThresh)
    display_image(edges)
    edges = cv.dilate(edges, np.ones((3, 3), np.uint8), iterations=1)
    imgCnt, contours, hierarchy = cv.findContours(edges, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    imgC = np.zeros((height, width, 1), np.uint8)
    # Keep only contours with a large second-order central moment (mu02),
    # filling them white on a fresh single-channel mask.
    for i in range(len(contours)):
        moments = cv.moments(contours[i])
        if moments['mu02'] < 400000.0:
            continue
        cv.drawContours(imgC, contours, i, (255, 255, 255), cv.FILLED)
    # Second pass: erode the mask, re-run Otsu/Canny/dilate and find the
    # contours of the surviving regions.
    edges = cv.erode(imgC, np.ones((3, 3), np.uint8), iterations=2)
    highThresh, thresh_img = cv.threshold(edges, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    lowThresh = 0.3 * highThresh
    edges = cv.Canny(edges, lowThresh, highThresh)
    edges = cv.dilate(edges, np.ones((3, 3), np.uint8), iterations=3)
    imgCnt, contours, hierarchy = cv.findContours(edges, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    # Draw each contour in a random colour and mark its centroid (m10/m00, m01/m00).
    for i in range(len(contours)):
        moments = cv.moments(contours[i])
        cv.drawContours(img, contours, i, (rd.randint(0,255), rd.randint(0,255), rd.randint(0,255)), 2)
        cv.circle(img, (int(moments['m10'] / moments['m00']), int(moments['m01'] / moments['m00'])), 5, (255, 255, 255), -1)
    return img
def display_image(image, title='test'):
    """Show ``image`` in a window titled ``title`` and block until a key is pressed."""
    cv.imshow(title, image)
    cv.waitKey(0)
def get_red_chanell(image):
    """Display the blue, green and red channels of a BGR image in separate windows.

    OpenCV stores images in BGR order: channel 0 = blue, 1 = green, 2 = red.
    (Function name typo "chanell" kept for existing callers.)
    """
    b = image.copy()
    # keep only channel 0 (blue): zero green and red
    b[:, :, 1] = 0
    b[:, :, 2] = 0
    g = image.copy()
    # keep only channel 1 (green): zero blue and red
    g[:, :, 0] = 0
    g[:, :, 2] = 0
    r = image.copy()
    # keep only channel 2 (red): zero blue and green
    r[:, :, 0] = 0
    r[:, :, 1] = 0
    # blue channel window
    cv.imshow('B-RGB', b)
    # green channel window
    cv.imshow('G-RGB', g)
    # red channel window
    cv.imshow('R-RGB', r)
    cv.waitKey(0)
def main():
    """Entry point: load the sample eye image and run vessel detection on it."""
    eye_image = cv.imread('original.png')
    #drawEdges('original.png')
    find_vessels(eye_image)
main() | true |
c5d965da963ea5824d21bfbf8200ab8b964629bb | Python | s13nder/diplom | /DB_Connect_and_Requests.py | UTF-8 | 5,146 | 2.546875 | 3 | [] | no_license | import datetime
import pymysql
import pymysql.cursors
class DBHandler:
    """Wrapper around a MySQL database of per-occupation vacancy tables.

    Each table (one per occupation) has the columns listed in
    ``table_structure`` plus an auto-increment id.
    NOTE(review, security): credentials are hard-coded below, and every SQL
    statement is built by f-string interpolation rather than parameterised
    queries -- both should be fixed for untrusted input.
    NOTE(review): the pymysql usage is suspect throughout -- ``execute`` is
    called on the connection object and ``with pymysql.connect(...)`` yields
    a cursor in some pymysql versions and a connection in others; confirm
    against the pinned pymysql version.
    """
    #connection_string or conn_params
    connection_string={'host':'217.71.129.139', 'port':4146, 'user':'wk', 'password':'Ghjcnjnf', 'db':'workmapdb'}
    # column names shared by every occupation table
    table_structure = ['region','quan_vacancy','average_salary','competition']
    def __init__(self, db_name='workmapdb'):
        """Bind to ``db_name`` and try to count its existing occupation tables."""
        self.db_name = db_name
        self.db_len = 0
        self.db_tables = []
        try:
            with pymysql.connect(**DBHandler.connection_string) as cnxn:
                cnxn.autocommit = True
                #use_query
                sql_query = f"USE {self.db_name}"
                cnxn.execute(sql_query)
                # NOTE(review): ``cursor`` is never defined here, so this loop
                # raises NameError -- silently swallowed by the bare except
                # below, leaving db_len/db_tables empty.
                for table in cursor.tables():
                    if table[1] == 'dbo':
                        self.db_len += 1
                        self.db_tables.append(table[2])
        except:
            print(f'Object DBHandler was successfull linked with {self.db_name} but database with the same name does not exist yet')
    def __len__(self):
        # number of occupation tables discovered at construction time
        return self.db_len
    def __repr__(self):
        return f"Database `{self.db_name}` with {self.db_len} occupation tables: {self.db_tables}"
    def count_tables(self):
        # not implemented yet
        pass
    def create_database(self):
        """Create the database; prints a notice if it already exists."""
        with pymysql.connect(**DBHandler.connection_string) as cnxn:
            cnxn.autocommit = True
            #create_query
            sql_create_db_query = f"CREATE DATABASE {self.db_name}"
            try:
                cnxn.execute(sql_create_db_query)
            except pymysql.ProgrammingError:
                print(f'\nProbably the "{self.db_name}" database already exist...')
    def create_table(self,table_name):
        """Create an occupation table with the standard column layout."""
        with pymysql.connect(**DBHandler.connection_string) as cnxn:
            cnxn.autocommit = True
            sql_use_db_query = f"USE {self.db_name}"
            cnxn.execute(sql_use_db_query)
            sql_create_table_query = \
            f"CREATE TABLE IF NOT EXISTS {table_name}\
            (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL,\
            {DBHandler.table_structure[0]} VARCHAR(255) NULL,\
            {DBHandler.table_structure[1]} INT NOT NULL,\
            {DBHandler.table_structure[2]} VARCHAR(255) NULL,\
            {DBHandler.table_structure[3]} VARCHAR(255) NULL)"
            try:
                cnxn.execute(sql_create_table_query)
            except pymysql.ProgrammingError:
                # NOTE(review): "self.db_name" below is missing f-string braces,
                # so the literal text is printed instead of the database name.
                print(f'\nProbably the "self.db_name" structure already created...')
    def delete_data(self,table_name):
        """TRUNCATE the given table, removing all of its rows."""
        with pymysql.connect(**DBHandler.connection_string) as cnxn:
            cnxn.autocommit = True
            sql_use_db_query = f"USE {self.db_name}"
            cnxn.execute(sql_use_db_query)
            sql_clear_collum = f"TRUNCATE TABLE {table_name}"
            try:
                cnxn.execute(sql_clear_collum)
            except pymysql.ProgrammingError:
                print(f'\nProbably the "{self.db_name}" structure already delete...')
    def copy_data(self, table_name):
        """Snapshot the table into a copy suffixed with today's date (YYYYMMDD)."""
        with pymysql.connect(**DBHandler.connection_string) as cnxn:
            cnxn.autocommit = True
            sql_use_db_query = f"USE {self.db_name}"
            cnxn.execute(sql_use_db_query)
            table_name_YMD = (str(table_name) + str(datetime.datetime.now().strftime("%Y%m%d")))
            sql_copy_table_query = \
            f"CREATE TABLE {table_name_YMD} SELECT * FROM {table_name}"
            try:
                cnxn.execute(sql_copy_table_query)
            except pymysql.ProgrammingError:
                print(f'\nProbably the "{self.db_name}" structure already copy...')
    def insert_data(self,table_name):
        """Insert scraped vacancy rows into the table.

        NOTE(review): reads the globals ``prof_data`` and ``raw_prof_data``,
        which are not defined in this module -- confirm where they come from.
        The Russian strings are data keys from the scraped source and must
        not be translated.
        """
        with pymysql.connect(**DBHandler.connection_string) as cnxn:
            cnxn.autocommit = True
            #use_query
            sql_use_db_query = f"USE {self.db_name}"
            cnxn.execute(sql_use_db_query)
            for i in prof_data[raw_prof_data.search_criteria]:
                row = prof_data[raw_prof_data.search_criteria][i]
                # competencies are either the 'no data' marker or a list to join
                if prof_data[raw_prof_data.search_criteria][i]['Наиболее востребованные компетенции'] == 'нет данных':
                    demanded_competencies = row['Наиболее востребованные компетенции']
                else:
                    demanded_competencies = (', '.join(row['Наиболее востребованные компетенции']))
                sql_insert_table_query= f"INSERT INTO {table_name} (region, quan_vacancy, average_salary, competition)\
                VALUES \
                ('{i}','{row['Количество вакансий']}','{row['Средняя заработная плата для начинающего специалиста']}','{demanded_competencies}')"
                cnxn.execute(sql_insert_table_query)
    def length_tb(self, table_name):
        """Return the number of rows in the table (execute()'s row count)."""
        with pymysql.connect(**DBHandler.connection_string) as cnxn:
            cnxn.autocommit = True
            sql_use_db_query = f"USE {self.db_name}"
            cnxn.execute(sql_use_db_query)
            sql_query = \
            f"SELECT {DBHandler.table_structure[0]} FROM {table_name}"
            try:
                len_table=cnxn.execute(sql_query)
                return len_table
            except pymysql.ProgrammingError:
                print(f'\nProbably the "{self.db_name}" structure already for count length of rows...')
| true |
c003b60ffa192726546e0b58b80d01668ad53ee6 | Python | loliktry/Software | /Python/Raspberry Pi/Drucker.py | UTF-8 | 6,640 | 2.84375 | 3 | [] | no_license | import serial
import sys
import time
# Serielle Schnittstelle oeffnen
UART = serial.Serial("/dev/ttyUSB0", 19200)
UART.open()
# Barcodetypen
# Barcode Laenge Bereich
# UPC-A 11-12 Bytes Bereich 0x30 - 0x39
# UPC-E 11-12 Bytes Bereich 0x30 - 0x39
# EAN13 12-13 Bytes Bereich 0x30 - 0x39
# EAN8 7-8 Bytes Bereich 0x30 - 0x39
# I25 >1 Byte even Number Bereich 0x30 - 0x39
# CODE39 >1 Byte 0x20, 0x24, 0x25, 0x2B, 0x2D-0x39, 0x41-5A
# CODEBAR >1 Byte 0x24, 0x2B, 0x2D-0x3A, 0x41-0x44
# CODE93 >1 Byte 0x00-0x7F
# CODE128 >1 Byte 0x00-0x7F
# CODE11 >1 Byte 0x30-0x39
# MSI >1 Byte 0x30-0x39
UPCA = 0
UPCE = 1
EAN13 = 2
EAN8 = 3
CODE39 = 4
I25 = 5
CODEBAR = 6
CODE93 = 7
CODE128 = 8
CODE11 = 9
MSI = 10
# ------------------------------------------------------------------------
# Drucker
# ------------------------------------------------------------------------
# Drucker initialisieren
# Buffer leeren
# Parameter auf Defaultwerte zuruecksetzen
# In den Standardmodus wechseln
# User-definierte Zeichen loeschen
def Init():
UART.write(chr(27))
UART.write(chr(64))
return
# Testseite drucken
def PrintTestpage():
UART.write(chr(18))
UART.write(chr(84))
return
# Standby auswaehlen
# Auswahl
# - Offline
# - Online
def Standby(Modus):
if(Modus == "Offline"):
Value = 0
elif(Modus == "Online"):
Value = 1
UART.write(chr(27))
UART.write(chr(61))
UART.write(chr(Value))
return
# Drucker in Sleepmodus setzen
# WICHTIG: Der Drucker muss erst mittels "Wake()" geweckt werden, wenn er wieder benutzt werden soll
# Auswahl
# - Zeit von 0-255
def Sleep(Zeit):
if(Zeit > 255):
print "Der Wert fuer die Zeit ist zu hoch!"
return -1
UART.write(chr(27))
UART.write(chr(56))
UART.write(chr(Zeit))
return
# Drucker aufwecken
def Wake():
UART.write(chr(255))
time.sleep(0.1)
return
# Pruefen ob der Drucker Papier hat (1 = kein Papier, 0 = Papier)
# Bit 3 -> 0 = Papier, 1 = kein Papier
def Paper():
Status = 0
UART.write(chr(27))
UART.write(chr(118))
UART.write(chr(0))
# Zeichen einlesen
Read = UART.read(UART.inWaiting())
if(Read == chr(32)):
Status = 0
elif(Read == chr(36)):
Status = 1
return Status
# Heizzeit konfigurieren
# Auswahl
# - Anzahl der Heizpunkte von 0-255
# - Heizzeit von 3-255
# - Heizintervall 0-255
def ConfigHeat(Dots, Time, Intervall):
if(Dots > 255):
print "Anzahl der Heizpunkte zu hoch!"
return -1
if((Time < 3) or (Time > 255)):
print "Ungueltige Angabe fuer die Heizzeit!"
return -1
if(Intervall > 255):
print "Heizintervall zu hoch!"
return -1
UART.write(chr(27))
UART.write(chr(55))
UART.write(chr(Dots))
UART.write(chr(Time))
UART.write(chr(Intervall))
return
# Default Einstellungen fuer die Heizung
def DefaultHeat():
UART.write(chr(27))
UART.write(chr(55))
UART.write(chr(7))
UART.write(chr(80))
UART.write(chr(2))
return
# ------------------------------------------------------------------------
# Character
# ------------------------------------------------------------------------
# Skipt eine bestimmte Anzahl Zeilen
def Feed(Anzahl):
if(Anzahl > 255):
print "Anzahl der Zeilen zu hoch!"
return -1
UART.write(chr(27))
UART.write(chr(100))
for Counter in range(Anzahl):
UART.write(chr(12))
return
# Druckt eine bestimmte Anzahl leerer Zeichen (max. 47)
def Blank(Anzahl):
if(Anzahl > 47):
print "Anzahl der Leerstellen zu hoch!"
return -1
UART.write(chr(27))
UART.write(chr(66))
UART.write(chr(Anzahl))
return
# Drucken einer Zeile
def Println(Text):
UART.write(Text)
UART.write(chr(10))
UART.write(chr(13))
return
# Noch in Arbeit
# Druckt ein Tab (8 leere Zeichen)
def Tab():
UART.write(chr(9))
return
# Linienstaerke einstellen:
# Auswahl
# - None
# - Middel
# - Big
def Underline(Dicke):
# Linienstaerke auswaehlen
if(Dicke == "None"):
Value = 0
elif(Dicke == "Middel"):
Value = 1
elif(Dicke == "Big"):
Value = 2
else:
return -1
UART.write(chr(27))
UART.write(chr(45))
UART.write(chr(Value))
return
# Deaktiviert das Unterstreichen vom Text
def DeleteUnderline():
UART.write(chr(27))
UART.write(chr(45))
UART.write(chr(0))
return
# Textmodus setzen
# Auswahl
# - Inverse
# - Updown
# - Bold
# - DoubleHeight
# - DoubleWidth
# - Deleteline
def PrintMode(Mode):
# Modus auswaehlen
if(Mode == "Inverse"):
Value = 2
elif(Mode == "Updown"):
Value = 4
elif(Mode == "Bold"):
Value = 8
elif(Mode == "DoubleHeight"):
Value = 16
elif(Mode == "DoubleWidth"):
Value = 32
elif(Mode == "Deleteline"):
Value = 64
else:
Value = 0
UART.write(chr(27))
UART.write(chr(33))
UART.write(chr(Value))
return
# Printmode zuruecksetzen
def DeletePrintMode():
UART.write(chr(27))
UART.write(chr(33))
UART.write(chr(0))
return
# Stellt den Abstand zwischen zwei Zeilen in Punkten ein
def SetLineSpace(Punkte):
if(Punkte > 32):
print "Anzahl der Punkte zu hoch!"
return -1
UART.write(chr(27))
UART.write(chr(51))
UART.write(chr(Punkte))
return
# Setzt den Abstand zwischen zwei Zeilen auf den Default Wert (32 Punkte)
def SetLineDefault():
UART.write(chr(27))
UART.write(chr(50))
return
# ------------------------------------------------------------------------
# Barcode
# ------------------------------------------------------------------------
# Noch in Arbeit
# Einstellen der lesbaren Zeichen fuer den Barcode
# Auswahl
# - Above -> Ueber dem Barcode
# - Below -> Unter dem Barcode
# - Both -> Ueber und unter dem Barcode
def BarcodeReadable(Position):
if(Position == "Above"):
Value = 1
elif(Position == "Below"):
Value = 2
elif(Position == "Both"):
Value = 3
else:
Value = 0
UART.write(chr(29))
UART.write(chr(72))
UART.write(chr(Value))
return
# Einstellen der Barcode Breite
# Auswahl
# - Small
# - Big
def BarcodeWidth(Breite):
if(Breite == "Small"):
Value = 2
elif(Breite == "Big"):
Value = 3
else:
print "Ungueltige Angabe"
return -1
UART.write(chr(29))
UART.write(chr(119))
UART.write(chr(Value))
return
# Hoehe des Barcodes (0 - 255)
def BarcodeHeight(Hoehe):
if(Hoehe > 255):
print "Die Hoehe ist zu hoch!"
return -1
UART.write(chr(29))
UART.write(chr(104))
UART.write(chr(Hoehe))
return
# Barcode drucken
def PrintBarcode(Daten, Barcodetyp):
UART.write(chr(29))
UART.write(chr(107))
UART.write(chr(Barcodetyp))
for Counter in Daten:
UART.write(Counter)
UART.write(chr(00))
return
# ------------------------------------------------------------------------
# Bitmap
# ------------------------------------------------------------------------
| true |
004067b8de0096b22e75d18e15c73c1d02b4ba26 | Python | jschnab/leetcode | /arrays/longest_palindrom_substr.py | UTF-8 | 1,595 | 4 | 4 | [] | no_license | # leetcode 5
# find the longest palindromic substring in a string
# dbracecarple returns racecar
def long_pal_brute(s):
"""Return longest palindromic substring.
Time complexity is O(n^3)."""
l = len(s)
if l < 2:
return s
answer = ''
for i in range(l):
for j in range(i):
if s[j:i+1] == s[j:i+1][::-1]:
if i - j + 1 > len(answer):
answer = s[j:i+1]
if answer:
return answer
else:
return s[0]
def long_pal(s):
"""Return longest string with dynamic programming approach.
Time and space complexity are both O(n^2)."""
ans = ''
l = len(s)
max_l = 0
# we generate an n x n table to store where we found palindromes
memo = [[0] * l for _ in range(l)]
# we first store 1-letter palindromes
for i in range(l):
memo[i][i] = 1
ans = s[i]
max_l = 1
# we then store eventual 2-letter palindromes
for i in range(l-1):
if s[i] == s[i+1]:
memo[i][i+1] = 1
ans = s[i:i+2]
max_len = 2
# now we extend our search for >= 3-letter palindromes
for j in range(l):
for i in range(j-1):
if s[i] == s[j] and memo[i+1][j-1]:
memo[i][j] = 1
if j - i + 1 > max_len:
max_len = j - i + 1
ans = s[i:j+1]
return ans
if __name__ == '__main__':
print('radar : ', long_pal('radar'))
print('dbracecarple : ', long_pal('dbracecarple'))
print(' : ', long_pal(''))
print('ab : ', long_pal('ab'))
| true |
358e891349f5f8aa3cd701f0d7b4e07a0166ae76 | Python | amenson1983/week_5 | /lesson_5_homework/cashregister_class_1.py | UTF-8 | 2,709 | 3.578125 | 4 | [] | no_license | import pickle
from lesson_5_homework.realitem_class import Realitem
filename = 'bin1.dat'
filename1 = 'sum.dat'
class CashRegister:
def __init__(self, description=None, quantity=None, price=None):
self._description = description
self._quantity = quantity
self._price = price
def input_description(self):
description = input('Input description: ')
self._description = description
def input_quantity(self):
quantity = input('Input stock: ')
self._quantity = quantity
def input_price(self):
price = input('Input price: ')
self._price = price
def ret_description(self):
return self._description
def ret_quantity(self):
return self._quantity
def ret_price(self):
return self._price
def get_sum_(self):
price = self.ret_price
quantity = self.ret_quantity
sum = float(price) * float(quantity)
return sum
def load_items(filename):
try:
input_file = open(filename, 'rb')
my_items = pickle.load(input_file)
input_file.close()
except IOError:
my_items = {}
return my_items
def load_sum(filename1):
try:
input_file = open(filename1, 'rb')
sum = pickle.load(input_file)
input_file.close()
except IOError:
my_items = {}
return sum
def get_menu_choice():
print('_____________________________')
print('1. Show bin')
print('2. Choose items to buy')
print('3. Get the sum to pay')
print('4. Clear bin')
print('5. Quit')
choice = int(input('Please make a choice: '))
while choice < 1 or choice > 5:
choice = int(input('Please make a choice: '))
return choice
def item_input():
q = int(input('How many items?'))
item = CashRegister()
sum = 0
list_items = {}
for person_num in range(0, q):
item.input_description()
item.input_quantity()
item.input_price()
description = item.ret_description()
quantity = item.ret_quantity()
price = item.ret_price()
sum += float(quantity) * float(price)
items = Realitem(description,quantity,price)
list_items[description] = items
print(list_items,sum)
return list_items, sum
def clear():
list_items = load_items(filename)
list_items.clear()
del list_items
def save_items_to_bin(list_items):
output_file = open(filename,'wb')
pickle.dump(list_items, output_file)
output_file.close()
def save_sum_to_bin(sum):
output_file = open(filename1,'wb')
pickle.dump(sum, output_file)
output_file.close()
def show_bin():
list_items = load_items(filename)
print(list_items) | true |
8950bfb72e53e4e306bb80daa50985c343a5d8df | Python | cs-fullstack-fall-2018/python-task-list-homework-psanon19 | /python_TaskList.py | UTF-8 | 1,942 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | import os
os.system("clear")
import datetime
now = datetime.datetime.now()
greeting = ("Congratulations! You're running PJ's Task List program. \n")
name = input("What is your filename? ").lower()
my_file = open(name+"userList.txt","a+")
my_file.write(str(now))
text = open(name+"userList.txt", "r").read()
print("Hello, this was your last session: \n" + text)
class User():
def __init__(self, tasks=[]):
self.userTasks = tasks
def main():
profile = User()
print (greeting)
profile.userTasks.append(input("\nEnter a Task: "))
while True:
modifyList = input("\nWould you like to 'add' a task, 'remove' a task, 'check' your current list, or 'quit' the program and save?: ").lower()
if modifyList!="remove" and modifyList!="add" and modifyList!="quit" and modifyList!="check":
print("Please input either 'add', 'remove', 'check', or 'quit'")
continue
elif modifyList == "add":
profile.userTasks.append(input("\n Input a new task: "))
continue
elif modifyList == "remove":
profile.userTasks.pop(int(input("\nWhich task do you want to remove: ")))
continue
elif modifyList == "check":
for pretty in profile.userTasks:
print(text)
print("\n" + pretty)
continue
elif modifyList == "quit":
end=(input("\nThanks for running my program! Please sign your name to end: "))
break
else:
print("an unexpected error occurred")
break
print("\nUser name is: " + end + "\n")
print("Your finished createing this list is: ", profile.userTasks)
my_file.write("\nUser name is: " + end + "\n")
for info in profile.userTasks:
my_file.write(info + "\n")
my_file.write("The date is: " + "\n" + "_______________________" + "\n")
if __name__ == '__main__':
main() | true |
5a783814219bded9fd908392d44717f2e237ae85 | Python | jeremybwilson/codingdojo_bootcamp | /bootcamp_class/python/type_list_commented.py | UTF-8 | 2,140 | 4.625 | 5 | [] | no_license | # Type List
# Write a program that takes a list and prints a message for each element in the list, based on that element's data type.
mixed_list = ['magical unicorns',19,'hello',98.98,'world']
integer_list = [2,3,1,7,4,12]
string_list = ['magical','unicorns']
# define a function to take a list as an argument
def identify_list_type(lst):
# define an empty placeholder string
new_string = ''
# define an empty variable for total (int)
sum = 0
# loop through values of the provided list
for value in lst:
# determine if value is a regular int or a float and if so, add up values
if isinstance(value, int) or isinstance(value, float):
# add up int or float values
sum += value
# otherwise values must be strings
elif isinstance(value, str):
# while looping, add values to new_string (string) variable
new_string += value
# if list contains both strings and numbers
if new_string and sum:
print "The list you entered is of mixed type"
print "String: magical unicorns hello world"
print "Total:", sum
# or if only a list with string values
elif new_string:
print "The list you entered is of string type"
print "String: magical unicorns"
# else, this is a list with only numbers
else:
print "The list you entered is of integer type"
print "Sum:", sum
print identify_list_type(mixed_list)
print identify_list_type(integer_list)
print identify_list_type(string_list)
mixed_list2 = ['magical unicorns', 19, 'hello', 98.98, 'world']
integer_list2 = [2,3,1,7,4,12]
string_list2 = ['magical', 'unicorns']
def find_list_type(someList):
total = 0
my_string = ""
output_type = ""
for item in someList:
if isinstance(item, int):
total += item
if output_type == "string":
output_type = "mixed"
else:
output_type = "number"
elif isinstance(item, str):
my_string += item
if output_type == "number":
output_type = "mixed"
else:
output_type = "string"
print find_list_type(mixed_list2)
print find_list_type(integer_list2)
print find_list_type(string_list2)
| true |
634c00d80908ffbd07363be7d568635b538096cb | Python | Zi-Shane/DNS-Amplification | /dns.py | UTF-8 | 2,875 | 2.828125 | 3 | [] | no_license | # Imports
from scapy.all import *
from pprint import pprint
import operator
# Parameters
interface = "eno2" # `Interface you want to use
dns_source = "192.168.100.1" # IP of that interface
dns_destination = ["8.8.8.8"] # List of DNS Server IPs
time_to_live = 128 # IP TTL
query_name = "dnssec-tools.org" # DNS Query Name
query_type = ["A"] # DNS Query Types
# query_type = ["ANY", "A","AAAA","CNAME","MX","NS","PTR","CERT","SRV","TXT", "SOA"] # DNS Query Types
# Initialise variables
results = []
packet_number=0
# Loop through all query types then all DNS servers
for i in range(0,len(query_type)):
for j in range(0, len(dns_destination)):
packet_number += 1
# Craft the DNS query packet with scapy
packet_dns = IP(src=dns_source, dst=dns_destination[j], ttl=time_to_live) / UDP() / DNS(rd=1, qd=DNSQR(qname=query_name, qtype=query_type[i]))
packet_dnssec = IP(src=dns_source, dst=dns_destination[j], ttl=time_to_live) / UDP() / DNS(rd=1, ad=1, qd=DNSQR(qname=query_name, qtype=query_type[i]),ar=DNSRROPT())
# print(hexdump(packet))
# packet.show()
# Sending the packet
try:
query_dns = sr1(packet_dns,iface=interface,verbose=False, timeout=8)
print("Packet dns #{} sent!".format(packet_number))
query_dnssec = sr1(packet_dnssec,iface=interface,verbose=False, timeout=8)
print("Packet dnssec #{} sent!".format(packet_number))
except:
print("Error sending packet #{}".format(packet_number))
# Creating dictionary with received information
try:
result_dict_dns = {
'query_dns_type': "dns",
'dns_destination':dns_destination[j],
'query_type':query_type[i],
'query_size':len(packet_dns),
'response_size':len(query_dns),
'amplification_factor': ( len(query_dns) / len(packet_dns) ),
'packet_number':packet_number
}
result_dict_dnssec = {
'query_dns_type': "dnssec",
'dns_destination':dns_destination[j],
'query_type':query_type[i],
'query_size':len(packet_dnssec),
'response_size':len(query_dnssec),
'amplification_factor': ( len(query_dnssec) / len(packet_dnssec) ),
'packet_number':packet_number
}
results.append(result_dict_dns)
results.append(result_dict_dnssec)
except:
pass
# Sort dictionary by the amplification factor
results.sort(key=operator.itemgetter('amplification_factor'),reverse=True)
# Print results
pprint(results)
| true |
50c4fd7fb78f11740351feed1b87327306337f42 | Python | heeewo/Python | /List/List_basic.py | UHC | 418 | 3.984375 | 4 | [] | no_license | #Ʈȿ ڷ ٵ ִ Ʈ ̴
my_list = ['a', 1, 2, 3, 'b', ['apple', 'banana'], 4]
print(my_list[3])
my_list[2] = "hello"
print(my_list[:])
print(my_list[0:6])
b = int(my_list[5].index('banana'))
print(my_list[5][b]) #ȿ ǥҶ þصȴ
#Ʈ Ʈ ε ұ? | true |
81d9ed4cc70e5a91a14d5d7c26b0ce49f138071b | Python | calispotato/python-1 | /timetest | UTF-8 | 181 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python3
import colors as c
import time as t
for count in range(2):
print(count + 1)
print(c.red + "hi " + c.green + "how are you?" + c.reset)
t.sleep(0.5)
| true |
eac05bb676ec107d2dbe09669005a8500d42beac | Python | joaovitor3/osm-tm-communication | /server/tests/test_app_config.py | UTF-8 | 568 | 2.78125 | 3 | [] | no_license | import server
from flask import Flask
from server.tests.base_test_config import BaseTestCase
class TestFlaskApp(BaseTestCase):
def test_create_app_must_exists(self):
self.assertEqual(
hasattr(server, 'create_app'),
True
)
def test_create_app_must_be_callable(self):
self.assertEqual(
hasattr(server.create_app, '__call__'),
True
)
def test_create_app_must_return_flask_app(self):
self.assertIsInstance(
server.create_app(),
Flask
)
| true |
7f1bb16c210e8bb104b6eceea76675e9f161d625 | Python | macic/pyboi | /utils/statistical.py | UTF-8 | 664 | 2.5625 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
def np_array_to_ohlc_df(klines: np.core.multiarray) -> pd.DataFrame:
df = pd.DataFrame(klines.reshape(-1,6),dtype=float, columns = ('ts',
'open',
'high',
'low',
'close',
'volume'))
df['ts'] = pd.to_datetime(df['ts'], unit='ms')
#df.set_index('Open Time')
return df
| true |
7f1e571e99684e6ed3ccb7b92b1374a50985317d | Python | tejasgpt/Wallbreakers | /Week 4/Stacks/baseball-game.py | UTF-8 | 673 | 3.609375 | 4 | [] | no_license | class Solution(object):
def calPoints(self, ops):
"""
PROBLEM STATEMENT:
Given a list of strings, each string can be one of the 4 following types : Integer, "+", "D", "C"
You need to return the sum of the points you could get in all the rounds.
:type ops: List[str]
:rtype: int
"""
stack = []
for op in ops:
if op == "+":
stack.append(stack[-1] + stack[-2])
elif op == "D":
stack.append(stack[-1] * 2)
elif op == "C":
stack.pop()
else:
stack.append(int(op))
return sum(stack)
| true |
0f5363b73001177657420c7e0f9fbc2f1ae45fce | Python | ramanathanaspires/learn-python | /basic/ep3_math_strings_exception_handling/exception_handling.py | UTF-8 | 220 | 3.453125 | 3 | [] | no_license | while True:
try:
number = int(input("Please enter a number: "))
break
except ValueError:
print("You didn't enter a number")
except:
print("An unknown error occured")
print("Thank you for entering a number") | true |
8191cfdebc2fc238a7f58bcf6d8e98e24cfa0c25 | Python | thuanaislab/visual_slam | /Graph_Idea/model/self_model.py | UTF-8 | 8,784 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 5 14:44:54 2021
Self-attention parts are mainly based on SuperGlue paper https://arxiv.org/abs/1911.11763
@author: thuan
"""
import torch
from torch import nn
import torch.nn.functional as F
import copy
BN_MOMENTUM = 0.1
def MLP(channels: list, do_bn=False):
# Multi layer perceptron
n = len(channels)
layers = []
for i in range(1,n):
layers.append(
nn.Conv1d(channels[i-1], channels[i], kernel_size = 1, bias =True))
if i < (n-1):
if do_bn:
layers.append(nn.BatchNorm1d(channels[i], momentum=BN_MOMENTUM))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
def normalize_keypoints(kpoints, image_shape):
# Normalize the keypoints locations based on the image shape
_, _, height, width = image_shape
one = kpoints.new_tensor(1)
size = torch.stack([one*width, one*height])[None]
center = size/2
scaling = size.max(1, keepdim = True).values*0.7 # multiply with 0.7 because of discarded area when extracting the feature points
return (kpoints- center[:,None,:]) / scaling[:,None,:]
class KeypointEncoder(nn.Module):
def __init__(self, feature_dim, layers):
super().__init__()
self.encoder = MLP([3] + layers + [feature_dim])
nn.init.constant_(self.encoder[-1].bias, 0.0)
def forward(self, keypoints, scores):
inputs = [keypoints.transpose(1,2), scores.unsqueeze(1)]
return self.encoder(torch.cat(inputs, dim = 1))
def attention(query, key, value):
dim = query.shape[1]
scores = torch.einsum('bdhn,bdhm->bhnm', query, key)
pros = torch.nn.functional.softmax(scores, dim=-1)/dim**0.5
return torch.einsum('bhnm,bdhm->bdhn', pros, value)
class Multi_header_attention(nn.Module):
"""Multiheader attention class"""
def __init__(self, num_head: int, f_dimension: int):
super().__init__()
assert f_dimension % num_head == 0
self.dim = f_dimension // num_head
self.num_head = num_head
self.merge = nn.Conv1d(f_dimension, f_dimension, kernel_size = 1)
self.proj = nn.ModuleList([copy.deepcopy(self.merge) for _ in range(3)])
def forward(self, query, key, value):
batch_size = query.shape[0]
query, key, value = [l(x).view(batch_size, self.dim, self.num_head,
-1) for l,x in zip(self.proj, (query, key, value))]
x = attention(query, key, value)
return self.merge(x.contiguous().view(batch_size, self.dim*self.num_head,-1))
class AttentionalPropagation(nn.Module):
"""AttentionalPropagation"""
def __init__(self, num_head: int, f_dimension: int):
super().__init__()
self.attn = Multi_header_attention(num_head, f_dimension)
self.mlp = MLP([f_dimension*2, f_dimension*2, f_dimension])
nn.init.constant_(self.mlp[-1].bias, 0.0)
def forward(self, x, source):
message = self.attn(x, source, source)
return self.mlp(torch.cat([x, message], dim = 1))
class AttensionalGNN(nn.Module):
def __init__(self, num_GNN_layers: int, f_dimension: int):
super().__init__()
self.layers = nn.ModuleList([
AttentionalPropagation(4,f_dimension)
for _ in range(num_GNN_layers)])
def forward(self, descpt):
for layer in self.layers:
delta = layer(descpt, descpt)
descpt = descpt + delta
return descpt
class FourDirectionalLSTM(nn.Module):
def __init__(self, seq_size, origin_feat_size, hidden_size):
super(FourDirectionalLSTM, self).__init__()
self.feat_size = origin_feat_size // seq_size
self.seq_size = seq_size
self.hidden_size = hidden_size
self.lstm_rightleft = nn.LSTM(self.feat_size, self.hidden_size, batch_first=True, bidirectional=True)
self.lstm_downup = nn.LSTM(self.seq_size, self.hidden_size, batch_first=True, bidirectional=True)
def init_hidden_(self, batch_size, device):
return (torch.randn(2, batch_size, self.hidden_size).to(device),
torch.randn(2, batch_size, self.hidden_size).to(device))
def forward(self, x):
batch_size = x.size(0)
x_rightleft = x.view(batch_size, self.seq_size, self.feat_size)
x_downup = x_rightleft.transpose(1, 2)
hidden_rightleft = self.init_hidden_(batch_size, x.device)
hidden_downup = self.init_hidden_(batch_size, x.device)
_, (hidden_state_lr, _) = self.lstm_rightleft(x_rightleft, hidden_rightleft)
_, (hidden_state_ud, _) = self.lstm_downup(x_downup, hidden_downup)
hlr_fw = hidden_state_lr[0, :, :]
hlr_bw = hidden_state_lr[1, :, :]
hud_fw = hidden_state_ud[0, :, :]
hud_bw = hidden_state_ud[1, :, :]
return torch.cat([hlr_fw, hlr_bw, hud_fw, hud_bw], dim=1)
class AttentionBlock(nn.Module):
def __init__(self, in_channels):
super(AttentionBlock, self).__init__()
self.g = nn.Linear(in_channels, in_channels // 8)
self.theta = nn.Linear(in_channels, in_channels // 8)
self.phi = nn.Linear(in_channels, in_channels // 8)
self.W = nn.Linear(in_channels // 8, in_channels)
def forward(self, x):
batch_size = x.size(0)
out_channels = x.size(1)
g_x = self.g(x).view(batch_size, out_channels // 8, 1)
theta_x = self.theta(x).view(batch_size, out_channels // 8, 1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, out_channels // 8, 1)
f = torch.matmul(phi_x, theta_x)
f_div_C = F.softmax(f, dim=-1)
y = torch.matmul(f_div_C, g_x)
y = y.view(batch_size, out_channels // 8)
W_y = self.W(y)
z = W_y + x
return z
class MainModel(nn.Module):
default_config = {
'descriptor_dim': 256,
'keypoint_encoder': [32, 64, 128, 256],
'num_GNN_layers': 9,
'num_hidden':2048,
'num_hiden_2':40,
'lstm': False,
}
def __init__(self, config):
super().__init__()
self.config = {**self.default_config,**config}
print("num_GNN_layers {}".format(self.config['num_GNN_layers']))
self.keypoints_encoder = KeypointEncoder(
self.config['descriptor_dim'], self.config['keypoint_encoder'])
self.gnn = AttensionalGNN(self.config['num_GNN_layers'], self.config['descriptor_dim'])
self.conv1 = nn.Conv1d(256, self.config['num_hidden'], 1)
#self.conv2 = nn.Conv1d(512, 1024, 1)
if self.config['lstm']:
self.fc1 = nn.Linear(self.config['num_hidden']//2, self.config['num_hiden_2'])
#self.fc2 = nn.Linear(1024,40)
self.fc3_r = nn.Linear(self.config['num_hidden']//2, 3)
self.fc3_t = nn.Linear(self.config['num_hidden']//2, 3)
else:
self.fc1 = nn.Linear(self.config['num_hidden'], self.config['num_hiden_2'])
#self.fc2 = nn.Linear(1024,40)
self.fc3_r = nn.Linear(self.config['num_hiden_2'], 3)
self.fc3_t = nn.Linear(self.config['num_hiden_2'], 3)
self.bn = nn.BatchNorm1d(2048, momentum=BN_MOMENTUM)
self.bn1 = nn.BatchNorm1d(512, momentum=BN_MOMENTUM)
self.bn2 = nn.BatchNorm1d(1024, momentum=BN_MOMENTUM)
self.bn3 = nn.BatchNorm1d(40, momentum=BN_MOMENTUM)
if self.config['lstm']:
self.lstm4dir = FourDirectionalLSTM(seq_size=32, origin_feat_size=2048, hidden_size=256)
def forward(self, data):
descpt = data['descriptors']
keypts = data['keypoints']
scores = data['scores']
# normalize keypoints
keypts = normalize_keypoints(keypts, data['image'].shape)
# Keypoint MLP encoder
key_encodes = self.keypoints_encoder(keypts, scores)
descpt = descpt + key_encodes
# Multi layer transformer network
descpt = self.gnn(descpt)
out = F.relu(self.conv1(descpt))
#out = F.relu(self.bn2(self.conv2(out)))
out = nn.MaxPool1d(out.size(-1))(out)
out = nn.Flatten(1)(out)
if self.config['lstm']:
out = self.lstm4dir(out)
out_r = self.fc3_r(out)
out_t = self.fc3_t(out)
else:
out = F.relu(self.fc1(out))
out_r = self.fc3_r(out)
out_t = self.fc3_t(out)
return torch.cat([out_t, out_r], dim = 1)
| true |
8f3ab8493d9c493354259a740fa2eac7be8c5088 | Python | MilesHewitt/Machine_Learning_QCD | /set-up-files/Kernel_Ridge_Regression_Hyperparameter.py | UTF-8 | 2,704 | 2.640625 | 3 | [
"MIT"
] | permissive | # Imports
from __future__ import print_function
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge, Lasso, SGDRegressor, ElasticNet, LinearRegression
from sklearn.multioutput import MultiOutputRegressor
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import math
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, cross_val_predict, cross_validate
from sklearn.kernel_ridge import KernelRidge
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
from sklearn.ensemble import AdaBoostRegressor
from numpy import genfromtxt
##################################################################################################################################
# Surface Plot to obtain best hyperparameters
%matplotlib notebook
gammas= np.logspace(-4, 0, 5)
alphas = np.logspace(-12, -9, 5)
alf=[]
gam=[]
trainKR = []
testKR = []
train_MSE_KR = []
test_MSE_KR = []
for c in gammas:
for a in alphas:
KR_Reg = KernelRidge(alpha=a, kernel='rbf', gamma=c)
KR_Reg.fit(X_train,y_train)
#trainKR.append(KR_Reg.score(X_train, y_train, sample_weight=None))
#testKR.append(KR_Reg.score(X_test, y_test, sample_weight=None))
training_MSE = KR_Reg.predict(X_train)
testing_MSE = KR_Reg.predict(X_test)
train_MSE_KR.append(np.absolute(mean_squared_error(y_train, training_MSE)))
test_MSE_KR.append(np.absolute(mean_squared_error(y_test, testing_MSE)))
alf.append(a)
gam.append(c)
alpha = np.reshape(np.array(alf), (len(gammas), len(alphas)))
gamma = np.reshape(np.array(gam), (len(gammas), len(alphas)))
minis = np.reshape(np.array(test_MSE_KR), (len(gammas), len(alphas)))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Plot a basic surface.
ax.plot_surface(np.log(alpha), np.log(gamma), np.log(minis), cmap=cm.coolwarm)
ax.set_xlabel('log alpha')
ax.set_ylabel('log gamma')
ax.set_zlabel('log MSE')
ax.view_init(elev=38, azim=37)
plt.show()
#plt.savefig('3D_surface_plot.png', dpi=1000)
# Obtain best values from surface plot
result = np.where(np.array(test_MSE_KR) == np.amin(np.array(test_MSE_KR)))
print('Best RMSE is:',np.sqrt(np.array(test_MSE_KR).min()))
print('Best model is placed:',result[0])
print('Best RMSE is:',np.sqrt(np.array(test_MSE_KR)[result[0]]),'Best Gamma is:',np.array(gam)[result[0]],'Best Alpha is:',np.array(alf)[result[0]])
Best_KR_Gamma = np.array(gam)[result[0]]
Best_KR_Alpha = np.array(alf)[result[0]]
############################################################################################################
| true |
cef6c91225a94797eec40fc2f010cea9d8ea1b8f | Python | GHDaru/NEPS_em_Python | /MelhorAluno.py | UTF-8 | 74 | 3.265625 | 3 | [] | no_license | X,Y = list(map(float,input().split()))
print('Pedro' if X<=Y else 'Paulo') | true |
d5d30642145d3c4a5883a833ea5f7a7e12642bb0 | Python | ChenYH1994/Python | /Random_Forest/Temps_extended_effect.py | UTF-8 | 10,466 | 3.484375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
'''
1. 在舊特徵下,資料量增大,對預測結果有啥影響?
2. 加入新特徵又會如何? 效率會改變嗎?
'''
'''讀取大資料'''
features=pd.read_csv('temps_extended.csv')
print(features.head(5))
'''查看資料數量'''
print()
print('資料規模:',features.shape)
'''
以上資料量變大,也新增了特徵:
ws_1: 前一天風速
prcp_1: 前一天降水
snwd_1: 前一天積雪深度
'''
'''日期整合'''
import datetime
years=features['year']
months=features['month']
days=features['day']
dates=[str(int(year))+'-'+str(int(month))+'-'+str(int(day)) for
year,month,day in zip(years,months,days)]
dates=[datetime.datetime.strptime(date,'%Y-%m-%d') for date in dates]
plt.style.use('fivethirtyeight')
'''設定版面配置'''
fig, ((ax1,ax2),(ax3,ax4))=plt.subplots(2,2,figsize=(15,10))
fig.autofmt_xdate(rotation=45)
'''平均最高氣溫'''
ax1.plot(dates,features['average'])
ax1.set_xlabel('')
ax1.set_ylabel('Temperature(F)')
ax1.set_title('Historical Avg Max Temp')
'''風速'''
ax2.plot(dates,features['ws_1'],'r-')
ax2.set_xlabel('')
ax2.set_ylabel('Wind speed(mph)')
ax2.set_title('Prior wind speed')
'''降水'''
ax3.plot(dates,features['prcp_1'],'r-')
ax3.set_xlabel('Date')
ax3.set_ylabel('Precipitation(in)')
ax3.set_title('Prior precipitation')
'''積雪'''
ax4.plot(dates,features['snwd_1'],'ro')
ax4.set_xlabel('Date')
ax4.set_ylabel('Snow Depth(in)')
ax4.set_title('Prior snow depth')
plt.tight_layout(pad=2)
plt.show()
'''發現天氣轉變跟季節有關,可以自己建立新特徵: 季節變數'''
# Label every row with its meteorological season, derived from the month number.
_MONTH_TO_SEASON = {
    12: 'winter', 1: 'winter', 2: 'winter',
    3: 'spring', 4: 'spring', 5: 'spring',
    6: 'summer', 7: 'summer', 8: 'summer',
    9: 'fall', 10: 'fall', 11: 'fall',
}
seasons = []
for month in features['month']:
    label = _MONTH_TO_SEASON.get(month)
    if label is not None:
        seasons.append(label)
# Keep only the columns of interest and attach the season labels as column 4.
reduced_features = features[['temp_1', 'prcp_1', 'average', 'actual']]
reduced_features.insert(4, 'season', seasons)
# '''接下來就可以依季節去觀察各項特徵的變化'''
# import seaborn as sns
# sns.set(style='ticks',color_codes=True)
#
# '''選擇你喜歡的顏色'''
# palette=sns.xkcd_palette(['dark blue','dark green','gold','orange'])
#
# '''繪製 pairplot'''
# sns.pairplot(reduced_features,hue='season',kind='reg',diag_kind='kde',palette=palette
# ,plot_kws=dict(alpha=0.7),diag_kws=dict(shade=True))
#
# plt.show()
'''對 weekday 做獨熱編碼'''
features=pd.get_dummies(features)
'''分離大資料的特徵和標籤'''
labels=features['actual']
features=features.drop('actual',axis=1)
features_list=list(features.columns)
print(features)
print(labels)
'''換成 array'''
import numpy as np
features=np.array(features)
labels=np.array(labels)
'''大資料的訓練集、測試集'''
from sklearn.model_selection import train_test_split
features_train,features_test,labels_train,labels_test=train_test_split\
(features,labels,test_size=0.25,random_state=0)
'''先來看看舊特徵也就是剃除 'ws_1', 'prcp_1', 'snwd_1' 的結果'''
'''把舊特徵的 index 集中'''
orig_feature_index=[features_list.index(feature) for feature in features_list
if feature not in ['ws_1', 'prcp_1', 'snwd_1']]
'''現在叫出資料量 348 筆的小資料'''
small_features=pd.read_csv('temps.csv')
small_features=pd.get_dummies(small_features)
# print('小資料的規模:')
# print(small_features.shape)
# print()
'''小的特徵跟標籤分離'''
small_labels=np.array(small_features['actual'])
small_features=small_features.drop('actual',axis=1)
small_features_list=list(small_features.columns)
small_features=np.array(small_features)
'''小的訓練集、測試集'''
from sklearn.model_selection import train_test_split
small_features_train,small_features_test,small_labels_train,\
small_labels_test=train_test_split(small_features,small_labels,test_size=0.25,random_state=42)
'''用小資料去建樹模型'''
from sklearn.ensemble import RandomForestRegressor
rf=RandomForestRegressor(n_estimators=100,random_state=0)
'''小資料樹模型'''
rf.fit(small_features_train,small_labels_train)
'''統一用大資料測試集去做測試,記得大資料測試集也要去除新特徵'''
small_model_pred=rf.predict(features_test[:,orig_feature_index])
'''計算平均溫度誤差'''
errors=abs(small_model_pred-labels_test)
print()
print('小資料平均溫度誤差:',round(np.mean(errors),2),'degrees.')
'''MAPE'''
mape=100*(errors/labels_test)
# 為了觀察方便,用 100-誤差
accuracy=100-np.mean(mape)
print('small_Accuracy:',round(accuracy,2),'%.')
'''現在用大資料建模,再跟小資料樹模型的結果做比較'''
'''同樣用舊特徵去建模'''
features_train1=features_train[:,orig_feature_index]
big_rf=RandomForestRegressor(n_estimators=100,random_state=0)
'''大資料樹模型'''
big_rf.fit(features_train1,labels_train)
'''用大資料樹模型去預測'''
big_pred=big_rf.predict(features_test[:,orig_feature_index])
'''大資料的平均溫度誤差'''
big_errors=abs(big_pred-labels_test)
print()
print('大資料平均溫度誤差:',round(np.mean(big_errors),2),'degrees.')
'''MAPE'''
big_mape=100*np.mean(big_errors/labels_test)
big_accuracy=100-big_mape
print('big_Accuracy:',round(big_accuracy,2),'%.')
print()
print('誤差下降為 4.2')
print('一般機器學習都希望資料量越大越好,可讓學習更充分且降低過擬合')
'''增加特徵個數對結果的影響'''
from sklearn.ensemble import RandomForestRegressor
rf_exp=RandomForestRegressor(n_estimators=100,random_state=0)
rf_exp.fit(features_train,labels_train)
'''預測'''
exp_pred=rf_exp.predict(features_test)
'''有新特徵的平均溫度誤差'''
exp_errors=abs(exp_pred-labels_test)
print()
print('新特徵平均溫度誤差:',round(np.mean(exp_errors),2),'degrees.')
'''MAPE'''
exp_mape=100*np.mean(exp_errors/labels_test)
exp_accuracy=100-exp_mape
print('exp_Accuracy:',round(exp_accuracy,2),'%.')
print()
print('整體還是有提升的!')
print()
print('展示特徵重要性')
'''特徵名'''
importances=list(rf_exp.feature_importances_)
'''名字、重要度組合'''
features_important=[(feature,round(importance,2)) for feature,importance
in zip(features_list,importances)]
'''以重要度排序'''
features_important=sorted(features_important,key=lambda x:x[1],reverse=True)
# 比較 key 的數值大小來排序
'''列印結果'''
[print('特徵:{:20} 重要度:{}'.format(*pair)) for pair in features_important]
print()
print('仍然是 temp_1、average 排在最前面,新特徵只有風速 ws_1 出現,但影響力小')
'''圖表化'''
'''有網格的背景'''
plt.style.use('fivethirtyeight')
'''指定位置'''
x_values=list(range(len(importances)))
'''畫圖'''
plt.bar(x_values,importances,orientation='vertical',color='b',
edgecolor='k',linewidth=1.2)
'''在指定位置上標上名字並且要豎著寫'''
plt.xticks(x_values,features_list,rotation='vertical')
'''圖名'''
plt.ylabel('Importance')
plt.xlabel('Variable')
plt.title('Variable Importances')
plt.show()
'''
將重要度從大到小排列。
設定一個門檻值:95%,讓特徵重要度以累加的方式達到,而達到的那些特徵就是主要特徵,其餘丟棄。
'''
'''把排序後的特徵跟重要度獨立出來'''
sorted_importances=[importance[1] for importance in features_important]
sorted_features=[importance[0] for importance in features_important]
'''累加'''
cumulative_importances=np.cumsum(sorted_importances)
'''繪製聚合線圖'''
plt.plot(x_values,cumulative_importances,'g-')
'''畫一條 y=0.95 的紅色虛線'''
plt.hlines(y=0.95,xmin=0,xmax=len(sorted_importances),colors='r',linestyles='dashed')
'''x軸標名字'''
plt.xticks(x_values,features_list,rotation='vertical')
'''y軸、圖名'''
plt.ylabel('Importance')
plt.xlabel('Variable')
plt.title('Variable Importances')
plt.show()
print('由圖可以看到,重要特徵只到 year,再往右都用不到')
'''實驗: 如果只用這 5 個特徵去建模,結果如何?'''
'''5個重要特徵名'''
impt_name=[feature[0] for feature in features_important[0:5]]
'''找出它們在原資料的index'''
impt_index=[features_list.index(feature) for feature in impt_name]
'''為了做比較,所以從原訓練集、測試集拿出新集合'''
impt_train=features_train[:,impt_index]
impt_test=features_test[:,impt_index]
'''訓練模型'''
rf_impt=RandomForestRegressor(n_estimators=100,random_state=0)
rf_impt.fit(impt_train,labels_train)
'''預測'''
impt_pred=rf_impt.predict(impt_test)
'''重要特徵的平均溫度誤差'''
impt_errors=abs(impt_pred-labels_test)
print()
print('重要特徵平均溫度誤差:',round(np.mean(impt_errors),2),'degrees.')
'''MAPE'''
impt_mape=100*np.mean(impt_errors/labels_test)
impt_accuracy=100-impt_mape
print('impt_Accuracy:',round(impt_accuracy,2),'%.')
print()
print('結果沒有比較好,代表說其餘特徵還是有一定作用的!')
print()
print('雖然效果沒有較好,但是說不定效率上可以更省時間?')
'''計算時間'''
import time
all_features_time=[]
'''取大資料計算十次,然後算效率總平均'''
for _ in range(10):
start_time=time.time()
rf_exp.fit(features_train,labels_train)
all_features_pred=rf_exp.predict(features_test)
end_time=time.time()
all_features_time.append(end_time-start_time)
# 將每次消耗的時間儲存
all_features_time=np.mean(all_features_time)
# 十次耗時的平均
print('使用所有特徵建模與測試的平均消耗時間:',round(all_features_time,2),'秒')
'''取重要特徵的資料效率'''
rd_features_time=[]
for _ in range(10):
start_time=time.time()
rf_exp.fit(impt_train,labels_train)
rd_pred=rf_exp.predict(impt_test)
end_time=time.time()
rd_features_time.append(end_time-start_time)
rd_features_time=np.mean(rd_features_time)
print('使用重要特徵建模與測試的平均消耗時間:',round(rd_features_time,2),'秒')
| true |
e7e5af0e503e60a1c1f0ba32beadad8fb1f8b56a | Python | JackTJC/LeetCode | /dp/MinSetSize.py | UTF-8 | 571 | 2.828125 | 3 | [] | no_license | # 1338
import collections
from typing import List
class Solution:
    """LeetCode 1338: fewest distinct values whose removal deletes at least half of arr."""

    def minSetSize(self, arr: List[int]) -> int:
        """Greedily remove the most frequent values until half the array is gone."""
        if not arr:
            return -1
        if len(arr) == 1:
            return 1
        target = len(arr) // 2
        removed = 0
        picked = 0
        # Counter.most_common() yields values by descending frequency.
        for _, freq in collections.Counter(arr).most_common():
            if removed >= target:
                break
            removed += freq
            picked += 1
        return picked
| true |
14b247e3caa4c999f005b37dc0ae7e061b5fb328 | Python | davelive/Homework | /hw8_David.py | UTF-8 | 2,304 | 4.4375 | 4 | [] | no_license | """
Problem 1
Create 3 dictionaries for your favourite top 3 cars. Dict should contain information like brand, model, year, and color.
Add all those dicts in one dict and print items.
"""
# Three favourite cars, each described by brand, model, year and colours.
car1 = {
    "brand": "Audi",
    "model": "R8",
    "year": 2015,
    "colors": ["White"]
}

car2 = {
    "brand": "Chevrolet",
    "model": "Corvette",
    "year": 2009,
    "colors": ["Yellow", 'Black']
}

car3 = {
    "brand": "Mitsubishi",
    "model": "Eclipse",
    "year": 1995,
    "colors": ["Red", 'Green']
}

# Merge left-to-right: on duplicate keys the later car's value wins.
fav_cars = dict(car1)
fav_cars.update(car2)
fav_cars.update(car3)
print(list(fav_cars.items()))
"""
Problem 2
You have a list of lists. Each list in the list contains a key and a value. Transform it into a list of dictionaries.
Use loops.
"""
ls = [['Bob', 45], ['Anna', 4], ['Luiza', 24], ['Martin', 14]]
# Build a name -> value mapping from the [key, value] pairs.
my_dict = {key: value for key, value in ls}
print(my_dict)
"""
Problem 3
Check if value 1000 exists in the dict values. If yes delete all other items except that one.
"""
dt = {'hundred': 100, 'million': 1000000, 'thousand': 1000, 'ten': 10}
# If 1000 appears among the values, keep only the item holding it.
if 1000 in dt.values():
    kept = dt['thousand']
    dt.clear()
    dt['thousand'] = kept
print(dt)
"""
Problem 4
Change Narine's salary to 10000
"""
sampleDict = {
    'employee1': {'name': 'Marine', 'salary': 7500},
    'employee2': {'name': 'Karine', 'salary': 8000},
    'employee3': {'name': 'Narine', 'salary': 6500}
}
# Replace Narine's record with one carrying the raised salary.
sampleDict['employee3'] = {'name': 'Narine', 'salary': 10000}
print(sampleDict)
"""
Problem 5
Write a function that will get a dict of employees and their salaries. It will return a new dict with
the same keys (employees) and all values will be the average of their salaries.
example: dict1 = {'ann': 3000, 'bob': 4000, 'lily': 5000}
dict2 = {'ann': 4000, 'bob': 4000, 'lily': 4000}
"""
dict1 = {'ann': 3000, 'bob': 4000, 'lily': 5000, 'molly': 5500, 'david': 500}
def problem5(dict1):
    """Replace every salary in *dict1* with the average salary, in place.

    Returns the same dict object.

    BUG FIX: the old code accumulated ``x // len(dict1)`` per employee,
    flooring each term before adding, which under-counts whenever salaries
    are not multiples of the head-count (e.g. three salaries of 1 averaged
    to 0).  The integer average of the true total is used instead.
    """
    if not dict1:
        return dict1
    average = sum(dict1.values()) // len(dict1)
    for name in dict1:
        dict1[name] = average
    return dict1
print(problem5(dict1))
"""
Homework 7 Problem 4
Write a program that will add the string 'AAA' as an item before every item of the list.
"""
the_list = ['chrome', 'opera', 'mozilla', 'explorer']
# Interleave the marker 'AAA' before each browser name.
new_list = []
for browser in the_list:
    new_list.extend(('AAA', browser))
print(new_list) | true |
86fc23af60a5c7aaa2e3f0607e1a957f3f917eda | Python | rohit9934/Data-Science | /PCA.py | UTF-8 | 1,724 | 3.234375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
from sklearn.preprocessing import StandardScaler
#from sklearn import Decomposition
from scipy.linalg import eigh
#This is data visualisation using PCA technique
#MNIST data_set is used
#Basically, i have manually coded the sklearn PCA method to understand Principal Component Analysis deeply
# 4 steps are used:
# 1. Column standardization
# 2. Finding the covariance matrix
# 3. Finding eigenvalues and eigenvectors
# 4. The eigenvectors of the two largest eigenvalues are used to project the 784-dimensional (feature) dataset into 2D
dataset= pd.read_csv('./mnist_train.csv')
#print(dataset.head(0))
# Separate the 784 pixel columns from the digit labels.
d= dataset.drop('label',axis='columns')
l= dataset["label"]
plt.figure(figsize=(7,7))
idx=100
# Each row is a flattened 28x28 grayscale image.
grid= d.iloc[idx].values.reshape(28,28)
#plt.imshow(grid,interpolation="nearest",cmap="gray")
#plt.show()
# Work on the first 15000 samples only to keep the eigendecomposition cheap.
data= d.head(15000)
labels= l.head(15000)
print(labels.shape)
#Standardizing data (zero mean, unit variance per pixel column)
standard_data= StandardScaler().fit_transform(data)
#print(standard_data.shape)
#Finding covariance = X^T*X
# NOTE(review): this is the unnormalised scatter matrix (no division by n-1);
# the eigenvectors match the true covariance matrix, only eigenvalues scale.
cov_matrix= np.matmul(standard_data.T,standard_data)
#print(cov_matrix.shape)
#Finding Eigen values and eigen vectors
# eigh returns eigenvalues in ascending order; (782, 783) picks the two
# largest of the 784.
values, vectors = eigh(cov_matrix,eigvals=(782,783))
vectors= vectors.T
#print(values)
#print(vectors.shape)
#print(standard_data.T.shape)
#Finding optimal value by multiplying eigen vector to the matrix
# Project the standardized data onto the top-2 principal directions (2 x n).
optimal_vector= np.matmul(vectors,standard_data.T)
# Attach the labels as a third row, then transpose to n x 3.
optimal_vector= np.vstack((optimal_vector,labels)).T
dataframe= pd.DataFrame(optimal_vector,columns=("1st","2nd","labels"))
#print(dataframe.head())
sn.FacetGrid(dataframe,hue="labels",size=6).map(plt.scatter,"1st","2nd").add_legend()
plt.show()
| true |
0af5aeebf00da786d60c38868b8ff05e3d30f952 | Python | yangxiangtao/biji | /1-pbase/day06/practice/list_method.py | UTF-8 | 461 | 4.1875 | 4 | [] | no_license | #输入多行文字,存入列表中
# 每次输入后回车算作一行,任意输入多行文字
# 当直接输入回车时(即空行时算作结束)
#要求
# 1)按原输入的内容在屏幕输出
# 2)
L = []
while True:
s = input('请输入:')
if not s:
break
L.append(s)
print('L:',L)
print('您输入的内容是:')
for text in L:
print(text)
print('您输入了',len(text),'行')
print('您输入了',len(L),'字') | true |
ad46378c84085ac2a16643516d8d82229e63616b | Python | Ramesh-Bhutka/Scientific-Calculator | /calculator.py | UTF-8 | 6,609 | 2.875 | 3 | [] | no_license | from tkinter import *
import math
import parser
import tkinter.messagebox
root = Tk()
root.title("Scientific Calculator")
root.configure(background="powder blue")
root.resizable(width=False, height=False)
root.geometry("480x568")
calc = Frame(root)
calc.grid()
# ======================Menu and Functions=========================
def iExit():
    """Ask the user to confirm, then destroy the application window.

    Fixes: the old body shadowed the function's own name with the dialog
    result and compared a bool with ``> 0``.
    """
    confirmed = tkinter.messagebox.askyesno("Scientific Calculator", "Confirm if you want to exit")
    if confirmed:
        root.destroy()
def Standard():
    """Switch the window to the fixed-size standard-calculator layout."""
    root.geometry("480x568")
    root.resizable(width=False, height=False)
def Scientific():
    """Widen the fixed-size window for the scientific layout."""
    root.geometry("950x568")
    root.resizable(width=False, height=False)
menubar = Menu(calc)
filemenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="Standard", command=Standard)
filemenu.add_separator()
menubar.add_command(label="Exit", command=iExit)
# ============================= Functions ==========================
class Calc():
    """State machine behind the calculator buttons.

    Mirrors the text shown in the module-level ``txtDisplay`` Entry:
    *current* is the number being typed, *total* the running result,
    *op* the pending operator name; *input_value* means the next digit
    starts a new number, *check_sum* means an operator is pending, and
    *result* means '=' was just pressed.
    """
    def __init__(self):
        self.total = 0
        self.current = ""
        self.input_value = True
        self.check_sum = False
        self.op = ""
        self.result = False
    def numberEnter(self, num):
        """Append digit or decimal point *num* to the number being typed."""
        self.result = False
        firstnum = txtDisplay.get()
        secondnum = str(num)
        if self.input_value:
            # Start a fresh number after an operator / equals.
            self.current = secondnum
            self.input_value = False
        else:
            # Refuse a second decimal point in the same number.
            if secondnum == '.':
                if secondnum in firstnum:
                    return
            self.current = firstnum + secondnum
        self.display(self.current)
    def sum_of_total(self):
        """Handle '=': apply the pending operator, or take the display as total."""
        self.result = True
        self.current = float(self.current)
        if self.check_sum == True:
            self.valid_function()
        else:
            self.total = float(txtDisplay.get())
    def display(self, value):
        """Replace the Entry contents with *value*."""
        txtDisplay.delete(0, END)
        txtDisplay.insert(0, value)
    def valid_function(self):
        """Apply the pending operator *op* to (total, current) and show the result."""
        if self.op == "add":
            self.total += self.current
        if self.op == "sub":
            self.total -= self.current
        if self.op == "multi":
            self.total *= self.current
        if self.op == "divide":
            # NOTE(review): current == 0 raises ZeroDivisionError here.
            self.total /= self.current
        if self.op == "mod":
            self.total %= self.current
        self.input_value = True
        self.check_sum = False
        self.display(self.total)
    def operation(self, op):
        """Record operator *op*; pending operations chain left-to-right."""
        self.current = float(self.current)
        if self.check_sum:
            self.valid_function()
        elif not self.result:
            self.total = self.current
            self.input_value = True
        self.check_sum = True
        self.op = op
        self.result = False
    def Clear_Entry(self):
        """Clear only the number currently being typed (the 'C' button)."""
        self.result = False
        self.current = "0"
        self.display(0)
        self.input_value = True
    def all_Clear_Entry(self):
        """Clear the current entry and the running total (the 'CE' button)."""
        self.Clear_Entry()
        self.total = 0
    def squared(self):
        """Replace the displayed value with its square root (the '√' button)."""
        self.result = False
        self.current = math.sqrt(float(txtDisplay.get()))
        self.display(self.current)
    def mathsPM(self):
        """Toggle the sign of the displayed value (the '±' button)."""
        self.result = False
        self.current = -(float(txtDisplay.get()))
        self.display(self.current)
added_value = Calc()
# ========================== Entrybox ============================
txtDisplay = Entry(calc, font=('arial', 20, 'bold'), background="powder blue", bd=30, width=28, justify=RIGHT)
txtDisplay.grid(row=0, column=0, columnspan=4, pady=1)
txtDisplay.insert(0, "0")
# ========================= NumberPad ============================
numberpad = "789456123"
i = 0
btn = []
for j in range(2, 5):
for k in range(3):
btn.append(Button(calc, width=6, height=2, font=('arial', 20, 'bold'), bd=4, text=numberpad[i]))
btn[i].grid(row=j, column=k, pady=1)
btn[i]["command"] = lambda x=numberpad[i]: added_value.numberEnter(x)
i += 1
# ======================== Standard ==============================
# ======================== Row[1] =================================
btnClear = Button(calc, text=chr(67), width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=added_value.Clear_Entry).grid(row=1, column=0, pady=1)
btnAllClear = Button(calc, text=chr(67) + chr(69), width=6, height=2, font=('arial', 20, 'bold'), bd=4,
bg="powder blue",
command=added_value.all_Clear_Entry).grid(row=1, column=1, pady=1)
btnSq = Button(calc, text="√", width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=added_value.squared).grid(row=1, column=2, pady=1)
btnAdd = Button(calc, text="+", width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=lambda: added_value.operation("add")).grid(row=1, column=3, pady=1)
# ======================== Row[2,3,4] =================================
btnSub = Button(calc, text="-", width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=lambda: added_value.operation("sub")).grid(row=2, column=3, pady=1)
btnMult = Button(calc, text="*", width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=lambda: added_value.operation("multi")).grid(row=3, column=3, pady=1)
btnDiv = Button(calc, text=chr(247), width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=lambda: added_value.operation("divide")).grid(row=4, column=3, pady=1)
# ======================== Row[5] =================================
btnZero = Button(calc, text="0", width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=lambda: added_value.numberEnter(0)).grid(row=5, column=0, pady=1)
btnDot = Button(calc, text=".", width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=lambda: added_value.numberEnter(".")).grid(row=5, column=1, pady=1)
btnPM = Button(calc, text=chr(177), width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=added_value.mathsPM).grid(row=5, column=2, pady=1)
btnEquals = Button(calc, text="=", width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="powder blue",
command=added_value.sum_of_total).grid(row=5, column=3, pady=1)
root.config(menu=menubar)
root.mainloop()
| true |
d02e94bba8a09348291bf28359a0494239b43b6a | Python | HeroicKatora/UrfsBakery | /server/riotapi/__init__.py | UTF-8 | 5,548 | 2.625 | 3 | [] | no_license | import urllib3
import certifi
import json
import time
import sys
from argparse import ArgumentParser
from threading import Lock
from .RateLimit import RateLimit
from collections import defaultdict
defaultkey = None
'''This makes sure the api is initialized and a default instance can be fetched.
The defaultkey you give is an offer and does not have to be the defaultkey after this call. If you need to access the api with
a specific key, use that key in the call to get_api, too.
You should keep in mind that in case there is no default key set yet and no user input, then this call will raise a RuntimeError.
This signals that th api was not initialized properly. All subsequent calls to this module may fail.
'''
def init(key=None, userinput=True):
    """Ensure the module has a default API key.

    An already-set default wins over *key*; otherwise the key is taken from
    the ``-k`` command-line option or, with *userinput*, from a prompt.
    Raises RuntimeError when no key can be obtained.
    """
    global defaultkey
    if defaultkey or key:
        defaultkey = defaultkey or key
        return
    arg_parser = ArgumentParser()
    arg_parser.add_argument('-f', '--failed', action='store_false', dest='ignoreFailedFiles',
                            default=True, help='Retry previously failed game ids')
    arg_parser.add_argument('-k', default=None, action='store', dest='key', type=str,
                            help='Retry previously failed game ids')
    candidate = arg_parser.parse_known_args(sys.argv)[0].key
    if not candidate and userinput:
        print("To work correctly, the api needs to have a key, please enter it now or start again with option -k <key>.")
        candidate = input()
    if not candidate:
        raise RuntimeError("Api remains uninitialized since there was neither a default key nor user input")
    defaultkey = candidate
def low_limit():
    """Limiter for a development key — presumably RateLimit(count, window_seconds); confirm in RateLimit."""
    return RateLimit(3000, 12.0)
def high_limit():
    """Limiter for a production key — same (count, window) convention assumed."""
    return RateLimit(180000, 620.0)
# Maps api key -> zero-arg factory producing that key's limiter.  The default
# is a factory returning None, i.e. "no limiter configured"; Downloader.__init__
# calls key_limits[key]() and stores the result as self.limit.
key_limits = defaultdict(lambda:lambda:None)
'''Sets a function to return limit for an api key if there isn't one in place
If no api key is given, then the defaultkey is limited
'''
def limit(apikey=None, limit_fun=low_limit):
    # Register limit_fun for apikey unless a factory is already present;
    # with no key given, the module default key is limited.
    global defaultkey, key_limits
    apikey = apikey or defaultkey
    key_limits.setdefault(apikey, limit_fun)
class AnswerException(Exception):
    """Raised when the API answer is unusable (non-200 status or empty body).

    Attributes:
        msg: human-readable description.
        answer: the raw urllib3 response object for inspection.

    BUG FIX: the old __init__ evaluated ``Exception(msg)`` — creating and
    discarding a new exception — instead of initialising the base class, so
    ``str(exc)`` was empty.  ``super().__init__(msg)`` is used now.
    """
    def __init__(self, msg, answer):
        super().__init__(msg)
        self.msg = msg
        self.answer = answer
class Downloader:
    """An API python-binding. Requests can be done via #api_request.
    The class automatically limits the usage of the API to conform to
    the restrictions of a production key: 3000 rq/10s and 180.000rq/10min
    """
    def __init__(self, key, region):
        self.lock = Lock()
        global key_limits
        self.key = key
        # key_limits yields a zero-arg factory; it may produce None ("no limiter").
        self.limit = key_limits[self.key]()
        self.region = region
        self.api = urllib3.PoolManager( # https connector
            cert_reqs='CERT_REQUIRED', # Force certificate check.
            ca_certs=certifi.where(),  # Path to the Certifi bundle.
            maxsize = 3,
            num_pools = 10,
            timeout = 5
        )

    def api_request(self, path, _fields = None, **data):
        """Makes an API request from the server, waiting if necessary to keep below the datacap.
        @param path: the API path of the requested data, e.g. "/api/lol/tr/v2.2/match/263959903".
                     A leading slash is mandatory
        @param _fields: the fields to forward to the raw HTTP-request. leading underscore to
                        prevent conflicts with
        @param data: additional parameters for the request, e.g. includeTimeline=True
        @return: a parsed version of the received JSON response
        @raise AnswerException: when the HTTP status of the response is not 200.
        """
        if self.limit is not None:
            self.limit.inc()
        url = "https://{region}.api.pvp.net{path}".format(region = self.region, path = path)
        data['api_key'] = self.key
        url += '?' + '&'.join(str(arg) + '=' + str(data[arg]) for arg in data)
        print(url)
        with self.lock:
            answer = self.api.request('GET', url, fields = _fields)
        readdata = answer.data.decode('utf-8')
        retryTime = 0
        if 'Retry-After' in answer.headers:
            retryTime = answer.headers['Retry-After']
        if answer.status == 429:
            # BUG FIX: this branch referenced nonexistent self.limit_fast /
            # self.limit_slow (stale names from an earlier revision) and crashed
            # with AttributeError exactly when the API asked us to slow down.
            # The class only owns self.limit.
            # NOTE(review): Retry-After arrives as a string here — confirm that
            # RateLimit.dec accepts it.
            if self.limit is not None:
                self.limit.dec(retryTime)
            print("Limit exceeded received, slowing down")
        elif answer.status >= 500:
            print('Issues on the server side, hope for the best')
        if answer.status != 200:
            raise AnswerException('Error code returned by api: {err}'.format(err = answer.status), answer)
        elif not readdata:
            # Use a private status so callers can distinguish "empty body".
            answer.status = 719
            raise AnswerException('No data received in answer', answer)
        return json.loads(readdata)
downloader_map = {}
def getDownloader(apikey=None, region = 'global'):
    """Return the cached Downloader for (apikey, region), creating it on first use.

    With no key, the module default key is used; with no region, the
    'global' host serving the static endpoint is targeted.
    """
    global defaultkey, downloader_map
    key = apikey or defaultkey
    cache_key = (key, region)
    try:
        return downloader_map[cache_key]
    except KeyError:
        dl = Downloader(region=region, key=key)
        downloader_map[cache_key] = dl
        return dl
regions = ['br', 'eune', 'euw', 'jp', 'kr', 'lan', 'las', 'na', 'oce', 'pbe', 'ru', 'tr']
| true |
24a64811421905ed6a5a937b885d8f9cc9fde799 | Python | kapelner/HouseTurker | /Randomized_Data/answer_key.py | UTF-8 | 873 | 3.171875 | 3 | [
"MIT"
] | permissive | import random, sys
# Build a randomized answer key: shuffle-flip each caret-separated record,
# tag it 'A' (kept order) or 'B' (first two fields swapped), emit 5 per line.
filename = sys.argv[1]
outputname = sys.argv[2]
# NOTE(review): neither handle is explicitly closed; `with` blocks would be safer.
f = open(filename, encoding='utf-8')
output = open(outputname, 'w', encoding='utf-8')
inputarray = []
for line in f:
    # Split on every '^' (maxsplit=-1 means unlimited) and strip the trailing
    # newline from the fourth field.
    pair = line.split("^", -1)
    pair[3] = pair[3].rstrip()
    inputarray.append(pair)
length = len(inputarray)
answerarray = []
for i in range(length):
    pair = inputarray[i]
    # Coin flip: keep original order and mark 'A', or swap the first two
    # fields and mark 'B'.
    if random.random() > .50:
        pair.append("A")
        answerarray.append(pair)
    else:
        temp = pair[0]
        pair[0] = pair[1]
        pair[1] = temp
        pair.append("B")
        answerarray.append(pair)
# NOTE(review): int(len/5) floors, so up to four trailing records are dropped.
length2 = int (len(answerarray) / 5)
for i in range(length2):
    output_string = ""
    for j in range(5):
        index = 5 * i + j
        pair = answerarray[index]
        output_string += '^{0}^{1}^{2}^{3}^{4}'.format(pair[0], pair[1], pair[2], pair[3], pair[4])
        print ("parsing string ", index)
    output_string += "\n"
output.write(output_string) | true |
48870e12ead8e7c44fcd4101414000764ca4aef7 | Python | uchihanuo/helloworld | /Program/ds_using_dict.py | UTF-8 | 512 | 2.890625 | 3 | [] | no_license | ad = \
{
'Wangxian': '52253@any3.com',
'Gannengqiang': '41996@any3.com',
'Lanjianhua': '48765@any3.com',
'Zhongling': '60000@any.com',
'Yangnuo': '50855@any3.com'
}
print("Lanjianhua's address is", ad['Lanjianhua'])
del ad['Yangnuo']
print('\nThere are {} contacts in the address-book.'.format(len(ad)))
for name, address in ad.items():
print('Contact {} at {}'.format(name, address))
ad['Youhongyi'] = '52356@any3.com'
if 'Youhongyi' in ad:
print("\nYouhongyi's address is", ad['Youhongyi'])
| true |
12cca5a2b6d6930132e3b0090809dcfb8e9c58f9 | Python | Ai-Albert/pypysonar | /indexer.py | UTF-8 | 878 | 3.421875 | 3 | [] | no_license | """transform an ast to a namespace hierachy
"""
import parser
def index(tree):
    """Walk *tree* and return the namespace hierarchy built from it."""
    namespace = {}
    _walk(tree, namespace, level=0, separator=" ")
    return namespace
def _walk(node, namespace, level, separator):
    # Trace the traversal; *separator* repeated *level* times is the indent.
    print(level, ':', separator*level, node)
    should_index, name, position = parser.shouldIndex(node)
    if should_index:
        _add(namespace, name, position)
        # A namespace node opens a nested scope; children below are recorded
        # into that new scope instead of the current one.
        if parser.isNamespace(node):
            namespace = parser.createNamespace(namespace, name, position)
    else:
        assert not should_index
        # Sanity check: a node that is not indexed must not open a namespace.
        assert not parser.isNamespace(node)
    for child in parser.getChildren(node):
        _walk(child, namespace, level+1, separator)
def _add(namespace, name, position):
assert name and position
try:
namespace[name].append(position)
except KeyError:
namespace[name] = [position]
| true |
57526fa772668639d1b2289ccb40b81e0bca6e47 | Python | bigtreeljc/torchloop | /torchloop/util/fs_utils.py | UTF-8 | 278 | 2.8125 | 3 | [] | no_license | from io import open
import glob
def readLines(file):
    """Read a small UTF-8 text file and return its lines run through unicodeToAscii.

    BUG FIX: the body referenced an undefined name ``filename`` (the
    parameter is ``file``) and leaked the file handle; a ``with`` block
    is used now.
    NOTE(review): ``unicodeToAscii`` is not defined in this module — it is
    presumably provided elsewhere in the project; confirm.
    """
    with open(file, encoding='utf-8') as handle:
        lines = handle.read().strip().split('\n')
    return [unicodeToAscii(line) for line in lines]
def findFiles(path):
    """Return every filesystem path matching the glob pattern *path*."""
    return list(glob.iglob(path))
| true |
ef98afc2bebbf2aa06ef875948a5c41cf38fd938 | Python | max3koz/Karate | /Data_Filter.py | UTF-8 | 4,617 | 3.15625 | 3 | [] | no_license | import xlrd
import xlwt
import array
# Ввод данных для проведения сортировки данных
print("Введите название файла со списком участников соревнований без расширения:")
Name_Workbook_Competitor = input() + ".xls"
Workbook_Competitor = xlrd.open_workbook(Name_Workbook_Competitor)
Worksheet_Competitor = Workbook_Competitor.sheet_by_name('Участники')
print("Введите название соревнований:")
Name_Competition = input()
print("Ведеите число дня соревнований, например, 23:")
Day_Competition = input()
print("Ведеите число месяца соревнований, например, 02:")
Month_Competition = input()
print("Ведеите число года соревнований, например, 2017:")
Year_Competition = input()
print("Введите виды соревнований, например, ката, кумитэ, котен или Джунро:")
List_Type_Competition = []
Type_Competition = "+"
Qty_Type_Competition = 0
while Type_Competition != "":
Type_Competition = input()
if Type_Competition == "ката" or Type_Competition == "Ката" or Type_Competition == "kata" or Type_Competition == "Kata":
Type_Competition = Worksheet_Competitor.cell(0, 10).value
List_Type_Competition.append(Type_Competition)
elif Type_Competition == "кумитэ" or Type_Competition == "Кумитэ" or Type_Competition == "kumite" or Type_Competition == "Kumite":
Type_Competition = Worksheet_Competitor.cell(0, 11).value
List_Type_Competition.append(Type_Competition)
elif Type_Competition == "котен" or Type_Competition == "Котен" or Type_Competition == "koten" or Type_Competition == "Koten" or Type_Competition == "Джунро" or Type_Competition == "джунро" or Type_Competition == "dzhunro" or Type_Competition == "Dzhunro":
Type_Competition = Worksheet_Competitor.cell(0, 8).value
List_Type_Competition.append(Type_Competition)
elif Type_Competition == "":
break
else:
print("Нет такого вида соревнований.")
Qty_Type_Competition += 1
print (List_Type_Competition)
print("Введите категории спортменов:")
List_Type_Category = []
Type_Category = "+"
Qty_Type_Category = 0
while Type_Category != "":
Type_Category = input()
if Type_Category == "а" or Type_Category == "A" or Type_Category == "а" or Type_Category == "А":
List_Type_Category.append("А")
elif Type_Category == "б" or Type_Category == "Б" or Type_Category == "b" or Type_Category == "B":
List_Type_Category.append("Б")
elif Type_Category == "":
break
else:
print("Нет такой категории.")
Qty_Type_Category += 1
print (List_Type_Category)
print("Введите возрастные категории для ката:")
List_Type_Age_Kata = []
Type_Age_Kata = "+"
Qty_Type_Age_Kata = 0
while Type_Age_Kata != "":
Type_Age_Kata = input()
if Type_Age_Kata != "":
List_Type_Age_Kata.append(Type_Age_Kata)
else:
break
Qty_Type_Age_Kata += 1
print (List_Type_Age_Kata)
print("Введите возрастные категории для котен ката:")
List_Type_Age_Koten = []
Type_Age_Koten = "+"
Qty_Type_Age_Koten = 0
while Type_Age_Koten != "":
Type_Age_Koten = input()
if Type_Age_Koten != "":
List_Type_Age_Koten.append(Type_Age_Koten)
else:
break
Qty_Type_Age_Koten += 1
print (List_Type_Age_Koten)
print("Введите возрастные категории для кумитэ:")
List_Type_Age_Kumite = []
Type_Age_Kumite = "+"
Qty_Type_Age_Kumite = 0
while Type_Age_Kumite != "":
Type_Age_Kumite = input()
if Type_Age_Kumite != "":
List_Type_Age_Kumite.append(Type_Age_Kumite)
else:
break
Qty_Type_Age_Kumite += 1
print (List_Type_Age_Kumite)
List_Type_Weight_Category = []
for i in range(Qty_Type_Age_Kumite):
    print("Введите граничный вес для категории ",List_Type_Age_Kumite[i])
    Border_Weight = int(input())
    # NOTE(review): a fresh empty list is appended on every iteration, yet the
    # two writes below always target elements [0] and [1].  On the first pass
    # only index [0] exists, so the [1].append(...) raises IndexError.  The
    # intent was probably two parallel lists (ages and border weights) created
    # once before the loop — confirm and fix.
    List_Type_Weight_Category.append([])
    List_Type_Weight_Category[0].append(int(List_Type_Age_Kumite[i]))
    List_Type_Weight_Category[1].append(int(Border_Weight))
print ("Rjytw")
#for i in range(len(List_Type_Weight_Category)):
# for j in range(len(List_Type_Weight_Category[i])):
# print(List_Type_Weight_Category[i][j], end=' ') | true |
2d2f737c83a1175804fe0cd48218570f3f80345c | Python | chainet/jerry | /万年历.py | UTF-8 | 983 | 3.578125 | 4 | [] | no_license | def isRun(year):
if (year%4==0 and year%100!=0) or (year%400==0):
return True
return False
def current_year_days(month, days, years):
monthlist = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30]
for i in range(month-1):
days += monthlist[i]
if month > 2 and isRun(years):
days += 1
return days
print('请输入年份(必须是1900年到2099年):')
years = int(input())
if years < 1900 or years > 2099:
print('错误')
exit()
if isRun(years):
print("闰年")
else:
print('平年')
print('请输入月份:')
month = int(input())
print('请输入日期:')
days = int(input())
days = current_year_days(month, days, years)
run = 0
for i in range(1900, years, 4):
if isRun(i):
run += 1
ping = (years - 1900) - run
pastdays =((run * 366) + (ping * 365) + days) - 1
day = pastdays % 7
weekday = ['星期一', '星期二', '星期三', '星期四', '星期五', '星期六', '星期日']
print(weekday[day]) | true |
c1ec48d9f16676a2de77355429c0848750ade634 | Python | gabwow/bookswap_nameswap | /WriteScript.py | UTF-8 | 1,158 | 2.671875 | 3 | [] | no_license | import sys
import re
import random
# Personalise a script: number the #PAGE_xN markers in order of appearance and
# substitute shuffled guest names for the [nameN] placeholders.
guestNames = "guests.txt"
scriptName = sys.argv[1]
outputName = "NamesAdded.txt"

# [nameN] placeholders (1-based) and #PAGE_<letter><digits> page markers.
nameRE = r"\[name([0-9]+)\]"
pageRE = "#PAGE_[A-Z][0-9]+"

page2number = {}

guests = []
with open(guestNames, "r") as guestFile:
    for line in guestFile:
        guests.append(line.strip())
random.shuffle(guests)

namePattern = re.compile(nameRE)
pagePattern = re.compile(pageRE)

with open(scriptName, "r") as readFile:
    with open(outputName, "w") as outputFile:
        pageCount = 1
        for line in readFile:
            pageId = pagePattern.search(line)
            if pageId:
                # First sighting of a page id assigns it the next number.
                if pageId.group(0) not in page2number:
                    page2number[pageId.group(0)] = pageCount
                    pageCount += 1
                line = line.replace(pageId.group(0), "Page " + str(page2number[pageId.group(0)]))
            hits = namePattern.search(line)
            if hits:
                index = int(hits.group(1)) - 1
                # BUG FIX: exactId used to be assigned only inside the in-range
                # branch, so the fallback branch either crashed with NameError
                # (first occurrence) or replaced a stale id from an earlier
                # line.  Compute it before branching.
                exactId = "[name" + hits.group(1) + "]"
                if index < len(guests):
                    outputFile.write(line.replace(exactId, guests[index]))
                else:
                    # More placeholders than guests: fall back to a stock name.
                    outputFile.write(line.replace(exactId, "Bill"))
            else:
                outputFile.write(line)
| true |
b38b3e291adc7d1ee1a8fd56fc0c9a7da3408392 | Python | aishahassan98/Manager-and-Vehicle-OOP | /OOP M and V/Manager.py | UTF-8 | 576 | 2.875 | 3 | [] | no_license | from Employee import Employee
class Manager(Employee):
    """An Employee who manages a list of staff members.

    Each staff member is expected to expose a `main_language` attribute.
    """

    def __init__(self, name, salary, staff):
        super().__init__(name, salary, staff)
        self.staff = staff

    def _devs_with_language(self, language):
        # Shared filter used by the language-specific accessors below;
        # factored out to remove the duplicated loop bodies.
        return [dev for dev in self.staff if dev.main_language == language]

    def all_java_devs(self):
        """Return the staff members whose main_language is "Java"."""
        return self._devs_with_language("Java")

    def all_python_devs(self):
        """Return the staff members whose main_language is "Python"."""
        return self._devs_with_language("Python")
| true |
68817544a25940a53aaf81cd6e051f559c889023 | Python | ChoiYoonJong/DataScience | /python_Pandas_Numpy/Pandas/Pandas06_04_GroupByChkPop_최윤종.py | UTF-8 | 790 | 3.484375 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import pandas
# In[2]:
# Notebook export comparing a manual per-year loop against pandas groupby
# on the gapminder dataset.
df = pandas.read_csv('../data/gapminder.tsv', sep='\t')
# In[ ]:
uniList = df['year'].unique()
print(type(uniList))
print(uniList, "")
# In[6]:
uniList = df['year'].unique()
print(type(uniList))
print(uniList,"\n====>")
# In[9]:
# Manual approach: filter the frame once per distinct year.
for idx in uniList:
    yearList = df[df["year"] == idx]
    print(len(yearList), "\n ====> 2 \n:", yearList.head(3), "n =====> 3 :", yearList.shape)
    print(yearList["pop"].mean())
# In[10]:
# groupby approach: one pass computes the same per-year means.
grouped_year_df = df.groupby('year')
print(type(grouped_year_df))
print(grouped_year_df["pop"])
# In[11]:
grouped_year_df["pop"].mean()
# In[12]:
uniList = df['year'].unique()
for idx in uniList:
    print(idx, "======> 1 :")
    grYear =df[df['year']==idx]
    print(grYear['pop'].mean())
| true |
3c92e46c4a67542636915e68fc86ffa03ada5a86 | Python | buyongtatt/Python-Tutorial-From-Clever-Programmer | /Web Scraping/main.py | UTF-8 | 1,051 | 3.015625 | 3 | [] | no_license | import pandas as pd
import requests
from bs4 import BeautifulSoup
# Scrape the 7-day forecast for a fixed NYC location from weather.gov and
# collect it into a DataFrame (one row per forecast period).
page = requests.get('https://forecast.weather.gov/MapClick.php?lat=40.7146&lon=-74.0071#.X-lMUtgzbIU')
soup = BeautifulSoup(page.content, 'html.parser')
week = soup.find(id='seven-day-forecast-list')
# print(week)
items = week.find_all(class_='tombstone-container')
# print(items[0])
# print(items[1].find(class_='period-name').get_text())
# print(items[1].find(class_='short-desc').get_text())
# print(items[1].find(class_='temp').get_text())
# One entry per forecast "tombstone": period label, description, temperature.
period_names = [item.find(class_='period-name').get_text() for item in items]
short_description = [item.find(class_='short-desc').get_text() for item in items]
temperature = [item.find(class_='temp').get_text() for item in items]
# print(period_names)
# print(short_description)
# print(temperature)
weather_stuff = pd.DataFrame(
    {'period': period_names,
     'short_descriptions': short_description,
     'temperature': temperature,
     }
)
print(weather_stuff)
weather_stuff.to_csv('weather.csv') | true |
7c831e713f0cf801466c4451aaa162952f215d3b | Python | xiaoqiangcs/LeetCode | /Remove Duplicates.py | UTF-8 | 372 | 2.84375 | 3 | [] | no_license | class Solution(object):
def removeDuplicates(self, A):
"""
:type nums: List[int]
:rtype: int
"""
if len(A)<=1:
return len(A)
A.sort()
NewIndex=0
for i in range(1, len(A)):
if A[i]!=A[NewIndex]:
NewIndex+=1
A[NewIndex]=A[i]
return NewIndex+
| true |
27e6aee4aa68bac92ddebd130aba7ac2fc7b195c | Python | SantaClaws91/BiteyBOT | /packages/steam/request.py | UTF-8 | 1,930 | 2.5625 | 3 | [] | no_license | import requests
import re
from packages.time.time import sec_to_string
from packages.log.log import mainLog
# NOTE(review): a live-looking Steam Web API key is hard-coded and committed
# to source; it should be moved to configuration/secret storage.
STEAM_WEB_API_KEY = "9E3142E7D7DC28C31FA9B0AF292043F7"
# IPlayerService method names: mode[0] = all-time library,
# mode[1] = games played in the last two weeks.
mode = [
    'GetOwnedGames',
    'GetRecentlyPlayedGames'
]
def get_steam_api(steamID, api=mode[0]):
    """Call the Steam IPlayerService web API and return the decoded JSON.

    Parameters:
        steamID: the Steam id of the user to query.
        api: API method name; one of the entries in `mode`.

    Returns:
        The parsed JSON payload (dict), or None when the request or the
        JSON decoding fails (the failure is logged).
    """
    url = (
        "http://api.steampowered.com/IPlayerService/"+ api +
        "/v0001/"
        "?key="+ STEAM_WEB_API_KEY +
        "&steamid="+ str(steamID) +
        "&format=json"
    )
    try:
        r = requests.get(url)
        return r.json()
    except Exception:
        # Bug fix: the original had `return` before mainLog.exception(),
        # so the failure was never logged. Log first, then return None.
        mainLog.exception('Steam API fail')
        return None
def time_played_seconds(appid, steamID, recent=False):
    """Return the seconds `steamID` has played `appid`, or None if unknown.

    When `recent` is True, query the recently-played list and use the
    two-week playtime instead of the all-time playtime.
    """
    if recent:
        time_key = 'playtime_2weeks'
        payload = get_steam_api(steamID, mode[1])
    else:
        time_key = 'playtime_forever'
        payload = get_steam_api(steamID, mode[0])
    if not payload:
        return None
    # .get() avoids a KeyError when the API omits 'response' or 'games'
    # (the original indexed these keys directly and could crash).
    response = payload.get('response')
    if not response:
        return None
    games = response.get('games')
    if not games:
        return None
    for game in games:
        if game['appid'] == appid:
            # The API reports playtime in minutes; convert to seconds.
            return game[time_key] * 60
    return None
def replace_string(string, steamID):
    """Expand a $played(appid) or $playedrecent(appid) token in `string`.

    Returns the string unchanged when no token is present, and an empty
    string when the playtime lookup fails.
    """
    recent = False
    match = re.match(r'.*\$played\((\d*)\).*', string)
    if match is None:
        match = re.match(r'.*\$playedrecent\((\d*)\).*', string)
        recent = True
    if match is None:
        # No token of either kind: nothing to substitute.
        return string
    appid = match.group(1)
    if recent:
        token = '$playedrecent(' + appid + ')'
    else:
        token = '$played(' + appid + ')'
    time_delta = time_played_seconds(int(appid), steamID, recent)
    if time_delta is None:
        return ""
    return string.replace(token, sec_to_string(time_delta))
| true |
e9e9b51be665d951f969c06ead51e9a585a5aba5 | Python | seejiewei/UECM3763_assign2 | /download_data.py | UTF-8 | 1,086 | 2.921875 | 3 | [] | no_license | from pandas.io.data import DataReader as DR
from datetime import datetime as dt
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# COLLECT DATA FROM 1/1/2011 TO 1/5/2015 FOR RHB CAPITAL BERHAD
start = dt(2011, 1, 1)
end = dt(2015, 5, 1)
data = DR("1066.KL", 'yahoo', start, end)
# calculate rhb moving average
rhb = data['Close']
# NOTE(review): pd.rolling_mean is the pre-0.18 pandas API; newer pandas
# uses rhb.rolling(5).mean().
moving_average = pd.rolling_mean(rhb,5)
#PLOT RHB MOVING AVERAGE
a = len(moving_average)
# x starts at day 5 because the first 4 rolling values are undefined.
x_axis = np.arange(a) + 5
y_axis = moving_average
plt.xlabel('Days $n$')
plt.ylabel('5-day Moving Average')
plt.plot(x_axis,y_axis)
plt.title('RHB CAPITAL BERHAD 5-day Moving Average')
plt.show()
# COLLECT DATA FOR KLCI INDEX FOR SAME DURATION
mask = DR("^KLSE", 'yahoo', start, end)
#collect the closing data of RHB CAPITAL BERHAD and KLCI
combine = ['1066.KL', '^KLSE']
rhb_klse_close_value = DR(combine, 'yahoo', start, end)['Close']
# calculate correlation between RHB CAPITAL BERHAD and KLCI Index
correlation = rhb_klse_close_value.corr()
print ('Correlation between RHB CAPITAL BERHAD and KLCI Index =')
print(correlation) | true |
e77102c5fdca7c24aa996698951cfbabfd6b04a3 | Python | PaulGuo5/Leetcode-notes | /notes/0333/0333.py | UTF-8 | 983 | 3.078125 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def largestBSTSubtree(self, root: TreeNode) -> int:
        """Return the node count of the largest subtree of `root` that is a BST."""
        def dfs(root):
            # Post-order walk returning (subtree max, subtree min, node count,
            # is-BST). The -inf/+inf sentinels make an empty child compare
            # correctly against any real value.
            nonlocal res
            if not root:
                return -float('inf'), float('inf'), 0, True
            max_left, min_left, cnt_nodes_left, isBST_left = dfs(root.left)
            max_right, min_right, cnt_nodes_right, isBST_right = dfs(root.right)
            cnt_nodes = cnt_nodes_left+cnt_nodes_right+1
            isBST = False
            # max_left < val guarantees every left value < val; val < min_right
            # guarantees val < every right value.
            if max_left < root.val < min_right:
                isBST = isBST_left and isBST_right
            if isBST:
                # Track the best BST size seen anywhere in the tree.
                res = max(res, cnt_nodes)
            return max(max_left, root.val, max_right), min(min_left, min_right, root.val), cnt_nodes, isBST
        res = 0
        dfs(root)
        return res
| true |
9b364b8dcadf8619362e071356dff2b98a2ccb44 | Python | m2studio/DogFinderApi | /examples/get-dogs.py | UTF-8 | 164 | 2.546875 | 3 | [] | no_license | import requests
# Placeholder customer id; appended as a path segment in the request below.
customer_id = 'xxx-xxxx-xxxx-1'
r = requests.get('https://dog-finder01.herokuapp.com/get-dogs/' + customer_id)
print(r.status_code)
print(r.json()) | true |
6a82d275a4696c125e17bba0ce0cefcddb3692af | Python | bbw7561135/eblstud | /misc/bin_energies.py | UTF-8 | 780 | 3.34375 | 3 | [
"BSD-3-Clause"
] | permissive | # Auxilliary functions to calculate bin boundary energies
import numpy as np
def calc_bin_bounds(X):
    """
    calculate bin boundaries for array x assuming that x values lie at logarithmic bin center

    Parameters
    ----------
    X: n-dim array (or array-like) with logarithmic center values

    Returns
    -------
    (n+1) dim array with bin boundaries
    """
    # Accept plain sequences as well as ndarrays (generalization).
    X = np.asarray(X)
    bin_bounds = np.zeros(X.shape[0] + 1)
    # Inner boundaries are the geometric means of neighbouring centers;
    # vectorized instead of the original Python loop.
    bin_bounds[1:-1] = np.sqrt(X[:-1] * X[1:])
    # Outer boundaries extrapolate so each center stays the geometric
    # mean of its two boundaries.
    bin_bounds[0] = X[0] ** 2. / bin_bounds[1]
    bin_bounds[-1] = X[-1] ** 2. / bin_bounds[-2]
    return bin_bounds
# Energy dispersion: E: ndim array, Etrue: scalar or ndim array, sigE: dispersion
def edisp(E, Etrue, sigE):
    """Gaussian energy-dispersion kernel evaluated at E for true energy Etrue.

    Converted from a name-bound lambda to a def (PEP 8 E731); behavior is
    unchanged.
    """
    return np.exp(-0.5 * (E - Etrue) ** 2. / sigE ** 2.) / np.sqrt(2 * np.pi) / sigE
| true |
2c5758f96abb8d6dc9ac772181e85c82b6d2b47a | Python | Constancellc/Demand-Model | /NTS/vehicle_feature_vector.py | UTF-8 | 4,061 | 2.625 | 3 | [] | no_license | import csv
import matplotlib.pyplot as plt
import random
import numpy as np
import scipy.ndimage.filters as filt
# Build per-vehicle speed profiles (mph per minute-of-week) from NTS trip rows.
rawData = '../../Documents/UKDA-5340-tab/constance-trips.csv'
profiles = {}
vehicles = []
chosen = ['2014004729']#,'2014008246','2014004729']
with open(rawData,'rU') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        # row[8] = survey year; keep 2014 only.
        if row[8] != '2014':
            continue
        vehicle = row[2]
        if vehicle not in chosen:
            continue
        try:
            # Minute-of-week: minute-of-day + (weekday code - 2) * 1440.
            start = int(row[9])+(int(row[6])-2)*24*60
            end = int(row[10])+(int(row[6])-2)*24*60
            dist = float(row[11])
        except:
            # Skip rows with unparsable time/distance fields.
            continue
        if vehicle not in vehicles:
            vehicles.append(vehicle)
            # One slot per minute of the week, initialised to 0 (parked).
            profiles[vehicle] = [0]*(24*60*7)
        if start >= 24*60*7:
            start -= 24*60*7
        if end >= 24*60*7:
            end -= 24*60*7
        if start < end:
            d = dist/(end-start)
            d = d*60 # miles/min -> mph
            for i in range(start,end):
                profiles[vehicle][i] = d
        else:
            # Trip wraps past the end of the week: fill both segments.
            d = dist/(24*60*7+end-start)
            d = d*60
            for i in range(start,24*60*7):
                profiles[vehicle][i] = d
            for i in range(0,end):
                profiles[vehicle][i] = d
# Figure 1: per-day speed trace (left column) and a 48-bin distance
# distribution feature vector (right column) for the chosen vehicle.
days = ['Mon','Tue','Wed']
plt.figure()
plt.rcParams["font.family"] = 'serif'
for day in range(3):
    plt.subplot(3,2,day*2+1)
    plt.title(days[day],y=0.7)
    p = profiles[chosen[0]][24*60*day:24*60*(day+1)]
    # Aggregate 30-minute bins, normalised by total daily "speed mass".
    p2 = [0.0]*48
    for t in range(1440):
        p2[int(t/30)] += p[t]/sum(p)
    plt.plot(np.linspace(0,24,1440),p)
    plt.xticks([4,12,20],['04:00','12:00','20:00'])
    plt.xlim(0,24)
    plt.ylim(0,70)
    plt.grid(ls=':')
    plt.ylabel('Speed (mph)')
    plt.subplot(3,2,day*2+2)
    plt.title(days[day],y=0.7)
    plt.bar(range(1,49),p2,zorder=3)
    plt.xlim(0.5,48.5)
    plt.ylim(0,0.35)
    plt.grid(ls=':')
plt.tight_layout()
plt.savefig('../../Dropbox/thesis/chapter3/img/example_fv.eps', format='eps',
            dpi=1000, bbox_inches='tight', pad_inches=0)
# Synthetic example probability vectors (two half-probability spikes each).
p = []
for i in range(3):
    p.append([0.0]*48)
p[0][14] = 0.5
p[0][36] = 0.5
p[1][15] = 0.5
p[1][37] = 0.5
p[2][24] = 0.5
p[2][25] = 0.5
plt.figure(figsize=(9,2))
ttls = ['(a)','(b)','(c)']
for i in range(3):
    plt.subplot(1,3,i+1)
    plt.bar(range(1,49),p[i])
    plt.title(ttls[i],y=0.8)
    plt.grid()
    plt.xlim(0.5,48.5)
    plt.xticks([1,12,24,36,48])
    plt.ylim(0,0.6)
plt.tight_layout()
plt.savefig('../../Dropbox/thesis/chapter3/img/example_dist_prob.eps', format='eps',
            dpi=300, bbox_inches='tight', pad_inches=0)
# Same vectors after Gaussian smoothing (sigma = 1 bin).
plt.figure(figsize=(9,2))
ttls = ['(a)','(b)','(c)']
for i in range(3):
    plt.subplot(1,3,i+1)
    plt.bar(range(1,49),filt.gaussian_filter1d(p[i],1))
    plt.title(ttls[i],y=0.8)
    plt.grid()
    plt.xlim(0.5,48.5)
    plt.xticks([1,12,24,36,48])
    plt.ylim(0,0.4)
plt.tight_layout()
plt.savefig('../../Dropbox/thesis/chapter3/img/example_dist_prob2.eps', format='eps',
            dpi=300, bbox_inches='tight', pad_inches=0)
plt.show()
# Week-long speed heatmaps (7 weekday rows x 1440 minute columns) for the
# first num_plot parsed vehicles.
num_plot = 3
plotted_profiles = {}
plt.rcParams["font.family"] = 'serif'
t = np.linspace(0,24*7,num=24*60*7)
x = np.linspace(120,1320,num=6)
x_ticks = ['02:00','06:00','10:00','14:00','18:00','22:00']
y_ticks = ['M','','W','','F','','S']
for i in range(0,num_plot):
    plt.subplot(num_plot,1,i+1)
    heatmap = []
    for j in range(0,7):
        heatmap.append([0]*24*60)
    ID = vehicles[i]#int(random.random()*len(vehicles))]
    plotted_profiles[i] = profiles[ID]
    # Unroll the flat minute-of-week profile into a 7x1440 grid.
    c = 0
    for j in range(0,7):
        for k in range(0,1440):
            heatmap[j][k] = profiles[ID][c]
            c += 1
    plt.grid(ls=':')
    plt.imshow(heatmap,aspect=60,cmap='Blues')
    plt.yticks(range(0,7),y_ticks)
    plt.xticks(x,x_ticks)
    plt.ylabel('Weekday')
    #plt.title(ID)
plt.tight_layout()
#plt.savefig('../../Dropbox/thesis/chapter3/img/example_nts.eps', format='eps',
#            dpi=1000, bbox_inches='tight', pad_inches=0)
plt.show()
| true |
d057cb5302700e93e3adbc235df6e175336dec5d | Python | Amudah41/EPAM_homeworks | /hw3/tasks/task32.py | UTF-8 | 1,080 | 3.5 | 4 | [] | no_license | # Here's a not very efficient calculation function that calculates something important::
import hashlib
import random
import struct
import time
import timeit
from multiprocessing import Pool
def slow_calculate(value):
    """Some weird voodoo magic calculations"""
    # Simulated latency (1-3 s); the exercise text forbids modifying this
    # function, which is why the sleep stays.
    time.sleep(random.randint(1, 3))
    data = hashlib.md5(str(value).encode()).digest()
    # Sum of the 16 digest bytes, unpacked as little-endian unsigned chars.
    return sum(struct.unpack("<" + "B" * len(data), data))
# Calculate total sum of slow_calculate() of all numbers starting from 0 to 500.
# Calculation time should not take more than a minute. Use functional capabilities of multiprocessing module.
# You are not allowed to modify slow_calculate function.
def pallelization(n: int) -> float:
    """Run slow_calculate over 0..500 on a pool of `n` worker processes.

    Returns the elapsed wall-clock time in seconds; the summed result
    itself is discarded (only the timing is of interest here).
    Note: the return annotation was `int` but the value is a float delta.
    """
    starttime = timeit.default_timer()
    with Pool(n) as p:
        sum(p.map(slow_calculate, range(501)))
    return timeit.default_timer() - starttime
"""
The caulculation time is 57.14936398999998, count of processes is: 20
The caulculation time is 21.239177954999832, count of processes is: 60
The caulculation time is 7.007313468000575, count of processes is: 240
"""
| true |
4d8d3a20880fc53c04069a550d3d17f872bd97ff | Python | giri92431/DataVisulization | /grinder.py | UTF-8 | 477 | 2.71875 | 3 | [] | no_license | import pandas as pd
import numpy as np
# Open the workbook handle; per-sheet parsing is currently disabled below.
df= pd.ExcelFile("Book1.xlsx")
# CompanyShareNBO = df.parse('Company Share NBO')
# CompanyShareGBO = df.parse('Company Share GBO')
# CompanyShareNBOL = df.parse('Company Share NBOL')
# CompanyShareGBOL = df.parse('Company Share GBOL')
# BrandShareLBN = df.parse('Brand Share LBN')
# BrandShareGBN = df.parse('Brand Share GBN')
# BrandShareLBNL = df.parse('Brand Share LBNL')
# BrandShareGBNL = df.parse('Brand Share GBNL')
# Prints the ExcelFile object itself, not sheet contents.
print(df)
| true |
48183efd61bc237dd4a0f088b695f5998dbcdc33 | Python | chrissnell/Lightcube | /examples/lightcube_client.py | UTF-8 | 1,987 | 2.890625 | 3 | [] | no_license | #
# lightcube_client.py - A demo client that uses the drawing library to draw a few simple primatives and
# send them to the Lightcube (or simulator app)
import Lightcube
# Define some generic colors
RED = Lightcube.Color(rgb=0xFF0000)
WHITE = Lightcube.Color(rgb=0xffffff)
BLUE = Lightcube.Color(rgb=0x0000FF)
GREEN = Lightcube.Color(rgb=0x00FF00)
YELLOW = Lightcube.Color(rgb=0xffc400)
BLACK = Lightcube.Color(rgb=0x000000)
GREY = Lightcube.Color(rgb=0x222222)
# Create a new Frame
myframe = Lightcube.Frame(retain_delay=0xA)
# and a FrameRenderer
myrenderer = Lightcube.FrameRenderer(frame=myframe)
# Define the lower-left corner of a box
box_ll = Lightcube.Coordinate(x=0, y=0)
# and draw a 4x4 red box starting there (the old comment said 9x12)
myrenderer.draw_box(box_ll, 4, 4, RED)
# Define the start and end points of the line
line_start = Lightcube.Coordinate(x=0, y=0)
line_end = Lightcube.Coordinate(x=0, y=7)
# and draw a green line between them (the old comment said red)
myrenderer.draw_line(line_start, line_end, GREEN)
myrenderer.draw_line(Lightcube.Coordinate(x=4, y=0), Lightcube.Coordinate(x=4, y=2), BLUE)
myrenderer.draw_line(Lightcube.Coordinate(x=5, y=0), Lightcube.Coordinate(x=5, y=1), BLUE)
myrenderer.draw_line(Lightcube.Coordinate(x=7, y=0), Lightcube.Coordinate(x=2, y=7), YELLOW)
# White points tracing the perimeter of the square (5,5)-(7,7)
myrenderer.draw_point(Lightcube.Coordinate(x=5, y=5), WHITE)
myrenderer.draw_point(Lightcube.Coordinate(x=5, y=6), WHITE)
myrenderer.draw_point(Lightcube.Coordinate(x=5, y=7), WHITE)
myrenderer.draw_point(Lightcube.Coordinate(x=6, y=7), WHITE)
myrenderer.draw_point(Lightcube.Coordinate(x=7, y=7), WHITE)
myrenderer.draw_point(Lightcube.Coordinate(x=7, y=6), WHITE)
myrenderer.draw_point(Lightcube.Coordinate(x=7, y=5), WHITE)
myrenderer.draw_point(Lightcube.Coordinate(x=6, y=5), WHITE)
# Create a new assembled frame packet
packet = Lightcube.AssembledFramePacket(frame=myframe)
# and populate it with data
packet.create_packet()
# and send it over the wire
packet.send_packet("192.168.17.2", 7070)
| true |
56cc46274b7a254af78f69dfdb4cbfa842c7b03d | Python | doshmajhan/freedough | /main.py | UTF-8 | 3,328 | 2.875 | 3 | [] | no_license | """
Logs into the dp dough API and gives our user
the maximum amount of hearts obtainable each day
@author: Cameron Clark
"""
import json
import logging
import requests
LOG_FILE = "status.log"
CREDENTIAL_FILE = "credentials.json"
# Populated by load_credentials(); create_session() reads the keys
# 'user', 'password' and 'secret' from it.
CREDENTIALS = dict()
API_URL = "https://api.dpdough.com"
AUTH_URL = "{}/oauth/token".format(API_URL)
POINTS_URL = "{}/api/game/points".format(API_URL)
USER_URL = "{}/api/game/users".format(API_URL)
# Points submitted per run -- per the module docstring, the daily maximum.
MAX_POINTS = 37000
# Mimic the official mobile client's request headers.
HEADERS = {
    'User-Agent': "calzonerun/26 CFNetwork/974.2.1 Darwin/18.0.0",
    'X-Unity-Version': "2018.1.7f1"
}
"""
Loads the credentials from the credentials.json file
"""
def load_credentials():
global CREDENTIALS
with open(CREDENTIAL_FILE) as cred_file:
CREDENTIALS = json.load(cred_file)
"""
Creates a requestions session that is authenticated with the
given credentials
Returns:
A requests session with the correct authentication
"""
def create_session():
session = requests.Session()
session.headers.update(HEADERS)
data = dict()
data['username'] = CREDENTIALS['user']
data['password'] = CREDENTIALS['password']
data['client_secret'] = CREDENTIALS['secret']
data['client_id'] = 2
data['grant_type'] = "password"
response = session.post(AUTH_URL, json=data)
if not response.ok:
logging.debug("Error authenticating: {} {}".format(response.status_code, response.text))
raise Exception("Error authenticating")
token_data = response.json()
session.headers.update({
"Authorization": "{} {}".format(token_data['token_type'], token_data['access_token'])
})
logging.info("Successfully authenticated")
return session
"""
Makes a PUT request to the API adding points to our account
Params:
session: the authenticated requests session
"""
def add_points(session):
data = dict()
data['customer_id'] = get_id(session)
data['game_points'] = MAX_POINTS
response = session.put(POINTS_URL, json=data)
if not response.ok:
logging.debug("Error putting points: {} {}".format(response.status_code, response.text))
raise Exception("Error putting points")
hearts = get_hearts(session)
logging.info("Successfully added points - Current Hearts: {}".format(hearts))
"""
Gets the data on a user
Params:
session: the authenticated requests session
Returns:
dictionary of data on the user
"""
def get_user_data(session):
data = dict()
data['email'] = CREDENTIALS['user']
response = session.post(USER_URL, json=data)
if not response.ok:
logging.debug("Error getting user: {} {}".format(response.status_code, response.text))
raise Exception("Error getting user")
return response.json()
"""
Gets the number of hearts a user has
Params:
session: the authenticated requests session
Returns:
the number of hearts
"""
def get_hearts(session):
user = get_user_data(session)
return user['dp_hearts']
"""
Gets the users id
Params:
session: the authenticated requests session
Returns:
id of the user
"""
def get_id(session):
user = get_user_data(session)
return user['id']
if __name__ == '__main__':
    # Configure file logging, load credentials, authenticate, submit points.
    logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG)
    load_credentials()
    session = create_session()
    add_points(session)
| true |
8dc5af10673e35cd32f4041878ad3d1d45578c2f | Python | EojinK1m/Practice_Algorithm_Problems | /programmers/level2/다리를_지나는_트럭.py | UTF-8 | 712 | 2.84375 | 3 | [] | no_license | #https://programmers.co.kr/learn/courses/30/lessons/42583
def solution(bridge_length, weight, truck_weights):
    """Return the number of seconds for all trucks to cross the bridge.

    Parameters:
        bridge_length: number of unit positions on the bridge.
        weight: maximum total truck weight the bridge can carry at once.
        truck_weights: trucks in crossing order.

    Rewritten as a deque simulation: the original removed elements from
    `bridge` while iterating over it (skipping entries) and silenced an
    IndexError with a bare `except: pass`.
    """
    from collections import deque

    waiting = deque(truck_weights)
    # One slot per bridge position; 0 marks an empty slot.
    bridge = deque([0] * bridge_length)
    load = 0
    elapsed = 0
    while bridge:
        elapsed += 1
        # The truck (or gap) in the front slot leaves the bridge.
        load -= bridge.popleft()
        if waiting:
            if load + waiting[0] <= weight:
                # The next truck can drive on without exceeding the limit.
                truck = waiting.popleft()
                bridge.append(truck)
                load += truck
            else:
                # Too heavy: the bridge advances with an empty slot.
                bridge.append(0)
    return elapsed
20d3c7f146ec8e5725c4f3ef3b2fc3ed7eac8d3a | Python | archit342000/ResNet_keras | /resnet.py | UTF-8 | 2,585 | 2.734375 | 3 | [] | no_license | from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input, decode_predictions
from keras.datasets import cifar10
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
from keras.models import Model
from keras.regularizers import l2
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import SGD, Adam
import numpy as np
#----------Data Loading and Preprocessing----------#
# function for one hot encoding
def convert_to_one_hot(Y, C):
    """Return a (C, m) one-hot matrix for the integer labels in Y."""
    labels = Y.reshape(-1)
    identity = np.eye(C)
    # Row-select from the identity matrix, then transpose to class-major.
    return identity[labels].T
# load cifar10 data
# Load CIFAR-10 (note: images are 32x32, below ResNet50's usual input size).
(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
# convert labels to one hot encodings; the extra .T yields (m, 10) row-major
Y_train = convert_to_one_hot(Y_train, 10).T
Y_test = convert_to_one_hot(Y_test, 10).T
# preprocess train and test data
X_train = preprocess_input(X_train)
X_test = preprocess_input(X_test)
#--------------------------------------------------#
#------------------Make the model------------------#
# base pre-trained model, without the dense layers
base_model = ResNet50(input_shape=(32, 32, 3), weights="imagenet", include_top=False)
# add global average pooling layers and dense layers
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.6)(x)
x = Dense(512, activation="relu")(x)
x = Dropout(0.6)(x)
# L2-regularised softmax head for the 10 CIFAR classes
preds = Dense(10, activation="softmax", kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(x)
# final model
model = Model(inputs = base_model.input, outputs=preds)
# freeze all convolutional layers (train only the new dense head)
for layer in base_model.layers:
    layer.trainable = False
# compile the model
sgd = SGD(learning_rate=0.03, momentum=0.9, name = "sgd")
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=["accuracy"])
# print the summary of the model
model.summary()
#--------------------------------------------------#
# model callbacks: early stopping, best-checkpoint saving, LR reduction
early = EarlyStopping(monitor="val_accuracy", min_delta = 0.0001, patience=20, mode="auto")
checkpoint = ModelCheckpoint("tmp/checkpoint", monitor="val_accuracy", save_best_only=True, save_weights_only=False, mode="auto")
rlrop = ReduceLROnPlateau(monitor="val_accuracy", factor=0.3, min_delta=0.0001, patience=15, mode="auto")
# train the model (the test split doubles as validation data here)
model.fit(X_train, Y_train, batch_size = 128, epochs = 200, validation_data=(X_test, Y_test), callbacks=[early, checkpoint, rlrop])
predsTrain = model.evaluate(X_train, Y_train)
predsTest = model.evaluate(X_test, Y_test)
print("Training Accuracy: ", predsTrain[1])
print("Testing Accuracy: ", predsTest[1])
| true |
5dc47c0fd976fbef33e8e43d5b2aabd3639599b8 | Python | ducky-YFH/python | /利用多线程下载小说.py | UTF-8 | 909 | 2.65625 | 3 | [] | no_license | import requests
import re
import os
from threading import Thread
from lxml import etree
# Browser-like UA so the site serves normal pages.
headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6726.400 QQBrowser/10.2.2265.400'}
def get_story(url):
    """Fetch one listing page and print each novel's title, author and intro."""
    response = requests.get(url,headers=headers).text
    html = etree.HTML(response)
    top = html.xpath('//div[@class="info fl"]')
    storyDict = {}
    for i in top:
        title = i.xpath('.//h3/a/text()')[0]
        autor = i.xpath('.//div/text()')[0]
        introduce = i.xpath('.//div/text()')[1]
        storyDict[title] = autor +'\n'+ introduce +'\n'
        print(title+'\n'+storyDict[title]+'\n')
    print('---------------------------------------')
# One unjoined thread per listing page (pages 1..1999).
for i in range(1,2000):
    url = 'https://xiaoshuo.sogou.com/1_0_0_0_heat/?pageNo='+str(i)
    th = Thread(target=get_story,args=(url,))
    th.start()
| true |
7e73e64e1168535e5820089d28f4bc97afd5d7be | Python | rameez523/drf-events-api | /src/api/tests/test_endpoints.py | UTF-8 | 1,365 | 2.515625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import pytest
import os
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Event
from api.serializers import EventSerializer
# Define endpoint url globaly
os.environ.setdefault('BASE_URL', 'http://127.0.0.1:8000/api/')
BASE_URL = os.environ.get('BASE_URL')
@pytest.mark.django_db()
class TestsPublicEvents:
EVENT_URL = ''.join((BASE_URL, 'event/'))
@pytest.fixture()
def client(self):
"""Initialize DRF APIClient"""
yield APIClient()
@pytest.fixture()
def events(self):
"""Populate db with 4 events objects"""
for _ in range(4):
Event.objects.create(
event='test',
count=17
)
def test_creating_en_event(self, client):
payload = {'event': 'some', 'count': 77}
response = client.post(self.EVENT_URL, data=payload)
assert response.status_code == status.HTTP_201_CREATED
assert Event.objects.filter(event=payload['event']).count()
def test_listing_events(self, client, events):
spectedResponse = EventSerializer(
Event.objects.all(),
many=True
).data
actualResponse = client.get(self.EVENT_URL)
assert actualResponse.status_code == status.HTTP_200_OK
assert actualResponse.data == spectedResponse
| true |
eadf5e7957450522f04893e57ff1db5ba4f5dc4f | Python | jeremycryan/SevenPillars | /Spritesheet.py | UTF-8 | 2,019 | 2.96875 | 3 | [] | no_license | import pygame
import os
from Constants import *
class Sprite():
    """Horizontal sprite-sheet animator that blits frames to a pygame screen."""
    def __init__(self, file, framesize, frame_num, screen, player, scale, rev = False):
        # file: path to the sheet; framesize: (width, height) of one frame;
        # frame_num: number of frames; rev: play the animation backwards.
        self.source = pygame.image.load(os.path.join(file)).convert_alpha()
        self.frame_width = framesize[0]
        self.frame_height = framesize[1]
        self.curr_frame = 1
        self.rev = rev
        self.frame_num = frame_num
        if rev:
            # Reversed playback starts on the last frame.
            self.curr_frame = self.frame_num
        self.screen = screen
        self.scale = scale
        self.player = player
    def get_frame_rect(self, frame):
        """Return the (x, y, w, h) rect of a 1-based frame on the sheet."""
        framesize = (self.frame_width, self.frame_height)
        position = (self.frame_width * (frame - 1), 0)
        return position + framesize
    def tic(self, pos, halt = False):
        """Draw the current frame centred near pos and advance the animation."""
        # Centre on pos horizontally; the +50 shifts the draw point down.
        pos = (pos[0] - self.scale/2, pos[1] - self.scale/2 + 50)
        self.render_frame(self.curr_frame, pos)
        if not halt and not self.rev:
            self.curr_frame += 1
            if self.curr_frame > self.frame_num:
                # Wrapped past the end: restart and flag the player alive.
                self.curr_frame = 1
                self.player.state = STATE_ALIVE
        elif not halt:
            self.curr_frame -= 1
            if self.curr_frame == 0:
                self.curr_frame = self.frame_num
                self.player.state = STATE_ALIVE
    def render_frame(self, frame, pos):
        """Copy one frame out of the sheet, key out red, scale, and blit it."""
        surface = pygame.Surface((self.frame_width, self.frame_height)).convert_alpha()
        surface.fill((255, 0, 0))
        surface.set_alpha(127)
        position = self.get_frame_rect(frame)
        surface.blit(self.source, (0, 0), position)
        self.remove_trans(surface)
        surface = pygame.transform.scale(surface, (self.scale, self.scale))
        self.screen.blit(surface, pos)
    def remove_trans(self, img):
        """Make strongly red pixels fully transparent (the sheet's key color)."""
        # Per-pixel loop; O(w*h) per frame.
        width, height = img.get_size()
        for x in range(0, width):
            for y in range(0, height):
                r, g, b, alpha = img.get_at((x, y))
                if r > 180 and g < 50 and b < 50:
                    img.set_at((x, y), (r, g, b, 0))
| true |
d6fd4499c4edabbd524a24db45c9598925375936 | Python | chocoai/shujuren_Python | /4data_project/hand_in_hand_using_python_machine_learning_project/model_regression_1.py | UTF-8 | 416 | 2.75 | 3 | [] | no_license | import pandas as pd
import quandl
# Download GOOGL daily data and derive two percentage features.
df = quandl.get('WIKI/GOOGL')
print(df.head())
# Keep only the adjusted OHLCV columns.
df1 = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
# NOTE(review): df1 is a slice of df, so the assignments below may trigger
# pandas' SettingWithCopyWarning -- consider df[...].copy().
df1['HL_PCT'] = (df1['Adj. High'] - df1['Adj. Low']) / df1['Adj. Close'] * 100.0
df1['PCT_change'] = (df1['Adj. Close'] - df1['Adj. Open']) / df1['Adj. Open'] * 100.0
df2 = df1[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
print(df2.head())
| true |
5a8c4c621571a59b236aeba18a384b03c1e13419 | Python | rnjsvlfwp/algorithm | /week_4/01_02_delete_max_heap.py | UTF-8 | 2,431 | 4.21875 | 4 | [] | no_license | class MaxHeap:
    def __init__(self):
        # items[0] is an unused None sentinel so the root lives at index 1
        # and parent/child indices are i//2, 2i and 2i+1.
        self.items = [None]
def insert(self, value):
self.items.append(value)
cur_index = len(self.items) - 1
while cur_index > 1: # cur_index 가 1이 되면 정상을 찍은거라 다른 것과 비교 안하셔도 됩니다!
parent_index = cur_index // 2
if self.items[parent_index] < self.items[cur_index]:
self.items[parent_index], self.items[cur_index] = self.items[cur_index], self.items[parent_index]
cur_index = parent_index
else:
break
def delete(self):
# 1. 자리 변경: 루트 노드와 가장 마지막 노드의 자리를 변경한다.
self.items[1], self.items[-1] = self.items[-1], self.items[1]
# 2. 할당 및 삭제: 변경된 가장 마지막 노드를 다른 변수로 할당하고 삭제한다.
prev_max = self.items.pop()
cur_index = 1
while cur_index < len(self.items) - 1:
# 3. 비교1: 변경된 루트노드와 왼쪽 자식을 비교한다.
left_node_index = cur_index * 2
right_node_index = cur_index * 2 + 1
if self.items[right_node_index] < self.items[left_node_index] and self.items[left_node_index] > self.items[
cur_index]:
self.items[cur_index], self.items[left_node_index] = self.items[left_node_index], self.items[cur_index]
cur_index = left_node_index
# 4. 비교2: 변경된 루트노드와 오른쪽 자식을 비교한다.
elif self.items[right_node_index] > self.items[left_node_index] and self.items[right_node_index] > \
self.items[cur_index]:
# 5. 자리 변경: 자식 노드 중 더 큰 자식과 자리를 변경한다.
self.items[cur_index], self.items[right_node_index] = self.items[right_node_index], self.items[
cur_index]
cur_index = right_node_index
# 6. 종료 시점: 자식 노드가 없거나 자식 노드가 더 작을 때까지
return prev_max
# Demo: build a heap and pop the maximum once.
max_heap = MaxHeap()
max_heap.insert(8)
max_heap.insert(7)
max_heap.insert(6)
max_heap.insert(2)
max_heap.insert(5)
max_heap.insert(4)
print(max_heap.items)  # [None, 8, 7, 6, 2, 5, 4]
print(max_heap.delete())  # should return 8
print(max_heap.items)  # [None, 7, 5, 6, 2, 4]
| true |
055053f8eb9a5ee4ca358cbd63a551d38aae8515 | Python | siddharthchavan/programming-class | /week3/errors.py | UTF-8 | 929 | 4.46875 | 4 | [
"MIT"
] | permissive | # Example of error handling by Kabir Samsi
#Targets Value Errors
def value_error():
    """Prompt until the input parses as an int and return that int.

    Bug fix: the original re-prompted once inside `except` and then fell
    off the end of the function, returning None without ever parsing the
    retried input.
    """
    number = input("Enter a number: ")
    while True:
        try:
            return int(number)
        except ValueError:
            number = input("Not a valid number, try again: ")
#Targets ZeroDivision Errors
def zero_division_error():
    """Read two integers and print their quotient, re-prompting the
    divisor until the division succeeds.

    Bug fix: the original retried exactly once, so a second zero divisor
    crashed with an uncaught ZeroDivisionError.
    """
    number1 = value_error()
    number2 = value_error()
    while True:
        try:
            quotient = number1/number2
            print(quotient)
            return
        except ZeroDivisionError:
            number2 = int(input("Cannot divide by 0, try again: "))
#Targets Index Errors
def index_error(array):
    """Print array[index] for a user-supplied index, re-prompting while
    the index is out of range.

    Bug fix: the original retried exactly once, so a second bad index
    crashed with an uncaught IndexError.
    """
    index = int(input("Enter index you want to access: "))
    while True:
        try:
            print(array[index])
            return
        except IndexError:
            index = int(input("Index not in array, try again: "))
# Demo: each call blocks for interactive input on stdin.
print(value_error())
zero_division_error()
index_error([1, 2, 3, 4])
7fc95cfee7c60cba87d2e70fe4f331567cc6b0fe | Python | ka10ryu1/fontconv | /Tools/plot_diff.py | UTF-8 | 4,923 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*-coding: utf-8 -*-
#
help = 'logファイルの複数比較'
#
import json
import argparse
import numpy as np
import matplotlib.pyplot as plt
from func import argsPrint, getFilePath, sortTimeStamp
def command():
    """Build and parse the CLI options for comparing multiple log files."""
    # NOTE(review): `help` here is the module-level description string
    # defined above, which shadows the built-in help().
    parser = argparse.ArgumentParser(description=help)
    parser.add_argument('log_dir', nargs='+',
                        help='入力データセットのフォルダ')
    parser.add_argument('--auto_ylim', action='store_true',
                        help='ylim自動設定')
    parser.add_argument('-l', '--label', default='loss',
                        help='取得するラベル(default: loss, other: lr, all)')
    parser.add_argument('-o', '--out_path', default='./result/',
                        help='生成物の保存先(default: ./result/)')
    parser.add_argument('--no_show', action='store_true',
                        help='plt.show()を使用しない')
    return parser.parse_args()
def jsonRead(path):
    """Read a log file written by a chainer extension as JSON.

    [in] path: path to the log file
    [out] the decoded log data

    Exits the process if the file is not valid JSON.
    """
    try:
        with open(path, 'r') as log_file:
            decoded = json.load(log_file)
    except json.JSONDecodeError as e:
        print('JSONDecodeError: ', e)
        exit()
    return decoded
def subplot(sub, val, log, ylim, line, header):
    """
    Helper that fills one subplot with a series per log file.
    [in] sub: the subplot (axes) object
    [in] val: list of value series to plot
    [in] log: list of log-file names used as legend labels
    [in] ylim: True to auto-set the y limits (see --auto_ylim)
    [in] line: matplotlib linestyle for every series
    [in] header: prefix prepended to each legend label
    """
    # Draw major/minor grids as gray dotted lines; y axis is logarithmic.
    sub.grid(which='major', color='gray', linestyle=':')
    sub.grid(which='minor', color='gray', linestyle=':')
    sub.set_yscale("log")
    # When --auto_ylim is set:
    # ymax: the smallest of each series' value at index len/8
    #       (e.g. the 50th of 400 points)
    # ymin: 0.98 x the smallest value across all series
    if ylim:
        ymax = np.min([i[int(len(i) / 8)] for i in val])
        ymin = np.min([np.min(i)for i in val]) * 0.98
        sub.set_ylim([ymin, ymax])
        print('ymin:{0:.4f}, ymax:{1:.4f}'.format(ymin, ymax))
    # Plot every (series, label) pair; epochs start at 1 on the x axis.
    def getX(y):
        return list(range(1, len(y)+1))
    def getY(y):
        return np.array(y)
    def getLabel(header, body):
        return '[' + header + '] ' + body
    [sub.plot(getX(v), getY(v), label=getLabel(header, d), linestyle=line)
     for v, d in zip(val, log)]
def savePNG(plt, loc, name, dpi=200):
    """
    Save the current figure as a PNG.
    [in] plt: the pyplot module/object to save from
    [in] loc: legend location
    [in] name: output file name (without extension)
    [in] dpi: resolution used when saving
    """
    plt.legend(loc=loc)
    # NOTE(review): relies on the module-global `args` (set in __main__)
    # for the output directory -- confirm before reusing this elsewhere.
    plt.savefig(getFilePath(args.out_path, name, '.png'), dpi=dpi)
def plot(args, loc, name, solid_line, dotted_line='', no_show=False):
    """Collect one metric from every log file and draw a comparison chart.

    [in] args:        parsed command line options
    [in] loc:         legend location
    [in] name:        output file name (its last '_' suffix becomes the y label)
    [in] solid_line:  JSON key plotted with a solid line (validation)
    [in] dotted_line: JSON key plotted with a dotted line (training)
    [in] no_show:     True to skip plt.show()
    """
    solid_vals, dotted_vals, log_files = [], [], []
    for path in sortTimeStamp(args.log_dir, '.log'):
        log_files.append(path)
        print(path)
        entries = jsonRead(path)
        solid_vals.append([e[solid_line] for e in entries if solid_line in e])
        dotted_vals.append([e[dotted_line] for e in entries if dotted_line in e])

    # No .log file found at all: abort.
    if not solid_vals:
        print('[Error] .log not found')
        exit()

    # The requested key never appears in the first log: nothing to draw.
    if len(solid_vals[0]) == 0:
        print('[Error] data not found:', solid_line)
        return 0

    # Log-scale comparison chart: validation solid, training dotted.
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(111)
    plt.xlabel('epoch')
    plt.ylabel(name.split('_')[-1])
    subplot(ax, solid_vals, log_files, args.auto_ylim, '-', 'test ')
    plt.gca().set_prop_cycle(None)
    subplot(ax, dotted_vals, log_files, args.auto_ylim, ':', 'train')
    # Save, then optionally display.
    savePNG(plt, loc, name)
    if not no_show:
        plt.show()
def main(args):
    """Dispatch plotting according to the --label option."""
    label = args.label
    if label in ('loss', 'all'):
        plot(args, 'upper right', 'plot_diff_loss',
             'validation/main/loss', 'main/loss', no_show=args.no_show)
    if label in ('acc', 'all'):
        plot(args, 'lower right', 'plot_diff_acc',
             'validation/main/accuracy', 'main/accuracy', no_show=args.no_show)
    if label in ('lr', 'all'):
        plot(args, 'lower right', 'plot_diff_lr', 'lr', no_show=args.no_show)
# Script entry point: parse options, echo them, then draw the plots.
if __name__ == '__main__':
    args = command()
    argsPrint(args)
    main(args)
| true |
681daaef351143462a4ab150393600cfa06fb397 | Python | xuejieshougeji0826/leetcode_top100 | /4.py | UTF-8 | 674 | 3.34375 | 3 | [] | no_license | class Solution:
def findMedianSortedArrays(self, nums1,nums2):
i=0;j=0;
l1=len(nums1)
l2=len(nums2)
nums3=[]
while (i!=l1 or j!=l2):
if i >= l1 or j >= l2 :
return nums3
else:
if(nums1[i]<=nums2[j]):
try:
nums3.append(nums1[i])
i+=1
print(nums3)
except:
nums3.append(nums2[j])
j+=1
print(nums3)
print(nums3)
# Quick manual check of the solution.
solver = Solution()
first = [1, 2]
second = [0, 4]
print(solver.findMedianSortedArrays(first, second))
| true |
40c4ad7bfbf5c1150753b24e1c9226dc36fb7a11 | Python | Cosmos-Break/leetcode | /1323.6-和-9-组成的最大数字.py | UTF-8 | 237 | 2.828125 | 3 | [] | permissive | #
# @lc app=leetcode.cn id=1323 lang=python3
#
# [1323] 6 和 9 组成的最大数字
#
# @lc code=start
class Solution:
    def maximum69Number(self, num: int) -> int:
        """Largest number obtainable by changing at most one digit 6 to 9."""
        # str.replace with count=1 flips only the most significant 6.
        as_text = str(num)
        return int(as_text.replace("6", "9", 1))
# @lc code=end
| true |
91aa5f5898392082ba6de33e4bec8433f0af2ecb | Python | tata-LY/python | /study_oldboy/Day9/05.3_PriorityQueue.py | UTF-8 | 874 | 3.828125 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021-2-26 13:22
# @Author : liuyang
# @File : 05.3_PriorityQueue.py
# @Software: PyCharm
"""
class queue.Queue(maxsize=0) #先入先出 创建一个队列对象(队列容量),若maxsize小于或者等于0,队列大小没有限制
class queue.LifoQueue(maxsize=0) # 先进后出
class queue.PriorityQueue(maxsize=0) #存储数据时可设置优先级的队列
"""
import queue
# Demo: queue.PriorityQueue hands items back lowest priority value first.
q = queue.PriorityQueue()
for entry in [(5, 'liuyang'), (3, 'zhangjuan'), (1, 'liuzhangyi'), (7, 'liuzhangyiyi')]:
    q.put(entry)
print(q.queue)    # internal heap list, e.g. [(1, ...), (5, ...), (3, ...), (7, ...)]
print(q.qsize())  # 4
print(q.empty())  # False
print(q.full())   # False (no maxsize was given)
# Drain the queue: entries come out ordered by ascending priority value.
while not q.empty():
    print(q.get())
9580a1df5c607747134554a2b798bac73b06bef5 | Python | danielthorr18/forritun_git | /aefingar_forritun/daemi20-while.py | UTF-8 | 166 | 3.421875 | 3 | [] | no_license | turns = int(input("Sláðu inn tölu: "))
counter = 0
# Ask the user `turns` times and echo each answer.
for _ in range(turns):
    pick = input("Sláðu inn tölu: ")
    print("þú valdir", pick)
    counter += 1
c80f661dcab0f30490e87dbe93bc5415e1694e52 | Python | Zzeongyx2/bigData | /DAY_7/실습_3/실습(7).py | UTF-8 | 5,524 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[9]:
from selenium.webdriver import Chrome
import time
import sqlite3
from pandas.io import sql
import os
import pandas as pd
# In[10]:
from selenium import webdriver
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized"); # launch the browser maximized
browser = webdriver.Chrome('C:/Users/user/Downloads/chromedriver.exe', options=options) # path of the chromedriver executable
# In[11]:
# Open the data.go.kr portal
browser.get('https://www.data.go.kr/')
browser.implicitly_wait(5)
# In[12]:
# Navigate to the login screen
browser.find_element_by_xpath('//*[@id="header"]/div/div/div/div[2]/div/a[1]').click() # a CSS selector would work too
browser.implicitly_wait(5)
# In[13]:
# Type the user id (the Korean literal is a placeholder for real credentials)
browser.find_element_by_xpath('//*[@id="mberId"]').send_keys('사용자 아이디')
# In[14]:
# Type the password (placeholder)
browser.find_element_by_xpath('//*[@id="pswrd"]').send_keys('사용자 비밀번호')
# In[15]:
# Submit the login form
browser.find_element_by_xpath('//*[@id="loginVo"]/div[2]/div[2]/div[2]/div/div[1]/button').click()
browser.implicitly_wait(5)
# In[16]:
# Close the popup layer
browser.find_element_by_xpath('//*[@id="layer_popup_info_1"]/div[1]/a').click()
# In[17]:
# Click the "information sharing" menu
browser.find_element_by_xpath('//*[@id="M000400_pc"]/a').click()
# In[18]:
# Click the "data room" (자료실) menu
browser.find_element_by_xpath('//*[@id="M000402_pc"]/a').click()
# In[19]:
# 자료실 데이터 추출
def db_save(ARTICLE_LIST):
    """Append the scraped board rows (a pandas DataFrame) to the local sqlite DB."""
    db_path = os.path.join('.', 'sqliteDB')
    # The sqlite DB file is created on first connect.
    with sqlite3.connect(db_path) as con:
        try:
            # if_exists is one of {'fail', 'replace', 'append'} (default: fail).
            ARTICLE_LIST.to_sql(name='ARTICLE_LIST', con=con, index=False, if_exists='append')
        except Exception as e:
            print(str(e))
        print(len(ARTICLE_LIST), '건 저장완료..')
# In[20]:
# Collect every row of the board table into one DataFrame and persist it.
trs = browser.find_elements_by_xpath('//*[@id="searchVO"]/div[5]/table/tbody/tr')
df_list = []
for tr in trs:
    # Columns td[1]..td[5]: number, title, views, registered, changed.
    cells = [tr.find_element_by_xpath('td[%d]' % col).text for col in range(1, 6)]
    df_list.append(pd.DataFrame({
        'NO': [cells[0]],
        'TITLE': [cells[1]],
        'IQRY': [cells[2]],
        'REGDT': [cells[3]],
        'CHGDT': [cells[4]],
    }))
ARTICLE_LIST = pd.concat(df_list)
db_save(ARTICLE_LIST)
# In[21]:
# Open the first article in the board
browser.find_element_by_xpath('//*[@id="searchVO"]/div[5]/table/tbody/tr[1]/td[2]/a').click()
browser.implicitly_wait(3)
# In[22]:
# Download the attached file
browser.find_element_by_xpath('//*[@id="recsroomDetail"]/div[2]/div[4]/div/a').click()
time.sleep(10) # give the download time to finish before quitting
# In[23]:
browser.quit()
# #### 브라우저 가동하지 않고 백그라운드 작업 수행
# In[24]:
from selenium.webdriver import Chrome
import time
import sqlite3
from pandas.io import sql
import os
import pandas as pd
# In[25]:
from selenium import webdriver
options = webdriver.ChromeOptions()
options.add_argument('--headless') # run in the background (the only change from the code above)
options.add_argument('--disable-gpu')
options.add_argument('--window-size=1280x1024')
browser = webdriver.Chrome('C:/Users/user/Downloads/chromedriver.exe', options=options)
# In[26]:
# Same navigation as above, now without a visible browser window.
browser.get('https://www.data.go.kr/')
browser.implicitly_wait(5)
# In[27]:
# Go to the login screen
browser.find_element_by_xpath('//*[@id="header"]/div/div/div/div[2]/div/a[1]').click()
browser.implicitly_wait(5)
# In[28]:
# Type the user id (placeholder text)
browser.find_element_by_xpath('//*[@id="mberId"]').send_keys('사용자 아이디')
# In[29]:
# Type the password (placeholder text)
browser.find_element_by_xpath('//*[@id="pswrd"]').send_keys('사용자 비밀번호')
# In[30]:
# Submit the login form
browser.find_element_by_xpath('//*[@id="loginVo"]/div[2]/div[2]/div[2]/div/div[1]/button').click()
browser.implicitly_wait(5)
# In[32]:
# Close the popup layer
browser.find_element_by_xpath('//*[@id="layer_popup_info_1"]/div[1]/a').click()
# In[33]:
# "Information sharing" menu
browser.find_element_by_xpath('//*[@id="M000400_pc"]/a').click()
# In[34]:
# "Data room" menu
browser.find_element_by_xpath('//*[@id="M000402_pc"]/a').click()
# In[35]:
def db_save(ARTICLE_LIST):
    """Append the scraped board rows (a pandas DataFrame) to the local sqlite DB.

    NOTE(review): exact duplicate of the db_save defined earlier in this
    script (the notebook was copied for the headless run).
    """
    with sqlite3.connect(os.path.join('.','sqliteDB')) as con: # the sqlite DB file is created if it does not exist
        try:
            ARTICLE_LIST.to_sql(name = 'ARTICLE_LIST', con = con, index = False, if_exists='append')
            # if_exists : {'fail', 'replace', 'append'} default : fail
        except Exception as e:
            print(str(e))
        print(len(ARTICLE_LIST), '건 저장완료..')
# In[36]:
# Scrape the board table into a DataFrame and store it (same as above).
trs = browser.find_elements_by_xpath('//*[@id="searchVO"]/div[5]/table/tbody/tr')
df_list = []
for tr in trs:
    df = pd.DataFrame({
        'NO': [tr.find_element_by_xpath('td[1]').text],
        'TITLE': [tr.find_element_by_xpath('td[2]').text],
        'IQRY': [tr.find_element_by_xpath('td[3]').text],
        'REGDT': [tr.find_element_by_xpath('td[4]').text],
        'CHGDT': [tr.find_element_by_xpath('td[5]').text],
    })
    df_list.append(df)
ARTICLE_LIST = pd.concat(df_list)
db_save(ARTICLE_LIST)
# In[38]:
# Open the first article
browser.find_element_by_xpath('//*[@id="searchVO"]/div[5]/table/tbody/tr[1]/td[2]/a').click()
browser.implicitly_wait(3)
# In[39]:
# Download the attachment, then give the download time to finish
browser.find_element_by_xpath('//*[@id="recsroomDetail"]/div[2]/div[4]/div/a').click()
time.sleep(10)
# In[40]:
browser.quit()
# In[ ]:
| true |
0c594f7ea5d4d494cc47e5a03c035fbca2cd3f05 | Python | banty306/stockProject | /app.py | UTF-8 | 2,650 | 3.125 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pandas_datareader as data
# noinspection PyUnresolvedReferences
import silence_tensorflow.auto # for ignoring tensorflow info and warnings
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
import streamlit as st
from datetime import date
# Date range for the downloaded price history.
start = '2010-01-01'
end = date.today().strftime('%Y-%m-%d')
# decoration
st.title('Stock Trend Prediction')
# data frame: fetch daily quotes for the requested ticker from Yahoo.
user_input = st.text_input('Enter Stock Ticker', 'SBI')
df = data.DataReader(user_input, 'yahoo', start, end)
print(df)
# Describing Data
st.subheader('Data from '+start.split('-')[0]+' - '+end.split('-')[0])
st.write(df.describe())
# Visualizations
st.subheader('Closing Price vs Time chart')
fig = plt.figure(figsize=(12, 6))
plt.plot(df.Close, 'b')
st.pyplot(fig)
st.subheader('Closing Price vs Time chart with 100MA')
ma100 = df.Close.rolling(100).mean()
fig = plt.figure(figsize=(12, 6))
plt.plot(ma100, 'r')
plt.plot(df.Close, 'b')
st.pyplot(fig)
st.subheader('Closing Price vs Time chart with 100MA & 200MA')
# NOTE(review): ma100 is recomputed although unchanged from above.
ma100 = df.Close.rolling(100).mean()
ma200 = df.Close.rolling(200).mean()
fig = plt.figure(figsize=(12, 6))
plt.plot(ma100, 'r')
plt.plot(ma200, 'g')
plt.plot(df.Close, 'b')
st.pyplot(fig)
# splitting data into Training (first 70 %) and Testing (remaining 30 %)
data_training = pd.DataFrame(df['Close'][0:int(len(df) * 0.70)])
data_testing = pd.DataFrame(df['Close'][int(len(df) * 0.70): int(len(df))])
# scaling down the training data and converting it into an array
scale = MinMaxScaler(feature_range=(0, 1))
data_training_array = scale.fit_transform(data_training)
# Load the model (assumes keras_model.h5 consumes 100-step windows -- TODO confirm)
model = load_model('keras_model.h5')
# testing data: prepend the last 100 training days so the first test window
# has a full history.
past_100_days = data_training.tail(100)
final_df = past_100_days.append(data_testing, ignore_index=True)
# scaling down the testing data and converting it into an array
# NOTE(review): fit_transform here re-fits the scaler on the test range;
# transform() with the training fit is the usual practice -- confirm intent.
input_data = scale.fit_transform(final_df)
# splitting data into x_test (100-day windows) and y_test (the next value)
x_test = []
y_test = []
for i in range(100, input_data.shape[0]):
    x_test.append(input_data[i - 100: i])
    y_test.append(input_data[i, 0])
x_test, y_test = np.array(x_test), np.array(y_test)
# Making Prediction
y_predicted = model.predict(x_test)
# scaling the predictions back up to price units
scale_factor = 1/scale.scale_[0]
y_predicted = y_predicted * scale_factor
y_test = y_test * scale_factor
# Final Graph
st.subheader('Predictions vs Original')
fig2 = plt.figure(figsize=(12, 6))
plt.plot(y_test, 'b', label='Original Price')
plt.plot(y_predicted, 'g', label='Predicted Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
st.pyplot(fig2)
| true |
a6e742f3ea7080fe03351eeae9c30f0b4f6224fa | Python | nickchic/Coding_Dojo | /2_Python/Week5/Hospital/hospital.py | UTF-8 | 1,299 | 3.46875 | 3 | [] | no_license | class Hospital(object):
def __init__(self, name, capacity):
self.name = name
self.capacity = capacity
self.patients = []
#array of beds False meaning empty
self.beds = []
for x in range(0,capacity):
self.beds.append(False)
def admit(self, new_patient):
if len(self.patients) >= self.capacity:
print "Hospital full."
else:
self.patients.append(new_patient)
#gives the 1st available bed to new patient
for index, bed in enumerate(self.beds):
if bed == False:
new_patient.bed_number = index+1
self.beds[index] = True
break
print "Patient added."
return self
def discharge(self, patient_to_discharge):
# empties the bed the discharged patient was in
self.beds[patient_to_discharge.bed_number-1] = False
patient_to_discharge.bed_number = None
for index, patient in enumerate(self.patients):
if patient.id_num == patient_to_discharge.id_num:
self.patients.pop(index)
return self
def __repr__(self):
return "Name: {}, Capacity: {}, Patients: {}".format(self.name, self.capacity, self.patients)
| true |
72ada5ddf0d3bb07eed6ceeaa2ea4e5b6a4da3e1 | Python | leogouttefarde/InfraOpenstack | /setup.py | UTF-8 | 2,127 | 2.65625 | 3 | [] | no_license | from threading import Thread
import subprocess
import os
class Installeur(Thread):
    """Worker thread that provisions a single machine."""

    def __init__(self, id, scripts, master, brex_install):
        Thread.__init__(self)
        self.id = id                # target machine number
        self.scripts = scripts      # provisioning scripts to run on it
        self.master = master        # whether this machine grants access to the others
        self.brex = brex_install    # whether to set up the br_ex interface

    def run(self):
        # BUG FIX: the original passed
        #   "…-n " + id + (" -m " + id) if self.master else ""
        # whose conditional expression bound the WHOLE concatenation, so the
        # command collapsed to "" whenever the machine was not a master and
        # setup_connections.sh never ran for it.
        cmd = "./setup_connections.sh -n " + str(self.id)
        if self.master:
            cmd += " -m " + str(self.id)
        subprocess.call(cmd,
                        shell=True,
                        cwd="./connection")
        # Run every provisioning script on the machine.
        for script in self.scripts:
            subprocess.call("./provision.sh -n " + str(self.id) + " -p " + script,
                            shell=True,
                            cwd="./provision")
        if self.brex:
            subprocess.call("./set_brex.sh -n " + str(self.id),
                            shell=True,
                            cwd="./management")
        print("Installation finie sur la machine " + str(self.id))
def read_number():
    """Read machine numbers from stdin until a blank line.

    Each line is either a single number ("3") or an inclusive range
    ("1..4"). Returns the accumulated list of machine numbers (ints).
    """
    machines = []
    inp = raw_input()
    while inp != "":
        inp_machines = inp.split("..")
        if len(inp_machines) == 2:
            n1 = int(inp_machines[0])
            n2 = int(inp_machines[1])
            machines.extend(range(n1, n2 + 1))
        else:
            machines.append(int(inp_machines[0]))
        inp = raw_input()
    # BUG FIX: the original returned `inp_machines` -- the raw split of the
    # LAST line read -- instead of the accumulated `machines` list.
    return machines
# NOTE(review): this expression's result is discarded; it was presumably
# meant to locate (or chdir to) the script directory -- confirm intent.
os.path.dirname(os.path.realpath(__file__))
print("Machines sur lesquelles executer l'install (Soit un nombre par ligne soit 1..4)")
install = read_number()
print("Machines sur lesquelles donner un acces aux autres machines (Soit un nombre par ligne soit 1..4)")
masters = read_number()
print("Scripts d'install a executer (separer par une virgule)")
scripts = raw_input().split(",")
print("Voulez vous installer l'interface br_ex y/n")
install_brex = raw_input().lower() == 'y'
# One installer thread per selected machine.
install_running = []
for m in install:
    install_running.append(Installeur(m, scripts, m in masters,install_brex))
# Start everything in parallel, then wait for all installs to finish.
for thread in install_running:
    thread.start()
for thread in install_running:
    thread.join()
print("Installation finie sur les machines " + str(install))
| true |
a0d46a26d269b3e2da2d18ec79903c90f1176079 | Python | Mathesh-kumar/Flipcart-Review-Scrapper | /productDetails.py | UTF-8 | 7,580 | 3 | 3 | [] | no_license | # Import needed libraries
import requests
from bs4 import BeautifulSoup
"""
Function to scrap product name from the flipcart page
Requires one argument (i.e, source code of the page)
Returns product name as string
"""
def get_product_name(page):
    """Scrape the product name from a parsed Flipkart product page.

    [in]  page: BeautifulSoup document of the product page
    [out] product name string, or "No Name" when the element is missing
    """
    try:
        prodName = page.find_all("h1", {"class": "yhB1nd"})[0].text # Name of the product
    except Exception:  # narrowed from a bare except: (kept KeyboardInterrupt etc. uncaught)
        prodName = "No Name"
    return prodName
"""
Function to scrap product sample image url from the flipcart page
Requires one argument (i.e, source code of the page)
Returns image url as string
"""
def get_product_image(page):
    """Scrape the sample image URL from a parsed Flipkart product page.

    [out] the image URL string, or "No Link" when the element is missing
    """
    try:
        imageLink = page.find_all("div", {"class": "CXW8mj _3nMexc"})[0].img['src'] # Sample image link of the product
    except Exception:  # narrowed from a bare except:
        imageLink = "No Link"
    return imageLink
"""
Function to scrap product highlights from the flipcart page
Requires one argument (i.e, source code of the page)
Returns product highlights as list of dictionary
"""
def get_product_highlights(page):
    """Scrape the highlight bullet points of the product.

    [out] a single-element list holding {index-as-string: highlight text};
          [{'0': "No highlights"}] when scraping fails
    """
    try:
        prodHighs = {}
        highlights = page.find_all("li", {"class": "_21Ahn-"}) # Highlights of the product
        for i, item in enumerate(highlights):
            prodHighs[str(i)] = item.text
    except Exception:  # narrowed from a bare except:
        prodHighs = {'0': "No highlights"}
    return [prodHighs]
"""
Function to scrap product description from the flipcart page
Requires one argument (i.e, source code of the page)
Returns product description as string
"""
def get_product_description(page):
    """Scrape the long product description text.

    [out] description string, or "No Description" when the element is missing
    """
    try:
        prodDesc = page.find_all("div", {"class": "_1mXcCf RmoJUa"})[0].text # Description about the product
    except Exception:  # narrowed from a bare except:
        prodDesc = "No Description"
    return prodDesc
"""
Function to scrap product ratings count from the flipcart page
Requires one argument (i.e, source code of the page)
Returns product ratings count as list of dictionary
"""
def get_product_ratings(page):
    """Scrape rating statistics from a parsed Flipkart product page.

    [out] a 3-element list:
          [0] overall rating / rating count / review count (strings),
          [1] per-star counts keyed '5'..'1',
          [2] per-feature ratings keyed by feature name.
          Every value falls back to '0' (or a placeholder) when missing.
    """
    reviewsAndRatings = page.findAll("div", {"class": "row _3AjFsn _2c2kV-"})
    reviewRatings = []
    # Overall rating count of the product
    try:
        overallRating = reviewsAndRatings[0].find_all("div", {"class": "_2d4LTz"})[0].text
    except Exception:  # narrowed from a bare except:
        overallRating = '0'
    # Total no of people rated the product
    try:
        ratingCount = reviewsAndRatings[0].find_all("div", {"class": "row _2afbiS"})[0].text
    except Exception:
        ratingCount = '0'
    # Total no of reviews for the product
    try:
        reviewCount = reviewsAndRatings[0].find_all("div", {"class": "row _2afbiS"})[1].text
    except Exception:
        reviewCount = '0'
    ratings = dict(overallRating=overallRating, ratingCount=ratingCount, reviewCount=reviewCount)
    # Rating chart (5,4,3,2,1 stars individually)
    try:
        startsCountAll = reviewsAndRatings[0].find_all("div", {"class": "_1uJVNT"})
        startsCount = {}
        n = len(startsCountAll)
        for star in range(n):
            # The chart lists 5 stars first, so key n-star counts down.
            startsCount[str(n - star)] = startsCountAll[star].text
    except Exception:
        startsCount = {'1': '0', '2': '0', '3': '0', '4': '0', '5': '0'}
    # Product feature ratings
    try:
        featureName = reviewsAndRatings[0].find_all("div", {"class": "_3npa3F"})
        featureRating = reviewsAndRatings[0].find_all("text", {"class": "_2Ix0io"})
        featureNameRating = {}
        for feature in range(len(featureName)):
            name = featureName[feature].text
            rate = featureRating[feature].text
            featureNameRating[name] = rate
    except Exception:
        featureNameRating = {'No features': '0'}
    reviewRatings.append(ratings)
    reviewRatings.append(startsCount)
    reviewRatings.append(featureNameRating)
    return reviewRatings
"""
Function to scrap customer comments for the product from the flipcart page
Requires one argument (i.e, source code of the page)
Returns comments as list of dictionary
"""
def get_product_comments(page):
    """Follow the product's reviews link and scrape up to 10 review pages.

    [in]  page: BeautifulSoup document of the product page
    [out] list of dicts with keys Name, Rating, CommentHead, Comment

    NOTE(review): performs live HTTP requests to flipkart.com; all
    selectors are tied to Flipkart's current CSS class names.
    """
    commentsPageLink = "https://www.flipkart.com" + page.findAll("div", {"class": "col JOpGWq"})[0].findAll("a")[-1]['href']
    commentsPage = requests.get(commentsPageLink) # Request webpage from internet
    commentsPage = BeautifulSoup(commentsPage.text, "html.parser") # Parse web page as html
    # Collect the pagination links of the review section.
    links = commentsPage.findAll("nav", {"class": "yFHi8N"})[0].findAll("a")
    commentLinks = []
    for a in links:
        link = "https://www.flipkart.com" + a['href']
        commentLinks.append(link)
    commentLinks = commentLinks[:10]
    reviews = []
    for link in commentLinks:
        page = requests.get(link) # Request webpage from internet
        page = BeautifulSoup(page.text, "html.parser") # Parse web page as html
        commentBoxes = page.findAll("div", {"class": "col _2wzgFH K0kLPL"}) # Select all comments
        # This for loop will iterate through each comments and retrieve all the information from it.
        # Information like customer name, rating, heading, review text.
        for cBox in commentBoxes:
            try:
                name = cBox.find_all("p", {"class": "_2sc7ZR _2V5EHH"})[0].text # Name of the customer
            except:
                name = 'No Name'
            try:
                rating = cBox.find_all("div", {"class": "_3LWZlK _1BLPMq"})[0].text # Rating given by the customer
            except:
                rating = 'No Rating'
            try:
                commentHead = cBox.find_all("p", {"class": "_2-N8zT"})[0].text # Review heading given by the customer
            except:
                commentHead = 'No Comment Heading'
            try:
                customerComment = cBox.find_all("div", {"class": "t-ZTKy"})[0].div.text # Review by customer
                customerComment = customerComment.replace("READ MORE", "")
            except:
                customerComment = 'No Customer Comment'
            reviewDictionary = dict(Name=name, Rating=rating, CommentHead=commentHead,
                                    Comment=customerComment) # Store retrieved information as a dictionary
            reviews.append(reviewDictionary)
    return reviews
"""
Function to scrap details about the product from the flipcart page
Requires two arguments (i.e, product page link and source code of the page)
Returns list of product details.
"""
def get_details(link, page):
    """Assemble all scraped details of a product into one response dict.

    [in]  link: URL of the product page
    [in]  page: BeautifulSoup document of the product page
    [out] {'product': [ {...}, ... ]} -- the shape consumed by app.py
    """
    details = [
        dict(productName=get_product_name(page)),
        dict(productLink=link),
        dict(productImage=get_product_image(page)),
        dict(prductHighlights=get_product_highlights(page)),  # key typo kept: callers rely on it
        dict(productDescription=get_product_description(page)),
        dict(productRatings=get_product_ratings(page)),
        dict(productReviews=get_product_comments(page)),
    ]
    return {'product': details}
b87d12e0683aab3eba721fd8e8c50c913206aaf7 | Python | ariel215/movie-recs | /app/test_app.py | UTF-8 | 1,723 | 2.5625 | 3 | [] | no_license | from recommender.model import LDASearcher, TagSearcher, LiteralSearcher
import pytest
from . import app
import jinja2, flask
@pytest.fixture
def lda_cfg():
    # Config pointing at the pre-trained LDA model file.
    return {'model': 'ebert.lda'}
@pytest.fixture
def tag_cfg():
    # Config pointing at the movie-tag CSV dataset.
    return {'tags': 'movie_tags.csv'}
@pytest.fixture()
def webapp(tag_cfg):
    # Flask test client for the application under test.
    # NOTE(review): tag_cfg is requested but its value is unused here;
    # it only forces that fixture to run -- confirm whether it is needed.
    return app.application.test_client()
# Searcher smoke tests: they only print results (no assertions).
@pytest.mark.searcher
@pytest.mark.skip
def test_lda(lda_cfg):
    searcher = LDASearcher(lda_cfg)
    names = searcher.search("aliens")
    print(names[:5])
@pytest.mark.searcher
def test_tags(tag_cfg):
    searcher = TagSearcher(tag_cfg)
    for query in ["horror", "80's horror", "World War II", "Jewish"]:
        print("Search: {}".format(query))
        print(searcher.search(query)[:5])
@pytest.mark.searcher
def test_literals(tag_cfg):
    searcher = LiteralSearcher(tag_cfg)
    names = searcher.search("Sports")
    print(names[:5])
# Route tests: each endpoint must answer with the expected status code.
@pytest.mark.app
def test_home(webapp):
    assert '200' in webapp.get('/').status
@pytest.mark.app
def test_search(webapp):
    response = webapp.get('/search?search-query=war')
    assert '200' in response.status
@pytest.mark.app
@pytest.mark.parametrize(
    'movie_name', ['Miracle', 'Eighth Grade',
                   'Yours, Mine and Ours (2005)']
)
def test_movie(webapp, movie_name):
    # Titles are URL-encoded before being placed in the path.
    response = webapp.get(f'/movies/{jinja2.filters.do_urlencode(movie_name)}')
    assert '200' in response.status
@pytest.mark.app
def test_lucky(webapp):
    # /lucky is expected to redirect (302).
    assert '302' in webapp.get('/lucky').status
@pytest.mark.app
def test_show_all(webapp):
    assert '200' in webapp.get('/all').status
if __name__ == "__main__":
cfg = {'tags': 'movie_tags.csv'}
searcher = TagSearcher(cfg)
input("press any key to continue") | true |
a338741118058d60edb20d45761443384249cd14 | Python | oss/parse-scores | /parser.py | UTF-8 | 778 | 2.828125 | 3 | [] | no_license | import re
# Python 2 script: parse spam-score lines and print total vs non-URIBL net.
read = open(raw_input("Enter your file: "))
# e1 captures the bracketed list of NAME=SCORE entries on each line.
e1 = re.compile(r"\[(.+)\]")
# e2 splits one entry into name/score: groups 1/2 match the comma-terminated
# form, groups 3/4 the end-of-string form.
e2 = re.compile(r"(.+)=(.+),|(.+)=(.+)\Z")
for line in read:
    total = 0
    URIBLtotal = 0
    matched = re.search(e1, line)
    matchedList = matched.group(1).split()
    for a in matchedList:
        splitMatch = re.search(e2, a)
        if splitMatch.group(2) is None:
            # End-of-string form matched: name in group 3, score in group 4.
            if "URIBL_" in splitMatch.group(3):
                total += float(splitMatch.group(4))
                URIBLtotal += float(splitMatch.group(4))
            else:
                total += float(splitMatch.group(4))
        else:
            # Comma form matched: name in group 1, score in group 2.
            if "URIBL_" in splitMatch.group(1):
                total += float(splitMatch.group(2))
                URIBLtotal += float(splitMatch.group(2))
            else:
                total += float(splitMatch.group(2))
    # net = total score excluding the URIBL_* rules.
    net = total - URIBLtotal
    net = round(net, 4)
    total = round(total, 4)
    print(total, net)
| true |
95a19d9db7321ddfc3ee23aff27c36983a987b36 | Python | winstonian/code_playground | /movie_exercise/main.py | UTF-8 | 1,321 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import pandas as pd
import vincent
all_data = open("Data/running-times.list").readlines()[15:-2]
# print len(all_data)
parsed_data = []
for line in all_data:
release_date = re.search(r'(\d\d\d\d)', line)
run_time = re.search(r'\d+[\t\n]', line)
# movie_title = re.search(r'[#!$?.\'\s\w]+', line)
# if movie_title and release_date and run_time is not None:
# parsed_data.append([movie_title.group(), int(release_date.group()), int(run_time.group().strip())])
if release_date and run_time is not None:
parsed_data.append([int(release_date.group()), int(run_time.group().strip())])
else:
pass
# Filter out TV shows (if divisible by 30)
parsed_films = filter(lambda x: x[1] % 30, parsed_data)
# Pandas DataFrame
films = pd.DataFrame(parsed_films)
filtered_films = films[(films[0] > 1920) & (films[0] < 2015) & (films[1] > 45)]
# print filtered_films.describe()
films_by_year = filtered_films.groupby(0).mean()
line = vincent.Line(films_by_year)
line.axis_titles(x='Year', y="Run time")
line.to_json('movies.json', html_out=True, html_path='movies_template.html')
# Note to view movies_template.html locally, start and http server with:
# $python -m SimpleHTTPServer 8000
# And then visit: http://localhost:8000
| true |
25ad0a5bf0489b0503c9e17bc0265975fe1be8ad | Python | UB-info/estructura-datos | /RafaelArqueroGimeno_S6/HASH_Rafael_Arquero_Gimeno.py | UTF-8 | 1,133 | 3.28125 | 3 | [
"MIT"
] | permissive | import copy
import math
from ABB_Rafael_Arquero_Gimeno import ABB
__author__ = "Rafael Arquero Gimeno"
class Hash(object):
    """Hash table of ABB trees, bucketed by a logarithmic hash of relevance.

    Python 2 module (xrange / reduce / __nonzero__ are kept as-is).
    """

    def __init__(self, size=2**10):
        self.size = size
        # One ABB (binary search tree) bucket per slot.
        self.table = [ABB() for i in xrange(self.size)]

    def insert(self, data):
        """Insert `data` into the bucket selected by its relevance."""
        key = self.function(data.relevance)
        self.table[key].insert(data)

    def isEmpty(self):
        """True when every bucket tree is empty."""
        return all(tree.isEmpty() for tree in self.table)

    def function(self, x):
        """The hash function. A logarithmic distribution of the data was
        observed, so a logarithm is used; the constants were found
        empirically and perform well (original author's note)."""
        return self.size - 1 + int(math.log(x, 1.01))

    def __copy__(self):
        result = Hash(self.size)
        # BUG FIX: the original assigned the copied buckets back to
        # *self*.table, mutating the source object and returning a copy
        # whose buckets were all fresh, empty trees.
        result.table = [copy.copy(tree) for tree in self.table]
        return result

    def __nonzero__(self):
        return any(tree for tree in self.table)

    def __iter__(self):
        """Yield stored items, highest bucket first."""
        for tree in reversed(self.table):
            for data in tree:
                yield data

    def __str__(self):
        return reduce(lambda x, y: x + str(y) + "\n", self, "")
| true |
490884e61aa81dba63d3accb32d480015daa9862 | Python | vecin2/em_automation | /sqltask/test/test_query_runner.py | UTF-8 | 3,610 | 2.53125 | 3 | [
"MIT"
] | permissive | import pytest
from sqltask.database.query_runner import QueryRunner
from sqltask.exceptions import ConfigFileNotFoundException
class FakeEMDB:
    """Test double for the EM database: records the last query per operation."""
    def __init__(self):
        self.fetch_query = ""
        self.find_query = ""
    def pop_fetch_query(self):
        """Return the recorded fetch query and clear it."""
        recorded, self.fetch_query = self.fetch_query, ""
        return recorded
    def fetch(self, query):
        self.fetch_query = query
    def find(self, query):
        self.find_query = query
# Named SQL templates used by the tests: positional and keyword variants.
queries = {
    "v__by_name": "SELECT * FROM verb_name WHERE NAME='{}' and IS_DELETED='{}'",
    "v__by_name_with_keywords": "SELECT * FROM verb_name WHERE NAME='{name}' and IS_DELETED='{deleted}'",
}
@pytest.mark.parametrize("op_name", ["fetch", "find"])
def test_run_query_call_db_with_correct_query(op_name):
fakedb = FakeEMDB()
ad = QueryRunner(query_dict=queries, emdb=fakedb)
op = getattr(ad, op_name) # ad.fetch or ad.find
op.v__by_name_with_keywords(name="inlineSearch", deleted="N")
expected_query = (
"SELECT * FROM verb_name WHERE NAME='inlineSearch' and IS_DELETED='N'"
)
assert expected_query == fakedb.__getattribute__(op_name + "_query")
@pytest.mark.parametrize("op_name", ["fetch", "find"])
def test_run_query_with_wrong_number_args_throws_exception(op_name):
fakedb = FakeEMDB()
ad = QueryRunner(query_dict=queries, emdb=fakedb)
with pytest.raises(AssertionError) as excinfo:
op = getattr(ad, op_name) # ad.fetch or ad.find
op.v__by_name("inlineSearch")
assert "Method 'v__by_name' takes 2 params (1 given)" == str(excinfo.value)
assert "" == fakedb.pop_fetch_query()
@pytest.mark.parametrize("op_name", ["fetch", "find"])
def test_non_existing_throws_exception_no_query_defined(op_name):
fakedb = FakeEMDB()
ad = QueryRunner(queries, fakedb)
with pytest.raises(AssertionError) as excinfo:
op = getattr(ad, op_name) # ad.fetch or ad.find
op.something_else("inlineSearch")
assert "No query defined called 'something_else'." == str(excinfo.value)
assert "" == fakedb.pop_fetch_query()
@pytest.mark.parametrize("op_name", ["fetch", "find"])
def test_non_existing_query_with_similar_name_throws_exception_suggest_queries(op_name):
fakedb = FakeEMDB()
ad = QueryRunner(query_dict=queries, emdb=fakedb)
with pytest.raises(AssertionError) as excinfo:
op = getattr(ad, op_name) # ad.fetch or ad.find
op.v__by_nam("inlineSearch")
assert (
"No query defined called 'v__by_nam'. Did you mean?\nv__by_name\nv__by_name_with_keywords"
== str(excinfo.value)
)
assert "" == fakedb.pop_fetch_query()
def test_make_queries_from_file(fs):
    # `fs` is the pyfakefs fixture: the file below exists only in a fake FS.
    file_content = """
v__by_name=SELECT * FROM verb_name WHERE NAME='{}' and IS_DELETED='{}'
"""
    expected_dict = {
        "v__by_name": "SELECT * FROM verb_name WHERE NAME='{}' and IS_DELETED='{}'"
    }
    file_path = "/em/gsc/queries.sql"
    fs.create_file(file_path, contents=file_content)
    ad = QueryRunner.make_from_file(file_path, db=FakeEMDB())
    assert expected_dict == ad.query_dict.properties
def test_run_query_does_throws_exception_if_file_not_exist():
    # A missing config file must surface as FileNotFoundError with a clear message.
    with pytest.raises(FileNotFoundError) as excinfo:
        ad = QueryRunner.make_from_file("/queries.sql", db=FakeEMDB())
        ad.fetch.my_query()
    assert "Try to load config file '/queries.sql' but it does not exist" in str(
        excinfo
    )
def test_something():
    # NOTE(review): the bare attribute access on the line below asserts
    # nothing, and `addb` looks like a typo for the runner's DB attribute;
    # confirm what this test is meant to cover.
    fakedb = FakeEMDB()
    query_runner = QueryRunner(query_dict=queries, emdb=fakedb)
    query_runner.find.v__by_name
    assert fakedb == query_runner.addb
| true |
c746e311e74ae844a89bb5d2210976ec7fe52974 | Python | notem/cryptopals | /set1/challenge5.py | UTF-8 | 899 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env python3
# coding: utf-8
# author: Nate Mathews, njm3308@rit.edu
# date: 2017-06-14
import sys
import binascii
import copy
# encode binary data by xor'ing a key repeating through the length of the data
def xor_encode(binary, key_binary):
    """Return a copy of *binary* XOR-ed with *key_binary*, the key repeating."""
    encoded = copy.deepcopy(binary)
    for pos in range(len(encoded)):
        # Modulo indexing replaces the original manual key-index counter.
        encoded[pos] ^= key_binary[pos % len(key_binary)]
    return encoded
if __name__ == '__main__':
    # Cryptopals set 1, challenge 5: repeating-key XOR, hex-encoded output.
    text = "Burning 'em, if you ain't quick and nimble\nI go crazy when I hear a cymbal"
    key = "ICE"
    system_encoding = sys.getdefaultencoding()
    plain_bytes = bytearray(text, encoding=system_encoding)
    key_bytes = bytearray(key, encoding=system_encoding)
    print(binascii.hexlify(xor_encode(plain_bytes, key_bytes)).decode())
| true |
900257fb81ce3ed75a67b812576f4994070dd277 | Python | NickPanyushev/python-course | /spring-exam_2/sql_test.py | UTF-8 | 857 | 2.703125 | 3 | [] | no_license | #!/bin/python3
import sqlite3 as sql
query = "create table Users (" \
"id integer primary key autoincrement, --user id" \
"username text unique not null, --username," \
"registered datetime not null --when user" \
"--registered" \
");"
with sql.connect("data.sqlite") as data: # открыли соединение с базой данных
cur = data.cursor() # получили курсор
# for row in cur.execute(
# "create table Artists (" \
# "id integer primary key autoincrement, --artist id" \
# "name text unique not null, --name," \
# ");"):
# print (row)
select * from Albums limit 3
def unpaid(user_id):
    """Exam scratch helper.

    NOTE(review): `user_id` is never used, and the statement executed is the
    module-level CREATE TABLE `query` (which errors once the table exists) --
    presumably a placeholder for a real SELECT; confirm the intent.
    """
    with sql.connect("data.sqlite") as db:
        cur = db.cursor()
        data = cur.execute(query).fetchall()
        return data
# Stray no-op left at the end of the scratch file.
print()
66a24ecefef67d117ec7cd2ab8133c43f5758b89 | Python | ChrisNoone/python_practice | /practice/func_test/func_sort.py | UTF-8 | 720 | 3.359375 | 3 | [] | no_license | # coding:utf-8
'''冒泡排序'''
def func_sort(s):
    # Bubble sort, in place; Python 2 file (note the print statement).
    # Returns the sorted list, or None when the argument is not a list.
    if type(s).__name__ != "list":
        print "please give list!"
    else:
        # After pass j, the largest j elements are in final position.
        for j in range(1,len(s)):
            for i in range(len(s)-j):
                if s[i]>s[i+1]:
                    s[i],s[i+1]=s[i+1],s[i]
        return s
'''快速排序'''
def func_fsort(s):
    '''Quick sort: return a new sorted list (inputs of length <= 1 are
    returned unchanged).'''
    if len(s) <= 1:
        return s
    pivot = s[0]
    # Partition around the first element, keeping relative order.
    below = [v for v in s if v < pivot]
    equal = [v for v in s if v == pivot]
    above = [v for v in s if v > pivot]
    return func_fsort(below) + equal + func_fsort(above)
# Python 2 demo: func_sort mutates li in place, so the second print
# receives an already-sorted list.
li = [3,7,2,9,11,4,8]
print func_sort(li)
print func_fsort(li)
89375343d8925cc58e5415e14d2d4a3fc4f9ead0 | Python | bencami22/PythonTheHardWay | /exercise_24-MorePractice.py | UTF-8 | 721 | 4.25 | 4 | [] | no_license | print("Let's practice everything")
print('You\'d need to know \'bout escape characaters using the \\ that do:')
print ('\n newlines and \t tabs')
poem="""
\t The local world \n where people use \t technology
"""
print("----------------")
# BUG FIX: the original printed the literal string "poem" instead of the
# poem variable defined just above (which was otherwise never used).
print(poem)
print("-----------------")
five=8-3
print(f"This should be five = {five}")
def secret_formula(started):
    """Derive jelly-bean, jar and crate counts from a starting number."""
    bean_total = started * 100      # 100 beans per starting unit
    jar_total = bean_total / 23     # 23 beans fill a jar
    return bean_total, jar_total, jar_total * 4.5
# Unpack the three returned values directly into separate names...
jelly_beans, jars, crates=secret_formula(12)
print("jelly_beans={}, jars={}, crates={}".format(jelly_beans, jars, crates))
print("We can also do it this way:")
# ...or capture the whole tuple and star-unpack it into format().
formula=secret_formula(12)
print("jelly_beans={}, jars={}, crates={}".format(*formula))
| true |
6c2f886bb77d3c312020fdbc746f390a81481905 | Python | carlmaps/ML-MovieRecommendation | /kerasEngine.py | UTF-8 | 3,116 | 2.734375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import logging
# Module-wide logger, configured once at INFO level.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class RecommenderNet(keras.Model):
    """Collaborative-filtering recommender.

    Predicted rating = sigmoid(user_embedding . movie_embedding
    + user_bias + movie_bias), so the output lies in (0, 1).
    """
    def __init__(self, num_users, num_movies, embedding_size, **kwargs):
        """Build L2-regularized embedding tables for users and movies,
        each `embedding_size` wide, plus per-user/per-movie scalar biases."""
        logger.info("Starting up the RecommenderNet: ")
        super(RecommenderNet, self).__init__(**kwargs)
        self.num_users = num_users
        self.num_movies = num_movies
        self.embedding_size = embedding_size
        self.user_embedding = layers.Embedding(
            num_users,
            embedding_size,
            embeddings_initializer="he_normal",
            embeddings_regularizer=keras.regularizers.l2(1e-6),
        )
        # One scalar bias per user.
        self.user_bias = layers.Embedding(num_users, 1)
        self.movie_embedding = layers.Embedding(
            num_movies,
            embedding_size,
            embeddings_initializer="he_normal",
            embeddings_regularizer=keras.regularizers.l2(1e-6),
        )
        # One scalar bias per movie.
        self.movie_bias = layers.Embedding(num_movies, 1)
    def call(self, inputs):
        """Forward pass.

        `inputs` is indexed as [:, 0] (user id) and [:, 1] (movie id),
        i.e. a (batch, 2) integer tensor of encoded ids.
        """
        user_vector = self.user_embedding(inputs[:, 0])
        user_bias = self.user_bias(inputs[:, 0])
        movie_vector = self.movie_embedding(inputs[:, 1])
        movie_bias = self.movie_bias(inputs[:, 1])
        dot_user_movie = tf.tensordot(user_vector, movie_vector, 2)
        # Add all the components (including bias)
        x = dot_user_movie + user_bias + movie_bias
        # The sigmoid activation forces the rating to between 0 and 1
        return tf.nn.sigmoid(x)
    # The rate function to predict user's rating of unrated items
    def predictRate(self, userId, movieId):
        """Predict one (userId, movieId) rating, returned as a string in a
        {"rating": ...} dict.

        NOTE(review): the ids are fed straight into predict(), so they are
        presumably already encoded indices — confirm against callers.
        """
        return {"rating": self.predict(np.array([[userId, movieId]])).astype(str).flatten()[0]}
    def getMovieRecommendation(self, userID, config):
        """Return the top-10 recommendations for `userID` as a list of
        {"movieID", "Title", "Genre"} dicts.

        `config` must provide the rating_df / movie_df dataframes and the
        user2user_encoded / movie2movie_encoded / movie_encoded2movie
        lookup dicts used below.
        """
        logger.info("Retrieving Top 10 Movie Recommendation....")
        movies_watched_by_user = config.rating_df[config.rating_df.userId == userID]
        # Candidates: movies the user has not rated and that have an encoding.
        movies_not_watched = config.movie_df[~config.movie_df["movieId"].isin(movies_watched_by_user.movieId.values)]["movieId"]
        movies_not_watched = list(set(movies_not_watched).intersection(set(config.movie2movie_encoded.keys())))
        movies_not_watched = [[config.movie2movie_encoded.get(x)] for x in movies_not_watched]
        user_encoder = config.user2user_encoded.get(userID)
        # Pair the encoded user id with every candidate movie id (n x 2 array).
        user_movie_array = np.hstack(([[user_encoder]] * len(movies_not_watched), movies_not_watched))
        ratings = self.predict(user_movie_array).flatten()
        # Indices of the 10 highest predicted ratings, best first.
        top_ratings_indices = ratings.argsort()[-10:][::-1]
        recommended_movie_ids = [config.movie_encoded2movie.get(movies_not_watched[x][0]) for x in top_ratings_indices]
        recommended_movies = config.movie_df[config.movie_df["movieId"].isin(recommended_movie_ids)]
        recommendation = []
        for row in recommended_movies.itertuples():
            recommendation.append({"movieID" : row.movieId,"Title" : row.title,"Genre" : row.genres})
        return recommendation
| true |
8c073cb33cdf7bb6454fda97e422ea71198cac2d | Python | eulerss/python | /contadorPalabras.py | UTF-8 | 871 | 3.515625 | 4 | [] | no_license | import re
class CuentaPalabrasArchivo:
    """Count word frequencies in a text file into `self.wordcount`."""

    def __init__(self):
        # Per-instance counter. The original used a mutable CLASS attribute,
        # so every instance shared (and kept accumulating into) one dict.
        self.wordcount = {}
        print("Inicia contador de palabras")

    def admin_file(self, archivo):
        """Read `archivo` and tally every token into self.wordcount."""
        self.archivo = archivo
        print("Contando las palabras del archivo: "+self.archivo)
        with open(archivo) as file:
            for word in file.read().split():
                # Split off punctuation / non-word characters.
                partes = re.split(r'(\W+)|(?<!\d)[,.;]|[,.;](?!\d)', word)
                for parte in partes:
                    # re.split can yield empty strings and, for
                    # non-participating groups, None — skip both (the
                    # original `!= ''` test would have counted None).
                    if parte:
                        self.wordcount[parte] = self.wordcount.get(parte, 0) + 1
        print(self.wordcount)
# Demo run: requires ./ejemplo.txt to exist in the working directory.
obj = CuentaPalabrasArchivo()
obj.admin_file("./ejemplo.txt")
| true |
d96a86b6cdc07603095122f7268db321cb7c3871 | Python | jmew91/TetrisBot | /code/board.py | UTF-8 | 221 | 3.484375 | 3 | [] | no_license | class Board:
def __init__(self, rows, cols):
self.rows = rows
self.cols = cols
self.board = [[0 for x in range(rows)] for y in range(cols)]
def print_board(self):
print(self.rows) | true |
2f264801db7065ef092a457d05108322efec1b94 | Python | sayuree/leetcode-problems | /arrays/217.contains_duplicate.py | UTF-8 | 592 | 3.359375 | 3 | [] | no_license | # Runtime: 204 ms, faster than 8.17% of Python3 online submissions for Contains Duplicate.
# Memory Usage: 19.3 MB, less than 38.17% of Python3 online submissions for Contains Duplicate.
class Solution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        """Return True if any value occurs more than once in `nums`.

        Short-circuits on the first duplicate found instead of always
        building the full set as the original did (same results, less
        work on inputs with early duplicates).
        """
        seen = set()
        for item in nums:
            if item in seen:
                return True
            seen.add(item)
        return False
class Solution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        """True when `nums` holds any repeated value."""
        if not nums:
            return False
        # A set keeps one copy of each value; a shrunken size means a repeat.
        distinct = set(nums)
        return len(distinct) != len(nums)
| true |
a055d34d7d30d8d61a0997e0738c76caa931981a | Python | pingguoshouji/test | /a/b/list_tumple.py | UTF-8 | 2,755 | 3.84375 | 4 | [] | no_license | #list与tumple用法一模一样
# Modify and grow a list
alist = ['a','b','c']
alist[1] = 'd'
print(alist)
# append: add to the end
alist.append('D')
print(alist)
# insert at a given index
alist.insert(2,'e')
print(alist)
# delete by index
del alist[0]
print(alist)
# remove by value
alist.remove('c')
# pop: delete from the tail
alist.pop()
print(alist)
# sort: in-place sort
blist = [1,2,5,3,8,6]
blist.sort(reverse=False) # ascending
# blist.sort(reverse=True)
print(blist)
# len()
aList=[1,2,3,4,5]
print(len(aList))
# max / min values
aList=[1,2,3,4,5]
# NOTE(review): this prints len() a second time; given the heading above,
# min(aList) was probably intended here.
print(len(aList))
print(max(aList))
# extend: list extension
# Difference from +: + returns a new list, while extend modifies in place.
a = [1,2,3]
b = [4,5,6]
a.extend(b)
print(a)
# index: find the position of a value
aList=['This','is','a','very','good','idea']
# Fixed: the original called alist.index('very'); lowercase `alist` is a
# different, earlier list that contains no 'very', so it raised ValueError.
print(aList.index('very'))
# count: occurrences of a value
aList=['to','do','or','not','to','do']
print(aList.count('to'))
# Unpack a tuple
tup = 1,2,(3,4)
# a,b,c = tup
# Nested unpacking: c and d come from the inner (3, 4) tuple.
a,b,(c,d) = tup
print(c)
seq = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
for a, b, c in seq:
    print('a={0}, b={1}, c={2}'.format(a, b, c))
# Slicing
seq = [7, 2, 3, 7, 5, 6, 0, 1]
print(seq[:5])
print(seq[-6:-2])
# enumerate: iterate with (index, value) pairs
some_list = ['foo', 'bar', 'baz']
# a = enumerate(some_list)
# print(a) # <enumerate object at 0x00DC9878>
# print(list(enumerate(some_list,start=1)))
# for i,v in enumerate(some_list):
#     print(i,v)
#     print(dict(i,v)) # TypeError: dict expected at most 1 arguments, got 2
# Build a value -> index mapping, printing each index as it is stored.
mapping = {}
for i,v in enumerate(some_list):
    mapping[v] = i
    print(mapping[v])
print(mapping)
# print(mapping[i])
#
# Group words into by_letter, keyed on their first character.
words = ['apple', 'bat', 'bar', 'atom', 'book']
by_letter = {}
for word in words:
    # setdefault inserts an empty bucket the first time a letter is seen.
    by_letter.setdefault(word[0], []).append(word)
print(by_letter)
# defaultdict
# https://blog.csdn.net/the_little_fairy___/article/details/80551538
from collections import defaultdict
# Plain dict for comparison (unused below).
a=dict()
b=defaultdict(int)
# Missing keys are created with int() == 0 instead of raising KeyError.
print(b["a"])
# Set comprehension: three equivalent ways to collect unique lengths.
result = set() # create an empty set
strings = ['a', 'as', 'bat', 'car', 'dove', 'python']
for i in strings:
    result.add(len(i))
# print(result)
unique_lengths = {len(x) for x in strings}
result = set(map(len, strings))
print(result)
#--------------------------------------------------------------#
def student(x):
    """Return the third field of a record (e.g. a student's age)."""
    return x[2]
students = [('john', 'A', 15), ('jane', 'B', 12), ('dave', 'B', 10)]
# NOTE(review): this passes the whole LIST, so it prints the third element
# of `students` (the 'dave' tuple), not a single student's age.
print(student(students))
print((lambda student : student[2])(students))
# sorted(students, key=lambda student : student[2])
# Sort by the third field (age), ascending.
students = sorted(students, key=student)
print(students)
#
# Sort strings by their number of distinct characters.
strings = ['foo', 'card', 'bar', 'aaaa', 'abab','aaaa']
strings.sort(key=lambda x: len(set(list(x))))
print(strings)
| true |
9856c5080542eb5d69ebc61ccbb8671bcc55b64a | Python | spitfire4040/excel_parsing | /excel_sheet_to_csv_v2.py | UTF-8 | 3,214 | 3.34375 | 3 | [] | no_license | import xlrd
# Open the source workbook and pick the worksheet to read from.
workbook = xlrd.open_workbook('FINAL_MackayTheses_Inventory.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')


def _cell_value(row_idx, col_idx):
    """Return the cell value, or 'NULL' when the column does not exist.

    The original carried the previous row's value forward when a column
    index exceeded ncols; an explicit NULL is emitted instead.
    """
    if col_idx < worksheet.ncols:
        return worksheet.cell(row_idx, col_idx).value
    return 'NULL'


def _csv_field(value, stop_at_newline=False):
    """Format one cell value as a single CSV field.

    The field is wrapped in double quotes when it contains a comma or a
    quote, and embedded double quotes are doubled — the original code
    never escaped them, producing malformed CSV. With `stop_at_newline`
    only the text before the first newline is kept, matching the original
    handling of the author cell.
    """
    text = str(value)
    if stop_at_newline:
        text = text.split('\n', 1)[0]
    if ',' in text or '"' in text:
        return '"' + text.replace('"', '""') + '"'
    return text


# Write one CSV row per worksheet row, in the original column order:
# author, publication date, title, thesis number, file name, link.
# No trailing newline after the last row (as before).
with open('MackayTheses.csv', 'w') as f0:
    for x in range(worksheet.nrows):
        fields = [
            _csv_field(_cell_value(x, 0), stop_at_newline=True),  # author
            _csv_field(_cell_value(x, 2)),   # publication date
            _csv_field(_cell_value(x, 1)),   # title
            _csv_field(_cell_value(x, 10)),  # thesis number
            _csv_field(_cell_value(x, 6)),   # file name
            _csv_field(_cell_value(x, 12)),  # link
        ]
        f0.write(','.join(fields))
        if x != worksheet.nrows - 1:
            f0.write('\n')
| true |