blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3abf33de8c48822f273f8052b51807bbc5e2f49f | Python | psegedy/test_async | /server.py | UTF-8 | 521 | 2.609375 | 3 | [] | no_license | import tornado.web
from tornado.ioloop import IOLoop
class MainHandler(tornado.web.RequestHandler):
    """Main handler of the Tornado application; serves the root endpoint "/"."""
    def get(self):
        """Handle GET requests by writing a plain-text greeting to the response."""
        self.write("Hello, world")
# application with endpoint "/" and handler MainHandler
application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":  # pragma: no cover
    # BUG FIX: tornado.httpserver is never imported in this file, so
    # tornado.httpserver.HTTPServer raised AttributeError unless another
    # import happened to load that submodule first. Application.listen()
    # constructs and starts the HTTPServer internally, avoiding the
    # dependency entirely.
    application.listen(8080)
    IOLoop.instance().start()
| true |
d31dacdd4345287825e9ad960d967dbb51e6787c | Python | joschout/tilde | /refactor/tilde_essentials/stop_criterion.py | UTF-8 | 2,463 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | from refactor.tilde_essentials.splitter import SplitInfo
import math
class StopCriterion:
    """
    Checks whether a node should be split; i.e. whether a stop criterion is reached.
    """

    def __init__(self, max_depth: int = math.inf,
                 min_samples_split: int = 2,
                 min_samples_leaf: int = 2  # 1
                 ):
        """Store the stopping thresholds.

        :param max_depth: maximum tree depth (math.inf disables the limit;
            NOTE(review): the annotation says int, but math.inf is a float)
        :param min_samples_split: minimum number of examples a node needs
            before a split is even considered
        :param min_samples_leaf: minimum number of examples each child must
            keep for a split to be acceptable
        """
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf

    def cannot_split_before_test(self, examples, depth):
        """
        If we already know we cannot split without having to calculate possible tests,
        report True here.
        :param depth: current depth of the node
        :param examples: examples covered by the node
        :return: True when the node must not be split; False otherwise
        """
        if depth >= self.max_depth:
            return True
        if len(examples) < self.min_samples_split:
            return True
        # BUG FIX: the original fell off the end and implicitly returned None;
        # return False explicitly (both are falsy, so callers are unaffected).
        return False

    def _not_enough_examples_in_leaves(self, split_info: SplitInfo) -> bool:
        """
        Return True if the smaller of the two subsets does NOT have enough
        examples to be acceptable as a leaf.

        De Raedt's heuristic: stop expanding nodes when the number of examples
        falls below a user-defined threshold. A split produces two children, so
        the node is not split if even ONE child would fall below the threshold,
        i.e. when min(len(left), len(right)) < min_samples_leaf.

        :param split_info: the candidate split to inspect
        :return: True when the smaller child is below min_samples_leaf
        """
        return min(
            len(split_info.examples_left), len(split_info.examples_right)
        ) < self.min_samples_leaf

    def cannot_split_on_test(self, split_info: SplitInfo):
        """Return True when the given candidate split must be rejected."""
        if split_info is None:
            return True
        if not split_info.passing_score():
            return True
        if self._not_enough_examples_in_leaves(split_info):
            return True
        return False
| true |
692da255c7ec68949129fd0549e743f264e8a629 | Python | milanlx/Trans-Building-Research | /bus_plot.py | UTF-8 | 2,800 | 2.90625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils.general_utils import *
"""
ref:
- plot dots: https://towardsdatascience.com/easy-steps-to-plot-geographic-data-on-a-map-python-11217859a2db
- get map: https://medium.com/@busybus/rendered-maps-with-python-ffba4b34101c
"""
def get_plot_df(filtered_bus_stops, bus_stops, dirt):
    """Collect the unique stops of one travel direction into a DataFrame.

    :param filtered_bus_stops: dict whose keys end with a direction flag
        (key[-1]) and whose values are lists of stop ids
    :param bus_stops: DataFrame with columns 'stop_id', 'stop_lat', 'stop_lon'
    :param dirt: direction flag ('0' or '1') to select keys by
    :return: DataFrame [stop_id, stop_lat, stop_lon] in first-seen order

    BUG FIX: the original used DataFrame.append inside a loop, which is
    deprecated and removed in pandas 2.x (and quadratic). Rows are now
    collected in a list and the frame is built once.
    """
    columns = ['stop_id', 'stop_lat', 'stop_lon']
    # Gather unique stop ids for the requested direction, preserving order.
    stop_list = []
    for key in filtered_bus_stops.keys():
        if key[-1] == dirt:
            for stop_id in filtered_bus_stops[key]:
                if stop_id not in stop_list:
                    stop_list.append(stop_id)
    # Look up each stop's coordinates in the ground-truth table.
    rows = []
    for stop_id in stop_list:
        lat = bus_stops.loc[bus_stops['stop_id'] == stop_id, 'stop_lat'].iloc[0]
        lon = bus_stops.loc[bus_stops['stop_id'] == stop_id, 'stop_lon'].iloc[0]
        rows.append({'stop_id': stop_id, 'stop_lat': lat, 'stop_lon': lon})
    return pd.DataFrame(rows, columns=columns)
# Load the pre-processed stop data (pickles produced by earlier pipeline steps).
filtered_bus_stops = loadFromPickle('processed_data/bus/stops/filtered_stops.pkl')
bus_stops = loadFromPickle("processed_data/bus/stops/ground_truth/bus_stops.pkl")
dirt = '0'  # direction flag: plot direction 0 only
df = get_plot_df(filtered_bus_stops, bus_stops, dirt)
group_center = loadFromPickle('processed_data/bus/stops/test.pkl')
# save
saveAsPickle(df, 'processed_data/bus/stops/bus_stops_total_seq_dir0.pkl')
# NOTE(review): the computed bbox is immediately overwritten by a hard-coded
# one on the next line — presumably to get a fixed frame matching the map
# image; confirm this is intentional.
bbox = (df.stop_lon.min(), df.stop_lon.max(), df.stop_lat.min(), df.stop_lat.max())
bbox = (-79.97441, -79.910184, 40.415419, 40.474327)
print(bbox)
ref_coord = [40.443431, -79.941643]  # reference point (lat, lon) drawn in red
n = len(df.stop_lon)
labels = range(n)  # stops are numbered 0..n-1 on the plot
print(n)
map_org = plt.imread('map_data/map_org.png')
fig, ax = plt.subplots(figsize=(15,18))
# NOTE(review): the trailing backslash on the next line joins it with the
# following comment line — fragile; it happens to parse but should be removed.
ax.scatter(df.stop_lon, df.stop_lat, zorder=1, alpha= 0.8, c='b', s=10)\
# group center point
ax.scatter(group_center.lon_mean, group_center.lat_mean, zorder=1, alpha= 0.8, c='g', s=30)
# direction arrow
# NOTE(review): the hard-coded 27 assumes group_center has exactly 27 rows;
# columns 2/3/4 are presumably adjacency list / lat / lon — verify upstream.
for i in range(27):
    for adj in group_center.iat[i,2]:
        x_pos, y_pos = group_center.iat[i, 4], group_center.iat[i, 3]
        x_direct, y_direct = group_center.iat[adj, 4], group_center.iat[adj, 3]
        ax.annotate('', xy=(x_direct, y_direct), xytext=(x_pos, y_pos),
                    arrowprops=dict(arrowstyle='->', lw=1.5))
ax.scatter(ref_coord[1], ref_coord[0], zorder=1, alpha= 0.8, c='r', s=20)
# add number, start from 0
for i, txt in enumerate(labels):
    ax.annotate(txt, (df.stop_lon[i], df.stop_lat[i]))
ax.set_title('Spatial Bus Stops')
ax.set_xlim(bbox[0],bbox[1])
ax.set_ylim(bbox[2],bbox[3])
ax.imshow(map_org, zorder=0, extent=bbox, aspect='equal')
plt.savefig('bus_stop_map_dir0_center.png')
| true |
0072332ca04d6b55e90274df7062ce599342348f | Python | chaglare/learning_python | /environments/my_env/ex1_defaults-arguments.py | UTF-8 | 148 | 3.046875 | 3 | [] | no_license | def my_function(str1, str2):
print(str1)
print(str2)
my_function("This is arg1", "This is arg2")
my_function("Strongly", "Hello World") | true |
a7440efc186c0653e35387482e7b0c0c85438f2e | Python | eddiewang-wgq/Python-unittest-webdriver | /webdriver常用的API测试/level_locate/test4.py | UTF-8 | 488 | 2.796875 | 3 | [] | no_license | from selenium import webdriver
import time
import os
from selenium.webdriver import ActionChains
# Open a Chrome session and load the local demo page with a nested menu.
driver = webdriver.Chrome()
file_path = 'file:///'+os.path.abspath("E:/Pythoncode/002/0004/level_locate.html")
driver.get(file_path)
# Click the "Link1" link (opens the drop-down list)
# NOTE(review): find_element_by_link_text is removed in Selenium 4; this
# script assumes Selenium 3 — confirm the pinned version.
driver.find_element_by_link_text("Link1").click()
# Move the mouse over the "Action" element to keep the menu open
action = driver.find_element_by_link_text("Action")
ActionChains(driver).move_to_element(action).perform()
time.sleep(10)  # keep the browser open long enough to observe the result
7f6d698517470e85ca6fddd45cd92354e2ce50cd | Python | moskalev/polycomProvisioning | /server.py | UTF-8 | 1,648 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python3
from http.server import BaseHTTPRequestHandler, HTTPServer
import datetime
import os
class S(BaseHTTPRequestHandler):
    """HTTP handler for provisioning Polycom phones.

    GET serves files from the local ``files/`` directory; POST/PUT append the
    request headers and body to a log file. Missing files deliberately get a
    200 with an empty body (see the comment in do_GET).
    """
    def __init__(self, *args, log_file = 'polycom.log', **kwargs):
        # log_file must be set BEFORE calling super().__init__, because
        # BaseHTTPRequestHandler handles the request inside its constructor.
        self.log_file = log_file
        super(S, self).__init__(*args, **kwargs)

    def _set_headers(self):
        """Send a 200 OK with a text/html content type."""
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):
        """Serve the requested file (last path component) from files/."""
        self._set_headers()
        fname = str(self.path).split('/')[-1]
        if os.path.exists('files/' + fname):
            print('Serving ', fname)
            with open('files/' + fname, 'rb') as file:
                self.wfile.write(file.read())
        else:
            print('Not found ', fname) # Do not reply with an error here

    def do_POST(self):
        """Append a timestamp, the headers and the request body to the log."""
        with open(self.log_file, "a") as f:
            '''Reads post request body'''
            self._set_headers()
            f.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
            f.write("\n")
            f.write(str(self.headers))
            post_body = self.rfile.read(int(self.headers['Content-Length'])).decode("UTF-8")
            f.write(str(post_body))
            f.write("\n")

    def do_PUT(self):
        """PUT is logged exactly like POST."""
        self.do_POST()
def run(server_class=HTTPServer, handler_class=S, port=80):
    """Bind *server_class* to all interfaces on *port* and serve forever."""
    server = server_class(('', port), handler_class)
    print('Starting httpd... on port', port)
    server.serve_forever()
if __name__ == "__main__":
    import sys

    # An optional single CLI argument overrides the default port (80).
    cli_args = sys.argv
    if len(cli_args) == 2:
        run(port=int(cli_args[1]))
    else:
        run()
| true |
8394a6d051faf7bcd40f6da14a24ebf9ab7a88de | Python | oscarfruto1/pokemon | /sheets.py | UTF-8 | 1,456 | 3 | 3 | [] | no_license | import gspread
from oauth2client.service_account import ServiceAccountCredentials
from pip._vendor.distlib.compat import raw_input
import datetime
# OAuth scopes required for reading/writing Sheets and Drive files.
scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/spreadsheets",
         "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]
# Service-account credentials are expected in creds.json next to this script.
creds = ServiceAccountCredentials.from_json_keyfile_name("creds.json", scope)
client = gspread.authorize(creds)
# Work on the first worksheet of the spreadsheet named "test".
sheet = client.open("test").sheet1
data = sheet.get_all_records()
def fillData():
    """Write the IDs 1..20 into cells A2:A21 using one batched update."""
    cells = sheet.range('A2:A21')
    for cell, number in zip(cells, range(1, 21)):
        cell.value = number
    sheet.update_cells(cells)
def addItems():
    """Prompt for an ID and write it into cell A2 (row 2, column 1)."""
    entered_id = input("type the new ID: ")
    sheet.update_cell(2, 1, entered_id)  # Update one cell
def duplicates():
    """Prompt for an ID; if it already exists in A2:A21, record it with a timestamp.

    BUG FIX: the original shadowed the user's input with the loop variable
    (``for x in val`` after ``x = input(...)``), tested Cell objects for
    membership in the cell list (always true for the first cell), wrote a Cell
    object into the sheet, and wrote the ``datetime`` *module* instead of an
    actual timestamp.
    """
    cells = sheet.range('A2:A21')
    new_id = input("Type the new ID: ")
    existing_values = [cell.value for cell in cells]
    if new_id in existing_values:
        # Duplicate found: record the ID and the current date/time.
        sheet.update_cell(2, 1, new_id)
        sheet.update_cell(2, 7, str(datetime.datetime.now()))
# Demo driver: run each sheet operation once.
fillData()  # fill cells A2:A21 with the IDs 1..20
addItems()  # prompt for a new ID and write it to A2
duplicates()  # check the entered ID for duplicates and record date and time
fb003b38bd6d4e4bf23f79179215b9b0870320e6 | Python | ashwinishivpatil/pythonframework | /TOB_AUTOMATION_DATABASE_REPORT/EntitySubChannel.py | UTF-8 | 6,793 | 2.671875 | 3 | [] | no_license | import pandas as pd
import DatabaseConnection
class EntitySubChannel:
    """Validation suite for the stg.EntitySubChannel staging table.

    Every check fills the result fields on ``mainExcel`` and persists the
    outcome via ``DatabaseConnection.Connection.saveResultToDataBase``.

    BUG FIXES relative to the original:
      * the FAILED branch of checkForData reported module
        "EntityProductFormulary" and never persisted its result;
      * the pass/fail counters were incremented inconsistently (missing in
        the FAILED branches of CheckForSubChannels/checkCorporateEntity).
    The repeated result-recording boilerplate is factored into ``_record``.
    """

    # Cached copies of the staging tables, populated by readDataFrame().
    EntitySubChannelDataFrame = pd.DataFrame()
    EntityData = pd.DataFrame()
    # Running counters of check outcomes.
    passed = 0
    failed = 0

    def __call__(self):
        print("somthing")

    def executeScripts(self, conn, mainExcel, wb):
        """Run every EntitySubChannel validation in sequence."""
        print("In EntitySubChannel", conn)
        # mainExcel.writeHeaderToSheet("EntitySubChannel", wb)
        self.readDataFrame(conn)
        self.CheckForSubChannels(conn, mainExcel, wb)
        self.checkForBalnkSpaces(conn, mainExcel, wb)
        self.checkCorporateEntity(conn, mainExcel, wb)
        self.checkForData(conn, mainExcel, wb)

    def readDataFrame(self, conn):
        """Load stg.EntitySubChannel and stg.Entity into DataFrames."""
        print("Reading Data Frame")
        self.EntitySubChannelDataFrame = pd.read_sql("SELECT * FROM stg.EntitySubChannel ", conn)
        self.EntityData = pd.read_sql("SELECT * FROM stg.Entity ", conn)
        print(self.EntityData.head())

    def _record(self, conn, mainExcel, test_case, expected, ok,
                fail_description="None", severity="None"):
        """Fill the result fields on mainExcel, bump the counters and persist.

        :param ok: True when the check passed
        """
        mainExcel.Module = "EntitySubChannel"
        mainExcel.TestCaseName = test_case
        mainExcel.ExpectedResult = expected
        if ok:
            self.passed = self.passed + 1
            mainExcel.TestFailDescription = "None"
            mainExcel.TestFailSeverity = "None"
            mainExcel.TestCaseStatus = "PASSED"
        else:
            self.failed = self.failed + 1
            mainExcel.TestFailDescription = fail_description
            mainExcel.TestFailSeverity = severity
            mainExcel.TestCaseStatus = "FAILED"
        DatabaseConnection.Connection.saveResultToDataBase(conn, mainExcel)

    def checkForData(self, conn, mainExcel, wb):
        """Verify that stg.EntitySubChannel contains at least one row."""
        print("Check for Data Exists or not ")
        has_rows = len(self.EntitySubChannelDataFrame) > 0
        if not has_rows:
            print("FAILED")
        self._record(conn, mainExcel,
                     "Check for the data avalaiblity in the table",
                     "The EntitySubChannel Table should contain data",
                     has_rows,
                     fail_description="Data is not present in the EntitySubChannel table",
                     severity="Critical")

    def checkForBalnkSpaces(self, conn, mainExcel, wb):
        """Verify that no column other than EntityID/SubChannel contains NULLs."""
        print("Check for Blank Spaces")
        expectedList = ['EntityID', 'SubChannel']
        null_columns = self.EntitySubChannelDataFrame.columns[
            self.EntitySubChannelDataFrame.isnull().any()].tolist()
        print(null_columns)
        result = set(null_columns).difference(set(expectedList))
        print(result)
        self._record(conn, mainExcel,
                     "Check Blank space for columns",
                     "Blank space should not present any of the columns given 'EntityID', 'SubChannel' column",
                     len(result) == 0,
                     fail_description="Blanks are Present for other Coulmns" + str(result),
                     severity="Critical")

    def CheckForSubChannels(self, conn, mainExcel, wb):
        """Verify every expected sub-channel name appears in the table."""
        print(len(self.EntitySubChannelDataFrame))
        subChannelsList = self.EntitySubChannelDataFrame['SubChannel'].values.tolist()
        expectedSubchannels = set(mainExcel.EntitySubChannelList)
        print("subChannelsList", subChannelsList)
        result = expectedSubchannels.difference(set(subChannelsList))
        self._record(conn, mainExcel,
                     "Validate SubChannels Names",
                     "Given SubChannels name should be present" + str(expectedSubchannels),
                     len(result) == 0,
                     fail_description="Specified SubChannels are not present" + str(result),
                     severity="Critical")

    def checkCorporateEntity(self, conn, mainExcel, wb):
        """Verify every EGWP EntityID from stg.Entity appears in EntitySubChannel."""
        print(len(self.EntitySubChannelDataFrame))
        entity_ids_in_table = self.EntitySubChannelDataFrame['EntityID'].values.tolist()
        egwp_entities = self.EntityData[self.EntityData['EntityType'] == 'EGWP']
        egwp_ids = egwp_entities['EntityID'].values.tolist()
        ok = set(egwp_ids).issubset(set(entity_ids_in_table))
        print("PASSED" if ok else "FAILED")
        self._record(conn, mainExcel,
                     "check EGWp EntityId from Entity present under EntityID column",
                     "All EGWP Entities should be persent",
                     ok,
                     fail_description="Some Entity Id's are not present",
                     severity="Critical")
45b2a9bf4c857c3c9d2cdab298b6c5a114a22f40 | Python | xshukla/pub | /prob1.py | UTF-8 | 1,432 | 3.4375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed May 27 10:11:52 2015
# solution to ACM-ICPC 2015 world finals Problem A
# for complete problem text visit http://icpc.baylor.edu/worldfinals/problems/icpc2015.pdf
# for bugs contact: xshukla@acm.org
@author: xitij
"""
import math as m
import matplotlib.pyplot as plt
import numpy as np
# =========== Base Variables ===========
x=[]
tmpDict={}
finalDict={}
# =========== Two Functions we use ===========
def getDiff(x): # this is used later, to identify the difference
    """Populate the global tmpDict with {price difference: (earlier, later)}
    for every ordered pair of prices in x (earlier index before later index).

    NOTE(review): x.index(j) returns the FIRST occurrence, so duplicate
    prices make the position comparison wrong — confirm prices are unique.
    NOTE(review): non-positive differences are also stored (the inner
    if/else branches are identical), and equal differences overwrite each
    other since the difference is the dict key.
    """
    for i in x:
        tmp=i
        for j in x:
            if(x.index(j)>x.index(tmp)):
                diff=tmp-j
                if (diff>0):
                    #print(tmp,"-",j,"=",diff)
                    tmpDict[diff]=(tmp,j)
                else:
                    tmpDict[diff]=(tmp,j)
            else:
                pass
def price(p,a,b,c,d,n): # this is used first, to populate list with modelled prices
    """Append n modelled prices p*(sin(a*k+b)+cos(c*k+d)+2) for k=1..n to the
    global list x, then plot them.

    NOTE(review): repeated calls keep appending to the same global list.
    """
    global x
    for k in range(1,n+1):
        pr=p*(m.sin((a*k+b))+m.cos(c*k+d)+2) # modelled function
        x.append(pr)
    plot(x,n)
def plot(x, n):
    """Scatter-plot the modelled prices against the day indices 1..n."""
    days = np.arange(1, n + 1)
    plt.plot(days, x, "bo")
    plt.show()
# Generate 10 modelled prices and compute all pairwise declines.
price(42,1,23,4,8,10) # this is how price funtion is invoked
getDiff(x)
# sorted(dict) sorts the KEYS (the price differences), largest first.
finalDict = sorted(tmpDict,reverse=True)
# NOTE(review): next() raises StopIteration if tmpDict is empty — cannot
# happen here since price() always appends several values first.
i = iter(finalDict)
j=next(i)
if(j>0):
    print(j) # this is the output
else:
    j=0.00
    print(j) # this is also the output in a case there's no decline!
| true |
16c1a05cc56a70ec2f8ddd678c7a2f374ed59f39 | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/bob/5f61ee334ce941bd8955efa271680a4b.py | UTF-8 | 489 | 4.03125 | 4 | [] | no_license | #
# Skeleton file for the Python "Bob" exercise.
# Take input
def hey(what):
    """Return Bob's response to the remark *what*.

    Bob answers 'Fine. Be that way!' to silence, 'Whoa, chill out!' to
    yelling (including yelled questions), 'Sure.' to questions, and
    'Whatever.' to anything else.
    """
    what = what.strip()
    # Guard clauses, checked in precedence order (yelling beats questioning).
    # The original used the anti-idiom `what.isupper() == False`.
    if not what:
        return 'Fine. Be that way!'
    if what.isupper():
        return 'Whoa, chill out!'
    if what.endswith('?'):
        return 'Sure.'
    return 'Whatever.'
| true |
b487d82a141373f4b0bafc9b42497b150895449e | Python | alves-dev/public_botinho | /core/watson_commands.py | UTF-8 | 3,876 | 2.640625 | 3 | [] | no_license | from core.constants import WATSON_API_KEY, WATSON_VERSION, WATSON_URL, WATSON_Assistant_ID
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.assistant_v2 import *
def new_assistente_watson():
    """Build a Watson assistant client with SSL verification turned off."""
    client = assistant()
    desable_ssl(client)
    return client
def authenticator() -> IAMAuthenticator:
    '''
    Uses the WATSON_API_KEY constant to authenticate against the Watson API.
    :return: an authentication object
    '''
    autenticado = IAMAuthenticator(WATSON_API_KEY)
    return autenticado
def assistant() -> AssistantV2:
    '''
    Creates a new assistant-service client. WATSON_VERSION pins the API date
    so IBM knows which API revision to serve if it changes in the future.
    Also calls set_service_url with the WATSON_URL constant, which points to
    the IBM server hosting the assistant.
    :return: new assistant client
    '''
    cliente = AssistantV2(
        version=WATSON_VERSION,
        authenticator=authenticator()
    )
    cliente.set_service_url(WATSON_URL)
    return cliente
def desable_ssl(cliente: AssistantV2):
    '''
    Disables SSL certificate verification on the client. Not recommended!
    :param cliente: assistant client on which SSL verification is disabled
    '''
    cliente.set_disable_ssl_verification(True)
def new_session(cliente: AssistantV2) -> str:
    '''
    Creates a new session with the assistant.
    :param cliente: already-authenticated client
    :return: the session ID
    '''
    response = cliente.create_session(WATSON_Assistant_ID).get_result()
    session_id = response['session_id']
    return session_id
def delete_session(cliente: AssistantV2, session_id: str):
    '''
    Deletes a session with the assistant.
    :param cliente: assistant client the session is bound to
    :param session_id: ID of the session to delete
    '''
    cliente.delete_session(WATSON_Assistant_ID, session_id)
def send_menssage(cliente: AssistantV2, session_id: str, msg: MessageInput) -> DetailedResponse:
    '''
    Sends the message to the Watson assistant.
    :param cliente: AssistantV2 client object
    :param session_id: ID of the session the message is sent to
    :param msg: MessageInput object
    :return: an object containing the response details
    '''
    my_detailed_response = cliente.message(WATSON_Assistant_ID, session_id, input=msg)
    return my_detailed_response
def tratar_resposta(resposta: DetailedResponse) -> dict:
    """Extract the reply text and the detected intents from a Watson response.

    :param resposta: raw response returned by the assistant
    :return: dict with 'resposta' (reply fragments joined with newlines) and
        'intencoes' (list of RuntimeIntent built from the detected intents)

    NOTE: the original rebuilt the reply with repeated string concatenation
    and fetched ``get_result()`` twice; this version parses the result once
    and uses join/comprehensions.
    """
    result = resposta.get_result()
    output = result['output']

    # Join every generic text fragment, one per line (same as the manual loop).
    retorno = '\n'.join(item['text'] for item in output['generic'])

    # Convert each detected intent into a RuntimeIntent object.
    intencoes = [RuntimeIntent(item['intent'], item['confidence'])
                 for item in output['intents']]

    return {'resposta': retorno, 'intencoes': intencoes}
def criar_mensagem(text: str, intencoes: List[RuntimeIntent]) -> MessageInput:
    '''
    Builds the MessageInput object that will be sent to the assistant.
    :param text: message text
    :param intencoes: intents carried over from a previous turn
    :return: MessageInput

    BUG FIX: the original unconditionally re-assigned
    ``msg = MessageInput(text=text)`` after the if/else, so the intents
    branch never took effect and intents were silently dropped.
    '''
    if len(intencoes) > 0:
        msg = MessageInput(text=text, intents=intencoes)
    else:
        print('sem intencao')
        msg = MessageInput(text=text)
    return msg
| true |
acf2c8f6b58da584018924fb9c0fb84e8d446f2c | Python | PhySci/FC-DenseNet-Tiramisu | /gen.py | UTF-8 | 2,751 | 2.90625 | 3 | [] | no_license | import os
from random import shuffle
import numpy as np
import cv2
class Fib:
    """
    Image and mask batch generator.

    Iterates once over all files of ``img_pth`` in shuffled order and yields
    (images, masks, filenames) tuples of ``batch_size`` items.
    """
    def __init__(self, img_pth=None, mask_pth=None, batch_size=1, shape=None, padding=None, flip=False):
        """
        Constructor
        :param img_pth: directory containing the input images
        :param mask_pth: directory containing the masks (None -> masks stay zero)
        :param batch_size: number of files per batch
        :param shape: (height, width) of the output arrays
        :param padding: optional (top, bottom, left, right) reflective padding
        :param flip: when True, apply a random flip to each image/mask pair
        """
        self.img_pth = img_pth
        self.mask_pth = mask_pth
        self.batch_size = batch_size
        self.i = 0
        self.shape = shape
        self.padding = padding
        self.flip = flip

        file_list = os.listdir(img_pth)
        shuffle(file_list)
        self.train_files = file_list
        # number of batches needed to cover all files
        self.max = np.ceil(len(self.train_files) / self.batch_size)

    def __iter__(self):
        """Return the iterator itself."""
        return self

    def __next__(self):
        """Load and return the next batch as (images, masks, file names)."""
        # BUG FIX: check the stop condition BEFORE doing any work; the original
        # built (and discarded) a full zero batch before raising StopIteration.
        if self.i >= self.max:
            raise StopIteration

        start = self.i * self.batch_size
        end = (self.i + 1) * self.batch_size
        file_list = self.train_files[start:end]

        images = np.zeros([self.batch_size, self.shape[0], self.shape[1], 3])
        mask = np.zeros([self.batch_size, self.shape[0], self.shape[1]])
        for i, file in enumerate(file_list):
            img = cv2.imread(os.path.join(self.img_pth, file), -1)
            # Pick a random augmentation code; identity (0) is most likely.
            if self.flip:
                k = np.random.choice([0, 1, 2, 3], p=[0.55, 0.15, 0.15, 0.15])
            else:
                k = 0
            if self.padding is not None:
                img = cv2.copyMakeBorder(img, self.padding[0], self.padding[1], self.padding[2], self.padding[3],
                                         cv2.BORDER_REFLECT_101)
            img = np.float32(img) / 255.0
            images[i, :, :, :] = self.augment_img(img, k)

            if self.mask_pth is not None:
                # Load and augment the mask with the SAME code k as the image.
                img = cv2.imread(os.path.join(self.mask_pth, file), -1)
                if self.padding is not None:
                    img = cv2.copyMakeBorder(img, self.padding[0], self.padding[1], self.padding[2], self.padding[3],
                                             cv2.BORDER_REFLECT_101)
                # masks are stored as 16-bit; scale to {0, 1}
                img = np.int8(img / 65535)
                mask[i, :, :] = self.augment_img(img, k)

        self.i += 1
        return images, mask, file_list

    @staticmethod
    def augment_img(img, k):
        """Apply augmentation *k*: 0 = identity, 1 = vertical flip,
        2 = flip around both axes, 3 = horizontal flip."""
        if k == 0:
            return img
        elif k == 1:
            return cv2.flip(img, 0)
        elif k == 2:
            return cv2.flip(img, -1)
        elif k == 3:
            return cv2.flip(img, 1)
        # BUG FIX: the original had an unreachable `return img` after this raise.
        raise ValueError('Unknown augmentation code')
| true |
23947b8498739c2236f74111d99b266296bd500a | Python | Jachtabahn/knapsack | /intervals.py | UTF-8 | 907 | 2.53125 | 3 | [] | no_license | from bokeh.core.properties import value
from bokeh.io import show, output_file
from bokeh.plotting import figure
# Item ids and their weight-interval breakpoints (currently only the
# hard-coded `intervals` list below is plotted).
item_ids = [12, 2]
all_intervals = [
    [4, 6, 10, 14, 18, 20, 22, 24, 25, 28, 29, 31, 32, 35],
    [9, 19, 29, 30, 31, 45, 67, 111, 145, 188, 999, 1111, 2222, 9999]
]
num_items = len(all_intervals)

plot = figure(
    title='Weight intervals',
    tools='pan,reset,wheel_zoom',
    active_scroll='wheel_zoom')

red = '#e84d60'

intervals = [4, 6, 10, 15]
# Width of interval [a, b] is b - a.
# BUG FIX: the original zipped (intervals[1:], intervals[:-1]) and computed
# b - a, producing NEGATIVE widths and mis-placed rect centers.
widths = [b - a for a, b in zip(intervals[:-1], intervals[1:])]
# Center each rectangle at the midpoint a + (b - a) / 2.
xs = [a + width / 2 for a, width in zip(intervals[:-1], widths)]
num_intervals = len(xs)
print(xs)
print(widths)

plot.rect(xs, 1, width=widths, height=1, color=red, alpha=0.5)
plot.y_range.start = 0
plot.x_range.range_padding = 0.1
plot.xgrid.grid_line_color = None
plot.axis.minor_tick_line_color = None
plot.outline_line_color = None

output_file('bar_stacked.html')
show(plot)
| true |
9b188150359046b4c9b7311ef2b191d182703b2f | Python | MArpogaus/stplfcnn | /src/stplfcnn/sklearn_quantile.py | UTF-8 | 12,340 | 2.90625 | 3 | [
"ISC"
] | permissive | # Authors: David Dale dale.david@mail.ru
# License: BSD 3 clause
# patch from https://github.com/scikit-learn/scikit-learn/pull/9978
import numpy as np
import warnings
from scipy import optimize
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.linear_model.base import LinearModel
from sklearn.utils import check_X_y
from sklearn.utils import check_consistent_length
from sklearn.utils.extmath import safe_sparse_dot
def _smooth_quantile_loss_and_gradient(
        w, X, y, quantile, alpha, l1_ratio, sample_weight, gamma=0):
    """ Smooth approximation to quantile regression loss and gradient.

    Main loss and l1 penalty are both approximated by the same trick
    from Chen & Wei, 2005: the non-differentiable kink of the pinball
    loss (and of |w|) is replaced by a quadratic on a band of width gamma.
    gamma=0 gives the exact, non-smooth loss.

    Parameters: w is the coefficient vector, optionally with the intercept
    appended as the last element; X, y, sample_weight are the training data;
    quantile is the target quantile in (0, 1); alpha and l1_ratio define the
    ElasticNet penalty. Returns (loss, grad).
    """
    _, n_features = X.shape
    # The intercept is present iff w has one more element than X has columns.
    fit_intercept = (n_features + 1 == w.shape[0])
    if fit_intercept:
        intercept = w[-1]
    else:
        intercept = 0  # regardless of len(w)
    w = w[:n_features]

    # Discriminate positive, negative and small residuals
    linear_loss = y - safe_sparse_dot(X, w)
    if fit_intercept:
        linear_loss -= intercept
    positive_error = linear_loss > quantile * gamma
    negative_error = linear_loss < (quantile - 1) * gamma
    small_error = ~ (positive_error | negative_error)

    # Calculate loss due to regression error: linear tails, quadratic middle.
    # (The `gamma if gamma != 0 else 1` guard avoids 0/0 — the small_error
    # mask is empty when gamma == 0, so the divisor value is irrelevant.)
    regression_loss = (
        positive_error * (linear_loss*quantile - 0.5*gamma*quantile**2) +
        small_error * 0.5*linear_loss**2 / (gamma if gamma != 0 else 1) +
        negative_error * (linear_loss*(quantile-1) - 0.5*gamma*(quantile-1)**2)
    ) * sample_weight
    loss = np.sum(regression_loss)

    if fit_intercept:
        grad = np.zeros(n_features + 1)
    else:
        grad = np.zeros(n_features + 0)

    # Gradient due to the regression error
    weighted_grad = (positive_error * quantile +
                     small_error * linear_loss / (gamma if gamma != 0 else 1) +
                     negative_error * (quantile-1)) * sample_weight
    grad[:n_features] -= safe_sparse_dot(weighted_grad, X)
    if fit_intercept:
        grad[-1] -= np.sum(weighted_grad)

    # Gradient and loss due to the ridge penalty (never applied to intercept)
    grad[:n_features] += alpha * (1 - l1_ratio) * 2. * w
    loss += alpha * (1 - l1_ratio) * np.dot(w, w)

    # Gradient and loss due to the lasso penalty
    # for smoothness replace abs(w) with w^2/(2*gamma)+gamma/2 for abs(w)<gamma
    if gamma > 0:
        large_coef = np.abs(w) > gamma
        small_coef = ~large_coef
        loss += alpha*l1_ratio*np.sum(large_coef*np.abs(w) +
                                      small_coef*(w**2/(2*gamma) + gamma/2))
        grad[:n_features] += alpha*l1_ratio*(large_coef*np.sign(w) +
                                             small_coef*w/gamma)
    else:
        # exact (sub)gradient of the l1 term when no smoothing is requested
        loss += alpha * l1_ratio * np.sum(np.abs(w))
        grad[:n_features] += alpha * l1_ratio * np.sign(w)
    return loss, grad
class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator):
"""Linear regression model that is robust to outliers.
The Quantile Regressor optimizes the skewed absolute loss
``(y - X'w) (q - [y - X'w < 0])``, where q is the desired quantile.
Optimization is performed as a sequence of smooth optimization problems.
Read more in the :ref:`User Guide <quantile_regression>`
.. versionadded:: 0.20
Parameters
----------
quantile : float, strictly between 0.0 and 1.0, default 0.5
The quantile that the model predicts.
max_iter : int, default 100
Maximum number of iterations that scipy.optimize.minimize
should run for.
alpha : float, default 0.0001
Constant that multiplies ElasticNet penalty term.
l1_ratio : float, default 0.0
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
warm_start : bool, default False
This is useful if the stored attributes of a previously used model
has to be reused. If set to False, then the coefficients will
be rewritten for every call to fit.
``warm_start`` does not significantly speed up the convergence
if the model optimizes different cost functions, as ``gamma`` converges
to 0. It is therefore recommended to set small ``gamma``
if ``warm_start`` is set to True.
fit_intercept : bool, default True
Whether or not to fit the intercept. This can be set to False
if the data is already centered around the origin.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
gamma : float, default 1e-2
Starting value for smooth approximation.
Absolute loss is replaced with quadratic for ``|error| < gamma``.
Lasso penalty is replaced with quadratic for ``|w| < gamma``.
``gamma = 0`` gives exact non-smooth loss function.
The algorithm performs consecutive optimizations with gamma
decreasing by factor of ``gamma_decrease``,
until ``xtol`` criterion is met,
or until ``max_iter`` is exceeded.
gamma_decrease: float, default 0.1
The factor by which ``gamma`` is multiplied at each iteration.
n_gamma_decreases: int, default 10
Maximal number of iterations of approximation of the cost function.
At each iteration, ``gamma`` is multiplied by a factor
of ``gamma_decrease``
gtol : float, default 1e-4
The smooth optimizing iteration will stop when
``max{|proj g_i | i = 1, ..., n}`` <= ``gtol``
where pg_i is the i-th component of the projected gradient.
xtol : float, default 1e-6
Global optimization will stop when ``|w_{t-1} - w_t|`` < ``xtol``
where w_t is result of t'th approximated optimization.
Attributes
----------
coef_ : array, shape (n_features,)
Features got by optimizing the Huber loss.
intercept_ : float
Bias.
n_iter_ : int
Number of iterations that scipy.optimize.mimimize has run for.
References
----------
.. [1] Koenker, R., & Bassett Jr, G. (1978). Regression quantiles.
Econometrica: journal of the Econometric Society, 33-50.
.. [2] Chen, C., & Wei, Y. (2005).
Computational issues for quantile regression.
Sankhya: The Indian Journal of Statistics, 399-417.
"""
    def __init__(self, quantile=0.5,
                 max_iter=10000, alpha=0.0001, l1_ratio=0.0,
                 warm_start=False, fit_intercept=True,
                 normalize=False, copy_X=True,
                 gamma=1e-2, gtol=1e-4, xtol=1e-6,
                 gamma_decrease=0.1, n_gamma_decreases=100):
        # Store hyperparameters unchanged; validation is deferred to fit()
        # per the scikit-learn estimator convention.
        self.quantile = quantile
        self.max_iter = max_iter
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.warm_start = warm_start
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self.normalize = normalize
        self.gtol = gtol
        self.xtol = xtol
        self.gamma = gamma
        self.gamma_decrease = gamma_decrease
        self.n_gamma_decreases = n_gamma_decreases
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        sample_weight : array-like, shape (n_samples,)
            Weight given to each sample.
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(
            X, y, copy=False, accept_sparse=['csr'], y_numeric=True)
        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        if sample_weight is not None:
            sample_weight = np.array(sample_weight)
            check_consistent_length(y, sample_weight)
        else:
            sample_weight = np.ones_like(y)
        if self.quantile >= 1.0 or self.quantile <= 0.0:
            raise ValueError(
                "Quantile should be strictly between 0.0 and 1.0, got %f"
                % self.quantile)
        # Warm start resumes from the previous solution; otherwise start at
        # zero (one extra slot at the end for the intercept when fitted).
        if self.warm_start and hasattr(self, 'coef_'):
            parameters = np.concatenate(
                (self.coef_, [self.intercept_]))
        else:
            if self.fit_intercept:
                parameters = np.zeros(X.shape[1] + 1)
            else:
                parameters = np.zeros(X.shape[1] + 0)
        # solve sequence of optimization problems
        # with different smoothing parameter
        total_iter = []
        loss_args = (X, y, self.quantile, self.alpha, self.l1_ratio,
                     sample_weight)
        for i in range(self.n_gamma_decreases):
            # Smoothing parameter shrinks geometrically each outer pass.
            gamma = self.gamma * self.gamma_decrease ** i
            result = optimize.minimize(
                _smooth_quantile_loss_and_gradient,
                parameters,
                args=loss_args + (gamma, ),
                method='L-BFGS-B',
                jac=True,
                options={
                    'gtol': self.gtol,
                    'maxiter': self.max_iter - sum(total_iter),
                }
            )
            total_iter.append(result['nit'])
            prev_parameters = parameters
            parameters = result['x']
            # for lasso, replace parameters with exact zero,
            # if this decreases the cost function
            if self.alpha * self.l1_ratio > 0:
                # Coordinate-wise hard-thresholding against the exact
                # (gamma=0, non-smooth) objective.
                value, _ = _smooth_quantile_loss_and_gradient(parameters,
                                                              *loss_args,
                                                              gamma=0)
                for j in range(len(parameters)):
                    new_parameters = parameters.copy()
                    old_param = new_parameters[j]
                    new_parameters[j] = 0
                    new_value, _ = _smooth_quantile_loss_and_gradient(
                        new_parameters, *loss_args, gamma=0)
                    # check if the cost function decreases,
                    # or increases, but by little, and param is small anyway
                    if new_value <= value \
                            or np.abs(old_param) < self.xtol \
                            and new_value < value + self.gtol:
                        value = new_value
                        parameters = new_parameters
            # stop if solution does not change between subproblems
            if np.linalg.norm(prev_parameters-parameters) < self.xtol:
                break
            # stop if maximum number of iterations is exceeded
            if sum(total_iter) >= self.max_iter:
                break
            # stop if gamma is already zero
            if gamma == 0:
                break
        # do I really need to issue this warning?
        # Its reason is lineSearchError, which cannot be easily fixed
        if not result['success']:
            warnings.warn("QuantileRegressor did not converge:" +
                          " Scipy solver terminated with '%s'."
                          % str(result['message'])
                          )
        self.n_iter_ = sum(total_iter)
        self.gamma_ = gamma
        self.total_iter_ = total_iter
        self.coef_ = parameters[:X.shape[1]]
        # do not use self.set_intercept_, because it assumes intercept is zero
        # if the data is normalized, which is false in this case
        if self.fit_intercept:
            self.coef_ = self.coef_ / X_scale
            self.intercept_ = parameters[-1] + y_offset \
                - np.dot(X_offset, self.coef_.T)
        else:
            self.intercept_ = 0.0
        return self
| true |
3511139009995b3f4a4ff3c418ef54ab5a62c43e | Python | rangapv/pyapps | /pydb/pysqlIns.py | UTF-8 | 354 | 2.984375 | 3 | [] | no_license | import sqlite3
# Demo script: create table T1 in new.db (if needed) and insert one row.
comm = sqlite3.connect('new.db')
print("hello")
print("success in Db")
# Create the table on first run so the script is self-contained; the original
# left this commented out and assumed T1 already existed.
comm.execute('''CREATE TABLE IF NOT EXISTS T1
       (ID INT PRIMARY KEY     NOT NULL,
       NAME           TEXT    NOT NULL);''')
print("Table created success")
# NOTE(review): re-running the script will fail with a PRIMARY KEY conflict
# because ID 1 is inserted unconditionally.
comm.execute("INSERT INTO T1 (ID,NAME) \
      VALUES (01, 'Ranga')")
print("Insert in Tabel success")
# Bug fix: without an explicit commit, the implicit transaction opened by the
# INSERT is rolled back when the connection closes, silently losing the row.
comm.commit()
comm.close()
| true |
1b43e0d41f787000e9b9dbf19c80f07c3c867b10 | Python | RexAevum/Algorithms | /AlgoExp/Medium/BST_Validation.py | UTF-8 | 646 | 3.484375 | 3 | [] | no_license | # This is an input class. Do not edit.
class BST:
    """Plain binary-search-tree node: a value plus optional child links."""

    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None
def validateBst(tree):
    """Return True iff the whole tree satisfies the BST property."""
    return helpBST(tree, float("-inf"), float("+inf"))


# Time  - O(n): every node is visited once.
# Space - O(d): recursion depth equals the depth of the tree.
def helpBST(node, min, max):
    """Check that every value in this subtree lies in [min, max)."""
    if node is None:
        return True
    if node.value < min or node.value >= max:
        return False
    # Left descendants must stay strictly below this node's value;
    # right descendants must be at or above it.
    return (helpBST(node.left, min, node.value)
            and helpBST(node.right, node.value, max))
2d61cd47adc833b50d1d8a6b7214efd92004320a | Python | mabrao/ledProject | /projeto.py | UTF-8 | 8,862 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env python
import opc #this is for connecting to simulator and sending pixels
import time #this is for delay functions
# Frame buffer: one RGB tuple per LED on a 60x6 strip (indices 0..359).
led_colour=[(0,0,0)]*360 #main list with 360 tuples
#declaring positions for letters to spell out brazil
B = [63,64,65,66,123,126,183,184,185,186,243,246,303,304,305,306] #values for letter B
R = [130,132,133,190,191,250,310] #values for letter r
A = [138,197,199,256,257,258,259,260,315,321] #values for letter a
S = [156,157,158,159,216,276,277,278,279,336,337,338,339] #values for letter s
I = [163,164,165,224,284,343,344,345] #values for letter i
L = [169,229,289,349,350,351] #values for letter l
#values for colours I will be using (RGB tuples)
black = (0,0,0)
white = (255,255,255)
purple = (175,0,164)
yellow = (255,255,0)
blue = (59,0,255)
baby_blue = (66,245,239)
red = (233,0,0)
green = (81,186,52)
#this is for a user input check:
acceptable_colours = ['r', 'g', 'b', 'R', 'G', 'B']
client = opc.Client('localhost:7890') #connecting to simulator/lights
#declaring functions that will create the flags:
def brazil():
    """Draw (half of) the Brazilian flag into led_colour.

    Order matters: the green letters spelling BRASIL are painted first, then
    the yellow diamond, blue circle and white band overwrite pixels inside
    their regions.
    """
    for count, item in enumerate(led_colour):#iterate through 360 LEDs
        #if count inside position of letters, pixel will be green:
        if count in B:
            led_colour[count] = green
        if count in R:
            led_colour[count] = green
        if count in A:
            led_colour[count] = green
        if count in S:
            led_colour[count] = green
        if count in I:
            led_colour[count] = green
        if count in L:
            led_colour[count] = green
        #changed tuple for half of brazil flag: (I am doing half because the full flag would not work)
        # Yellow diamond: widening index bands, one per 60-LED row.
        if (count==29) or (count>87 and count<91) or (count>146 and count<152) or (count>205 and count<213) or (count>264 and count<274) or (count>323 and count<335):
            led_colour[count]= yellow
        # Blue circle sits inside the yellow bands.
        if (count > 147 and count < 151) or (count > 206 and count < 212) or (count > 265 and count < 273) or (count > 324 and count < 334):
            led_colour[count] = blue
        # Single white pixel per row for the flag's white band.
        if(count == 149 or count == 209 or count == 269 or count == 329):
            led_colour[count] = white
def italy():
    """Paint the Italian tricolour: three vertical 20-LED stripes per 60-LED row
    (green, white, red)."""
    for idx in range(len(led_colour)):
        column = idx % 60
        if column < 20:
            led_colour[idx] = green
        elif column < 40:
            led_colour[idx] = white
        else:
            led_colour[idx] = red
def france():
    """Paint the French tricolour: three vertical 20-LED stripes per 60-LED row
    (blue, white, red)."""
    for idx in range(len(led_colour)):
        column = idx % 60
        if column < 20:
            led_colour[idx] = blue
        elif column < 40:
            led_colour[idx] = white
        else:
            led_colour[idx] = red
def netherlands():
    """Paint the Dutch flag as three horizontal 120-LED bands:
    red (0-119), white (120-239), blue (240-359)."""
    for idx in range(len(led_colour)):
        band = (idx % 360) // 120
        if band == 0:
            led_colour[idx] = red
        elif band == 1:
            led_colour[idx] = white
        else:
            led_colour[idx] = blue
x = 0
def menu():
    """Prompt the user for a flag choice (1-4) and draw it into led_colour."""
    print('\nPlease choose a flag to be displayed:')
    print('\n1. Brazil\n2. Italy\n3. France\n4. Netherlands\n')
    choice = int(input())
    # Re-prompt until the answer is one of the four menu entries.
    while choice not in (1, 2, 3, 4):
        print('\nPlease enter a valid input!\n')
        choice = int(input())
    # Dispatch table instead of an if/elif chain.
    flag_painters = {1: brazil, 2: italy, 3: france, 4: netherlands}
    return flag_painters[choice]()
def menu_flashing():
    """Ask how many times (1-10) to flash the current flag, then flash it."""
    print("\nHow many times do you want to flash the selected flag? (value must be between 1 and 10)")
    count = int(input())
    while not 1 <= count <= 10:
        print("\nPlease enter a valid input:\t")
        count = int(input())
    flash(count)
def menu_weather():
    """Run the rain animation, then the sun-and-rain animation, with banners."""
    print("\nWe are in london, so we might as well make it rain!\n")
    rain()
    print("\nOk, now let's have a some sun in between the rain\n")
    sun_and_rain()
    print("\n\n")
#fade function:
def fade(colour):
    """Fade one colour channel of the whole strip from 0 up to 255 and back.

    ``colour`` selects the channel: 'r'/'R' = red, 'g'/'G' = green,
    'b'/'B' = blue.  Any other value is silently ignored, matching the
    original if/elif chain.  One frame is pushed every 10 ms.

    Refactor: the original duplicated the two ramp loops three times (once
    per channel); a channel index removes the copy-paste.
    """
    channel_by_key = {'r': 0, 'R': 0, 'g': 1, 'G': 1, 'b': 2, 'B': 2}
    channel = channel_by_key.get(colour)
    if channel is None:
        return
    fade_amount = 1
    rgb = [0, 0, 0]
    # Ramp the selected channel 0 -> 255.
    while rgb[channel] < 255:
        rgb[channel] += fade_amount
        time.sleep(0.01)
        client.put_pixels([tuple(rgb)] * 360)
    # Ramp it back 255 -> 0.
    while rgb[channel] > 0:
        rgb[channel] -= fade_amount
        time.sleep(0.01)
        client.put_pixels([tuple(rgb)] * 360)
def flash(num_times):
    """Blink the currently selected flag on and off ``num_times`` times."""
    for _ in range(num_times):
        # Blank frame ...
        client.put_pixels([black] * 360)
        time.sleep(0.5)
        # ... then the flag stored in led_colour.
        client.put_pixels(led_colour)
        time.sleep(0.5)
def sun_and_rain():
    """Sweep across the strip, painting even pixels baby blue and odd pixels
    yellow, one 30 ms frame per pixel."""
    frame = [black] * 360
    client.put_pixels(frame)
    for pos in range(360):
        frame[pos] = baby_blue if pos % 2 == 0 else yellow
        time.sleep(0.03)
        client.put_pixels(frame)
def rain():
    """Fill every even-indexed pixel with baby blue, one 30 ms frame per drop."""
    frame = [black] * 360
    client.put_pixels(frame)
    # Only even positions are painted (odd ones produce no frame at all).
    for pos in range(0, 360, 2):
        frame[pos] = baby_blue
        time.sleep(0.03)
        client.put_pixels(frame)
# Main interactive loop: pick a flag, flash it, fade a colour, run weather FX.
while 1:
    menu()
    client.put_pixels(led_colour) #sends values to led simulator
    #need to send it twice if not constantly sending values
    #due to interpolation setting on fadecandy
    client.put_pixels(led_colour)
    time.sleep(3) #adds a delay
    menu_flashing() #asks for user input and then flashes selected flag
    #this is the menu for fading one of three colors:
    print("\nchoose a colour to fade: enter r for red, g for green or b for blue:\t")
    z = input() #asks for user input and assign to a var z
    while z not in acceptable_colours: #user gets stuck in this loop if he does not enter an acceptable input
        print("\nPlease enter a valid input!")
        print("\nchoose a colour to fade: enter r for red, g for green or b for blue:\t")
        z = input()
    fade(z) #call the fade function
    menu_weather() #make it rain and make it sunny
| true |
942a607154077d7064b28682fdfde73cf29094d1 | Python | gopaluniphani/remote-lab-assistance | /rla/labs/views.py | UTF-8 | 2,098 | 2.546875 | 3 | [] | no_license | from django.shortcuts import render
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .models import Lab, ActiveLab, LabCycle, ActiveLabCycle
from instructors.models import Instructor
import json
def index(request):
    """Render the labs landing page."""
    template_name = 'labs/index.html'
    return render(request, template_name)
@csrf_exempt
def addlab(request):
    """Clone a template Lab (and its cycles) into a new ActiveLab.

    Expects a JSON POST body with ``instructor_id`` and ``lab_id``; returns
    the new ActiveLab id on success, 403 for non-POST, 500 on failure.
    """
    if request.method != 'POST':
        return JsonResponse(status=403, data={"message": "Action not allowed"})
    try:
        data = json.loads(request.body.decode('utf-8'))
        instructor_id = int(data['instructor_id'])
        lab_id = data['lab_id']
        lab = Lab.objects.get(id=lab_id)
        instructor = Instructor.objects.get(id=instructor_id)
        active_lab = ActiveLab.objects.create(
            code=lab.code, name=lab.name, instructor=instructor)
        # Copy every cycle of the template lab onto the new active lab.
        for labcycle in lab.labcycles.all():
            ActiveLabCycle.objects.create(
                name=labcycle.name, description=labcycle.description, lab=active_lab)
        return JsonResponse(data={"id": active_lab.id, "message": "Lab Added Successfully"})
    except Exception as e:
        print(e)
        # Bug fix: an exception object is not JSON-serializable, so returning
        # it directly made JsonResponse itself raise; serialize it as text.
        return JsonResponse(status=500, data={"message": str(e)})
@csrf_exempt
def addstudents(request):
    """Enroll students into an ActiveLab and all of its cycles.

    Expects a JSON POST body with ``students`` (list of ids) and ``lab_id``;
    returns 403 for non-POST and 500 on failure.
    """
    if request.method != 'POST':
        return JsonResponse(status=403, data={"message": "Action not allowed"})
    try:
        data = json.loads(request.body.decode('utf-8'))
        students = list(map(int, data['students']))
        lab_id = int(data['lab_id'])
        print(students, lab_id)
        lab = ActiveLab.objects.get(id=lab_id)
        cycles = lab.labcycles.all()
        for student in students:
            lab.students.add(student)
            for cycle in cycles:
                cycle.students.add(student)
        lab.save()
        for cycle in cycles:
            cycle.save()
        return JsonResponse(data={"message": "Students added to lab succesfully"})
    except Exception as e:
        print(e)
        # Bug fix: an exception object is not JSON-serializable, so returning
        # it directly made JsonResponse itself raise; serialize it as text.
        return JsonResponse(status=500, data={"message": str(e)})
| true |
# Split the bill evenly among the group (no rounding and no zero-people guard).
totalPrice = int(input("Please enter total Price: "))
people = int(input("Please enter how many your are: "))
print(f"You guys have to pay {totalPrice / people}.")
| true |
c0827dd3822113ad24ac9fb50f9ef0b56ff3353e | Python | cybelewang/leetcode-python | /code1017ConvertToBase-2.py | UTF-8 | 1,079 | 4.125 | 4 | [] | no_license | """
1017 Convert to Base -2
Given a number N, return a string consisting of "0"s and "1"s that represents its value in base -2 (negative two).
The returned string must have no leading zeroes, unless the string is "0".
Example 1:
Input: 2
Output: "110"
Explanation: (-2) ^ 2 + (-2) ^ 1 = 2
Example 2:
Input: 3
Output: "111"
Explanation: (-2) ^ 2 + (-2) ^ 1 + (-2) ^ 0 = 3
Example 3:
Input: 4
Output: "100"
Explanation: (-2) ^ 2 = 4
Note:
0 <= N <= 10^9
class Solution:
    def baseNeg2(self, N):
        """Return N written in base -2 as a string of '0'/'1' digits.

        Python's ``N % -2`` is either 0 or -1, so a nonzero remainder means
        the current negabinary digit is 1 and N must be decremented before
        the exact division by -2.  Digits are produced least-significant
        first and reversed at the end.
        """
        bits = []
        while N:
            if N % (-2):
                bits.append('1')
                N -= 1
            else:
                bits.append('0')
            N //= -2
        return ''.join(reversed(bits)) or '0'
# Quick smoke test; switch N to try other values.
N = 0 # expect '0'
#N = 15 # expect '10011'
print(Solution().baseNeg2(N))
4723dcded94cdbe611286de03a1ec19f66bbf296 | Python | RoyceMou/PoCC | /Assignment 2/client.py | UTF-8 | 2,054 | 2.96875 | 3 | [] | no_license | #!/bin/python
import sys
import httplib
import time
PORT = '8080'
def request(connection, path, display_response=False, display_time=False):
    # Issue a GET over an existing httplib connection and time the round trip.
    # Returns the elapsed wall-clock seconds; optionally echoes the response
    # body and the timing to stdout.  (Python 2 code: print statements.)
    start_time = time.time()
    connection.request('GET', path)
    response = connection.getresponse().read()
    end_time = time.time()
    if display_response:
        print 'Response from server:', response
    time_elapsed = end_time - start_time
    if display_time:
        print 'Connection request took {0} seconds'.format(time_elapsed)
    return time_elapsed
def main(ip):
# @@@ NOTE @@@
# In your code, you should first start the main client-facing server on the
# horizon cloud. See my sample code nova_server_create.py on how to do
# this. You will need to do a bit more to that file so that it can
# be imported here and you can use the functions.
#
# Once the main server is active, also proceed to start the first VM on the
# 3rd tier. Inform the client-facing server the IP address of the
# 3rd tier VM so that the client-facing server can relay your requests
# to that VM thereafter.
print 'Connecting to server: {0}'.format(ip)
conn = httplib.HTTPConnection(ip, PORT)
num_times = 10
print 'Sending request for the dummy op {0} times'.format(num_times)
for i in range(1, num_times):
time_elapsed = request(conn, '/dummy_op', display_response=True, display_time=True)
print 'Time elapsed:', time_elapsed
# # sending a different kind of request. Here we send the autoscale
# # request.
# # @@@ Note @@@
# # I have not shown any code to start the second VM on the 3rd tier.
# # You should be including the IP addr of the 2nd VM on the 3rd tier
# # in this autoscale request so the client-facing server now has the
# # knowledge of the 2nd VM in the 3rd tier.
# invoke main: usage is `python client.py <ip_addr>`; a missing argument
# raises IndexError, which is caught to print the usage string.
if __name__ == '__main__':
    try:
        ip = sys.argv[1]
        sys.exit(main(ip))
    except IndexError:
        print 'usage: python client.py <ip_addr>'
        # e.g.: python client.py 129.59.107.80
2158c5c4a7dca02b2c9e0b1f3406eada49e6dbe6 | Python | mariobru/book-recommender | /src/githubRequestAuthorized.py | UTF-8 | 428 | 2.65625 | 3 | [] | no_license | # Importing Libraries
import os
from dotenv import load_dotenv
import requests
load_dotenv()
def githubRequestAuthorized(isbn):
    """Query the Google Books volumes API for a book by ISBN.

    NOTE(review): despite the historical name, this talks to Google Books,
    not GitHub.  Requires the GOOGLE_BOOKS_API_TOKEN environment variable
    (loaded from .env at import time).

    Returns the decoded JSON payload from the API.
    Raises ValueError if no token is configured.
    """
    authToken = os.getenv("GOOGLE_BOOKS_API_TOKEN")
    if not authToken:
        raise ValueError("NECESITAS UN TOKEN")  # Spanish: "you need a token"
    url = "https://www.googleapis.com/books/v1/volumes?q=isbn:{}&key={}".format(isbn, authToken)
    # Bug fix: requests.get without a timeout can hang forever on a stalled
    # connection; bound the wait.
    res = requests.get(url, timeout=10)
    data = res.json()
    return data
af159b7b4d51fd98c4855e5465fdea8cf2364295 | Python | tkkuehn/aoc2018 | /day13/day13_1.py | UTF-8 | 5,986 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python3
# Advent of Code 2018, day 13 (part 1): load the track map, one string per row.
contents = []
with open('./resources/input.txt', 'r') as f:
    contents = f.read().splitlines()
# Parse the map into:
#   track[(x, y)] -> segment kind ('vertical', 'horizontal', 'junction', or
#                    one of four curve kinds), and
#   carts         -> list of {'pos': (x, y), 'facing': direction} dicts.
# A cart glyph also implies the track piece underneath it.
track = {}
carts = []
y_len = len(contents)
x_len = max([len(x) for x in contents])
for i in range(len(contents)):
    line = contents[i]
    for j in range(len(line)):
        section = line[j]
        if section == '|':
            track[(j, i)] = 'vertical'
        elif section == '-':
            track[(j, i)] = 'horizontal'
        elif section == '^':
            track[(j, i)] = 'vertical'
            carts.append({'pos': (j, i), 'facing': 'up'})
        elif section == 'v':
            track[(j, i)] = 'vertical'
            carts.append({'pos': (j, i), 'facing': 'down'})
        elif section == '>':
            track[(j, i)] = 'horizontal'
            carts.append({'pos': (j, i), 'facing': 'right'})
        elif section == '<':
            track[(j, i)] = 'horizontal'
            carts.append({'pos': (j, i), 'facing': 'left'})
        # A curve glyph is ambiguous on its own; peek at the cell to its right
        # to decide which pair of directions it connects.
        elif section == '\\':
            if (j + 1 < len(line)) and line[j + 1] in ('-', '<', '>', '+'):
                track[(j, i)] = 'up/right'
            else:
                track[(j, i)] = 'down/left'
        elif section == '/':
            if (j + 1 < len(line)) and line[j + 1] in ('-', '<', '>', '+'):
                track[(j, i)] = 'down/right'
            else:
                track[(j, i)] = 'up/left'
        elif section == '+':
            track[(j, i)] = 'junction'
        elif section == ' ':
            pass
        else:
            print(f'Unknown character: {section}')
# Re-render the parsed track as ASCII (debug aid; the print is commented out).
for j in range(y_len):
    line = []
    for i in range(x_len):
        if (i, j) in track:
            if track[(i, j)] == 'vertical':
                line.append('|')
            elif track[(i, j)] == 'horizontal':
                line.append('-')
            elif track[(i, j)] == 'junction':
                line.append('+')
            elif track[(i, j)] in ['up/right', 'down/left']:
                line.append('\\')
            elif track[(i, j)] in ['down/right', 'up/left']:
                line.append('/')
        else:
            line.append(' ')
    # print(''.join(line))
# Every cart turns left at its first junction, then cycles left/straight/right.
for cart in carts:
    cart['next_turn'] = 'left'
collision = False
collision_site = (0, 0)
t = 0
# Step the simulation one tick at a time until two carts share a cell.
# NOTE(review): AoC orders carts by (y, x) and detects collisions as each
# cart moves; this sorts by y only and checks after the whole tick — confirm
# this still yields the right answer for the puzzle input.
while not collision:
    carts.sort(key=lambda cart: cart['pos'][1])
    for cart in carts:
        pos = cart['pos']
        facing = cart['facing']
        next_pos = pos
        track_type = track[pos]
        # Straight segments: keep heading, move one cell.
        if track_type == 'vertical':
            if facing == 'up':
                next_pos = (pos[0], pos[1] - 1)
            else:
                next_pos = (pos[0], pos[1] + 1)
        elif track_type == 'horizontal':
            if facing == 'right':
                next_pos = (pos[0] + 1, pos[1])
            else:
                next_pos = (pos[0] - 1, pos[1])
        # Curves: the new heading depends on the incoming direction.
        elif track_type == 'up/right':
            if facing == 'down':
                next_pos = (pos[0] + 1, pos[1])
                facing = 'right'
            else:
                next_pos = (pos[0], pos[1] - 1)
                facing = 'up'
        elif track_type == 'up/left':
            if facing == 'down':
                next_pos = (pos[0] - 1, pos[1])
                facing = 'left'
            else:
                next_pos = (pos[0], pos[1] - 1)
                facing = 'up'
        elif track_type == 'down/right':
            if facing == 'up':
                next_pos = (pos[0] + 1, pos[1])
                facing = 'right'
            else:
                next_pos = (pos[0], pos[1] + 1)
                facing = 'down'
        elif track_type == 'down/left':
            if facing == 'up':
                next_pos = (pos[0] - 1, pos[1])
                facing = 'left'
            else:
                next_pos = (pos[0], pos[1] + 1)
                facing = 'down'
        # Junctions: apply the cart's cyclic left/straight/right choice.
        elif track_type == 'junction':
            if cart['next_turn'] == 'left':
                if facing == 'up':
                    next_pos = (pos[0] - 1, pos[1])
                    facing = 'left'
                elif facing == 'right':
                    next_pos = (pos[0], pos[1] - 1)
                    facing = 'up'
                elif facing == 'down':
                    next_pos = (pos[0] + 1, pos[1])
                    facing = 'right'
                elif facing == 'left':
                    next_pos = (pos[0], pos[1] + 1)
                    facing = 'down'
                cart['next_turn'] = 'straight'
            elif cart['next_turn'] == 'straight':
                if facing == 'up':
                    next_pos = (pos[0], pos[1] - 1)
                elif facing == 'right':
                    next_pos = (pos[0] + 1, pos[1])
                elif facing == 'down':
                    next_pos = (pos[0], pos[1] + 1)
                elif facing == 'left':
                    next_pos = (pos[0] - 1, pos[1])
                cart['next_turn'] = 'right'
            elif cart['next_turn'] == 'right':
                if facing == 'up':
                    next_pos = (pos[0] + 1, pos[1])
                    facing = 'right'
                elif facing == 'right':
                    next_pos = (pos[0], pos[1] + 1)
                    facing = 'down'
                elif facing == 'down':
                    next_pos = (pos[0] - 1, pos[1])
                    facing = 'left'
                elif facing == 'left':
                    next_pos = (pos[0], pos[1] - 1)
                    facing = 'up'
                cart['next_turn'] = 'left'
        else:
            print(f'Unknown track type: {track_type}')
        cart['pos'] = next_pos
        cart['facing'] = facing
    # After all carts have moved, look for two carts on the same cell.
    cart_positions = set()
    for new_cart in carts:
        cart_pos = new_cart['pos']
        if cart_pos in cart_positions:
            collision = True
            collision_site = cart_pos
            break
        else:
            cart_positions.add(cart_pos)
    t += 1
print(collision_site)
| true |
6491ba8b67ec11d2f7c77b1ed7ed8d95dee06a0a | Python | zy1417548204/PAPER-In-CODE | /Bi-RCNN-Relation-Classification/data_generator.py | UTF-8 | 6,036 | 2.5625 | 3 | [] | no_license | import numpy as np
import copy
from config_environment import *
from Biutil import *
class DataGenerator(object):
def __init__(self, data_file, inverse_other=True):
data = load_object(data_file)
if not inverse_other:
data = self.re_inverse(data, len(data["sentence_label_train"]), 9) # other类的sdp结果不翻转,恢复原序
self.word_vec_matrix = data["word_vec_matrix"]
self.num_train_data = 7109
self.train_data = {
"sdp_words_index": data["sdp_words_index_train"][0:self.num_train_data],
"sdp_rev_words_index": data["sdp_words_index_rev_train"][0:self.num_train_data],
"sdp_rels_index": data["sdp_rels_index_train"][0:self.num_train_data],
"sdp_rev_rels_index": data["sdp_rels_index_rev_train"][0:self.num_train_data],
"sentence_label": data["sentence_label_train"][0:self.num_train_data],
}
self.valid_data = {
"sdp_words_index": data["sdp_words_index_train"][self.num_train_data:],
"sdp_rev_words_index": data["sdp_words_index_rev_train"][self.num_train_data:],
"sdp_rels_index": data["sdp_rels_index_train"][self.num_train_data:],
"sdp_rev_rels_index": data["sdp_rels_index_rev_train"][self.num_train_data:],
"sentence_label": data["sentence_label_train"][self.num_train_data:],
}
self.test_data = {
"sdp_words_index": data["sdp_words_index_test"],
"sdp_rev_words_index": data["sdp_words_index_rev_test"],
"sdp_rels_index": data["sdp_rels_index_test"],
"sdp_rev_rels_index": data["sdp_rels_index_rev_test"],
"sentence_label": data["sentence_label_test"],
}
self._index_in_epoch = 0
self._epochs_completed = 0
self.shuffled_indices = np.random.permutation(np.arange(self.num_train_data))
def re_inverse(self, data, length, id):
for i in len(0, length):
if data["sentence_label_train"][i] == id:
data["sdp_words_rev_index_train"][i] = data["sdp_words_index_train"][i]
data["sdp_rels_rev_index_train"][i] = data["sdp_rels_index_train"][i]
return data
def get_is_completed(self):
if self._epochs_completed == 0:
return False
else:
return True
def reset_is_completed(self):
self._epochs_completed = 0
def get_batch_length(self, sdp_batch):
length_batch = []
batch_size = len(sdp_batch)
for i in range(0, batch_size):
length_batch.append(len(sdp_batch[i]))
return np.array(length_batch, dtype=int)
def pad_to_matrix(self, sdp_batch, dtype):
length_batch = self.get_batch_length(sdp_batch)
pad_sdp_batch = copy.deepcopy(sdp_batch)
batch_size = len(length_batch)
max_len = max(length_batch)
for i in range(0, batch_size):
pad_len = int(max_len - length_batch[i])
pad_sdp_batch[i].extend(list(np.zeros(pad_len, dtype=int)))
return np.array(pad_sdp_batch, dtype=dtype)
# label is sparse
# batch is padded to a matrix
def transfer_to_input_format(self, data_batch):
label_fb, label_concat = self.transfer_to_sparse(data_batch["sentence_label"])
data_batch_new = {
"sdp_words_index": self.pad_to_matrix(data_batch["sdp_words_index"], "int"),
"sdp_rev_words_index": self.pad_to_matrix(data_batch["sdp_rev_words_index"], "int"),
"sdp_rels_index": self.pad_to_matrix(data_batch["sdp_rels_index"], "int"),
"sdp_rev_rels_index": self.pad_to_matrix(data_batch["sdp_rev_rels_index"], "int"),
"label_fb": label_fb,
"label_concat": label_concat,
#"sdp_length": self.get_batch_length(data_batch["sdp_words_index"]),
}
return data_batch_new
def transfer_to_sparse(self, label_batch):
batch_size = len(label_batch)
label_batch_fb = np.zeros((batch_size, 19), dtype=int)
label_batch_concat = np.zeros((batch_size, 10), dtype=int)
for i in range(0, batch_size):
num_fb = int(label_batch[i])
num_concat = int(num_fb % 10)
label_batch_fb[i][num_fb] = 1
label_batch_concat[i][num_concat] = 1
return label_batch_fb, label_batch_concat
def next_batch(self, batch_size):
if batch_size > self.num_train_data:
raise Exception('the batch size is bigger than the train data size')
else:
pass
start = self._index_in_epoch
end = min(start + batch_size, self.num_train_data)
batch_indices = self.shuffled_indices[start:end]
train_data_batch = {
"sdp_words_index": list(np.array(self.train_data["sdp_words_index"])[batch_indices]),
"sdp_rev_words_index": list(np.array(self.train_data["sdp_rev_words_index"])[batch_indices]),
"sdp_rels_index": list(np.array(self.train_data["sdp_rels_index"])[batch_indices]),
"sdp_rev_rels_index": list(np.array(self.train_data["sdp_rev_rels_index"])[batch_indices]),
"sentence_label": list(np.array(self.train_data["sentence_label"])[batch_indices]),
}
train_data_batch = self.transfer_to_input_format(train_data_batch)
self._index_in_epoch += batch_size
if self._index_in_epoch > self.num_train_data:
self._index_in_epoch = 0
self._epochs_completed = 1
self.shuffled_indices = np.random.permutation(np.arange(self.num_train_data))
return train_data_batch
def get_valid_data(self):
return self.transfer_to_input_format(self.valid_data)
def get_test_data(self):
return self.transfer_to_input_format(self.test_data)
"""
# test code
index = 3
file_name = "data/final_data/data_" + word_vec_file_state[index] + ".pkl"
dg = DataGenerator(file_name)
batch = dg.next_batch(100)
batch = dg.next_batch(100)
"""
| true |
619b8e8affe403b067dd241100b7bf324920eb2f | Python | NidhayPancholi/Movie-Recommendation-System | /functions.py | UTF-8 | 1,330 | 3.03125 | 3 | [] | no_license | def create_array(data):
arr=np.zeros(1682)
for x,y in data.iterrows():
arr[y[1]-1]=y[2]
return arr
def cosine_similarity(a,b):
temp=np.dot(a,b)
a=a**2
b=b**2
return temp/(((sum(a))**0.5)*((sum(b)**0.5)))
def recommend_movies(sim,num_movies,user_id):
    """Recommend up to ``num_movies`` titles the user has not seen.

    ``sim`` is a list of (other_user_id, similarity) pairs; most-similar
    users are scanned first and their 5-star movies collected.  If too few
    are found, random item ids pad the set.  Relies on the module-level
    ``df`` ratings frame, ``random`` and ``find_movie_names``.
    """
    # Sort ascending by similarity, then iterate in reverse (most similar first).
    sim.sort(key= lambda x:x[1])
    user1=df[df['user_id']==user_id]
    seen_movies=list(user1['item_id'])
    enough_movies=False
    user_movies=set()
    for x in sim[::-1]:
        # All 5-star ratings given by this similar user.
        temp=df[(df['user_id']==x[0]) &(df['rating'].isin([5]))]
        if len(temp)==0:
            continue
        else:
            for i,j in temp.iterrows():
                if j['item_id'] not in seen_movies:
                    user_movies.add(j['item_id'])
                if len(user_movies)==num_movies:
                    enough_movies=True
                    break
            if enough_movies:
                break
    # Pad with random item ids if the similar users did not yield enough.
    while len(user_movies)<num_movies:
        user_movies.add(random.randint(1,1682))
    user_movies=find_movie_names(user_movies)
    seen_movies=find_movie_names(seen_movies)
    print("SEEN MOVIES",seen_movies)
    return user_movies
def find_movie_names(list_of_movies):
    """Map item ids to titles via the module-level ``movies`` dataframe."""
    return [list(movies[movies['item_id'] == item_id]['title'])[0]
            for item_id in list_of_movies]
| true |
d3f86bb7c3cccc58dbb6f7d7af3e9aa393faa478 | Python | esterified/algorithm | /my_deep_learn.py | UTF-8 | 7,056 | 2.859375 | 3 | [] | no_license |
"""
Created on Sat Jun 17 14:07:09 2017
@author: ESTERIFIED
"""
from scipy import optimize,meshgrid
import numpy as np
import matplotlib.cm as com
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.pyplot import plot,grid,scatter,xlabel,ylabel,figure,subplot,clabel,contour
from numpy.linalg import norm
from numpy import linspace
from mpl_toolkits.mplot3d import Axes3D
#Regularization Parameter:
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([5,1], [5,1], [10,2], [6,1.5]), dtype=float)
y = np.array(([75], [92], [93], [70]), dtype=float)
# Normalize: scale each feature by its column maximum.
X = X/np.amax(X,axis=0)
y = y/100 #Max test score is 100
class neural(object):
    """Two-layer feedforward network (2 -> 4 -> 1) with sigmoid activations
    and an L2 penalty (strength ``Lambda``) on both weight matrices."""

    def __init__(self):
        self.Lambda = 0.0001        # L2 regularization strength
        self.l1 = 2                 # input layer size
        self.l2 = 4                 # hidden layer size
        self.l3 = 1                 # output layer size
        self.w1 = np.random.randn(self.l1, self.l2)
        self.w2 = np.random.randn(self.l2, self.l3)

    def sigmoid(self, x):
        """Elementwise logistic function."""
        return 1 / (1 + np.exp(-x))

    def sigmoidprime(self, x):
        """Derivative of the logistic function evaluated at x."""
        return np.exp(-x) / ((1 + np.exp(-x)) ** 2)

    def forward(self, x):
        """Propagate x through the network; caches z2/a2/z3 for backprop."""
        self.z2 = np.dot(x, self.w1)
        self.a2 = self.sigmoid(self.z2)
        self.z3 = np.dot(self.a2, self.w2)
        yhat = self.sigmoid(self.z3)
        return yhat

    def costfunction(self, X, y):
        """Mean squared error on (X, y) plus the L2 penalty."""
        self.yHat = self.forward(X)
        data_term = 0.5 * sum((y - self.yHat) ** 2) / X.shape[0]
        reg_term = (self.Lambda / 2) * (np.sum(self.w1 ** 2) + np.sum(self.w2 ** 2))
        return data_term + reg_term

    def costfunctionprime(self, X, y):
        """Backpropagate to obtain dJ/dW1 and dJ/dW2 (regularization included)."""
        self.yhat = self.forward(X)
        delta3 = np.multiply(-(y - self.yhat), self.sigmoidprime(self.z3))
        dJdW2 = np.dot(self.a2.T, delta3) / X.shape[0] + self.Lambda * self.w2
        delta2 = np.dot(delta3, self.w2.T) * self.sigmoidprime(self.z2)
        dJdW1 = np.dot(X.T, delta2) / X.shape[0] + self.Lambda * self.w1
        return dJdW1, dJdW2

    def getParams(self):
        """Return W1 and W2 flattened into one parameter vector."""
        return np.concatenate((self.w1.ravel(), self.w2.ravel()))

    def setParams(self, params):
        """Unpack a flat parameter vector back into W1 and W2."""
        w1_size = self.l1 * self.l2
        self.w1 = np.reshape(params[:w1_size], (self.l1, self.l2))
        self.w2 = np.reshape(params[w1_size:w1_size + self.l2 * self.l3],
                             (self.l2, self.l3))

    def computegradients(self, X, y):
        """Gradient flattened to match the getParams() layout."""
        dJdW1, dJdW2 = self.costfunctionprime(X, y)
        return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))

    def computeNumericalGradient(self, X, y):
        """Central-difference gradient estimate (for checking computegradients)."""
        base = self.getParams()
        numgrad = np.zeros(base.shape)
        perturb = np.zeros(base.shape)
        e = 1e-4
        for p in range(len(base)):
            perturb[p] = e
            self.setParams(base + perturb)
            loss2 = self.costfunction(X, y)
            self.setParams(base - perturb)
            loss1 = self.costfunction(X, y)
            numgrad[p] = (loss2 - loss1) / (2 * e)
            perturb[p] = 0
        # Restore the original parameters.
        self.setParams(base)
        return numgrad
class Train_n(object):
    """Trains a `neural`-like network with scipy's BFGS optimizer."""
    def __init__(self,g):
        # g must expose getParams/setParams/costfunction/computegradients.
        self.N=g
    def callbackF(self, params):
        # Called by scipy after each iteration: sync the network weights and
        # record the cost so training curves can be plotted later.
        self.N.setParams(params)
        #storing costfunction corresponding to iteration just , \
        #for the ease of plotting in to the variable j
        self.J.append(self.N.costfunction(self.X, self.y))
        #self.testJ.append(self.N.costfunction(self.testX, self.testy))
    def costFunctionWrapper(self, params, X, y):
        # Adapter for scipy: set weights, then return (cost, gradient) as
        # required by minimize(..., jac=True).
        self.N.setParams(params)
        cost = self.N.costfunction(X, y)
        grad = self.N.computegradients(X,y)
        return cost,grad
    def train(self, X, y):
        """Run up to 200 BFGS iterations on (X, y) and store the result."""
        #Make an internal variable for the callback function:
        self.X = X
        self.y = y
        #Make empty list to store costs:
        self.J = []
        self.testJ=[]
        params0 = self.N.getParams()
        options = {'maxiter': 200, 'disp' : True}
        _res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='BFGS', \
                                 args=(X,y), options=options, callback=self.callbackF)
        self.N.setParams(_res.x)
        self.optimizationResults = _res
n=neural()
c=n.computegradients(X, y)
d=n.computeNumericalGradient(X, y)
#print a,'\n',b
#
# Relative difference between analytic and numerical gradients; should be
# tiny (~1e-8) when backprop is implemented correctly.
# NOTE(review): `norm` presumably comes from a star import (numpy/pylab)
# earlier in the file -- confirm.
testi=norm(c-d)/norm(c+d)
g=Train_n(n)
g.train(X,y)
#Plot after BFGS iteration completion
fig = figure('cost vs iterations')
plot(g.J)
grid(1)
xlabel('Iterations')
ylabel('Cost')
#Plot projections of our new data:
fig = figure('projections')
subplot(1,2,1)
scatter(X[:,0], y)
grid(1)
xlabel('Hours Sleeping')
ylabel('Test Score')
subplot(1,2,2)
scatter(X[:,1], y)
grid(1)
xlabel('Hours Studying')
ylabel('Test Score')
#Test network for various combinations of sleep/study:
hoursSleep = linspace(0, 10, 10)
hoursStudy = linspace(0, 5, 10)
#Normalize data (same way training data way normalized)
hoursSleepNorm = hoursSleep/10.
hoursStudyNorm = hoursStudy/5.
#Create 2-d versions of input for plotting
a, b = meshgrid(hoursSleepNorm, hoursStudyNorm)
#Join into a single input matrix:
allInputs = np.zeros((a.size, 2))
allInputs[:, 0] = a.ravel()
allInputs[:, 1] = b.ravel()
allOutputs = n.forward(allInputs)
#Contour Plot:
yy = np.dot(hoursStudy.reshape(10,1), np.ones((1,10)))
xx = np.dot(hoursSleep.reshape(10,1), np.ones((1,10))).T
figure('contour')
CS = contour(xx,yy,100*allOutputs.reshape(10, 10))
clabel(CS, inline=1, fontsize=10)
xlabel('Hours Sleep')
ylabel('Hours Study')
fig = figure()
# NOTE(review): gca(projection='3d') was removed in Matplotlib 3.6; newer
# code should use fig.add_subplot(projection='3d').
ax=fig.gca(projection='3d')
ax.scatter(10*X[:,0], 5*X[:,1], 100*y, c='k', alpha = 1, s=100)
ax.grid(1)
# NOTE(review): `com.jet` looks like a typo for matplotlib's `cm.jet` unless
# `com` is an alias defined earlier in the file -- confirm.
surf = ax.plot_surface(xx, yy, 100*allOutputs.reshape(10, 10),cmap=com.jet, alpha = 0.7,rstride=1,cstride=1)
ax.set_xlabel('Hours Sleep')
ax.set_ylabel('Hours Study')
ax.set_zlabel('Test Score')
49a6fee1184eb9fcbb3ae6011ba5c42948bf0696 | Python | ramuklihtnes/guviguvi | /positionvalue.py | UTF-8 | 128 | 2.984375 | 3 | [] | no_license | a,b=input().split()
t=[]
for i in a:
t.append(i)
a=len(a)
for i in range(1,a):
if b in t:
print(t.index(b)+1)
break
| true |
ffe1928bd5af40158d9bb3df8cc533de016a0d2c | Python | PSU-OIT-ARC/django-arcutils | /arcutils/drf/renderers.py | UTF-8 | 2,374 | 2.953125 | 3 | [
"MIT"
] | permissive | from collections import Sequence
from rest_framework.renderers import TemplateHTMLRenderer
class TemplateHTMLContextDictRenderer(TemplateHTMLRenderer):
"""Wrap serialized data in a dictionary for use in templates.
Otherwise, the serialized data will get dumped into the template
context without any namespacing.
The serialized data will be wrapped in a context dictionary like
this if the data is a list (or any other sequence type)::
{'object_list': data}
or like this if the data is a dict (or any other non-sequence
type)::
{'object': data}
To use a different wrapper name, set ``context_object_name`` on the
relevant view class::
class PantsView(ListAPIView):
# Access data via ``pants`` instead of ``object_list`` in
# template.
context_object_name = 'pants'
template_name = 'pants/list.html'
For view classes that have some methods that return lists and others
that return objects (e.g., ``ViewSets``), lists can be wrapped using
a different name by setting ``context_object_list_name``::
class HorseViewSet(ViewSet):
context_object_name = 'horse'
context_object_list_name = 'horses'
.. note:: If ``context_object_list_name`` or ``context_object_name``
is set to a name that's also set by a context processor
(like ``request`` or ``user``), the serialized data will
be shadowed and inaccessible in the template.
"""
context_object_name = 'object'
context_object_list_name = 'object_list'
def get_template_context(self, data, renderer_context):
wrapper_name = self.get_wrapper_name(data, renderer_context)
data = {wrapper_name: data}
return super().get_template_context(data, renderer_context)
def get_wrapper_name(self, data, renderer_context):
view = renderer_context['view']
if isinstance(data, Sequence):
name = (
getattr(view, 'context_object_list_name', None) or
getattr(view, 'context_object_name', None) or
self.context_object_list_name
)
else:
name = (
getattr(view, 'context_object_name', None) or
self.context_object_name
)
return name
| true |
bdea406c011ead83de9b88d0caf5c3f7fb50fcbd | Python | AndIag/ai-django | /ai_core/validators/base.py | UTF-8 | 630 | 2.703125 | 3 | [] | no_license | from django.utils.deconstruct import deconstructible
@deconstructible
class DNIValidator(object):
    """Validator for Spanish national IDs (DNI) and foreigner IDs (NIE).

    A valid value is 8 digits plus a control letter (e.g. ``12345678Z``).
    NIEs start with X, Y or Z, which map to the digits 0, 1 and 2 for the
    checksum.  The control letter is ``tabla[number % 23]``.
    """
    tabla = "TRWAGMYFPDXBNJZSQVHLCKE"
    external = "XYZ"
    external_map = {'X': '0', 'Y': '1', 'Z': '2'}
    numbers = "1234567890"

    def __call__(self, value):
        """Return True if *value* is a well-formed DNI/NIE, else False."""
        dni = value.upper()
        if len(dni) == 9:
            dig_control = dni[8]
            dni = dni[:8]
            if dni[0] in self.external:
                # Only the leading NIE letter maps to a digit.  The previous
                # str.replace() rewrote *every* occurrence of that letter,
                # which let malformed values such as 'X123X567K' validate.
                dni = self.external_map[dni[0]] + dni[1:]
            # The digit-count check short-circuits before int() can raise.
            return len(dni) == len([n for n in dni if n in self.numbers]) and self.tabla[int(dni) % 23] == dig_control
        return False
e043a97c7682b52bc70657214f18a20e4c54d108 | Python | peterashwell/finding-streetsigns | /sift.py | UTF-8 | 6,194 | 2.546875 | 3 | [] | no_license | # We are using python2 because OpenCV compatibility for python3 sucks
# Continue using a python3 linter however, so fix print statements
from __future__ import print_function
import cv2
import numpy as np
import os
import sys
import copy
from sift_wrapper import SiftWrapper
from loaders import open_grayscale_image
MIN_MATCH_COUNT = 5
# Lower - more specifity for matches
# Usage: sift.py <training dir> <query dir> <result dir>
TRAINING_PATH = sys.argv[1]
QUERY_PATH = sys.argv[2]
RESULT_PATH = sys.argv[3]
training_images = os.listdir(TRAINING_PATH)
query_images = os.listdir(QUERY_PATH)
sw = SiftWrapper()
# Precompute SIFT features for every training template once, so each query
# image only pays for its own feature extraction.
training_feature_map = {}
training_image_map = {}
for train_fname in training_images:
    image_path = os.path.join(TRAINING_PATH, train_fname)
    image = open_grayscale_image(image_path)
    training_feature_map[train_fname] = sw.do_sift(image)
    training_image_map[train_fname] = image
# For each query image: pool keypoint matches from all training templates,
# then repeatedly fit a rigid transform to locate street signs and draw them.
for qnum, query_fname in enumerate(query_images):
    print("reading {0}".format(query_fname))
    training_hits = 0
    query_image_path = os.path.join(QUERY_PATH, query_fname)
    # Get 'a' component of lab image
    query_image = open_grayscale_image(query_image_path)
    output_image = np.copy(query_image)
    sift_query = sw.do_sift(query_image)
    all_src_pts = []
    all_dst_pts = []
    # Template coordinates are normalised by template width so matches from
    # differently-sized templates can be pooled together.
    for train_fname in training_feature_map.keys():
        sift_train = training_feature_map[train_fname]
        knn_result = sw.do_knn_sift(sift_train, sift_query)
        if knn_result.num_found > MIN_MATCH_COUNT:
            #print("matched: {0}".format(train_fname))
            training_hits += 1
            src_pts = np.float32(knn_result.source_points).reshape(-1, 1, 2)
            train_height, train_width = training_image_map[train_fname].shape
            train_height *= 1.0
            train_width *= 1.0
            #print("train ratio:", train_height / train_width)
            all_src_pts += [pt / train_width for pt in src_pts]
            all_dst_pts += knn_result.destination_points
    for sm, dm in zip(all_src_pts, all_dst_pts):
        print('[[{0},{1}],[{2},{3}]]'.format(sm[0][0], sm[0][1], dm[0], dm[1]))
    # NOTE(review): `dst_pts` reuses `knn_result` from the *last* loop
    # iteration and is never read afterwards -- looks like leftover debug
    # code; it raises NameError if there are no training images.
    dst_pts = np.float32(knn_result.destination_points).reshape(-1, 1, 2)
    #print('src:', all_src_pts)
    #print('dst:', all_dst_pts)
    # We shaped all the streetsigns in a 2:1 height:width size
    # So this is the basis template for our homography
    # Also, stretch it out a little to potentially include more matches
    #
    # (-0.1,-0.5) (1.1,-0.5)
    # +-----------+
    # | |
    # | |
    # | |
    # | |
    # | |
    # | |
    # | |
    # | |
    # | |
    # +-----------+
    # (-0.1,2.75) (1.1,2.75)
    attempts = 3
    train_matches = np.copy(all_src_pts)
    query_matches = np.copy(all_dst_pts)
    # Repeatedly fit a transform to the remaining matches, carve out the sign
    # it describes, and retry with the leftover points (up to 3 signs).
    while attempts and len(query_matches):
        if len(train_matches) < 4 and len(query_matches) < 4:
            break
        from_template = np.float32(train_matches).reshape(-1, 1, 2)
        to_query_image = np.float32(query_matches).reshape(-1, 1, 2)
        # Affine transform
        M = cv2.estimateRigidTransform(from_template, to_query_image, False)
        if M is not None:
            M = np.vstack((M, np.array([0, 0, 1])))
        #M = cv2.findHomography(from_template, to_query_image, cv2.RANSAC, 0.1)[0]
        np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
        print('homography:\n', M)
        normal_corners = np.array([
            [0, 0],
            [0, 2.25],
            [1.0, 2.25],
            [1.0, 0]
        ])
        stretched_corners = np.array([
            [-0.05, -0.5],
            [-0.05, 2.75],
            [1.05, 2.75],
            [1.05, -0.5]
        ])
        # Turn into vectors for matrix multiplication
        normal_corners = np.float32(normal_corners).reshape(-1, 1, 2)
        stretched_corners = np.float32(stretched_corners).reshape(-1, 1, 2)
        # We have found an affine transform
        if M is not None:
            attempts -= 1
            # Fill out the perspective matrix with a dummy row
            normal_corners = cv2.perspectiveTransform(normal_corners, M)
            stretched_corners = cv2.perspectiveTransform(stretched_corners, M)
            cv2.polylines(output_image, [np.int32(normal_corners)], True, 255, 3)
            # Remove points we have already found as a sign and repeat
            new_query_matches = []
            new_train_matches = []
            for index, point in enumerate(query_matches):
                # Check point is on or inside region of found sign (0 or +1)
                tuplepoint = (point[0], point[1])
                if cv2.pointPolygonTest(stretched_corners, tuplepoint, True) < 0:
                    new_query_matches.append(point)
                    new_train_matches.append(train_matches[index])
            query_matches = new_query_matches
            train_matches = new_train_matches
        else:
            attempts = False
    if training_hits:
        # Add points not matched to a homography
        for dst_pt in query_matches:
            int_pt = tuple([int(x) for x in dst_pt])
            cv2.circle(output_image, int_pt, 5, (255, 255, 255), -1)
        # Plot match lines
        # Pad output image to place where sign would be if it were there
        #fill = np.zeros(output_image.shape)
        #output_image = np.concatenate((output_image, fill), axis=1)
        # Scale and shift matches so that they fit in the box
        scale_factor = output_image.shape[1] / 2.25
        shift_factor = output_image.shape[0]
        # NOTE(review): this loop only builds coordinates -- the drawing call
        # below is commented out, so it currently has no visible effect.
        for train, query in zip(all_src_pts, all_dst_pts):
            train = np.int32(train[0] * scale_factor)
            train[0] += shift_factor
            train = (train[0], train[1])
            query = (int(query[0]), int(query[1]))
            #cv2.line( output_image, train, query, [255, 0, 0], 5)
    # Write each query image out with markers from training images
    # NOTE query_fname includes .jpg extension
    output_path = os.path.join(RESULT_PATH, query_fname)
    cv2.imwrite(output_path, output_image)
| true |
40a96ce860291c375369569a1c367ee8d2c2658f | Python | WuShuang1998/underwater-od | /tools/gt_show.py | UTF-8 | 4,150 | 2.609375 | 3 | [] | no_license | from pycocotools.coco import COCO
import json
import torch
from torch.utils.data import Dataset
import skimage.io
import os
import numpy as np
import cv2
class CocoDataset(Dataset):
    """Dataset over a COCO-style annotation file.

    Yields (image, annotations, file_name) triples, where annotations is an
    (N, 5) float array of [x1, y1, x2, y2, label] rows with 0-based,
    contiguous labels.
    """

    def __init__(self, root_dir, set_name='train', image_base=''):
        self.root_dir = root_dir
        self.set_name = set_name
        self.image_base = image_base
        # The annotation file lives at <root_dir>/<set_name>.json.
        self.coco = COCO(os.path.join(self.root_dir, self.set_name + '.json'))
        self.image_ids = self.coco.getImgIds()
        self.load_classes()

    def load_classes(self):
        """Build the mappings between COCO category ids and 0-based labels."""
        categories = self.coco.loadCats(self.coco.getCatIds())
        categories.sort(key=lambda c: c['id'])
        self.classes = {}
        self.coco_labels = {}
        self.coco_labels_inverse = {}
        for category in categories:
            label = len(self.classes)
            self.coco_labels[label] = category['id']
            self.coco_labels_inverse[category['id']] = label
            self.classes[category['name']] = label
        # Reverse mapping: label -> class name.
        self.labels = {label: name for name, label in self.classes.items()}

    def __len__(self):
        return len(self.image_ids)

    def __getitem__(self, idx):
        image = self.load_image(idx)
        annotations = self.load_annotations(idx)
        file_name = self.coco.loadImgs(self.image_ids[idx])[0]['file_name']
        return image, annotations, file_name

    def load_image(self, image_index):
        info = self.coco.loadImgs(self.image_ids[image_index])[0]
        return skimage.io.imread(os.path.join(self.image_base, info['file_name']))

    def load_annotations(self, image_index):
        """Return an (N, 5) array of [x1, y1, x2, y2, label] ground-truth boxes."""
        annotation_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
        annotations = np.zeros((0, 5))
        # Some images carry no annotations at all.
        if not annotation_ids:
            return annotations
        for entry in self.coco.loadAnns(annotation_ids):
            # Skip degenerate boxes with (almost) no width or height.
            if entry['bbox'][2] < 1 or entry['bbox'][3] < 1:
                continue
            row = np.zeros((1, 5))
            row[0, :4] = entry['bbox']
            row[0, 4] = self.coco_label_to_label(entry['category_id'])
            annotations = np.append(annotations, row, axis=0)
        # Convert [x, y, w, h] -> [x1, y1, x2, y2].
        annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
        annotations[:, 3] = annotations[:, 1] + annotations[:, 3]
        return annotations

    def coco_label_to_label(self, coco_label):
        return self.coco_labels_inverse[coco_label]

    def label_to_coco_label(self, label):
        return self.coco_labels[label]
def draw_caption(image, box, caption):
    """Render *caption* just above *box*: white text over a dark outline."""
    top_left = np.array(box).astype(int)
    anchor = (top_left[0], top_left[1] - 10)
    # Thick black pass first, thin white pass on top, for contrast.
    cv2.putText(image, caption, anchor, cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
    cv2.putText(image, caption, anchor, cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
# Draw every ground-truth box (with its class name) onto each image and save
# the result under ./output_gt.
if not os.path.exists('./output_gt'):
    os.makedirs('./output_gt')
# NOTE(review): dataset paths are hard-coded to one machine's layout.
dataset = CocoDataset(root_dir='/home/aistudio/work/datasets/water', set_name='train_aug', image_base='/home/aistudio/work/datasets/water/train/image_aug')
# NOTE(review): `colors` is computed but never used below.
colors = [[np.random.randint(0, 255) for i in range(3)] for i in range(4)]
for i in range(len(dataset)):
    print(i)
    img, annot, name = dataset.__getitem__(i)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    for j in range(annot.shape[0]):
        # Negative labels mark padding / invalid rows.
        if annot[j, 4] < 0:
            continue
        bbox = annot[j]
        x1 = int(bbox[0])
        y1 = int(bbox[1])
        x2 = int(bbox[2])
        y2 = int(bbox[3])
        label_name = dataset.labels[bbox[4]]
        draw_caption(img, (x1, y1, x2, y2), label_name)
        cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=1)
    cv2.imwrite('output_gt/' + name, img)
f4ca5bdeac4d7e7770397ad4d68581ddf7d4daf0 | Python | pombredanne/red-fab-deploy | /fab_deploy/config.py | UTF-8 | 849 | 2.640625 | 3 | [
"MIT"
] | permissive | import ConfigParser
class CustomConfig(ConfigParser.ConfigParser):
"""
Custom Config class that can read and write lists.
"""
# Config settings
CONNECTIONS = 'connections'
INTERNAL_IPS = 'internal-ips'
OPEN_PORTS = 'open-ports'
RESTRICTED_PORTS = 'restricted-ports'
ALLOWED_SECTIONS = 'allowed-sections'
USERNAME = 'username'
REPLICATOR = 'replicator'
REPLICATOR_PASS = 'replicator-password'
GIT_SYNC = 'git-sync'
def get_list(self, section, key):
if not self.has_option(section, key):
return []
return [x for x in self.get(section, key).split(',') if x ]
def set_list(self, section, key, slist):
t = ','.join(slist)
self.set(section, key, t)
def save(self, filename):
fp = open(filename, 'w')
self.write(fp)
fp.close()
| true |
8ae7f500d21d51083672782f9e998e9130c75369 | Python | ChastityAM/Python | /Small-Projects/Inventory.py | UTF-8 | 248 | 3.234375 | 3 | [] | no_license | in_stock = 600
jeans_sold = 500
target_sales = 500
target_hit = jeans_sold == target_sales
print("Hit jeans sales target:")
print(target_hit)
on_hand = in_stock - jeans_sold
in_stock = on_hand != 0
print("Amount of jeans on hand:")
print(on_hand) | true |
fa6c3b1202cb99dcd867d93ed3233b2736c33b15 | Python | daroot/gevent-breaker | /test/test_breaker.py | UTF-8 | 5,697 | 3 | 3 | [
"WTFPL"
] | permissive | import pytest
import gevent
from collections import defaultdict
from gevent_breaker import (CircuitBreaker, circuit_breaker, CircuitBroken)
@pytest.fixture
def testbreakbox():
    """
    Create a set of breakers with useful properties for testing.

    - basic      [reset=10,    threshold=1] (defaults of the factory)
    - short      [reset=0.002, threshold=1]
    - multi      [reset=10,    threshold=2]
    - shortmulti [reset=.002,  threshold=3]
    """
    box = defaultdict(lambda: CircuitBreaker(reset=10.0, threshold=1))
    box["basic"]  # plain access instantiates it with pure defaults
    box["short"].reset = 0.002
    box["multi"].threshold = 2
    box["shortmulti"].threshold = 3
    box["shortmulti"].reset = 0.002
    return box
def raisefault(brk_name, breakerbox):
    """
    Deliberately raise a fault in the named breaker.
    Helper function to avoid needing to type out a full try/except block
    in every test.
    """
    # The ZeroDivisionError is recorded by the breaker context, then
    # swallowed here so the calling test keeps running.
    try:
        with circuit_breaker(brk_name, breakerbox=breakerbox):
            1/0
    except Exception:
        pass # Swallow, we know.
def test_new_breaker_uses_defaults(testbreakbox):
    """
    A newly instantiated CircuitBreaker should get options from
    the defaultdict's factory function, and should be untripped and have
    no outstanding timer task.
    """
    # Plain dict access triggers the defaultdict factory.
    breaker = testbreakbox["basic"]
    assert breaker.reset == 10.0
    assert breaker.threshold == 1
    assert breaker.tripped is False
    assert breaker.timer_task is None
def test_breaker_trips(testbreakbox):
    """
    When a fault is raised, if the threshold is met, the breaker should trip.
    """
    breaker = testbreakbox["basic"]
    assert breaker.tripped is False
    # "basic" has threshold=1, so a single fault is enough to trip it.
    raisefault("basic", testbreakbox)
    assert breaker.fails == 1
    assert breaker.tripped is True
def test_tripped_breaker_resets(testbreakbox):
    """
    After the reset period has elapsed, a tripped breaker should be reset
    to untripped.
    """
    breaker = testbreakbox["short"]
    assert breaker.tripped is False
    raisefault("short", testbreakbox)
    assert breaker.fails == 1
    assert breaker.tripped is True
    # "short" resets after 2 ms; sleeping 30 ms gives its timer ample slack.
    gevent.sleep(0.03)
    assert breaker.tripped is False
def test_breaker_will_raise(testbreakbox):
    """
    Using the circuit_breaker context should raise CircuitBroken if
    called on a CircuitBreaker that has already tripped.
    """
    breaker = testbreakbox["basic"]
    raisefault("basic", testbreakbox)
    assert breaker.tripped is True
    # The body is never reached: the context manager raises before entry.
    with pytest.raises(CircuitBroken):
        with circuit_breaker("basic", breakerbox=testbreakbox):
            1/0
def test_breaker_blocks(testbreakbox):
    """
    A circuit_breaker context with the blocks=True flag should not raise, but
    instead wait.
    """
    breaker = testbreakbox["short"]
    raisefault("short", testbreakbox)
    assert breaker.tripped is True
    # With block=True the context waits for the reset, so the 1 ms gevent
    # timeout fires before the body ever runs.
    with pytest.raises(gevent.Timeout), gevent.Timeout(0.001):
        with circuit_breaker("short", block=True, breakerbox=testbreakbox):
            raise Exception("Should not get here.")
def test_breaker_clears_on_success(testbreakbox):
    """
    Once a circuit_breaker context has passed, it should completely clear the
    breaker so that future uses will succeed as normal.
    """
    breaker = testbreakbox["multi"]
    raisefault("multi", testbreakbox)
    assert breaker.fails == 1
    # "multi" has threshold=2, so one fault leaves it untripped and usable.
    with circuit_breaker("multi", breakerbox=testbreakbox):
        pass # Should cause 'clear' to be run.
    assert breaker.fails == 0
def test_breaker_clear_wakes_blocking_waiters(testbreakbox):
    """
    A circuit_breaker context that is waiting on a breaker reset will be
    woken and continue when the reset period has elapsed.
    """
    raisefault("short", testbreakbox)
    result = {"done": False}
    def waiting_task():
        # Must target the same breaker box the fault was raised in.  The
        # previous call omitted breakerbox=, so it blocked on a fresh,
        # untripped "short" breaker in the default box and never actually
        # exercised the wake-on-reset path.
        with circuit_breaker("short", block=True, breakerbox=testbreakbox):
            gevent.sleep(0.001)
            result["done"] = True
    task = gevent.spawn(waiting_task)
    gevent.idle()
    assert result["done"] is False
    # "short" resets after 2 ms, well inside the 50 ms join timeout.
    task.join(timeout=0.05)
    assert task.ready() is True
    assert result["done"] is True
def test_breaker_clear_kills_timer(testbreakbox):
    """
    A cleared circuit_breaker should cancel its timer_task.
    """
    breaker = testbreakbox["basic"]
    def slow_worker():
        with circuit_breaker("basic", breakerbox=testbreakbox):
            gevent.sleep(0.01)
    task = gevent.spawn(slow_worker)
    gevent.idle() # let slow_worker start up
    assert breaker.tripped is False
    raisefault("basic", testbreakbox)
    assert breaker.tripped is True
    assert breaker.timer_task is not None
    # Hold a reference so the timer greenlet can be checked after the clear.
    timer_task = breaker.timer_task
    # Wait for slow_worker to finish, which should clear breaker
    task.join(timeout=0.05)
    assert task.ready() is True
    assert breaker.tripped is False
    assert breaker.timer_task is None
    assert timer_task.ready() is True
def test_breaker_reset_fails_not_fully_reset(testbreakbox):
    """
    A circuit_breaker's reset timer task should set its failure state to
    one below the threshold, rather than completely clear it.
    """
    breaker = testbreakbox["shortmulti"]
    # "shortmulti" has threshold=3, so exactly three faults trip it.
    for x in range(3):
        raisefault("shortmulti", testbreakbox)
    assert breaker.tripped is True
    gevent.sleep(0.02)
    # Reset should run and set us to threshold-1 for a try.
    assert breaker.tripped is False
    assert breaker.fails == 2
    # One more should be back at threshold.
    raisefault("shortmulti", testbreakbox)
    assert breaker.tripped is True
    # And clear it fully.
    with circuit_breaker("shortmulti", breakerbox=testbreakbox, block=True):
        pass
    assert breaker.tripped is False
    assert breaker.fails == 0
| true |
6f4e6963f877d5158b59f47dbe572090a08ed65c | Python | benji06140/oci-prog-exos | /niveau-02/chapitre-3-chaines-de-caracteres/04i-analyse-d-une-langue-obligatoire.py | UTF-8 | 631 | 3.328125 | 3 | [] | no_license | ##################################
# fichier 04i-analyse-d-une-langue-obligatoire.py
# nom de l'exercice : Analyse d’une langue
# url : http://www.france-ioi.org/algo/task.php?idChapter=595&idTask=0&sTab=task&iOrder=26
# type : obligatoire
#
# Chapitre : chapitre-3-chaines-de-caracteres
#
# Compétence développée :
#
# auteur :
##################################
# chargement des modules
# mettre votre code ici
# Count how many times the given letter occurs across all input lines.
lettre = input()
nbLignes = int(input())
nombre = 0
for loop in range(nbLignes):
    phrase = input()
    # Same per-character comparison the original index loop performed,
    # expressed as a generator sum instead of manual indexing.
    nombre += sum(1 for ch in phrase if ch == lettre)
print(nombre)
| true |
00f2236f2cd306d5bab331dbc8262a69908c8c7c | Python | razum2um/yaspeller-dictionary-builder | /src/dictionary.py | UTF-8 | 3,258 | 2.515625 | 3 | [] | no_license | import os
import json
import pymorphy2
import argparse
morph = pymorphy2.MorphAnalyzer(lang='ru', char_substitutes={})
cases = [{'nomn'}, {'gent'}, {'datv'}, {'accs'}, {'ablt'}, {'loct'}]
# баг, самоорганизованными
noun_prtf_inflection_cases = [
x | y for x in cases for y in [{'sing'}, {'plur'}]]
# пропатченную
adjf_inflection_cases = [
x | gender for x in noun_prtf_inflection_cases for gender in [{'masc'}, {'femn'}, set()]]
inflection_cases_by_tag = {
'NOUN': noun_prtf_inflection_cases,
'PRTF': noun_prtf_inflection_cases,
'ADJF': adjf_inflection_cases,
}
def flatten(list_of_lists):
return [item for sublist in list_of_lists for item in sublist]
def _remove_prefix(text, prefix):
'''compat 3.9'''
if text.startswith(prefix):
return text[len(prefix):]
return text
def _export_words(file_report):
return [x['word'] for x in file_report['data']]
def export_all_words(fname):
with open(fname, 'r') as outfile:
lst = [_export_words(file_report)
for file_status, file_report in json.load(outfile)]
return sorted(set(flatten(lst)))
def _regexp_inflectable(parsed, inflection_cases):
'''_regexp_inflectable(morph.parse('баг')[0]) #=> "[бБ]аг(а|ам|ами|ах|е|и|ов|ом|у)?"'''
lexems = list(set([lexem.word for lexem in (parsed.inflect(
case) for case in inflection_cases) if lexem is not None]))
prefix = os.path.commonprefix(lexems)
start = prefix[0].lower()
suffixes = sorted(set([_remove_prefix(lexem, prefix) for lexem in lexems]))
optional_regexp_suffix = ''
if '' in suffixes:
optional_regexp_suffix = '?'
suffixes.remove('')
regexp_suffixes = "(%s)" % ('|'.join(suffixes),)
if len(suffixes) == 0:
regexp_suffixes = ''
optional_regexp_suffix = ''
regexp = "[%s%s]" % (start, start.upper()) + prefix[1:] + \
regexp_suffixes + optional_regexp_suffix
# debug output using this:
# return "%s | %s | %s" % (regexp, parsed.word, parsed.tag)
return regexp
def inflections(parsed):
    """Return a regexp covering *parsed*'s case forms, or the word itself."""
    cases_for_pos = inflection_cases_by_tag.get(parsed.tag.POS)
    if cases_for_pos:
        return _regexp_inflectable(parsed, cases_for_pos)
    # Parts of speech we don't inflect (verbs, adverbs, ...) stay as-is.
    return parsed.word
def process_file(fname):
    """Turn a yaspeller report into a sorted list of inflection-aware regexps."""
    # Maps normal form -> regexp; also deduplicates words that share a lemma.
    cache = {}
    for word in export_all_words(fname):
        parseds = morph.parse(word)
        norms = set([parsed.normalized.word for parsed in parseds])
        if len(norms & cache.keys()) > 0:
            continue # if a 2nd variant of word is processed as 1st variant earlier
        # Prefer inanimate parses -- sorting on a boolean puts 'inan' first.
        parsed = sorted(parseds, key=lambda p: p.tag.animacy != 'inan')[
            0] # non-animals first
        norm = parsed.normalized.word
        if norm not in cache:
            cache[norm] = inflections(parsed)
            # debug output using this:
            # cache[norm] += " [%s | %s]" % (parsed.word, parsed.tag)
    return sorted(set(cache.values()))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('file')
    args = parser.parse_args()
    dict_words = process_file(args.file)
    # Pretty-printed JSON; ensure_ascii=False keeps Cyrillic readable.
    print(json.dumps(dict_words, ensure_ascii=False, indent=4, sort_keys=True))
| true |
d22484a61d79665e20e4dba63ecc10332680e556 | Python | iulikchip/ToyProject | /PygLatin/ex11.py | UTF-8 | 187 | 3.515625 | 4 | [] | no_license | print "Your age?",
age = raw_input()
print "Height?",
height = raw_input()
print "Weight?",
weight = raw_input()
print "Age = %r , height = %r, weight = %r" % (
age, height, weight) | true |
a84fd2c6d22f3cfae3158fc297feef95a38fc924 | Python | K-Schaeffer/atp-iot-esp32 | /wifi_lib.py | UTF-8 | 1,321 | 3.09375 | 3 | [
"MIT"
] | permissive | # Author: Kauan Schaeffer #
# Description: Connect ESP32 microprocessor #
# into a WIFI network and send data to server #
def connect(ssid, password):
import network #Lib used to connect into networks
import time
print("Connecting...")
station = network.WLAN(network.STA_IF) #Connecting and activating the ESP32 into WLAN network
station.active(True)
station.connect(ssid, password)
for t in range(50):
if station.isconnected():
break
time.sleep(0.1)
return station
def send_data(c_temperature, c_humidity):
import urequests
print('========= Thingspeak Data ==========')
print("Opening API endpoint...")
#Request to server API
response = urequests.get("http://api.thingspeak.com/update?api_key=SNUDWZOU5C89BLZQ&field1={}&field2={}".format(c_temperature, c_humidity))
if response.text:
print("Data sent successfully!")
print('====================================\n')
else:
print("Couldn't connect to Thingspeak")
print('====================================\n')
# More interesting methods from network lib
#station.disconnect() --> Disconnect
#station.scan() --> Scan for WIFI Networks
#station.ifconfig() --> Check IP's: esp32 ip / ipmask / default gateway / dns ip | true |
a80c5929229bac289bdca6e0921ba33f95c83217 | Python | MoleOrbitalHybridAnalyst/mdtools | /block_average.py | UTF-8 | 1,556 | 3.078125 | 3 | [] | no_license | # do block average for one cv time series
import re
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def read_colvar(fname):
with open(fname, "r") as fp:
firstline = fp.readline()
if re.match("#!.+FIELDS", firstline) == None:
raise NameError('No header found in ' + fname)
names_ = firstline.split()[2:]
return pd.read_csv(fname,delim_whitespace=True,names=names_, comment ='#' )
def block_average(values, nblock):
stride = int(len(values)/nblock)
indexes = np.arange(0, nblock * stride + 1, stride)
slices = []
for i in range(len(indexes)-1):
slices.append(np.mean(values[indexes[i]:indexes[i+1]]))
slices = np.array(slices)
return [np.mean(slices),np.sqrt(np.var(slices,ddof=1)/len(slices))]
if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('cv_file', help='input colvar file')
    parser.add_argument('--cv_name',
            help='cv name in colvar file', default="cv")
    parser.add_argument('-m', '--min_nblock',
            help='minimum number of blocks', default="3")
    args = parser.parse_args()
    df = read_colvar(args.cv_file)
    values = df[args.cv_name].values
    min_block = int(args.min_nblock)
    aves = []; stds = []; nblocks = []
    # Halve the block count each pass; the error estimate should plateau
    # once blocks become longer than the series' correlation time.
    nblock = len(values)
    while nblock >= min_block:
        ave, std = block_average(values, nblock)
        aves.append(ave); stds.append(std); nblocks.append(nblock)
        nblock = int(nblock / 2)
    plt.plot(nblocks, stds)
    plt.show()
| true |
ff8fddbfd84f13c8c1cd087aa5419bfc711164f7 | Python | tomoima525/kotoha-slack | /app.py | UTF-8 | 2,109 | 2.546875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import requests
import json
import os
import re
from flask import Flask, url_for, request, redirect, Response
app = Flask(__name__)
# Upstream phrase API queried by the /kotoha slash command.
URL = 'https://kotoha-server.herokuapp.com/api/phrases.json'
# Check token if you don't want to conflict with other slash commands
#SLACK_KEY = os.environ['SLACK_KEY']
@app.route('/kotoha',methods=['POST'])
def kotoha():
    """
    Handle the /kotoha Slack slash command.

    Example:
        /kotoha (tag|text) [word]

    Queries the kotoha phrase API by tag or by text and returns the
    matching phrases as a plain-text response.
    """
    text = request.values.get('text')
    if not text:
        return 'hint: (tag|text) [word]'
    # Parse "<kind> <words>"; kind is either 'tag' or 'text'.
    match = re.search(r'(tag|text)\s*(.*)', text)
    if not match:
        # Previously `kind`/`words` stayed unbound here and the code fell
        # through to the request below, crashing with UnboundLocalError
        # (HTTP 500) instead of returning the hint.
        return 'hint: (tag|text) [words]'
    kind = match.group(1)
    words = match.group(2)
    # This route only accepts POST, so no method check is needed here.
    req = get_request_string(kind, words)
    query_json = json.loads(requests.get(req).content.decode('utf-8'))
    resp_qs = [':four_leaf_clover: Kotoha for {}:{}\n'.format(kind,words)]
    resp_qs.extend(map(get_response_string, query_json))
    return Response('\n'.join(resp_qs), content_type='text/plain; charset=utf-8')
@app.route('/')
def index():
    # The root path just points visitors at the author's GitHub profile.
    return redirect('https://github.com/tomoima525')
def get_request_string(kind, words):
    """Build the phrase-API query URL for the given search kind and terms."""
    query = '{}={}'.format(kind, words)
    return URL + '?' + query
def get_response_string(qdict):
    """Format one phrase record as '<text> from <tags>' for the Slack reply."""
    phrase = qdict.get('text')
    tags = qdict.get('tag_list')
    return '{} from {}'.format(phrase, tags)
if __name__ == '__main__':
    # Bind to PORT if defined, otherwise default to 5000.
    port = int(os.environ.get('PORT', 5000))
    # Any non-empty DEBUG env var enables Flask debug mode.
    debug = os.environ.get('DEBUG', False)
    app.run(host='0.0.0.0', port=port, debug = debug)
| true |
647979e08af658963d4246257750bb13ef3b0eb1 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_199/2270.py | UTF-8 | 555 | 3.390625 | 3 | [] | no_license | def inverse(s):
s = s.replace('-', '*')
s = s.replace('+', '-')
s = s.replace('*', '+')
return s
for i in range(int(input())):
s, k = input().split(' ')
k = int(k)
count = 0
minus_pos = s.find('-')
while 0 <= minus_pos < len(s) - k + 1:
s = s[:minus_pos] + inverse(s[minus_pos:minus_pos + k]) + s[minus_pos + k:]
count += 1
minus_pos = s.find('-')
if minus_pos == -1:
print("Case #{}: {}".format(i + 1, count))
else:
print("Case #{}: {}".format(i + 1, "IMPOSSIBLE"))
| true |
216af03993846e49043c5f30e63964b8850a6833 | Python | DmitryVlaznev/leetcode | /706-design-hashmap.py | UTF-8 | 1,860 | 4.375 | 4 | [] | no_license | # 706. Design HashMap
# Easy
# Design a HashMap without using any built-in hash table libraries.
# To be specific, your design should include these functions:
# * put(key, value) : Insert a (key, value) pair into the HashMap. If
# the value already exists in the HashMap, update the value.
# * get(key): Returns the value to which the specified key is mapped, or
# -1 if this map contains no mapping for the key.
# * remove(key) : Remove the mapping for the value key if this map
# contains the mapping for the key.
# Example:
# MyHashMap hashMap = new MyHashMap();
# hashMap.put(1, 1);
# hashMap.put(2, 2);
# hashMap.get(1); // returns 1
# hashMap.get(3); // returns -1 (not found)
# hashMap.put(2, 1); // update the existing value
# hashMap.get(2); // returns 1
# hashMap.remove(2); // remove the mapping for 2
# hashMap.get(2); // returns -1 (not found)
# Note:
# All keys and values will be in the range of [0, 1000000].
# The number of operations will be in the range of [1, 10000].
# Please do not use the built-in HashMap library.
class MyHashMap:
    """Separate-chaining hash map over a fixed number of buckets.

    Each bucket holds mutable [key, value] pairs; lookups scan the (short)
    chain for their key.  Keys and values are non-negative ints per the
    problem constraints.
    """

    def __init__(self):
        self.keys = 10000  # number of buckets
        self.table = [[] for _ in range(self.keys)]

    def _bucket(self, key):
        """Return the chain that *key* hashes into."""
        return self.table[key % self.keys]

    def put(self, key: int, value: int) -> None:
        chain = self._bucket(key)
        for entry in chain:
            if entry[0] == key:
                entry[1] = value  # overwrite the existing mapping
                return
        chain.append([key, value])

    def get(self, key: int) -> int:
        for stored_key, stored_value in self._bucket(key):
            if stored_key == key:
                return stored_value
        return -1  # sentinel: key absent

    def remove(self, key: int) -> None:
        chain = self._bucket(key)
        for position, entry in enumerate(chain):
            if entry[0] == key:
                del chain[position]
                return
| true |
2854a72b3c3d1f46efcab8a1faa609178a3910a4 | Python | epsilony/terse-demo | /terse_proto/tsmf/model/polygon.py | UTF-8 | 3,673 | 2.796875 | 3 | [] | no_license | '''
Created on 2012-12-22
@author: Man YUAN <epsilonyuan@gmail.com>
@author: Sparrow HU <huhao200709@163.com>
'''
from terse_proto.tsmf.model.segment import Segment2D
import numpy as np
class _Polygon2DIterator(object):
    """Iterates every Segment2D of a Polygon2D, one circular chain at a time.

    NOTE: Python 2 iterator protocol (``next``/``it.next()``), matching the
    ``xrange`` usage elsewhere in this module.
    """
    def __init__(self, pg):
        # Iterator over the polygon's chain-head segments.
        self.chains_heads_it=iter(pg.chains_heads)
        self.next_item=None
        self.current_chain_head=None
    def next(self):
        """Return the next segment; advances to the next chain after a wrap."""
        if self.current_chain_head is None:
            # Begin a new chain (raises StopIteration when all chains are done).
            self.current_chain_head=self.chains_heads_it.next()
            result=self.current_chain_head
        else:
            result=self.next_item
        self.next_item=result.succ
        if self.next_item is self.current_chain_head:
            # The circular chain has wrapped around; move on next call.
            self.current_chain_head=None
        return result
class Polygon2D(object):
    """Polygon built from one or more closed chains of Segment2D vertices.

    ``vertes`` is a sequence of rings; each ring is a sequence of 2D points
    (e.g. an outer boundary plus hole rings, as in sample_vertes_xys).
    """
    def __init__(self, vertes):
        self.vertes = vertes
        # One doubly linked circular chain of Segment2D per ring.
        self.chains_heads = [Segment2D(e[0]) for e in vertes ]
        for i in xrange(len(vertes)):
            point = self.chains_heads[i]
            for p in vertes[i][1:]:
                point.succ = Segment2D(p)
                t = point
                point = point.succ
                point.pred = t
            # Close the ring: last segment links back to the chain head.
            point.succ = self.chains_heads[i]
            self.chains_heads[i].pred = point
    def __iter__(self):
        # Yields every segment of every chain (see _Polygon2DIterator).
        return _Polygon2DIterator(self)
    def ray_crossing(self, xy):
        """Classify xy: 'v' on a vertex, 'e' on an edge, 'i' inside, 'o' outside."""
        # Originate from:
        # Joseph O'Rourke, Computational Geometry in C,2ed. Page 244, Code 7.13
        rcross = 0
        lcross = 0
        x,y=xy
        for seg in self:
            if np.alltrue(seg.head.coord == xy):
                return 'v'
            x_i, y_i = seg.head.coord
            x_i1, y_i1 = seg.rear.coord
            # Does this edge straddle the horizontal line through xy?
            rstrad = (y_i > y) != (y_i1 > y)
            lstrad = (y_i < y) != (y_i1 < y)
            if rstrad or lstrad:
                if rstrad and x_i > x and x_i1 > x:
                    rcross += 1
                elif lstrad and x_i < x and x_i1 < x:
                    lcross += 1
                else:
                    # x-coordinate where the edge meets the ray's supporting line.
                    xcross = (x_i * y - x_i * y_i1 - x_i1 * y + x_i1 * y_i) / (y_i - y_i1)
                    if(rstrad and xcross > x):
                        rcross += 1
                    if(lstrad and xcross < x):
                        lcross += 1
        # Differing parities of left/right crossings means xy lies on an edge.
        if rcross % 2 != lcross % 2 :
            return 'e'
        if rcross % 2 == 1:
            return 'i'
        else:
            return 'o'
    def distance_function(self, xy):
        """Signed distance to the boundary: >0 inside, <0 outside, 0 on it."""
        r_crs = self.ray_crossing(xy)
        if r_crs == 'e' or r_crs == 'v':
            return 0
        inf_abs = float('inf')
        # Minimum unsigned distance over all boundary segments.
        for seg in self:
            t = seg.distance_to(xy)
            if t < inf_abs:
                inf_abs = t
        return inf_abs if r_crs == 'i' else -inf_abs
def sample_vertes_xys():
    """Sample polygon vertices: a unit-ish outer square with a square hole."""
    outer_ring = [[-1, -1], [1, -1], [1, 1], [-1, 1]]
    hole_ring = [[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]]
    return np.array([outer_ring, hole_ring], dtype=np.double)
if __name__ == '__main__':
    # Demo: plot the signed distance field of the sample square-with-hole polygon.
    pg = Polygon2D(sample_vertes_xys())
    # Wrap the scalar distance function so it broadcasts over coordinate grids.
    dist_func_py = np.frompyfunc(lambda x, y:pg.distance_function((x, y)), 2, 1)
    xs = np.linspace(-1.2, 1.2, 100)
    ys = np.linspace(-1.2, 1.2, 100)
    (g_xs, g_ys) = np.meshgrid(xs, ys)
    g_zs_py = dist_func_py(g_xs, g_ys)
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import pyplot as plt
    fig = plt.figure()
    # Left: wireframe of the distance surface.
    ax = fig.add_subplot(121, projection='3d')
    ax.contour(g_xs, g_ys, g_zs_py)
    ax.plot_wireframe(g_xs, g_ys, g_zs_py, rstride=5, cstride=5)
    # Right: contours; the filled band near zero highlights the boundary.
    ax = fig.add_subplot(122, projection='3d')
    ax.contour(g_xs, g_ys, g_zs_py, 20)
    ax.contourf(g_xs, g_ys, g_zs_py, (0, 0.05))
    fig.show()
| true |
2c48f30b450e3b6ea476c70dc8fd3c3828c337f6 | Python | Aasthaengg/IBMdataset | /Python_codes/p02419/s014071170.py | UTF-8 | 234 | 3.046875 | 3 | [] | no_license | s=input().lower()
cnt=0
while True:
l=input().split()
if l[0]=='END_OF_TEXT': break
for i in range(len(l)):
lg=len(l[i])-1
if not l[i][lg].isalpha(): l[i]=l[i][:lg]
cnt+=(s==l[i].lower())
print(cnt) | true |
563dbd201ff3096d0b4f496f10b5726da8c20d38 | Python | hashtag-arbaas/code-20211008-shaikhintekhab | /bmi_calc.py | UTF-8 | 779 | 3.203125 | 3 | [] | no_license |
def get_category_and_risk(bmi):
    """Map a BMI value to its weight category and health-risk label.

    Uses the standard BMI table: <18.5 underweight, 18.5-25 normal,
    25-30 overweight, 30-35 moderately obese, 35-40 severely obese,
    >=40 very severely obese.

    Returns:
        (category_type, risk) strings.
    """
    category_type = 'Underweight'
    risk = "Malnutrition risk"
    if bmi >= 18.5 and bmi < 25.0:
        category_type = "Normal weight"
        risk = "Low risk"
    elif bmi >= 25.0 and bmi < 30.0:
        category_type = "Overweight"
        risk = "Enhanced risk"
    elif bmi >= 30.0 and bmi < 35.0:
        category_type = "Moderately obese"
        risk = "Medium risk"
    elif bmi >= 35.0 and bmi < 40.0:
        # BUG FIX: this branch repeated the 30-40 condition of the previous
        # one, so "Severely obese"/"High risk" was unreachable.
        category_type = "Severely obese"
        risk = "High risk"
    elif bmi >= 40:
        category_type = "Very severely obese"
        risk = "Very high risk"
    return category_type, risk
def get_bmi_data(mass, height_cm):
    """Compute the BMI from mass in kg and height in cm.

    Returns:
        (bmi, category, risk) where category/risk come from
        get_category_and_risk.
    """
    # BUG FIX: BMI is mass divided by height in metres SQUARED; the original
    # divided by the height only once.
    height_m = height_cm / 100
    bmi = mass / (height_m ** 2)
    category, risk = get_category_and_risk(bmi)
    return bmi, category, risk
| true |
41ad63dac7791fda7cbc8ffe0c52321f3d95155e | Python | widealpha/python-bigdata-experiment | /大作业3爬虫/小说爬取/novel/xiaoshuolou.py | UTF-8 | 3,257 | 2.671875 | 3 | [] | no_license | import urllib.request
import time
import os
from bs4 import BeautifulSoup
from io import BytesIO
import gzip
def getpath(name):
    """Create the download directory '/noval/<name>' and return its path.

    An empty name falls back to a timestamp. If makedirs fails (e.g. the
    directory already exists) an error is printed but the path is still
    returned.
    """
    try:
        if len(name) == 0:
            name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        path = '/noval/' + name
        os.makedirs(path)
    except Exception:
        print("获取文件路径出错")
        return path
    return path
def gethtml(url):
    """Fetch url with browser-like headers and return the gzip-decoded body.

    Returns raw bytes (decoding to utf-8 is left commented out).
    NOTE(review): if the response is not gzip-compressed, f.read() raises and
    f_html is unbound on return — TODO confirm the site always gzips.
    """
    req = urllib.request.Request(url)
    req.add_header('Connection', 'keep-alive')
    req.add_header('Cache-Control', 'max-age=0')
    req.add_header('Upgrade-Insecure-Request', '1')
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)\
        AppleWebKit/537.36 (KHTML, like Gecko)\
        Chrome/58.0.3029.110 Safari/537.36')
    req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;\
        q=0.9,image/webp,*/*;q=0.8')
    req.add_header('Accept-Encoding', 'gzip, deflate, sdch, br')
    req.add_header('Accept-Language', 'zh-CN,zh;q=0.8,en;q=0.6')
    response = urllib.request.urlopen(req)
    # print(response.info())
    try:
        buf = BytesIO(response.read())
        f = gzip.GzipFile(fileobj=buf)
        f_html = f.read()
        # f_html = f_html.decode('utf-8')
    except Exception as e:
        print(e)
    # print(f_html)
    return f_html
def decode_html(soup):
    """Extract the chapter text from div#booktext, stripping site watermarks."""
    content = ''
    for text in soup.find_all(name='div', attrs={"id": "booktext"}):
        content += text.text
        # print(text.text)
        # for p in text:
        #     print(p.string)
        #     if (len(p) == 0):
        #         break
        #     if p.string is None:
        #         continue
        #     content += " "
        #     content += p.string
    # Remove the site's advertising/watermark strings from the text.
    content = content.replace("""☆百☆度☆搜☆索☆小☆说☆楼☆免☆费☆小☆说☆在☆线☆阅☆读☆""", '\
    ').replace(" 手机阅读请访问『m.xslou.com』无弹窗在线阅读", '\
    ').replace("『www.xslou.com』", '')
    return content
def get_next_url(soup, preUrl):
    """Return the absolute URL of the '下一章' (next chapter) link, or None."""
    nexUrl = None
    for a in soup.select('a'):
        # print(a.string)
        if a.string == '下一章':
            nexUrl = "https://www.xslou.com/" + a['href']
            break
    print(nexUrl)
    return nexUrl
def get_noval_title(soup):
    """Print and return the page's <h1> text (the chapter title)."""
    print(soup.h1.string)
    return soup.h1.string
name = str(input("小说的名字:"))
noval_path = getpath(name)
# start = int(input("开始的章节:"))
# end = int(input("结束的章节:"))
start = 1
end = 10
url = str(input("输入小说起始网址:"))
html = gethtml(url)
soup = BeautifulSoup(html, 'lxml')
# decode_html(soup)
# get_next_url(soup, url)
# file_content = open(noval_path + '/content.txt', mode="w+", encoding="utf-8")
# file_content.write(html)
i = start
while i <= end:
print("获取第%d章中。。。" % i)
name_chapter = get_noval_title(soup)
# file_path = noval_path + '/第%d章_%s.txt' % (i, "ss")
file_path = noval_path + '/' + "name_chapter" + '.txt'
file = open(file_path, mode="a+", encoding="utf-8")
file.write('\n' + name_chapter + '\n' + decode_html(soup))
url = get_next_url(soup, url)
html = gethtml(url)
soup = BeautifulSoup(html, 'lxml')
i += 1
| true |
c69296eb3b9def5ac864359b93fc5706d9a90b1e | Python | Tomoki-Kikuta/atcoder | /abc161/D.py | UTF-8 | 531 | 3.390625 | 3 | [] | no_license | import queue
def main():
    """Print the K-th smallest 'lunlun' number (adjacent digits differ by <= 1).

    Reads K from stdin and generates candidates in increasing order with a
    FIFO queue seeded with the single digits 1..9 (AtCoder ABC161 D).
    """
    K = int(input())
    number_list = queue.Queue()
    for i in range(1, 10):
        number_list.put(i)
    count = 0
    number = 0
    while(count != K):
        number = number_list.get()
        # Extend with a last digit equal to, one below, or one above the
        # current last digit (skipping digits outside 0..9).
        if number % 10 != 0:
            number_list.put((10 * number) + (number % 10) - 1)
        number_list.put(10 * number + number % 10)
        if number % 10 != 9:
            number_list.put(10 * number + (number % 10) + 1)
        count += 1
    print(number)
if __name__ == "__main__":
main()
| true |
c18ecf3368f9bcc884c49711d12dee7b916f1a7e | Python | daniel-reich/ubiquitous-fiesta | /cEzT2e8tLpwYnrstP_21.py | UTF-8 | 93 | 2.765625 | 3 | [] | no_license |
def swap_d(k, v, swapped):
    """Pair k with v elementwise into a dict; if swapped, v supplies the keys."""
    keys, values = (v, k) if swapped else (k, v)
    return dict(zip(keys, values))
| true |
0f5b9df40d324b6c70ddf32b1b7002e98a4f0ca9 | Python | LeftRadio/NelNet | /core/crc8.py | UTF-8 | 2,344 | 2.546875 | 3 | [] | no_license | # python3!
from ctypes import cdll, byref, c_ubyte, c_int
_crc_cext = cdll.LoadLibrary('./crc8_c_lib/crc8.dll')
# calc crc for single byte and old(init) crc value
def ncrc8_byte(byte, crc):
    """Feed one byte (MSB first) into an 8-bit CRC with polynomial 0x85.

    Returns the updated CRC value (0..255) given the previous value ``crc``.
    """
    for _ in range(8):
        msb_was_set = crc & 0x80
        crc = (crc << 1) & 0xFF
        # Shift the byte's current top bit into the CRC's bottom bit.
        if byte & 0x80:
            crc |= 0x01
        else:
            crc &= 0xFE
        if msb_was_set:
            crc ^= 0x85
        byte = (byte << 1) & 0xFF
    return crc & 0xFF
# calc crc for input buf data
def ncrc8_buf_py(buf):
    """Pure-Python CRC-8 of an entire byte sequence, seeded with 0."""
    acc = 0
    for value in buf:
        acc = ncrc8_byte(value, acc)
    return acc
# calc crc for input buf data
def ncrc8_buf_c_win(cubyte_buff, clen):
    """CRC-8 via the C DLL, for callers that already hold ctypes objects.

    cubyte_buff is a ctypes c_ubyte array, clen its length as c_int.
    """
    return _crc_cext.crc8_buffer( byref(cubyte_buff), clen )
# calc crc for input buf data
def ncrc8_buf(buf):
    """CRC-8 of a Python byte sequence, computed by the loaded C DLL."""
    # Copy buf into a ctypes array so the DLL can read it.
    crbuf = (c_ubyte * (len(buf)))(*buf)
    blck_len = c_int(len(buf))
    crc = _crc_cext.crc8_buffer( byref(crbuf), blck_len)
    del(crbuf)
    return int(crc)
#
if __name__ == '__main__':
""" test """
from logger import NLogger
from time import time
# logger
mainlogger = NLogger.init( '__main__', 'DEBUG' )
# test buffer
rbuf = bytearray( [ 0x5b, 0x00, 0x00, 0x2, 0x86, 0x93 ] )
# test buffer pre-calc control crc
control_crc = 0x8f
# ---
ts = time()
mainlogger.info('START native python on %.8g sec ...' % ts)
for x in range(1000):
calc_crc = ncrc8_buf_py( rbuf )
# ---
ts = time()-ts
mainlogger.info( 'DONE native python - work time is %s msec - %s' % (ts*(10**3), '# --- '*20))
mainlogger.info( 'verify crc: %s' % (control_crc == calc_crc) )
# ------------------------------------------------------------------------------
# ---
ts = time()
mainlogger.info('START C dll on %.8g sec ...' % ts)
crbuf = (c_ubyte * (len(rbuf) + 1))(*rbuf)
blck_len = c_int(len(rbuf))
calc_crc = c_ubyte(0)
x = 0
while x < 1000:
calc_crc = _crc_cext.crc8_buffer( byref(crbuf), blck_len )
x += 1
del(crbuf)
# ---
ts = time()-ts
mainlogger.info( 'DONE C dll - work time is %.8g msec - %s' % (ts*(10**3), '# --- '*20))
mainlogger.info( 'dll calc crc val: 0x%02X' % int(calc_crc) )
mainlogger.info( 'verify crc: %s' % ( control_crc == int(calc_crc) ) )
| true |
1c9484b314f9f267063c3efee0eff5b02454d4bb | Python | MAHESHRAMISETTI/My-python-files | /ip.py | UTF-8 | 438 | 3.46875 | 3 | [] | no_license | import socket
# Repeatedly offer to print the local IP address until the user answers 'n'.
while True:
    print("Want to get IP Address ? (y/n): ")
    check = input()
    if check == 'n':
        break
    else:
        print("Your IP Address is: ",end="")
        print(socket.gethostbyname(socket.gethostname()))
        print()
import datetime
# Repeatedly offer to print the current date and time until 'n'.
while True:
    print("Want to print Today's Date and Time ? (y/n): ")
    check = input()
    if check == 'n':
        break
    else:
        print("Today's date and time:")
        print(datetime.datetime.today())
        print()
c923a4281984dae05404ce4c28702cd3b9a037db | Python | akuchlous/leetcode | /717.py | UTF-8 | 470 | 3.5 | 4 | [] | no_license | class Solution:
def isOneBitCharacter(self, bits):
"""
:type bits: List[int]
:rtype: bool
"""
i = 0
ret = False
while (i<len(bits)):
if (bits[i] == 1):
ret = False
i+=2
else:
ret = True
i+=1
return ret
print (Solution().isOneBitCharacter([1, 1, 1, 0]))
print (Solution().isOneBitCharacter([1, 0,0 ]))
| true |
22eddd3759240925b44248498f2fcebdda823cf2 | Python | nononovak/otwadvent2019-ctfwriteup | /solutions/day16_solver.py | UTF-8 | 4,978 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
from __future__ import print_function
import midi
import sys
'''
18 git clone https://github.com/vishnubob/python-midi
19 cd python-midi/
30 python setup.py install
'''
if __name__ == '__main__':
music = midi.read_midifile('Stegno.mid')
music.make_ticks_abs()
#print(music)
# note = midi.NoteOnEvent(tick=5280, channel=0, data=[67, 80])
# note.tick, note.channel, data=[note.pitch, note.velocity]
ticks_per_frame = 1920
for track in music:
notes = [note for note in track if note.name == 'Note On']
pitch = [note.pitch for note in notes]
tick = [note.tick for note in notes]
#tracks += [tick, pitch]
#print music[0][0]
#treble_track = music[0]
arr = []
pitch_lookup = {}
pitch_lookup[79] = '0' # G
pitch_lookup[76] = '5' # E
pitch_lookup[74] = '4' # D
pitch_lookup[72] = '3' # C
pitch_lookup[71] = '2' # B
pitch_lookup[69] = '1' # A
pitch_lookup[67] = '0' # G
pitch_lookup[66] = '6' # F#
pitch_lookup[64] = '5' # E
pitch_lookup[62] = '4' # D
pitch_lookup[47] = '2' # B
pitch_lookup[45] = '1' # A
pitch_lookup[43] = '0' # G
pitch_lookup[42] = '6' # F#
pitch_lookup[40] = '5' # E
pitch_lookup[38] = '4' # D
pitch_lookup[36] = '3' # C
pitch_lookup[35] = '2' # B
pitch_lookup[28] = '5' # E
notes_on = {}
for note in music[0]: # Treble track
if note.name != 'Note On':
continue
pitch = note.pitch
tick = note.tick
if note.velocity > 0: # pressed
notes_on[pitch] = tick
else:
ptick = notes_on[pitch]
del(notes_on[pitch])
#print(pitch, (1.*ptick/ticks_per_frame,1.*tick/ticks_per_frame))
arr.append((1.*ptick/ticks_per_frame,1.*tick/ticks_per_frame,pitch))
notes_on = {}
for note in music[1]: # Treble track
if note.name != 'Note On':
continue
pitch = note.pitch
tick = note.tick
if note.velocity > 0: # pressed
notes_on[pitch] = tick
else:
ptick = notes_on[pitch]
del(notes_on[pitch])
#print(pitch, (1.*ptick/ticks_per_frame,1.*tick/ticks_per_frame))
arr.append((1.*ptick/ticks_per_frame,1.*tick/ticks_per_frame,pitch))
arr.sort()
for (t0,t1,pitch) in arr:
notes = [p for (_t0,_t1,p) in arr if _t0 <= t0 and t0 < _t1]
notes.sort()
#print(t0, ''.join(pitch_lookup[p] for p in notes), notes)
times = []
'''
# Look at the four 8-measure matches side-by-side
for t in range(0,8*16):
t0 = 1.*t/16
notes1 = [p for (_t0,_t1,p) in arr if _t0 <= t0 and t0 < _t1]
notes2 = [p for (_t0,_t1,p) in arr if _t0 <= t0+8 and t0+8 < _t1]
notes3 = [p for (_t0,_t1,p) in arr if _t0 <= t0+40 and t0+40 < _t1]
notes4 = [p for (_t0,_t1,p) in arr if _t0 <= t0+48 and t0+48 < _t1]
notes1.sort()
notes2.sort()
notes3.sort()
notes4.sort()
bnotes1 = ''.join(pitch_lookup[p] for p in sorted(notes1))
bnotes2 = ''.join(pitch_lookup[p] for p in sorted(notes2))
bnotes3 = ''.join(pitch_lookup[p] for p in sorted(notes3))
bnotes4 = ''.join(pitch_lookup[p] for p in sorted(notes4))
print('%.4f'%t0, '%-4s'%bnotes1, '%-4s'%bnotes2, '%-4s'%bnotes3, '%-4s'%bnotes4, '%-10s'%(notes1 == notes3), '%-10s'%(notes2 == notes4), '%-20s'%notes1, '%-20s'%notes2, '%-20s'%notes3, '%-20s'%notes4)
sys.exit(0)
#'''
'''
# Look at 40 measures side-by-side
for t in range(0,40*16-1):
if t%16 == 0:
print('='*100)
t0 = 1.*t/16
t1 = 1.*(t+1)/16
notes1 = [p for (_t0,_t1,p) in arr if _t0 <= t0 and t0 < _t1]
notes1b = [p for (_t0,_t1,p) in arr if _t0 <= t1 and t1 < _t1]
notes2 = [p for (_t0,_t1,p) in arr if _t0 <= t0+40 and t0+40 < _t1]
notes1.sort()
notes2.sort()
bnotes1 = ''.join(pitch_lookup[p] for p in sorted(notes1))
bnotes2 = ''.join(pitch_lookup[p] for p in sorted(notes2))
print('%.4f'%t0, '%-4s'%bnotes1, '%-4s'%bnotes2, '%-10s'%(notes1 == notes2), '%-20s'%notes1, '%-20s'%notes2)
sys.exit(0)
#'''
ans = ''
for t in range(0,80*16,2):
if t%16 == 0:
print('='*100)
t0 = 1.*t/16
t1 = 1.*(t+1)/16
notes1 = [p for (_t0,_t1,p) in arr if _t0 <= t0 and t0 < _t1]
notes2 = [p for (_t0,_t1,p) in arr if _t0 <= t1 and t1 < _t1]
notes1.sort()
notes2.sort()
bnotes1 = ''.join(pitch_lookup[p] for p in sorted(notes1))
bnotes2 = ''.join(pitch_lookup[p] for p in sorted(notes2))
ch = ''
if notes1 != notes2 and len(notes1) == len(notes2):
for i in range(len(bnotes1)):
if bnotes1[i] != bnotes2[i]:
ch = bnotes2[i]
print('%.4f'%t0, '%-4s'%bnotes1, '%-4s'%bnotes2, '%-10s'%(notes1 == notes2), '%-20s'%notes1, '%-20s'%notes2, ch, [chr(int(ans[i:i+3],7)) for i in range(0,len(ans),3)])
ans = ans + ch
print(ans, len(ans))
print([chr(int(ans[i:i+3],7)) for i in range(0,len(ans),3)])
ans2 = ans[:-5]+'1'+ans[-5:]
print([chr(int(ans2[i:i+3],7)) for i in range(0,len(ans2),3)])
# AOTW{ ... }
# A = 122 b7
# O = 142 b7
# T = 150 b7
# W = 153 b7
# { = 234 b7
# _ = 165 b7
# } = 236 b7
# BABAG, D, AABAG, E,
# 79 G
# 76 E
# 74 D
# 72 C
# 71 B
# 69 A
# 67 G
# 66 F#
# 64 E
# 62 D
# Bass
# EG/B, DF/A, ., C/GE, EG/B
# 47 B
# 45 A
# 43 G
# 42 F#
# 40 E
# 38 D
# 36 C
# 35 B
# 28 E
| true |
10355908492abb634b8ca1272acb9453b4ec23e1 | Python | toshima/codejam | /2015-1b/b.py | UTF-8 | 566 | 3.171875 | 3 | [] | no_license |
import sys
def solve(y, x, n):
    """Google Code Jam 2015 1B: max unpainted edge length for n cells on a
    y-by-x grid. a/b/c count the empty cells grouped by their saving tiers.

    NOTE: Python 2 source (print statement below).
    """
    if y == 1 or x == 1:
        a = 0
        b = 0
        c = int((x * y - 1) / 2)
    else:
        a = max(0, int(((y-2)*(x-2) + 1) / 2))
        c = 4 if x*y%2 else 2
        b = max(0, x + y - 2 - c)
    print a, b, c
    # m = number of cells left empty; each tier saves 4/3/2 edge units.
    m = x*y-n
    return (y * (x-1) + x * (y-1)
            - 4 * min(m, a)
            - 3 * min(max(0, m-a), b)
            - 2 * min(max(0, m-a-b), c))
for tc in range(int(raw_input())):
y, x, n = map(int, raw_input().split())
sys.stdout.write("Case #{}: {}\n".format(tc+1, solve(y, x, n)))
| true |
8599468120472c996ccfa520996ec151f19a3312 | Python | booox/Learn-Notes | /python/small codes/Properties in Python.py | UTF-8 | 868 | 3.125 | 3 | [] | no_license |
# read-only
class C(object):
    """Demo: read-only attribute — a @property getter with no setter."""
    @property
    def age(self):
        return self._age
# read-write
class C(object):
    """Demo: read-write property — getter plus a validating setter."""
    @property
    def age(self):
        return self._age
    @age.setter
    def age(self, value):
        # Reject negative ages before storing.
        assert value >= 0
        self._age = value
# only write, unreadable
class User(object):
    """Demo: write-only property — password can be set but never read back.

    NOTE(review): relies on an external SQLAlchemy-style ``db`` object and on
    generate_password_hash/check_password_hash imported elsewhere — this
    snippet does not run standalone.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    password_hash = db.Column(db.String(128))
    @property
    def password(self):
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        # Store only the hash, never the plaintext.
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True if password matches the stored hash."""
        return check_password_hash(self.password_hash, password)
a84ec181608aa1ed534966cfd56345cdc85968a7 | Python | ayueh0822/ML2017FALL | /hw2/hw2_logistic.py | UTF-8 | 1,827 | 2.625 | 3 | [] | no_license | import sys
import pandas as pd
import numpy as np
np.set_printoptions(suppress=True)
def main(argv):
    """Logistic-regression inference: read test features from argv[4],
    apply the saved weight vector, write id/label predictions to argv[5].
    """
    x_data_table = pd.read_csv(argv[4])
    x_data_table = np.array(x_data_table, dtype=float)
    feature_select = [i for i in range(0, 106, 1)]
    feature_select2 = [i for i in range(0, 106, 1)]
    x_test = np.zeros((x_data_table.shape[0],0))
    # Build the feature matrix: raw columns plus squared (continuous) or
    # scaled (binary) copies — must mirror the training-time features.
    for i in range(0, 106, 1):
        feature_vec = x_data_table[:,i].reshape(x_data_table.shape[0],1)
        if i in feature_select:
            x_test = np.concatenate((x_test, feature_vec), axis=1)
        if i in feature_select2:
            if i in [0,1,3,4,5]: # continuous value
                x_test = np.concatenate((x_test, feature_vec ** 2), axis=1)
            else: # 0/1 value
                x_test = np.concatenate((x_test, feature_vec * 4), axis=1)
    ### normalization
    for i in range(0, x_test.shape[1], 1):
        mean_val = np.mean(x_test[:,i])
        std_val = np.std(x_test[:,i])
        if std_val != 0:
            x_test[:,i] = (x_test[:,i] - mean_val) / std_val
    ### min-max
    # for i in range(0, x_test.shape[1], 1):
    #     min_val = np.min(x_test[:,i])
    #     max_val = np.max(x_test[:,i])
    #     if (max_val - min_val) != 0:
    #         x_test[:,i] = (x_test[:,i] - min_val) / (max_val - min_val)
    # Append the bias column and load the trained weights.
    x_test = np.concatenate((x_test, np.ones((x_test.shape[0],1))), axis=1)
    w = np.load('./model/model_1.npy')
    df_table = []
    z = np.dot(x_test, w)
    sigmoid = 1 / (1 + np.exp(-z))
    # Threshold the sigmoid output at 0.5 for the binary label.
    for i in range(0,x_test.shape[0],1):
        if sigmoid[i] > 0.5:
            df_table.append([i+1, 1])
        else:
            df_table.append([i+1, 0])
    df = pd.DataFrame.from_records(df_table, columns = ["id","label"])
    df.to_csv(argv[5], index=False)
if __name__ == '__main__':
main(sys.argv[1:]) | true |
497f16d5e5c8866cba7d833f0715f407c364d33b | Python | JimmyHengbo/crawlzim | /cralwlZim.py | UTF-8 | 4,585 | 2.734375 | 3 | [] | no_license | from bs4 import BeautifulSoup
import numpy as np
import requests
import pypyodbc
import os
class getZimTrackingInfor:
    """Scrapes ZIM shipment-tracking pages and stores the events in an Access DB.

    NOTE(review): mainItem/childItem use the module-level ``cursor`` created in
    ``__main__`` — TODO: pass the cursor in explicitly.
    """

    def __init__(self, db_name, password=""):
        """Create the .mdb file if it does not exist yet and connect to it."""
        if not os.path.exists(db_name):
            pypyodbc.win_create_mdb(db_name)
        self.db_name = db_name
        self.password = password
        self.connectDB()

    def connectDB(self):
        """Open the ODBC connection; return True on success, False on failure."""
        driver = 'Driver={Microsoft Access Driver (*.mdb,*.accdb)};PWD=' + self.password + ";DBQ=" + self.db_name
        try:
            self.conn = pypyodbc.win_connect_mdb(driver)
            return True
        except Exception:
            print("connection is false")
            return False

    def getCursor(self):
        """Return a cursor for the open connection, or None if unavailable."""
        try:
            return self.conn.cursor()
        except Exception:
            return

    def selectDB(self, cursor, sql):
        """Execute a SELECT and return all rows; [] on any failure."""
        try:
            cursor.execute(sql)
            return cursor.fetchall()
        except Exception:
            return []

    def insertDB(self, cursor, sql):
        """Execute an INSERT and commit; return True on success."""
        try:
            cursor.execute(sql)
            self.conn.commit()
            return True
        except Exception:
            return False

    def close(self):
        """Close the connection, ignoring errors."""
        try:
            self.conn.close()
        except Exception:
            return

    def mainItem(self, index, content, tableName):
        """Insert the summary row of an odd-indexed tracking table.

        The row holds the container id, four detail cells and the BOL number.
        """
        InforInSameContainer = "('" + content[index]["data-cont-id"] + "'" + ","
        for i in range(4):
            InforInSameContainer += "'" + content[index].select('td')[i + 1].text + "'" + ","
        InforInSameContainer += "'" + content[index]["data-cons-id"] + "')"
        # BUG FIX: the original concatenated "insert into" directly onto the
        # table name ("insert intozImTrackInfor..."), producing invalid SQL,
        # so the summary rows were silently dropped by insertDB.
        sql = "insert into " + tableName + "(containerId,activity,location,dateTrack,vessel,bolnumber) values" + InforInSameContainer
        self.insertDB(cursor, sql)

    def childItem(self, index, parentIndex, content, tableName):
        """Insert the detail rows of an even-indexed tracking table.

        Cells come in groups of five whose first cell is an empty spacer;
        each group becomes one row tagged with the parent's container/BOL ids.
        """
        InforInSameContainer = "('" + content[parentIndex]["data-cont-id"] + "'" + ","
        counter = 0
        for cell in content[index].select('td'):
            counter = counter + 1
            if counter == 1:
                continue  # skip the spacer cell that starts every group
            InforInSameContainer += "'" + cell.text.replace("'", "''") + "'" + ","
            if counter % 5 == 0:
                InforInSameContainer += "'" + content[parentIndex]["data-cons-id"] + "')"
                sql = "insert into " + tableName + "(containerId,activity,location,dateTrack,vessel,bolnumber) values" + InforInSameContainer
                self.insertDB(cursor, sql)
                counter = 0
                InforInSameContainer = "('" + content[parentIndex]["data-cont-id"] + "'" + ","

    def crawlByBOLNum(self, bolNum, tableName):
        """Fetch the tracking page for one BOL number and store all its events."""
        page = 'https://www.zim.com/tools/track-a-shipment?consnumber=' + bolNum
        respone = requests.get(page)
        respone.encoding = 'utf-8'
        soup = BeautifulSoup(respone.text, 'lxml')
        content = soup.select('.routing-details table')
        parentIndex = 1
        # Tables alternate: odd index = container summary, even = its details.
        for i in range(1, len(content)):
            if i % 2 != 0:
                parentIndex = i
                self.mainItem(i, content, tableName)
            else:
                self.childItem(i, parentIndex, content, tableName)
if __name__ == '__main__':
path = os.path.join("D://", "zimTrack.mdb")
tableName = "zImTrackInfor"
conn = getZimTrackingInfor(path, "")
cursor = conn.getCursor()
# get the BOL number by accessing the csv file
data = []
with open('Master_BOL.CSV') as fileReader:
line = fileReader.readline()
while line:
data.append(line)
line = fileReader.readline()
data = np.array(data) # Transforming the data from list to array
counter = 0 # used for skip the no means information in CSV like title:Master BL#
for oblNum in data:
if counter != 0:
conn.crawlByBOLNum(oblNum.replace("\n",""),tableName) # deleting "line break"
counter = 1
# get all information in DB
sql = "SELECT * from "+tableName
rows = cursor.execute(sql)
for item in rows:
print(item)
# close the connection
cursor.close()
conn.close()
| true |
72bcd6f860d8395d3f6c593e98b5122228ce1f04 | Python | YanaNovikova/python_traning | /test/test_L_F_A.py | UTF-8 | 1,253 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | import re
from random import randrange
from fixture.orm import ORMFixture
from model.contact import Contact
dat = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
def test_LFA_on_viewpage(app):
    """Last/first name and address on the home page must match the edit page
    for a randomly chosen contact (punctuation/whitespace stripped)."""
    old_contacts = app.contact.get_contact_list()
    index = randrange(len(old_contacts))
    contact_from_homepage = app.contact.get_contact_list()[index]
    contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
    assert contact_from_homepage.lastname == clear(contact_from_edit_page.lastname)
    assert contact_from_homepage.firstname == clear(contact_from_edit_page.firstname)
    assert contact_from_homepage.address == clear(contact_from_edit_page.address)
def test_LFA_on_homepage_all(app):
    """Every contact on the home page must match the DB, compared in id order."""
    db_contacts = dat.get_contact_list()
    contact_from_homepage = app.contact.get_contact_list()
    # Sort both sides by the same key so positions correspond.
    list1 = sorted(db_contacts, key=Contact.id_max)
    list2 = sorted(contact_from_homepage, key=Contact.id_max)
    assert len(list1) == len(list2)
    for i in range(0, len(db_contacts)):
        assert list1[i].lastname == list2[i].lastname
        assert list1[i].firstname == list2[i].firstname
        assert list1[i].address == list2[i].address
def clear(s):
    """Strip parentheses, spaces, slashes and hyphens from s."""
    return "".join(ch for ch in s if ch not in "() /-")
c50e222104662ac488a5bcc5d88066fbd5fecfdc | Python | shi0524/algorithmbasic2020_python | /Python2/class20/Code01_PalindromeSubsequence.py | UTF-8 | 1,852 | 4.03125 | 4 | [] | no_license | # -*- coding: utf-8 –*-
"""
516. 最长回文子序列
给你一个字符串 s ,找出其中最长的回文子序列,并返回该序列的长度。
子序列定义为:不改变剩余字符顺序的情况下,删除某些字符或者不删除任何字符形成的一个序列。
"""
def lpsl1(s):
    """Longest palindromic subsequence length of s (naive recursive version)."""
    return process(s, 0, len(s) - 1) if s else 0
def process(s, L, R):
    """Length of the longest palindromic subsequence of s[L..R], L <= R."""
    if L == R:
        return 1
    if R - L == 1:
        return 2 if s[L] == s[R] else 1
    inner = process(s, L + 1, R - 1)       # drop both end characters
    best = max(inner,
               process(s, L + 1, R),       # drop the left character
               process(s, L, R - 1))       # drop the right character
    if s[L] == s[R]:
        # Matching ends extend the best inner palindrome by two.
        best = max(best, inner + 2)
    return best
def lpsl2(s):
    """Longest palindromic subsequence length of s via bottom-up interval DP.

    dp[L][R] is the answer for the substring s[L..R]; it depends only on
    dp[L+1][R], dp[L][R-1] and dp[L+1][R-1], so the table is filled from the
    bottom row upward, left to right.
    """
    if not s:
        return 0
    n = len(s)
    dp = [[0] * n for _ in range(n)]
    # Base cases: single characters and adjacent pairs.
    dp[n - 1][n - 1] = 1
    for i in range(n - 1):
        dp[i][i] = 1
        dp[i][i + 1] = 2 if s[i] == s[i + 1] else 1
    for left in range(n - 3, -1, -1):
        for right in range(left + 2, n):
            candidates = [dp[left + 1][right], dp[left][right - 1]]
            if s[left] == s[right]:
                candidates.append(dp[left + 1][right - 1] + 2)
            dp[left][right] = max(candidates)
    return dp[0][n - 1]
"""
其它思路:
可将 s 反转得 s' 求, s 与 s' 的最长公共子序列
求最长公共子序列方法详见 class19/Code04_LongestCommonSubsequence.py
"""
if __name__ == "__main__":
s = "abcba"
ans1 = lpsl1(s)
ans2 = lpsl2(s)
print(ans1, ans2)
| true |
f6476cbedad1a0e119a18a85692b3e9032552b2b | Python | lynnsey/Dragon_Fighter | /Dragon_Fighter.py | UTF-8 | 8,613 | 2.9375 | 3 | [] | no_license | """
Name of File: FinalProject_LynnseyOng.py
Name: Lynnsey Ong
Version : 1.0
Last edited: December 20, 2017
You collect as many coins as you can.
Biblio = https://www.allwallpaper.in/fr/very-cool-blue-sky-wallpaper-13659.html
http://www.pngmart.com/image/32887
http://object-survival-island.wikia.com/wiki/File:NUKEY.png
https://www.youtube.com/watch?v=2C4lFUpI_4U
https://www.youtube.com/watch?v=rPSx_cSPw_0
"""
import pygame
import random
pygame.mixer.init(44100, -16, 2, 2048)
#Constants
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
THISTLE = (255, 225, 255)
SKYBLUE = (135, 206, 255)
PINK = (255, 192, 203)
PALEGREEN = (144, 238, 144)
ORANGERED = (255, 69, 0)
NAVY = (0 ,0, 128)
GOLD = (255, 215, 0)
CRIMSON = (220, 20, 60)
main_character_image = pygame.image.load("dragon_copy2.png")
background_image = pygame.image.load("sky.png")
bomb_image = pygame.image.load("bomb.png")
end_game = pygame.image.load("gameover1.png")
dead_mus = pygame.mixer.Sound("dragon_roar.ogg")
SCREEN_WIDTH = 750
SCREEN_HEIGHT = 425
size = (750, 425)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Dragon_Fighter")
PLAYER_VELOCITY = 3
x_vel = 0
y_vel = 0
dead = 0
score = 0
DRAGON_SPEED = 5
dragon_xcoord = 320
dragon_ycoord = 390
dragon_xvel = 0
dragon_yvel = 0
coins_cord = []
bombs_cord = []
class Coin(pygame.sprite.Sprite):
    """Collectible coin: a small gold circle that falls down the screen."""
    def __init__(self):
        super(Coin, self).__init__()
        # Visual representation of Coin
        self.image = pygame.Surface([10, 10])
        self.image.set_colorkey(BLACK)
        pygame.draw.circle(self.image, GOLD, [5, 5], 5, 0)
        # Rectangle to represent position
        self.rect = self.image.get_rect()
    def reset(self):
        """Respawn at a random x, above the visible screen."""
        self.rect.x = random.randrange(0, SCREEN_WIDTH)
        self.rect.y = random.randrange(-1000, -10)
class Bomb(pygame.sprite.Sprite):
    """Hazard sprite: colliding with a bomb ends the game."""
    def __init__(self):
        super(Bomb, self).__init__()
        self.image = bomb_image
        self.rect = self.image.get_rect()
    def reset(self):
        """Respawn at a random x, above the visible screen."""
        self.rect.x = random.randrange(0, SCREEN_WIDTH)
        self.rect.y = random.randrange(-1000, -10)
class Player(pygame.sprite.Sprite):
    """The dragon controlled by the player; moved by its velocity each frame."""
    def __init__(self, x, y):
        super(Player, self).__init__()
        self.image = main_character_image
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        # Current velocity in pixels per frame.
        self.xvel = 0
        self.yvel = 0
    def changevelocity(self, x, y):
        """ Changes velocity of the player """
        self.xvel += x
        self.yvel += y
    def update(self):
        """ Updates the position of the Player according to velocity """
        self.rect.x += self.xvel
        self.rect.y += self.yvel
pygame.init()
word_font = pygame.font.SysFont("Bungee", 25)
word_font2 = pygame.font.SysFont("Bungee", 40)
word_font3 = pygame.font.SysFont("Bungee", 25)
screen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])
# TODO: Create bad_block_list
# TODO: Create good_block_list
coin_list = pygame.sprite.Group()
bomb_list = pygame.sprite.Group()
# All sprites in app
all_sprites_list = pygame.sprite.Group()
for i in range(200):
coin = Coin()
# Random location for Coin
coin.rect.x = random.randrange(0, SCREEN_WIDTH)
coin.rect.y = random.randrange(-1000, -10)
# Add the block to the list of objects
coin_list.add(coin)
all_sprites_list.add(coin)
coins_cord.append([coin.rect.x, coin.rect.y])
for i in range(15):
bomb = Bomb()
bomb.rect.x = random.randrange(SCREEN_WIDTH)
bomb.rect.y = random.randrange(-1000, -10)
bomb_list.add(bomb)
all_sprites_list.add(bomb)
# TODO: Create instance of player class
player = Player(dragon_xcoord, dragon_ycoord)
all_sprites_list.add(player)
done = False
clock = pygame.time.Clock()
score = 0
pygame.mixer.music.load("Adeventuresong.mp3")
pygame.mixer.music.play(1, 0.0)
# -------- Main Program Loop -----------
while not done:
# TODO: Control character with keyboard
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
dragon_xvel -= DRAGON_SPEED
elif event.key == pygame.K_RIGHT:
dragon_xvel += DRAGON_SPEED
elif event.key == pygame.K_DOWN:
dragon_yvel += DRAGON_SPEED
elif event.key == pygame.K_UP:
dragon_yvel -= DRAGON_SPEED
if event.key == pygame.K_SPACE and dead >= 1:
dead = 0
score = 0
dead_mus.stop()
coin_list = pygame.sprite.Group()
all_sprites_list = pygame.sprite.Group()
bomb_list = pygame.sprite.Group()
for i in range(200):
coin = Coin()
# Random location for Coin
coin.rect.x = random.randrange(0, SCREEN_WIDTH)
coin.rect.y = random.randrange(-1000, -10)
# Add the block to the list of objects
coin_list.add(coin)
all_sprites_list.add(coin)
coins_cord.append([coin.rect.x, coin.rect.y])
for i in range(15):
bomb = Bomb()
bomb.rect.x = random.randrange(SCREEN_WIDTH)
bomb.rect.y = random.randrange(-1000, -10)
bomb_list.add(bomb)
all_sprites_list.add(bomb)
pygame.mixer.music.load("Adeventuresong.mp3")
pygame.mixer.music.play(1, 0.0)
player.rect.x = 320
player.rect.y = 390
all_sprites_list.add(player)
if event.key == pygame.K_ESCAPE:
done = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
dragon_xvel = 0
elif event.key == pygame.K_UP or event.key == pygame.K_DOWN:
dragon_yvel = 0
#pos = pygame.mouse.get_pos()
dragon_xcoord += dragon_xvel
dragon_ycoord += dragon_yvel
#animation of the falling object:
for coin in coin_list:
coin.rect.y += 1
if coin.rect.y >= SCREEN_HEIGHT:
coin.rect.y = 0
coin.rect.x = random.randrange(SCREEN_WIDTH)
for bomb in bomb_list:
bomb.rect.y += 1
if bomb.rect.y >= SCREEN_HEIGHT:
bomb.rect.y = random.randrange(SCREEN_WIDTH)
# Clear the screen
screen.blit(background_image, (0,0))
#Sound at the background_image
Instruction = word_font.render("Collect as many coins as you can. Dodge the bombs to survive.", 3, BLACK)
screen.blit(Instruction, (5,5))
Instruction1 = word_font3.render("Bombs may appear out of no where. Good luck!", 3, BLACK)
screen.blit(Instruction1, (5, 20))
Score = word_font2.render("Score: " + str(score), 50, BLACK)
screen.blit(Score, (550,400))
# Get the current mouse position. This returns the position
# as a list of two numbers.
#pos = pygame.mouse.get_pos()
# Fetch the x and y out of the list,
# just like we'd fetch letters out of a string.
# Set the player object to the mouse location
player.rect.x = dragon_xcoord
player.rect.y = dragon_ycoord
# TODO: Check for good and bad collisions
# See if the player block has collided with anything.
blocks_hit_list = pygame.sprite.spritecollide(player, coin_list, True)
dead_hit_list = pygame.sprite.spritecollide(player, bomb_list, True)
# TODO: Update score - good collisions = score + 1
# TODO: Update score - bad collisions = score - 1
# Check the list of collisions.
for block in blocks_hit_list:
score += 1
if not dead >= 1:
print(score)
# Draw all the spites
all_sprites_list.draw(screen)
#for the dead screen to appear:
for bomb in dead_hit_list:
dead += 1
if dead >= 1:
pygame.mixer.music.stop()
screen.blit(end_game, (0,0))
dead_mus.play()
#So the dragon doesn't escape the screen:
if dragon_xcoord <= 5:
dragon_xcoord += 2
dragon_xvel = 0
elif dragon_xcoord >= 650:
dragon_xcoord -= 2
dragon_xvel = 0
elif dragon_ycoord <= 10:
dragon_ycoord += 2
dragon_yvel = 0
elif dragon_ycoord >= 370:
dragon_ycoord -= 2
dragon_yvel = 0
pygame.display.flip()
clock.tick(60)
pygame.quit()
| true |
908697be59347e0cb2fdf716c2df9ee52da849eb | Python | stijnstroeve/advent-of-code-python | /2015/day2/day2-2.py | UTF-8 | 1,178 | 3.53125 | 4 | [] | no_license |
def read_file(path):
    """Read *path* and return its lines with surrounding whitespace stripped.

    :param path: path of the text file to read.
    :return: list of stripped lines.

    The original called ``file.close()`` inside the ``with`` block, which is
    redundant -- the context manager already closes the file on exit.
    Iterating the file object also avoids materializing the raw lines twice.
    """
    with open(path) as file:
        return [line.strip() for line in file]
inputs = read_file('inputs.txt')
def calculate_paper(all_dimensions):
    """Return the total wrapping paper needed for all "LxWxH" boxes.

    Each box needs its full surface area plus slack equal to the area of
    its smallest side.
    """
    total = 0
    for dims in all_dimensions:
        length, width, height = (int(part) for part in dims.split('x'))
        sides = (length * width, width * height, height * length)
        # Surface area (each side appears twice) plus the smallest side as slack.
        total += 2 * sum(sides) + min(sides)
    return total
def calculate_ribbons(all_dimensions):
    """Return the total ribbon length needed for all "LxWxH" boxes.

    The wrap is the smallest perimeter around the box (twice each of the
    two shortest edges); the bow uses ribbon equal to the box volume.
    """
    total = 0
    for dims in all_dimensions:
        shortest, middle, longest = sorted(int(part) for part in dims.split('x'))
        # Perimeter of the smallest face plus the volume for the bow.
        total += 2 * (shortest + middle) + shortest * middle * longest
    return total
print(f'The sum of all dimensions of paper is: {calculate_paper(inputs)}')
print(f'The sum of all dimensions of ribbons is: {calculate_ribbons(inputs)}') | true |
1e76a3eed624aa0e6ed3b4848a9765070e292568 | Python | Aasthaengg/IBMdataset | /Python_codes/p02571/s069051471.py | UTF-8 | 151 | 2.84375 | 3 | [] | no_license | S = input()
T = input()
def d(s, t):
    """Return the number of differing positions between ``s`` and ``t``.

    ``zip`` stops at the shorter string, so only overlapping positions are
    compared.  The original built a throwaway list of 0/1 via a ternary;
    summing the boolean comparisons directly is the idiomatic equivalent.
    """
    return sum(a != b for a, b in zip(s, t))
print(min([d(S[i:], T) for i in range(len(S)-len(T) + 1)]))
| true |
b7699c41f9687730eb14baced717064b1348e185 | Python | ShreeSub/CmyPlot | /src/plotting/utils/functions.py | UTF-8 | 956 | 3.109375 | 3 | [
"MIT"
def fetch_columns_options(data, table=False):
    """Handle creating column options based on the data

    Parameters
    ----------
    data: dict
        data from stored dcc.Store component
    table: bool (def. False)
        Flag for returning table list

    Returns
    ----------
    options: list of dict
        Options for each of the dropdowns in the form of
        {'label': 'Example', 'value': 'example'}
        (or {'name': ..., 'id': ...} pairs when ``table`` is set)
    """
    # Table components want name/id descriptors; dropdowns want label/value.
    first_key, second_key = ("name", "id") if table else ("label", "value")
    return [{first_key: column, second_key: column} for column in data[0]]
def validate_store_data(data):
    """
    Parameters
    ----------
    data: dict
        data from stored dcc.Store component
    Returns
    ----------
    data_in: bool
        Determine if there is dataframe data in the data dictionary
    """
    # Falsy payloads (None, empty dict) carry no dataframe.
    if not data:
        return False
    # Present-and-not-None check in a single lookup.
    return data.get("df") is not None
| true |
cf7b2313a45bf74e125149ada3a0bcc9301d855c | Python | nate-opti/gettext-anywhere | /gettext_anywhere/handlers/aws.py | UTF-8 | 5,267 | 2.546875 | 3 | [] | no_license | import gettext
import logging
import os
from boto.s3 import connection as s3_conn
from boto.s3 import key as s3_key
from . import core
# Names of the standard locale environment variables.
LANGUAGE = "LANGUAGE"
LANG = "LANG"
LC_MESSAGES = "LC_MESSAGES"
LC_ALL = "LC_ALL"
# Environment variables consulted (in this priority order, matching gettext)
# when no explicit language list is supplied to find().
DEFAULT_ENVVARS = [
    LANGUAGE,
    LC_ALL,
    LC_MESSAGES,
    LANG
]
# Fallback locale directory (S3 key prefix) used when the handler options do
# not provide "default_localedir".
_default_localedir = os.path.join("locale")
# Module-level logger; not referenced elsewhere in this module.
logger = logging.getLogger(__name__)
class S3FileHandler(core.FileHandler):
    """
    A custom file handler to search for and load translation files from an S3
    bucket.
    The bucket name is passed through via the "options" argument to the core
    FileHandler's init function.
    The `find` function mimics the default gettext `find` but searches for
    file paths inside of an S3 bucket, rather than on the file system itself.
    The `open` function simply sets a filename for usage in the `read` function.
    The `read` function pulls file contents from an S3 bucket.
    The `close` function just nulls out the filename set by `open`.
    """
    def __init__(self, *args, **kwargs):
        """
        Pulls bucket name and an optional "default_localedir" from the options
        dictionary.
        AWS credentials may also be supplied via the options dictionary,
        falling back to the standard AWS environment variables.
        Initializes empty filename and connection variables.
        """
        super(S3FileHandler, self).__init__(*args, **kwargs)
        # "bucket_name" is required; a missing key raises KeyError here.
        self._bucket_name = self._options["bucket_name"]
        self._aws_access_key_id = self._options.get(
            "aws_access_key_id",
            os.environ.get("AWS_ACCESS_KEY_ID")
        )
        self._aws_secret_access_key = self._options.get(
            "aws_secret_access_key",
            os.environ.get("AWS_SECRET_ACCESS_KEY")
        )
        self._default_localedir = self._options.get(
            "default_localedir",
            _default_localedir
        )
        # Set by open()/close(); read() operates on this S3 key name.
        self._filename = None
        # Lazily created S3 connection, cached by _get_conn().
        self._connection = None
    def _get_conn(self):
        """
        Open an S3 connection and cache it on this instance.

        :return: the S3 connection.
        """
        if self._connection is None:
            self._connection = s3_conn.S3Connection(
                aws_access_key_id=self._aws_access_key_id,
                aws_secret_access_key=self._aws_secret_access_key
            )
        return self._connection
    def find(self, localedir=None, languages=None, all=0):
        """
        Mimic gettext.find almost exactly -- os.path.exists is replaced with
        assembling an S3 key and checking for its existence instead.

        :param localedir: an optional localedir where translations are found.
        :param languages: which languages to search for.
        :param all: whether or not to read all found files, or just the first.
        :return: a list of file paths or a single file path in S3.
        """
        conn = self._get_conn()
        bucket = conn.get_bucket(self._bucket_name)
        if localedir is None:
            localedir = self._default_localedir
        if languages is None:
            # No explicit languages: consult the locale environment variables
            # in DEFAULT_ENVVARS order and use the first one that is set.
            # Values may list several languages separated by ":".
            languages = []
            for envar in DEFAULT_ENVVARS:
                val = os.environ.get(envar)
                if val:
                    languages = val.split(":")
                    break
        if "C" not in languages:
            # "C" acts as a sentinel meaning "no translation"; the search
            # loop below stops when it is reached.
            languages.append("C")
        # Expand each language into its normalized fallback variants
        # (e.g. "en_US.UTF-8" -> "en_US" -> "en"), preserving order and
        # removing duplicates.  NOTE(review): gettext._expand_lang is a
        # private API, accessed via getattr to appease linters.
        nelangs = []
        for lang in languages:
            for nelang in getattr(gettext, "_expand_lang")(lang):
                if nelang not in nelangs:
                    nelangs.append(nelang)
        # When "all" is requested, collect every match; otherwise return the
        # first match immediately (None signals "nothing found").
        result = [] if all else None
        domain_mo = "%s.mo" % self._domain
        for lang in nelangs:
            if lang == "C":
                break
            # Primary location: <localedir>/<lang>/LC_MESSAGES/<domain>.mo
            mofile = os.path.join(
                localedir,
                lang,
                LC_MESSAGES,
                domain_mo
            )
            # Ubuntu-style language-pack fallback location.
            mofile_lp = os.path.join(
                "locale-langpack",
                lang,
                LC_MESSAGES,
                domain_mo
            )
            key = s3_key.Key(bucket=bucket, name=mofile)
            if key.exists():
                if all:
                    result.append(mofile)
                else:
                    return mofile
            key = s3_key.Key(bucket=bucket, name=mofile_lp)
            if key.exists():
                if all:
                    result.append(mofile_lp)
                else:
                    return mofile_lp
        return result
    def open(self, filename):
        """
        "Opens" a given S3 file.
        This just sets the _filename variable for usage in `read`.

        :param filename: the filename to 'open'.
        """
        self._filename = filename
    def read(self):
        """
        Get an S3 connection and attempt to read the file.
        If the file doesn't exist, return an iterable empty string and let
        core gettext blow up as it would on a regular empty file.

        :return: file contents or empty string if not found.
        """
        conn = self._get_conn()
        bucket = conn.get_bucket(self._bucket_name)
        key = s3_key.Key(bucket=bucket, name=self._filename)
        if key.exists():
            return key.get_contents_as_string()
        return ""
    def close(self):
        """
        Clear the _filename variable set by `open`.
        """
        self._filename = None
| true |
9e91e77c08178e552a7c6e0f4c0a403a1e953109 | Python | diegoshakan/sistemas_operacionais | /atv1.py | UTF-8 | 332 | 3 | 3 | [] | no_license | import signal
from time import sleep
def parar(x, y):
    """SIGINT handler: taunt the user instead of terminating.

    Parameters follow the signal-handler convention: *x* is the signal
    number and *y* is the current stack frame.
    """
    print('''Eu não morro com sig
    tente com kill -9
    Digite o comando ps -aux para me identificar.
    Depois tente com o kill -9 e o meu PID''')


if __name__ == "__main__":
    # Bug fix: the handler must be installed *before* entering the infinite
    # loop -- the original registered it after the loop, so that line was
    # unreachable and Ctrl+C killed the process anyway.
    signal.signal(signal.SIGINT, parar)
    while True:
        print('Alô mundo cruel, vou dormir por um segundo.')
        sleep(1)
ad7d035bce1003e8d6627e7c18417a93befb4659 | Python | Chandrika-Saha/CNN | /ownnn.py | UTF-8 | 2,819 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 13:46:34 2018
@author: Chandrika
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download (if necessary) and load MNIST into "data" with one-hot labels.
mnist = input_data.read_data_sets("data",one_hot = True)
# Width of each of the four hidden layers.
n_nodes_hl1 = 1000
n_nodes_hl2 = 1000
n_nodes_hl3 = 1000
n_nodes_hl4 = 1000
# One output unit per digit class.
n_classes = 10
# Number of samples fed to the optimizer per training step.
batch_size = 100
# Placeholders: x holds flattened 28x28 images, y the one-hot labels.
x = tf.placeholder('float',[None,784])
y = tf.placeholder('float')
def neural_network_model(data):
    """Build the fully connected 784->1000x4->10 network graph.

    :param data: placeholder holding a batch of flattened MNIST images.
    :return: the raw (pre-softmax) logits tensor for the 10 classes.
    """
    layer_sizes = [784, n_nodes_hl1, n_nodes_hl2, n_nodes_hl3, n_nodes_hl4, n_classes]
    # One weight/bias pair per pair of consecutive layers, created in the
    # same order as the original hand-written dictionaries.
    layers = [
        {'weights': tf.Variable(tf.random_normal([n_in, n_out])),
         'biases': tf.Variable(tf.random_normal([n_out]))}
        for n_in, n_out in zip(layer_sizes, layer_sizes[1:])
    ]
    # Hidden layers use ReLU activations; the final layer stays linear.
    activation = data
    for layer in layers[:-1]:
        activation = tf.nn.relu(tf.add(tf.matmul(activation, layer['weights']), layer['biases']))
    return tf.matmul(activation, layers[-1]['weights']) + layers[-1]['biases']
def train_neural_network(x):
    """Train the network with Adam on MNIST and print test accuracy.

    :param x: placeholder for the flattened input images (this parameter
        shadows the module-level ``x``; the caller passes that same
        placeholder).  The labels placeholder ``y``, the dataset ``mnist``
        and ``batch_size`` are taken from module scope.
    """
    prediction = neural_network_model(x)
    # Mean softmax cross-entropy between logits and one-hot labels.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epoch = 30
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epoch):
            epoch_loss = 0
            # One pass over the training set in mini-batches, accumulating
            # the summed batch losses for progress reporting.
            for _ in range(int(mnist.train.num_examples/batch_size)):
                x_epoch,y_epoch = mnist.train.next_batch(batch_size)
                _,c = sess.run([optimizer,cost],feed_dict = {x:x_epoch,y:y_epoch})
                epoch_loss += c
            print('Epoch: ',epoch,'Completed out of :',hm_epoch,'Loss: ',epoch_loss)
        # Fraction of test images whose argmax prediction matches the label,
        # scaled to a percentage.
        correct = tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
        accuracy = tf.reduce_mean(tf.cast(correct,'float'))*100
        print("Accuracy: ", accuracy.eval({x:mnist.test.images,y:mnist.test.labels}))
train_neural_network(x)
| true |
630595ac9b40c72ce248ce193e9f235eb7561296 | Python | thebigbai/Deep-Learning-Code | /miniflow/miniflow.py | UTF-8 | 5,062 | 3.140625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 04 15:15:49 2019
@author: Y. Wang
"""
import numpy as np
class Neuron:
def __init__(self,inbound_neurons=[]):
#the list that this neuron recieves values
self.inbound_neurons=inbound_neurons
#the list that this neuron passes values
self.outbound_neurons=[]
# the value that this nueron will output
self.value=None
#gradients
self.gradients={}
for n in self.inbound_neurons:
n.outbound_neurons.append(self)
def forward(self):
raise NotImplemented
#return NotImplemented
def backward(self):
raise NotImplemented
class Input_Neuron(Neuron):
def __init__(self):
Neuron.__init__(self)
def forward(self,value=None):
if value is not None:
self.value=value
def backward(self):
self.gradients = {self: 0}
#input neuron do nothing
for n in self.outbound_neurons:
grad=n.gradients[self]
self.gradients[self]+=grad
class Add_Neuron(Neuron):
def __init__(self,*x):
Neuron.__init__(self,list(x))
def forward(self):
self.value=0
for i in self.inbound_neurons:
self.value+=i.value
class Mul_Neuron(Neuron):
def __init__(self,*x):
Neuron.__init__(self,list(x))
def forward(self):
self.value=1
for i in self.inbound_neurons:
self.value=self.value*i.value
# calc wx+b
class Linear_Neuron(Neuron):
def __init__(self,x,w,b):
Neuron.__init__(self,[x,w,b])
def forward(self):
x_vector=self.inbound_neurons[0].value
w_vector=self.inbound_neurons[1].value
b=self.inbound_neurons[2].value
"""
x is m by n matrix,the m is num of samples,
the n is the num of node in this layer
w is n by k matrix, the n is the num of node in this layer,
the k is the num of node in next layer
"""
assert x_vector.shape[1]==w_vector.shape[0]
Sum=np.dot(x_vector,w_vector)+b
self.value=Sum
def backward(self):
#init to 0
self.gradients={n: np.zeros_like(n.value) for n in self.inbound_neurons}
for n in self.outbound_neurons:
grad=n.gradients[self]
"""
L=WX+b
dL/dx=dW
dL/dx=dX
dL/db=1
obviously, grad.shape == L.shape
X.shape(n,m) where n is the num of node in this layer, m is sample nums
W.shape(m,k) where k is the num of node in next layer
So, L.shape==(n,k)
when backward, the matrix should keep the same as forward process
so (n,k) dot (k,m)--->the W need to be transposed
(m,n) dot (n,k) --->the X need to be transposed
"""
self.gradients[self.inbound_neurons[0]]+=np.dot(grad,self.inbound_neurons[1].value.T)
self.gradients[self.inbound_neurons[1]]+=np.dot(self.inbound_neurons[0].value.T,grad)
self.gradients[self.inbound_neurons[2]]+=np.sum(grad,axis=0,keepdims=False)
#personly think here shoule be divided by sample nums. using np.mean instead of np.sum
class Sigmoid_Neuron(Neuron):
def __init__(self,z):
Neuron.__init__(self,[z])
def sigmoid(self,z):
return 1./(1.+np.exp(-z))
def forward(self):
z=self.inbound_neurons[0].value
a=self.sigmoid(z)
self.value=a
def backward(self):
#init to 0
self.gradients={n: np.zeros_like(n.value) for n in self.inbound_neurons}
"""
sigmiod'(x)=sigmoid(x)(1-sigmoid(x))
"""
for n in self.outbound_neurons:
#grad is the gradient from backpropagation of next layer
grad=n.gradients[self]
self.gradients[self.inbound_neurons[0]]+=self.value*(1-self.value)*grad
#cost function
class MSE(Neuron):
def __init__(self,y,a):
Neuron.__init__(self,[y,a])
def forward(self):
#convert to col vector
y=self.inbound_neurons[0].value.reshape(-1,1)
a=self.inbound_neurons[1].value.reshape(-1,1)
self.m=y.shape[0]
assert y.shape==a.shape
self.diff=y-a
self.value=1./self.m*(np.sum(self.diff**2))
def backward(self):
"""
C=1/m*(y-a)**2
dC/dy=2/m*(y-a)
dC/da=-2/m(y-a)
"""
self.gradients = {n:np.zeros_like(n.value) for n in self.inbound_neurons}
self.gradients[self.inbound_neurons[0]]=(2./self.m)*self.diff
self.gradients[self.inbound_neurons[1]]=(-2./self.m)*self.diff
| true |
f83485c071fd77846c7a294ec9665c33822eab9f | Python | MouseOnTheKeys/small_projects | /DoubleHashing_method_in_Python.py | UTF-8 | 3,212 | 3.96875 | 4 | [
"MIT"
] | permissive | """
Student:
Nenad Bubalo 1060/19
12. April 2019
Podaci se smeštaju u heš tabelu sa 7 ulaza primenom
metode otvorenog adresiranja sa dvostrukim
heširanjem. Primarna heš funkcija je hp(K)=K mod 7, a
sekundarna heš funkcija je hs(K)=2 + (K mod 3).
Prikazati popunjavanje tabele ako redom dolaze
ključevi 45, 35, 17, 25, 18.
Napomena: Funkcija u slucuju kolizije: hi(K+1)= (hp(K)
+ hs(K)) mod n.
"""
class Element:
def __init__(self, kljuc, vrednost):
self.kljuc = kljuc
self.vrednost = vrednost
self.sledeci = None
class HashTabela:
def __init__(self, limit):
self.tabela = [None] * limit
self.limit = limit
def hp(self, kljuc):
return kljuc % self.limit
def hs(self, kljuc):
return 2 + (kljuc % 3)
# Ubacivanje elemenata sa resavanjem kolizije dvostrukim hesiranjem
def ubaci(self, kljuc, vrednost):
print('')
print('Postupak ubacivanja za element {}:'.format(kljuc))
broj_ulaza = self.hp(kljuc)
novi = Element(kljuc, vrednost)
if self.tabela[broj_ulaza] is None:
self.tabela[broj_ulaza] = novi
print('Element {} je na mestu {}'.format(str(novi.kljuc), str(broj_ulaza)))
else:
while self.tabela[broj_ulaza] is not None:
print("Desila se koalizija za element " + str(kljuc) + " na poziciji " + str(
broj_ulaza) + " trazi se nova pozicija.")
broj_ulaza = (broj_ulaza + self.hs(kljuc)) % self.limit
self.tabela[broj_ulaza] = novi
print('Nova pozicija za element {} je {}'.format(str(kljuc), str(broj_ulaza)))
def prikaz(self):
print('Hes tabela dvostrukim hesiranjem je: ')
for count, element in enumerate(self.tabela):
if element is not None:
print('Na poziciji {} je ({}, {})'.format(str(count), str(element.kljuc), str(element.vrednost)))
element = element.sledeci
else:
print('Na poziciji {} je None'.format(str(count)))
def dohvati(self, kljuc):
broj_ulaza = self.hesFunkcija(kljuc)
tekuci = self.tabela[broj_ulaza]
while tekuci is not None:
if tekuci.kljuc == kljuc:
return tekuci.vrednost
tekuci = tekuci.sledeci
return None
def izbaci(self, kljuc):
broj_ulaza = self.hesFunkcija(kljuc)
tekuci = self.tabela[broj_ulaza]
prethodni = None
while tekuci is not None:
if tekuci.kljuc == kljuc:
if prethodni is None:
self.tabela[broj_ulaza] = tekuci.sledeci
else:
prethodni.sledeci = tekuci.sledeci
prethodni = tekuci
tekuci = tekuci.sledeci
if __name__ == '__main__':
print('Domaci Zadatak 1:')
tabela = HashTabela(7)
kljucevi = [45, 35, 17, 25, 18]
podaci = ['Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon']
for i in range(5):
tabela.ubaci(kljucevi[i], podaci[i])
print('')
tabela.prikaz()
print('')
| true |
c7207d477c400fc48f91d1f529e6839af765eb2d | Python | justinlocsei/fibula | /ansible/filter_plugins/paths.py | UTF-8 | 1,191 | 3.453125 | 3 | [] | no_license | import os.path
import re
def trailing_slash_fs(value):
"""Ensure that a value ends with a single filesystem-appropriate slash.
Args:
value (str): A filesystem path
Returns:
str: The path with a trailing slash
"""
return _trailing_character(value, os.path.sep)
def trailing_slash_url(value):
"""Ensure that a URL ends with a trailing slash.
Args:
value (str): A URL
Returns:
str: The URL with a trailing slash
"""
return _trailing_character(value, '/')
def _trailing_character(value, terminator):
"""Ensure that a string ends with a single terminating character.
Args:
value (str): The value to modify
terminator (str): The terminal character to use
Returns:
str: A string that ends with the terminal character
"""
base_path = re.sub(r'%s+$' % re.escape(terminator), '', value)
return "%s%s" % (base_path, terminator)
class FilterModule(object):
"""Custom Jinja2 filters for working with paths."""
def filters(self):
return {
'trailing_slash_fs': trailing_slash_fs,
'trailing_slash_url': trailing_slash_url
}
| true |
dbe6dd4165e65595728b4dd15962e970f2f155d5 | Python | LuccaBiasoli/python-cola | /python_colas/lista2.py | UTF-8 | 191 | 3.3125 | 3 | [
"MIT"
] | permissive | l1 = [6,2,8]
l2 = [4,9,7]
newlist = l1 +l2
newlist.sort()
newlist.reverse()
newnew = int(input('Digite alguns numeros'))
newlist.append(newnew)
print(newlist)
newlist.sort()
print(newlist)
| true |
bcb2241bb252e05603fa2cd8a2387822b0462f4b | Python | RafalSkolasinski/simulation-codes-arxiv-1806.01815 | /codes/misc.py | UTF-8 | 6,542 | 3.078125 | 3 | [
"BSD-2-Clause"
] | permissive | # Function defined in this file serve miscellaneous purposes.
# See comments for each group of functions for more details.
import numpy as np
import xarray as xr
from itertools import product
from collections import Mapping, defaultdict
import sympy
import kwant
# Code below is to workaround "flood-fill" algorithm that does not
# fill systems with missing hoppings.
def discretize_with_hoppings(hamiltonian, coords=None, *, grid_spacing=1,
locals=None):
"""Discretize system and add zero-magnitude hoppings where required.
This is modification of the "kwant.continuum.discretize" function
that adds zero-magnitude hoppings in place of missing ones.
Please check "kwant.continuum.discretize" documentation for details.
"""
template = kwant.continuum.discretize(hamiltonian, coords,
grid_spacing=grid_spacing,
locals=locals)
syst = kwant.Builder(template.symmetry)
lat = template.lattice
syst[next(iter(template.sites()))] = np.zeros((lat.norbs, lat.norbs))
syst[lat.neighbors()] = np.zeros((lat.norbs, lat.norbs))
syst.update(template)
return syst
# Function defined in this section come from "kwant.continuum" module
# of Kwant and are currently a part of a non-public API.
# To avoid breakage with future releases, they are defined here.
def make_commutative(expr, *symbols):
"""Make sure that specified symbols are defined as commutative.
Parameters
----------
expr: sympy.Expr or sympy.Matrix
symbols: sequace of symbols
Set of symbols that are requiered to be commutative. It doesn't matter
of symbol is provided as commutative or not.
Returns
-------
input expression with all specified symbols changed to commutative.
"""
names = [s.name if not isinstance(s, str) else s for s in symbols]
symbols = [sympy.Symbol(name, commutative=False) for name in names]
expr = expr.subs({s: sympy.Symbol(s.name) for s in symbols})
return expr
def monomials(expr, gens=None):
"""Parse ``expr`` into monomials in the symbols in ``gens``.
Parameters
----------
expr: sympy.Expr or sympy.Matrix
Sympy expression to be parsed into monomials.
gens: sequence of sympy.Symbol objects or strings (optional)
Generators of monomials. If unset it will default to all
symbols used in ``expr``.
Returns
-------
dictionary (generator: monomial)
Example
-------
>>> expr = kwant.continuum.sympify("A * (x**2 + y) + B * x + C")
>>> monomials(expr, gens=('x', 'y'))
{1: C, x: B, x**2: A, y: A}
"""
if gens is None:
gens = expr.atoms(sympy.Symbol)
else:
gens = [kwant.continuum.sympify(g) for g in gens]
if not isinstance(expr, sympy.MatrixBase):
return _expression_monomials(expr, gens)
else:
output = defaultdict(lambda: sympy.zeros(*expr.shape))
for (i, j), e in np.ndenumerate(expr):
mons = _expression_monomials(e, gens)
for key, val in mons.items():
output[key][i, j] += val
return dict(output)
def _expression_monomials(expr, gens):
"""Parse ``expr`` into monomials in the symbols in ``gens``.
Parameters
----------
expr: sympy.Expr
Sympy expr to be parsed.
gens: sequence of sympy.Symbol
Generators of monomials.
Returns
-------
dictionary (generator: monomial)
"""
expr = sympy.expand(expr)
output = defaultdict(lambda: sympy.Integer(0))
for summand in expr.as_ordered_terms():
key = []
val = []
for factor in summand.as_ordered_factors():
symbol, exponent = factor.as_base_exp()
if symbol in gens:
key.append(factor)
else:
val.append(factor)
output[sympy.Mul(*key)] += sympy.Mul(*val)
return dict(output)
# Various helpers for handling simulation and data.
# This collection of functions helps to organize, combine and plot outputs
# of the simulation.
def reduce_dimensions(data):
"""Reduce dimensions of length equal to 1."""
sel = {k: data.coords[k].data[0]
for k, v in data.dims.items() if v==1}
data = data.sel(**sel)
return data
def dict_product(**parameters):
"""Compute Cartesian product of named sets."""
output = [{k: v for k, v in zip(parameters.keys(), x)}
for x in list(product(*parameters.values()))]
return output
def serialize_none(x):
"""Substitute None with its string representation."""
return str(x) if x is None else x
def to_xarray(coords, data_vars):
"""Represent single simulation as xarray DataSet."""
coords = {k: serialize_none(v) for k, v in coords.items()}
ds = xr.Dataset(data_vars, coords)
#Assign coordinates to dimensions that misses them
assignment = {dim: ds[dim] for dim in set(ds.dims) - set(ds.coords)}
ds = ds.assign_coords(**assignment)
return ds
def combine_datasets(sets, dims=None):
"""Combine datasets along specified dimension.
If "dims" is None it will default to all dimensions
that are not used as coordinates.
"""
ds = xr.concat(sets, dim='internal', coords='all')
if dims is None:
dims = list(set(ds.coords) - set(ds.dims))
ds = ds.set_index(internal=list(dims))
ds = ds.unstack('internal')
return ds
def iterate(dataset, dims):
"""Iterate over all xarray dimensions except specified ones.
Parameters
----------
dataset : xarray Dataset
Input dataset
dims : sequence of strings
Dimension to exclude from iteration
Returns
-------
names : sequence of strings
Names of coordinates that are being iterated.
iterator : iterator over (key, par, val)
key : sequence of values in the same order as returned by "names"
val : subset of dataset corresponding to iteration
"""
names = [p for p in list(dataset.dims) if p not in dims]
stacked = dataset.stack(internal_iterator=names)
stacked = stacked.transpose('internal_iterator', *dims)
def iterator():
for i, p in enumerate(stacked.internal_iterator):
key = p.data.tolist()
val = stacked.sel(internal_iterator=p).drop('internal_iterator')
val = val.assign_coords(**dict(zip(names, key)))
yield (key, val)
return names, iterator()
| true |
913e4e49cf64b9127ff96c131d44d80aea69ae85 | Python | rishabhostwal7/Python | /packageML01/Practical19.py | UTF-8 | 514 | 3.84375 | 4 | [] | no_license | # Rule-1: Lists are Mutable ie it can be changed
'''a = ["Noida", "Delhi", "Lucknow", "Goa", "Kanpur"]
a[3] = "UK"
print(a)
a.append("Patna")
print(a)
del a[1]
print(a)
a.insert(2,"Varanasi")
print(a)
'''
#Rule-2: Tuple is immutable
tu = ("Python", "Java", "J2ee", "Android", "Hadoop")
print(type(tu), tu)
#tu[3] = "RDBMS" #This line is Error
print("tu.index('Android') = ", tu.index("Android") )
print("'Hadoop' in tu = ", "Hadoop" in tu)
print("'Crysta' in tu = ", "Crysta" in tu)
| true |
bd8e8709183af09f66083d9c52da36242406985f | Python | alexviil/progeprojekt | /project/Menu.py | UTF-8 | 15,171 | 2.90625 | 3 | [] | no_license | import pygame as pg
import constants as const
import Draw, Button, Actor, Slider
class Menu:
def __init__(self, main_surface, player, clock, items, buffs, music_volume, effect_volume):
self.main_surface = main_surface
self.player = player
self.clock = clock
self.items = items
self.inventory_surface = pg.Surface((const.INV_MENU_WIDTH, const.INV_MENU_HEIGHT))
self.draw = Draw.Draw(self.main_surface)
self.draw_inv = Draw.Draw(self.inventory_surface)
self.buffs = buffs
self.music_volume = music_volume
self.effect_volume = effect_volume
def menu_main(self):
play_button = Button.Button(self.main_surface, "CONTINUE", (200, 100),
(const.MAIN_SURFACE_WIDTH // 2 - 330, const.MAIN_SURFACE_HEIGHT // 2+50))
new_game_button = Button.Button(self.main_surface, "NEW GAME", (200, 100),
(const.MAIN_SURFACE_WIDTH // 2 - 110, const.MAIN_SURFACE_HEIGHT // 2+50))
settings_button = Button.Button(self.main_surface, "SETTINGS", (200, 100),
(const.MAIN_SURFACE_WIDTH // 2 + 110, const.MAIN_SURFACE_HEIGHT // 2+50))
exit_button = Button.Button(self.main_surface, "EXIT", (200, 100),
(const.MAIN_SURFACE_WIDTH // 2 + 330, const.MAIN_SURFACE_HEIGHT // 2+50))
self.music = pg.mixer.Sound(const.MENU_MUSIC)
self.music.set_volume(self.music_volume)
self.music.play(-1)
menu_open = True
while menu_open:
self.main_surface.blit(pg.image.load("background.png"), (0, 0))
play_button.draw()
new_game_button.draw()
settings_button.draw()
exit_button.draw()
events = pg.event.get()
mouse = pg.mouse.get_pos()
input = (events, mouse)
for event in events:
if event.type == pg.QUIT:
pg.quit()
exit()
if play_button.update(input):
self.music.stop()
return "CONTINUE"
if new_game_button.update(input):
self.music.stop()
return "NEW_GAME"
if settings_button.update(input):
self.settings_menu(self.music)
if exit_button.update(input):
pg.quit()
exit()
font_height = self.draw.get_font_height(const.FONT_CONSOLE)
self.draw.draw_text("Music by Hendy Marvin", 5, const.MAIN_SURFACE_HEIGHT-font_height-5, const.GRAY,
const.FONT_CONSOLE)
self.draw.draw_text("Art by 0x72", const.MAIN_SURFACE_WIDTH - 132, const.MAIN_SURFACE_HEIGHT-font_height-5, const.GRAY,
const.FONT_CONSOLE)
pg.display.update()
def settings_menu(self, music):
sett_surf = pg.Surface((const.SETTINGS_MENU_WIDTH, const.SETTINGS_MENU_HEIGHT))
exit_button = Button.Button(self.main_surface, "EXIT", (100, 50),
(const.MAIN_SURFACE_WIDTH // 2, const.MAIN_SURFACE_HEIGHT // 2 + 100))
music_slider = Slider.Slider(self.main_surface, "MUSIC", (400, 7), music.get_volume(),
(const.MAIN_SURFACE_WIDTH // 2, const.MAIN_SURFACE_HEIGHT // 2 - 60))
effect_slider = Slider.Slider(self.main_surface, "SOUND EFFECTS", (400, 7), self.effect_volume,
(const.MAIN_SURFACE_WIDTH // 2, const.MAIN_SURFACE_HEIGHT // 2 + 15))
close = False
while not close:
events = pg.event.get()
mouse = pg.mouse.get_pos()
input = (events, mouse)
sett_surf.fill(const.BLACK)
self.main_surface.blit(sett_surf, (const.MAIN_SURFACE_WIDTH // 2 - const.SETTINGS_MENU_WIDTH // 2,
const.MAIN_SURFACE_HEIGHT // 2 - const.SETTINGS_MENU_HEIGHT // 2))
exit_button.draw()
music_slider.draw()
effect_slider.draw()
for event in events:
if event.type == pg.QUIT:
pg.quit()
exit()
button_held = music_slider.update(input)
while button_held:
events = pg.event.get()
mouse = pg.mouse.get_pos()
music.set_volume((music_slider.rect.centerx-400)/400)
self.music_volume = (music_slider.rect.centerx-400)/400
self.main_surface.blit(sett_surf, (const.MAIN_SURFACE_WIDTH // 2 - const.SETTINGS_MENU_WIDTH // 2,
const.MAIN_SURFACE_HEIGHT // 2 - const.SETTINGS_MENU_HEIGHT // 2))
if 400 <= mouse[0] <= 800:
music_slider.rect.centerx = mouse[0]
elif mouse[0] < 400:
music_slider.rect.centerx = 400
elif mouse[0] > 800:
music_slider.rect.centerx = 800
music_slider.update(input)
exit_button.draw()
music_slider.draw()
effect_slider.draw()
pg.display.update()
for event in events:
if event.type == pg.MOUSEBUTTONUP:
button_held = False
button_held = effect_slider.update(input)
while button_held:
events = pg.event.get()
mouse = pg.mouse.get_pos()
self.effect_volume = (effect_slider.rect.centerx-400)/400
self.main_surface.blit(sett_surf, (const.MAIN_SURFACE_WIDTH // 2 - const.SETTINGS_MENU_WIDTH // 2,
const.MAIN_SURFACE_HEIGHT // 2 - const.SETTINGS_MENU_HEIGHT // 2))
if 400 <= mouse[0] <= 800:
effect_slider.rect.centerx = mouse[0]
elif mouse[0] < 400:
effect_slider.rect.centerx = 400
elif mouse[0] > 800:
effect_slider.rect.centerx = 800
music_slider.draw()
effect_slider.update(input)
exit_button.draw()
effect_slider.draw()
pg.display.update()
for event in events:
if event.type == pg.MOUSEBUTTONUP:
button_held = False
if exit_button.update(input):
close = True
self.clock.tick(const.FPS_LIMIT)
pg.display.update()
def inventory_menu(self):
close = False
text_height = self.draw_inv.get_font_height(const.FONT_INVENTORY)
while not close:
self.inventory_surface.fill(const.DARK_GRAY)
events = pg.event.get()
if self.player.selection > len(self.player.inventory) - 1:
current_index = self.player.selection = 0
else:
current_index = self.player.selection
# Close inventory if press I
for event in events:
if event.type == pg.KEYDOWN:
if event.key == pg.K_i:
close = True
if event.key == pg.K_s:
self.player.next_selection()
current_index = self.player.selection
if event.key == pg.K_w:
self.player.prev_selection()
current_index = self.player.selection
if event.key == pg.K_f:
if not self.player.inventory:
self.player.messages.append("You have no items to drop")
else:
item_here = False
for item in self.items:
if item.get_location() == self.player.get_location():
self.player.messages.append("There is already an item here.")
item_here = True
break
if not item_here:
self.player.messages.append("Dropped " + self.player.inventory[current_index].name + ".")
self.player.inventory[current_index].drop(self.player, self.items)
elif event.key == pg.K_e:
if not self.player.inventory:
self.player.messages.append("You have no items noob")
else:
if self.player.inventory and not self.player.equipped and isinstance(self.player.inventory[current_index], Actor.Equipable):
self.player.equip(self.player.inventory[current_index])
elif isinstance(self.player.equipped, Actor.Equipable) and not isinstance(self.player.inventory[current_index], Actor.Consumable):
self.player.messages.append("You already have something equipped.")
elif self.player.inventory:
self.player.consume(self.player.inventory[current_index], self.buffs, self.effect_volume)
elif event.key == pg.K_r:
if self.player.equipped is not None:
self.player.unequip(self.player.equipped)
else:
self.player.messages.append("You have nothing equipped.")
# Display list of items
for i, item in enumerate(self.player.inventory):
if i == current_index:
self.draw_inv.draw_text(item.name, 0, 0 + (i * text_height), const.BLACK, const.FONT_INVENTORY,
False, const.WHITE)
else:
self.draw_inv.draw_text(item.name, 0, 0 + (i * text_height), const.WHITE, const.FONT_INVENTORY)
self.main_surface.blit(self.inventory_surface, (0, const.MAIN_SURFACE_HEIGHT // 2 - const.INV_MENU_HEIGHT // 2))
console_surf = pg.Surface((720, text_height*5))
console_surf.fill(const.BLACK)
self.main_surface.blit(console_surf, (0, 4+const.MAIN_SURFACE_HEIGHT-text_height*5))
self.draw.draw_text("w - previous item", 2, const.MAIN_SURFACE_HEIGHT // 2+70-text_height,
const.WHITE, const.FONT_INVENTORY)
self.draw.draw_text("s - next item", 2, const.MAIN_SURFACE_HEIGHT//2+70,
const.WHITE, const.FONT_INVENTORY)
self.draw.draw_text("e - use/equip item", 2, const.MAIN_SURFACE_HEIGHT // 2+text_height+70,
const.WHITE, const.FONT_INVENTORY)
self.draw.draw_text("r - unequip item", 2, const.MAIN_SURFACE_HEIGHT // 2+text_height*2+70,
const.WHITE, const.FONT_INVENTORY)
self.draw.draw_text("f - drop item", 2, const.MAIN_SURFACE_HEIGHT // 2+text_height*3+70,
const.WHITE, const.FONT_INVENTORY)
self.draw.draw_text("spacebar - cast spell", 2, const.MAIN_SURFACE_HEIGHT // 2+text_height*4+70,
const.WHITE, const.FONT_INVENTORY)
self.draw.draw_console_messages(self.player.messages, const.FONT_CONSOLE)
hud_surf = pg.Surface((320, 240))
hud_surf.fill(const.BLACK)
self.main_surface.blit(hud_surf, (0, 0))
self.player.draw_hud()
self.clock.tick(const.FPS_LIMIT)
pg.display.update()
def esc_menu(self):
esc_surf = pg.Surface((const.ESC_MENU_WIDTH, const.ESC_MENU_HEIGHT))
menu_button = Button.Button(self.main_surface, "MAIN MENU", (200, 100),
(const.MAIN_SURFACE_WIDTH // 2 - 220, const.MAIN_SURFACE_HEIGHT // 2))
settings_button = Button.Button(self.main_surface, "SETTINGS", (200, 100),
(const.MAIN_SURFACE_WIDTH // 2, const.MAIN_SURFACE_HEIGHT // 2))
close_button = Button.Button(self.main_surface, "EXIT GAME", (200, 100),
(const.MAIN_SURFACE_WIDTH // 2 + 220, const.MAIN_SURFACE_HEIGHT // 2))
menu_open = True
while menu_open:
esc_surf.fill(const.DARK_GRAY)
self.main_surface.blit(esc_surf, (const.MAIN_SURFACE_WIDTH // 2 - const.ESC_MENU_WIDTH // 2,
const.MAIN_SURFACE_HEIGHT // 2 - const.ESC_MENU_HEIGHT // 2))
menu_button.draw()
settings_button.draw()
close_button.draw()
events = pg.event.get()
mouse = pg.mouse.get_pos()
input = (events, mouse)
for event in events:
if event.type == pg.QUIT:
return "EXIT"
elif event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE:
menu_open = False
if menu_button.update(input):
return "MAIN_MENU"
if settings_button.update(input):
return "SETTINGS"
if close_button.update(input):
return "EXIT"
pg.display.update()
def death_screen_menu(self, floor):
death_surface = pg.Surface((const.MAIN_SURFACE_WIDTH, 300))
menu_button = Button.Button(self.main_surface, "MAIN MENU", (200, 100),
(const.MAIN_SURFACE_WIDTH // 2, const.MAIN_SURFACE_HEIGHT // 2+220))
self.music = pg.mixer.Sound(const.DEATH_MUSIC)
self.music.set_volume(self.music_volume)
self.music.play(-1)
menu_open = True
while menu_open:
death_surface.fill(const.DARK_GRAY)
self.main_surface.blit(death_surface, (0, const.MAIN_SURFACE_HEIGHT//2-150))
self.draw.draw_text("YOU DIED", const.MAIN_SURFACE_WIDTH // 2, const.MAIN_SURFACE_HEIGHT // 2, const.RED, const.FONT_DEATH_MESSAGE, center=True)
if floor == 1:
self.draw.draw_text("Couldn't get past the first floor :)", const.MAIN_SURFACE_WIDTH // 2, const.MAIN_SURFACE_HEIGHT // 2+115, const.RED,
const.FONT_CONSOLE, center=True)
else:
self.draw.draw_text("You made it to floor " + str(floor), const.MAIN_SURFACE_WIDTH // 2,
const.MAIN_SURFACE_HEIGHT // 2 + 115, const.RED,
const.FONT_CONSOLE, center=True)
menu_button.draw()
events = pg.event.get()
mouse = pg.mouse.get_pos()
input = (events, mouse)
for event in events:
if event.type == pg.QUIT:
return "EXIT"
if menu_button.update(input):
self.music.stop()
return "MAIN_MENU"
pg.display.update()
| true |
8cda5347688dde9e44a798b3a65696c4e9258013 | Python | Robo4al/robot | /test.py | UTF-8 | 325 | 2.59375 | 3 | [] | no_license | from time import sleep
from machine import Pin, PWM
# MicroPython (ESP32) smoke-test script: drives an H-bridge motor driver on
# four GPIO pins and blinks the on-board LED forever.
print('eita')
# GPIO numbers for the motor-driver inputs (IN1/IN2 = motor A, IN3/IN4 = motor B).
IN1=18
IN2=19
IN3=22
IN4=23
led = Pin(2, Pin.OUT)  # GPIO2 is the on-board LED on most ESP32 dev boards
in1 = Pin(IN1, Pin.OUT)
in2 = Pin(IN2, Pin.OUT)
in3 = Pin(IN3, Pin.OUT)
in4 = Pin(IN4, Pin.OUT)
# Set fixed directions: motor A one way (0/1), motor B the other (1/0).
in1.value(0)
in2.value(1)
in3.value(1)
in4.value(0)
# Blink the LED once per second forever (the motors keep their set direction).
# NOTE(review): PWM is imported but never used -- speed control was perhaps planned.
while True:
    sleep(1)
    led.value(not led.value())
0234d10f73f8d4321e3bfbaede24478ef8388ce5 | Python | h-mayorquin/mnist_deep_neural_network_BPNNs | /multidimensional_scaling.py | UTF-8 | 772 | 2.984375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import MDS
# Load data
percentage = 0.1
percentage = ''
folder = './data/'
name = 'information_distances'
format = '.npy'
file_name = folder + name + str(percentage) + format
distances = np.load(file_name)
dimensions = np.arange(10, 100, 10)
stress_vector = np.zeros_like(dimensions)
for i, dim in enumerate(dimensions):
# Define classifier
n_comp = dim
max_iter = 3000
eps = 1e-9
mds = MDS(n_components=n_comp, max_iter=max_iter, eps=eps,
n_jobs=3, dissimilarity='precomputed')
x = mds.fit(distances)
stress = x.stress_
print 'The stress is', stress
stress_vector[i] = stress
# Plot Here
plt.plot(dimensions, stress_vector, '*')
plt.show()
| true |
debac8631324fb8cfa6783abdace478dcf91ea35 | Python | leocnj/Video-Expression-Recognition | /face_emotion_data_analysis.py | UTF-8 | 1,510 | 3.03125 | 3 | [] | no_license | import os
import matplotlib.pyplot as plt
import pandas as pd
# Extracting the CSV file as pandas dataframe:
df = pd.read_csv("./Extracted_frames/result.csv", index_col=0)
# Adding time information:
time_temp = df["frame_name"].str.split('-').tolist()
time_temp = [x[1] for x in time_temp]
time_temp = [int(x.split('.')[0]) * 0.5 for x in time_temp]
df["Time"] = time_temp
# Selecting the cluster/unique face and refining the results:
os.chdir("output")
list_all_clusters = next(os.walk('.'))[1]
print("List of All clusters:")
print(list_all_clusters)
flag = True
while (flag == True):
input = int(input("For which cluster do you want the emotion?(input the number alone)"))
if (input <= len(list_all_clusters)):
flag = False
os.chdir(("pic" + str(input)))
frames = next(os.walk('.'))[-1]
df2 = df[df['face_name'].isin(frames)]
# Extracting only the necessary columns:
df2 = df2.sort(columns="frame_name")
df2.reset_index()
df3 = df2[["Anger", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]].copy()
df3 = df3.reset_index()
del df3["index"]
dominant_emotions = list(df3.idxmax(axis=1))
print(dominant_emotions)
# Zeroing the other emotions:
df4 = df3.copy()
emotion_list = ["Anger", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
for i in range(0, len(dominant_emotions)):
df4.xs(i)[emotion_list] = 0
df4.xs(i)[dominant_emotions[i]] = df3.xs(i)[dominant_emotions[i]]
df4.plot(kind="area", x=df2["Time"], subplots=True, figsize=(6, 6), ylim=[0, 1])
plt.show()
| true |
5e5d0e934c48c488e694adacff2368a13fb81df8 | Python | mkhushi/ML_Models4StockPrediction | /ModelFactory/model_nn.py | UTF-8 | 5,157 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
Created on Sun Oct 7 13:51:52 2018
@author: Thomas Brown
'''
## Importing the necessary packages
import numpy as np
import matplotlib.pyplot as plt
from keras import layers
from keras.models import Sequential
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from ModelFactory.model import Model, Configs
class NN(Model): # pylint: disable=too-many-instance-attributes
    '''Artificial neural network model.

    Fully-connected Keras classifier over three classes (buy / sell / hold),
    configured through the project's Configs enum via the shared Model base.
    '''
    def __init__(self, config, experiment):
        # Delegate config/experiment handling to the Model base class and
        # pre-declare the splits that reformat_target() fills in later.
        super(NN, self).__init__(config, experiment)
        self.x_train = None
        self.x_test = None
        self.y_train = None
        self.y_test = None
        self.y_train_list = None  # one-hot float32 training targets
        self.y_test_list = None   # one-hot float32 test targets
    def set_params(self):
        '''Prepare the hyperparameters to be used by the model.'''
        # The two supported configs differ only in batch size (16 vs 500).
        if self.config is Configs.Config1:
            self.model_params = {
                'optimizer': 'adadelta',
                'epochs': 200,
                'learning_rate' : 0.2,
                'batch_size': 16,
                'dropout' : 0.5
            }
        elif self.config is Configs.Config2:
            self.model_params = {
                'optimizer': 'adadelta',
                'epochs': 200,
                'learning_rate' : 0.2,
                'batch_size': 500,
                'dropout' : 0.5
            }
        else:
            assert 0, "Bad Config creation: " + self.config.name
    def reformat_target(self):
        '''Reformat the -1/0/1 labels as one-hot float32 vectors.

        Mapping: 1 -> [1,0,0], -1 -> [0,1,0], 0 -> [0,0,1].
        '''
        self.x_train, self.y_train = self.data_factory.get_train_data()
        self.x_test, self.y_test = self.data_factory.get_test_data()
        self.y_train_list = []
        self.y_test_list = []
        for entry in self.y_test:
            if entry == 1:
                self.y_test_list.append([1, 0, 0])
            elif entry == -1:
                self.y_test_list.append([0, 1, 0])
            elif entry == 0:
                self.y_test_list.append([0, 0, 1])
        for entry in self.y_train:
            if entry == 1:
                self.y_train_list.append([1, 0, 0])
            elif entry == -1:
                self.y_train_list.append([0, 1, 0])
            elif entry == 0:
                self.y_train_list.append([0, 0, 1])
        self.y_train_list = np.array(self.y_train_list).astype('float32')
        self.y_test_list = np.array(self.y_test_list).astype('float32')
    def train(self):
        '''Build the dense network and train on the dataset provided.'''
        self.set_params()
        self.reformat_target()
        ## The Neural Network:
        self.model = Sequential()
        # Input - Layer
        self.model.add(layers.Dense(64, activation='relu', input_shape=(self.x_train.shape[1], )))
        # Hidden - Layers (dropout between each pair of dense layers)
        self.model.add(layers.Dropout(self.model_params['dropout'], noise_shape=None, seed=None))
        self.model.add(layers.Dense(48, activation='relu'))
        self.model.add(layers.Dropout(self.model_params['dropout'], noise_shape=None, seed=None))
        self.model.add(layers.Dense(24, activation='relu'))
        self.model.add(layers.Dropout(self.model_params['dropout'], noise_shape=None, seed=None))
        self.model.add(layers.Dense(12, activation='relu'))
        self.model.add(layers.Dropout(self.model_params['dropout'], noise_shape=None, seed=None))
        self.model.add(layers.Dense(6, activation='relu'))
        # Output- Layer (3-way softmax: buy / sell / hold)
        self.model.add(layers.Dense(3, activation='softmax'))
        self.model.summary()
        # Compile the network with specified optimizer, loss and metrics.
        # NOTE(review): model_params['learning_rate'] is never passed to the
        # optimizer -- the string name gets Keras' default rate.  Confirm intent.
        self.model.compile(optimizer=self.model_params['optimizer'],
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])
        # Fitting the model
        history = self.model.fit(self.x_train, self.y_train_list,
                                 epochs=self.model_params['epochs'],
                                 batch_size=self.model_params['batch_size'],
                                 validation_data=(self.x_test, self.y_test_list))
        print(history)
    def test(self):
        '''Test and return (confusion_matrix, classification_report).

        Also draws the confusion matrix onto the current matplotlib figure;
        the caller is responsible for plt.show()/savefig.
        '''
        test_preds = self.model.predict(self.x_test)
        #Classification Report
        preds_list = []
        target_list = []
        for i in range(len(test_preds)):
            preds_list.append(np.argmax(test_preds[i]))
        for i in range(len(self.y_test_list)):
            target_list.append(np.argmax(self.y_test_list[i]))
        cr_nn = classification_report(target_list, preds_list)
        ## Confusion Matrix
        cm_nn = confusion_matrix(target_list, preds_list)
        plt.imshow(cm_nn, interpolation='nearest', cmap=plt.cm.Blues)
        plt.title('Confusion Matrix')
        plt.colorbar()
        tick_marks = np.arange(3)
        plt.xticks(tick_marks, ['buy', 'sell', 'hold'], rotation=45)
        plt.yticks(tick_marks, ['buy', 'sell', 'hold'])
        plt.ylabel('Target')
        plt.xlabel('Predictions')
        plt.tight_layout()
        return cm_nn, cr_nn
| true |
5aa9312ff549774815a846196d902239e7f63d82 | Python | mladenangel/scripts | /pyLessions/less16.py | UTF-8 | 287 | 3.609375 | 4 | [] | no_license | class Print_ob:
entero = 1000
texto = 'LaLAla'
def imp(self,msg):
print(msg)
def imp_todo(self,entero,texto,msg):
print(msg,entero,texto)
# Exercise the Print_ob demo class.
x = Print_ob()
print(x.entero)
print(x.imp('HOLA'))  # imp() prints 'HOLA', returns None -- so "None" prints too
print(x.texto)
print(x.imp_todo('HOLA','ESTO','ES LOCURA'))  # prints the args, then None
| true |
758798fb07a6412e4e2c276f84e6b6e0a928988a | Python | rabarar/PyQt-fbs-demo | /src/main/python/main.py | UTF-8 | 973 | 2.859375 | 3 | [] | no_license | from fbs_runtime.application_context import ApplicationContext
from PyQt5.QtWidgets import *
import sys
class AppContext(ApplicationContext): # 1. Subclass ApplicationContext
    """fbs application context: a one-button window that pops an alert."""
    def on_button_clicked(self):
        # Show a modal message box each time the button is pressed.
        self.alert = QMessageBox()
        self.alert.setText('You clicked the button!')
        self.alert.exec_()
    def run(self): # 2. Implement run()
        # Build the main window and a free-standing button, wire the click
        # handler, then enter the Qt event loop; returns the app's exit code.
        self.window = QMainWindow()
        self.window.setWindowTitle('Hello World!')
        self.window.resize(250, 150)
        self.button = QPushButton('Click')
        self.button.clicked.connect(self.on_button_clicked)
        self.button.show()
        self.window.show()
        return self.app.exec_() # 3. End run() with this line
if __name__ == '__main__':
    appctxt = AppContext() # 4. Instantiate the subclass
    exit_code = appctxt.run() # 5. Invoke run() -- blocks until the Qt loop exits
    sys.exit(exit_code)
| true |
c59b172604c4340d5e00b53c114ba67d62167a2b | Python | JunLyu071326/Storage | /demo.py | UTF-8 | 4,975 | 4.3125 | 4 | [] | no_license |
def cube(num):
    """Return the cube (third power) of num."""
    result = num * num * num
    return result
print(cube(3))
# Boolean flags drive a three-way if/elif/else demo below.
is_male = False
is_tall = True
if is_male and is_tall:
    print("You are a tall male.")
elif is_male and not(is_tall):
    print("You are a short male")
else:
    # Reached for any non-male combination, regardless of height.
    print("You are a female.")
def max_num(num1, num2, num3):
    """Return the largest of the three numbers.

    Ties resolve to the earliest argument, matching the original
    chained-comparison implementation.
    """
    return max(num1, num2, num3)
print(max_num(3, 4, 5))
#a calculator: reads two floats and an operator from stdin
num1 = float(input("Enter first number: "))
op = input("Enter operator: ")
num2 = float(input("Enter second number: "))
if op == "+":
    print(num1 + num2)
elif op == "-":
    print(num1 - num2)
elif op == "/":
    # NOTE(review): no guard against num2 == 0 -- raises ZeroDivisionError.
    print(num1 / num2)
elif op == "*":
    print(num1*num2)
else:
    print("Invalid.")
#dictionaries: map month abbreviations to full names
month_conversion = {
    "Jan": "January",
    "Feb": "February",
    "Mar": "March",
}
print(month_conversion["Jan"])
# .get() returns the second argument instead of raising for a missing key.
print(month_conversion.get("Luv", "Not a valid key"))
#while loop: "while" is a loop; "if" is not.
i = 1
while i <= 10:
    print(i)
    i = i + 1
print("Done with loop")
#build a guessing game: loop until the secret word is typed
secret_word = "giraffe"
guess = ""
while guess != secret_word:
    guess = input("Enter a word: ")
print("You win!")
#set a limit on number of trials (3 guesses, then you lose)
secret_word = "giraffe"
guess = ""
guess_count = 0
guess_limit = 3
out_of_guesses = False
while guess != secret_word and not(out_of_guesses):
    if guess_count < guess_limit:
        guess = input("Enter a word: ")
        guess_count = guess_count + 1
    else:
        out_of_guesses = True
if out_of_guesses:
    print("Out of guesses, you lose.")
else:
    print("You win!")
#"for" loop: iterate a string character by character
for letter in "Giraffe Academy":
    print(letter)
friends = ["Jim", "Karen", "Kevin"]
# index-based iteration demo (enumerate would be the idiomatic form)
for index in range(len(friends)):
    print(friends[index])
#exponent function
def raise_to_power(base_num, power_num):
    """Return base_num raised to the non-negative integer power power_num
    by repeated multiplication (power 0 yields 1)."""
    result = 1
    for _ in range(power_num):
        result *= base_num
    return result
print(raise_to_power(3, 2))
#2d lists and nested loops: rows may have different lengths (a "ragged" list)
number_grid = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9],
    [0]
]
print(number_grid[0][2])
# Visit every element row by row.
for row in number_grid:
    for column in row:
        print(column)
#Build a translator
#Giraffe Language
#vowels -> g
#e.g. dog -> dgg; cat -> cgt
def translate(phrase):
    """Translate phrase into "Giraffe Language": every vowel becomes 'g'
    (uppercase vowels become 'G'); all other characters pass through."""
    pieces = []
    for ch in phrase:
        if ch in "AEIOUaeiou":
            pieces.append("G" if ch.isupper() else "g")
        else:
            pieces.append(ch)
    return "".join(pieces)
print(translate(input("Enter a phrase: ")))
#Try & except
number = float(input("Enter a number: "))
print(number)
# if a string of letters is entered, error occurs.
try:
    number = float(input("Enter a number: "))
    print(number)
except:
    # NOTE(review): bare except swallows everything (incl. KeyboardInterrupt).
    print("Invalid input.")
#read files: allows you to read different files
# NOTE(review): these three open() calls discard the file object (never closed).
open("2019 Payment Prep Form Lyu travel.docx", "r")
open("2019 Payment Prep Form Lyu travel.docx", "w")
open("2019 Payment Prep Form Lyu travel.docx", "a") # append or add at the end
file = open("2019 Payment Prep Form Lyu travel.docx","r")
print(file.readable())
# NOTE(review): a .docx is binary; reading it in text mode may raise.
print(file.read())
file.close()
#write to files ("a" appends, "w" truncates first)
file = open("2019 Payment Prep Form Lyu travel.docx", "a")
file.write("\nToby - Human Resources")
file.close()
file = open("2019 Payment Prep Form Lyu travel.docx", "w")
file.write("\nToby - Human Resources") #overwrite everything in the file
file.close()
#python can be used to create html files
file = open("file.html", "w")
file.write("<p>This is HTML.</p>")
file.close()
#classes and objects
class student: #define a new class and attributes associated with it
    """Simple record of a student's enrollment details."""

    def __init__(self, name, major, gpa, is_on_probation):
        # Store the constructor arguments as instance attributes in one go.
        self.name, self.major = name, major
        self.gpa, self.is_on_probation = gpa, is_on_probation
# NOTE(review): importing from the module's own file name -- in a file named
# demo.py this is a self/circular import; the class is already defined above.
from demo import student
student1 = student("Jim", "Business", 3.1, False) #"student" object
print(student1.name)
#BUILD A MULTIPLE CHOICE QUESTION
# Each prompt embeds its three options; the matching answers are defined below.
question_prompts = [
    "What color are apples?\n(a) Red/Green\n(b) Purple\n(c) Orange\n\n",
    "What color are bananas?\n(a) Teal\n(b) Magenta\n(c) Yellow\n\n",
    "What color are strawberries?\n(a) Red\n(b) Yellow\n(c) Orange\n\n"
]
#make a new py file, inside it:
class Question:
    """Pair a multiple-choice prompt with its correct answer letter."""

    def __init__(self, prompt, answer):
        self.prompt, self.answer = prompt, answer
#now in demo.py
# NOTE(review): re-importing Question from "newfile" shadows the class defined
# just above; both definitions are identical, so behaviour is unchanged.
from newfile import Question
# Build the quiz: one Question per prompt with its correct answer letter.
questions = [
    Question(question_prompts[0],"a"),
    Question(question_prompts[1],"c"),
    Question(question_prompts[2],"b"),
]
def run_test(questions):
    """Ask each Question in turn via input() and print the final score.

    questions -- iterable of Question objects (must have .prompt and .answer).
    """
    score = 0
    for question in questions:
        answer = input(question.prompt)
        # BUG FIX: the original line lacked the trailing ':' -- a SyntaxError
        # that prevented the whole script from running.
        if answer == question.answer:
            score += 1
    # FIX: added the missing space before "correct" ("3/3correct" -> "3/3 correct").
    print("You got " + str(score) + "/" + str(len(questions)) + " correct")
# Kick off the quiz (reads the player's answers from stdin).
run_test(questions)
| true |
a895bd43b8ac8dac85541c93e0125d5c5f3f2aaa | Python | Riccellisp/AprendizagemAutomatica | /Projeto/misc/classifiers.py | UTF-8 | 2,032 | 2.59375 | 3 | [] | no_license | import pandas as pd
import numpy as np
from sklearn.metrics import classification_report
from sklearn.neighbors.nearest_centroid import NearestCentroid
import timeit
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import ParameterGrid
from sklearn import metrics
def gridsearchAdaBoost(parametros, xTrain, yTrain, Xval, yVal):
    """Exhaustive grid search for AdaBoost over `parametros`.

    Fits one AdaBoostClassifier per parameter combination on the training
    split, scores it with weighted F1 on the validation split (printing each
    score), and returns the (params-dict, f1-score) pairs in evaluation order.
    """
    f1Metric = []
    par = []
    for params in ParameterGrid(parametros):
        clf = AdaBoostClassifier(n_estimators=params['n_estimators'],
                                 learning_rate=params['learning_rate'])
        clf.fit(xTrain, yTrain)
        pred = clf.predict(Xval)
        f1 = metrics.f1_score(yVal, pred, average='weighted')
        print(f1)
        f1Metric.append(f1)
        par.append(params)
    # BUG FIX: `dict([par, f1Metric])` raised ValueError whenever the grid had
    # more than two combinations (dict() expects key/value *pairs*), and the
    # params dicts are unhashable anyway.  Return explicit pairs instead.
    return list(zip(par, f1Metric))
# Load the preprocessed CIC2017 binary-label dataset.
caminho = '/media/riccelli/1CBBB19519DBB1D2/Bruno/Doutorado/Disciplinas/Aprendizagem Automatica/CIC2017PreProcBinary.csv'
df = pd.read_csv(caminho,low_memory=True)
X = np.array(df.iloc[:,0:97])   # first 97 columns are features
Y = np.array(df.iloc[:,-1])     # last column is the label
del df  # free the DataFrame; only the numpy arrays are needed from here on
skf = StratifiedKFold(n_splits=10)
skf.get_n_splits(X, Y)
print(skf)
val_perc = 0.2
indexesPerFold = []
# NOTE(review): the loop reassigns the train/val split every fold, so only the
# *last* fold's X_trainDivided/X_val/y_* are used by the grid search below.
for train_index, test_index in skf.split(X, Y):
    X_train, X_test = X[train_index,:], X[test_index,:]
    y_train, y_test = Y[train_index], Y[test_index]
    X_trainDivided, X_val, y_trainDivided, y_val = train_test_split(X_train, y_train, test_size = val_perc,stratify=y_train)
    indexesPerFold.append(train_index)
# 3x3 AdaBoost hyperparameter grid.
parametros = {'n_estimators':[1, 5, 10],'learning_rate':[0.1, 1, 2]}
resultGrid = gridsearchAdaBoost(parametros,X_trainDivided,y_trainDivided,X_val,y_val)
#for params in ParameterGrid(parametros):
#    clf = AdaBoostClassifier(n_estimators=params['n_estimators'],learning_rate=params['learning_rate'])
#    clf.fit(X_trainDivided,y_trainDivided)
#    pred = clf.predict(X_val)
#    f1 = metrics.f1_score(y_val, pred,average='weighted')
#f1Metric.append(f1)
| true |
def are_chunks_encrypted(fname):
    """
    Calculate the chi-squared statistic for each chunk of the file and judge
    whether the file is encrypted.  (Stub -- not yet implemented; currently
    always returns None.)
    :param str fname: source file name to be analysed.
    :return: 0 if the chunks are not encrypted, non-zero otherwise.
    """
    pass
def is_file_encrypted(fname):
    """
    Calculate the chi-squared statistic for the whole file and judge whether
    the file is encrypted.  (Stub -- not yet implemented; currently always
    returns None.)
    :param str fname: source file name to be analysed.
    :return: 0 if the file is not encrypted, non-zero otherwise.
    """
    pass
| true |
8d723dffa096dfc381df7638987fa9745472b64a | Python | ChileanVirtualObservatory/vo.chivo.cl | /dm/sil.py | UTF-8 | 6,376 | 2.578125 | 3 | [] | no_license | """
SIL, the Simple Instance Language, is an attempt to allow
data model instances written in a simple, JSON-like language.
"""
import re
from gavo import utils
from gavo.dm import common
# sentinels for further processing
class Atom(unicode):
    """a sentinel class for atomic values of roles
    """
    # Matches values that can be emitted in SIL without quoting.
    noQuotesOkRE = re.compile("[\w_.]+$")
    def asSIL(self):
        # Serialise to SIL: plain word-like values go out verbatim,
        # everything else is double-quoted with '"' doubled ('""').
        if self.noQuotesOkRE.match(self):
            return unicode(self)
        else:
            return '"%s"'%(self.replace('"', '""'))
    def __repr__(self):
        # Prefix 'a' (for Atom) and strip Python 2's u'' prefix.
        return "a"+unicode.__repr__(self).lstrip("u")
class Reference(unicode):
    """a sentinel class for roles referencing something else.
    """
    def asSIL(self):
        # SIL spells references with a leading '@'.
        return "@%s"%self
# parse methods, used by getGrammar, by nonterminal name there
# pyparsing parse actions; getGrammar.impl wires each _pa_<name> function to
# the grammar nonterminal <name> by naming convention.  Signature (s, p, toks)
# is the pyparsing parse-action convention: source string, position, tokens.
def _pa_attributeDef(s, p, toks):
    # toks = [name, ':', value]
    return ("attr", toks[0], toks[2])
def _pa_typeAnnotation(s, p, toks):
    # toks = ['(', identifier, ')'] -- keep only the identifier
    return toks[1]
def _pa_collection(s, p, toks):
    if len(toks)==1:
        # no explicit type annotation; we return None as type.
        return ("coll", None, toks[0])
    else:
        return ("coll", toks[0], toks[1])
def _pa_obj(s, p, toks):
    # annotated object: (type, body-items)
    return ("obj", toks[0], toks[1][2])
def _pa_objectBody(s, p, toks):
    # untyped object body; the type gets filled in from context later
    return ("uobj", None, toks[1].asList())
def _pa_sequenceBody(s, p, toks):
    return [toks[1].asList()]
def _pa_reference(s, p, toks):
    # '@name' -> Reference sentinel
    return Reference(toks[1])
def _pa_simpleImmediate(s, p, toks):
    # bare or quoted literal -> Atom sentinel
    return Atom(toks[0])
class getGrammar(utils.CachedResource):
    """returns a grammar for parsing a SIL object description.

    This is a CachedResource: impl() builds the pyparsing grammar once and
    the result is reused on later calls.
    """
    @classmethod
    def impl(cls):
        from gavo.imp.pyparsing import (Word, Literal, alphas, alphanums,
            QuotedString, Forward, ZeroOrMore, Group, Optional)
        with utils.pyparsingWhitechars("\t\n\r "):
            # Terminals: identifiers, literals, and '@'-references.
            qualifiedIdentifier = Word(alphas+"_:", alphanums+"-._:")
            plainIdentifier = Word(alphas+"_", alphanums+"-._")
            externalIdentifier = Word(alphas+"_", alphanums+"._/#-")
            plainLiteral = Word(alphanums+"_-.")
            quotedLiteral = QuotedString(quoteChar='"', escQuote='""')
            reference = Literal('@') + externalIdentifier
            # complexImmediate is recursive (objects/collections nest), so it
            # must be forward-declared before being defined below.
            complexImmediate = Forward()
            simpleImmediate = plainLiteral | quotedLiteral
            value = reference | complexImmediate | simpleImmediate
            attributeDef = (plainIdentifier
                + Literal(":")
                + value)
            typeAnnotation = (Literal('(')
                + qualifiedIdentifier
                + Literal(')'))
            objectBody = (Literal('{')
                + Group(ZeroOrMore( attributeDef ))
                + Literal('}'))
            obj = typeAnnotation + objectBody
            sequenceBody = (Literal('[')
                + Group(ZeroOrMore(value | objectBody))
                + Literal(']'))
            collection = Optional(typeAnnotation) + sequenceBody
            complexImmediate << ( obj | collection )
            # Attach the module-level _pa_<name> parse actions to the local
            # grammar element of the same name, by naming convention.
            for n, func in globals().iteritems():
                if n.startswith("_pa_"):
                    locals()[n[4:]].setParseAction(func)
            # Keep the symbol table around so enableDebuggingOutput can use it.
            cls.symbols = locals()
            return obj
    @classmethod
    def enableDebuggingOutput(cls):
        """(not user-servicable)
        """
        # Turn on pyparsing debug tracing for every grammar element built
        # in impl(); requires impl() to have run first (cls.symbols set).
        from gavo.imp.pyparsing import ParserElement
        for name, sym in cls.symbols.iteritems():
            if isinstance(sym, ParserElement):
                sym.setDebug(True)
                sym.setName(name)
def _iterAttrs(node, seqType, roleName):
    """generates parse events for nodes with attribute children.
    (see _parseTreeToEvents).
    """
    for child in node[2]:
        assert child[0]=='attr'
        if isinstance(child[2], (Reference, Atom)):
            # atomic attribute value -> single event
            yield ('attr', child[1], child[2])
        elif isinstance(child[2], tuple):
            # nested object/collection -> recurse, using the attribute
            # name as the role of the nested annotation
            for grandchild in _parseTreeToEvents(child[2], roleName=child[1]):
                yield grandchild
        else:
            assert False, "Bad object as parsed value: %s"%repr(child[2])
def _iterObjs(node, seqType, roleName):
    # Generates parse events for collection children (atoms or untyped objects).
    for child in node[2]:
        if isinstance(child, (Reference, Atom)):
            yield ('item', child, None)
        else:
            # complex child -- yield events
            assert child[0]=='uobj'
            for grandchild in _parseTreeToEvents(child, seqType=seqType,
                    roleName=roleName):
                yield grandchild
# Dispatch table keyed on parse-node tag:
#   node tag -> (iterparse event name, where the type comes from, child iterator)
_PARSER_EVENT_MAPPING = {
    # -> (iterparse ev name, type source, child parser)
    'obj': ('obj', 'fromNode', _iterAttrs),
    'uobj': ('obj', 'seqType', _iterAttrs),
    'coll': ('coll', 'fromNode', _iterObjs)
}
def _parseTreeToEvents(node, seqType=None, roleName=None):
    """helps iterparse by interpreting the parser events in evStream.

    Yields an opening event for `node`, then its children's events,
    then a closing ('pop', None, None).
    """
    opener, typeSource, childParser = _PARSER_EVENT_MAPPING[node[0]]
    if typeSource=='fromNode':
        # the node carries its own type annotation
        nodeType = node[1]
    elif typeSource=='seqType':
        # untyped object inside a collection: inherit the collection's type
        nodeType = seqType
    else:
        assert False
    yield (opener, roleName, nodeType)
    for child in childParser(node, nodeType, roleName):
        yield child
    yield ('pop', None, None)
def iterparse(silLiteral):
    """yields parse events for a SIL literal in a string.
    The parse events are triples of one of the forms:
    * ('attr', roleName, value) add an attribute to the current annotation
    * ('obj', roleName, type) create a new object object of type
    * ('coll', type, None) create a new collection annotation (type can be None)
    * ('item', val, None) add an atomic value to the current collection
    * ('pop', None, None) finish current annotation and add it to its container

    Raises a pyparsing ParseException when silLiteral is not valid SIL
    (parseAll=True requires the whole string to match).
    """
    root = getGrammar().parseString(silLiteral, parseAll=True)[0]
    return _parseTreeToEvents(root)
def getAnnotation(silLiteral, annotationFactory):
    """returns an annotation object parsed from silLiteral.
    annotationFactory is a callable that takes attributeName/attributeValue
    pairs and returns annotations; attributeValue is either an Atom or
    a Reference in these cases.

    Works as a small stack machine over the iterparse event stream:
    'obj'/'coll' push a container, 'pop' closes it, 'attr'/'item' add to
    the container currently on top of the stack.
    """
    obStack, result = [], None
    for evType, arg1, arg2 in iterparse(silLiteral):
        if evType=='obj':
            obStack.append(common.ObjectAnnotation(arg1, arg2))
        elif evType=='coll':
            obStack.append(common.CollectionAnnotation(arg1, arg2))
        elif evType=='pop':
            newRole = obStack.pop()
            if obStack:
                obStack[-1].add(newRole)
            else:
                # we've just popped the total result.  Make sure
                # any further operations fail.
                del obStack
                result = newRole
        elif evType=='attr':
            obStack[-1].add( #noflake: the del obStack up there is conditional
                annotationFactory(arg1, arg2))
        elif evType=='item':
            collection = obStack[-1] #noflake: see above
            assert isinstance(collection, common.CollectionAnnotation)
            collection.add(annotationFactory(collection.name, arg1))
        else:
            assert False
    assert result is not None
    return result
if __name__=="__main__":
    # Ad-hoc smoke test: parse a tiny SIL literal with full debug tracing on.
    g = getGrammar()
    getGrammar.enableDebuggingOutput()
    res = g.parseString(
        """
        (:testclass) {
            seq: [a "b c d" @e]}""", parseAll=True)[0]
    print res
| true |
529d84734a2eeddb28efad801530c9a38966e007 | Python | lxchavez/Yeezy-Taught-Me | /src/MSongsDB/Tasks_Demos/SQLite/demo_artist_similarity.py | UTF-8 | 4,336 | 3.03125 | 3 | [
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"CC-BY-NC-SA-2.0",
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | """
Thierry Bertin-Mahieux (2011) Columbia University
tb2332@columbia.edu
This code demo the use of the artist_similarity.db
To create the db, see create_artist_similarity_db.py
You should be able to download the db from the Million
Song Dataset website.
To view a more basic demo on SQLite, start with
demo_artist_term.py
This is part of the Million Song Dataset project from
LabROSA (Columbia University) and The Echo Nest.
Copyright 2011, Thierry Bertin-Mahieux
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import glob
import time
import datetime
import numpy as np
# sqlite3 ships with CPython, but some builds omit it; bail out early with a
# clear message rather than failing later mid-demo.
try:
    import sqlite3
except ImportError:
    print 'you need sqlite3 installed to use this program'
    sys.exit(0)
def encode_string(s):
    """
    Simple utility function to make sure a string is proper
    to be used in a SQLite query
    (different than posgtresql, no N to specify unicode)
    EXAMPLE:
      That's my boy! -> 'That''s my boy!'
    """
    # SQLite escaping convention: double every embedded single quote,
    # then wrap the whole value in single quotes.
    return "'%s'" % s.replace("'", "''")
def die_with_usage():
    """Print the help/usage text for this demo, then exit(0)."""
    print 'demo_artist_similarity.py'
    print '  by T. Bertin-Mahieux (2011) Columbia University'
    print '     tb2332@columbia.edu'
    print 'This codes gives examples on how to query the database artist_similarity.db'
    print 'To first create this database, see: create_artist_similarity_db.py'
    print 'Note that you should first check: demo_track_metadata.py if you are not'
    print 'familiar with SQLite.'
    print 'usage:'
    print '   python demo_artist_similarity.py <database path>'
    sys.exit(0)
if __name__ == '__main__':
    # help menu
    if len(sys.argv) < 2:
        die_with_usage()
    # params
    dbfile = sys.argv[1]
    # connect to the SQLite database
    conn = sqlite3.connect(dbfile)
    # from that connection, get a cursor to do queries
    # NOTE: we could query directly from the connection object
    c = conn.cursor()
    print '*************** GENERAL SQLITE DEMO ***************************'
    # list all tables in that dataset
    # note that sqlite does the actual job when we call fetchall() or fetchone()
    q = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
    res = c.execute(q)
    print "* tables contained in that SQLite file/database (there should be 3):"
    print res.fetchall()
    # list all indices
    q = "SELECT name FROM sqlite_master WHERE type='index' ORDER BY name"
    res = c.execute(q)
    print '* indices in the database to make reads faster:'
    print res.fetchall()
    print '*************** ARTISTS TABLE DEMO ****************************'
    # list all artist ID
    q = "SELECT artist_id FROM artists"
    res = c.execute(q)
    print "* number of artist Echo Nest ID in 'artists' table:"
    print len(res.fetchall())
    print '*************** ARTIST SIMILARITY DEMO ************************'
    # get a random similarity relationship
    q = "SELECT target,similar FROM similarity LIMIT 1"
    res = c.execute(q)
    a,s = res.fetchone()
    print '* one random similarity relationship (A->B means B similar to A):'
    print a,'->',s
    # count number of similar artist to a in previous call
    # (queries are string-built; encode_string handles the quote escaping)
    q = "SELECT Count(similar) FROM similarity WHERE target="+encode_string(a)
    res = c.execute(q)
    print '* artist',a,'has that many similar artists in the dataset:'
    print res.fetchone()[0]
    # count number of artist s (c queries up) is similar to
    q = "SELECT Count(target) FROM similarity WHERE similar="+encode_string(s)
    res = c.execute(q)
    print '* artist',s,'is similar to that many artists in the dataset:'
    print res.fetchone()[0]
    # DONE
    # close cursor and connection
    # (if for some reason you added stuff to the db or alter
    #  a table, you need to also do a conn.commit())
    c.close()
    conn.close()
| true |
4ddc27a531c760b70d06cce088282961ff0b8c2b | Python | Jerry-jy/ML-NIDS-SDN | /ml-traning/merge-data.py | UTF-8 | 657 | 2.640625 | 3 | [] | no_license | # merge and shuffle separated data files
import pandas as pd
from sklearn.utils import shuffle

# Per-class data files to merge and shuffle into one dataset.
types = ['icmpflood', 'ipsweep', 'normal', 'pingofdeath', 'portscan', 'tcpsynflood', 'udpflood']

flag = 3
if flag == 1:
    base1 = 'datatest'
    base2 = 'datatest'
    prefix = 'test_'
elif flag == 2:
    base1 = 'dataset'
    base2 = 'dataset'
    prefix = ''
else:
    # BUG FIX: any other flag value (the file shipped with flag = 3!) left
    # base1/base2/prefix undefined and crashed later with a NameError.
    # Fail fast with an explicit message instead; if a third mode was
    # intended, add its branch above.
    raise ValueError("unsupported flag value: %r (expected 1 or 2)" % flag)

dataset = []
# FIX: loop variable renamed from `type`, which shadowed the builtin.
for traffic_type in types:
    data = pd.read_csv(base1 + '/' + prefix + traffic_type)
    data = shuffle(data)
    dataset.append(data)

final = pd.concat(dataset)
final.drop_duplicates(inplace=True)
# Shuffle twice, as in the original, before writing the merged file.
final = shuffle(final)
final = shuffle(final)
final.to_csv(base1 + '/' + base2, index=False)
| true |
8bd443a387fae5bb766101a7ef4156361c4e2cd6 | Python | rbmanez/TTA-Python-Projects | /drillPyTkinter/guiDrill.py | UTF-8 | 1,503 | 3.375 | 3 | [] | no_license | # Drill:
# - you will need to write a script that creates a GUI
# Requirements:
# - Your script will need to use Python 3 and the Tkinter module.
# - Your script will need to re-create an exact copy of a GUI from the
# supplied image.
from tkinter import *
class ParentWindow(Frame):
    """Main tkinter window: two path fields with Browse buttons, plus
    'Check for files...' and 'Close Program' buttons (not yet wired up)."""
    def __init__(self, master):
        Frame.__init__(self)
        #creating the GUI window
        self.master = master
        self.master.title("Check Files")
        self.master.geometry('530x170')
        #creating buttons (no command= handlers are attached yet)
        self.btnBrowse1 = Button(self.master, text="Browse...", width=14)
        self.btnBrowse1.grid(column=0, row=0, padx=(15, 0), pady=(40, 0))
        self.btnBrowse2 = Button(self.master, text="Browse...", width=14)
        self.btnBrowse2.grid(column=0, row=1, padx=(15, 0), pady=(10, 0))
        self.btnCheck = Button(self.master, text="Check for files...", height=2, width=14)
        self.btnCheck.grid(column=0, row=2, padx=(15, 0), pady=(10, 0))
        self.btnClose = Button(self.master, text="Close Program", height=2, width=14)
        self.btnClose.grid(column=1, row=2, sticky=E)
        #creating user input fields (one per Browse button row)
        self.txt1 = Entry(self.master, width=60)
        self.txt1.grid(column=1, row=0, padx=(25, 0), pady=(40, 0), sticky=N)
        self.txt2 = Entry(self.master, width=60)
        self.txt2.grid(column=1, row=1, padx=(25, 0), pady=(10, 0), sticky=N)
if __name__=="__main__":
    # Create the Tk root, attach the window frame, and enter the event loop.
    root = Tk()
    App = ParentWindow(root)
    root.mainloop()
def remove_shared(L1, L2):
    """ (list list)
    Remove items from L1 that are in both L1 and L2.
    >>> list_1 = [1, 2, 3, 4, 6]
    >>> list_2 = [2, 4, 5, 7]
    >>> remove_shared(list_1, list_2)
    >>> list_1
    [1, 3, 6]
    >>> list_2
    [2, 4, 5, 7]
    """
    # EAFP restyle: attempt each removal and ignore values absent from L1.
    # list.remove drops only the *first* occurrence per attempt, so duplicate
    # handling matches the original membership-test implementation exactly.
    for value in L2:
        try:
            L1.remove(value)
        except ValueError:
            pass
    # Mutates L1 in place and deliberately returns None, following the
    # stdlib convention for in-place mutators (like list.sort()).
if __name__ == '__main__':
    # Run the doctest examples embedded in remove_shared's docstring.
    import doctest
    doctest.testmod()
| true |
001d52b38d7195e9e6fea04dcb3f1f3dce561137 | Python | DukeAML/celitech | /celitech/polar_bar_chart.py | UTF-8 | 2,270 | 3.09375 | 3 | [] | no_license | import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from dataclean import BYTES_TO_GB,TODAY, is_leap_year
# Change values here to get different graphs
COUNTRY_SUBSET = ['USA', 'DEU'] # options: input ISO3's into list
TIMESPLIT = 'MONTH' # options: DAY|MONTH
DATATYPE = 'DURATION' # options: CALLS|DURATION
# Cumulative day-of-year offsets for the first day of each month
# (index 0 = January), used to turn (month, day-of-month) into day-of-year.
REGULAR_YEAR = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
LEAP_YEAR = [0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335]
# Pick the offset table matching the current year.
YEAR = REGULAR_YEAR
if (is_leap_year(TODAY.year)): YEAR = LEAP_YEAR
def select_data_on_timesplit(df, datatype, time):
    """Aggregate the records of *df* into per-day or per-month buckets.

    Args:
        df: DataFrame with at least CONNECT_TIME ('YYYY-MM-DD...' strings)
            and DURATION columns.
        datatype: 'CALLS' to count records, anything else (i.e. 'DURATION')
            to sum DURATION converted from bytes to GB.
        time: 'DAY' for 365 day-of-year buckets, anything else for 12
            monthly buckets.

    Returns:
        A list of 365 or 12 accumulated values.
    """
    bucket_count = 365 if time == "DAY" else 12
    totals = [0] * bucket_count
    # CONNECT_TIME determines which bucket each record falls into.
    for _, row in df.iterrows():
        connect_time = str(row["CONNECT_TIME"])
        month = int(connect_time[5:7])
        day_of_month = int(connect_time[8:10])
        if time == "DAY":
            # YEAR maps month -> cumulative day offset (leap-aware, module level).
            bucket = YEAR[month - 1] + day_of_month - 1
        else:
            bucket = month - 1
        if datatype == "CALLS":
            totals[bucket] += 1
        else:
            totals[bucket] += int(row["DURATION"]) / BYTES_TO_GB  # Bytes to GB
    return totals
def main(df):
    """Render a polar bar chart of the aggregated totals, one wedge per bucket."""
    totals = select_data_on_timesplit(df, DATATYPE, TIMESPLIT)
    bucket_count = len(totals)
    # Spread the buckets evenly around the full 360-degree circle.
    angles = [(i + 1) * (360 / bucket_count) for i in range(bucket_count)]
    fig = go.Figure(go.Barpolar(
        r=totals,
        theta=angles,
        marker_color=px.colors.sequential.deep
    ))
    fig.update_layout(
        title="Total {} on Each Day of All Years".format(DATATYPE.lower().capitalize()),
    )
    fig.show()
if __name__=="__main__":
    # Make dataframe only include countries of interest and nonzero values for duration
    df = pd.read_csv("sample_data.csv")
    df = df[(df['COUNTRY_ISO3'].isin(COUNTRY_SUBSET)) & (df['DURATION'] != 0)]
    main(df)
| true |
81b651bc82f3579dd36571a0d6361481309ac578 | Python | walkccc/LeetCode | /solutions/1943. Describe the Painting/1943.py | UTF-8 | 524 | 2.875 | 3 | [
"MIT"
] | permissive | from sortedcontainers import SortedDict
class Solution:
    def splitPainting(self, segments: 'List[List[int]]') -> 'List[List[int]]':
        """Sweep-line over segment endpoints (LeetCode 1943).

        Each segment [start, end, color] contributes +color at `start` and
        -color at `end`.  Walking the endpoints in ascending order, every
        stretch where the running color mix is positive is one painted piece
        [prev, current, mix].

        Changes from the original: uses a plain dict plus sorted() instead of
        the third-party sortedcontainers.SortedDict (identical traversal
        order, stdlib only), and the annotations are string literals because
        `typing.List` is never imported in this file.
        """
        # point -> net color change at that coordinate
        deltas = {}
        for start, end, color in segments:
            deltas[start] = deltas.get(start, 0) + color
            deltas[end] = deltas.get(end, 0) - color

        ans = []
        runningMix = 0   # total color covering the current sweep position
        prevIndex = 0
        for point in sorted(deltas):
            if runningMix > 0:
                ans.append([prevIndex, point, runningMix])
            runningMix += deltas[point]
            prevIndex = point
        return ans
| true |
bb55665c5d8d575b0c37784d0216cb8f8cc55622 | Python | 01090841589/solved_problem | /others/stack실습/subtree.py | UTF-8 | 680 | 2.90625 | 3 | [] | no_license | import sys
sys.stdin = open('subtree.txt')
def deep(n):
    """Count the nodes in the subtree rooted at *n*, accumulating into the
    module-level ``result`` (which the caller seeds with 1 for the root).

    ``tree[n][0]`` and ``tree[n][1]`` hold the left/right children; 0 means
    "no child".
    """
    global result
    # Visit the left child slot first, then the right, exactly as before.
    for child in tree[n][:2]:
        if child != 0:
            result += 1
            deep(child)
deep(tree[n][1])
T = int(input())  # number of test cases
for tc in range(1, T+1):
    # E = number of edges, N = root of the subtree whose size we report
    E, N = map(int, input().split())
    # Flat list of (parent, child) pairs describing the edges.
    node = list(map(int, input().split()))
    # tree[i] = [left child, right child, parent]; 0 means "empty slot".
    tree = [[0] * 3 for _ in range(E + 2)]
    result = 1  # count the subtree root itself; deep() adds the rest
    for i in range(len(node) // 2):
        # Fill the first free child slot of the parent (left, then right).
        if tree[node[2 * i]][0] == 0:
            tree[node[2 * i]][0] = node[2 * i + 1]
        else:
            tree[node[2 * i]][1] = node[2 * i + 1]
        tree[node[2 * i + 1]][2] = node[2 * i]
    deep(N)
    print('#{} {}'.format(tc, result))
5e0302fde764f33936ee764ec0e7281a06e7108e | Python | jimothyGator/pivotal-git-hooks | /hooks/commit-msg | UTF-8 | 1,341 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
# Prepends the PivotalTracker story number
# Adapted from https://gist.github.com/lorin/2963131
import re
import subprocess
import sys
# Regex pattern for gitbranch name.
# Example: feature/123456-my-cool-feature
# Change the pattern if you put the Pivotal Tracker ticket number
# in a different position, such as at the end of the branch name.
# The ticket number is captured in the named group "ticket".
pattern = r"^(feature|hotfix|fix)/(?P<ticket>\d+)-.+"
def get_story():
    """Return the Pivotal Tracker story id encoded in the current branch name."""
    branch = subprocess.check_output(["/usr/bin/git", "rev-parse", "--abbrev-ref", "HEAD"])
    match = re.match(pattern, branch)
    if match is None:
        raise ValueError("Branch name not in proper format: %s" % branch)
    return match.group('ticket')
def prepend_commit_msg(story):
    """Prefix the commit message file (argv[1]) with the story id, if absent."""
    msgfile = sys.argv[1]
    with open(msgfile) as fh:
        original = fh.read()
    needs_prefix = ('#' + story) not in original
    # Rewrite the file, optionally adding the "[#NNN] " prefix first.
    with open(msgfile, 'w') as fh:
        if needs_prefix:
            fh.write("[#%s] " % story)
        fh.write(original)
def main():
    """Prepend the story id to the commit message, aborting the commit on error."""
    try:
        story = get_story()
        prepend_commit_msg(story)
    except Exception, e:
        # NOTE(review): file.write() expects a string; passing the exception
        # object itself likely raises TypeError in Python 2 -- str(e) would be
        # safer.  (The `except X, e` form also pins this hook to Python 2.)
        sys.stderr.write(e)
        sys.exit(1)
main()
| true |
ad8f20e0933e2d36f9360615605e60f80841db50 | Python | ivinskiy/code-examples | /portfolio-optimization/portfolio-optimization.py | UTF-8 | 3,357 | 3.015625 | 3 | [] | no_license | import pandas as pd
import os
from functools import reduce
import matplotlib.pyplot as plt
import numpy as np
def portfolio_opt(df_list, returns, num_portfolios):
    """Monte-Carlo search over random long-only portfolio weightings.

    Args:
        df_list: Per-stock dataframes; only its length (the number of
            assets) is used here.
        returns: DataFrame of daily log returns, one column per stock.
        num_portfolios: Number of random weight vectors to sample.

    Returns:
        (max_sharpe_port, min_vol_port): two pandas Series, each holding the
        annualised return, volatility, Sharpe ratio, and per-asset weights of
        the best-Sharpe and minimum-volatility sampled portfolios.

    Side effect: shows a matplotlib scatter plot of all sampled portfolios,
    coloured by Sharpe ratio, with the two extreme portfolios starred.
    """
    # mean daily returns and covariance matrix
    mean_daily_returns = returns.mean()
    cov_matrix = returns.cov()

    # Rows: return, stdev, sharpe, then one weight row per asset.
    results = np.zeros((3 + len(df_list), num_portfolios))

    for i in range(num_portfolios):
        # select random long-only weights, rebalanced to sum to 1
        weights = np.array(np.random.random(len(df_list)))
        weights /= np.sum(weights)

        # annualised return (252 trading days) and volatility (covariance
        # quadratic form, scaled by sqrt(252))
        portfolio_return = np.sum(mean_daily_returns * weights) * 252
        portfolio_std_dev = np.sqrt(np.dot(weights.T, np.dot(cov_matrix, weights))) * np.sqrt(252)

        results[0, i] = portfolio_return
        results[1, i] = portfolio_std_dev
        # Sharpe Ratio (risk-free rate element excluded for simplicity)
        results[2, i] = results[0, i] / results[1, i]
        # store the weight vector alongside the statistics
        for j in range(len(weights)):
            results[j + 3, i] = weights[j]

    # Column labels come from the returns frame itself rather than the
    # module-level `data` global the original read, so the function no longer
    # depends on hidden state outside its arguments (the columns are the
    # same, since returns is derived column-for-column from data).
    column_names = ["ret", "stdev", "sharpe"] + list(returns.columns)
    results_frame = pd.DataFrame(results.T, columns=column_names)

    # locate the portfolios with the highest Sharpe ratio and the minimum
    # standard deviation
    max_sharpe_port = results_frame.iloc[results_frame['sharpe'].idxmax()]
    min_vol_port = results_frame.iloc[results_frame['stdev'].idxmin()]

    # scatter plot of all sampled portfolios, coloured by Sharpe ratio
    plt.scatter(results_frame.stdev, results_frame.ret, c=results_frame.sharpe, cmap='RdYlBu')
    plt.xlabel('Volatility')
    plt.ylabel('Returns')
    plt.colorbar()
    # red star: max-Sharpe portfolio; green star: minimum-variance portfolio
    plt.scatter(max_sharpe_port[1], max_sharpe_port[0], marker=(5, 1, 0), color='r', s=1000)
    plt.scatter(min_vol_port[1], min_vol_port[0], marker=(5, 1, 0), color='g', s=1000)
    plt.show()

    return max_sharpe_port, min_vol_port
path = "aktiedata"
# CLOSE PRICES: load Date + Close for every CSV in the data directory,
# renaming the Close column to the ticker (taken from the filename prefix).
df_list = []
for r,d,f, in os.walk(path):
    for file in f:
        stock = pd.read_csv(path+"/"+file)[["Date", "Close"]]
        stock.columns = ["Date", file.split("_")[0]]
        df_list.append(stock)
# MERGE ALL: inner-join every per-stock frame on Date.
data = reduce(lambda x, y: pd.merge(x,y,on = "Date"), df_list)
# FIX DATE: the raw files store dates as YYYYMMDD integers.
data.Date = pd.to_datetime(data.Date.astype(str), format = "%Y%m%d").dt.date
# SET DATE TO INDEX
data.index = data.Date
data.drop("Date", axis = 1, inplace = True)
# LOG RETURNS: daily log returns, first (NaN) row dropped.
returns = np.log(data).diff()
returns.dropna(inplace=True)
portfolios = portfolio_opt(df_list, returns, 50000)
# Keep only the per-asset weights (rows 3+ are the weight entries).
max_sharpe_p = portfolios[0].iloc[3:]
min_var_p = portfolios[1].iloc[3:]
# NOTE(review): `budget` is defined but never used in this chunk --
# presumably intended for allocating money across the weights.
budget = 82700
| true |
011ad61aa3f725911cea9fdd441aff0b690982d3 | Python | supunj/polish-map-processor-for-mkgmap | /process_map/src/polish/element.py | UTF-8 | 9,492 | 2.78125 | 3 | [] | no_license | '''
Created on Mar 3, 2013
@author: Supun Jayathilake(supunj@gmail.com)
'''
import collections
from multiprocessing.dummy import dict
from polish.util.polishutil import PolishUtil
class Shape:
_Type = ''
_Label = ''
_EndLevel = ''
_Marine = ''
_Data = None
# All polish utilities - This will act as a static
polish_util = PolishUtil()
def __init__(self):
self._Data = dict()
def get_type(self):
return self.__Type
def get_label(self):
return self.__Label
def get_end_level(self):
return self.__EndLevel
def get_marine(self):
return self.__Marine
def set_type(self, value):
self.__Type = value
def set_label(self, value):
self.__Label = value
def set_end_level(self, value):
self.__EndLevel = value
def set_marine(self, value):
self.__Marine = value
def get_data(self, key):
if key not in self._Data.keys():
raise KeyError
return self._Data[key]
def set_data(self, key, value):
self._Data[key] = value.replace('),(', ')^(').split('^')
Type = property(get_type, set_type, None, None)
Label = property(get_label, set_label, None, None)
EndLevel = property(get_end_level, set_end_level, None, None)
Marine = property(get_marine, set_marine, None, None)
class Polyline(Shape):
ROUTABLE_TYPES = ['0x0', '0x1', '0x2', '0x3', '0x4', '0x5', '0x6', '0x7', '0x8',
'0x9', '0xa', '0xb', '0xc', '0x14', '0x16', '0x1a', '0x1b']
ROAD_BOARD_MAP = {'0x1' : '~[0x04]',
'0x2' : '~[0x02]',
'0x3' : '~[0x03]'}
_RoadID = ''
_DirIndicator = ''
_RouteParam = ''
_Nod = None
_split_roads = None
def __init__(self):
super().__init__()
self._Nod = dict()
self._split_roads = list()
def get_nod(self, key):
if key not in self._Data.keys():
raise KeyError
return self._Nod[key]
def set_nod(self, key, value):
self._Nod[key] = value
def get_road_id(self):
return self.__RoadID
def get_dir_indicator(self):
return self.__DirIndicator
def get_route_param(self):
return self.__RouteParam
def set_road_id(self, value):
self.__RoadID = value
def set_dir_indicator(self, value):
self.__DirIndicator = value
def set_route_param(self, value):
self.__RouteParam = value
# Check if the line is a routable type
def isRoutable(self):
if self.Type in self.ROUTABLE_TYPES:
return True
else:
return False
RoadID = property(get_road_id, set_road_id, None, None)
DirIndicator = property(get_dir_indicator, set_dir_indicator, None, None)
RouteParam = property(get_route_param, set_route_param, None, None)
# Add additional Nods
def addAdditionalNods(self, max_nod_id, nod_frequency):
# Do this only for the routable Polylines
if self.isRoutable():
numberofData = len(self._Data[0])
# Add a Nod for every nod_frequency data
for i in range(0, numberofData, nod_frequency):
#Check if Nod already exists
if i in self._Nod.keys():
continue
else:
max_nod_id += 1
self._Nod[int(i)] = str(i) + ',' + str(max_nod_id) + ',' + '0'
return max_nod_id
# Add boards to roads
def addRoadBoards(self):
if self.isRoutable() and self.Type in self.ROAD_BOARD_MAP:
self.Label = self.ROAD_BOARD_MAP[self.Type] + self.Label
# Split road from Nods
def splitRoadfromNods(self, max_road_id):
# No need to split roads with just 2 nods or less
if len(self._Nod) <= 2:
self._split_roads.append(self)
else:
new_road_data_array = list()
# Get the nods in pairs
nod_pairs = self.polish_util.pairwise(sorted(self._Nod.keys()))
# Traverse through the nods
for nod_pair in nod_pairs:
# Clone the road and add a new road id
new_road = self.clone()
max_road_id+=1
new_road.RoadID = str(max_road_id)
# If the array is not empty, clear
if len(new_road_data_array) != 0:
new_road_data_array.clear()
# Load a relevant data set between two nods
for position in range(nod_pair[0], nod_pair[1]+1):
new_road_data_array.append(self._Data[0][position])
# Set new nod and data to the new road
nod0_value_set = self._Nod[nod_pair[0]].split(',')
nod1_value_set = self._Nod[nod_pair[1]].split(',')
new_road.set_data(0, ','.join(new_road_data_array))
new_road.set_nod(0, '0,' + nod0_value_set[1] + ',' + nod0_value_set[2])
new_road.set_nod(len(new_road_data_array)-1, str(len(new_road_data_array)-1) + ',' + nod1_value_set[1] + ',' + nod1_value_set[2])
# Add the new road to the list
self._split_roads.append(new_road)
return self._split_roads, max_road_id
# Build and return the new segment
def buildPolyline(self):
segment = list()
segment.append('[POLYLINE]\n')
segment.append('Type=' + self.Type + '\n')
segment.append('Label=' + self.Label + '\n')
segment.append('EndLevel=' + self.EndLevel + '\n')
if self.DirIndicator != '' : segment.append('DirIndicator=' + self.DirIndicator + '\n')
if self.RoadID != '' : segment.append('RoadID=' + self.RoadID + '\n')
if self.RouteParam != '' : segment.append('RouteParam=' + self.RouteParam + '\n')
# Append Data
for key, value in collections.OrderedDict(sorted(self._Data.items())).items():
segment.append('Data' + str(key) + '=' + ','.join(value) + '\n')
# Append Nod
i = 1
for key, value in collections.OrderedDict(sorted(self._Nod.items())).items():
segment.append('Nod' + str(i) + '=' + value + '\n')
i+=1
segment.append('Marine=' + self.Marine + '\n')
segment.append('[END]\n\n')
return segment
# Clone the object clearing Data and Nods
def clone(self):
cloned = Polyline()
cloned.Type = self.Type
cloned.Label = self.Label
cloned.EndLevel = self.EndLevel
cloned.Marine = self.Marine
cloned.DirIndicator = self.DirIndicator
cloned.RouteParam = self.RouteParam
return cloned
class Polygon(Shape):
def __init__(self):
super().__init__()
class Point(Shape):
def __init__(self):
super().__init__()
class Restriction():
_Nod = None
_TraffPoints_From = None
_TraffPoints_To = None
_TraffRoads_From = None
_TraffRoads_To = None
_Time = None
def get_time(self):
return self.__Time
def set_time(self, value):
self.__Time = value
def get_nod(self):
return self.__Nod
def get_traff_points_from(self):
return self.__TraffPoints_From
def get_traff_points_to(self):
return self.__TraffPoints_To
def get_traff_roads_from(self):
return self.__TraffRoads_From
def get_traff_roads_to(self):
return self.__TraffRoads_To
def set_nod(self, value):
self.__Nod = value
def set_traff_points_from(self, value):
self.__TraffPoints_From = value
def set_traff_points_to(self, value):
self.__TraffPoints_To = value
def set_traff_roads_from(self, value):
self.__TraffRoads_From = value
def set_traff_roads_to(self, value):
self.__TraffRoads_To = value
Nod = property(get_nod, set_nod, None, None)
TraffPoints_From = property(get_traff_points_from, set_traff_points_from, None, None)
TraffPoints_To = property(get_traff_points_to, set_traff_points_to, None, None)
TraffRoads_From = property(get_traff_roads_from, set_traff_roads_from, None, None)
TraffRoads_To = property(get_traff_roads_to, set_traff_roads_to, None, None)
Time = property(get_time, set_time, None, None)
# Build the restriction
def buildRestriction(self):
segment = list()
segment.append('[Restrict]\n')
segment.append('Nod=' + self.Nod + '\n')
segment.append('TraffPoints=' + self.TraffPoints_From + ',' + self.Nod + ',' + self.TraffPoints_To + '\n')
segment.append('TraffRoads=' + self.TraffRoads_From + ',' + self.TraffRoads_To + '\n')
segment.append('Time=' + self.Time + '\n')
segment.append('[END-Restrict]\n\n')
return segment
| true |
069b5b012a29cee0e4bfa8e7699423780a9935bc | Python | 0HJ0/Keynesian-beauty-contest | /LSTMAgent.py | UTF-8 | 4,443 | 2.828125 | 3 | [
"MIT"
] | permissive | import numpy as np
import random
from keras.layers import LSTM, Dense
from keras import Sequential
from keras.optimizers import Adam
from collections import deque
import matplotlib.pyplot as plt
names = ['random', 'fixed', 'repeat', 'follow', 'Nfollow', 'NN', 'DQN', 'LSTM']
class LSTMAgent:
    """Beauty-contest player: an LSTM predicts the other agents' next guesses
    from recent history, and a small feed-forward net maps those predictions
    (plus the last round) to this agent's own guess.  Action selection is
    epsilon-greedy with exponential decay.
    """
    def __init__(self, maxNum, agentNum, agentIndex):
        self.name = 'LSTM'
        self.maxNum = maxNum            # guesses are integers in [0, maxNum]
        self.agentNum = agentNum        # total number of players
        self.agentIndex = agentIndex    # this agent's column in the answer vector
        self.LSTMlearningRate = 1e-2
        self.NNlearningRate = 5e-3
        self.sequenceSize = 8           # history window length fed to the LSTM
        self.epsilon = 1                # start fully exploratory
        self.epsilonDecay = 0.999
        self.epsilonMin = 0.001
        self.LSTMmodel = self._buildModel()
        self.NNmodel = self._buildOutputModel()
        self.memory = deque([], self.sequenceSize)  # last sequenceSize answer vectors
        self.RHIS = []                  # prediction-residual history (for plotting)
    def _buildModel(self):
        """LSTM that maps the answer history window to the opponents' next answers."""
        model = Sequential()
        model.add(LSTM(10, input_shape=(self.sequenceSize, self.agentNum+1), return_sequences=True,
                       kernel_initializer='he_uniform'))
        model.add(LSTM(10, kernel_initializer='he_uniform'))
        # One linear output per opponent (this agent excluded).
        model.add(Dense(self.agentNum-1, activation='linear',
                        kernel_initializer='he_uniform'))
        model.summary()
        model.compile(loss='mse', optimizer=Adam(lr=self.LSTMlearningRate))
        return model
    def _buildOutputModel(self):
        """Feed-forward net: predicted opponent answers + last round -> own guess."""
        model = Sequential()
        model.add(Dense(10, input_dim=2*self.agentNum, activation='relu',
                        kernel_initializer='he_uniform'))
        model.add(Dense(10, activation='relu',
                        kernel_initializer='he_uniform'))
        model.add(Dense(1, activation='linear'))
        model.summary()
        model.compile(loss='mse', optimizer=Adam(lr=self.NNlearningRate))
        return model
    def predict(self):
        # Predict the opponents' next answers (scaled to [0, 1]) from the
        # history window, with this agent's own answer appended as an extra
        # input column per stored round.
        Pred = np.round(self.maxNum * self.LSTMmodel.predict(
            np.divide([np.append(self.memory, np.vstack(np.array(self.memory)[:,self.agentIndex]), axis=1)], self.maxNum)))
        # Feed predictions + last round + own last answer into the output net.
        arr = np.concatenate((Pred, self.memory[-1], self.memory[-1][self.agentIndex]), axis=None)
        X = np.divide([arr], self.maxNum)
        action = self.NNmodel.predict(X)
        # Scale the [0, 1] output back to an integer guess.
        return int(round(self.maxNum * action[0,0]))
    def trainLSTM(self, Answers):
        # One gradient step: history window -> this round's opponent answers.
        X = np.divide([np.append(self.memory, np.vstack(np.array(self.memory)[:,self.agentIndex]), axis=1)], self.maxNum)
        Y = np.divide([np.delete(Answers, self.agentIndex)], self.maxNum)
        self.LSTMmodel.fit(X, Y, epochs=1, verbose=0)
    def trainNN(self, Answers, Rewards):
        Pred = np.round(self.maxNum * self.LSTMmodel.predict(
            np.divide([np.append(self.memory, np.vstack(np.array(self.memory)[:,self.agentIndex]), axis=1)], self.maxNum)))
        # Record the prediction residuals for later diagnostics in data().
        self.RHIS.append(np.subtract(Pred, np.delete(Answers, self.agentIndex)))
        arr = np.concatenate((Pred, self.memory[-1], self.memory[-1][self.agentIndex]), axis=None)
        X = np.divide([arr], self.maxNum)
        # Train towards the answer of the agent that earned the highest reward
        # this round -- i.e. imitate the winner.
        ans = Answers[np.argmax(Rewards)]
        Y = np.divide([[ans]], self.maxNum)
        self.NNmodel.fit(X,Y, epochs=1, verbose=0)
    def data(self, ContestNum):
        """Plot a rolling mean of each opponent's absolute prediction error."""
        plt.figure(figsize=(15,7.5))
        for i in range(self.agentNum-1):
            tempList = []
            for j in range(ContestNum):
                # NOTE(review): for j < 100 the slice [j-100:j+1] has a
                # negative start and wraps around -- presumably a 100-round
                # rolling window was intended; verify.
                tempList.append(np.mean(np.absolute(self.RHIS)[j-100:j+1, 0, i]))
            plt.plot(range(ContestNum), tempList, label=names[i])
        plt.legend()
        plt.savefig('graph.png')
    def getAns(self, Answers, Rewards):
        """Observe last round's answers/rewards, train, and return the next guess."""
        # Train only once the history window is full.
        if len(self.memory) == self.sequenceSize:
            self.trainLSTM(Answers)
            self.trainNN(Answers, Rewards)
        self.memory.append(Answers)
        # Epsilon-greedy: random guess while exploring or while the window
        # is not yet full; otherwise use the model prediction.
        if random.random() < self.epsilon or len(self.memory) != self.sequenceSize:
            action = random.choice(range(self.maxNum+1))
        else:
            action = self.predict()
        if self.epsilon > self.epsilonMin:
            self.epsilon*=self.epsilonDecay
        """ loss graph
        ContestNum = 5000
        if len(self.RHIS) == ContestNum:
            self.data(ContestNum)
        """
        return action
| true |
d6bbb9096ac7fc320c40f00aed3640097feb69f2 | Python | jsbalrog/python-examples | /src/RootbeerSong.py | UTF-8 | 416 | 3.828125 | 4 | [] | no_license | word = "bottles"
for number in reversed(range(1, 100)):
print '%d %s of rootbeer on the wall.' % (number, word)
print '%d %s of rootbeer.' % (number, word)
print 'Take one down, pass it around.'
if number-1 > 0:
if number-1 == 1:
word = "bottle"
print '%d %s of rootbeer on the wall.' % (number-1, word)
else:
print 'No more bottles of rootbeer on the wall.'
| true |
379881c6e9105b4c63c615874c4a367eb0ec1099 | Python | beltonhe/cgi-lab | /login.py | UTF-8 | 1,153 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
import cgi, os, secret
from templates import secret_page
# Parse the submitted login form.
form = cgi.FieldStorage()
username = form.getvalue("username")
pwd = form.getvalue("password")
# set cookies
# NOTE(review): HTTP requires all headers (including Set-Cookie) to be sent
# BEFORE the blank line ending the header block, but this script prints the
# content-type header further below, after page output -- the cookie headers
# likely end up inside the response body.  Verify against the web server.
if secret.username == username and secret.password == pwd:
    print("Set-Cookie: userID = %s;" %username)
    print("Set-Cookie: Password = %s;" %pwd)
# search for the saved cookies
for param in os.environ.keys():
    if param == "HTTP_COOKIE":
        cookies = os.environ[param]
        cookie_username = ''
        cookie_password = ''
        cookies = cookies.split("; ")
        for key in range(len(cookies)):
            # NOTE(review): the cookies above are set with spaces around '=',
            # so split("=") yields keys like "userID " (trailing space) that
            # can never equal "userID" -- the cookie login path presumably
            # never matches.  Confirm and fix the Set-Cookie format.
            block = cookies[key].split("=")
            if block[0] == "userID":
                cookie_username = block[1]
            if block[0] == "Password":
                cookie_password = block[1]
        if secret.username == cookie_username and secret.password == cookie_password:
            print(secret_page(cookie_username, cookie_password))
# Emit the (late -- see note above) content-type header and the HTML page
# echoing the submitted credentials.
print("content-type: text/html\r\n\r\n")
print("<html>")
print("<head>")
print("<title>Hello - Second CGI Program</title>")
print("</head>")
print("<body>")
print("<p><b>username:</b> %s<br><b>Password:</b>%s</p>" %(username, pwd))
print("</body>")
print("</html>")
| true |
5cbf0cc3f49b5a2965d3f6c784ce3ea2e1293078 | Python | Mos33001/Unit4_L5 | /U4_L5p4.py | UTF-8 | 243 | 3.140625 | 3 | [] | no_license | from turtle import *
# Draw a five-pointed star with a turquoise turtle on a black background.
artist = Turtle()
screen = Screen()
# Pen and cursor appearance.
artist.color("turquoise")
artist.pensize(5)
artist.speed(6)
artist.turtlesize(1, 1, 1)
artist.shape("turtle")
screen.bgcolor("black")
# Five edges with 144-degree left turns trace a pentagram.
for _ in range(5):
    artist.forward(50)
    artist.left(144)
mainloop()
87ff6cd289860501c9322aa03eec90c2cc9c4048 | Python | hankcs/HanLP | /hanlp/datasets/tokenization/loaders/txt.py | UTF-8 | 5,643 | 2.75 | 3 | [
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] | permissive | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-08-01 12:35
from typing import Union, List, Callable
from hanlp.common.dataset import TransformableDataset
from hanlp.utils.io_util import TimingFileIterator
from hanlp.utils.span_util import words_to_bmes, words_to_bi
from hanlp.utils.string_util import split_long_sentence_into
class TextTokenizingDataset(TransformableDataset):
    """Dataset that reads a pre-tokenized corpus (one sentence per line) and
    yields ``{'token': [...]}`` samples, splitting over-long sentences."""

    def __init__(self,
                 data: Union[str, List],
                 transform: Union[Callable, List] = None,
                 cache=None,
                 generate_idx=None,
                 delimiter=None,
                 max_seq_len=None,
                 sent_delimiter=None,
                 char_level=False,
                 hard_constraint=False,
                 ) -> None:
        """A dataset for tagging tokenization tasks.

        Args:
            data: The local or remote path to a dataset, or a list of samples where each sample is a dict.
            transform: Predefined transform(s).
            cache: ``True`` to enable caching, so that transforms won't be called twice.
            generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when
                samples are re-ordered by a sampler.
            delimiter: Delimiter between tokens used to split a line in the corpus.
            max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible.
            sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can
                be split here.
            char_level: Whether the sequence length is measured at char level.
            hard_constraint: Whether to enforce hard length constraint on sentences. If there is no ``sent_delimiter``
                in a sentence, it will be split at a token anyway.
        """
        self.hard_constraint = hard_constraint
        self.char_level = char_level
        self.sent_delimiter = sent_delimiter
        self.max_seq_len = max_seq_len
        self.delimiter = delimiter
        super().__init__(data, transform, cache, generate_idx)

    def load_file(self, filepath: str):
        """Load tokenized corpus. The format is one sentence per line, where each line consists of tokens separated
        by a delimiter (usually space).

        .. highlight:: bash
        .. code-block:: bash

            $ head train.txt
            上海 浦东 开发 与 法制 建设 同步
            新华社 上海 二月 十日 电 ( 记者 谢金虎 、 张持坚 )

        Args:
            filepath: The path to the corpus.
        """
        f = TimingFileIterator(filepath)
        # longest_sent = 0
        for line in f:
            line = line.rstrip('\n')
            tokens = line.split(self.delimiter)
            if not tokens:
                continue
            # Split sentences whose total character count exceeds max_seq_len.
            if self.max_seq_len and sum(len(t) for t in tokens) > self.max_seq_len:
                # debug = []
                for short_sents in split_long_sentence_into(tokens, self.max_seq_len, self.sent_delimiter,
                                                            char_level=self.char_level,
                                                            hard_constraint=self.hard_constraint):
                    # debug.extend(short_sents)
                    # longest_sent = max(longest_sent, len(''.join(short_sents)))
                    yield {'token': short_sents}
                # assert debug == tokens
            else:
                # longest_sent = max(longest_sent, len(''.join(tokens)))
                yield {'token': tokens}
            f.log(line[:20])
        f.erase()
        # print(f'Longest sent: {longest_sent} in {filepath}')
def generate_tags_for_subtokens(sample: dict, tagging_scheme='BMES'):
    """
    Create the x sequence for a tokenization sample. Each x is an atomic subtoken that will be tagged with BMES or
    BI tags.

    Args:
        sample: During prediction, it is a dict with 'token' being the input text, 'token_subtoken_offsets' being
            incremental offsets per each subtoken. During training, it is a dict with 'token' being a sequence of
            tokens, 'token_subtoken_offsets' being non-incremental offsets per each subtoken,
            'token_subtoken_offsets_group' being subtoken offsets grouped by each token.
        tagging_scheme: Either 'BMES' or 'BI'; anything else raises NotImplementedError.

    Returns:
        The same dict, with 'raw_token' preserving the original 'token' value, 'token' replaced by the subtoken
        strings, and (when training groups are present) 'tag' holding the per-subtoken tags.
    """
    # We could use token_token_span but we don't want token_token_span in the batch
    subtokens_group = sample.get('token_subtoken_offsets_group', None)
    sample['raw_token'] = sample['token']
    tokens = sample.get('token_') or sample['token']
    if subtokens_group:
        # Training path: subtokens and tags are derived per-token.
        sample['token'] = subtokens_group_to_subtokens(tokens, subtokens_group)
        if tagging_scheme == 'BMES':
            sample['tag'] = words_to_bmes(subtokens_group)
        elif tagging_scheme == 'BI':
            sample['tag'] = words_to_bi(subtokens_group)
        else:
            raise NotImplementedError(f'Unsupported tagging scheme {tagging_scheme}.')
    else:
        # Prediction path: slice the raw text by the flat subtoken offsets.
        sample['token'] = subtoken_offsets_to_subtokens(tokens, sample['token_subtoken_offsets'])
    return sample
def subtoken_offsets_to_subtokens(text, token_subtoken_offsets):
    """Slice *text* into subtoken strings using (begin, end) offset pairs."""
    return [text[begin:end] for begin, end in token_subtoken_offsets]
def subtokens_group_to_subtokens(tokens, subtoken_offsets_group):
    """Slice each token by its own (begin, end) offsets; return the flat list
    of subtoken strings, in token order."""
    return [token[begin:end]
            for offsets, token in zip(subtoken_offsets_group, tokens)
            for begin, end in offsets]
| true |
1bfd684629228ee922fa102c7e677c90531996f9 | Python | timsergor/StillPython | /067.py | UTF-8 | 1,441 | 4.25 | 4 | [] | no_license | #951. Flip Equivalent Binary Trees. Medium. 64.9%.
#For a binary tree T, we can define a flip operation as follows: choose any node, and swap the left and right child subtrees.
#A binary tree X is flip equivalent to a binary tree Y if and only if we can make X equal to Y after some number of flip operations.
#Write a function that determines whether two binary trees are flip equivalent. The trees are given by root nodes root1 and root2.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def flipEquiv(self, root1: 'TreeNode', root2: 'TreeNode') -> bool:
        """Return True if the two binary trees are flip equivalent
        (LeetCode 951): equal after swapping left/right children at any
        subset of nodes.

        Changes from the original: the unused isLeaf helper is removed,
        `== None` comparisons use `is None`, and the TreeNode annotations are
        string literals (TreeNode only exists as a comment in this file, so a
        bare name would raise NameError when the class body executes).
        """
        def isFlipped(node1, node2):
            # Two empty subtrees are trivially equivalent.
            if node1 is None and node2 is None:
                return True
            # Exactly one empty subtree, or differing root values: not equivalent.
            if node1 is None or node2 is None or node1.val != node2.val:
                return False
            # Children must match either in place or swapped.
            if isFlipped(node1.left, node2.left) and isFlipped(node1.right, node2.right):
                return True
            return isFlipped(node1.left, node2.right) and isFlipped(node1.right, node2.left)
        return isFlipped(root1, root2)
# < 30 min
| true |
0cb53f891ee72de6f92d760d1cde4c4db8bff1c5 | Python | chipuha/StockMarket | /Data/GetStockData.py | UTF-8 | 4,649 | 3.140625 | 3 | [] | no_license | # https://stackoverflow.com/questions/44225771/scraping-historical-data-from-yahoo-finance-with-python
# Data is retrieved for tickers given in the filename variable
import re
import urllib.request
from io import StringIO
from datetime import datetime, timedelta, date
import requests
import pandas as pd
class YahooFinanceHistory:
    """Downloads daily historical quotes for one symbol from Yahoo Finance.

    Yahoo's CSV download endpoint requires a session cookie plus a matching
    "crumb" token scraped from the symbol's history page; both are fetched
    lazily on the first quote request.
    NOTE(review): this crumb-based endpoint is unofficial and Yahoo has
    changed the page format before -- expect breakage over time.
    """
    timeout = 2  # seconds, for the crumb-page request
    crumb_link = 'https://finance.yahoo.com/quote/{0}/history?p={0}'
    crumble_regex = r'CrumbStore":{"crumb":"(.*?)"}'
    quote_link = 'https://query1.finance.yahoo.com/v7/finance/download/{quote}?period1={dfrom}&period2={dto}&interval=1d&events=history&crumb={crumb}'
    def __init__(self, symbol, days_back=7):
        # symbol: ticker to fetch; days_back: how far back the window extends.
        self.symbol = symbol
        self.session = requests.Session()
        self.dt = timedelta(days=days_back)
    def get_crumb(self):
        """Fetch the history page and extract the crumb token into self.crumb."""
        response = self.session.get(self.crumb_link.format(self.symbol), timeout=self.timeout)
        response.raise_for_status()
        match = re.search(self.crumble_regex, response.text)
        if not match:
            raise ValueError('Could not get crumb from Yahoo Finance')
        else:
            self.crumb = match.group(1)
    def get_quote(self):
        """Download the quote CSV for the configured window as a DataFrame.

        Raises requests.HTTPError (via raise_for_status) on a failed download.
        """
        # Refresh the crumb if we never fetched one or the session lost its cookies.
        if not hasattr(self, 'crumb') or len(self.session.cookies) == 0:
            self.get_crumb()
        now = datetime.utcnow()
        dateto = int(now.timestamp())
        datefrom = int((now - self.dt).timestamp())
        url = self.quote_link.format(quote=self.symbol, dfrom=datefrom, dto=dateto, crumb=self.crumb)
        response = self.session.get(url)
        response.raise_for_status()
        #Modified to force numerical values to be float types and avoid object type.
        return pd.read_csv(StringIO(response.text), parse_dates=['Date'],na_values='.')
# --- UPDATE TICKER DATA ---
# For each ticker listed in TickersToTrack.txt, append any missing daily
# quotes to Data/<ticker>.csv, creating the file (10 years of history) if
# it does not exist yet.
filename="TickersToTrack.txt"
print('Fetching data for tickers in '+filename)
file=open(filename,"r")
total = 0 #total number of tickers processed (attempted)
alreadyUpToDate = [] #track which tickers were already up to date
updated = [] #track which tickers were updated
errored = [] #track which tickers encountered errors
for ticker in file:
    ticker=ticker.strip('\n')
    outputfilename='Data/'+ticker+'.csv'
    total = total+1
    try: #check if file exists
        df = pd.read_csv('Data/'+ticker+'.csv')
        # Days since the last stored row's date.
        days_back = (date.today()-datetime.strptime(df['Date'].tail(1).item(),"%Y-%m-%d").date()).days
        print('days back:',days_back)
        #if updating during a weekend, days_back needs to be reduced (sat -1; sun -2)
        if date.today().weekday() > 4:
            days_back = days_back-(date.today().weekday()-4)
        yesterday = datetime.strftime(datetime.now()-timedelta(1), "%Y-%m-%d")
        # NOTE(review): `today` is computed with the same -1 day offset as
        # `yesterday` (so it actually holds yesterday's date) and is never
        # used below -- probably leftover code.
        today = datetime.strftime(datetime.now()-timedelta(1), "%Y-%m-%d")
        #check if the df's latest date is yesterday's date, otherwise update
        if df['Date'].tail(1).item()==yesterday:
            print(ticker,'is already up to date') #do nothing
            alreadyUpToDate.append(ticker)
        elif days_back <= 2 and date.today().weekday() > 4: #on a weekend with only 1-2 days missing, skip
            print(ticker,'is update to date enough. Try again Monday')
            alreadyUpToDate.append(ticker)
        else:
            try:
                print(ticker,'is updating')
                df2 = YahooFinanceHistory(ticker, days_back=days_back).get_quote()
                df2['Date'] = df2['Date'].dt.strftime("%Y-%m-%d")
                df = pd.concat([df,df2], join='inner', ignore_index=True) #force format of dataframe
                df = df.reset_index(drop=True)
                df.to_csv(path_or_buf=outputfilename)
                updated.append(ticker)
            except (urllib.error.HTTPError, requests.exceptions.HTTPError) as err:
                print('HTTP error for '+ticker)
                errored.append(ticker)
                continue
    except (FileNotFoundError) as err: #if file doesn't exist, make it
        try:
            print(ticker+' is getting a new file and updating')
            days_back = 365*10  # seed new files with roughly ten years of history
            df = YahooFinanceHistory(ticker, days_back=days_back).get_quote()
            df.to_csv(path_or_buf=outputfilename)
            updated.append(ticker)
        except (urllib.error.HTTPError, requests.exceptions.HTTPError) as err:
            print('HTTP error for '+ticker)
            errored.append(ticker)
            continue
file.close()
#print download diagnostics
print('\n'+str(len(alreadyUpToDate))+' out of '+str(total)+' were already up-to-date')
print(str(len(updated))+' out of '+str(total)+' were updated')
if len(errored) > 0:
    print(str(errored)+' errored')
| true |
7d30fc3cf54bee473d8dcf942fd8fc1f7f2db18d | Python | VdovichenkoSergey/QAutomation_python_pytest | /for_learning.py | UTF-8 | 403 | 3.859375 | 4 | [] | no_license | '''l = [1, 2, 3, 4, 5, 6]
for x in l:
if x % 2 == 0:
print(x)
else:
print(x + 1)'''
print()
print('Task 2')
# Task 2: bump every odd value in the list up to the next even number.
l2 = [3, 8, 5, 10]
for index, value in enumerate(l2):
    if value % 2 != 0:
        l2[index] = value + 1
print(l2)
'''print()
print('Task 3')
l3 = [3, 6, 13, 23, 28]
for i in range(len(l3)):
    if l3[i] % 2 != 0:
        l3[i] = l3[i] + 1
print(l3)
'''