blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9c3eb33201091d1c0ea679b47dded557e9a72740 | Python | MattFauth/Algoritimos_Programacao2 | /Codigos_Aulas/aula1.2.py | UTF-8 | 982 | 3.609375 | 4 | [] | no_license | def funcao1(x): #só aparece quando chamado
print('x antes de mudar:', x)
print('id(x) antes de mudar:', id(x))
x = 100
print('x apos mudar:', x)
print('id(x) depois de mudar:', id(x))
# --- main script: demonstrate pass-by-object-reference with funcao1 ---
a = 10  # execution starts here
print('a antes da função1:', a)
print('id(a) antes da funcao1:', id(a))
funcao1(a)  # jumps into funcao1; inside it, `a` is received under the name `x`
print('a depois da funcao1:', a)  # back here; rebinding x inside did not touch a, still 10
print('id(a) apos a funcao1:', id(a))
funcao1(7)  # any value may be passed; the function itself is unchanged
print('*************')
def funcao2(a):  # body only runs when the function is called
    """Rebind the parameter `a` locally; the caller's variable is unaffected."""
    print('a antes de mudar:', a)
    print('id(a) antes de mudar:', id(a))
    a = 200  # rebinds only the local name; ints are immutable, so id changes
    print('a apos mudar:', a)
    print('id(a) depois de mudar:', id(a))
# --- main script: same demonstration, parameter deliberately named `a` too ---
a = 10
print('a antes da função2:', a)
print('id(a) antes da funcao2:', id(a))
funcao2(a)
print('a depois da funcao2:', a)  # the outer `a` is independent of the local one inside the function
print('id(a) apos a funcao2:', id(a))
| true |
fe75dfbb243f05418c4c7dee5b873762ecaffcd2 | Python | bernardusrendy/eds | /Lecture_3_MLP/A-03-Matrix.py | UTF-8 | 350 | 3.59375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# Basic random, dictionary
# Various loops
import numpy as np
# inisialisasi list 10 elemen
# Build two 3-element vectors.
# FIX: the original bound the first vector to `a`, but every statement
# below refers to `v1`, so the script crashed with NameError before
# printing anything. Binding it as `v1` matches all uses.
v1 = np.array([1., 2., 3.])
v2 = np.array([4., 5., 6.])
# transpose() of a 1-D array is a no-op view, so v2t prints the same as v2.
v2t = v2.transpose()
print("v1=", v1)
print("v2=", v2)
print("v2t=", v2t)
print("v1+v2=", v1 + v2)            # element-wise sum
print("v1*v2=", v1 * v2)            # element-wise (Hadamard) product
print("v1.v2=", np.dot(v1, v2))     # inner product (scalar)
print("v1*v2t=", v1 * v2t)          # still element-wise: v2t is 1-D
| true |
87f622824b73c063fafa49144bec64640001e501 | Python | adrianosantospb/unifacisa-visao-computacional | /modulo2/2-classificadores/2.2-alexnet/main.py | UTF-8 | 1,944 | 2.796875 | 3 | [
"MIT"
] | permissive | '''
Curso sobre Visão Computacional
Prof. Adriano Santos, PhD
'''
# AlexNet https://www.learnopencv.com/understanding-alexnet/
# Artigo: https://arxiv.org/abs/1404.5997
from model import AlexNet
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.model_zoo import load_url as load_state_dict_from_url
import torch
from torch import nn, optim
from tqdm import tqdm
import argparse
import logging
from PIL import Image
import numpy as np
from util import Preprocessador, getLabel
# Funcao de treino
def main(args=None):
    """Run a single AlexNet inference on a sample image and print the result.

    Parses CLI options, optionally loads pre-trained weights from disk,
    preprocesses one image and prints the predicted label with its top score.

    args: optional list of CLI argument strings (argparse falls back to sys.argv).
    """
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
    logging.info('Arquitetura AlexNet')
    # Command-line options (help texts kept in Portuguese, as in the project).
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_classes', help='Num. de classes.', type=int, default=1000)
    # NOTE(review): argparse type=bool treats any non-empty string (even
    # "False") as True — confirm this flag behaves as intended from the CLI.
    parser.add_argument('--pretrained', help='Serão utilizados pesos pré-treinados.', type=bool, default=True)
    parser.add_argument('--model_url', help='Caminho para os pesos.', default="./pesos/alexnet-owt-4df8aa71.pth")
    opt = parser.parse_args(args)
    # Load and preprocess a single sample image.
    proc = Preprocessador()
    imagem_url = "./imagens/raposa.jpg"
    imagem = Image.open(imagem_url)
    imagem = proc.executa(imagem)
    # Add a batch dimension in front of the preprocessed tensor.
    # https://jhui.github.io/2018/02/09/PyTorch-Basic-operations/
    imagem = imagem.unsqueeze(0)
    # Model instance, switched to inference mode.
    model = AlexNet(opt.num_classes)
    model.eval()
    # Optionally load pre-trained weights.
    if opt.pretrained:
        checkpoint = torch.load(opt.model_url)
        model.load_state_dict(checkpoint)
    # Move the model to GPU when available.
    # NOTE(review): the input tensor is never moved to CUDA here, so on a GPU
    # machine the forward pass would mix devices — verify.
    if torch.cuda.is_available():
        model.to('cuda')
    with torch.no_grad():
        saida = model(imagem)
    # Index of the best-ranked class and the raw top score.
    index = np.argmax(saida[0]).item()
    acuracia = torch.max(saida).item()
    print(getLabel(index), acuracia)
if __name__ == "__main__":
main() | true |
ca025ecc460b5e406d850a35c17eb7e2f1da4c16 | Python | Emilio-Rojas/servicio | /serviciosws/serviciosws/ws/expose_alumno.py | UTF-8 | 4,409 | 2.546875 | 3 | [] | no_license | from django.http import JsonResponse, HttpResponse
from rest_framework.decorators import api_view
from serviciosws.persistence.models import Alumno
import json
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from rest_framework import status
scheme_add_alumno = {
"type" : "object",
"properties": {
"rut":{"type" : "string"},
"nombres":{"type" : "string"},
"apellidos":{"type" : "string"},
"email":{"type" : "string"},
"direccion":{"type" : "string"},
"comuna":{"type" : "string"},
"carrera":{"type" : "string"},
},
"required": ["rut", "nombres", "apellidos", "email", "direccion", "comuna", "carrera"],
"propertiesOrder": ["rut", "nombres", "apellidos", "email", "direccion", "comuna", "carrera"],
}
@api_view(['GET', 'POST'])
def alumno(request):
    """Dispatch /alumno requests: GET lists all students, POST creates one."""
    print('method alumno')
    handlers = {'GET': find_all, 'POST': add_alumno}
    handler = handlers.get(request.method)
    if handler is not None:
        return handler(request)
def add_alumno(request):
    """Create an Alumno record from the JSON request body.

    Validates the payload against scheme_add_alumno. Returns the created
    record as JSON on success, 409 on schema violation, 500 on any other
    failure (e.g. database errors).
    """
    print('method add_alumno')
    alumno = json.loads(request.body.decode('utf-8'))
    print('alumno -> {0}'.format(alumno))
    try:
        # Reject payloads that do not match the expected fields/types.
        validate(instance=alumno, schema=scheme_add_alumno)
        new_alumno = Alumno(
            rut = alumno.get('rut'),
            nombres = alumno.get('nombres'),
            apellidos = alumno.get('apellidos'),
            email = alumno.get('email'),
            direccion = alumno.get('direccion'),
            comuna = alumno.get('comuna'),
            carrera = alumno.get('carrera'),
        )
        new_alumno.save()
        # ensure_ascii=False keeps accented characters readable in the response.
        return JsonResponse(new_alumno.json(), content_type="application/json",
            json_dumps_params={'ensure_ascii': False})
    except ValidationError as err:
        print(err)
        response = HttpResponse('Error en esquema json, estructura no valida.\n {0}'.format(err.message))
        response.status_code = status.HTTP_409_CONFLICT
        return response
    except Exception as err:
        print(err)
        response = HttpResponse('Error al crear el alumno en el sistema')
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        return response
def find_all(request):
    """Return every Alumno (ordered by id) as a JSON list; 500 on DB failure."""
    print('method find_all')
    try:
        alumnos = Alumno.objects.all().order_by('id').values()
        # safe=False allows serializing a top-level list instead of a dict.
        return JsonResponse(list(alumnos), safe=False,
            content_type="application/json", json_dumps_params={'ensure_ascii': False})
    except Exception as err:
        print(err)
        response = HttpResponse('Error al buscar los alumnoes en la base de datos')
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        return response
@api_view(['GET', 'DELETE'])
def alumno_by_id(request, id):
    """Dispatch /alumno/<id>: GET fetches one student, DELETE removes it."""
    handlers = {'GET': find_by_id, 'DELETE': delete_by_id}
    handler = handlers.get(request.method)
    if handler is not None:
        return handler(request, id)
def find_by_id(request, id):
    """Fetch the Alumno with primary key `id`.

    Returns the record as JSON, 404 when it does not exist, 500 on any
    other database failure.
    """
    print('find_by_id')
    try:
        alumno = Alumno.objects.get(id = id)
        return JsonResponse(alumno.json(), content_type="application/json",
            json_dumps_params={'ensure_ascii': False})
    except Alumno.DoesNotExist as err:
        print(err)
        response = HttpResponse('Alumno no encontrado. Error al buscar por id -> {0}'.format(id))
        response.status_code = status.HTTP_404_NOT_FOUND
        return response
    except Exception as err:
        print(err)
        response = HttpResponse('Problemas en la base de datos. Error al buscar por id -> {0}'.format(id))
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        return response
def delete_by_id(request, id):
    """Delete the Alumno with primary key `id`.

    Returns 200 on success, 404 when the record does not exist, 500 on any
    other database failure.
    """
    # FIX: the debug trace previously printed 'find_by_id' (copy-paste slip),
    # which made server logs misleading.
    print('delete_by_id')
    try:
        alumno = Alumno.objects.get(id = id)
        alumno.delete()
        response = HttpResponse('Alumno eliminado -> {0}'.format(id))
        response.status_code = status.HTTP_200_OK
        return response
    except Alumno.DoesNotExist as err:
        print(err)
        response = HttpResponse('Alumno no encontrado. Error al borrando por id -> {0}'.format(id))
        response.status_code = status.HTTP_404_NOT_FOUND
        return response
    except Exception as err:
        print(err)
        response = HttpResponse('Error al borrar por id -> {0}'.format(id))
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        return response
fa6a2dc51484c3c89963fe67d7766fdac663852b | Python | kldaji/PS | /baekjoon/Implementation/13460-구슬탈출2.py | UTF-8 | 1,620 | 3.390625 | 3 | [] | no_license | """
<Implementation>
<BFS>
"""
from sys import *
from collections import deque
# Four tilt directions as paired (dx, dy) deltas: down, right, up, left.
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
# INPUT: board dimensions, then the N x M board itself.
N, M = map(int, stdin.readline().rstrip().split())
BOARD = [list(stdin.readline().rstrip()) for _ in range(N)]
# VISIT[rx][ry][bx][by]: has this joint (red, blue) position been enqueued?
VISIT = [[[[False] * M for _ in range(N)] for _ in range(M)] for _ in range(N)]
def move(x, y, dx, dy):
    """Roll a ball from (x, y) one cell at a time along direction (dx, dy)
    until it sits on the hole 'O' or the next cell is a wall '#'.
    Returns the final (row, col) and the number of cells travelled."""
    steps = 0
    while True:
        if BOARD[x][y] == "O":            # fell into the hole: stop here
            break
        if BOARD[x + dx][y + dy] == "#":  # wall ahead: rest against it
            break
        x, y = x + dx, y + dy
        steps += 1
    return x, y, steps
def bfs():
    """BFS over joint (red, blue) ball positions.

    Returns the minimum number of board tilts (at most 10) that drops the
    red ball into the hole 'O' without the blue ball falling in, or -1 if
    that cannot be done.
    """
    QUEUE = deque()  # queue of states [rx, ry, bx, by, depth]
    # Locate the starting cells of the Red and Blue balls.
    for i in range(N):
        for j in range(M):
            if BOARD[i][j] == "R":
                rx, ry = i, j
            elif BOARD[i][j] == "B":
                bx, by = i, j
    # start: depth counts tilts and begins at 1 (checked after popping).
    QUEUE.append([rx, ry, bx, by, 1])
    VISIT[rx][ry][bx][by] = True
    # BFS over the four tilt directions.
    while QUEUE:
        rx, ry, bx, by, d = QUEUE.popleft()
        if d > 10:
            return -1  # more than 10 tilts needed
        for i in range(4):
            # Roll both balls independently in direction i.
            nrx, nry, rd = move(rx, ry, dx[i], dy[i])
            nbx, nby, bd = move(bx, by, dx[i], dy[i])
            # Discard the move entirely if the blue ball fell into the hole.
            if BOARD[nbx][nby] != "O":
                if BOARD[nrx][nry] == "O":
                    return d  # red ball in, blue ball out: success
                if nrx == nbx and nry == nby:
                    # Both balls cannot share a cell: the one that travelled
                    # farther arrived later, so it backs off one step.
                    if rd > bd:
                        nrx -= dx[i]
                        nry -= dy[i]
                    else:
                        nbx -= dx[i]
                        nby -= dy[i]
                if not VISIT[nrx][nry][nbx][nby]:
                    VISIT[nrx][nry][nbx][nby] = True
                    QUEUE.append([nrx, nry, nbx, nby, d + 1])
    return -1
print(bfs()) | true |
77fbefbf01e4490734a15fde8ebf38fbbefa8bc4 | Python | andrewedbert/My-Programming-Exercise | /ProblemPython2.py | UTF-8 | 103 | 3.75 | 4 | [] | no_license | x=int(input('Silahkan masukkan angka berapapun: '))
y=x**2
print('Kuadrat dari '+str(x)+' = '+str(y)) | true |
1429ff4c76b94793a3434ac891e4adc4cd7612b0 | Python | ostwald/python-lib | /asn/AsnFileTester.py | UTF-8 | 2,038 | 2.59375 | 3 | [] | no_license | import os, sys
import util
from StdDocument import StdDocument
class AsnFileTester (StdDocument):
    """Check an ASN standards XML document for child-standard IDs that have
    no corresponding node in the document (Python 2 code).

    Prints a per-file report on construction; documents with missing
    standards can optionally be copied (beautified) into badDocs/.
    """

    # When True, a beautified copy of each failing file is written to badDocs/.
    writeBadFiles = False

    def __init__ (self, path):
        self.path = path
        StdDocument.__init__ (self, path)
        # IDs referenced as children but absent from the document.
        self.missingStandards = self.getMissingNodes()
        print "\n%s" % os.path.basename (path)
        if self.missingStandards:
            if self.writeBadFiles:
                self.write ()
            print "\t%d MISSING standards" % len(self.missingStandards)
            for m in self.missingStandards:
                print "\t",m
        else:
            print "\tOK"

    def getMissingNodes (self):
        """Scan every standard's child list; return child IDs with no node."""
        missing = []
        for id in self.keys():
            std = self[id]
            if std.children:
                for childId in std.children:
                    if not self[childId]:
                        missing.append (childId)
        return missing

    # NOTE(review): class-level mutable list — shared across ALL instances,
    # so traversal results would accumulate between files; confirm intended.
    missing_by_traversal = []

    def checkNodeStructure (self):
        """Depth-first variant of the check, starting from the root node."""
        self.visit (self.root.id)
        return self.missing_by_traversal

    def visit (self, id):
        # Recursively walk children: record missing ones, descend into
        # the ones that are present.
        std = self[id]
        if std.children:
            for childId in std.children:
                if not self[childId]:
                    self.missing_by_traversal.append (childId)
                else:
                    self.visit (childId)

    def write (self):
        """Write a beautified copy of the (bad) source file into badDocs/."""
        path = self.asnRecord.path
        util.beautify (path, os.path.join ("badDocs", os.path.basename(path)))
def dirTest (dir):
print "\nChecking %s\n" % dir
for filename in os.listdir (dir):
if not filename.lower().endswith (".xml"): continue
path = os.path.join (dir, filename)
tester = AsnFileTester (path)
def fileTest ():
    """Run AsnFileTester on one hand-picked NCTM standards file (Python 2)."""
    dir = "/Documents/Work/DLS/ASN/globe/"
    # filename = "1.4.1Math-2000-National Council of Teachers of Mathematics (NCTM)-Principles and Standards for School.xml"
    filename = "Math-2000-National Council of Teachers of Mathematics (NCTM)-Principles and Standards for School.xml"
    path = os.path.join (dir, filename)
    print "\nChecking %s\n" % path
    AsnFileTester (path)
if __name__ == '__main__':
# fileTest()
dirTest ("/Documents/Work/DLS/ASN/globe/")
| true |
fcfe18befcbcf08aa30b8a7b385fa009533ec6d2 | Python | eishm721/etf-trader | /Other/backtracking-alg.py | UTF-8 | 4,067 | 3.421875 | 3 | [] | no_license | """
FILENAME: etfTrader
Implements modified backtracking algorithm to find optimium
portfolio distribution for trading ETF option contracts
using modified straddle-based strategy
"""
import collections
import scrapePrices as sp
SHARES_PER_CONTRACT = 100
def getCurrPrice(self, stock):
    """
    Extract real-time market price for given stock ticker.
    Parses HTML code for Yahoo Finance website.

    NOTE(review): this is written like a method (takes `self`, reads
    self.price) but sits at module level, and neither `requests` nor
    `lxml.html` is imported in this file — it likely belongs to a class in
    scrapePrices; confirm before use. Scraping by CSS class name is brittle:
    a Yahoo layout change silently breaks the XPath below.
    """
    page = requests.get(self.price.format(stock, stock))
    tree = html.fromstring(page.content)
    # First span carrying Yahoo's price style class holds the quote text.
    return float(tree.xpath('//span[@class="Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)"]/text()')[0])
class ETFCalculator:
    """Search for the ETF put-contract portfolio that maximises collected
    premium under a cash constraint (cash-secured puts).

    Uses a memoized backtracking search over (etf, expiration) contracts.
    """

    def __init__(self, cash, etfs=('SPY', 'DIA', 'QQQ', 'IWM')):
        """
        Initialize with etfs to trade and avaliable cash
        """
        # Nested mapping etf -> expiration -> contract data; usage below
        # relies on 'strikePrice' and 'premium' keys (produced by scrapePrices).
        self.etfs = sp.StockExtractor().extractPutData(etfs)
        print("ETF Data extracted. Finding your assignments...\n")
        self.cash = cash
        # finds lowest possible strike price out of all contracts
        self.cheapestStock = float('inf')
        for etf in self.etfs:
            for expiration in self.etfs[etf]:
                self.cheapestStock = min(self.cheapestStock, self.etfs[etf][expiration]['strikePrice'])

    def __calcRemainingCash(self, assignment):
        """
        Calculates cash remaining after a particular assignment is taken
        (each contract reserves strike * SHARES_PER_CONTRACT dollars).
        """
        balance = self.cash
        for etf, expiration in assignment:
            balance -= (self.etfs[etf][expiration]['strikePrice'] * SHARES_PER_CONTRACT)
        return balance

    def __assignStocksRec(self, value, moneyLeft, assignment, cache):
        """
        Modified backtracking algorithm wrapper that finds the optimal assignment of stocks
        Implements dynamic programming for efficiency

        value      : premium collected so far
        moneyLeft  : remaining cash (in units of SHARES_PER_CONTRACT dollars)
        assignment : list of (etf, expiration) contracts chosen so far

        NOTE(review): on a cache hit the cached *value* is returned together
        with the caller's current `assignment`, not the assignment that
        produced the cached value — confirm that is acceptable.
        """
        if moneyLeft < 0:
            # assignment is not valid
            return moneyLeft, assignment
        if moneyLeft < self.cheapestStock:
            # can't assign more stocks, return current value/premium
            return value, assignment
        if (value, moneyLeft) in cache:
            # precomputed value
            return cache[(value, moneyLeft)], assignment
        maxValue = 0
        bestAssignment = None
        # try assigning 1 more of each ETF to current assignment and recursively pick ETF w/ highest value
        for etf in self.etfs:
            for expiration in self.etfs[etf]:
                tempVal = value + self.etfs[etf][expiration]['premium']
                tempMoney = moneyLeft - self.etfs[etf][expiration]['strikePrice']
                currVal, currAssignment = self.__assignStocksRec(tempVal, tempMoney, assignment + [(etf, expiration)], cache)
                if currVal > maxValue:
                    maxValue = currVal
                    bestAssignment = currAssignment
        cache[(value, moneyLeft)] = maxValue
        return maxValue, bestAssignment

    def __formatOutput(self, assignment, value, cashRemaining):
        """
        Formats all return values from backtracking in useable form
        (assignment collapsed to contract -> count via Counter).
        """
        return {
            'Put Options': self.etfs,
            'Optimal Assignment': dict(collections.Counter(assignment)),
            'Value ($x100)': float(str(round(value * SHARES_PER_CONTRACT, 2))),
            'Cash Remaining ($)': cashRemaining
        }

    def assignStocks(self):
        """
        Calculates optimal portfolio distribution given current cash and
        current options trading prices
        """
        if self.cash < self.cheapestStock:
            # cannot afford any stocks, exit early
            return self.__formatOutput([], 0, self.cash)
        # Search in units of SHARES_PER_CONTRACT dollars (hence the //).
        assignmentValue, assignment = self.__assignStocksRec(0, self.cash // SHARES_PER_CONTRACT, [], {})
        return self.__formatOutput(assignment, assignmentValue, self.__calcRemainingCash(assignment))
def tests():
    """Smoke test: build a calculator with $120k cash and print its result."""
    assignments = ETFCalculator(cash=120000).assignStocks()
    print()
    for key, value in assignments.items():
        print(key+":", value)
if __name__ == '__main__':
tests()
| true |
286eff873545c38e5dcb7b3b81a45ac07b3a72c0 | Python | wyx13452563584/test0615wangyuxin | /App/fun.py | UTF-8 | 2,501 | 2.546875 | 3 | [] | no_license | from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.urls import reverse
import numpy as np
import pandas as pd
import math
from pandas import Series,DataFrame
import tushare as ts
import matplotlib.pyplot as plt
def create_boll(data, ndays):
    """Append Bollinger-band columns based on the ndays-period rolling
    statistics of the 'close' column.

    Adds, in order: 'bu' upper band (mean + 2*std), 'bl' lower band
    (mean - 2*std) and 'bm' the rolling mean itself. The first ndays-1
    rows of each new column are NaN. Returns a new DataFrame.
    """
    middle = data['close'].rolling(ndays).mean()
    spread = 2 * data['close'].rolling(ndays).std()
    data = data.join(pd.Series(middle + spread, name='bu'))
    data = data.join(pd.Series(middle - spread, name='bl'))
    data = data.join(pd.Series(middle, name='bm'))
    return data
def guiyi(x):
    """Range-scale x: (x - 0) / (max(x) - min(x)).

    Note the floor is fixed at 0, not min(x), so this equals standard
    min-max normalization only when min(x) == 0.
    """
    value_range = max(x) - min(x)
    return (x - 0) / value_range
def fun_y(lmc):
    """Build a feature matrix X and label vector y from an indicator frame.

    lmc must carry columns 'liangfugy', 'close' and 'bm' (Bollinger middle
    band). X holds the current 'liangfugy' value plus its 70 previous lags;
    y is 1 on rows where a "buy" condition on future prices holds, else 0.
    Returns (X, y).

    NOTE(review): the label loops below index trimmed Series with positional
    i (e.g. yy1['bm'][i] for i starting at 0) while the row labels start at
    70 after the [70:-10] slice — verify this runs against real input.
    """
    # Create shifted columns; shift(+k) moves data downwards, so ls{k} is
    # the 'liangfugy' value k rows earlier.
    X = lmc[['liangfugy']]
    yy = lmc[['close', 'bm']]
    for i in range(0, 70):
        X['ls{}'.format(str(i + 1))] = lmc['liangfugy'].shift(i + 1)
    for i in range(0, 10):
        # wl{k}: close price k rows in the future (negative shift).
        yy['wl{}'.format(str(i + 1))] = lmc['close'].shift(-1 - i)
    yy['bm32'] = lmc['bm'].shift(32)
    # Prepare the data: keep the newest row aside in X0, then trim the first
    # 70 rows (incomplete lags) and the last 10 (incomplete future windows).
    X0 = X[-1:]
    X = X[70:-10]
    yy = yy[70:-10]
    yy1 = yy.copy()
    close = yy.pop('close')
    bm32 = yy.pop('bm32')
    bm = yy.pop('bm')
    yy2 = yy.copy()  # now only the wl1..wl10 future-close columns remain
    # Buy-point conditions:
    yy['B1'] = (yy2.max(axis=1) - bm) / close * 100 > 2  # max rise over the next 10 periods exceeds 2%
    yy['B2'] = (yy1[['wl1', 'wl2', 'wl3']].mean(axis=1) - bm) / close * 100 < 1  # mean rise over next 3 periods under 1%
    yy['B3'] = False
    for i in range(0, len(yy1)):
        # moving-average value above its value 32 periods earlier (uptrend)
        if yy1['bm'][i] > yy1['bm32'][i]:
            yy['B3'][i] = True
    yy['B'] = yy['B1'] * yy['B2'] * yy['B3']  # locate buy points (all three conditions)
    # Sell-point logic kept for reference but disabled:
    # yy['S1'] = (yy2.min(axis = 1)-bm)/close*100 < -2
    # yy['S2'] = (yy1[['wl1','wl2','wl3']].mean(axis = 1)-bm)/close*100 > -1
    # yy['S3'] = False
    # for i in range(0,len(yy1)):
    #     #均线值大于历史均线值
    #     if yy1['bm'][i] < yy1['bm32'][i]:
    #         yy['S3'][i] = True
    # yy['S'] = yy['S1']*yy['S2']*yy['S3']
    # Label column: 1 = buy, 0 = hold (the -1 sell label is disabled above).
    yy['y'] = 0
    for i in range(0, len(yy)):
        if yy['B'][i]:
            yy['y'][i] = 1
        # if yy['S'][i]:
        #     yy['y'][i] = -1
    y = yy['y']
    # Smooth out single-period spikes: a label differing from both
    # neighbours is overwritten with the following label.
    for i in range(1, len(y) - 1):
        if y[i] != y[i + 1] and y[i] != y[i - 1]:
            y[i] = y[i + 1]
    return X, y
4491fdafe5ce62dc8b26f3a412c93b82f7e5514f | Python | defgsus/cppy | /cppy.py | UTF-8 | 1,705 | 2.6875 | 3 | [
"MIT"
] | permissive | """
Commandline tool to interface with cppy
"""
import argparse, sys, os
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="cppy interface - python to c++ the hard way"
)
parser.add_argument(
"-i",
type=str, #argparse.FileType("r"),
help="The name of the python module file to convert to c++. "
"Should be a fully qualified filename")
parser.add_argument(
"-n",
type=str,
help="The name of the output file, defaults to the name of the module file. "
"Example -n mymod, to generate mymod.h/mymod.cpp")
#parser.add_argument(
# '-o', default=sys.stdout, type=argparse.FileType('w'),
# help='The output file, defaults to stdout')
#parser.add_argument(
# 'command', type=str,
# default="dump",
# help='What to do')
args = parser.parse_args()
if not args.i:
parser.print_help()
exit(-1)
module_file = args.i
module_name = os.path.basename(module_file).split(".")[0]
if args.n:
out_name = str(args.n)
else:
out_name = module_name
out_name = os.path.join(os.path.dirname(module_file), out_name)
print("Importing module '%s'" % module_name)
# Python 3.4 way
from importlib.machinery import SourceFileLoader
module = SourceFileLoader(module_name, module_file).load_module()
from cppy import compiler
ctx = compiler.compile(module)
print("Writing output to %s.cpp" % out_name)
ctx.write_to_file(out_name + ".h", ctx.render_hpp())
ctx.write_to_file(out_name + ".cpp", ctx.render_cpp())
#ctx.context.dump()
#eval("import %s" % args.i)
| true |
19d925cd57a00406aa1155178cc10cd58f564058 | Python | heldersepu/hs-scripts | /Notepad++/superReplace_2.py | UTF-8 | 573 | 2.859375 | 3 | [] | no_license |
# Notepad++ PythonScript macro operating on the current buffer.
# Appears to reshape wiki-table-style markup; regex patterns kept verbatim.
editor.convertEOLs(2)            # normalise line endings (mode 2)
editor.rereplace("{.+", "")      # blank lines starting with '{'
editor.rereplace("!.+", "")      # blank lines starting with '!'
editor.rereplace(".+}", "")      # blank lines ending with '}'
editor.rereplace("|-.+", "|-")   # strip text following a '|-' row marker
#editor.rereplace("=.+", "")
editor.replace("\n", "")         # join the whole buffer onto one line...
editor.replace("|-", "\n")       # ...then split again at each '|-' marker
editor.rereplace("|(.+)|(.+)", " |\\2 =\\1")  # swap the two '|'-delimited cells into ' |B =A' form
def testContents(contents, lineNumber, totalLines):
    """Per-line callback for editor.forEachLine: on lines containing both
    '|' and '=' (each past column 0), lowercase the text between them and
    drop the line's final character."""
    posOne = contents.find("|")
    posTwo = contents.find("=")
    if ((posOne > 0) and (posTwo > 0)):
        # Rebuild: prefix + lowercased [|..=) segment + remainder minus last char.
        editor.replaceLine(lineNumber, contents[0:posOne] + contents[posOne:posTwo].lower() + contents[posTwo:-1])
editor.forEachLine(testContents)
188205c616573b14b7bc393e229f491013a06c8c | Python | kNosek1618/bootpycamp_functions_1.0 | /DocumentingFunctions.py | UTF-8 | 229 | 2.90625 | 3 | [] | no_license |
def say_hello():
    """A simple function that return the string hello"""
    greeting = "Hello!"
    return greeting
print(say_hello.__doc__)
# # # ERSULT # # #
# A simple function that return the string hello
#####################################
| true |
eff046c6b810108b5f41c7b5b9a2ca27101c49ec | Python | OmarMedhat22/DataSelectionSwapping | /DataP_3.py | UTF-8 | 3,911 | 2.875 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import re
df = pd.read_csv('NBO_WithGroupingCategories_updated.csv')
def VisitsCategoriesColumns(df,Visits,Categories,VisitNumber=1):
    """Select visit/category columns from a wide NBO frame.

    Columns are expected to be named with 'Visit'/'visit' and 'Cat'/'cat'
    plus exactly two digits: first the visit number, then the category
    number. Keeps `Visits` consecutive visits starting at `VisitNumber`,
    with the first `Categories` categories of each visit, drops rows whose
    main (first-category) visit columns are NaN, and fills remaining NaNs
    with 0.

    Returns (df, MainVisits) on success, or -1 after printing a message on
    any validation/processing failure.
    """
    ColumnsNames = [i for i in df.columns]
    TotalVisits = []
    TotalNumberOfCategories = []
    TotalNumberOfVisits = []
    # Collect visit/category columns and the two digits embedded in each name.
    for i in range(0,len(ColumnsNames)):
        visit = re.findall(r'Visit|visit',ColumnsNames[i])
        cat = re.findall(r'Cat|cat',ColumnsNames[i])
        if len(visit) >0 and len(cat)>0:
            order = re.findall(r'\d',ColumnsNames[i])
            if len(order)==2:
                TotalVisits.append(ColumnsNames[i])
                TotalNumberOfCategories.append(int(order[1]))
                TotalNumberOfVisits.append(int(order[0]))
    if len(TotalNumberOfVisits)<1:
        print("Wrong input data")
        return -1
    # Highest digit seen = number of visits/categories available in the data.
    TotalNumberOfCategories = sorted(TotalNumberOfCategories)
    TotalNumberOfVisits = sorted(TotalNumberOfVisits)
    NumberOfCategories = TotalNumberOfCategories[-1]
    NumberOfVisits = TotalNumberOfVisits[-1]
    if Visits > NumberOfVisits:
        print("Number Of visits Required Exceed Number of visits in data")
        return -1
    if Categories > NumberOfCategories:
        print("Number Of Categories Required Exceed Number of Categories in data")
        return -1
    # The requested window [VisitNumber, VisitNumber+Visits-1] must fit.
    RequiredVisits = VisitNumber+Visits-1
    if RequiredVisits>NumberOfVisits:
        print("There are only "+str(NumberOfVisits)+" in the dataset")
        return -1
    # NOTE(review): bare except below hides the real failure cause.
    try:
        FilteredVisits=[]
        print(NumberOfCategories)
        print(NumberOfVisits) #Visits
        # Take the first `Categories` column names of each visit block,
        # advancing one full visit (NumberOfCategories columns) per pass.
        for i in range(0,NumberOfVisits):
            for j in range(0,Categories):
                FilteredVisits.append(TotalVisits[j])
            TotalVisits=TotalVisits[NumberOfCategories:]
        # Slice out the requested window of visits.
        Start = VisitNumber*Categories-Categories
        End = Visits*Categories +Start
        FilteredVisits = FilteredVisits[Start:End]
        # MainVisits: the first-category column of each selected visit.
        MainVisits = FilteredVisits[::Categories]
        print(MainVisits)
        df = df[FilteredVisits]
        df = df.dropna(subset=MainVisits)
        df=df.reset_index()
        del df['index']
        df = df.fillna(0)
    except:
        print("Wrong input data")
        return -1
    return df,MainVisits
def swapping(df,category_number,visits,MainVisits):
    """Swap matching category values from earlier visits into the last
    visit's block, collecting unusable rows for removal.

    df              : frame produced by VisitsCategoriesColumns
    category_number : categories per visit
    visits          : number of visit blocks in df
    MainVisits      : first-category column name of each visit

    Returns (features, target): df[MainVisits[:-1]] and df[MainVisits[-1]]
    with the collected rows dropped.

    NOTE(review): assignments via df.iloc[i][col] are chained indexing and
    may silently fail to write back in newer pandas — verify results.
    """
    # Self-assignments kept from the original (no effect).
    category_number=category_number
    visits=visits
    close=True
    drop=[]
    drop2=[]  # unused
    for i in range(0,df.shape[0]):
        # A row whose predecessor found no match is queued for dropping.
        if not close:
            drop.append(i-1)
        close=False
        # Walk the last visit's category columns for this row.
        for lables in range(df.shape[1]-category_number,df.shape[1]):
            if df.iloc[i][lables] ==0:
                drop.append(i)  # empty target category: row unusable
                break
            # Look for the same value in the earlier visits' blocks;
            # `switch` tracks the start column of the block being scanned.
            switch = -category_number
            for j in range(0,(visits-1)*category_number):
                if (j)%category_number == 0:
                    switch = switch+category_number
                if df.iloc[i][j] == df.iloc[i][lables]:
                    df.iloc[i][switch] = df.iloc[i][j]
                    close = True
                    break
            if close:
                # Copy the matched value into the last block's first column.
                df.iloc[i][df.shape[1]-category_number]=df.iloc[i][lables]
                break
    # NOTE(review): this loop only breaks at the first index divisible by
    # 10 — it has no lasting effect; likely leftover debugging code.
    for i in drop:
        #print(df.iloc[i])
        if i%10==0:
            break
    df = df[MainVisits]
    #df = df[::category_number]
    df = df.drop(drop)
    return df[MainVisits[:-1]],df[MainVisits[-1]]
# --- driver: select 3 visits x 5 categories starting at visit 1, ---
# --- then build the swapped feature/target frames.               ---
NumberOfVistis = 3   # (sic) number of consecutive visits to use
CategoryNumber = 5
StartFromVisit = 1
df,MainVisits = VisitsCategoriesColumns(df,NumberOfVistis,CategoryNumber,StartFromVisit)
x,y = swapping(df,CategoryNumber,NumberOfVistis,MainVisits)
print(x.head())
print(y.head())
| true |
72d1a5f9c79eb6b62569100f674d6a8db9c54ffe | Python | Stef-aap/Woning | /Heat_Convection.py | UTF-8 | 19,430 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 14:06:56 2019
@author: StefM
"""
PrintLevel = 5        # verbosity of report output
Emissiviteit = 0.95   # surface emissivity used in the radiation term
Lamel_Eff = 1.3       # effectiveness factor applied to convector fins
Print_Checks = True
# Background material:
# https://www.engineeringtoolbox.com/air-absolute-kinematic-viscosity-d_601.html
# In free convection the Grashof number plays the same role as the Reynolds
# number in forced convection; for vertical plates the flow turns turbulent
# around Grashof > 10**9.
# Cross-check tool: https://www.thermal-wizard.com/tmwiz/default.htm
# https://en.wikipedia.org/wiki/Grashof_number
Bolzman = 0.0000000567  # Stefan-Boltzmann constant, 5.67e-8 [W/m2.K^4] (name sic)
# ***************************************************************************
# Bereken de capaciteit bij een andere temperatuur
# ***************************************************************************
def Capacity_dT(dT1, P1, dT2, n=1.33):
    """Rescale a radiator capacity to a different temperature delta.

    P1 is the emitted power [W] measured at delta-T dT1 [K]; returns the
    power at delta-T dT2, truncated to int, per the radiator exponent law
    P2 = P1 * (dT2/dT1)**n (n ~ 1.3 is typical for panel radiators).
    """
    ratio = (dT2 / dT1) ** n
    return int(P1 * ratio)
# ***************************************************************************
# Bereken de capaciteit
# ***************************************************************************
def Rad_Conv ( Th, Tl, Hoogte, Breedte, N_Panel, N_Lamel, AirSpeed=0 ) :
    """Estimate the heat output [W] of a panel radiator.

    Th / Tl  : hot (radiator) and ambient temperature [C]
    Hoogte   : panel height [m];  Breedte: panel width [m]
    N_Panel  : number of panels;  N_Lamel: number of fin rows
    AirSpeed : forced air speed [m/s] between the panels (0 = natural only)
    Returns (Radiation, Convection, Total), each truncated to int watts.
    """
    # ************************************
    # Area of one side of one panel
    # ************************************
    Opp = Hoogte * Breedte
    # ************************************
    # Radiation only matters for the two outer faces
    # (Stefan-Boltzmann law between the two absolute temperatures)
    # ************************************
    Radiation = int ( Emissiviteit * Bolzman * ( (273+Th)**4 - (273+Tl)**4 ) * 2 * Opp )
    # ************************************
    # Natural convection on the two outer surfaces
    # ************************************
    Delta_T = Th - Tl
    X = Heat_Transfer ()
    X.Convection_h ( Delta_T, Hoogte )
    h_WOC = X.WOC
    Convection = int ( h_WOC * ( Th - Tl ) * 2 * Opp )
    # ************************************
    # Convection on the inner surfaces (facing panels plus the fins,
    # fins weighted by Lamel_Eff), with optional forced airflow
    # ************************************
    Delta_T = Th - Tl
    X = Heat_Transfer ()
    X.Convection_h ( Delta_T, Hoogte, AirSpeed )
    h_WOC_Forced = X.WOC
    Convection += int ( h_WOC_Forced * ( Th - Tl ) * ( N_Panel - 1 + Lamel_Eff * N_Lamel ) * 2 * Opp )
    Total = Radiation + Convection
    return Radiation, Convection, Total
# ***************************************************************************
# ***************************************************************************
class Heat_Transfer ( object ) :
    """Convective heat-transfer coefficient for a vertical plate in air,
    supporting natural convection and an optional forced-airflow term.

    All intermediate dimensionless numbers are kept as attributes so
    callers (and __repr__) can inspect them after Convection_h().
    """

    # *********************************************
    def __init__ ( self ) :
        # Air properties around room temperature.
        self.Specific_Heat = 1007  # [J/kg.K]
        self.Thermal_Conductivity = 0.0261  # [W/m.K]
        self.Gravity = 9.81  # [m/s2]
        self.Thermal_Expansion = 1/300  # [1/K] equal to approximately 1/T

    # *********************************************
    # Computes the heat-transfer coefficient h (stored in self.WOC)
    # for both free and forced air flow
    # *********************************************
    def Convection_h ( self, Delta_T, Hoogte, Forced_Speed = 0 ) :
        """Delta_T [K] plate-to-air difference, Hoogte [m] plate height,
        Forced_Speed [m/s] > 0 adds a forced-convection contribution."""
        self.Delta_T = Delta_T
        self.Hoogte = Hoogte
        self.Forced_Speed = Forced_Speed
        # **************************************
        # Viscosity is temperature dependent; Sutherland's formula is used
        # here (other approximations exist). The mean radiator temperature
        # is taken — not entirely clear whether that is the best choice,
        # but using the film temperature instead makes little difference.
        # **************************************
        b = 1.458e-6
        S = 110.4
        T = 273 + 20 + Delta_T
        self.Viscosity_Dynamic = b * ( T ** ( 3/2 )) / ( T + S )
        # ************************************
        # Density is temperature dependent; here the film temperature
        # (halfway between plate and air) is clearly the one to use.
        # ************************************
        T = 273 + 20 + Delta_T / 2
        self.Density = 358.517 * ( T ** -1.00212 )
        self.Prandtl = self.Viscosity_Dynamic * self.Specific_Heat / self.Thermal_Conductivity
        self.Grashof = int ( self.Gravity * \
                             self.Thermal_Expansion * \
                             self.Delta_T * \
                             ( self.Hoogte ** 3 ) * \
                             ( self.Density ** 2 ) \
                             / \
                             ( self.Viscosity_Dynamic ** 2 ) )
        self.Rayleigh = int ( self.Grashof * self.Prandtl )
        # Churchill-Chu correlations for a vertical plate: the laminar form
        # below Ra ~ 1e9, the all-Rayleigh form above it.
        if self.Rayleigh < 1e9 :
            self.Nusselt = 0.68 + 0.67 * ( self.Rayleigh ** (1/4) ) / \
                           (( 1 + ( 0.492 / self.Prandtl ) ** ( 9/16 )) ** (4/9))
        else :
            self.Nusselt = ( 0.825 + 0.387 * ( self.Rayleigh ** (1/6) ) / \
                           (( 1 + ( 0.492 / self.Prandtl ) ** (9/16) ) ** ( 8/27 )) \
                           ) ** 2
        self.h_WOC = self.Nusselt * self.Thermal_Conductivity / self.Hoogte
        self.Use_Reynolds = False
        self.Reynolds = 0
        self.Grashof_Reynolds = 0
        self.h_WOC_Forced = 0
        self.WOC = self.h_WOC
        if self.Forced_Speed > 0 :
            # Laminar flat-plate forced convection, Nu = 0.664 Re^0.5 Pr^0.33,
            # combined with the natural-convection term via a cube norm.
            self.Reynolds = self.Forced_Speed * self.Hoogte * self.Density / self.Viscosity_Dynamic
            self.Nusselt_Forced = 0.664 * ( self.Reynolds ** 0.5 ) * ( self.Prandtl ** 0.33 )
            self.h_WOC_Forced = self.Nusselt_Forced * self.Thermal_Conductivity / self.Hoogte
            self.Grashof_Reynolds = self.Grashof / ( self.Reynolds ** 2 )
            self.Nusselt = ( ( self.Nusselt ** 3 ) + ( self.Nusselt_Forced ** 3 ) ) ** (1/3)
            self.WOC = self.Nusselt * self.Thermal_Conductivity / self.Hoogte
            # *************************************************
            # We always have at least natural convection, so forced airflow
            # would have to dominate before being used exclusively
            # (branch deliberately kept disabled):
            # *************************************************
            #if self.h_WOC_Forced > self.h_WOC :
            #    self.Use_Reynolds = True
            #    self.WOC = self.h_WOC_Forced

    # *********************************************
    def __repr__ ( self ) :
        # Numbers in parentheses appear to be reference/check values for
        # comparison against external calculators — origin not shown here.
        Line = "===== Convection: Delta_T = %i [K] Hoogte = %.1f [m] Forced_Speed = %.1f [m/s]\n" % (
            self.Delta_T, self.Hoogte, self.Forced_Speed )
        Line += "Prandtl (=0.714) : %.3f\n" % ( self.Prandtl )
        Line += "Grashof (=9.02e8) : %.2e\n" % ( self.Grashof )
        if self.Use_Reynolds :
            #Line += "Reynolds Turbulent : >5.00e+09 <=====\n"
            Line += "Reynolds (=6.44e8) : %.2e\n" % ( self.Reynolds )
            Line += "Nusselt (=82.6 ) : %.1f\n" % ( self.Nusselt_Forced )
        else :
            Line += "Rayleigh Turbulent : >5.00e+09 <=====\n"
            Line += "Rayleigh (=6.44e8) : %.2e\n" % ( self.Rayleigh )
            Line += "Nusselt (=82.6 ) : %.1f\n" % ( self.Nusselt )
        if self.Grashof_Reynolds != 0 :
            Line += "Gr / Re^2 : %.2f\n" % ( self.Grashof_Reynolds )
        Line += "h (=3.08 ) : %.2f [W/m2.K]\n" % ( self.WOC )
        return Line
# ***************************************************************************
# ***************************************************************************
class Radiator_Class ( object ) :
# ***********************************************
    def __init__ ( self, Name, Breedte, Hoogte, Type, Ptot=-1, Delta_T=-1, URL="", Px=35 ) :
        """Breedte/Hoogte are given in mm and stored in m. Type encodes the
        radiator construction as panels*10 + fin rows (e.g. 22 = 2 panels,
        2 fin rows). Ptot [W] with its Delta_T [K] is optional datasheet
        data; -1 means "not provided" and the values are then computed."""
        self.Name = Name
        self.Breedte = Breedte / 1000
        self.Hoogte = Hoogte / 1000
        self.Type = Type
        self.URL = URL
        self.Opp = self.Breedte * self.Hoogte
        # Decode panel and fin counts from the two-digit type code.
        self.N_Panel = Type // 10
        self.N_Lamel = Type % 10
        self.Vent_Aantal = 0  # no fans until Add_Ventilator() is called
        self.Px = Px  # custom delta-T [K] used for the P35 calculation
        # Capacities at delta-T 20/35/50/60 K; -1 = not yet known.
        self.P20 = -1
        self.P35 = -1
        self.P50 = -1
        self.P60 = -1
        # Fan-assisted ("VV") radiation/convection/total capacities,
        # filled in by Add_Ventilator(); -1 = not computed.
        self.P20_Rad_VV = -1
        self.P20_Conv_VV = -1
        self.P20_Tot_VV = -1
        self.P35_Rad_VV = -1
        self.P35_Conv_VV = -1
        self.P35_Tot_VV = -1
        self.P50_Rad_VV = -1
        self.P50_Conv_VV = -1
        self.P50_Tot_VV = -1
        self.P60_Rad_VV = -1
        self.P60_Conv_VV = -1
        self.P60_Tot_VV = -1
        # Datasheet capacity given: derive the other delta-T points from it
        # using the radiator exponent law (P35 is left to Capaciteit()).
        if ( Ptot > 0 ) and ( Delta_T > 0 ) :
            if Delta_T == 20 :
                self.P20 = Ptot
                self.P50 = Capacity_dT ( 20, Ptot, 50 )
                self.P60 = Capacity_dT ( 20, Ptot, 60 )
            elif Delta_T == 50 :
                self.P20 = Capacity_dT ( 50, Ptot, 20 )
                self.P50 = Ptot
                self.P60 = Capacity_dT ( 50, Ptot, 60 )
            elif Delta_T == 60 :
                self.P20 = Capacity_dT ( 60, Ptot, 20 )
                self.P50 = Capacity_dT ( 60, Ptot, 50 )
                self.P60 = Ptot
            else :
                self.P20 = Capacity_dT ( Delta_T, Ptot, 20 )
                self.P50 = Capacity_dT ( Delta_T, Ptot, 50 )
                self.P60 = Capacity_dT ( Delta_T, Ptot, 60 )
        self.Capaciteit ()
# ***********************************************
    def Capaciteit ( self ) :
        """Compute radiation/convection/total capacities at delta-T 20, Px,
        50 and 60 K (ambient fixed at 20 C), plus forced-airflow variants
        at 1/2/3 m/s for the 20 K case, and the radiation/convection
        percentage splits. Datasheet values (if set) take precedence over
        the computed totals."""
        self.P20_Radiation, self.P20_Convection, self.P20_Tot = \
            Rad_Conv ( Th=40, Tl=20,
                Hoogte=self.Hoogte, Breedte=self.Breedte,
                N_Panel=self.N_Panel, N_Lamel=self.N_Lamel)
        # "P35" actually uses the configurable delta-T self.Px (default 35).
        self.P35_Radiation, self.P35_Convection, self.P35_Tot = \
            Rad_Conv ( Th=20+self.Px, Tl=20,
                Hoogte=self.Hoogte, Breedte=self.Breedte,
                N_Panel=self.N_Panel, N_Lamel=self.N_Lamel)
        self.P50_Radiation, self.P50_Convection, self.P50_Tot = \
            Rad_Conv ( Th=70, Tl=20,
                Hoogte=self.Hoogte, Breedte=self.Breedte,
                N_Panel=self.N_Panel, N_Lamel=self.N_Lamel)
        self.P60_Radiation, self.P60_Convection, self.P60_Tot = \
            Rad_Conv ( Th=80, Tl=20,
                Hoogte=self.Hoogte, Breedte=self.Breedte,
                N_Panel=self.N_Panel, N_Lamel=self.N_Lamel)
        # **************************************************
        # And also compute a few values with forced airflow
        # (delta-T 20 K at 1, 2 and 3 m/s)
        # **************************************************
        self.P20_Rad_V1, self.P20_Conv_V1, self.P20_Tot_V1 = \
            Rad_Conv ( Th=40, Tl=20,
                Hoogte=self.Hoogte, Breedte=self.Breedte,
                N_Panel=self.N_Panel, N_Lamel=self.N_Lamel, AirSpeed=1 )
        self.P20_Rad_V2, self.P20_Conv_V2, self.P20_Tot_V2 = \
            Rad_Conv ( Th=40, Tl=20,
                Hoogte=self.Hoogte, Breedte=self.Breedte,
                N_Panel=self.N_Panel, N_Lamel=self.N_Lamel, AirSpeed=2 )
        self.P20_Rad_V3, self.P20_Conv_V3, self.P20_Tot_V3 = \
            Rad_Conv ( Th=40, Tl=20,
                Hoogte=self.Hoogte, Breedte=self.Breedte,
                N_Panel=self.N_Panel, N_Lamel=self.N_Lamel, AirSpeed=3 )
        # Percentage splits of radiation vs convection per delta-T.
        self.P20_perc_Radiation = round ( 100 * self.P20_Radiation / self.P20_Tot )
        self.P35_perc_Radiation = round ( 100 * self.P35_Radiation / self.P35_Tot )
        self.P50_perc_Radiation = round ( 100 * self.P50_Radiation / self.P50_Tot )
        self.P60_perc_Radiation = round ( 100 * self.P60_Radiation / self.P60_Tot )
        self.P20_perc_Convection = round ( 100 * self.P20_Convection / self.P20_Tot )
        self.P35_perc_Convection = round ( 100 * self.P35_Convection / self.P35_Tot )
        self.P50_perc_Convection = round ( 100 * self.P50_Convection / self.P50_Tot )
        self.P60_perc_Convection = round ( 100 * self.P60_Convection / self.P60_Tot )
        # Keep datasheet capacities when provided; otherwise adopt the
        # computed totals.
        if self.P20 == -1 : self.P20 = self.P20_Tot
        if self.P35 == -1 : self.P35 = self.P35_Tot
        if self.P50 == -1 : self.P50 = self.P50_Tot
        if self.P60 == -1 : self.P60 = self.P60_Tot
# ***********************************************
def Add_Ventilator ( self, Aantal=1, Diameter=12, Flow=100 ) :
self.Vent_Flow = Flow # [m3/hr]
self.Vent_Diameter = Diameter / 100 # [cm] ==> [m]
self.Vent_Aantal = Aantal
Opp_Tot = self.Vent_Diameter * self.Breedte
self.Vent_AirSpeed = self.Vent_Aantal * self.Vent_Flow / ( Opp_Tot* 3600 )
print ( "Ventilator, N=%i, Flow=%i[m3/hr], Forced AirSpeed = %.1f [m/s]" % (
self.Vent_Aantal, self.Vent_Flow, self.Vent_AirSpeed ) )
self.P20_Rad_VV, self.P20_Conv_VV, self.P20_Tot_VV = \
Rad_Conv ( Th=40, Tl=20,
Hoogte=self.Hoogte, Breedte=self.Breedte,
N_Panel=self.N_Panel, N_Lamel=self.N_Lamel, AirSpeed=self.Vent_AirSpeed )
self.P35_Rad_VV, self.P35_Conv_VV, self.P35_Tot_VV = \
Rad_Conv ( Th=20+self.Px, Tl=20,
Hoogte=self.Hoogte, Breedte=self.Breedte,
N_Panel=self.N_Panel, N_Lamel=self.N_Lamel, AirSpeed=self.Vent_AirSpeed )
self.P50_Rad_VV, self.P50_Conv_VV, self.P50_Tot_VV = \
Rad_Conv ( Th=70, Tl=20,
Hoogte=self.Hoogte, Breedte=self.Breedte,
N_Panel=self.N_Panel, N_Lamel=self.N_Lamel, AirSpeed=self.Vent_AirSpeed )
self.P60_Rad_VV, self.P60_Conv_VV, self.P60_Tot_VV = \
Rad_Conv ( Th=80, Tl=20,
Hoogte=self.Hoogte, Breedte=self.Breedte,
N_Panel=self.N_Panel, N_Lamel=self.N_Lamel, AirSpeed=self.Vent_AirSpeed )
# ***********************************************
    def __repr__ ( self ) :
        """Render a multi-section textual report of the radiator.

        Sections: a short summary, then one detailed block per reference
        operating point (P20, P35/Px, P50, P60), each with radiation /
        convection split and optional forced-air figures.
        PrintLevel, Lamel_Eff and Emissiviteit are module-level settings.
        NOTE(review): both the summary and the detail section use the
        same `PrintLevel > 3` threshold — confirm whether two different
        levels were intended.
        """
        Line = ""
        Line += "Name = %s\n" % self.Name
        if PrintLevel > 3 :
            Line += "Breedte = %s [m]\n" % self.Breedte
            Line += "Hoogte = %s [m]\n" % self.Hoogte
            Line += "Oppervlakte = %s [m2]\n" % self.Opp
            Line += "Type = %s\n" % self.Type
            if self.Vent_Aantal > 0 :
                Line += "Ventilator = %i * %i [m3/hr]\n" % ( self.Vent_Aantal, self.Vent_Flow )
        if PrintLevel > 3 :
            # --- P20 section (Delta-T = 20 K) ---
            Line += "\n"
            Line += "Type=%s Hoogte=%s[m] Breedt=%s[m] Lamel-eff=%s emmissiviteit=%s\n" % (
                self.Type, self.Hoogte, self.Breedte, Lamel_Eff, Emissiviteit )
            if self.Vent_Aantal > 0 :
                Line += "Ventilator, N=%i, Flow=%i[m3/hr], Forced AirSpeed = %.1f [m/s]\n" % (
                    self.Vent_Aantal, self.Vent_Flow, self.Vent_AirSpeed )
            if self.P20_Tot != self.P20 :
                Line += "P20-Fabrikant = %s [W]\n" % ( self.P20 )
            Line += "P20-Totaal = %s [W]\n" % ( self.P20_Tot )
            Line += "P20-Radiation = %s [W]\n" % self.P20_Radiation
            Line += "P20-Convection = %s [W]\n" % self.P20_Convection
            Line += "P20 Rad / Conv = %i %% / %i %%\n" % ( self.P20_perc_Radiation, self.P20_perc_Convection )
            if self.Vent_Aantal > 0 :
                Line += "P20-Forced-Conv = %i [W]\n" % ( self.P20_Conv_VV )
                Line += "P20-Forced-Tot = %i [W]\n" % ( self.P20_Tot_VV )
            # --- P35 section (Delta-T = self.Px, labels use "P%i" % Px) ---
            Line += "\n"
            Line += "Type=%s Hoogte=%s[m] Breedt=%s[m] Lamel-eff=%s emmissiviteit=%s\n" % (
                self.Type, self.Hoogte, self.Breedte, Lamel_Eff, Emissiviteit )
            if self.Vent_Aantal > 0 :
                Line += "Ventilator, N=%i, Flow=%i[m3/hr], Forced AirSpeed = %.1f [m/s]\n" % (
                    self.Vent_Aantal, self.Vent_Flow, self.Vent_AirSpeed )
            if self.P35_Tot != self.P35 :
                Line += "P%i-Fabrikant = %s [W]\n" % ( self.Px, self.P35 )
            Line += "P%i-Totaal = %s [W]\n" % ( self.Px, self.P35_Tot )
            Line += "P%i-Radiation = %s [W]\n" % ( self.Px, self.P35_Radiation )
            Line += "P%i-Convection = %s [W]\n" % ( self.Px, self.P35_Convection )
            Line += "P%i Rad / Conv = %i %% / %i %%\n" % ( self.Px, self.P35_perc_Radiation, self.P35_perc_Convection )
            if self.Vent_Aantal > 0 :
                Line += "P%i-Forced-Conv = %i [W]\n" % ( self.Px, self.P35_Conv_VV )
                Line += "P%i-Forced-Tot = %i [W]\n" % ( self.Px, self.P35_Tot_VV )
            # --- P50 section (Delta-T = 50 K) ---
            Line += "\n"
            Line += "Type=%s Hoogte=%s[m] Breedt=%s[m] Lamel-eff=%s emmissiviteit=%s\n" % (
                self.Type, self.Hoogte, self.Breedte, Lamel_Eff, Emissiviteit )
            if self.Vent_Aantal > 0 :
                Line += "Ventilator, N=%i, Flow=%i[m3/hr], Forced AirSpeed = %.1f [m/s]\n" % (
                    self.Vent_Aantal, self.Vent_Flow, self.Vent_AirSpeed )
            if self.P50_Tot != self.P50 :
                Line += "P50-Fabrikant = %s [W]\n" % ( self.P50 )
            Line += "P50-Totaal = %s [W]\n" % ( self.P50_Tot )
            Line += "P50-Radiation = %s [W]\n" % self.P50_Radiation
            Line += "P50-Convection = %s [W]\n" % self.P50_Convection
            Line += "P50 Rad / Conv = %i %% / %i %%\n" % ( self.P50_perc_Radiation, self.P50_perc_Convection )
            if self.Vent_Aantal > 0 :
                Line += "P50-Forced-Conv = %i [W]\n" % ( self.P50_Conv_VV )
                Line += "P50-Forced-Tot = %i [W]\n" % ( self.P50_Tot_VV )
            # --- P60 section (Delta-T = 60 K) ---
            Line += "\n"
            Line += "Type=%s Hoogte=%s[m] Breedt=%s[m] Lamel-eff=%s emmissiviteit=%s\n" % (
                self.Type, self.Hoogte, self.Breedte, Lamel_Eff, Emissiviteit )
            if self.Vent_Aantal > 0 :
                Line += "Ventilator, N=%i, Flow=%i[m3/hr], Forced AirSpeed = %.1f [m/s]\n" % (
                    self.Vent_Aantal, self.Vent_Flow, self.Vent_AirSpeed )
            if self.P60_Tot != self.P60 :
                Line += "P60-Fabrikant = %s [W]\n" % ( self.P60 )
            Line += "P60-Totaal = %s [W]\n" % ( self.P60_Tot )
            Line += "P60-Radiation = %s [W]\n" % self.P60_Radiation
            Line += "P60-Convection = %s [W]\n" % self.P60_Convection
            Line += "P60 Rad / Conv = %i %% / %i %%\n" % ( self.P60_perc_Radiation, self.P60_perc_Convection )
            if self.Vent_Aantal > 0 :
                Line += "P60-Forced-Conv = %i [W]\n" % ( self.P60_Conv_VV )
                Line += "P60-Forced-Tot = %i [W]\n" % ( self.P60_Tot_VV )
        return Line
# ***************************************************************************
# ***************************************************************************
if __name__ == '__main__':
    # Demonstration / self-test of the heat-transfer helpers and the
    # radiator model.
    Hoogte = 0.7
    X = Heat_Transfer ()
    # *******************************************************
    # Heat-transfer coefficient as a function of Delta_T
    # (free convection only)
    # *******************************************************
    for Delta_T in [ 20, 30, 40, 50, 60 ] :
        X.Convection_h ( Delta_T, Hoogte )
        print ( "%i [Celsius] ==> %.1f [W/m2.K]" % ( Delta_T, X.WOC ) )
    print ( X )
    # *******************************************************
    # Heat-transfer coefficient as a function of the forced airspeed
    # *******************************************************
    Delta_T = 20
    for AirSpeed in [ 0.2, 0.3, 0.4, 0.5, 0.7, 1, 1.5, 2, 3 ] :
        X.Convection_h ( Delta_T, Hoogte, AirSpeed )
        print ( "%.1f [m/s] ==> %.1f [W/m2.K] Gr/Re^2=%.2f %.1f" % (
            AirSpeed, X.WOC, X.Grashof_Reynolds, X.h_WOC_Forced ) )
    print ( X )
    # Example radiator (1000x700 mm, type 33) with 5 forced-air fans.
    Radiator_Achter = Radiator_Class ( "Achter", Breedte = 1000, Hoogte = 700, Type = 33, Px=30 )
    Radiator_Achter.Add_Ventilator ( Aantal=5, Diameter=12, Flow=126 )
    print ( Radiator_Achter )
    #Radiator_Beter = Radiator_Class ( "Achter", Breedte = 1000, Hoogte = 700, Type = 33 )
    #Radiator_Beter.Add_Ventilator ( Aantal=10, Diameter=12, Flow=126 )
    #print ( Radiator_Beter )
    """
    for RType in [ 11, 21, 22, 33 ] :
        Radson1 = Radiator_Class ( "Radson1", Breedte = 450, Hoogte = 900, Type = RType )
        print ( Radson1 )
    ##"""
| true |
096b591c11f062d9d0a3249489950e81fd17cf6b | Python | graysonbai/RegressionScript | /component/SelectWindow.py | UTF-8 | 817 | 2.625 | 3 | [] | no_license | from selenium.webdriver.support.select import Select
from Retry import *
class SelectWindow():
    """Page-object wrapper around an HTML <select> element.

    *Locator* is a zero-argument callable that resolves to the element;
    *label* is a human-readable name used for reporting.
    """
    def __init__( self, label ,Locator ):
        self.Locator = Locator
        self.label = label
    def elementLocator( self ):
        """Resolve and return the underlying WebElement."""
        return self.Locator()
    @retry( " > select" )
    def select( self, text ):
        """Select the option whose visible text equals *text*."""
        Select( self.elementLocator() ).select_by_visible_text( text )
    @retry( " > Deselect All" )
    def deselectAll( self ):
        """Clear every selection of a multi-select element."""
        Select( self.elementLocator() ).deselect_all()
    def isVisible( self ):
        """Return True when the element is currently visible."""
        return isVisible( " > check Visible", self.elementLocator )
    def assertVisiable( self ):
        """Fail unless the element is visible.

        BUG FIX: the original raised when the element WAS visible
        (inverted condition) and raised a bare string, which is itself a
        TypeError in Python 3; raise AssertionError instead.
        """
        if not self.isVisible():
            raise AssertionError( "Button not Visible" )
    def assertInVisiable( self ):
        """Fail unless the element is invisible (condition was inverted)."""
        if self.isVisible():
            raise AssertionError( "Button Visible" )
| true |
6c9a1b75241f88e927c0108638761474d2380eea | Python | sofiyamitchell/sofiyamitchell | /countyVisual2.py | UTF-8 | 4,885 | 2.625 | 3 | [] | no_license | import county_demographics
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
# ---------------------------------------------------------------------------
# Stacked histogram of median household income per US county, by region.
# Refactored from five copy-pasted region/bracket loop groups into
# data-driven loops; the computed numbers are identical to the original.
# ---------------------------------------------------------------------------
list_of_report = county_demographics.get_all_counties()

# Region membership by state abbreviation, in stacking order.
# NOTE(review): "MO" appears in both West and MidWest (West is probably
# missing "MT"), so a Missouri county is counted in BOTH regions — exactly
# as in the original code.  Confirm intent before changing.
REGION_ORDER = ["W", "SW", "MW", "SE", "NE"]
REGION_STATES = {
    "W":  ["WA", "OR", "CA", "NV", "UT", "CO", "WY", "ID", "MO", "AK", "HI"],
    "SW": ["AZ", "NM", "TX", "OK"],
    "MW": ["ND", "SD", "MO", "NE", "OH", "KS", "IN", "WI", "IL", "MI", "MN", "IA"],
    "SE": ["FL", "GA", "AL", "MS", "LA", "AR", "TN", "NC", "SC", "VA"],
    "NE": ["WV", "MD", "RI", "PA", "NJ", "CT", "NY", "MA", "NH", "VT", "ME", "DE"],
}

# Collect the median household income of every county, per region.
# NOTE(review): the key "Median Houseold Income" is spelled exactly as in
# the data source; do not "fix" the spelling without checking the library.
region_incomes = {name: [] for name in REGION_ORDER}
for report in list_of_report:
    state = report["State"]
    for name in REGION_ORDER:
        if state in REGION_STATES[name]:
            region_incomes[name].append(report["Income"]["Median Houseold Income"])

# $20k-wide income brackets: [20k,40k), [40k,60k), ..., [100k,120k).
BRACKETS = [(20000, 40000), (40000, 60000), (60000, 80000),
            (80000, 100000), (100000, 120000)]

def _bin_counts(incomes):
    """Count how many incomes fall in each bracket (incomes outside all
    brackets are ignored, as in the original code)."""
    return [sum(1 for inc in incomes if lo <= inc < hi) for lo, hi in BRACKETS]

# Cumulative bracket counts region by region, reproducing the original
# W, W+SW, W+SW+MW, ... tuples.
cumulative = []
running = [0] * len(BRACKETS)
for name in REGION_ORDER:
    counts = _bin_counts(region_incomes[name])
    running = [r + c for r, c in zip(running, counts)]
    cumulative.append(tuple(running))
W, SW, MW, SE, NE = cumulative

X = np.arange(5)
# NOTE(review): each bar height is already a cumulative total, yet it is
# ALSO stacked on the previous cumulative total via `bottom=`, so regions
# are double-counted vertically.  Preserved as-is from the original.
WB = plt.bar(X, W, color='b')
SWB = plt.bar(X, SW, color='r', bottom=W)
MWB = plt.bar(X, MW, color='c', bottom=SW)
SEB = plt.bar(X, SE, color='g', bottom=MW)
NEB = plt.bar(X, NE, color='y', bottom=SE)

incomes = ['20-40k', '40-60k', '60-80k', '80-100k', '100-120k']
plt.xticks(X, incomes)
plt.title("Average household incomes across United States counties")
plt.xlabel("Average yearly household income")
plt.ylabel("Number of counties in income bracket")
plt.legend((WB[0], SWB[0], MWB[0], SEB[0], NEB[0]),
           ('West', 'Southwest', 'Midwest', 'Southeast', 'Northeast'))
plt.show()
| true |
890f0b3b06c226de9a83e598d1db80e671dc615b | Python | JoyLeeA/CodeUp | /27.py | UTF-8 | 45 | 2.890625 | 3 | [] | no_license | N = float(input())
print("{:.11f}".format(N)) | true |
671a7b31e164f801727953f9965622d650a69cd7 | Python | DoyleW21/parser-gen | /lib/python/DAGBarrierNode.py | UTF-8 | 988 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
"""A header node for use in a DAG"""
import Header
from HeaderLib import getHeaderLengths
from DAGNode import DAGNode
class BarrierNode(DAGNode):
    """Barrier node for use in a DAG.

    A barrier contributes no fields or bytes of its own; it only marks a
    location (barrierLoc) within its header.
    """
    def __init__(self, hdr, inst, barrierLoc):
        # Record the barrier position before running the base-class init.
        self.barrierLoc = barrierLoc
        # BUG FIX: super(self.__class__, ...) recurses infinitely as soon
        # as this class is subclassed; name the class explicitly.
        super(BarrierNode, self).__init__(hdr, inst, 0)
    def getName(self):
        """Return a unique name combining header name and barrier position."""
        return '%s-bar%d' % (self.hdr.name, self.barrierLoc)
    def getDecisionBytes(self):
        """Get all decision byte positions (a barrier has none)."""
        return []
    def getExtractBytes(self):
        """Get all extract byte positions (a barrier has none)."""
        return []
    def getFields(self):
        """Get all fields within the header (a barrier has none)."""
        return []
# Basic test code
if __name__ == '__main__':
    # Build a small 8+16+8-bit test header, wrap it in a barrier node
    # (instance 1, barrier position 2) and show it (Python 2 prints).
    hdr = Header.Header('TestHeader')
    hdr.addField('f1', 8)
    hdr.addField('f2', 16)
    hdr.addField('f3', 8)
    node = BarrierNode(hdr, 1, 2)
    print node
    print 'Total length:', node.getTotalLength()
| true |
bb161b3c299a099822f6eb8aaed62c782b616d6f | Python | SuperCowPowers/chains | /chains/utils/data_utils.py | UTF-8 | 2,113 | 3.46875 | 3 | [
"MIT"
] | permissive | """Data utilities that might be useful"""
from __future__ import print_function
from collections import OrderedDict
# Local imports
from chains.utils import log_utils
def make_dict(obj):
    """Recursively convert a non-builtin object into a plain dictionary.

    Builtin values (and OrderedDicts) are returned unchanged; for any
    other object every public, non-callable attribute is converted, with
    lists converted element by element.
    """
    # Recursion base case: builtins / OrderedDicts pass through as-is.
    if is_builtin(obj) or isinstance(obj, OrderedDict):
        return obj
    result = {}
    for name in dir(obj):
        if name.startswith('__'):
            continue
        value = getattr(obj, name)
        if callable(value):
            continue
        if isinstance(value, list):
            result[name] = [make_dict(element) for element in value]
        else:
            result[name] = make_dict(value)
    return result


def is_builtin(obj):
    """Return True when *obj* is an instance of a builtin type."""
    return obj.__class__.__module__ in ('__builtin__', 'builtins')
def get_value(data, key):
    """Resolve a dotted *key* (e.g. "IP.src") against nested dicts.

    Args:
        data: the data as a dictionary (possibly nested)
        key: dot-separated path of keys into *data*

    Returns:
        The value at the path, or None when any key is missing or a
        non-dict value is reached before the path is exhausted.
    """
    current = data
    for part in key.split('.'):
        if not isinstance(current, dict):
            print('CRITICAL: Cannot use subkey %s on non-dictionary element' % part)
            return None
        try:
            current = current[part]
        except KeyError:
            # Missing keys are expected; signal with None.
            return None
    return current
def test_utils():
    """Smoke-test get_value and make_dict with a few fixed inputs."""
    good = {'IP':{'src':'123'}}
    bad = {'IP':{'srrcc':'123'}}
    bad2 = {'IP':['src','123']}
    assert get_value(good, 'IP.src')
    assert get_value(bad, 'IP.src') == None
    assert get_value(bad2, 'IP.src') == None

    class bla(object):
        a = 'foo'
        b = 'bar'
        def func_foo(self):
            pass

    print(make_dict(bla))
    print('Success!')
if __name__ == '__main__':
test_utils()
| true |
65df3542976ceb1bf519ca42a67d1f7e63b83238 | Python | 1122ahhh/ML_algorithm | /main.py | UTF-8 | 799 | 2.625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Path to the UCI red-wine quality data set (semicolon-separated CSV).
file_path = "data/wineQuality/winequality-red.csv"
all_data = pd.read_csv(file_path,sep=';')
# Feature / target column names, kept for later modelling steps.
feats = ['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
       'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',
       'pH', 'sulphates', 'alcohol']
target = ['quality']
# Sample count per quality score (currently unused beyond inspection).
unique_count = all_data.groupby(by=['quality']).size().reset_index()
unique_count.rename(columns={0:'count'},inplace=True)
# Bar chart of how many wines received each quality score.
cnt_srs =all_data['quality'].value_counts()
plt.figure(figsize=(12,6))
# NOTE(review): positional x/y arguments to sns.barplot are deprecated in
# recent seaborn releases; switch to keyword arguments when upgrading.
sns.barplot(cnt_srs.index, cnt_srs.values, alpha=0.8)
# plt.xticks(rotation='vertical')
# plt.xlabel('Month of transaction', fontsize=12)
# plt.ylabel('Number of Occurrences', fontsize=12)
plt.show() | true |
d250b9f8d3f5c938cb4730cfb3bcca12bd4d1023 | Python | a421101046/-python | /ex42.py | UTF-8 | 1,013 | 3.890625 | 4 | [] | no_license | # -- coding: utf-8 --
# 关于class的构建
class Dinner(object):
    # Python 2 class modelling a tiny restaurant ordering flow
    # (print statements and raw_input; menu strings are in Chinese).
    def __init__(self):
        """Initialise the menu prices and the order state."""
        self.price = {"宫保鸡丁": 400}
        self.isOK = False
    def order(self,things):
        """React to one ordered dish; mark the order OK for a valid dish."""
        if things == "蛋炒饭":
            print "这太low,本店没有"
        elif things == "鲸鱼肉":
            print "呵呵,你在逗我么"
        elif things == "披萨":
            print "这是中式餐厅!!"
        elif things == "宫保鸡丁":
            # Only this dish is on the menu; accept and stop prompting.
            self.isOK = True
            print "好的,请稍后"
        else:
            print "╰(`□′)╯ !!"
    def pay(self,things):
        """Print the price of the ordered dish."""
        print self.price[things]
    def startOrder(self):
        """Prompt repeatedly until a valid dish is ordered, then charge."""
        print("先生请问你点什么!")
        while(not self.isOK):
            things = raw_input('>')
            self.order(things)
        self.pay(things)
d = Dinner()
# d.startOrder()
# Key points (知识点)
"""
1.self代表类的实例
2.getattr(对象,属性名,当没有获取到属性名时的默认值)
如getattr(d,'price')
"""
| true |
20e77fac4bbfd51f3e7ae474ac4190bb4dc906b2 | Python | walkingmask/imagesearch_expt | /notebooks/utils.py | UTF-8 | 1,317 | 2.9375 | 3 | [] | no_license | import io
from pathlib import Path
from typing import Optional
from typing import Union
import urllib.request
from PIL import Image
from PIL.Image import Image as PILImage
class ImageLoader:
    """Load a PIL image from either an http(s) URL or a local path."""

    @staticmethod
    def load(location: Union[str, Path]) -> Optional[PILImage]:
        """Return the image at *location*, or None (with a message) on failure.

        Args:
            location: an http(s) URL string, a path string, or a Path.
        """
        if isinstance(location, str):
            if location.startswith("http"):  # URL
                try:
                    return Image.open(
                        io.BytesIO(urllib.request.urlopen(location).read())
                    )
                except Exception as e:
                    print(f"Failed to load image from URL {location}")
                    print(e)
                    return None
            else:  # Local path string: normalize and fall through below
                location = Path(location)
        if isinstance(location, Path):  # Local path
            location = location.resolve()
            if location.exists():
                try:
                    return Image.open(str(location))
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; catch Exception and report
                # the error like the URL branch does.
                except Exception as e:
                    print(f"Failed to load image from path {location}")
                    print(e)
                    return None
            else:
                print(f"Image file not found {location}")
                return None
        # Unexpected argument type
        print(f"Unexpected type of image location {type(location)}")
        return None
| true |
d312c2da26c78774b8d892b88a3de43647046340 | Python | spkane31/complex-systems | /archive/hw2.py | UTF-8 | 19,660 | 3.328125 | 3 | [] | no_license | # Homework 2
# Due: March 31, 2020
# Sean Kane, Bayley King, Sean Rice
import numpy as np
import random
from PIL import Image
import matplotlib.pyplot as plt
import datetime
import time
class Schelling():
def __init__(self, N=40, q=100, k=4, epochs=50, iterations=30):
self.N = N
self.population = int(N * N * 0.90)
# each space can be -1, 0, 1
self.space = np.zeros((N, N), dtype=int)
# q is the number of empty sequential cells to move to
self.q = q
# k is the number of neighbors to be around you to be happy
self.k = k
self.epochs = epochs
self.print_statements = False
self.images = False
# Timeseries of happiness values
self.happiness_ts = []
self.iterations = iterations
self.open_spaces = []
def initialize_space(self):
self.space = np.zeros((self.N, self.N), dtype=int)
# randomly initialize the locations
for _ in range(int(self.population/2)):
# first with +1's
x, y = random.randint(0, self.N-1), random.randint(0, self.N-1)
while self.space[x][y] != 0:
x, y = random.randint(0, self.N-1), random.randint(0, self.N-1)
self.space[x][y] = 1
for _ in range(int(self.population/2)):
# second with -1's
x, y = random.randint(0, self.N-1), random.randint(0, self.N-1)
while self.space[x][y] != 0:
x, y = random.randint(0, self.N-1), random.randint(0, self.N-1)
self.space[x][y] = -1
for i in range(self.N):
for j in range(self.N):
if self.space[i, j] == 0:
self.open_spaces.append((i, j))
def get_random_pt(self):
return random.randint(0, self.N-1), random.randint(0, self.N-1)
def random_move(self):
happiness_values = []
for _ in range(self.iterations):
self.initialize_space()
# How to move each object in a random order? Could start at a different place each time
happiness_temp = []
# Find a random starting point and iterate from there
x, y = random.randint(0, self.N-1), random.randint(0, self.N-1)
for _ in range(self.epochs):
for i in range(self.N):
for j in range(self.N):
x0, y0 = (x + i) % self.N, (y + j) % self.N
h = self.happiness(x0, y0)
if h == 0:
# If the agent is unhappy update that position
x1, y1 = self.find_random_open(x0, y0)
self.space[x1, y1] = self.space[x0, y0]
self.space[x0, y0] = 0
self.open_spaces.remove((x1, y1))
self.open_spaces.append((x0, y0))
t_h = self.total_happiness()
# Scenario where everyone is happy
if t_h == self.population:
break
if self.print_statements: print(t_h / self.population)
happiness_temp.append(t_h/self.population)
if self.images: self.space_to_image()
# Produce timeseries for the happiness
happiness_values.append(happiness_temp)
temp = []
for i in range(self.epochs):
t = 0
for j in range(self.iterations):
t += happiness_values[j][i]
temp.append(t / self.iterations)
self.happiness_ts.append(temp)
pass
def happiness(self, x, y):
# Calculate whether an agent is happy at it's own location
# Sums the values of all the neighbors
total = -self.space[x, y]
for i in range(-1, 2, 1):
for j in range(-1, 2, 1):
x0, y0 = (x + i) % self.N, (y + j) % self.N
total += self.space[x0, y0]
# returns 1 if the cell is "happy", 0 otherwise
if total >= self.k and self.space[x, y] == 1:
return 1
elif total <= -self.k and self.space[x, y] == -1:
return 1
return 0
def happiness_value(self, x, y, cur_value):
# Calculate the happiness for a random location
total = 0
# Sums the values of all the neighbors if they're the same as the cur_value
for i in range(-1, 2, 1):
for j in range(-1, 2, 1):
x0, y0 = (x + i) % self.N, (y + j) % self.N
if self.space[x0, y0] == cur_value:
total += self.space[x0, y0]
# Looks at 8 cells, perfect happiness is all similar
return total / 8
def find_random_open(self, x, y):
cur_happiness = self.happiness_value(x, y, self.space[x, y])
x0, y0 = x, y
for i in range(min(len(self.open_spaces), self.q)):
h = self.happiness_value(self.open_spaces[i][0], self.open_spaces[i][1], self.space[x, y])
if h > cur_happiness:
cur_happiness = h
x0, y0 = self.open_spaces[i][0], self.open_spaces[i][1]
# If none make it happier,
if x0 == x and y0 == y:
return self.open_spaces[random.randint(0, len(self.open_spaces)-1)]
return x0, y0
def total_happiness(self):
total = 0
for i in range(self.N):
for j in range(self.N):
total += self.happiness(i, j)
return total
def social_network(self, n=5, p=3, epochs=100):
happiness_values = []
# p = size of square neighborhood to look at
self.p = p
# n = number of friends
self.n = n
for _ in range(self.iterations):
self.initialize_space()
# First find each agents "friends", randomly. This will be stored in a dictionary
# this will require a lot of re-writing, but first thing that came to mind
self.friends = {}
for i in range(self.N):
for j in range(self.N):
if self.space[i, j] != 0:
temp = []
while len(temp) < n:
x, y = self.get_random_pt()
if (x,y) not in temp:
temp.append((x, y))
self.friends[(i, j)] = temp
happiness_temp = []
for _ in range(epochs):
for i in range(self.N):
for j in range(self.N):
if self.happiness(i, j) == 0 and self.space[i, j] != 0:
# Not "happy", look for new place
# print(self.space)
# print(i, j)
# print(self.friends[(i, j)])
locations = self.ask_friends(i, j)
# print(locations)
if len(locations) > 0:
new_loc = locations[random.randint(0, len(locations)-1)]
else:
x, y = self.get_random_pt()
while self.space[x, y] != 0:
x, y = self.get_random_pt()
new_loc = (x, y)
# print(new_loc)
self.friends[new_loc] = self.friends[(i, j)]
self.friends[(i, j)] = []
# print(self.friends[new_loc])
# print(self.friends[(i, j)])
self.space[new_loc[0], new_loc[1]] = self.space[i, j]
self.space[i, j] = 0
# print(self.space)
# quit()
if self.print_statements: print(self.total_happiness() / self.population)
happiness_temp.append(self.total_happiness()/self.population)
happiness_values.append(happiness_temp)
if self.images: self.space_to_image()
temp = []
for i in range(self.epochs):
t = 0
for j in range(self.iterations):
t += happiness_values[j][i]
temp.append(t / self.iterations)
self.happiness_ts.append(temp)
if self.images: self.space_to_image()
pass
def ask_friends(self, x, y):
# TODO: the range to look at needs to be fixed
f = self.friends[(x, y)]
locs = []
for friend in f:
x, y = friend
for i in range(-int(self.p/2), int((self.p+1)/2), 1):
for j in range(-int(self.p/2), int((self.p+1)/2), 1):
x0, y0 = (x + i) % self.N, (y + j) % self.N
if self.space[x0, y0] == 0:
h = self.happiness_value(x0, y0, self.space[x, y])
if h > 0 and self.space[x, y] > 0:
locs.append((x0, y0))
if h < 0 and self.space[x, y] < 0:
locs.append((x0, y0))
return locs
def sean_kane(self):
happiness_values = []
# Sean Kane's choice policy
for _ in range(self.iterations):
# Start by creating a new starting space
self.initialize_space()
happiness_temp = [] # Stores the happiness values at each epoch for each iteration, the avg is taken care of later
for _ in range(epochs):
for i in range(self.N):
for j in range(self.N):
# Here is where your algorithm goes, this iterates through the list in order from top left to bottom right so you may want to change that
# The 'for _ in range(epochs):' should stay, that makes through it goes through the same number of epochs each time
# TODO: insert code
pass
if self.print_statements: print(self.total_happiness() / self.population)
happiness_temp.append(self.total_happiness()/self.population)
happiness_values.append(happiness_temp)
# Save the image of the final neighborhood if this switch is on
if self.images: self.space_to_image()
# This goes through calculating the average happiness at each epoch, leave this alone.
temp = []
for i in range(self.epochs):
t = 0
for j in range(self.iterations):
t += happiness_values[j][i]
temp.append(t / self.iterations)
self.happiness_ts.append(temp)
if self.images: self.space_to_image()
def length_location(self,locations,i,j):
dist = []
for loc in locations:
dist.append(((loc[0] - i)**2 + (loc[1] - j)**2)**.5)
temp = list(zip(dist,locations))
temp.sort()
return temp[0][1]
def bayley_king(self,n=5, p=3,epochs=10,randLocs=10):
happiness_values = []
# p = size of square neighborhood to look at
self.p = p
# n = number of friends
self.n = n
for _ in range(self.iterations):
self.initialize_space()
# First find each agents "friends", randomly. This will be stored in a dictionary
# this will require a lot of re-writing, but first thing that came to mind
self.friends = {}
for i in range(self.N):
for j in range(self.N):
if self.space[i, j] != 0:
temp = []
while len(temp) < n:
x, y = self.get_random_pt()
if (x,y) not in temp:
temp.append((x, y))
self.friends[(i, j)] = temp
happiness_temp = []
for _ in range(epochs):
#print('Epoch:',e)
for i in range(self.N):
for j in range(self.N):
# Here is where your algorithm goes, this iterates through the list in order from top left to bottom right so you may want to change that
# The 'for _ in range(epochs):' should stay, that makes through it goes through the same number of epochs each time
if self.happiness(i, j) == 0 and self.space[i, j] != 0: # if cell is unhappy and is an entity
locations = self.ask_friends(i, j)
if len(locations) > 0:
# calcualte the distance from current point to each suggested location
# sort by minnimum, go to minnimum travel location
new_loc = self.length_location(locations,i,j)
else:
# generate 10 random locations
# move to minnimum travel distance location
locations = []
for i in range(randLocs):
locations.append(self.get_random_pt())
while self.space[locations[i]] != 0:
locations[i] = self.get_random_pt()
new_loc = self.length_location(locations,i,j)
# print(new_loc)
try:
self.friends[new_loc] = self.friends[(i, j)]
except:
print(new_loc)
print(i,j)
self.friends[(i, j)] = []
# print(self.friends[new_loc])
# print(self.friends[(i, j)])
self.space[new_loc[0], new_loc[1]] = self.space[i, j]
self.space[i, j] = 0
# print(self.space)
# quit()
if self.print_statements: print(self.total_happiness() / self.population)
happiness_temp.append(self.total_happiness()/self.population)
happiness_values.append(happiness_temp)
# Save the image of the final neighborhood if this switch is on
if self.images: self.space_to_image()
# This goes through calculating the average happiness at each epoch, leave this alone.
temp = []
for i in range(self.epochs):
t = 0
for j in range(self.iterations):
t += happiness_values[j][i]
temp.append(t / self.iterations)
self.happiness_ts.append(temp)
if self.images: self.space_to_image()
pass
def sean_rice(self):
happiness_values = []
# Sean Rice's choice policy
for _ in range(self.iterations):
# Start by creating a new starting space
self.initialize_space()
happiness_temp = [] # Stores the happiness values at each epoch for each iteration, the avg is taken care of later
for _ in range(epochs):
for i in range(self.N):
for j in range(self.N):
# Here is where your algorithm goes, this iterates through the list in order from top left to bottom right so you may want to change that
# The 'for _ in range(epochs):' should stay, that makes through it goes through the same number of epochs each time
# TODO: insert code
pass
if self.print_statements: print(self.total_happiness() / self.population)
happiness_temp.append(self.total_happiness()/self.population)
happiness_values.append(happiness_temp)
# Save the image of the final neighborhood if this switch is on
if self.images: self.space_to_image()
# This goes through calculating the average happiness at each epoch, leave this alone.
temp = []
for i in range(self.epochs):
t = 0
for j in range(self.iterations):
t += happiness_values[j][i]
temp.append(t / self.iterations)
self.happiness_ts.append(temp)
if self.images: self.space_to_image()
def space_to_image(self):
im = np.zeros((self.N, self.N, 3), dtype=np.uint8)
for i in range(self.N):
for j in range(self.N):
if self.space[i, j] == 1:
im[i, j] = [255, 0, 0]
elif self.space[i, j] == -1:
im[i, j] = [0, 0, 255]
else:
im[i, j] = [255, 255, 255]
# Want the image to be 512 x 512
scale = 512 / (self.N)
img = Image.fromarray(im, 'RGB')
img = img.resize((round(img.size[0]*scale), round(img.size[1]*scale)), Image.NEAREST)
file_name = f"{datetime.datetime.now()}".split()[0]
file_name += f"_k={self.k}_N={self.N}_epochs={self.epochs}"
img.save(file_name+ ".png")
if __name__ == "__main__":
# If you only want to test your algorithm out and skip everything, comment out everything from the 'print("Random...")' to the s.<your_name> line.
# If you want to compare to the others keep that in and you'll get a graphic at the end showing the happiness over the timeseries. This takes
# a while to complete so you probably want to lower epochs and iterations when you're comparing.
epochs = 1
iterations = 1
s = Schelling(N=40, k=4, epochs=epochs, iterations=iterations)
print("Simulating...")
print("Random...")
start = time.time()
s.random_move()
print(f"Execution time: {round(time.time() - start, 2)} seconds")
labels = ["Random"]
fig = plt.figure()
ax = plt.subplot(111)
x = [int(i+1) for i in range(epochs)]
print('Bayleys algorithm...')
for n in [5, 10, 20]:
for p in [3, 5, 7]:
print(f"n={n}\tp={p}")
start = time.time()
s.bayley_king(n=n,p=p,epochs=epochs,randLocs=10)
print(f"Execution time: {round(time.time() - start, 2)} seconds")
labels.append(f"n={n}, p={p}")
# s.sean_kane()
#start = time.time()
#s.bayley_king(epochs=epochs)
#print(f"Execution time: {round(time.time() - start, 2)} seconds")
# s.sean_rice()
# labels.append("Sean Kane")
#labels.append("Bayley King")
# labels.append("Sean Rice")
for (i, h) in enumerate(s.happiness_ts):
ax.plot(x, h, label=labels[i])
plt.xlabel('Epochs')
plt.ylabel('Happiness')
chartBox = ax.get_position()
ax.set_position([chartBox.x0, chartBox.y0, chartBox.width*0.6, chartBox.height])
ax.legend(loc='upper center', bbox_to_anchor=(1.45, 0.8), shadow=True, ncol=1)
filename = f"images/{datetime.datetime.now()}-timeseries-happiness.png"
plt.savefig(filename)
plt.show()
print("Completed") | true |
0f2da373625fb4a6e51e0b00eec1faa852ccccbe | Python | afunsten/oil | /spec/coproc.py | UTF-8 | 626 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
"""
coproc.py
"""
import sys
def main(argv):
  """Coprocess body: copy stdin to stdout, transforming letter case.

  argv[1] selects the transform ('upper' by default); any other value
  lower-cases.  Stops on EOF.  (Python 2 code.)
  """
  print >>sys.stderr, argv
  try:
    command = argv[1]
  except IndexError:
    command = 'upper'
  if command == 'upper':
    func = lambda x: x.upper()
  else:
    func = lambda x: x.lower()
  while True:
    line = sys.stdin.readline()
    if not line:
      # EOF: announce completion on stderr and stop.
      print >>sys.stderr, 'DONE %s' % command
      break
    sys.stdout.write(func(line))
    # If we don't do this, it hangs forever
    sys.stdout.flush()
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError as e:
print >>sys.stderr, 'FATAL: %s' % e
sys.exit(1)
| true |
7d5ab69816e0c086fb66eb74fcf44bcf30a60f65 | Python | AI-IshanBhatt/ML-Examples | /K-Means/KMeans.py | UTF-8 | 2,913 | 3.265625 | 3 | [] | no_license | import numpy as np
from pprint import pprint
def has_not_converged(points):
    """Return truthy while any point changed its cluster assignment.

    Each element of `points` carries a previous (`prev`) and current
    (`curr`) centroid; k-means has converged once every point kept its
    centroid between two iterations.  The truthy/falsy result is
    interchangeable with the original 1/0 for the `if` call sites.
    """
    # any() short-circuits on the first point that is still moving.
    return any(point.prev != point.curr for point in points)
def get_mean_centroid(points, i):
    """Build a fresh centroid at the (rounded) mean position of `points`.

    `i` is only used for the progress message; the returned Centroid
    starts with an empty `near_points` list.
    """
    mean_x = round(np.mean([p.x for p in points]))
    mean_y = round(np.mean([p.y for p in points]))
    pprint("THE NEW CENTROID {} {} for centroid {}".format(mean_x, mean_y, i))
    return Centroid(mean_x, mean_y, [])
"""
About doing multiprocessing-
1) Re location of centroids can be done using multip processing (1 process per cluster)
2) Divide points by CPU counts and 1 process for one set of points to set previous and current centroids.
"""
def get_minimum_euclidian(centroids, point):
    """Assign `point` to its nearest centroid (Euclidean) and return it.

    Side effect: the point is appended to the winning centroid's
    `near_points` list.  Ties keep the earliest centroid in the list.
    """
    point_vec = np.array(point.to_array())
    best_idx = 0
    # float("inf") replaces the old magic sentinel 9999, which silently
    # mis-assigned points whenever every real distance exceeded 9999.
    best_dist = float("inf")
    for idx, centroid in enumerate(centroids):
        dist = np.linalg.norm(np.array(centroid.to_array()) - point_vec, 2)
        if dist < best_dist:
            best_idx = idx
            best_dist = dist
    centroids[best_idx].near_points.append(point)
    return centroids[best_idx]
class Point:
    """A simple 2-D point holding x and y coordinates."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def to_array(self):
        # Coordinates as a plain [x, y] list.
        return [self.x, self.y]

    def __str__(self):
        return "{} {}".format(self.x, self.y)

    def __repr__(self):
        return str(self)
class DataPoint(Point):
    """A point that remembers its previous and current cluster assignment."""

    def __init__(self, x, y):
        super().__init__(x, y)
        # Centroids this point was and is assigned to; None until assigned.
        self.prev = None
        self.curr = None

    def __str__(self):
        return "{} {} -> {} {}".format(self.x, self.y, self.prev, self.curr)

    def __repr__(self):
        return str(self)
class Centroid(Point):
    """A cluster centre; `near_points` collects the points assigned to it."""

    def __init__(self, x, y, near_points):
        super().__init__(x, y)
        self.near_points = near_points

    def __eq__(self, other):
        # Equal when `other` is truthy and shares both coordinates.  The
        # short-circuit expression is kept verbatim so a falsy `other`
        # yields the same result as before.
        return other and self.x == other.x and self.y == other.y

    def __hash__(self):
        # Hash only the coordinates, mirroring __eq__.
        return hash((self.x, self.y))
# --- K-means driver (runs on import): k = 3 with fixed seed centroids ---
c1 = Centroid(50, 50, [])
c2 = Centroid(100, 100, [])
c3 = Centroid(150, 150, [])
centroids = [c1,c2,c3]
new_centroids = []  # NOTE(review): never used below
# Each CSV row is "x,y"; training.csv must exist in the working directory.
data_points = [list(map(int,x.strip().split(","))) for x in open("training.csv")]
points = [DataPoint(*point) for point in data_points]
# Initial centroid assignments
for point in points:
    point.curr = get_minimum_euclidian(centroids,point)
# Lloyd iteration: recompute centroids from their members, then reassign
# every point, until no point changes cluster.
while 1:
    if has_not_converged(points):
        for i in range(0,len(centroids)):
            centroids[i] = get_mean_centroid(centroids[i].near_points, i)
        for point in points:
            point.prev = point.curr
            point.curr = get_minimum_euclidian(centroids, point)
    else:
        break
print("------------------------------------------------------------------------------")
for c in centroids:
    print(c.near_points)
| true |
6a9140a23c84678adee748bb815f3af83440544b | Python | Mohit-Chaudhari/Python-Assignments | /Assignment 6/Assignment6_3.py | UTF-8 | 1,103 | 3.859375 | 4 | [] | no_license | class Arithmetic():
    def __init__(self):
        # Operands default to zero until Accept() stores real values.
        self.Value1 = 0
        self.Value2 = 0
    def Accept(self,n1,n2):
        """Store the two operands used by the arithmetic methods."""
        self.Value1 = n1
        self.Value2 = n2
    def Addition(self):
        """Return Value1 + Value2."""
        return self.Value1+self.Value2
    def Multiplcation(self):
        """Return Value1 * Value2 (method-name typo kept for existing callers)."""
        return self.Value1*self.Value2
    def Division(self):
        """Return Value1 / Value2 (true division; ZeroDivisionError if Value2 == 0)."""
        return self.Value1/self.Value2
    def Subtraction(self):
        """Return Value1 - Value2."""
        return self.Value1-self.Value2
def main():
    """Demo driver: exercise Arithmetic on three sample operand pairs.

    Replaces three copy-pasted blocks with one loop; the printed output
    (including the original "Additon" spelling) is unchanged.
    """
    for n1, n2 in ((20, 10), (67, 12), (678, 234)):
        obj = Arithmetic()
        obj.Accept(n1, n2)
        print("Additon : ",obj.Addition())
        print("Subtraction : ",obj.Subtraction())
        print("Multiplcation : ",obj.Multiplcation())
        print("Division : ",obj.Division())
        print()
if __name__ == '__main__':
    main()
6938a39c6891ae91d464f5313e76cba164b7c39c | Python | pohily/checkio | /cards.py | UTF-8 | 623 | 3.09375 | 3 | [] | no_license | def cards(deck, hand):
every_card = []
for card in range(1, deck+1):
every_card += [[card-1, card]]
found = True
hand.sort()
#print(every_card)
while hand:
if found == False:
return False
found = False
check = hand[0]
hand = hand[1:]
for card in every_card:
if check in card:
every_card.remove(card)
print(every_card)
found = True
break
return True if found == True else False
# Demo: can every card in this hand be matched to a distinct [n-1, n] pair
# from a 25-card deck?  (The empty string literal below is a leftover.)
print(cards(25,[17,11,16,12,5,12,11]))
"""
"""
| true |
d75157e3590bb08bd551e8326eec24719950f281 | Python | devmike277/DjangoGeoLocationAPP | /geolocationApp/views.py | UTF-8 | 1,450 | 2.515625 | 3 | [] | no_license | from django.shortcuts import render
import requests
import json
import folium
import re
from django.contrib import messages
import ipaddress
# Create your views here.
def index(request):
    """Render the landing page with no lookup result yet."""
    # The template reads a `data` key; the string 'null' marks "no result".
    return render(request, 'index.html', {'data': 'null'})
def geolocate(request):
    """Handle the lookup form POST: geolocate an IPv4 address via ip-api.com
    and render it on a folium map, or flash "Invalid IP" for bad input."""
    ip = request.POST['ip']
    context = {
        'data':'null'
    }
    # Validate with the stdlib `ipaddress` module (already imported in this
    # file but previously unused) instead of a hand-rolled dotted-quad regex.
    try:
        ipaddress.IPv4Address(ip)
        is_valid = True
    except ValueError:
        is_valid = False
    if is_valid:
        res = requests.get('http://ip-api.com/json/'+ip)
        location_data = json.loads(res.text)
        point = (location_data['lat'],location_data['lon'])
        folium_map= folium.Map(width=750,height=450,location=point)
        folium.Marker(
            [location_data['lat'],location_data['lon']],
            tooltip='click here for more information',
            popup=location_data['city'],
            icon=folium.Icon(color='red')
        ).add_to(folium_map)
        # Embed the rendered map HTML directly in the template context.
        folium_map = folium_map._repr_html_()
        context = {
            'data':location_data,
            'map':folium_map
        }
    else:
        messages.info(request,'Invalid IP')
    return render(request,'index.html',context)
| true |
109a8d3ac97be5788c4d005a508fd28b3fbf53f3 | Python | PacktPublishing/Foundations-of-Blockchain | /Chapter02/ECC_pub_key.py | UTF-8 | 2,754 | 3 | 3 | [
"MIT",
"CC-BY-SA-4.0",
"Apache-2.0",
"GPL-3.0-only"
] | permissive | # original source: https://github.com/wobine/blackboard101
# Super simple Elliptic Curve Presentation. No imported libraries, wrappers, nothing.
# For educational purposes only. Remember to use Python 2.7.6 or lower. You'll need to make changes for Python 3.
# Below are the public specs for Bitcoin's curve - the secp256k1
# Field prime p = 2^256 - 2^32 - 977 of secp256k1.
Pcurve = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 -1 # The proven prime
# Group order: valid private keys are 1..N-1.
N=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 # Number of points in the field
Acurve = 0; Bcurve = 7 # These two define the elliptic curve: y^2 = x^3 + Acurve*x + Bcurve
Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
GPoint = (Gx,Gy) # This is our generator point. Trillions of dif ones possible
#Individual Transaction/Personal Information
privKey = 0xA0DC65FFCA799873CBEA0AC274015B9526505DAAAED385155425F7337704883E #replace with any private key
def modinv(a,n=Pcurve): #Extended Euclidean Algorithm/'division' in elliptic curves
    """Return the modular inverse of `a` modulo `n`.

    This is the 'division' operation of the curve's prime field,
    computed with the extended Euclidean algorithm.
    """
    lm, hm = 1, 0
    low, high = a % n, n
    while low > 1:
        # Floor division: the original `high/low` only produced an integer
        # under Python 2; `//` gives the identical result and is also
        # correct under Python 3.
        ratio = high // low
        nm, new = hm - lm * ratio, high - low * ratio
        lm, low, hm, high = nm, new, lm, low
    return lm % n
def ECadd(a,b): # Not true addition, invented for EC. Could have been called anything.
    """Chord rule: add two distinct curve points a and b (mod Pcurve)."""
    slope = ((b[1] - a[1]) * modinv(b[0] - a[0], Pcurve)) % Pcurve
    x3 = (slope * slope - a[0] - b[0]) % Pcurve
    y3 = (slope * (a[0] - x3) - a[1]) % Pcurve
    return (x3, y3)
def ECdouble(a): # This is called point doubling, also invented for EC.
    """Tangent rule: double the curve point a (mod Pcurve)."""
    slope = ((3 * a[0] * a[0] + Acurve) * modinv((2 * a[1]), Pcurve)) % Pcurve
    x3 = (slope * slope - 2 * a[0]) % Pcurve
    y3 = (slope * (a[0] - x3) - a[1]) % Pcurve
    return (x3, y3)
def EccMultiply(GenPoint,ScalarHex): #Double & add. Not true multiplication
    """Scalar-multiply GenPoint by ScalarHex with the double-and-add method."""
    if ScalarHex == 0 or ScalarHex >= N:
        raise Exception("Invalid Scalar/Private Key")
    bits = bin(ScalarHex)[2:]  # bin() already returns a str; strip the "0b"
    Q = GenPoint
    # The leading 1 bit is GenPoint itself; process the remaining bits
    # most-significant first: double always, add on a 1 bit.
    for bit in bits[1:]:
        Q = ECdouble(Q)
        if bit == "1":
            Q = ECadd(Q, GenPoint)
    return Q
# NOTE: Python 2 only -- the lines below are print *statements* (see the
# header comment about using Python 2.7.6 or lower).
print; print "******* Public Key Generation *********";
print
# Public key = privKey * G on secp256k1.
PublicKey = EccMultiply(GPoint,privKey)
print "the private key:";
print privKey; print
print "the uncompressed public key (not address):";
print PublicKey; print
print "the uncompressed public key (HEX):";
print "04" + "%064x" % PublicKey[0] + "%064x" % PublicKey[1];
print;
print "the official Public Key - compressed:";
# Compressed form: prefix 03 for odd Y, 02 for even Y, followed by X in hex.
# hex(...)[2:-1] drops the "0x" prefix and the trailing "L" of a Python 2 long.
if PublicKey[1] % 2 == 1: # If the Y value for the Public Key is odd.
    print "03"+str(hex(PublicKey[0])[2:-1]).zfill(64)
else: # Or else, if the Y value is even.
    print "02"+str(hex(PublicKey[0])[2:-1]).zfill(64)
9c82cf446403b1dddd35b07d5248586ae25df724 | Python | eltechno/python_course | /loop-for.py | UTF-8 | 552 | 3.75 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 11:46:02 2019
@author: techno
"""
# Read four integers from the user, keeping a running total.
result = 0
for i in range(4):
    number = int(input("Please give me the number: "))
    result += number
    # NOTE(review): this print is inside the loop, so the running total is
    # shown after every entry -- confirm whether only the final sum was meant.
    print ("the result of adding numbers is: ", result)
# =============================================================================
# print the numbers divisible by 2
# =============================================================================
for i in range (1000):
    if (i%2 == 0):
        print(i," is even number")
| true |
716205a7c64f391c2ca8597730a0578ccc57be9a | Python | YaoweiFan/peg-in-hole | /src/reward_display.py | UTF-8 | 3,437 | 2.890625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import math
if __name__ == '__main__':
    # Sample the (x, z) plane on a 0.001 grid: x spans -0.1..0.1
    # (horizontal) and z spans 0..0.24 (vertical).
    delta_x = 0.001
    x = np.arange(-0.1, 0.1, delta_x)
    delta_z = 0.001
    z = np.arange(0, 0.24, delta_z)
    X, Z = np.meshgrid(x, z)
def reward1(_x, _z):
_reward = np.zeros(_x.shape)
for i in range(_x.shape[0]):
for j in range(_x.shape[1]):
x = _x[i][j]
z = _z[i][j]
sz = z - 0.06
s = math.sqrt(x * x + sz * sz)
sxy = abs(x)
s_max = 0.06
sxy_max = 0.005
delta_z = 0.005
if s > s_max:#靠近阶段
reward = 2 - math.tanh(10 * s) - math.tanh(10 * sxy)
elif sxy > sxy_max or sz > 3 * delta_z:#对齐阶段
reward = 2 - 5 * sxy - 5 * sz
elif z > delta_z:#插入阶段
reward = 4 - 2 * (sz / 0.06)
else:#完成阶段
reward = 10
_reward[i][j] = reward
return _reward
def reward2(_x, _z):
_reward = np.zeros(_x.shape)
for i in range(_x.shape[0]):
for j in range(_x.shape[1]):
x = _x[i][j]
z = _z[i][j]
sz = z - 0.06
s = math.sqrt(x * x + sz * sz)
sxy = abs(x)
sxy_max = 0.005
delta_z = 0.005
if sxy < sxy_max: #对齐阶段
if sz < 0: #插入阶段
if z < delta_z: #完成阶段
reward = 10
else:
reward = 4 - 2 * (sz / 0.06)
else:
reward = 2 - 5 * sxy - 5 * sz
else: #靠近阶段
reward = 2 - math.tanh(10 * s) - math.tanh(10 * sxy)
_reward[i][j] = reward
return _reward
    # Left panel: filled + line contours of reward1 over the grid.
    plt.subplot(121)
    plt.contourf(X, Z, reward1(X, Z))
    plt.contour(X, Z, reward1(X, Z))
    # Overlay the fixture outline in gray: top edge at z = 0.06, outer
    # walls at x = +/-0.06 and inner (hole) walls at x = +/-0.011.
    box_x = np.arange(-0.06, 0.07, 0.01)
    box_z = np.zeros(box_x.shape) + 0.06
    plt.plot(box_x, box_z, color='Gray')
    box_z = np.arange(0, 0.06, 0.001)
    box_x = np.zeros(box_z.shape) - 0.06
    plt.plot(box_x, box_z, color='Gray')
    box_x = np.zeros(box_z.shape) + 0.06
    plt.plot(box_x, box_z, color='Gray')
    box_x = np.zeros(box_z.shape) + 0.011
    plt.plot(box_x, box_z, color='Gray')
    box_x = np.zeros(box_z.shape) - 0.011
    plt.plot(box_x, box_z, color='Gray')
    plt.axis("equal")
    plt.title('reward1')
    plt.xlabel('horizontal')
    plt.ylabel('vertical')
    # Right panel: the same layout for reward2.
    plt.subplot(122)
    plt.contourf(X, Z, reward2(X, Z))
    plt.contour(X, Z, reward2(X, Z))
    box_x = np.arange(-0.06, 0.07, 0.01)
    box_z = np.zeros(box_x.shape) + 0.06
    plt.plot(box_x, box_z, color='Gray')
    box_z = np.arange(0, 0.06, 0.001)
    box_x = np.zeros(box_z.shape) - 0.06
    plt.plot(box_x, box_z, color='Gray')
    box_z = np.arange(0, 0.06, 0.001)
    box_x = np.zeros(box_z.shape) + 0.06
    plt.plot(box_x, box_z, color='Gray')
    box_x = np.zeros(box_z.shape) + 0.011
    plt.plot(box_x, box_z, color='Gray')
    box_x = np.zeros(box_z.shape) - 0.011
    plt.plot(box_x, box_z, color='Gray')
    plt.axis("equal")
    plt.title('reward2')
    plt.xlabel('horizontal')
    plt.ylabel('vertical')
    plt.show()
6ed64629e734767d4bf21632f90fc5882a9252ff | Python | woowei0102/code2pro | /data/clean_data/A14/12.py | UTF-8 | 279 | 3.421875 | 3 | [] | no_license | # datas = [10,4,8,5,7,2,9]
def sortList(datas):
    """Sort `datas` ascending in place and return the same list.

    Replaces a hand-rolled O(n^2) bubble sort with the built-in
    list.sort(); the caller-visible contract (in-place mutation,
    original list object returned) is unchanged.
    """
    datas.sort()
    return datas
# Demo: sorts the sample list in place and prints the result.
print(sortList([10,4,8,5,7,2,9]))
| true |
a7ba65206063751ddb4859b441dbde66599ac1ab | Python | Pabitha-1/python | /B53.py | UTF-8 | 87 | 3.453125 | 3 | [] | no_license | p=int(input())
# Sum the decimal digits of p (read from stdin on the line above) and print it.
q=0
while(p>0):
    # "reminder" (sic) is the remainder mod 10, i.e. the last digit.
    reminder=p%10
    q=q+reminder
    p=p//10
print(q)
| true |
c736ce1eac2cf77e140b932454a6d0550edc1550 | Python | d2bhatt/HybridFramework | /Utilities/customLogger.py | UTF-8 | 1,612 | 3.0625 | 3 | [] | no_license | # Python has a built-in module logging which allows writing status messages to a file or any other output streams.
# The file can contain the information on which part of the code is executed and what problems have been arisen.
import logging
# To read logs from the application we create a custom logger file; all the
# actions are performed inside a method, which is called later and lives inside a class.
class LogGen:
    """Factory for the project-wide file logger."""

    @staticmethod
    def loggen():
        """Return the root logger, configured to write INFO+ records
        to .\\Logs\\automation.log with a mm/dd/yy 12-hour timestamp."""
        # Drop any handlers left over from earlier calls so basicConfig can
        # reconfigure the root logger instead of silently doing nothing.
        for handler in list(logging.root.handlers):
            logging.root.removeHandler(handler)
        logging.basicConfig(
            filename=".\\Logs\\automation.log",
            format='%(asctime)s: %(levelname)s: %(message)s',
            datefmt='%m/%d/%y %I:%M:%S %p',
        )
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        return logger
# logging.DEBUG would return detailed information
# Debug : These are used to give Detailed information
# Info : These are used to Confirm that things are working as expected
# Warning :These are used an indication that something unexpected happened, or indicative of some problem in near future
# Error : This tells that due to a more serious problem, the software has not been able to perform some function
# Critical : This tells serious error, indicating that the program itself may be unable to continue running
| true |
c7a655cb34fbdd1adda7ff38dac326cd252cf0fa | Python | vbardasson/UriRepo | /1103-Alarme-Despertador/1103 - Alarme Despertador.py | UTF-8 | 2,335 | 3.75 | 4 | [] | no_license | # Daniela é enfermeira em um grande hospital, e tem os horários de trabalho muito variáveis. Para piorar, ela tem sono pesado, e uma grande dificuldade para acordar com relógios despertadores.
# Recentemente ela ganhou de presente um relógio digital, com alarme com vários tons, e tem esperança que isso resolva o seu problema. No entanto, ela anda muito cansada e quer aproveitar cada momento de descanso. Por isso, carrega seu relógio digital despertador para todos os lugares, e sempre que tem um tempo de descanso procura dormir, programando o alarme despertador para a hora em que tem que acordar. No entanto, com tanta ansiedade para dormir, acaba tendo dificuldades para adormecer e aproveitar o descanso.
# Um problema que a tem atormentado na hora de dormir é saber quantos minutos ela teria de sono se adormecesse imediatamente e acordasse somente quando o despertador tocasse. Mas ela realmente não é muito boa com números, e pediu sua ajuda para escrever um programa que, dada a hora corrente e a hora do alarme, determine o número de minutos que ela poderia dormir.
# Entrada
# A entrada contém vários casos de teste. Cada caso de teste é descrito em uma linha, contendo quatro números inteiros H 1 , M 1 , H2 e M 2, com H1:M1 representando a hora e minuto atuais, e H2:M2 representando a hora e minuto para os quais o alarme despertador foi programado (0≤H1≤23, 0≤M1≤59, 0≤H2≤23, 0≤M2 ≤59).
# O final da entrada é indicado por uma linha que contém apenas quatro zeros, separados por espaços em branco.
# Saída
# Para cada caso de teste da entrada seu programa deve imprimir uma linha, cada uma contendo um número inteiro, indicando o número de minutos que Daniela tem para dormir.
# Read the first test case: current time H1:M1 and alarm time H2:M2.
H1,M1,H2,M2 = map(int, input().split())
import math  # kept from the original file although it is no longer used
# Four zeros terminate the input.
while (H1 or M1 or H2 or M2)!=0:
    # Sleep time = forward clock distance from H1:M1 to H2:M2, wrapping past
    # midnight; identical times give 0 minutes, as in the original's
    # H1 == H2, M1 == M2 path.  This single formula replaces the old branch
    # ladder, which wrongly added 24 h when only the minutes wrapped
    # (e.g. 1:50 -> 2:10) and printed nothing for H1 == H2 with M1 > M2.
    T = ((H2 * 60 + M2) - (H1 * 60 + M1)) % 1440
    print(T)
    H1,M1,H2,M2 = map(int, input().split())
| true |
5850a7f2bd168faeaa67636b7f11f731c2427a29 | Python | Todorovikj/ScrapingFacebookFriends | /data.py | UTF-8 | 1,606 | 2.625 | 3 | [] | no_license | # encoding=utf8
from bs4 import BeautifulSoup
import io
import json
from xlsxwriter import Workbook
def read_file():
    """Return the contents of the saved friends-list HTML page as text.

    NOTE(review): the absolute Windows path is machine-specific; consider
    turning it into a parameter or module constant.
    """
    # The dead `data = ''` initialiser and the commented-out open/read/close
    # variant were removed; the context manager closes the file reliably.
    path = "D:\\neco skola i rabota\\rabota\\ScrapingFbFriends\\html Data\\Mathew James Stirland.html"
    with io.open(path, "r", encoding="utf-8") as html_file:
        return html_file.read()
# --- Script body (runs on import): scrape friends into friends.xlsx ---
file=Workbook("friends.xlsx")
workSheet=file.add_worksheet()
# Header row.
workSheet.write(0,0,'Name')
workSheet.write(0,1,'Facebook Id')
workSheet.write(0,2,'Profile Link')
htmlContent=read_file()
soup=BeautifulSoup(htmlContent,'lxml')
#friends=soup.find_all('li',attrs={'class':'_698'}) #3033
#friends=soup.find_all('a',attrs={'class':['_5q6s','_8o','_8t','lfloat','_ohe']}) 3041
# Facebook-specific CSS classes wrapping each friend entry; the commented
# variants above matched other page layouts.
friends=soup.find_all('div',attrs={'class':['fsl','fwb','fcb']}) #3042
i=1
for friend in friends:
    if friend.a is not None:
        #print(friend.a.string)
        if(friend.a.get('data-gt') is not None):
            #print(friend.a['href'])
            #print(friend.a.get('data-gt'))
            # data-gt carries JSON; the id is read from engagement.eng_tid.
            python_dict = dict(json.loads(friend.a.get('data-gt')))
            #print(python_dict)
            pom_dict=python_dict['engagement']
            #print(pom_dict['eng_tid'])
            pLink=friend.a['href']
            name=friend.a.string
            fb_id=pom_dict['eng_tid']
            print(name+" "+fb_id+" "+pLink)
            # One spreadsheet row per friend, starting below the header.
            workSheet.write(i,0,name)
            workSheet.write(i, 1, fb_id)
            workSheet.write(i, 2, pLink)
            i=i+1
            #break
file.close()
print("finished")
af1c1ef96e679992d7b337cb3674762a1576546a | Python | manwindersingh1712/Facial-Recognition-for-Crime-Detection | /dbHandler.py | UTF-8 | 1,811 | 3.078125 | 3 | [
"MIT"
] | permissive | import pymysql
def insertData(data):
    """Insert one criminal record into `criminaldata`.

    `data` is a dict keyed by the form-field names below.
    Returns the new row id, or 0 when the insert failed.
    """
    rowId = 0
    db = pymysql.connect("localhost", "", "", "testdb")
    cursor = db.cursor()
    print("database connected")
    # Parameterized query: the old "%s"-interpolated SQL string was open to
    # SQL injection and broke on values containing quotes.
    query = ("INSERT INTO criminaldata "
             "VALUES(0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);")
    params = (data["Name"], data["Father's Name"], data["Mother's Name"],
              data["Gender"], data["DOB(yyyy-mm-dd)"], data["Blood Group"],
              data["Identification Mark"], data["Nationality"],
              data["Religion"], data["Crimes Done"])
    try:
        cursor.execute(query, params)
        db.commit()
        rowId = cursor.lastrowid
        print("data stored on row %d" % rowId)
    except Exception:
        # The old bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        db.rollback()
        print("Data insertion failed")
    db.close()
    print("connection closed")
    return rowId
def retrieveData(name):
    """Fetch the first record whose name matches `name`.

    Returns (row_id, data_dict); both are None when the lookup failed.
    """
    id = None
    crim_data = None
    db = pymysql.connect("localhost", "criminaluser", "", "criminaldb")
    cursor = db.cursor()
    print("database connected")
    # Parameterized query instead of string interpolation (SQL injection fix).
    query = "SELECT * FROM criminaldata WHERE name=%s"
    try:
        cursor.execute(query, (name,))
        result = cursor.fetchone()
        id = result[0]
        # Columns 1..10 follow the insert order used by insertData.
        crim_data = {
            "Name" : result[1],
            "Father's Name" : result[2],
            "Mother's Name" : result[3],
            "Gender" : result[4],
            "DOB(yyyy-mm-dd)" : result[5],
            "Blood Group" : result[6],
            "Identification Mark" : result[7],
            "Nationality" : result[8],
            "Religion" : result[9],
            "Crimes Done" : result[10]
        }
        print("data retrieved")
    except Exception:
        # Also raised (as TypeError) when fetchone() returns no row.
        print("Error: Unable to fetch data")
    db.close()
    print("connection closed")
    return (id, crim_data)
| true |
70d15655fc02a5bf067f6092971dced22933cfc9 | Python | johnmaster/Leetcode | /154.寻找旋转排序数组中的最小值 II/findMin.py | UTF-8 | 693 | 3.09375 | 3 | [] | no_license | """
执行用时 :44 ms, 在所有 python 提交中击败了83.70%的用户
内存消耗 :12 MB, 在所有 python 提交中击败了27.00%的用户
"""
class Solution(object):
    """LeetCode 154: minimum of a rotated sorted array that may contain
    duplicates."""

    def findMin(self, nums):
        """Binary search for the minimum, skipping runs of equal neighbours
        from both ends so the pivot comparison stays decisive."""
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            while lo < hi and nums[lo] == nums[lo + 1]:
                lo += 1
            while lo < hi and nums[hi] == nums[hi - 1]:
                hi -= 1
            mid = (lo + hi) // 2
            if nums[mid] > nums[hi]:
                # The rotation point (minimum) lies strictly right of mid.
                lo = mid + 1
            else:
                hi = mid
        return nums[lo]
| true |
4b5319b6924c72d4c0c49399d50833e3cb28cf85 | Python | barbagroup/pygbe_lspr_paper | /repro_packs/repro_results_plots/visualizations/visualization_images.py | UTF-8 | 6,707 | 2.796875 | 3 | [
"BSD-3-Clause",
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | '''This file contains functions that produce the images of the proteins
locations respect to the sensor.
'''
import numpy
from matplotlib import pyplot, rcParams
from mpl_toolkits.mplot3d import Axes3D
import os
def read_data_plot(sensor, prt_one, prt_two, elev, azim, prot_color,
                   file_name=None, file_ext=None, fig_size=None):
    '''Reads the mesh files (.vert and .face) necessary to plot the
    sensor-proteins display and save them as png.

    Arguments:
    ---------
    sensor : str, path to the sensor mesh files without the extention.
    prt_one: str, path to the protein one mesh files without the extention.
    prt_two: str, path to the protein two files without the extention.
    elev   : float, set the elevation of the axes (elevation angle in the z plane).
    azim   : float, set the azimuth of the axes (azimuth angle in the x,y plane).
    prot_color : str, matplotlib color for both protein wireframes.
    file_name / file_ext : when both are given, the figure is saved as
        figures/<file_name>.<file_ext>.
    fig_size : (width, height) tuple passed to pyplot.figure.

    Example:
    If the location of the mesh files, sensor.vert and sensor.face, is mesh_files/
    then: sensor = 'mesh_files/sensor'
    '''
    # load sensor (.vert holds x/y/z columns, .face holds 1-based triangles)
    xs, ys, zs = numpy.loadtxt(fname=sensor+'.vert', unpack=True)
    face_s = numpy.loadtxt(fname=sensor+'.face')
    face_sensor = face_s[:][:] - 1   # 1-based -> 0-based vertex indices
    # load protein 1 (its .face files carry two extra, unused columns)
    xp1, yp1, zp1 = numpy.loadtxt(fname=prt_one+'.vert', unpack=True)
    f_11, f_21, f_31, g1, h1 = numpy.loadtxt(fname=prt_one+'.face', unpack=True)
    face_prt1 = numpy.array(list(zip(f_11, f_21, f_31)))
    face_protein_1 = face_prt1[:][:] - 1
    # load protein 2
    xp2, yp2, zp2 = numpy.loadtxt(fname=prt_two+'.vert', unpack=True)
    f_12, f_22, f_32, g2, h2 = numpy.loadtxt(fname=prt_two+'.face', unpack=True)
    face_prt2 = numpy.array(list(zip(f_12, f_22, f_32)))
    face_protein_2 = face_prt2[:][:] - 1
    ### Plot image ###
    pyplot.switch_backend('agg') # non-interactive backend: render to file only
    fig = pyplot.figure(figsize=fig_size)
    # Figure.gca(projection='3d') was removed in Matplotlib 3.6; add_subplot
    # is the supported way to create a 3-D axes and behaves identically here.
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_trisurf(xs, ys, zs, triangles=face_sensor, linewidth=0.1,
                    edgecolor="black", color="white", alpha=0.2)
    ax.plot_trisurf(xp1, yp1, zp1, triangles=face_protein_1, linewidth=0.1,
                    edgecolor=prot_color, color="white", alpha=0.1 )
    ax.plot_trisurf(xp2, yp2, zp2, triangles=face_protein_2, linewidth=0.1,
                    edgecolor=prot_color, color="white", alpha=0.1 )
    ax.set_xlabel('X [$\AA$]')
    ax.set_ylabel('Y [$\AA$]')
    ax.set_zlabel('Z [$\AA$]')
    ax.xaxis.labelpad = 8
    ax.yaxis.labelpad = 8
    ax.zaxis.labelpad = 8
    arrayOfTicks = numpy.linspace(-200, 200, 11)
    ax.xaxis.pane.fill = False
    ax.yaxis.pane.fill = False
    ax.zaxis.pane.fill = False
    ax.grid(True)
    # Axes3D.w_{x,y,z}axis were deprecated and later removed; the plain
    # xaxis/yaxis/zaxis attributes refer to the same axis objects.
    ax.xaxis.set_ticks(arrayOfTicks)
    ax.yaxis.set_ticks(arrayOfTicks)
    ax.zaxis.set_ticks(arrayOfTicks)
    ilim = arrayOfTicks.min()
    slim = arrayOfTicks.max()
    ax.set_xlim3d(ilim, slim)
    ax.set_ylim3d(ilim, slim)
    ax.set_zlim3d(ilim, slim)
    ax.tick_params(pad=6)
    pyplot.xticks(rotation=30)
    pyplot.yticks(rotation=30)
    # Side views hide the axis pointing toward the camera.
    if (azim==-90):
        ax.yaxis.set_ticklabels([])
        ax.set_ylabel('')
    if (azim==-180):
        ax.xaxis.set_ticklabels([])
        ax.set_xlabel('')
    ax.view_init(elev, azim)
    if (file_name and file_ext):
        fig.savefig('figures/'+file_name+'.'+file_ext, bbox_inches='tight',
                    format=file_ext, pad_inches=-0.75)
def file_exist(files_list):
    '''Return True when every file in `files_list` exists under figures/.

    An empty list yields True (vacuously "all exist"), matching the
    original behaviour of all([]).
    '''
    return all(os.path.exists('figures/' + name) for name in files_list)
def main(paper=False):
    ''' Generates all sphere-BSA visualizations.

    paper=False writes png figures (notebook report); paper=True writes
    pdf figures (paper).  read_data_plot saves them under figures/.
    '''
    # Output format differs between the two targets; size/font are shared.
    if paper:
        file_ext = 'pdf'
        fig_size = (9, 9)
        fs = 10
    else:
        file_ext = 'png'
        fig_size = (9, 9)
        fs = 10
    rcParams['font.family'] = 'serif'
    rcParams['font.size'] = fs
    #Case of 2 proteins in z
    sensor = 'mesh_files/sensor/sensor_2K_R8nm'
    prt_1z = 'mesh_files/BSA_sensor_2pz_d=1_00/bsa_d0.3_R8+1nm_z'
    prt_2z = 'mesh_files/BSA_sensor_2pz_d=1_00/bsa_d0.3_R8+1nm_-z'
    read_data_plot(sensor, prt_1z, prt_2z, elev=0, azim=-90,
                   prot_color='red',
                   file_name='2prot_1nm_z_R8nm',
                   file_ext=file_ext,
                   fig_size=fig_size)
    #Case of 2 proteins in x
    prt_1x = 'mesh_files/BSA_sensor_2px_d=1_00/bsa_d0.3_R8+1nm_x'
    prt_2x = 'mesh_files/BSA_sensor_2px_d=1_00/bsa_d0.3_R8+1nm_-x'
    read_data_plot(sensor, prt_1x, prt_2x, elev=0, azim=-90,
                   prot_color='blue',
                   file_name='2prot_1nm_x_R8nm',
                   file_ext=file_ext,
                   fig_size=fig_size)
    #Case of 2 proteins in y
    prt_1y = 'mesh_files/BSA_sensor_2py_d=1_00/bsa_d0.3_R8+1nm_y'
    prt_2y = 'mesh_files/BSA_sensor_2py_d=1_00/bsa_d0.3_R8+1nm_-y'
    read_data_plot(sensor, prt_1y, prt_2y, elev=0, azim=-180,
                   prot_color='green',
                   file_name='2prot_1nm_y_R8nm',
                   file_ext=file_ext,
                   fig_size=fig_size)
if __name__ == "__main__":
files_nb = ['2prot_1nm_z_R8nm.png', '2prot_1nm_x_R8nm.png', '2prot_1nm_y_R8nm.png']
files_paper =['2prot_1nm_z_R8nm.pdf', '2prot_1nm_x_R8nm.pdf', '2prot_1nm_y_R8nm.pdf']
status_nb = file_exist(files_nb)
status_paper = file_exist(files_paper)
if status_nb == False:
print('This can take couple of minutes, please wait while images for '
'notebook report are produced.')
main()
if status_paper == False:
print('This can take couple of minutes, please wait while images for '
'paper are produced.')
main(paper=True)
elif (status_nb and status_paper) == True:
print('Visualizations for notebook and paper already exist! If you want'
'to generate them again, please delete the existing ones.')
| true |
0a13ff28be8ce78861db73a193cc06e76dcc4e92 | Python | luoshihai/appscript | /kuaishou/config.py | UTF-8 | 659 | 3.171875 | 3 | [] | no_license | import random
first = ["你好啊! ", "hello ", "哈哈哈 "]
second = ["想赚钱做兼职吗? ", "想赚钱? 每天不贪稳定收入 ", "收徒弟, 每天赚点小钱 ", "想赚钱吗? 每天几百稳定收入! "]
third = ["加我联系方式 ", "加我哦 ", "加我吧 "]
four = ["看我昵称 ", "看名字 ", "我的名字 "]
five = (".", "..", "...", "....", ".....", "-_-", "_-_")
def get_send_str():
send_str = "".join(
[random.choice(first), random.choice(second), random.choice(third), random.choice(four), random.choice(five)])
return send_str
# print(get_send_str())
| true |
20f5f55582439376410423caaea6c3113438c1cb | Python | Arif159357/deterministic-finite-automaton | /deterministic finite automaton.py | UTF-8 | 2,271 | 3.359375 | 3 | [] | no_license | Name = Arif
# Original author's student ID (unused by the program).
ID = 1611041
def move(intial_state, i, state):
    """Return the DFA state reached from `intial_state` on symbol `i`.

    `state` maps state -> {symbol: next_state}.  When the state is
    unknown or has no transition for the symbol, the machine stays in
    place.  (The original fell through and implicitly returned None for
    states missing from the table, losing the current state.)
    """
    return state.get(intial_state, {}).get(i, intial_state)
# --- Interactive DFA builder & simulator (runs on import) ---
# state: {state_number: {symbol: next_state}} built from user input.
state = {}
user_input = input("how many state, transitions and final states, give space in between--> ")
print(" ")
# bol stays True while the entered transition table is deterministic.
bol = True
#user_secondInput = input("Numer of transitions--> ")
#print(" ")
user_thirdInput = input("size of alphabet--> ")
u = user_input.split(" ")
print(" ")
n = int(u[0])
m = int(u[1])
F = int(u[2])
# NOTE(review): F (final-state count) and a (alphabet size) are read but
# never used below.
a = user_thirdInput
for i in range(0,n):
    state[i] = {}
x = input("what are the alphabet symbol give space in between--> ")
xx = x.split(" ")
print(xx)
print(" ")
print("Which states will be connected with which char put space in between. For example 0 1 a and than press enter to give the next connection ")
print(" ")
# Read m transitions, each entered as "<from> <to> <symbol>".
for j in range(0, m):
    input_string = input()
    lst = input_string.split(" ")
    start = int(lst[0])
    char = lst[2]
    # NOTE(review): `next` shadows the builtin of the same name.
    next = int(lst[1])
    if start in state:
        # A second transition for the same (state, symbol) pair would make
        # the machine nondeterministic.
        if char in state[start]:
            print("NOT A DFA")
            bol = False
            break
        else:
            state[start][char] = next
if bol == True:
    final_state = input("Which states are the final state.If more than 1 give space in between. For example 1 3 are the final states--> ")
    print(" ")
    final = []
    f = final_state.split(" ")
    for i in range(len(f)):
        f2 = int(f[i])
        final.append(f2)
    string = (input("Input a string "))
    print(" ")
    # Simulate the DFA from state 0 over the input string.
    intial_state = 0
    for i in string:
        intial_state= move(intial_state,i,state)
    if intial_state in final:
        print("DFA accpets the input string")
    else:
        # NOTE(review): this rejection report scans the alphabet and can
        # print one line per symbol; the if/else pairing looks suspicious --
        # confirm the intended behaviour.
        if intial_state in state:
            for i in range(len(xx)):
                if xx[i] in state[intial_state]:
                    if state[intial_state][xx[i]] in final:
                        print("DFA reject the input string")
                else:
                    print("NO move from intial state to final")

    # print("DFA rejectes the input string")
1e2b2bb76dc4c87987ce33e9138e5c41f88b32f5 | Python | Akaito/ZeroToBoto | /assets/backup-complete.py | UTF-8 | 1,765 | 2.78125 | 3 | [
"Zlib"
] | permissive | # backup-complete.py
import os
import boto3
#====================================================================
# Walk the current directory tree and upload every non-hidden file to S3
# under <current-folder-name>/<relative-path>.
BUCKET_NAME = 'zerotoboto' # (create the bucket first in the AWS GUI)
session = boto3.Session(profile_name='default')
s3 = session.client('s3')
cwd = os.getcwd()
prefix = cwd.split('/')[-1] # get current folder's name; not all stuff leading to it
# visit the current directory and every one within it, recursively
for here, dirnames, filenames in os.walk(cwd):
    # Skip hidden directories entirely: pruning `dirnames` in place makes
    # os.walk ignore them AND everything beneath them.  (The old check
    # `here[0] == '.'` looked at the first character of an *absolute* path,
    # so it never matched -- the bug the original comments pointed at.)
    dirnames[:] = [d for d in dirnames if not d.startswith('.')]

    for filename in filenames:
        # don't upload hidden files
        if filename.startswith('.'):
            continue

        # absolute, full path to the file on disk
        file_abspath = here + '/' + filename
        # S3 object key: path relative to cwd
        key = file_abspath[len(cwd):]
        if key[0] == '/': # cleaner S3 keys on Linux
            key = key[1:] # remove leading slash

        # prepend the prefix so files aren't all dumped straight into bucket root
        key = prefix + '/' + key

        # extra credit: skip the upload when the S3 copy is already current
        # (compare hashes, or modtime in S3 versus modtime on disk).
        s3.upload_file(file_abspath, BUCKET_NAME, key)
        # Parenthesized so this statement runs under both Python 2 and 3.
        print('Uploaded {}\n  to {}:{}'.format(file_abspath, BUCKET_NAME, key))
| true |
6fc9dce60219640c49374dab0d6c6ad9378b421a | Python | ScottishCovidResponse/simple_network_sim | /simple_network_sim/common.py | UTF-8 | 3,363 | 3.109375 | 3 | [
"BSD-2-Clause"
] | permissive | """
Assortment of useful functions
"""
# pylint: disable=import-error
# pylint: disable=duplicate-code
import logging
from enum import Enum
from typing import Callable, Any, NamedTuple, List
import git # type: ignore
from data_pipeline_api import standard_api
logger = logging.getLogger(__name__)
DEFAULT_GITHUB_REPO = "https://github.com/ScottishCovidResponse/simple_network_sim.git"
class IssueSeverity(Enum):
"""
This class defines the severity levels for issues found while running the model or loading data.
"""
LOW = 1
MEDIUM = 5
HIGH = 10
def log_issue(
logger: logging.Logger,
description: str,
severity: IssueSeverity,
issues: List[standard_api.Issue]
) -> List[standard_api.Issue]:
"""
Appends issue to the issues list whilst logging its description at the appropriate log level
:param logger: a python logger object
:param description: an explanation of the issue found
:param severity: the severity of the issue, from an enum of severities
:param issues: list of issues, it will be modified in-place
:return: Returns the same list of issues passed as a parameter for convenience
"""
log = {
IssueSeverity.LOW: logger.info,
IssueSeverity.MEDIUM: logger.warning,
IssueSeverity.HIGH: logger.error,
}[severity]
log(description)
issues.append(standard_api.Issue(description=description, severity=severity.value))
return issues
def generateMeanPlot(listOfPlots):
    """From a list of disease evolution timeseries, compute the average evolution.

    For every time step, the result holds the mean of that step's values
    across all input timeseries.

    :param listOfPlots: List of disease evolutions (non-empty list of lists)
    :type listOfPlots: list
    :return: The average evolution
    :rtype: list
    """
    logger.debug(listOfPlots)
    numPlots = len(listOfPlots)
    # zip(*...) walks the time steps column-wise, replacing the manual
    # index-based double loop.  Ragged inputs are truncated to the shortest
    # series instead of raising IndexError.
    return [float(sum(column)) / numPlots for column in zip(*listOfPlots)]
class Lazy:
    """
    Defers evaluation of an expression until it is rendered as text.

    Wrap an expensive expression in a zero-argument callable and hand the
    wrapper to a logging call::

        logger.info("The value of z is: %s", Lazy(lambda: x + y))

    ``x + y`` only runs if the logger actually formats the message, i.e.
    when ``str``/``repr`` is invoked on this object.

    :param f: A function which takes no parameters and which will only be evaluated when str is called in the returning
              object
    """

    def __init__(self, f: Callable[[], Any]):
        self.f = f

    def __str__(self):
        value = self.f()
        return str(value)

    def __repr__(self):
        value = self.f()
        return repr(value)
class RepoInfo(NamedTuple):
    """
    The info needed by the data pipeline API

    Produced by :func:`get_repo_info`.
    """
    # Commit hash of HEAD ("" when not inside a git repository)
    git_sha: str
    # Remote URL of "origin" (DEFAULT_GITHUB_REPO when no repo is found)
    uri: str
    # True when the working tree has uncommitted changes (or there is no repo)
    is_dirty: bool
def get_repo_info() -> RepoInfo:
    """
    Retrieves the current git sha and uri for the current git repo

    :return: A RepoInfo object. If not inside a git repo, is_dirty will be True, git_sha empty and uri will be a
             default value
    """
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        # Not running from a checkout: report a dirty state with the
        # canonical upstream URL so downstream metadata is still meaningful.
        return RepoInfo(git_sha="", uri=DEFAULT_GITHUB_REPO, is_dirty=True)
    origin_url = next(repo.remote("origin").urls)
    return RepoInfo(
        git_sha=repo.head.commit.hexsha,
        uri=origin_url,
        is_dirty=repo.is_dirty(),
    )
| true |
012f12c68208fc0714907b258827173d3c34b0c9 | Python | alabiansolution/python-wknd1902 | /day1/chapter5/while1.py | UTF-8 | 166 | 3.71875 | 4 | [] | no_license |
# Count from 1 to 10 inclusive.
a = 1
while a <= 10:
    print(a)
    a += 1

# Print every student name.  The loop must stop *before* len(student):
# the original `x <= len(student)` read index 5 of a 5-element list and
# raised IndexError on the final iteration.
student = ['Ayo', 'Tosin', 'Rolland', 'Isaac', 'Efe']
x = 0
while x < len(student):
    print(student[x])
    x += 1
| true |
7471eebe8b44541752b3bc193c9a02e62bd9c98a | Python | kjk402/PythonWork | /Itiscote/array/array_5.py | UTF-8 | 118 | 2.78125 | 3 | [] | no_license | # 5 안테나
# Problem 5 ("antenna"): the lower median of n points minimises the sum of
# absolute distances to all of them, so print it.
n = int(input())
heights = sorted(map(int, input().split()))
print(heights[(n - 1) // 2])
"""
4
5 1 7 9
"""
e95c540e5e425bdd66385409dfdf5d2ae9abed61 | Python | McdAutomation/dump | /codemonk/5-CM-1d.py | UTF-8 | 437 | 3.140625 | 3 | [] | no_license | #https://www.hackerearth.com/practice/data-structures/arrays/1-d/practice-problems/algorithm/maximize-the-earning-137963bc-323025a6/
T = int(input())
for t in range(T):
N, price = tuple(map(int,input().split()))
arr = list(map(int,input().split()))
max = -1
num_of_buildings = 0
for i in range(N):
if arr[i] > max:
max = arr[i]
num_of_buildings += 1
print(num_of_buildings*price)
| true |
cf43070fe15834170ce32727f8f3324c182a28d7 | Python | olgatsiouri1996/CpG_island_identificator | /src/gui/cpg_island_identificator_gui.py | UTF-8 | 3,825 | 3.015625 | 3 | [
"MIT"
] | permissive | # python3
from gooey import *
from pyfaidx import Fasta
import pandas as pd
# input parameters
@Gooey(required_cols=2, program_name='CpG island identificator', header_bg_color= '#DCDCDC', terminal_font_color= '#DCDCDC', terminal_panel_color= '#DCDCDC')
def main():
    """GUI entry point: slide a window along every fasta record (both
    strands) and report windows meeting the Gardiner-Garden & Frommer
    CpG-island criteria (min %GC and min Obs/Exp CpG ratio), sorted by
    Obs/Exp descending, as a tab-separated table appended to the output file."""
    ap = GooeyParser(description="identify CpG islands on one or many sequences based on the Gardiner-Garden and Frommer (1987) method")
    ap.add_argument("-in", "--input", required=True, widget='FileChooser', help="input single or multi-fasta file")
    ap.add_argument("-gc", "--gc", required=False, type=int, default=50, help="min GC content(support for S and W nucleotides)")
    ap.add_argument("-ratio", "--ratio", required=False, type=float, default=0.6, help="min ratio of the Obs/Exp value")
    ap.add_argument("-step", "--step", required=False, type=int, default=50, help="step size for CpG identification")
    ap.add_argument("-win", "--window", required=False, type=int, default=200, help="window size for CpG identification")
    ap.add_argument("-out", "--output", required=True, widget='FileSaver', help="output txt file")
    args = vars(ap.parse_args())
    # observed CpG count in a window
    def obs(seq):
        return seq.count('CG')
    # Obs/Exp ratio; expected CpG count is C*G/window-length
    def ratio(seq):
        obs = seq.count('CG')
        exp = seq.count('C') * seq.count('G') / int(args['window'])
        return round(obs/exp, 2)
    # %GC of a window (S = G/C ambiguity code, W = A/T)
    def gc(seq):
        gc = sum(seq.count(x) for x in ["G", "C", "S"])
        return round(gc * 100 / sum(seq.count(x) for x in ["A", "T", "G", "C", "S", "W"]), 2)
    # accumulators for the output table
    gcobs = []
    gccont = []
    start = []
    end = []
    strand = []
    headers = []
    gcratio = [] # setup empty lists
    # import multi-fasta
    features = Fasta(args['input'])
    for key in features.keys():
        # forward sequence
        for i in range(0, features[key][:].end - args['window'] + 1, args['step']):
            if gc(features[key][i:i + args['window']].seq) >= args['gc'] and ratio(features[key][i:i + args['window']].seq) >= args['ratio']:
                gcobs.append(obs(features[key][i:i + args['window']].seq))
                gccont.append(gc(features[key][i:i + args['window']].seq))
                gcratio.append(ratio(features[key][i:i + args['window']].seq))
                start.append(i + 1) # fix python index
                end.append(i + args['window']) # retrieve the end position of each putative CpG island
                headers.append(key)
                strand.append('+')
            # reverse complement; coordinates are negated offsets from the
            # record end (assumes pyfaidx `.end` is the sequence length --
            # TODO confirm)
            if gc(features[key][i:i + args['window']].reverse.complement.seq) >= args['gc'] and ratio(features[key][i:i + args['window']].reverse.complement.seq) >= args['ratio']:
                gcobs.append(obs(features[key][i:i + args['window']].reverse.complement.seq))
                gccont.append(gc(features[key][i:i + args['window']].reverse.complement.seq))
                gcratio.append(ratio(features[key][i:i + args['window']].reverse.complement.seq))
                start.append(-1*(i + args['window'] - features[key][:].end))
                end.append(-1*(i - features[key][:].end)) # retrieve the end position of each putative CpG island
                headers.append(key)
                strand.append('-')
    # create data frame
    df = pd.DataFrame()
    df['id'] = headers
    df['start'] = start
    df['end'] = end
    df['strand'] = strand
    df[''.join(['%','GC'])] = gccont
    df['obs'] = gcobs
    df['obs/exp'] = gcratio
    df = df.sort_values(by=['obs/exp'], ascending=False) # sort by biggest obs/exp ratio
    # export (append mode: repeated runs accumulate in the same file)
    with open(args['output'], 'a') as f:
        f.write(
            df.to_csv(header = True, index = False, sep = '\t', doublequote= False, lineterminator= '\n')
        )

if __name__ == '__main__':
    main()
6a8879b2a82a693452c5f3f19f83e41d523bab31 | Python | ian-r-rose/intake_geopandas | /intake_geopandas/geopandas.py | UTF-8 | 2,266 | 2.625 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
from . import __version__
from intake.source.base import DataSource, Schema
import json
import dask.dataframe as dd
from datetime import datetime, timedelta
class ShapeSource(DataSource):
    """Shape file intake source.

    Reads a shapefile (or anything ``geopandas.read_file`` understands) into
    a single in-memory GeoDataFrame exposed as one partition.
    """
    name = 'shape'
    version = __version__
    container = 'dataframe'
    partition_access = True
    def __init__(self, urlpath, bbox=None, geopandas_kwargs=None, metadata=None):
        """
        Parameters
        ----------
        urlpath : str or iterable, location of data
            Either the absolute or relative path to the file or URL to be opened.
            Some examples:
            - ``{{ CATALOG_DIR }}data/states.shp``
            - ``http://some.domain.com/data/states.shp``
        bbox : tuple | GeoDataFrame or GeoSeries, default None
            Filter features by given bounding box, GeoSeries, or GeoDataFrame.
            CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
        geopandas_kwargs : dict
            Any further arguments to pass to geopandas's read_file.
        """
        self.urlpath = urlpath
        self._bbox = bbox
        self._geopandas_kwargs = geopandas_kwargs or {}
        # Lazily populated by _open_dataset on first schema/read access.
        self._dataframe = None
        super(ShapeSource, self).__init__(metadata=metadata)
    def _open_dataset(self, urlpath):
        """Open dataset using geopandas and use pattern fields to set new columns
        """
        # Imported here so the plugin can be registered without geopandas.
        import geopandas
        self._dataframe = geopandas.read_file(
            urlpath, bbox=self._bbox, **self._geopandas_kwargs)
    def _get_schema(self):
        # Loading is required to know the column dtypes, so the whole file is
        # read on the first schema request.
        if self._dataframe is None:
            self._open_dataset(self.urlpath)
        dtypes = self._dataframe.dtypes.to_dict()
        dtypes = {n: str(t) for (n, t) in dtypes.items()}
        return Schema(datashape=None,
                      dtype=dtypes,
                      shape=(None, len(dtypes)),
                      npartitions=1,
                      extra_metadata={})
    def _get_partition(self, i):
        # Single-partition source: every index returns the whole frame.
        self._get_schema()
        return self._dataframe
    def read(self):
        self._get_schema()
        return self._dataframe
    def to_dask(self):
        # Dask output is not supported for this source.
        raise NotImplementedError()
    def _close(self):
        # Drop the cached frame; a later read will reload from urlpath.
        self._dataframe = None
| true |
77ee730f4d1fcb1cfdd09eb7af3c9fd2b7cdaa14 | Python | lenhatquang1998/lenhatquang-fundamental-c4e21 | /session3/baitap1.py | UTF-8 | 296 | 3.203125 | 3 | [] | no_license | menu_items = ["com rang", "bun bo", "bun dau"]
for i in range(len(menu_items)):
print(i+1,". ", menu_items[i], sep="")
# menu_items = len(menu_items)
# print(menu_items)
# print(*menu_items, sep=",")
# new=input("ban thich an gi them ?")
# menu_items.append(new)
# print(*menu_items, sep=",") | true |
611e2e9ad36411d193b362f05d02ce33c559669f | Python | khorokhosh/telegram | /telegram_api/app/event/Event.py | UTF-8 | 2,067 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | # 使用监听的方式 采集消息
import telethon
from telethon import TelegramClient, events
from telethon.tl.types import InputMessagesFilterUrl
from redis import Redis
import time, random, datetime, re, json
# api_id = 1848782
# api_hash = 'db242eb477ce069cb76d299f562adba2'
# phone = '+86 137 8230 8818'
api_id = 1970209
api_hash = '382e4d2d424a8b4dcd808e319de5ea6b'
phone = '+86 176 3001 3170'
client = TelegramClient(phone, api_id, api_hash)
channel = ['https://t.me/onemore321','https://t.me/hao12324','https://t.me/hao123mm','https://t.me/jianghai123','https://t.me/datesales']
#实例化一个redis
redis_obj = Redis(host='localhost',port=6379,password='',decode_responses=True,charset='UTF-8', encoding='UTF-8')
# Clean a raw message and extract the useful (title, link) entries from it.
def filter_data(str):
    """Parse numbered ``name (link) - N人`` entries from a Telegram message.

    Three patterns are tried from most to least specific; the first one
    that matches wins.  Returns a list of ``(title, link)`` tuples, with
    the member count (when captured) appended to the title.
    """
    patterns = (
        r'\d[.].*?(.*?) [(](.*?)[)] - (.*?)人',
        r'\d[.].*?[[](.*?)[]][(](.*?)[)] - (.*?)人',
        r'\d+[.].*?[[](.*?)][(](.*?)[)]\n',
    )
    result = []
    for pattern in patterns:
        result = re.findall(re.compile(pattern, re.S), str)
        if result:
            break
    cleaned = []
    for match in result:
        # Three captured groups means the member count was captured too.
        title = match[0] + ' - ' + match[2] if len(match) > 2 else match[0]
        cleaned.append((title, match[1]))
    return cleaned
@client.on(events.NewMessage(chats=(channel)))
async def normal_handler(event):
    """Telethon handler: fires for every new message in the watched channels.

    Parses group listings out of the message text and queues them in Redis.
    """
    # Clean the raw message and extract the valid (title, link) entries.
    result = filter_data(event.text)
    # Serialise each entry as JSON and push it onto the Redis list for
    # later processing.
    for i in result:
        print(i)
        redis_obj.lpush('tg_group_list',json.dumps({"title": i[0],"link": i[1]}))
# seconds = random.randint(120,180)
# print(seconds)
# time.sleep(seconds)
client.start()
client.run_until_disconnected() | true |
d377d9a24c5457bad7d775947976ecd655da2b4c | Python | legendddhgf/CS115-Password-Manager | /Computer_Client/Computer_App/secuure_gui_accinfo.py | UTF-8 | 2,822 | 2.609375 | 3 | [] | no_license | #!/usr/bin/python3
#gui_accinfo for secuure
#developed by Isaak Cherdak
import tkinter
from tkinter import messagebox
from db import *
from secuure_gui_accadd import secuure_accadd
from secuure_gui_accrem import secuure_accrem
from secuure_gui_accwebinfo import secuure_accwebinfo
window_info = None
acc_user = None
def secuure_accinfo(user_str):
    """Open the account-information window for the given user.

    Shows one button per stored account entry (clicking opens the per-site
    detail view) plus Add/Remove/Refresh controls.  Stores the user name
    and window handle in module globals so the other callbacks can reach
    them.
    """
    global acc_user
    global window_info
    acc_user = user_str
    bcolor = '#9ACAEE'
    window_info = tkinter.Toplevel()
    natwidth = window_info.winfo_screenwidth()  # get native resolutions
    natheight = window_info.winfo_screenheight()
    window_info.configure(background = bcolor)
    # Start with a window a quarter of the screen area.
    window_info.geometry(("%dx%d") % (natwidth / 2, natheight / 2))
    window_info.title("Account Information for '%s'" % (user_str))
    window_info.bind('<Key>', map_list_account_info_key)
    # One button per stored account; info[2] is presumably the per-site
    # account name column of getPasswordsForUser -- TODO confirm.
    button_usernames = []
    data = getPasswordsForUser(acc_user)
    for info in data:
        # Bind the row's value as a default argument: a plain
        # `lambda: ... info[2]` late-binds `info` and would make every
        # button open the row of the *last* loop iteration.
        button_usernames.append(tkinter.Button(window_info, text = info[2],
                command = lambda site=info[2]: secuure_accwebinfo(acc_user, site)))
    for index, button_user in enumerate(button_usernames):
        button_user.grid(row = index, column = 0)
    # Artificial horizontal spacing between the account list and the
    # control buttons.
    blank_labels = []
    num_spaces = 10
    space_start = 2
    for i in range(0, num_spaces):
        blank_labels.append(tkinter.Label(window_info, text = ' ',
            background = bcolor))
        blank_labels[i].grid(row = 0, column = space_start + i)
    button_add = tkinter.Button(window_info, text = "Add Account",
            command = add_but_cmd)
    button_remove = tkinter.Button(window_info,
            text = "Remove Account", command = rem_but_cmd)
    button_refresh = tkinter.Button(window_info,
            text = "Refresh Info", command = refresh_info)
    button_add.grid(row = 0, column = space_start + num_spaces)
    button_remove.grid(row = 1, column = space_start + num_spaces)
    button_refresh.grid(row = 2, column = space_start + num_spaces)
def add_but_cmd():
    """Callback for the "Add Account" button: open the add-account dialog."""
    global acc_user
    global window_info
    # The dialog manages its own lifetime; its return value was previously
    # stored in an unused local, which is dropped here.
    secuure_accadd(acc_user)
def rem_but_cmd(): # wrapper: the button callback itself takes no arguments
    """Callback for the "Remove Account" button: open the removal dialog."""
    global acc_user
    global window_info
    secuure_accrem(acc_user)
def refresh_info():
    """Rebuild the window: destroy it and re-open it with fresh data."""
    global window_info
    global acc_user
    window_info.destroy()
    secuure_accinfo(acc_user)
def map_list_account_info_key(event):
    """Key handler for the window: close it when Escape is pressed."""
    global window_info
    # '\x1b' is the one-character string with code point 27 (Escape), so
    # this is equivalent to checking len == 1 and ord == 27.
    if event.char == '\x1b':
        window_info.destroy()
| true |
25bbf06709f70d5dfe55f6a3385ed845f1287681 | Python | bnewbold/divergence | /divergence | UTF-8 | 10,772 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""
License: MIT
Author: Bryan Newbold <bnewbold@archive.org>
Date: July 2017
See README.md and LICENSE.
"""
from __future__ import print_function
import re
import json
import sys, os
import difflib
import argparse
import requests
import subprocess
import logging as log
DEFAULT_HEADER = """This page was generated automatically from Markdown using
the 'divergence' tool. Edits will need to be merged manually."""
class DivergenceProgram:
    """Uploads Markdown files to Confluence pages through the REST API.

    Conversion to Confluence "storage" format is delegated to pandoc with a
    custom lua writer; pages are created or updated per source file.
    """
    def __init__(self, user, password, url, space,
            force_update=False,
            include_toc=False,
            header=None,
            no_header=False):
        self.api = requests.Session()
        self.api.auth = (user, password)
        self.api.headers.update({'Content-Type': 'application/json'})
        self.base_url = url
        self.default_space = space
        self.force_update = force_update
        self.include_toc = include_toc
        self.header = header # from command-line arg
        self.no_header = no_header # from command-line arg
        # TODO: clean up this code duplication... use pandoc data directory
        # instead?
        self.pandoc_helper_path = None
        for p in ('./pandoc_confluence.lua',
                  '/usr/local/lib/divergence/pandoc_confluence.lua',
                  '/usr/lib/divergence/pandoc_confluence.lua'):
            if os.path.exists(p):
                self.pandoc_helper_path = p
                break
        if self.pandoc_helper_path is None:
            log.error("Could not find pandoc helper (pandoc_confluence.lua), bailing")
            sys.exit(-1)
        self.pandoc_meta_path = None
        for p in ('./meta-json.template',
                  '/usr/local/lib/divergence/meta-json.template',
                  '/usr/lib/divergence/meta-json.template'):
            if os.path.exists(p):
                self.pandoc_meta_path = p
                break
        if self.pandoc_meta_path is None:
            log.error("Could not find pandoc helper (meta-json.template), bailing")
            sys.exit(-1)
    def get_page(self, title, space_key=None, page_id=None):
        """
        Returns None if not found, otherwise a dict with id, space, and body (in storage format)
        """
        if space_key is None:
            space_key = self.default_space
        if not page_id:
            # Look the page up by title within the space.
            resp = self.api.get(self.base_url + "/rest/api/content",
                params={"spaceKey": space_key,
                        "title": title,
                        "expand": "body.storage,body.editor,version,space",
                        "type": "page"})
        else:
            resp = self.api.get(self.base_url + "/rest/api/content/%d" % int(page_id),
                params={"expand": "body.storage,body.editor,version,space",
                        "type": "page"})
        log.debug(resp)
        log.debug(resp.content)
        assert resp.status_code == 200
        respj = resp.json()
        if not page_id:
            if respj['size'] == 0:
                assert page_id is None, "Couldn't fetch given page id"
                return None
            assert respj['size'] == 1, "Expect single result for title lookup"
            page = respj['results'][0]
            assert page['space']['key'].upper() == space_key.upper(), "Expect spaces to match"
        else:
            # We did a fetch by page_id directly
            page = respj
        return {"id": int(page['id']),
                "version": int(page['version']['number']),
                "space": page['space']['key'],
                "body": page['body']['storage']['value'],
                "body_editor": page['body']['editor']['value']}
    def get_conversion(self, body):
        """
        Uses the REST API to convert from storage to 'editor' format.
        """
        resp = self.api.post(self.base_url + "/rest/api/contentbody/convert/editor",
            json={"representation": "storage",
                  "value": body })
        log.debug(resp)
        log.debug(resp.content)
        assert resp.status_code == 200
        return resp.json()['value']
    def create_page(self, title, body, space_key=None):
        """Create a new page with the given title and storage-format body."""
        if space_key is None:
            space_key = self.default_space
        resp = self.api.post(self.base_url + "/rest/api/content",
            json={"space": { "key": space_key },
                  "type": "page",
                  "title": title,
                  "body": {
                    "storage": {
                        "representation": "storage",
                        "value": body } } } )
        log.debug(resp)
        log.debug(resp.content)
        assert resp.status_code == 200
    def update_page(self, title, body, page_id, prev_version):
        """Overwrite an existing page, bumping its version number by one."""
        resp = self.api.put(self.base_url + "/rest/api/content/%d" % page_id,
            json={"type": "page",
                  "title": title,
                  "version": {"number": prev_version+1},
                  "body": {
                    "storage": {
                        "representation": "storage",
                        "value": body } } } )
        log.debug(resp)
        log.debug(resp.content)
        assert resp.status_code == 200
    def title_from_path(self, path):
        """Derive a default page title from a file name (underscores become
        spaces; everything after the first '.' is dropped)."""
        title = path.split('.')[0].replace('_', ' ')
        # TODO: only alphanum and spaces?
        return title
    def convert(self, f, header=None):
        """Convert Markdown file *f* to Confluence storage format, optionally
        prepending a table-of-contents macro and an info-box header."""
        proc = subprocess.run(["pandoc", "-t", self.pandoc_helper_path, f],
            stdout=subprocess.PIPE)
        assert proc.returncode == 0
        body = proc.stdout.decode('UTF-8')
        if self.include_toc:
            body = """<ac:structured-macro ac:name="toc">
<ac:parameter ac:name="minLevel">1</ac:parameter>
<ac:parameter ac:name="maxLevel">3</ac:parameter>
</ac:structured-macro>""" + body
        if header:
            body = """<ac:structured-macro ac:name="info">
<ac:rich-text-body>
<p>""" + header + """</p>
</ac:rich-text-body>
</ac:structured-macro>\n""" + body
        return body
    def metadata(self, f):
        """Extract the YAML metadata of *f* as a dict (via pandoc template)."""
        proc = subprocess.run(["pandoc", "--template", self.pandoc_meta_path, f],
            stdout=subprocess.PIPE)
        assert proc.returncode == 0
        return json.loads(proc.stdout.decode('UTF-8'))
    def strip_tags(self, text):
        """
        THIS IS NOT A SANITIZER, just a naive way to strip (most?) HTML tags.

        Used only to compare old/new page bodies while ignoring markup noise.
        """
        return re.sub('<[^<]+?>', '', text)
    def run(self, files):
        """Create or update one Confluence page per input Markdown file,
        skipping files whose rendered content is unchanged (unless
        force_update is set)."""
        for f in files:
            meta = self.metadata(f)
            log.debug(meta)
            title = meta.get('confluence-page-title',
                self.title_from_path(f))
            space_key = meta.get('confluence-space-key',
                self.default_space)
            page_id = meta.get('confluence-page-id')
            header = not self.no_header and ( # --no-header trumps all
                self.header or # command-line value gets priority
                meta.get('disclaimer-header') or # fall back to per-file
                DEFAULT_HEADER )
            log.debug(title)
            body = self.convert(f, header)
            prev = self.get_page(title, space_key=space_key, page_id=page_id)
            log.debug(prev)
            if prev is None:
                self.create_page(title, body, space_key=space_key)
                print(f + ": created")
            else:
                # Compare tag-stripped "editor" renderings so formatting-only
                # differences in storage markup don't force an update.
                prev_body = self.strip_tags(prev['body_editor'])
                this_body = self.strip_tags(self.get_conversion(body))
                if prev_body != this_body or self.force_update:
                    # Show a diff in verbose mode
                    log.info('Diff of ' + f + ' changes:\n' + ''.join(difflib.unified_diff(
                        prev_body.splitlines(keepends=True),
                        this_body.splitlines(keepends=True),
                        fromfile='old',
                        tofile='new')))
                    self.update_page(title, body, prev['id'], prev['version'])
                    print(f + ": updated")
                else:
                    print(f + ": no change")
def main():
    """Command-line entry point: parse args/environment, verify pandoc is
    available, and run the uploader over the given files."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
Simple Markdown-to-Confluence uploader, using pandoc and the Confluence REST
API.
required environment variables:
    CONFLUENCE_USER
    CONFLUENCE_PASSWORD
    CONFLUENCE_URL
""")
    parser.add_argument("-v", "--verbose",
        action="count",
        default=0,
        help="Show more debugging statements (can be repeated)")
    parser.add_argument("-s", "--space-key",
        default=None,
        help='Confluence Space Key (usually like "PROJ" or "~username")')
    parser.add_argument("-f", "--force",
        action='store_true',
        help='Forces an update even if we think nothing has changed')
    parser.add_argument("--header",
        action='store',
        help='Specify header to insert into the confluence document')
    parser.add_argument("--no-header",
        action='store_true',
        help='Disables inserting disclaimer headers into the confluence document')
    parser.add_argument("--toc",
        action='store_true',
        help='Inserts table-of-contents into the confluence document')
    parser.add_argument("FILE", nargs='+')
    args = parser.parse_args()
    if args.verbose > 1:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
    elif args.verbose > 0:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.INFO)
    else:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.WARN)
    try:
        user = os.environ['CONFLUENCE_USER']
        password = os.environ['CONFLUENCE_PASSWORD']
        url = os.environ['CONFLUENCE_URL']
    except KeyError:
        parser.exit(-1, "Need to pass environment variable configs (see --help)\n")
    log.info("User: " + user)
    log.info("URL: " + url)
    if url.endswith('/'):
        url = url[:-1]
    if args.space_key is None:
        # Fall back to the user's personal ("home") space.
        args.space_key = "~" + user
        log.warning("Defaulting to home space: %s" % args.space_key)
    if args.header and args.no_header:
        parser.exit(-1, "Pick one of --header and --no_header.\n")
    try:
        subprocess.check_output(['pandoc', '--version'])
    # Catch only the failures the probe can produce (missing binary or
    # non-zero exit); the previous bare `except:` also swallowed
    # KeyboardInterrupt and SystemExit.
    except (OSError, subprocess.CalledProcessError):
        parser.exit(-1, "This script depends on 'pandoc', which doesn't "
            "seem to be installed.\n")
    dp = DivergenceProgram(user, password, url, args.space_key,
        force_update=args.force,
        header=args.header,
        include_toc=args.toc)
    dp.run(args.FILE)

if __name__ == '__main__':
    main()
| true |
121a0ac037336117e1ad94b193604f0442aa62a9 | Python | Game-of-Life-191T/Game-of-Life | /eca_reference.py | UTF-8 | 3,913 | 3.6875 | 4 | [] | no_license | # Reference: https://matplotlib.org/matplotblog/posts/elementary-cellular-automata/
import numpy as np
rng = np.random.RandomState(42)
data = rng.randint(0,2,20)
# prints random num from seed state 42 in array size (20) with numbbers 0 & 1
print(data)
# a given cell C only knows about the state of it's left and right neighbors
# labeled L and R respectively. We can define a function or rule, f(L, C, R)
# which maps the cell state to either 0 or 1
# Input cells are binary values there are 2^3 = 8 possible inputs into fxn
# prints binary representation of size 3 8 times (2^3 = 8)
for i in range(8):
print(np.binary_repr(i,3))
# assign 0 or 1 to each input triplet. Output of f is the value which will
# replace the current cell C in the next time step. In total there are 2^2^3 = 2^8 = 256
# possible rules for updating a cell. Known as wolfram code, for the update rules in which
# rule is represented by an 8 bit binary number.
# Example "Rule 30" could be constructed by first converting to binary and then building
# an array for each bit
# assign number to 30
rule_number = 30
# rule string = binary representation of 30 in size 8
rule_string = np.binary_repr(rule_number, 8)
# turn rule_string into an array of size 8
rule = np.array([int(bit) for bit in rule_string])
print(rule, rule_string, rule_number)
# Wolfram code associates the leading bit with '111' and the final bit with '000'
# for rule 30 the relationship between the input, rule index and output is as follows
for i in range(8):
triplet = np.binary_repr(i,3)
print(f"input: {triplet}, index: {7-i}, output {rule[7-i]}")
# can define a fxn which maps the input cell info with the associated rule index
# essentially we convert the binary input to decimal and adjust the index range
# returns the output value at the specified index
def rule_index(triplet):
    """Map an (L, C, R) neighbourhood to its index in the Wolfram rule array.

    The triplet is read as a 3-bit binary number and flipped so that input
    '111' maps to index 0 and '000' to index 7 (Wolfram's convention).
    Arithmetic (not bit-shifts) is used so float 0.0/1.0 cells also work.
    """
    left, centre, right = triplet
    weighted = 4 * left + 2 * centre + right
    return int(7 - weighted)
# returns output 0 from index 2 ('101')
print(rule[rule_index((1,0,1))])
# finally use numpy to create a data struct containing all the triplets
# for our state array and apply the fxn across the appropriate axis to
# determine our new state.
# honestly don't get this but come back and try to when everything is done
all_triplets = np.stack([
np.roll(data, 1),
data,
np.roll(data, -1)
])
# prints single update of our cellular automata
new_data = rule[np.apply_along_axis(rule_index,0,all_triplets)]
print(new_data)
# To do many updates and record the state over time, we will create a fxn
# aka all together now
def CA_run(initial_state, n_steps, rule_number):
    """Evolve an elementary cellular automaton with wrap-around boundaries.

    Returns an (n_steps, m_cells) float array whose first row is
    ``initial_state`` and whose later rows are successive updates under the
    given Wolfram rule number.
    """
    rule = np.array([int(bit) for bit in np.binary_repr(rule_number, 8)])
    history = np.zeros((n_steps, len(initial_state)))
    history[0, :] = initial_state
    for step in range(1, n_steps):
        previous = history[step - 1, :]
        # Column k holds the (left, centre, right) neighbourhood of cell k.
        neighbourhoods = np.stack([np.roll(previous, 1),
                                   previous,
                                   np.roll(previous, -1)])
        history[step, :] = rule[np.apply_along_axis(rule_index, 0, neighbourhoods)]
    return history
initial = np.array([0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0])
data = CA_run(initial, 10, 30)
print(data)
# now visiuals since line 102 only gives us binary representation of the results
import matplotlib.pyplot as plt
# color map using binary
plt.rcParams['image.cmap'] = 'binary'
rng = np.random.RandomState(0)
data = CA_run(rng.randint(0, 2, 100), 50, 173)
# This makes a pyramid, test it out as needed
#initial = np.zeros(300)
#initial[300//2] = 1
#data = CA_run(initial, 150, 30)
# set figure size
fig, ax = plt.subplots(figsize=(10, 5))
# plot values of a 2d matrix/array as color-coded image.
ax.matshow(data)
# take off axis numbers
ax.axis(False);
# show the plotted graph
plt.show()
| true |
458110bf61f07755e0a9e737ce582670d8e67fa7 | Python | vijayjoshi16/ML_MODEL_PMAT | /app.py | UTF-8 | 1,617 | 2.640625 | 3 | [] | no_license | from flask import Flask,request,jsonify
from flask_cors import CORS, cross_origin
from scipy.spatial import distance
app = Flask(__name__)
CORS(app, support_credentials=True)
@app.route("/rank_devs",methods=["POST"])
@cross_origin()
def rank_devs():
# print("############################")
desired_skills = request.json['desired_skills'].split(' ')
candidates = request.json['candidates']
desired_skills = [x.lower() for x in desired_skills]
# print(desired_skills)
# print(candidates)
for i in range(len(candidates)):
candidates[i]['mat']=[]
candidates[i]['skills'] = [x.lower() for x in candidates[i]['skills']]
for skill in desired_skills:
if skill in candidates[i]['skills']:
candidates[i]['mat'].append(1)
else:
candidates[i]['mat'].append(0)
# print(candidates)
desired_matrix = [1 for x in desired_skills]
# print(desired_matrix)
for i in range(len(candidates)):
if candidates[i]['mat'] != [0]*len(desired_skills):
candidates[i]['similarity'] = 1 - distance.cosine(candidates[i]['mat'],desired_matrix)
else:
candidates[i]['similarity'] = 0
# print(candidates)
candidates = sorted(candidates,key = lambda x:x['similarity'],reverse=True)
# print("Sorted List")
# for i in candidates:
# print(i)
return jsonify(ranklist = candidates)
@app.route("/health_check", methods=["GET"])
@cross_origin()
def health_check():
return "API Up And Running"
# Default port:
if __name__ == "__main__":
app.run() | true |
d76848e579dafcfd19c021f24e1f853ed22594f1 | Python | yongbing1/MagicStack-Proxy | /common/websocket_api.py | UTF-8 | 4,471 | 2.734375 | 3 | [] | no_license | # coding: utf-8
from Crypto.Cipher import AES
import crypt
import pwd
from binascii import b2a_hex, a2b_hex
import hashlib
import datetime
import random
import subprocess
import uuid
import json
from conf.settings import *
def set_log(level, filename='jumpserver.log'):
    """
    Return a logger writing to LOG_DIR/filename at the given level.

    ``level`` is one of 'debug'/'info'/'warning'/'error'/'critical';
    anything else falls back to DEBUG.  NOTE(review): each call adds
    another FileHandler to the shared 'jumpserver' logger, so repeated
    calls duplicate log lines -- confirm callers invoke this only once.
    The octal literals (0777) mark this file as Python 2 code.
    """
    log_file = os.path.join(LOG_DIR, filename)
    if not os.path.isfile(log_file):
        os.mknod(log_file)
        os.chmod(log_file, 0777)
    log_level_total = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARN, 'error': logging.ERROR,
                       'critical': logging.CRITICAL}
    logger_f = logging.getLogger('jumpserver')
    logger_f.setLevel(logging.DEBUG)
    fh = logging.FileHandler(log_file)
    # The handler, not the logger, enforces the requested threshold.
    fh.setLevel(log_level_total.get(level, logging.DEBUG))
    formatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger_f.addHandler(fh)
    return logger_f
def list_drop_str(a_list, a_str):
    """Remove every occurrence of *a_str* from *a_list*, in place.

    The previous implementation removed elements while iterating over the
    same list, which skips items and leaves adjacent duplicates behind.
    Slice assignment keeps the in-place contract (callers may rely on their
    list object being mutated) while removing all matches.
    """
    a_list[:] = [i for i in a_list if i != a_str]
    return a_list
def chown(path, user, group=''):
    """Change ownership of *path* to *user*:*group* (group defaults to user).

    NOTE(review): the gid is looked up with pwd.getpwnam(group), i.e. it
    uses the primary gid of a *user account* named ``group``; a real group
    name raises KeyError (silently swallowed below).  Confirm whether
    grp.getgrnam was intended.
    """
    if not group:
        group = user
    try:
        uid = pwd.getpwnam(user).pw_uid
        gid = pwd.getpwnam(group).pw_gid
        os.chown(path, uid, gid)
    except KeyError:
        # Unknown user/group: best effort, leave ownership unchanged.
        pass
class PyCrypt(object):
    """
    This class is used to encrypt and decrypt passwords (AES-CBC plus a few
    hashing helpers).

    SECURITY(review): the AES IV is a hard-coded constant (see encrypt /
    decrypt), so identical plaintexts encrypt to identical ciphertexts;
    consider a random per-message IV.
    """
    def __init__(self, key):
        # Key material for AES; must be a valid AES key length (16/24/32).
        self.key = key
        self.mode = AES.MODE_CBC
    @staticmethod
    def gen_rand_pass(length=16, especial=False):
        """
        Generate a random password of *length* characters; when *especial*
        is true, the last four characters are drawn from a symbol set.
        """
        salt_key = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
        symbol = '!@$%^&*()_'
        salt_list = []
        if especial:
            for i in range(length - 4):
                salt_list.append(random.choice(salt_key))
            for i in range(4):
                salt_list.append(random.choice(symbol))
        else:
            for i in range(length):
                salt_list.append(random.choice(salt_key))
        salt = ''.join(salt_list)
        return salt
    @staticmethod
    def md5_crypt(string):
        """
        Return the hex MD5 digest of *string* (one-way hash).
        """
        return hashlib.new("md5", string).hexdigest()
    @staticmethod
    def gen_sha512(salt, password):
        """
        Generate a crypt(3) SHA-512 ($6$) formatted password hash.
        """
        return crypt.crypt(password, '$6$%s$' % salt)
    def encrypt(self, passwd=None, length=32):
        """
        Symmetric encryption: encrypt *passwd* (random one generated when
        omitted), NUL-padded to a multiple of *length*, returning hex text.
        """
        if not passwd:
            passwd = self.gen_rand_pass()
        # Fixed IV -- see the class-level security note.
        cryptor = AES.new(self.key, self.mode, b'8122ca7d906ad5e1')
        try:
            count = len(passwd)
        except TypeError:
            raise ServerError('Encrypt password error, TYpe error.')
        add = (length - (count % length))
        passwd += ('\0' * add)
        cipher_text = cryptor.encrypt(passwd)
        return b2a_hex(cipher_text)
    def decrypt(self, text):
        """
        Symmetric decryption: inverse of encrypt(), using the same key and
        fixed IV; trailing NUL padding is stripped.
        """
        cryptor = AES.new(self.key, self.mode, b'8122ca7d906ad5e1')
        try:
            plain_text = cryptor.decrypt(a2b_hex(text))
        except TypeError:
            raise ServerError('Decrypt password error, TYpe error.')
        return plain_text.rstrip('\0')
class ServerError(Exception):
    """
    Self-defined exception raised for encryption/decryption failures.
    """
    pass
def bash(cmd):
    """
    Run a bash shell command and return its exit status.

    SECURITY(review): cmd goes through the shell (shell=True); never pass
    untrusted input.
    """
    return subprocess.call(cmd, shell=True)
def mkdir(dir_name, username='', mode=0755):
    """
    Ensure the directory exists with the given mode (Python 2 octal
    literal), creating it if needed and optionally chowning it to
    *username*.
    """
    if not os.path.isdir(dir_name):
        os.makedirs(dir_name)
        os.chmod(dir_name, mode)
    if username:
        chown(dir_name, username)
def get_tmp_dir():
    """Create and return a fresh world-writable work directory under /tmp.

    The name combines a timestamp with a 4-hex-digit random suffix to avoid
    collisions between runs started in the same second.
    """
    seed = uuid.uuid4().hex[:4]
    dir_name = os.path.join('/tmp', '%s-%s' % (datetime.datetime.now().strftime('%Y%m%d-%H%M%S'), seed))
    mkdir(dir_name, mode=0777)
    return dir_name
def get_mac_address():
    """Return this machine's MAC address as 12 lowercase hex digits."""
    # uuid.getnode() yields the 48-bit hardware address (or a random
    # fallback); zero-padding to 12 hex digits is exactly what
    # UUID(int=node).hex[-12:] produced.
    node_id = uuid.getnode()
    return format(node_id, '012x')
CRYPTOR = PyCrypt(KEY)
logger = set_log('debug') | true |
f8fe0e04ee5b81eb0cd21b19ced9e435a441b8da | Python | Dan-Doit/semicolon_recommend | /recommend.py | UTF-8 | 3,705 | 2.546875 | 3 | [] | no_license | from graphqlclient import GraphQLClient
import json
import os
import pandas as pd
import numpy as np
from math import sqrt
from tqdm import tqdm_notebook as tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
#
# key = os.environ['PRISMA_ENDPOINT']
client = GraphQLClient("END PAGE")
arr = client.execute('''
{
users{
id
likes{
post{
id
}
}
}
}
''',
)
arr = json.loads(arr)
parr = client.execute('''
{
posts{
id
}
}
''')
parr = json.loads(parr)
arr = arr['data']['users']
userId = []
for i in arr:
userId.append(i['id'])
newArr = []
for i in arr:
for j in i['likes']:
newArr.append([i['id'], j['post']['id']])
users = []
posts = []
isLike = []
result = []
for i in arr:
users.append(i['id'])
# 모든 유저값
for i in parr['data']['posts']:
posts.append(i['id'])
# 모든 포스트값
# 좋아하는지 체크
for i in arr:
for j in i['likes']:
isLike.append([i['id'], j['post']['id']])
for i in range(len(users)):
for j in range(len(posts)):
for z in isLike:
if z[0] == users[i] and z[1] == posts[j]:
count = 5
break
else:
count = 0
result.append([users[i], posts[j], count])
people = len(users)
users = []
posts = []
isLike = []
temp = []
# 여기서 데이터 분류
for i in range(len(result)):
users.append(result[i][0])
posts.append(result[i][1])
isLike.append(result[i][2])
temp.append(count)
count = count + 1
ratings_df = pd.DataFrame({'userId': users, 'postId': posts, 'rating': isLike, 'count': temp})
train_df, test_df = train_test_split(ratings_df, test_size=0.2, random_state=1234)
"""### Sparse Matrix 만들기"""
sparse_matrix = train_df.groupby('postId').apply(lambda x: pd.Series(x['rating'].values, index=x['userId'])).unstack()
sparse_matrix.index.name = 'postId'
sparse_matrix
# fill sparse matrix with average of post ratings
sparse_matrix_withpost = sparse_matrix.apply(lambda x: x.fillna(x.mean()), axis=1)
# fill sparse matrix with average of user ratings
sparse_matrix_withuser = sparse_matrix.apply(lambda x: x.fillna(x.mean()), axis=0)
sparse_matrix_withpost
sparse_matrix_withuser
"""## Matrix Factorization with SVD"""
def get_svd(s_matrix, k=people):
    """Truncated SVD factorization of the post x user matrix.

    Decomposes s_matrix.T = U S V^T, keeps the k largest singular
    values, and returns (item_factors, user_factors) whose matrix
    product is a rank-k reconstruction of s_matrix.

    NOTE: the default k=people is captured from the module-level global
    at function-definition time.
    """
    u, s, vh = np.linalg.svd(s_matrix.transpose())
    # Row-vector * identity broadcasts the k largest singular values
    # onto the diagonal of a (k, k) matrix.
    # `np.float` was removed in NumPy 1.24 (AttributeError on modern
    # NumPy); the builtin float dtype is the documented replacement.
    S = s[:k] * np.identity(k, dtype=float)
    T = u[:, :k]
    Dt = vh[:k, :]
    item_factors = np.transpose(np.matmul(S, Dt))
    user_factors = np.transpose(T)
    return item_factors, user_factors
# Factorize the post-average-filled matrix and reconstruct the dense
# prediction matrix (posts x users).
item_factors, user_factors = get_svd(sparse_matrix_withpost)
prediction_result_df = pd.DataFrame(np.matmul(item_factors, user_factors),
columns=sparse_matrix_withpost.columns.values,
index=sparse_matrix_withpost.index.values)
recommend = prediction_result_df.transpose()
favorite = {}
postId = []
# One (possibly empty) recommendation list per user id.
for i in users:
    favorite.setdefault(str(i),[])
# Predicted scores in [0.1, 4.9) count as recommendable: clearly above
# zero, but below an explicit 5 (an already-liked post).
for i in range(len(recommend.index)):
    for j in range(len(recommend.columns)):
        if recommend.values[i][j] >= 0.1 and recommend.values[i][j] < 4.9:
            favorite[str(recommend.index[i])].append(recommend.columns[j])
            print(recommend.index[i], recommend.columns[j], round(recommend.values[i][j]))
from flask import Flask
from flask_restful import Resource, Api
from flask import jsonify
app = Flask(__name__)
api = Api(app)
class RegistUser(Resource):
    """GET endpoint returning the per-user recommendation lists."""
    def get(self):
        # `favorite` is the module-level {user_id: [post_id, ...]} dict.
        return jsonify(favorite)
# NOTE(review): the access key is embedded in the URL path, and the app
# binds a hard-coded LAN address with debug=True -- fine for a demo, not
# for production.
api.add_resource(RegistUser, '/recommendation/key=teamsemicolon')
if __name__ == '__main__':
    app.run(host="172.30.1.23",port=5000, debug=True)
| true |
7860b300bae378180553880f3ef985df4fb1ac02 | Python | safi21/LearnGitRepo | /discount_price.py | UTF-8 | 254 | 3.921875 | 4 | [] | no_license | origPrice = float(input("Enter the price $"))
# The discount is applied as a percentage of the original price
# (divided by 100 below), so prompt for -- and report -- a percent
# value, not a dollar amount as the original strings claimed.
discount = float(input("Enter the discount %"))
mPrice = (1 - discount / 100) * origPrice
print(
    "The price of {:.2f} after {:.2f}% discount is ${:.2f}".format(
        origPrice, discount, mPrice
    )
)
| true |
02ba346ebac4f076a08e8c98e981c4e1bfbc9647 | Python | dhmodi/medical-affair-assistant | /cognitiveSQL/Column.py | UTF-8 | 825 | 2.859375 | 3 | [
"GPL-3.0-only",
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*
import sys
import unicodedata
##reload(sys)
#sys.setdefaultencoding("utf-8")
class Column:
    """A single table column: name, SQL type, and primary-key flag."""

    # Class-level fallbacks (shadowed by the instance attributes that
    # __init__ assigns on every instance).
    name = ''
    type = ''
    is_primary = False

    def __init__(self, name=None, type=None, is_primary=None):
        # A missing argument falls back to the same default as the
        # corresponding class attribute above.
        self.name = '' if name is None else name
        self.type = '' if type is None else type
        self.is_primary = False if is_primary is None else is_primary

    def get_name(self):
        """Return the column name."""
        return self.name

    def set_name(self, name):
        """Replace the column name."""
        self.name = name

    def get_type(self):
        """Return the column's SQL type."""
        return self.type

    def set_type(self, type):
        """Replace the column's SQL type."""
        self.type = type
def is_primary(self):
return self.is_primary | true |
948aba00cb71a996bedea9af56dd6c886389db43 | Python | vitroid/GenIce | /genice2/molecules/thf.py | UTF-8 | 986 | 2.59375 | 3 | [
"MIT"
] | permissive | # coding: utf-8
import numpy as np
# All-atom THF model
from logging import getLogger
import genice2.molecules
desc = {
"usage": "No options available.",
"brief": "An all-atom tetrahydrofuran (THF) model."
}
class Molecule(genice2.molecules.Molecule):
    """All-atom rigid THF (tetrahydrofuran) molecule: one ring oxygen,
    four carbons, eight hydrogens -- 13 sites in total."""
    def __init__(self):
        # 13 site coordinates, index-aligned with atoms_/labels_ below.
        # The /10 presumably converts tabulated Angstrom values to nm
        # (GenIce convention) -- TODO confirm units.
        self.sites_ = np.array([
            [1.2328, -0.0005, 0.0000],
            [-1.0107, -0.7202, -0.2205],
            [-1.0102, 0.7210, 0.2205],
            [0.3936, -1.1560, 0.1374],
            [0.3946, 1.1557, -0.1375],
            [-1.7823, -1.3279, 0.2593],
            [-1.1544, -0.7757, -1.3060],
            [-1.7812, 1.3292, -0.2593],
            [-1.1537, 0.7766, 1.3061],
            [0.4518, -1.4889, 1.1792],
            [0.7622, -1.9589, -0.5071],
            [0.4532, 1.4885, -1.1793],
            [0.7639, 1.9583, 0.5070]]) / 10
        # Element symbols, one per site: O, 4 x C, 8 x H.
        self.atoms_ = ["O"] + ["C"] * 4 + ["H"] * 8
        # Per-site labels; "CA"/"CB" distinguish the two carbon
        # environments in the ring.
        self.labels_ = ["O", "CA", "CA", "CB", "CB"] + ["H"] * 8
        self.name_ = "THF"
| true |
489b62413a5c37751bf0833b51fd2562029e3269 | Python | OskarKozaczka/pp1-OskarKozaczka | /04-Subroutines/4.16.py | UTF-8 | 188 | 2.78125 | 3 | [] | no_license | def miesiac(x):
    # Polish month names; index 0 is "styczeń" (January).
    nazwy_miesiecy=['styczeń','luty','marzec','kwiecień','maj','czerwiec','lipiec','sierpień','wrzesień','październik','listopad','grudzień']
    # Map a 1-based month number (1..12) to its Polish name.
    return nazwy_miesiecy[x-1] | true |
ee361112dc44ec98641281ca38cec39dd98022d9 | Python | kdharlley/kdharlley.github.io | /CS_1110/A4_file_tree_manipulation_100/positions_test.py | UTF-8 | 3,519 | 3.421875 | 3 | [] | no_license | # positions_test.py
# Lillian Lee (LJL2)
# Apr 11, 2018
"""Some demonstrations of the use of class Positions and related functions
from positions.py.
Also tests whether students can use networkx to draw org charts.
STUDENTS: try running this file on the command line, i.e.,
python positions_test.py
if you get an error saying that name networkx is not defined,
do the following on the command line (not interactive mode):
pip install networkx
If you don't get such a message, but running python on this file fails to
produce a figure with a "web" of positions, do the following on the
command line:
pip install --upgrade networkx
If after you try the relevant option above and you still aren't getting
a figure produced when running this file, please post on Piazza with
some details of any error messages or output.
"""
import positions as pfile
# STUDENTS: observe that making a subclass makes it easier to create
# formulaic Positions
class VP(pfile.Position):
    """A Vice President / Provost position: reports directly to the
    President.
    Assumes `president` is a global variable (bound in the __main__
    block below before any VP is created)."""
    def __init__(self, t, h,):
        # t: position title; h: netid of the position holder.
        super().__init__(t, h, [president], [])
class Dean(pfile.Position):
    """A regular Dean position: reports to the Provost.
    Assumes `provost` is a global variable (bound in the __main__ block
    below before any Dean is created)."""
    def __init__(self, t, h, search_in_progress=False):
        # t: position title; h: netid of the holder; search_in_progress
        # marks positions currently being (re)filled.
        super().__init__(t, h, [provost], [], search_in_progress)
if __name__ == '__main__':
# Set up part of the org chart for Cornell
# Start with the "root"
trustees = pfile.Position("Board of Trustees", 0, [], [])
# This netid is made up.
president = pfile.Position("President", "mep100", [trustees], [])
# LL note: my reading of the Cornell bylaws
# (https://trustees.cornell.edu/Shared%20Documents/18-3%20bylaws%20w-TC.pdf)
# is that the University Counsel is a separate position from the
# Secretary of the Corporation, despite the President org chart
# at http://dbp.cornell.edu/university-org-structure/
counsel = pfile.Position("University Counsel",
"mfw68",
[trustees, president],
[])
sec_of_corp = pfile.Position("Secretary of the Corporation",
"mfw68",
[president],
[])
provost = pfile.Position("Provost",
"mik7",
[president],
[])
evp = VP("Executive Vice President and Chief Financial Officer", "jmd11")
pma = VP("Provost for Medical Affairs", "amc562")
vdai = pfile.Position("Vice Dean for Academic Integration",
"gak36",
[provost, pma],
[])
dean_med = pfile.Position("Dean of the Medical College", "amc562", [pma], [])
dean_as = Dean('Dean of Arts and Sciences', "gr72", search_in_progress=True)
dean_bus = Dean('Dean of Business', "ljt3", search_in_progress=True)
# Test some printouts
for posn in [trustees, president, counsel, sec_of_corp, provost,
evp, pma, vdai, dean_med, dean_as, dean_bus]:
print(posn.full_string())
print()
title = "Fragment starting at Board of Trustees (blue dot).\n"
title += "(Resize the window if this figure is hard to read.)"
pfile.draw(trustees, figtitle=title)
| true |
08e9c02be1beb50f9f6c87dca8820af828671651 | Python | RafayAK/CodingPrep | /DailyCodingProblem/119_Google_Find_Smallest_Set_of_Numbers_That_Covers_All_Intervals.py | UTF-8 | 1,952 | 4.25 | 4 | [
"MIT"
] | permissive | """
This problem was asked by Google.
Given a set of closed intervals, find the smallest set of numbers that covers all the intervals.
If there are multiple smallest sets, return any of them.
For example, given the intervals [0, 3], [2, 6], [3, 4], [6, 9], one set of numbers that covers
all these intervals is {3, 6}.
"""
# basicaly we want the smallest set of numbers such that for interval we
# have one number that belongs to that an interval
# 0 1 2 3 4 5 6 7 8 9 10
# ----
# -------------
# ^ -------
# | ----
# ----------
# ^ -------
# | ^
# |
def numbers_that_cover_intterval(intervals):
    """Greedily pick a small set of integers such that every closed
    interval in `intervals` contains at least one of them; the chosen
    points are returned in ascending order.
    """
    lo = min(iv[0] for iv in intervals)
    hi = max(iv[1] for iv in intervals)
    # covering[p] = indices of the intervals that contain point p.
    covering = {}
    for p in range(lo, hi + 1):
        covering[p] = {idx for idx, iv in enumerate(intervals)
                       if iv[0] <= p <= iv[1]}
    chosen = []
    for p in range(lo, hi + 1):
        if not chosen:
            chosen.append(p)
        elif not covering[p].issubset(covering[chosen[-1]]):
            # p reaches intervals the last pick misses...
            if covering[chosen[-1]].issubset(covering[p]):
                # ...and still covers everything the last pick did, so
                # upgrade the last pick in place.
                chosen[-1] = p
            else:
                chosen.append(p)
    return chosen
if __name__ == "__main__":
print(numbers_that_cover_intterval(([0, 3], [2, 6], [3, 4], [6, 9])))
print(numbers_that_cover_intterval([[0, 3], [2, 6]]))
print(numbers_that_cover_intterval([[0, 3], [2, 6], [3, 4]]))
print(numbers_that_cover_intterval([[0, 3], [2, 6], [3, 4], [6, 100]]))
print(numbers_that_cover_intterval([[1, 2], [0,4], [5,7],[6,7], [6,9], [8,10]]))
| true |
87eefcd5c7ff3327f1ccfeb24cec450b7e34badc | Python | MarileneGarcia/Initiation_Scientific_Research | /Algoritmo_MEMo/Versao_Final/Grafo_Genes.py | UTF-8 | 497 | 3.171875 | 3 | [] | no_license | ''' Funcao que utiliza os dados da relacao entre os genes para criar um grafo
Variaveis de entrada: --
Variaveis de saida: um grafo da relacao entre os genes '''
from igraph import *
def grafo_genes():
    # Build an undirected igraph graph from the gene-relation edge list.
    grafo = Graph.Read_Edgelist('rede_filtrada.txt', directed=False)
    # Optional plotting, kept disabled:
    #grafo.vs["color"] = "lightgreen"
    #layout = grafo.layout("kk")
    #plot(grafo, layout = layout)
    # Return the graph to the caller.
    return grafo | true |
9eb77c6ea561c38d935ba619860c57360b70301a | Python | afminmax/YoutubeAPI | /classes/oop/robots2.py | UTF-8 | 845 | 4.875 | 5 | [] | no_license | # Python Classes Part 2
# In this second iteration, we need to add a constructor
# In python, constructors are made with this format __init__
# This constructor takes three arguments and self
# It is good practice to name the attributes the same as the arguments
class Robot:
    """A toy robot that can introduce itself on stdout."""

    def __init__(self, name, color, weight):
        # Attribute names deliberately mirror the constructor arguments.
        self.name = name
        self.color = color
        self.weight = weight

    def introduce_self(self):
        """Print a one-line self-introduction."""
        message = "Domo arrigato I am Mrs Roboto {}, I am {} and weigh {} kilos.".format(
            self.name, self.color, self.weight)
        print(message)
# Now, to create a new robot instance, instantiate the class and feed it the argument values
r1 = Robot('Anne', 'pink', 25)
r2 = Robot('Emma', 'green', 15)
# Last, we call the function action in each of the objects:
r1.introduce_self()
r2.introduce_self()
| true |
b18cab34ca8562a00062ce42af8fede99f923f24 | Python | vincentinttsh/NCNU_Course | /getData.py | UTF-8 | 4,110 | 2.828125 | 3 | [] | no_license | import requests
import json
import os
import csv
from bs4 import BeautifulSoup as bs
header = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0',
'Cookie': '輸入登入暨大教務系統後所得到的cookie'
}
mainURL = "https://ccweb.ncnu.edu.tw/student/"
courses = []
generalCourse = []
def getGeneralCourseData(year):
    '''
    Fetch the general-education course classification CSV for `year`
    and store the rows in the module-level `generalCourse` list; they
    are matched by course id later.
    '''
    # The registration system exposes a year query parameter, but in
    # practice it appears to serve only the current academic year.
    response = requests.get(mainURL+"aspmaker_student_common_rank_courses_viewlist.php?x_studentid=0&z_studentid=LIKE&x_year={}&z_year=%3D&cmd=search&export=csv".format(year), headers=header)
    data = response.text
    # Drop the CSV header row and the trailing empty line.
    # NOTE(review): a plain split(',') breaks if a quoted field ever
    # contains a comma; the csv module would be safer.
    courses = data.split('\r\n')[1:-1]
    for course in courses:
        course = course.split(',')
        generalCourse.append(course)
def curlDepartmentCourseTable(year):
    '''
    Fetch the page listing every department's course-table link, then
    hand each link to extractDepartmentCourseTable() to collect that
    department's course details.
    '''
    print("取得所有課程資料:")
    response = requests.get(mainURL+"aspmaker_course_opened_semester_stat_viewlist.php?x_year={}&recperpage=ALL".format(year), headers=header)
    data = response.text
    root = bs(data, "html.parser")
    count = 1
    departmentsTR = root.findAll('tr')[1:] # drop the thead row
    for tr in departmentsTR:
        name = tr.findAll('td')[4].find('span').find('span').string # department name
        link = mainURL + tr.find('a').get('data-url').replace('amp;', '') # un-escape '&' in the link
        print("擷取{}課程... ({}/{})...".format(name, count, len(departmentsTR)))
        count += 1
        extractDepartmentCourseTable(name, link) # scrape this department's courses
def extractDepartmentCourseTable(departmentName, link):
    '''
    Scrape one department's course table via its link. For courses in
    the general-education ("99, 通識") category, match against the CSV
    rows in `generalCourse` to recover the precise category, then dump
    the accumulated `courses` list to output.json.
    NOTE(review): departmentName is currently unused, and output.json is
    rewritten (cumulatively) after every department call.
    '''
    response = requests.get(link, headers=header)
    data = response.text
    root = bs(data, "html.parser")
    courseTR = root.findAll('tr')[1:] # drop the thead row
    for tr in courseTR:
        courseObj = {}
        tds = tr.find_all('td')
        courseObj['link'] = mainURL + tds[0].find('a').get('href')
        courseObj['year'] = tds[1].find('span').string
        courseObj['number'] = tds[2].find('span').string
        courseObj['class'] = tds[3].find('span').string
        courseObj['name'] = tds[4].find('span').string
        courseObj['department'] = tds[5].find('span').string
        courseObj['graduated'] = tds[6].find('span').string
        courseObj['grade'] = tds[7].find('span').string
        courseObj['teacher'] = tds[8].find('span').string
        courseObj['place'] = tds[9].find('span').string
        # Note: tds[10] is deliberately skipped; the timetable cell is
        # at index 11.
        courseObj['time'] = tds[11].find('span').string
        if courseObj['department']=="99, 通識" :
            flag = False
            # The CSV keeps values quoted, hence the '"{}"' comparison;
            # matched rows are consumed so each maps to one course.
            for row in generalCourse:
                if row[2] == '"{}"'.format(courseObj['number']):
                    courseObj['department'] = row[0].replace('"', '')
                    generalCourse.remove(row)
                    flag = True
                    break
            if not flag:
                print(" - 找不到對應的通識類別: {} ( {} )".format(courseObj['name'], courseObj['number']))
        courses.append(courseObj)
    with open('output.json', 'w') as fp:
        json.dump(courses, fp)
if __name__ == "__main__":
year = input("年份: ")
getGeneralCourseData(year)
curlDepartmentCourseTable(year)
print("\n\n=====================")
print("未列入追蹤的通識課程")
print("=====================\n")
for notIn in generalCourse:
if "體育:" not in notIn[5]:
print(" - 未列入追蹤的新通識課程: {}".format(notIn)) | true |
7b7a700b6f1c07cfbacb15ce398a040a55ee4909 | Python | losper/audio | /test/py/shazam.py | UTF-8 | 465 | 2.65625 | 3 | [] | no_license | import numpy as np
import wavinfo as wv
import glob
class Shazam:
    """Skeleton of an audio-fingerprinting service (work in progress:
    search() and sample() are placeholder stubs)."""
    def search(self, path):
        # TODO: look `path` up against the fingerprint index.
        print("hello world!!!")
    def index(self, folder):
        # Fingerprint every .wav file directly inside `folder`.
        for song in glob.glob(folder + "/*.wav"):
            try:
                self.sample(song)
            except Exception:
                # NOTE(review): the broad catch hides the failure cause;
                # consider logging the exception itself.
                print("Error in processing: " + song)
    def sample(self, song):
        # TODO: compute the fingerprint of a single file.
        print("???")
t = Shazam()
t.index("../data")
t.search("")
| true |
0d109fb7650aea47b190461e654f505a5603cd70 | Python | moreGet/pythonAlgorithm | /코테연습고득점KIT/카펫.py | UTF-8 | 431 | 3.1875 | 3 | [] | no_license | def solution(brown, yellow):
    # Carpet puzzle: find width x height (width >= height) with
    # width * height == total cell count and a one-cell border of
    # exactly `brown` cells (2w + 2h - 4).
    answer = []
    total = brown + yellow
    temp = []
    # Every divisor of the total area is a candidate side length.
    # NOTE(review): range stops at total-1, so a 1-row carpet
    # (total x 1) is never tested -- fine as long as yellow >= 1.
    for i in range(1, total):
        if total % i == 0:
            temp.append(i)
    for i in temp:
        for j in temp:
            if (i >= j) and (i * j == total) and ((i*2) + (j*2))-4 == brown:
                answer = [i, j]
    return answer
if __name__ == '__main__':
b = 10
y = 2
print(solution(b, y)) | true |
d08ef9c2a90e10abe55c8ec6600f1fb1b4601925 | Python | sf624/CarND-Behavioral-Cloning-P3 | /model.py | UTF-8 | 2,905 | 3.046875 | 3 | [] | no_license | import csv
import cv2
import numpy as np
# Read the simulator's driving log; each CSV row describes one sample.
# NOTE(review): there is no header-row skip -- if the CSV has a header,
# float(line[3]) below will raise. Confirm the log format.
lines = []
with open("./data/driving_log.csv") as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
print("There are " + str(len(lines)) + " data points.")
images = []
measurements = []
for line in lines:
    # [0] ... path to center_image
    # [1] ... path to left_image
    # [2] ... path to right_image
    # [3] ... steering angle [-1,1]
    # [4] ... throttle [0,1]
    # [5] ... break (0)
    # [6] ... speed [0,30]
    # load images to list
    # NOTE(review): image paths are used exactly as recorded, so this
    # breaks if the log was captured on a different machine -- confirm.
    for i in range(3):
        source_path = line[i]
        image = cv2.imread(source_path)
        images.append(image)
    # load measurement to list: the fixed correction steers the car back
    # toward center for the left/right camera views.
    correction = 0.2
    measurement = float(line[3])
    measurements.extend([measurement, measurement + correction, measurement - correction])
# augment images and steering: add a horizontally flipped copy of every
# frame with the steering angle negated.
augmented_images, augmented_measurements = [], []
for image, measurement in zip(images, measurements):
    augmented_images.append(image)
    augmented_measurements.append(measurement)
    augmented_images.append(cv2.flip(image,1))
    augmented_measurements.append((-1.0)*measurement)
# convert to numpy arrays to be able to apply to Keras
X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)
print("There are " + str(len(X_train)) + " training data. (befor split)")
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Dropout, Cropping2D
from keras.layers import Convolution2D
from keras.layers.pooling import MaxPooling2D
# Construct regression DNN (note: not classification network! )
# The conv stack (24/36/48 5x5 stride-2 + two 64 3x3) followed by
# 100/50/10/1 dense layers matches the NVIDIA end-to-end driving
# architecture. Normalization and sky/hood cropping happen in-model so
# drive-time inference gets the identical preprocessing.
# NOTE(review): Convolution2D(24,5,5,subsample=...) is Keras 1 syntax
# while epochs= in fit() is Keras 2 -- confirm the installed version.
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape = (160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
model.add(Convolution2D(24,5,5,subsample=(2,2),activation="relu"))
model.add(Convolution2D(36,5,5,subsample=(2,2),activation="relu"))
model.add(Convolution2D(48,5,5,subsample=(2,2),activation="relu"))
model.add(Convolution2D(64,3,3,activation="relu"))
model.add(Convolution2D(64,3,3,activation="relu"))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(100,activation="tanh"))
model.add(Dense(50,activation="tanh"))
model.add(Dense(10,activation="tanh"))
model.add(Dense(1))
model.compile(loss="mse", optimizer="adam")
history_object = model.fit(X_train,y_train,validation_split=0.2,shuffle=True,epochs=2,verbose=1)
model.save("model.h5")
'''
# Visualize history of mse loss
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
import matplotlib.pyplot as plt
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
'''
exit()
| true |
89810ec38df6131e3d939b912e02c3bc4c9a368a | Python | ryndovaira/leveluppythonlevel1_300321 | /topic_04_testing_p1/practice/test_logic_3_check_numbers_by_5.py | UTF-8 | 552 | 3.28125 | 3 | [] | no_license | from topic_02_syntax.practice.logic_3_check_numbers_by_5 import check_numbers_by_5
def test_logic_3_check_numbers_by_5_ep_ok():
    """Equivalence-partition happy-path cases for check_numbers_by_5."""
    assert check_numbers_by_5(1, 2, 6) # no `==` and no `is True`: redundant for a truthiness assert
    assert check_numbers_by_5(1, 6, 2) # no `==` and no `is True`: redundant for a truthiness assert
    assert check_numbers_by_5(6, 1, 2) is True # `is`, not `==`
    assert check_numbers_by_5(1, 1, 2) is False # `is`, not `==`
# def test_logic_3_check_numbers_by_5_ep_wrong():
# assert check_numbers_by_5("3", "2", "6")
| true |
26495b2b59d2ddf80cd25a17b56560d197357f83 | Python | michaelriha/KinectNeuralNetwork | /parseMotionData.py | UTF-8 | 4,590 | 2.921875 | 3 | [] | no_license | # CS156 Milestone 1 - Kinect Motion Capture for Neural Network design
#
# This python script prepares the collected data for use by the neural network
# as defined in the Milestone 1 assignment description.
#
# Files in "source" with ".txt" extension are read in. The output should be
# Kinect JSON output. For each sample file, for each joint, and for each frame,
# the x,y,z,r0...r8 data are aggregated and then written to files in
# output/<JSONfilename>/
#
# Files are named J1x, J1y, J1z, J1R0...J1R8
# J2x, J2y, J2z, J2R0...J2R8
# ...
# J24x, J24y, J24z, J24R0...J24R8
#
# Joints 5, 10, 11, and 16 are not recorded by the Kinect so no files are
# created for those numbers
#
# Written by Michael Riha for CS156 Artificial Intelligence at SJSU
import json
import os
source = "KinectMotionData"
output = "ParsedMotionData"
numJoints = 24
# Joints 5, 10, 11, and 16 are not recorded
recordedJoints = [1,2,3,4,6,7,8,9,12,13,14,15,17,18,19,20,21,22,23,24]
# Make an output directory to put the output data
if not os.path.exists(output):
    os.mkdir(output)

# The 12 per-joint data series, in output-file order: position x/y/z
# followed by the nine rotation-matrix entries R0..R8. (The original
# kept 12 hand-maintained parallel lists with 12 duplicated append
# statements; a dict keyed by series name removes the copy-paste.)
dataSetNames = ["x", "y", "z", "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8"]

for root, dirs, files in os.walk(source):
    for f in files:
        # Only Kinect JSON dumps (stored with a .txt extension).
        if not f.endswith(".txt"):
            continue
        # `with` closes the handle promptly; the original leaked it.
        with open(os.path.join(source, f)) as json_data:
            data = json.load(json_data)

        # series[name][joint] collects the per-frame string values for
        # that joint; index 0 is unused so joints address as 1..numJoints.
        series = {name: [[] for _ in range(numJoints + 1)]
                  for name in dataSetNames}

        # Process each frame.
        for frame in data["motiondata"]:
            skeleton = frame["skeleton"]
            # Get the data for each joint and append it to the series.
            for joint in skeleton:
                jointIdx = int(joint)
                position = skeleton[joint]["position"]
                rotation = skeleton[joint]["rotation"]
                for axis, value in zip(("x", "y", "z"), position):
                    series[axis][jointIdx].append(str(value))
                for r in range(9):
                    series["R%d" % r][jointIdx].append(str(rotation[r]))

        # Record the data into a folder with files
        # J1x, J1y, J1z, J1R0...J1R8 ... J24x ... J24R8.
        # Make a directory for this data point (student or master).
        name = f[:-4]
        outdir = os.path.join(output, name)
        if not os.path.exists(outdir):
            os.mkdir(outdir)

        # Create a file for each dataset; only joints the Kinect
        # actually records. Existing files are overwritten.
        for j in recordedJoints:
            for setName in dataSetNames:
                outpath = os.path.join(outdir, "J%d%s" % (j, setName))
                with open(outpath, "w") as outfile:
                    outfile.write(' '.join(series[setName][j]))
| true |
eed5b12868371798f84632550389e2832ba6af45 | Python | ishiikurisu/drivesheet | /py/main.py | UTF-8 | 883 | 2.625 | 3 | [] | no_license | import gspread
from oauth2client.service_account import ServiceAccountCredentials
def main():
    """Smoke-test gspread against the 'qotsa' spreadsheet: read some
    cells, edit one, insert a row, then undo both changes."""
    scope = ['https://spreadsheets.google.com/feeds']
    creds = ServiceAccountCredentials.from_json_keyfile_name('../config/secrets.json', scope)
    client = gspread.authorize(creds)
    sheet = client.open('qotsa').sheet1
    members = sheet.get_all_records()
    print(members)
    print(sheet.row_values(3))
    print(sheet.col_values(3))
    print(sheet.cell(2, 3)) # 2nd row, third col
    # Edit a cell, show the change, then restore the original value.
    sheet.update_cell(2, 3, 'Guitar and Vocals')
    print(sheet.get_all_records())
    sheet.update_cell(2, 3, 'Guitar')
    print(sheet.get_all_records())
    # Insert a row right after its index value, show it, then remove it.
    row = [6, 'Jon Theodore', 'Drums']
    sheet.insert_row(row, row[0]+1)
    print(sheet.get_all_records())
    sheet.delete_row(row[0]+1)
    print(sheet.get_all_records())
    print(sheet.row_count)
if __name__ == '__main__':
    main()
| true |
1c87fa2d10c600e84353dbe8c8cf36da019ca03b | Python | CNXuPing/PythonLeedcode | /leedcode-332.py | UTF-8 | 602 | 3.53125 | 4 | [] | no_license | class Solution:
    def findItinerary(self, tickets: List[List[str]]) -> List[str]:
        """Rebuild the lexicographically-smallest itinerary that uses
        every ticket exactly once, starting from "JFK" (Hierholzer's
        Eulerian-path algorithm; each airport's destinations sit in a
        min-heap so the smallest one is always consumed first)."""
        def dfs(string):
            # Exhaust outgoing edges smallest-first, then record the
            # airport on the way back out (post-order).
            while graph[string]:
                nextstr = heapq.heappop(graph[string])
                dfs(nextstr)
            stack.append(string)
        #dictionary-list structure
        graph = collections.defaultdict(list)
        for u,v in tickets:
            graph[u].append(v)
        #Make the list behave like a heap
        for i in graph:
            heapq.heapify(graph[i])
        #Create stack
        stack = []
        dfs("JFK")
        # stack holds the path in reverse post-order; flip it.
        stack.reverse() # stack[::-1]
        return stack
#Backtrack from the last node to the first node
#and record the path,
#then output in reverse order | true |
653888e3f160083ab72ab4a5069387cae9c44e9b | Python | YashGandhi21/SocialDistancingMonitor | /PersonDetector/VideoGet.py | UTF-8 | 1,209 | 3.15625 | 3 | [] | no_license | from threading import Thread
import cv2
class VideoGet:
    """
    Class that continuously gets frames from a VideoCapture object
    with a dedicated thread.
    """
    def __init__(self, src=0):
        # src is any cv2.VideoCapture source (device index or file/URL).
        self.stream = cv2.VideoCapture(src)
        # Prime grabbed/frame so readers always see a valid first frame.
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False
        self.counter=0
    def start(self):
        # NOTE(review): the worker thread is non-daemon, so a live
        # capture keeps the process alive until stop() is called.
        Thread(target=self.get, args=()).start()
        return self
    def get(self):
        # Worker loop: keep the newest frame in self.frame until the
        # stream ends or stop() is requested.
        while not self.stopped:
            if not self.grabbed:
                self.stop()
            else:
                self.counter += 1
                # NOTE(review): per-frame print looks like leftover
                # debug output.
                print("VideoGetter", (self.counter))
                (self.grabbed, self.frame) = self.stream.read()
    def stop(self):
        # NOTE(review): the VideoCapture is never released
        # (self.stream.release()) -- confirm whether that is intended.
        self.stopped = True
# def threadVideoGet(source=0):
# """
# Dedicated thread for grabbing video frames with VideoGet object.
# Main thread shows video frames.
# """
#
# video_getter = VideoGet(source).start()
#
# while True:
# if (cv2.waitKey(1) == ord("q")) or video_getter.stopped:
# video_getter.stop()
# break
#
# frame = video_getter.frame
# cv2.imshow("Thread GUI", frame)
| true |
e49d18911c3e3fd735347a28182439272fe7e24d | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2442/60796/259035.py | UTF-8 | 298 | 2.984375 | 3 | [] | no_license | s=input()
# Parse "[a,b,c]"-style input into a list of ints.
ls=s[1:len(s)-1].split(",")
ls=[int(x) for x in ls]
# NOTE(review): `max` shadows the builtin of the same name.
max=0
n=len(ls)
# Bubble sort (ascending): each pass bubbles the largest remaining
# element into position n-1.
while n>0:
    i=0
    j=n-2
    while i<=j:
        if ls[i]>ls[i+1]:
            ls[i],ls[i+1]=ls[i+1],ls[i]
        i=i+1
    n=n-1
# Largest gap between adjacent values of the sorted list.
for i in range(len(ls)-1):
    n=ls[i+1]-ls[i]
    if n>max:
        max=n
print(max) | true |
e066953881f50b577048360aaf14c2ba200d17b4 | Python | Positronic-IO/air-hockey-training-environment | /tests/test_environment.py | UTF-8 | 447 | 2.71875 | 3 | [] | no_license | """ Test Environment """
from environment import AirHockey
class TestEnvironment:
    """Tests for the AirHockey environment's puck physics."""
    def setup(self):
        # Test-runner setup hook; no shared fixtures are needed.
        pass
    def test_puck_update_location(self):
        env = AirHockey()
        # Some actions: alternate a few robot/opponent mallet moves...
        env.update_state((80, 100), "robot")
        env.update_state((350, 233), "opponent")
        env.update_state((200, 234), "robot")
        env.update_state((380, 234), "opponent")
        # ...after which the puck must sit at this exact position.
        # NOTE(review): the expected value is physics-dependent and will
        # break if the environment's dynamics change.
        assert env.puck.location() == (438, 240)
| true |
f6fac592bdb9e5abc876214148e90b9ec04c0b48 | Python | ycchhueannu/LeetCode | /python/0788_Rotated_Digits.py | UTF-8 | 1,256 | 3.390625 | 3 | [] | no_license | class Solution(object):
def rotatedDigits(self, N):
"""
:type N: int
:rtype: int
"""
cnt = 0
for num in range(1, N+1):
sn = str(num)
if '3' in sn or '7' in sn or '4' in sn:
continue
if '2' in sn or '5' in sn or '6' in sn or '9' in sn:
cnt += 1
return cnt
"""
# base case:
bc_ans = {1: 0, 2: 1, 3: 1, 4: 1, 5: 2, 6: 3, 7: 3, 8: 3, 9: 4}
if N < 10:
return bc_ans[N]
# we only care about numbers only contain 0, 1, 2, 5, 6, 8, 9
# if a number has 3, 4, 7, then it's not valid. Note that
# a number is valid iff it has 2, 5, 6, or 9 (at least one)
cnt = 0
valid = set({2, 5, 6, 9})
for i in range(10, N+1):
si = str(i)
if set(si).isdisjoint({'3', '4', '7'}):
# remove most significant (digit) number ("169" -> "69")
# if use 'si[i:] in valid', numbers like 806 will fail
if int(si[0]) in valid or int(si[1:]) in valid:
cnt += 1
valid.add(int(si))
return len(valid)
""" | true |
124d75e8595f37643aa235e7fa447dc3bf7e909f | Python | mzyl/PushCal | /textfromphoto.py | UTF-8 | 346 | 2.75 | 3 | [] | no_license | #I think this will be the most challenging part of the project.
from PIL import Image
import pytesseract
#Need to accept photo as input.
class Photo():
    """Wraps an image file for OCR preprocessing (work in progress)."""
    def __init__(self, image):
        # `image` is a path or file object accepted by PIL.Image.open.
        self.data = Image.open(image)
    def resize(self, image):
        # NOTE(review): this method cannot run as written -- `cv2` is
        # never imported, `warped` is undefined (NameError at call
        # time), and the `image` parameter is unused. It probably meant
        # to resize self.data or the passed-in image.
        self.image = cv2.resize(warped, (1350, 1150))
#Need to find framework to read text from photo.
| true |
2ad3da8ed9445f0b5a177958893ab385f46b7724 | Python | tuandhtl/Python | /a.py | UTF-8 | 110 | 3.203125 | 3 | [] | no_license | import time
# Count down the last 10 seconds, one line per second...
for seconds in range(10,0,-1):
    print(seconds)
    time.sleep(1)
# ...then celebrate.
print("Happy new year!!!!")
| true |
3f8fd8c62fd2b57a6f340d7484dd8eb35ebb7cea | Python | Jody-Lu/Array | /59_spiral_matrix_II/spiral_matrix_II.py | UTF-8 | 1,399 | 3.390625 | 3 | [] | no_license | class Solution(object):
def generateMatrix(self, n):
"""
: type n: int
: rtype: List[List[int]]
"""
array = [i + 1 for i in range(0, n**2)]
result = [[0 for i in range(0, n)] for j in range(0, n)]
beginX = 0
endX = n - 1
beginY = 0
endY = n - 1
count = 0
while True:
# From left to right
for i in range(beginX, endX + 1):
result[beginY][i] = array[count]
count += 1
beginY += 1
if beginY > endY:
break
# From top to bottom
for j in range(beginY, endY + 1):
result[j][endX] = array[count]
count += 1
endX -= 1
if endX < beginX:
break
# From right to left
for i in reversed(range(beginX, endX + 1)):
result[endY][i] = array[count]
count += 1
endY -= 1
if endY < beginY:
break
# From bottom to top
for j in reversed(range(beginY, endY + 1)):
result[j][beginX] = array[count]
count += 1
beginX += 1
if beginX > endX:
break
return result
if __name__ == '__main__':
    sol = Solution()
    n = 6
    result = sol.generateMatrix(n)
    # `print result` was a Python 2 print statement -- a SyntaxError
    # under Python 3; the call form works on both interpreter lines.
    print(result)
| true |
ad3691a8d7201510e1b7d743c5b2830a95b2ac98 | Python | kimnanhee/learning | /chapter 3/sqlite3_test2.py | UTF-8 | 955 | 4.03125 | 4 | [] | no_license | import sqlite3
# Connect to the database (the file is created on first use).
dbpath = "test2.sqlite"
conn = sqlite3.connect(dbpath)
# Recreate the items table from scratch. One cursor serves the whole
# script; the original opened a fresh cursor before every step.
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS items")
cur.execute("""CREATE TABLE items (
    item_id INTEGER PRIMARY KEY,
    name TEXT,
    price INTEGER)""")
conn.commit()
# Insert a single row (parameterized, so values are escaped safely).
cur.execute(
    "INSERT INTO items (name, price) VALUES (?, ?)",
    ("Orange", 5200))
conn.commit()
# Insert several rows in one executemany call.
data = [("Mango", 7700), ("Kiwi", 4000), ("Grape", 8000),
        ("Peach", 9400), ("Persimon", 7000), ("Strawberry", 4000)]
cur.executemany(
    "INSERT INTO items (name, price) VALUES (?, ?)",
    data
)
conn.commit()
# Select only the rows priced between 4000 and 7000 (inclusive).
price_range = (4000, 7000)
cur.execute(
    "SELECT * FROM items WHERE price>=? AND price<=?",
    price_range)
item_list = cur.fetchall()
for item in item_list:
print(item) | true |
e47f670e72c5918207a0685af3d5bde193a52251 | Python | aenal-abie/Algoritma-dan-Pemrograman | /Praktikum 2/nilai_max.py | UTF-8 | 225 | 3.890625 | 4 | [] | no_license | a = int(input("Masukkan nilai A: "))
b = int(input("Masukkan nilai B: "))
c = int(input("Masukkan nilai C: "))
# Largest of the three inputs. The original nested if/elif only ever
# assigned `terbesar` inside the a > b branch, so any input with
# a <= b (including ties) crashed with NameError at the final print;
# max() handles every ordering.
terbesar = max(a, b, c)
print(terbesar) | true |
2a78877e127b91de042bc40aae8fee1db52c15a6 | Python | usnistgov/core_oaipmh_harvester_app | /core_oaipmh_harvester_app/components/oai_harvester_metadata_format_set/api.py | UTF-8 | 2,120 | 2.75 | 3 | [
"NIST-Software",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | """
OaiHarvesterMetadataFormatSet API
"""
from core_oaipmh_common_app.utils import UTCdatetime
from core_oaipmh_harvester_app.components.oai_harvester_metadata_format_set.models import (
OaiHarvesterMetadataFormatSet,
)
def upsert(oai_harvester_metadata_format_set):
    """Create or update an OaiHarvesterMetadataFormatSet.

    Args:
        oai_harvester_metadata_format_set: OaiHarvesterMetadataFormatSet to create or update.

    Returns:
        OaiHarvesterMetadataFormatSet instance (the saved document).
    """
    return oai_harvester_metadata_format_set.save()
def upsert_last_update_by_metadata_format_and_set(
    harvester_metadata_format, harvester_set, last_update
):
    """Update the last_update date for a given metadata_format and set.
    Create an OaiHarvesterMetadataFormatSet if one doesn't exist yet.

    Args:
        harvester_metadata_format: Metadata format.
        harvester_set: Set.
        last_update: Last update date.

    Returns:
        None.
    """
    OaiHarvesterMetadataFormatSet.upsert_last_update_by_metadata_format_and_set(
        harvester_metadata_format, harvester_set, last_update
    )
def get_by_metadata_format_and_set(
    oai_harvester_metadata_format, oai_harvester_set
):
    """Get an OaiHarvesterMetadataFormatSet by its OaiHarvesterMetadataFormat and OaiHarvesterSet.

    Args:
        oai_harvester_metadata_format: Metadata format to look up by.
        oai_harvester_set: Set to look up by.

    Returns:
        OaiHarvesterMetadataFormatSet instance.
    """
    return OaiHarvesterMetadataFormatSet.get_by_metadata_format_and_set(
        oai_harvester_metadata_format, oai_harvester_set
    )
def get_last_update_by_metadata_format_and_set(
    oai_harvester_metadata_format, oai_harvester_set
):
    """Get the last update by OaiHarvesterMetadataFormat and OaiHarvesterSet.

    Args:
        oai_harvester_metadata_format: Metadata format to look up by.
        oai_harvester_set: Set to look up by.

    Returns:
        OaiHarvesterMetadataFormatSet last update as an ISO 8601 UTC
        string.
    """
    # Convert the stored datetime to the ISO 8601 string form used in
    # OAI-PMH requests.
    return UTCdatetime.datetime_to_utc_datetime_iso8601(
        get_by_metadata_format_and_set(
            oai_harvester_metadata_format, oai_harvester_set
        ).last_update
    )
| true |
70ff9c3abcb5d1d0790a4baf583c8a3bdd959b6c | Python | lsvih/tf-zh-docs-web | /check-docs.py | UTF-8 | 1,143 | 2.546875 | 3 | [] | no_license | from filecmp import dircmp
from os import path
from mistune import BlockLexer
from config import *
parser = BlockLexer()
def head_count(token, level):
    """Count parsed markdown tokens that are headings of `level`."""
    return sum(1 for tok in token
               if tok["type"] == "heading" and tok["level"] == level)
def cmp_md_struct(file1: str, file2: str) -> None:
    """Compare the heading structure of two markdown files.

    Parses both files with the module-level ``parser`` and prints a
    message for every heading level whose count differs between them.

    Args:
        file1: Path of the first markdown file.
        file2: Path of the second markdown file.
    """
    # Context managers close the handles deterministically; the original
    # left two open file objects to the garbage collector.
    with open(file1, encoding="utf-8") as fh1:
        token1 = parser.parse(fh1.read())
    with open(file2, encoding="utf-8") as fh2:
        token2 = parser.parse(fh2.read())
    # NOTE(review): levels 0-3 are compared, mirroring the original code;
    # markdown headings usually span 1-6, so levels 4-6 are never checked
    # — confirm whether that is intentional.
    for level in range(4):
        if head_count(token1, level) != head_count(token2, level):
            print("diff struct found, level %d in %s and %s" % (level, file1, file2))
def cmp_files(dcmp: dircmp) -> None:
    """Recursively compare a directory pair produced by ``dircmp``.

    Files present on only one side are reported; files present on both
    sides are compared structurally with ``cmp_md_struct``.
    """
    for name in dcmp.left_only + dcmp.right_only:
        print("diff file %s found in %s and %s" % (name, dcmp.left, dcmp.right))
    for name in dcmp.common_files:
        left_path = path.join(dcmp.left, name)
        right_path = path.join(dcmp.right, name)
        cmp_md_struct(left_path, right_path)
    # Recurse into every common subdirectory.
    for child in dcmp.subdirs.values():
        cmp_files(child)
if __name__ == "__main__":
    # Compare the translated (ZH) and original (EN) doc trees, skipping
    # bookkeeping files and directories that never need translation.
    cmp_files(dircmp(ZH_DOC_PATH, EN_DOC_PATH, ignore=["__init__.py", "images", ".git", "README.md", ".DS_Store"]))
| true |
ae1da66898883bb2404bd610e181543e70809ffe | Python | nilax97/leetcode-solutions | /solutions/Leaf-Similar Trees/solution.py | UTF-8 | 893 | 3.40625 | 3 | [
"MIT"
] | permissive | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def leafSimilar(self, root1: "TreeNode", root2: "TreeNode") -> bool:
        """Return True when both trees have the same leaf value sequence.

        The leaf value sequence is the list of leaf values read left to
        right.  Annotations are quoted because ``TreeNode`` is only
        defined by the judge environment (the local definition above is
        commented out); unquoted names raised NameError at import time.

        Args:
            root1: Root of the first binary tree (or None).
            root2: Root of the second binary tree (or None).

        Returns:
            True if both trees yield identical leaf sequences.
        """
        def get_leaf(root):
            # Collect leaf values via a left-to-right depth-first walk.
            if root is None:
                return []
            if root.left is None and root.right is None:
                return [root.val]
            return get_leaf(root.left) + get_leaf(root.right)

        # Removed the leftover debug print; list equality already checks
        # both the lengths and every element.
        return get_leaf(root1) == get_leaf(root2)
| true |
4822ef99b5e20206d9eccda1f068cdc96a03bcca | Python | DebRC/My-Competitve-Programming-Solutions | /Kattis/lineup.py | UTF-8 | 255 | 3.40625 | 3 | [] | no_license | n=int(input())
# Read the n names that follow (n itself was read above) and report
# whether the line-up is strictly sorted, reverse sorted, or neither
# (Kattis "lineup").  Dead debug prints and the redundant second sort
# were removed: the descending order is just the reversed ascending one.
names = [input() for _ in range(n)]
ascending = sorted(names)
if names == ascending:
    print("INCREASING")
elif names == ascending[::-1]:
    print("DECREASING")
else:
    print("NEITHER")
56b2788f8f090df6b7c0b3b41acee9534df97648 | Python | sohyuv/IoT_Car-Final_Project | /Handle&Winker/raspberryPi/serverRequester.py | UTF-8 | 1,008 | 2.671875 | 3 | [] | no_license | import serial
import time
import requests
def onInputTime():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    # The leftover debug print of the value's type was removed.
    return time.strftime('%Y-%m-%d %H:%M:%S')
# Last blinker value forwarded to the server; used to post only on change.
output = 0
# Poll the Arduino over the serial port forever and forward state changes.
while(1):
    try:
        # A fresh handle is opened for every read and closed again below.
        ser = serial.Serial("/dev/ttyACM0",baudrate=9600)
        readValue =ser.readline()
        print("----------------------------")
        print(readValue)
        print("----------------------------")
        # First two bytes of the serial line carry the state.
        # NOTE(review): presumably byte 0 is the blinker value and byte 1
        # the handle reading — confirm against the Arduino sketch.
        value = readValue[0]
        handle = readValue[1]
        try:
            ser.close()
            if(value!=output):
                # Only notify the server when the blinker value changed.
                output=value
                data = {'Value': output,'Time':onInputTime(),'Handle':handle}
                print("arduino Data is ->",data)
                URL = "http://192.168.0.14:3001/blinkers"
                response = requests.post(URL,data)
                print(response)
                response.close()
        except IndexError:
            # readValue was shorter than two bytes (empty/partial line).
            print("Index Error")
    except serial.serialutil.SerialException:
        # Serial port missing or busy; retry on the next iteration.
        print("Seiral ERror")
acbcecc0972d53b946f2e2cd58f8c59e5b73d46e | Python | basekim14/BJOJ_py | /bjoj_10951.py | UTF-8 | 220 | 2.65625 | 3 | [] | no_license | """
ㄱㄱㅊ <basekim14@gmail.com>, 20-06-16
Baekjoon Online Judge Study - 10951(A+B - 4)
"""
from sys import stdin
# Echo A+B for each input line until the input runs out (Baekjoon 10951).
try:
    while True:
        A, B = map(int, stdin.readline().split())
        print(A + B)
except (ValueError, EOFError):
    # readline() returns '' at EOF, so the unpack raises ValueError; the
    # old bare `except:` also swallowed KeyboardInterrupt and real bugs.
    exit()
| true |
dd81d38b382fb9e66967b1529cc6bd1c75cc2f97 | Python | GrahamHutchinso6275/Data-Vis-Module | /choropleth.py | UTF-8 | 694 | 2.6875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
# Render the bundled Natural Earth low-resolution world map as a quick
# sanity check of the geopandas/matplotlib setup.  The earlier
# commented-out experiments (local Brazil shapefile, amazon.csv,
# countries_110m) were removed as dead code.
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
print(world.head())
world.plot()
plt.show()
| true |
c97734d9347be205c1e183d94b321aa825c5f1e2 | Python | qutip/qutip | /qutip/continuous_variables.py | UTF-8 | 8,365 | 3.453125 | 3 | [
"BSD-3-Clause"
] | permissive | """
This module contains a collection functions for calculating continuous variable
quantities from fock-basis representation of the state of multi-mode fields.
"""
__all__ = ['correlation_matrix', 'covariance_matrix',
'correlation_matrix_field', 'correlation_matrix_quadrature',
'wigner_covariance_matrix', 'logarithmic_negativity']
from . import expect
import numpy as np
def correlation_matrix(basis, rho=None):
    r"""
    Build the correlation matrix
    :math:`C_{mn} = \langle a_m a_n \rangle` for a basis set of
    operators :math:`\{a\}_n`.

    Parameters
    ----------
    basis : list
        Operators that define the basis for the correlation matrix.
    rho : Qobj
        Density matrix for which to calculate the correlation matrix.
        When `rho` is `None`, the matrix of operator products is
        returned instead of their expectation values.

    Returns
    -------
    corr_mat : ndarray
        A 2-dimensional *array* of correlation values or operators.
    """
    size = len(basis)
    if rho is None:
        # No state given: return the operator products themselves.
        products = np.empty((size, size), dtype=object)
        for row, op_row in enumerate(basis):
            for col, op_col in enumerate(basis):
                products[row, col] = op_col * op_row
        return products
    # A state was given: evaluate every operator product against rho.
    return np.array(
        [[expect(op_col * op_row, rho) for op_col in basis]
         for op_row in basis]
    )
def covariance_matrix(basis, rho, symmetrized=True):
    r"""
    Compute the covariance matrix for a basis set of operators
    :math:`\{a\}_n`:

    .. math::

        V_{mn} = \frac{1}{2}\langle a_m a_n + a_n a_m \rangle -
                 \langle a_m \rangle \langle a_n\rangle

    or, when called with ``symmetrized=False``,

    .. math::

        V_{mn} = \langle a_m a_n\rangle -
                 \langle a_m \rangle \langle a_n\rangle

    Parameters
    ----------
    basis : list
        Operators that define the basis for the covariance matrix.
    rho : Qobj
        Density matrix for which to calculate the covariance matrix.
    symmetrized : bool {True, False}
        Whether the symmetrized (default) or non-symmetrized second
        moment is used.

    Returns
    -------
    corr_mat : ndarray
        A 2-dimensional array of covariance values.
    """
    def element(op1, op2):
        # One covariance entry for the operator pair (op1, op2).
        if symmetrized:
            moment = 0.5 * expect(op1 * op2 + op2 * op1, rho)
        else:
            moment = expect(op1 * op2, rho)
        return moment - expect(op1, rho) * expect(op2, rho)

    return np.array([[element(op1, op2) for op1 in basis] for op2 in basis])
def correlation_matrix_field(a1, a2, rho=None):
    """
    Correlation matrix for the two field operators `a1` and `a2`, taken
    in the basis ``[a1, a1.dag(), a2, a2.dag()]``.

    Parameters
    ----------
    a1 : Qobj
        Field operator for mode 1.
    a2 : Qobj
        Field operator for mode 2.
    rho : Qobj
        Density matrix; when `None`, a matrix of operators is returned
        instead of expectation values.

    Returns
    -------
    cov_mat : ndarray
        A 2-dimensional *array* of correlation values, or, when
        ``rho`` is `None`, a matrix of operators.
    """
    field_basis = [a1, a1.dag(), a2, a2.dag()]
    return correlation_matrix(field_basis, rho)
def correlation_matrix_quadrature(a1, a2, rho=None, g=np.sqrt(2)):
    """
    Quadrature correlation matrix in the basis ``[x1, p1, x2, p2]``
    built from the field operators `a1` and `a2`.

    Parameters
    ----------
    a1 : Qobj
        Field operator for mode 1.
    a2 : Qobj
        Field operator for mode 2.
    rho : Qobj
        Density matrix; when `None`, a matrix of operators is returned
        instead of expectation values.
    g : float
        Scaling factor for ``a = 0.5 * g * (x + iy)``; the default
        ``sqrt(2)`` corresponds to ``hbar = 2 / g ** 2 = 1``.

    Returns
    -------
    corr_mat : ndarray
        A 2-dimensional *array* of correlation values for the field
        quadratures, or, when ``rho`` is `None`, a matrix of operators.
    """
    # x and p quadratures of each mode, ordered x1, p1, x2, p2.
    quadrature_basis = []
    for mode in (a1, a2):
        quadrature_basis.append((mode + mode.dag()) / g)
        quadrature_basis.append(-1j * (mode - mode.dag()) / g)
    return correlation_matrix(quadrature_basis, rho)
def wigner_covariance_matrix(a1=None, a2=None, R=None, rho=None, g=np.sqrt(2)):
    r"""
    Wigner covariance matrix
    :math:`V_{ij} = \frac{1}{2}(R_{ij} + R_{ji})`, computed either from
    a supplied quadrature correlation matrix ``R`` or, when ``R`` is
    `None`, from the annihilation operators `a1` and `a2` of the two
    modes together with a density matrix `rho`.

    Parameters
    ----------
    a1 : Qobj
        Field operator for mode 1.
    a2 : Qobj
        Field operator for mode 2.
    R : ndarray
        The quadrature correlation matrix.
    rho : Qobj
        Density matrix for which to calculate the covariance matrix.
    g : float
        Scaling factor for ``a = 0.5 * g * (x + iy)``; the default
        ``sqrt(2)`` corresponds to ``hbar = 2 / g ** 2 = 1``.

    Returns
    -------
    cov_mat : ndarray
        A 2-dimensional array of covariance values.
    """
    if R is not None:
        # Symmetrize the 4x4 correlation matrix (element-wise when it
        # holds numbers, via expectation values when it holds operators).
        if rho is None:
            rows = [[0.5 * np.real(R[i, j] + R[j, i]) for i in range(4)]
                    for j in range(4)]
        else:
            rows = [[0.5 * np.real(expect(R[i, j] + R[j, i], rho))
                     for i in range(4)] for j in range(4)]
        return np.array(rows, dtype=np.float64)
    if a1 is not None and a2 is not None:
        if rho is None:
            raise ValueError("Must give rho if using field operators " +
                             "(a1 and a2)")
        # Build the quadratures and defer to the generic routine.
        x1 = (a1 + a1.dag()) / g
        p1 = -1j * (a1 - a1.dag()) / g
        x2 = (a2 + a2.dag()) / g
        p2 = -1j * (a2 - a2.dag()) / g
        return covariance_matrix([x1, p1, x2, p2], rho)
    raise ValueError("Must give either field operators (a1 and a2) " +
                     "or a precomputed correlation matrix (R)")
def logarithmic_negativity(V, g=np.sqrt(2)):
"""
Calculates the logarithmic negativity given a symmetrized covariance
matrix, see :func:`qutip.continuous_variables.covariance_matrix`. Note that
the two-mode field state that is described by `V` must be Gaussian for this
function to applicable.
Parameters
----------
V : *2d array*
The covariance matrix.
g : float
Scaling factor for `a = 0.5 * g * (x + iy)`, default `g = sqrt(2)`.
The value of `g` is related to the value of `hbar` in the commutation
relation `[x, y] = i * hbar` via `hbar=2/g ** 2` giving the default
value `hbar=1`.
Returns
-------
N : float
The logarithmic negativity for the two-mode Gaussian state
that is described by the the Wigner covariance matrix V.
"""
A = 0.5 * V[0:2, 0:2] * g ** 2
B = 0.5 * V[2:4, 2:4] * g ** 2
C = 0.5 * V[0:2, 2:4] * g ** 2
sigma = np.linalg.det(A) + np.linalg.det(B) - 2 * np.linalg.det(C)
nu_ = sigma / 2 - np.sqrt(sigma ** 2 - 4 * np.linalg.det(V)) / 2
if nu_ < 0.0:
return 0.0
nu = np.sqrt(nu_)
lognu = -np.log(2 * nu)
logneg = max(0, lognu)
return logneg
| true |
f94a108d22160f48aa4e10b8f581c00a382eb823 | Python | Tensaiyez/NYU-BigData | /task6/map.py | UTF-8 | 203 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import csv

# Hadoop-streaming mapper: for every CSV row on stdin whose state column
# (index 16) is '99' or purely alphabetic, emit "<column 14>, <state>\t1".
reader = csv.reader(sys.stdin, delimiter=',')
for row in reader:
    state = row[16]
    if state == '99' or state.isalpha():
        print(row[14] + ', ' + state + '\t' + '1')
| true |
f17edef192e96b3ded6f96f4598e4fe9dc220a40 | Python | pitchopp/total-lizzy-recommandation | /RecommandationEngine.py | UTF-8 | 5,517 | 2.875 | 3 | [] | no_license | import pandas as pd
from utils import string_to_int
import logging
class CollaborativeFiltering:
    """User-based collaborative filtering over training-session history.

    Builds a binary theme-participation matrix (theme code x trainee)
    from two CSV exports, measures user similarity with Pearson
    correlation over that matrix, and ranks the themes a user has not
    attended yet by the mean similarity of the users who did attend them.
    """

    # Number of most recent sessions held out as the evaluation set.
    TEST_SET_SIZE = 100

    def __init__(self, sessions_path, courses_path, logfile="logs.txt"):
        """Remember the CSV paths and set up console + file logging.

        Args:
            sessions_path: CSV export of session registrations.
            courses_path: CSV export of courses (client code -> theme).
            logfile: Path of the file that mirrors the log output.
        """
        self.logger = logging.getLogger("CollaborativeFiltering")
        self.logger.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s::%(name)s::%(levelname)s:: %(message)s')
        sh = logging.StreamHandler()
        sh.setLevel(logging.DEBUG)
        sh.setFormatter(formatter)
        self.logger.addHandler(sh)
        fh = logging.FileHandler(logfile)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        self.sessions_path = sessions_path
        self.courses_path = courses_path
        # Filled in by prepare_dataset() / fit().
        self.participation_matrix = None
        self.dataset = None
        self.train_dataset = None
        self.test_dataset = None
        self.courses_size = None
        self.logger.info("sessions file : " + sessions_path)
        self.logger.info("courses file : " + courses_path)

    def _load_files(self, delimiter=';') -> (pd.DataFrame, pd.DataFrame):
        """Read the two CSV exports.

        Returns:
            (sessions_df, courses_df) restricted to the columns used later.
        """
        self.logger.info("reading sessions file")
        sessions_df = pd.read_csv(self.sessions_path, delimiter=delimiter, encoding='utf-8')[["traineeusername", "clientcode", "date", "type"]]
        self.logger.info("sessions loaded")
        self.logger.info("reading courses file")
        courses_df = pd.read_csv(self.courses_path, delimiter=delimiter, encoding='utf-8')[["clientcode", "themecode"]]
        self.courses_size = len(courses_df)
        self.logger.info("courses loaded")
        return sessions_df, courses_df

    def prepare_dataset(self):
        """Clean and join the CSVs, then split into train/test sets."""
        self.logger.info("preparing dataset")
        sessions_df, courses_df = self._load_files()

        # extract course code from "clientcode" column
        def clientcode_to_actioncode(value):
            return string_to_int(value.split("-")[0])

        self.logger.info("cleaning and preparing data")
        sessions_df['clientcode'] = sessions_df.clientcode.apply(clientcode_to_actioncode)
        # convert date
        sessions_df["date"] = pd.to_datetime(sessions_df["date"])
        # Drop sessions dated in the future.  pd.Timestamp.today() replaces
        # the pd.datetime alias that was removed from pandas (>= 2.0).
        sessions_df = sessions_df[sessions_df.date <= pd.Timestamp.today()]
        # Keep only trainees with at least 3 registered sessions.
        sessions_df = sessions_df[sessions_df.groupby("traineeusername").clientcode.transform("count") > 2]
        # convert clientcode to integer
        courses_df["clientcode"] = courses_df.clientcode.apply(string_to_int)
        # join dataframes to get themecode for each session
        self.logger.info("join session and courses dataframes")
        themecode_df = sessions_df.merge(courses_df, on="clientcode", how="inner")
        themecode_df["rating"] = 1
        self.dataset = themecode_df
        self.logger.info("splitting dataset into train and test set")
        # Newest sessions first: the test set is the most recent activity.
        self.dataset.sort_values("date", ascending=False, inplace=True)
        self.test_dataset = self.dataset.head(self.TEST_SET_SIZE)
        self.train_dataset = self.dataset.tail(len(self.dataset) - self.TEST_SET_SIZE)
        self.logger.info("dataset ready")

    def fit(self):
        """Build the binary theme x trainee participation matrix."""
        self.prepare_dataset()
        self.logger.info("creating courses participation matrix")
        pivot = self.train_dataset.pivot_table(columns=["traineeusername"], index=["themecode"], values="rating")
        pivot = pd.DataFrame(pivot.to_records()).set_index("themecode")
        # 1 when the trainee attended the theme, 0 otherwise.
        pivot = (pivot.notnull()).astype('int')
        self.participation_matrix = pivot
        self.logger.info("fitting completed")

    def predict(self, user):
        """Rank the themes that `user` has not attended yet.

        Args:
            user: Trainee username (a column of the participation matrix).

        Returns:
            DataFrame indexed by themecode, sorted by descending mean
            similarity, or None when the user is unknown to the model.
        """
        self.logger.info("calculating recommandations for " + user)
        try:
            # Pearson correlation of every user column with this user.
            similarity = self.participation_matrix.corrwith(self.participation_matrix[user])
        except KeyError:
            self.logger.error("new user found : " + user)
            return None
        rating_toby = self.participation_matrix[user].copy()
        # Training sessions of OTHER users for themes this user skipped.
        rating_c = self.train_dataset.loc[(rating_toby[self.train_dataset.themecode] == 0).values & (self.train_dataset.traineeusername != user)]
        rating_c_similarity = rating_c['traineeusername'].map(similarity)
        rating_c = rating_c.assign(similarity=rating_c_similarity)
        rating_c.sort_values("similarity", ascending=False, inplace=True)
        # Mean similarity per theme is the recommendation score.
        result = rating_c.groupby("themecode").mean().sort_values("similarity", ascending=False).drop(columns=["rating", "clientcode"])
        return result

    def evaluate_prediction(self, user, course):
        """1-based rank of `course` in the recommendations for `user`.

        Returns:
            The rank, or -1 when the user is unknown or the course never
            appears in the ranking.
        """
        scores = self.predict(user)
        if scores is None:
            return -1
        try:
            position = scores.index.get_loc(course) + 1
        except KeyError:
            position = -1
        return position

    def evaluate_accuracy(self, test_size=None):
        """Mean rank of the attended theme across the held-out sessions.

        Args:
            test_size: Optional cap on the number of test rows evaluated.

        Returns:
            Mean 1-based rank over the rankable rows (NaN when none are).
        """
        df = self.test_dataset.copy()
        if test_size is not None:
            df = df.head(test_size)
        # pd.np was removed from pandas; rank every test row explicitly
        # and keep the results in a Series so that, as before, the mean
        # of an empty selection yields NaN rather than an error.
        evaluation = pd.Series(
            [self.evaluate_prediction(user, course)
             for user, course in zip(df.traineeusername, df.themecode)],
            index=df.index,
        )
        df["evaluation"] = evaluation
        # clean new users and new courses
        evaluation = evaluation[evaluation > 0]
        return evaluation.mean()
if __name__ == "__main__":
    # logging.basicConfig(filename="logs.txt", filemode="w")
    # Train the model on the exported CSV files; the (slow) accuracy
    # evaluation is left commented out below.
    model = CollaborativeFiltering(
        sessions_path="data/InscriptionsAuxSessionsDeFormation20210726_1156.csv",
        courses_path="data/ActionsDeFormationV220210630_0904.csv"
    )
    model.fit()
    # print(model.evaluate_accuracy())
| true |
f4929bfe18b822e45cb7c57a5fc96e3240bde5dd | Python | gickowic/RaananaTiraProject | /client/main.py | UTF-8 | 1,698 | 2.765625 | 3 | [] | no_license | import sys
from twisted.internet import reactor, protocol
from twisted.protocols import basic
from components import message
import game
import config
class Client(basic.LineReceiver):
    """Line-oriented protocol that relays connection events and parsed
    server messages to the owning factory."""
    def connectionMade(self):
        # Connection is up: hand ourselves to the factory/game layer.
        self.factory.connected(self)
    def connectionLost(self, reason):
        self.factory.disconnected(reason)
    def connectionFailed(self, reason):
        self.factory.failed(reason)
    def lineReceived(self, line):
        # Parse one wire message and dispatch on its action field.
        msg = message.parse(line)
        if msg.action == 'update':
            self.factory.update(msg.payload)
        elif msg.action == 'error':
            self.factory.error(msg.payload)
        else:
            print 'Unexpected message:', msg.action, msg.payload
class ClientFactory(protocol.ReconnectingClientFactory):
    """Reconnecting factory bridging the network client and the game."""
    protocol = Client
    # Reconnect after a fixed 2 second delay (no exponential backoff,
    # since initialDelay == maxDelay).
    initialDelay = 2
    maxDelay = 2
    def __init__(self, name):
        self.game = game.Game(self, name)
        self.client = None
    # Client events
    def connected(self, client):
        # Reset the reconnect backoff and tell the game we are online.
        self.resetDelay()
        self.client = client
        self.game.client_connected()
    def disconnected(self, reason):
        self.client = None
        self.game.client_disconnected(reason)
    def failed(self, reason):
        self.client = None
        self.game.client_failed(reason)
    def error(self, error):
        self.game.client_error(error)
    def update(self, info):
        self.game.client_update(info)
    # Client interface
    def send_message(self, msg):
        # Forward an outbound message to the server over the live client.
        self.client.sendLine(str(msg))
# Require the player name as the single command-line argument.
if len(sys.argv) < 2:
    print 'usage: start_client player-name'
    sys.exit(2)
# Connect to the configured server and run the reactor loop forever.
reactor.connectTCP(config.host, config.port, ClientFactory(sys.argv[1]))
reactor.run()
| true |
cd7fac3626e749bb53e467f5a58fb7ac023080c7 | Python | chadccollins/algo | /leetcode/src/merge-k-sorted-lists.py | UTF-8 | 2,430 | 3.890625 | 4 | [
"MIT"
] | permissive | # Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
#
# Example:
#
# Input:
# [
# 1->4->5,
# 1->3->4,
# 2->6
# ]
# Output: 1->1->2->3->4->4->5->6
# Definition for singly-linked list.
class ListNode(object):
    # Minimal singly-linked list node: a value plus the next pointer.
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution(object):
    def mergeKLists(self, lists):
        """
        Merge k sorted linked lists and return the head of one sorted list.

        Lists are merged pairwise from the two ends of the array inward,
        halving the number of live lists every round: O(N log k) time
        for N total nodes, O(1) extra space.

        :type lists: List[ListNode]
        :rtype: ListNode
        """
        def merge(a, b):
            """
            Merge two sorted lists in place and return the new head.

            Iterative instead of the original recursion: the recursion
            depth grew with the combined list length and could overflow
            the call stack on long inputs.

            :type a, b: ListNode
            :rtype: ListNode
            """
            if not a:
                return b
            if not b:
                return a
            # Pick the smaller head as the anchor of the result.
            if a.val < b.val:
                head = a
                a = a.next
            else:
                head = b
                b = b.next
            tail = head
            # Stitch the remaining nodes onto the tail, smallest first.
            while a and b:
                if a.val < b.val:
                    tail.next = a
                    a = a.next
                else:
                    tail.next = b
                    b = b.next
                tail = tail.next
            # One list is exhausted; append the remainder of the other.
            tail.next = a if a else b
            return head

        # Trivial cases first.
        if len(lists) == 0:
            return None
        if len(lists) == 1:
            return lists[0]
        # Merge the first list with the last, the second with the
        # second-to-last, ... until only slot 0 survives.
        end = len(lists) - 1
        while end != 0:
            i, j = 0, end
            while i < j:
                lists[i] = merge(lists[i], lists[j])
                i += 1
                j -= 1
            # The pointers met or crossed; live lists now occupy 0..j.
            end = j
        return lists[0]
# Smoke test: merge 1->4->5, 1->3->4 and 2->6, then print each merged
# value on its own line.
def _chain(*nodes):
    for left, right in zip(nodes, nodes[1:]):
        left.next = right
    return nodes[0]

first = _chain(ListNode(1), ListNode(4), ListNode(5))
second = _chain(ListNode(1), ListNode(3), ListNode(4))
third = _chain(ListNode(2), ListNode(6))
result = Solution().mergeKLists([first, second, third])
while result is not None:
    print(result.val)
    result = result.next
| true |
cca07107ed30ccc2b710ea17f4b6a4db151d2213 | Python | crockeo/pyed-piper | /server/lib/synth/input/keyboard.py | UTF-8 | 915 | 2.6875 | 3 | [
"MIT"
] | permissive | import keyboard
from lib.common import config
from lib.synth.input import BaseInput
from lib.synth.input import InputAction
class KeyboardInput:
    """Synth input backend that samples button state from the keyboard.

    Buttons are mapped to key names through config.KEYBOARD_BUTTONS; the
    last observed state of every key is cached so that press/release
    edges can be detected.
    """

    def __init__(self):
        # key name -> last observed pressed state
        self.button_states = {}

    def get_button_count(self) -> int:
        """Number of buttons this backend exposes."""
        return 16

    def is_pressed(self, button: int) -> bool:
        """Sample the current state of `button` and cache it."""
        key = config.KEYBOARD_BUTTONS[button]
        state = keyboard.is_pressed(key)
        self.button_states[key] = state
        return state

    def just_actioned(self, button: int) -> InputAction:
        """Report the edge (press/release) since the previous sample."""
        key = config.KEYBOARD_BUTTONS[button]
        was_pressed = self.button_states.get(key, False)
        now_pressed = self.is_pressed(button)
        if now_pressed == was_pressed:
            return InputAction.NoAction
        return InputAction.Pressed if now_pressed else InputAction.Released
| true |
e6c9ecaea0785cbef6f50ee1e87759b9ec7305db | Python | FrancescaR11/ESERCIZIO_TAXI | /Eliminazione_Nan.py | UTF-8 | 920 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 17:35:23 2020
@author: francescaronci, gaiad
"""
from tqdm import tqdm
import time
'''
Creo la funzione che controlla se le colonne del DataFrame di Input contengono valori Nan.
In tal caso sostituisce i NaN con degli zeri.
Importo il modulo 'tqdm' per aggiornare l'utente sullo stato di esecuzione tramite barra di avanzamento
'''
def replace_Nan_with_zeros(DataFrame):
    """Replace every NaN in the DataFrame with 0, column by column, in place.

    A tqdm progress bar keeps the user informed while the columns are
    scanned.

    Args:
        DataFrame: pandas DataFrame to clean (modified in place).
    """
    column_names = list(DataFrame.columns)  # names of the DataFrame columns
    # Scan every column, updating the progress bar as we go.
    for i in tqdm(range(len(column_names)), desc='Esecuzione eliminazione NaN'):
        name = column_names[i]
        if DataFrame[name].isnull().sum() > 0:  # the column holds NaNs...
            # ...replace them with zero.  Assign the filled column back
            # instead of calling fillna(inplace=True) on the selection:
            # in-place fillna on a column selection is chained assignment
            # in pandas and may not write through to the DataFrame.
            DataFrame[name] = DataFrame[name].fillna(0)
| true |
0b8c8aba87cd044f918c8fa470f8c428f780fe95 | Python | aidanjames/space-invaders | /enemy.py | UTF-8 | 718 | 3.875 | 4 | [] | no_license | from turtle import Turtle
class Enemy(Turtle):
    """A space-invader enemy rendered as a white turtle.

    Horizontal movement is clamped to x in [-270, 270] and descent
    stops at y = -270 so the enemy stays on the playfield.
    """

    def __init__(self, x_pos, y_pos):
        """Place the enemy at (x_pos, y_pos), facing down, pen up."""
        super().__init__()
        self.color("white")
        self.shape("turtle")
        self.shapesize(stretch_wid=1.5, stretch_len=1.5)
        self.right(90)
        self.penup()
        self.goto(x_pos, y_pos)

    def move_left(self):
        """Step half a unit left unless already past the left edge."""
        if self.xcor() < -270:
            return
        self.goto(self.xcor() - 0.5, self.ycor())

    def move_right(self):
        """Step half a unit right unless already past the right edge."""
        if self.xcor() > 270:
            return
        self.goto(self.xcor() + 0.5, self.ycor())

    def move_down(self):
        """Drop one unit unless already past the bottom edge."""
        if self.ycor() < -270:
            return
        self.goto(self.xcor(), self.ycor() - 1)
| true |
b6fb2d09a0fefe5e81bc34eae804d1f0c20a2617 | Python | edt-yxz-zzd/python3_src | /nn_ns/data_structure/TreeNodeOps/RedBlackTreeNodeOps/copy_from_another_ops_subtree_as_tree__def.py | UTF-8 | 2,709 | 2.953125 | 3 | [] | no_license |
__all__ = '''
copy_from_another_ops_subtree_as_tree__def
'''.split()
def copy_from_another_ops_subtree_as_tree__def(ops, another_ops, another_node):
    '''
    may return a tree with red root
        which is not a red_black_tree
    requires:
        another_ops / from_ops
            .is_leaf
            .iter_children
            .get_the_color
            .get_the_entity
            .is_RED
        ops / to_ops
            .make_root_leaf
            .make_root_nonleaf
            .get_RED
            .get_BLACK
    '''
    # Iterative post-order copy: a two-phase work stack (CASE_recur =
    # visit node, CASE_merge = rebuild it from already-copied children)
    # replaces recursion, so deep trees cannot overflow the call stack.
    #is_root = another_ops.is_root
    # Cache the source-side accessors once up front.
    is_leaf = another_ops.is_leaf
    iter_children = another_ops.iter_children
    get_the_color = another_ops.get_the_color
    get_the_entity = another_ops.get_the_entity
    is_RED = another_ops.is_RED
    #assert is_root(another_node)
    CASE_recur = 0
    CASE_merge = 1
    another_stack = [(CASE_recur, another_node)]
    # another_stack = [(CASE_recur, another_node)|(CASE_merge, another_node)]
    # subtree_stack holds copied subtrees awaiting their parent's merge.
    subtree_stack = []
    # Cache the destination-side constructors and color constants.
    make_root_leaf = ops.make_root_leaf
    make_root_nonleaf = ops.make_root_nonleaf
    RED = ops.get_RED()
    BLACK = ops.get_BLACK()
    while another_stack:
        case, another_node = another_stack.pop()
        if case is CASE_recur:
            if is_leaf(another_node):
                # basic case of recur
                node = make_root_leaf()
                subtree_stack.append(node)
            else:
                # Schedule the merge after both children are copied.
                another_stack.append((CASE_merge, another_node))
                another_left, another_right = iter_children(another_node)
                another_stack.append((CASE_recur, another_left))
                another_stack.append((CASE_recur, another_right))
                # another_stack append: another_left, then another_right
                # ==>> another_stack = [..., another_left, another_right]
                # ... flip ...
                # ==>> will subtree_stack = [..., right, left]
        else:
            assert case is CASE_merge
            assert not is_leaf(another_node)
            assert len(subtree_stack) >= 2
            # subtree_stack = [..., right, left]
            left_child = subtree_stack.pop()
            right_child = subtree_stack.pop()
            entity = get_the_entity(another_node)
            another_color = get_the_color(another_node)
            # Translate the source color into this ops' own constants.
            color = RED if is_RED(another_color) else BLACK
            # bug: color = get_the_color(another_node)
            node = make_root_nonleaf(
                color=color, entity=entity
                , left_child=left_child, right_child=right_child
                )
            subtree_stack.append(node)
    # Exactly the copied root must remain on the subtree stack.
    assert len(subtree_stack) == 1
    root, = subtree_stack
    may_red_root = root
    return may_red_root
| true |
00b4294d340826ffcfb2ad998f3a3a5a0668bd6e | Python | ThotAlion/FIRE | /NAO/control_joystick/2/nao.py | UTF-8 | 5,299 | 2.515625 | 3 | [
"MIT"
] | permissive | from naoqi import ALProxy
import time
class Nao:
    """Remote-control wrapper around a NAO robot's NAOqi proxies.

    Creates proxies for motion, posture, speech, memory, LEDs and
    behaviors at construction time and exposes high-level helpers for
    walking, head movement, LEDs, events and speech.
    """
    def __init__(self,robotIP,robotName, PORT=9559):
        self.name = robotName
        # Each proxy is created independently; a failure is only logged
        # so the remaining proxies can still be set up.
        try:
            self.motion = ALProxy("ALMotion", robotIP, PORT)
        except Exception, e:
            print self.name+" Could not create proxy to ALMotion"
            print "Error was: ",e
        try:
            self.posture = ALProxy("ALRobotPosture",robotIP, PORT)
        except Exception, e:
            print self.name+" Could not create proxy to ALRobotPosture"
            print "Error was: ",e
        try:
            self.speech = ALProxy("ALTextToSpeech", robotIP, PORT)
        except Exception, e:
            print self.name+" Could not create proxy to ALTextToSpeech"
            print "Error was: ",e
        try:
            self.memory = ALProxy("ALMemory", robotIP, PORT)
        except Exception, e:
            print self.name+" Could not create proxy to ALMemory"
            print "Error was: ",e
        try:
            self.leds = ALProxy("ALLeds", robotIP, PORT)
        except Exception, e:
            print self.name+" Could not create proxy to ALLeds"
            print "Error was: ",e
        try:
            self.behavior = ALProxy("ALBehaviorManager", robotIP, PORT)
        except Exception, e:
            print self.name+" Could not create proxy to ALBehavior"
            print "Error was: ",e
        ##### Init of nao, position and move
        # Movement state flags used by update_walk()/move_head().
        self.is_walking = False
        self.is_headmoving = False
        self.is_turning = False
        self.name = robotName
        print "creation du nao: "+self.name
    def init_pos(self):
        # Stop any motion, stiffen the body and crouch, then enable the
        # whole-body head effector and (re)start the joystick behavior.
        if self.motion:
            self.motion.stopMove()
            self.motion.setStiffnesses("Body", 1.0)
            self.go_posture("Crouch")
        ## Enable arms control by Motion algorithm
        if self.motion:
            #self.motion.setMoveArmsEnabled(True, True)
            ## Enable head to move
            self.motion.wbEnableEffectorControl("Head", True)
        if self.behavior.isBehaviorInstalled("main_joystick-d361da/behavior_1"):
            self.behavior.stopAllBehaviors()
            self.behavior.startBehavior("main_joystick-d361da/behavior_1")
    ### NOT use . Use of memoryEvent("PostureAsked", name ) instead
    def go_posture(self, posture_name):
        # "Rest" is special-cased: it relaxes the robot instead of
        # driving it to a named posture.
        if posture_name != "Rest":
            self.motion.stopMove()
            self.posture.goToPosture(posture_name, 0.65)
        else:
            self.motion.rest()
            print "rest !"
    #####
    # If you just want a shortcut to reach the posture quickly when manipulating the robot
    # you can use ALRobotPostureProxy::applyPosture() (you will have to help the robot)
    #
    #Crouch,LyingBack,LyingBelly,Sit,SitRelax,Stand,StandInit,StandZero
    ##############
    def update_walk(self, X, Y, Theta, Speed):
        # Drive the walk engine while Speed is above the dead zone;
        # otherwise stop any walk/turn that was in progress.
        if Speed > 0.01 :
            Frequency = abs(Speed)
            # NOTE(review): X>0 is treated as "walking", anything else
            # as "turning" — confirm that is the intended distinction.
            if X>0 :
                self.is_walking = True
            else :
                self.is_turning = True
            try:
                #motion.moveToward( X, Y, Theta, [["Frequency", Frequency]])
                self.motion.setWalkTargetVelocity( X, Y, Theta, Frequency)
            except Exception, errorMsg:
                print str(errorMsg)
                print " not allowed to walk "
        else:
            if self.is_turning:
                self.motion.moveToward(0,0,0)
                self.is_turning = False
            if self.is_walking:
                self.motion.moveToward(0,0,0)
                self.is_walking = False
            #motion.stopMove()
            #nao_go_posture("StandInit")
    def move_head(self, yaw,pitch):
        # Stiffen the head on the first non-zero command, drive both
        # joints (degrees converted to radians), then relax the head
        # again once the command returns to zero.
        if(not(self.is_headmoving) and abs(yaw * pitch)>0):
            self.motion.stiffnessInterpolation("Head", 1.0, 0.1)
            self.is_headmoving = True
        fractionMaxSpeed = 0.2
        self.motion.setAngles("HeadYaw",yaw*3.14/180.0, fractionMaxSpeed);
        self.motion.setAngles("HeadPitch",pitch*3.14/180.0, fractionMaxSpeed);
        if(not(self.is_headmoving) and (yaw*pitch==0.0)):
            self.motion.stiffnessInterpolation("Head", 0.0, 0.4)
            self.is_headmoving = False
        #timeLists = [[0.2], [0.2]]
        #motion.angleInterpolationBezier(names, timeLists, angleLists)
    def memoryEvent(self, name, num):
        # Raise an ALMemory event; behaviors installed on the robot can
        # subscribe to it.
        self.memory.raiseEvent(name, num)
    def use_leds(self, name, value):
        # Small LED effect dispatcher: "ear" toggles ear/brain LEDs,
        # "rotate" spins the eye LEDs, "rasta" runs the rasta effect.
        if name == "ear" :
            if value > 0 :
                self.leds.on("EarLeds")
                self.leds.on("BrainLeds")
            else:
                self.leds.off("EarLeds")
                self.leds.off("BrainLeds")
        if name == "rotate" :
            self.leds.rotateEyes(0x00FF0000, 0.5, value)
        if name == "rasta":
            self.leds.rasta(value)
    def activate(self, is_activated):
        #function in order to recognize the current nao remotely controlled
        if is_activated and self.leds:
            self.use_leds("ear", 1)
        elif self.leds :
            self.use_leds("ear", 0)
    def say(self, toSay):
        # Speak the given text through the text-to-speech proxy.
        self.speech.say(toSay)
| true |
a697dfe5a65b40d8cb53c6e86105f94826f6acbe | Python | adarshkr532/cryptopals | /set1/2-fixedxor.py | UTF-8 | 330 | 2.875 | 3 | [] | no_license | def fixed_xor(s, t):
hs = int(s, 16)
ht = int(t, 16)
hs = hs^ht
out = (hex(hs).lstrip('0x'))
return out
if __name__ == '__main__':
    # Cryptopals set 1, challenge 2: check fixed_xor against the known
    # challenge vectors and print whether the output matches.
    s = "1c0111001f010100061a024b53535009181c"
    t = "686974207468652062756c6c277320657965"
    res = "746865206b696420646f6e277420706c6179"
    print (fixed_xor(s, t) == res)
| true |
bd7ff0f3b0be18840ac4d0ea6a9ef4967ef89c6e | Python | cragwolfe/gamblers-dice | /dice/experiment.py | UTF-8 | 2,569 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python3.4
from datetime import datetime
import math
import os
import sys
import requests
from gamblersdice import GenericDie
class state():
    # Bare namespace object: instances just carry ad-hoc attributes that
    # the main loop and its recording closure share.
    pass
def header():
    """Column header line matching the layout of format_result()."""
    columns = ('Bias', 'Number of rolls', 'Normalized freq stddev',
               'Frequencies')
    return "%-6s %-15s %9s %s" % columns
def format_result(the_die):
    """One human-readable result row for `the_die` (see header())."""
    # The standard deviation is normalized by sqrt(number of rolls).
    normalized_std = the_die.freq_std * math.sqrt(the_die.n)
    fields = (the_die.bias, the_die.n, normalized_std, str(the_die.freq))
    return "%-6s %15d %6.3f %s" % fields
def format_csv_result(the_die):
    """Comma separated row:

    bias, total_rolls, normalized_freq_std_dev,
    roll_1_freq, ... , roll_n_freq
    """
    normalized_std = the_die.freq_std * math.sqrt(the_die.n)
    cells = [the_die.bias, str(the_die.n), str(normalized_std)]
    # Frequencies are written with full double precision.
    for frequency in the_die.freq:
        cells.append('%0.18f' % frequency)
    return ",".join(cells)
def post_result(the_die, port):
    """POST one CSV result row to the data-collector service.

    The collector host comes from DATA_COLLECTOR_HOSTNAME (default
    localhost); `port` is supplied by the caller.

    Raises:
        RuntimeError: when the collector answers with a non-200 status.
    """
    hostname = os.environ.get('DATA_COLLECTOR_HOSTNAME', 'localhost')
    url = 'http://%s:%s' % (hostname, port)
    payload = {'data': format_csv_result(the_die)}
    response = requests.post(url, json=payload)
    if response.status_code != 200:
        raise RuntimeError("Error posting to data collector: " + response.text)
if __name__ == '__main__':
    # All experiment knobs come from the environment with defaults.
    die_bias = os.environ.get('DIE_BIAS', 'random')
    seconds_threshold = int(os.environ.get('SOFT_STOP_SECONDS', '62400'))
    # NOTE(review): these two come back as *strings* when overridden via
    # the environment and as ints only when defaulted — confirm that
    # GenericDie and the schedule comparison tolerate both.
    iters_for_first_result = os.environ.get('FIRST_ITER_TO_RECORD', 10)
    num_sides = os.environ.get('SIDES_ON_DIE', 6)
    data_collector_port = os.environ.get('DATA_COLLECTOR_PORT', '8777')
    the_die = GenericDie(sides=num_sides, bias=die_bias)
    # Shared mutable state for the recording closure below.
    state = state()
    state.begin_time = datetime.now()
    state.next_recording = iters_for_first_result
    state.iters = 0
    def _maybe_record_result():
        # Record on a doubling schedule: iteration 10, 20, 40, 80, ...
        if state.iters != state.next_recording:
            return
        state.next_recording = state.iters * 2
        print(format_result(the_die), flush=True)
        post_result(the_die, data_collector_port)
        now = datetime.now()
        # Soft stop: exit once the wall-clock budget is exhausted.
        if (now - state.begin_time).total_seconds() > seconds_threshold:
            sys.exit(0)
    print("Beginning experiment for %s die" % the_die.bias)
    print(header(), flush=True)
    # Roll forever; _maybe_record_result() ends the process when due.
    while True:
        the_die.roll()
        state.iters += 1
        _maybe_record_result()
| true |