repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
dplusplus/anarchy_golf | refs/heads/master | for i in[501,24,25,77,388,22,0,324,297,376,296]:print format(i,'09b')
| Python | 1 | 69 | 69 | /python/748.Bit_Grid.py | 0.685714 | 0.271429 |
dplusplus/anarchy_golf | refs/heads/master | i=99;s=', %s.\n'
f=lambda i:'%d shinichiro%s of hamaji on the wall'%(i,'es'[:i*2-2])
while i:print f(i)+s%f(i)[:-12]+{1:'Go to the store and buy some more'+s%f(99)}.get(i,'Take one down and pass it around'+s%f(i-1));i-=1
| Python | 3 | 72.666664 | 135 | /python/3.99_shinichiroes_of_hamaji.py | 0.606335 | 0.556561 |
dplusplus/anarchy_golf | refs/heads/master | from itertools import permutations as p
for i in p(raw_input()):print''.join(i)
| Python | 2 | 39 | 39 | /python/7.permutater.py | 0.7375 | 0.7375 |
tanmayuw/ContainerProfiler | refs/heads/main | import argparse
import os
import shutil
import sys
import json
import copy
import configparser
from collections import namedtuple
# --- CLI: the folder of per-sample JSON profiles and the delta interval (seconds) ---
parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')
parser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')
parser.add_argument('delta_interval_time', action='store', help='stores time interval of when to take delta sample')
args= parser.parse_args()
file_path = args.file_path
# Recreate <file_path>/delta_json from scratch so stale deltas never linger.
if os.path.exists(file_path + '/delta_json'):
	shutil.rmtree(file_path + '/delta_json')
if not os.path.exists(file_path + '/delta_json'):
	os.makedirs(file_path + '/delta_json')
# json_array: parsed samples in filename order; delta_name_array: one output path per input file.
json_array = []
delta_name_array = []
# Sample filenames are timestamped, so a lexicographic sort orders them in time.
dirs= sorted([i for i in os.listdir( file_path ) if i.endswith(".json")])
for file_name in dirs:
	with open(file_path + '/' + file_name) as json_file:
		print ('JSON FILES TANMAY:')
		# NOTE(review): prints the file object's repr, not its contents -- confirm intended.
		print(json_file)
		try:
			new_json_object = json.load(json_file)
			json_array.append(new_json_object)
			# Output name mirrors the input: x.json -> delta_json/x_delta.json.
			new_name= ((file_path+'/delta_json/'+file_name).split('.json')[0] + '_delta.json')
			delta_name_array.append(new_name)
		except Exception as e:
			# Unparseable samples are skipped so one bad file doesn't abort the run.
			print ("{} invalid file".format(json_file))
			pass
# Metrics whose delta (newer sample minus older sample) is meaningful; every
# other field is carried over unchanged from the newer sample.
_DELTA_KEYS = (
	'cCpuTime', 'cCpuTimeKernelMode', 'cCpuTimeUserMode',
	'cDiskReadBytes', 'cDiskSectorIO', 'cDiskWriteBytes',
	'cNetworkBytesRecvd', 'cNetworkBytesSent',
	'vCpuContextSwitches', 'vCpuIdleTime', 'vCpuNice', 'vCpuSteal',
	'vCpuTime', 'vCpuTimeIOWait', 'vCpuTimeKernelMode',
	'vCpuTimeSoftIntSrvc', 'vCpuTimeUserMode',
	'vDiskMergedReads', 'vDiskMergedWrites', 'vDiskReadTime',
	'vDiskSectorWrites', 'vDiskSuccessfulReads', 'vDiskSuccessfulWrites',
	'vDiskWriteTime', 'vNetworkBytesRecvd', 'vNetworkBytesSent',
)
def file_subtraction(the_json_one, the_json_two):
	"""Return a delta sample: the_json_two minus the_json_one.

	Starts from a deep copy of the newer sample so non-cumulative fields
	(ids, gauges, timestamps) are preserved, then overwrites each known
	cumulative counter with its difference.  Per-core counters nested under
	'cProcessorStats' (keys like 'cCpu0TIME') are delta'd the same way.
	The original body repeated the same if-block 26 times; the loop over
	_DELTA_KEYS is behaviorally identical.
	"""
	json_three = copy.deepcopy(the_json_two)
	for key in _DELTA_KEYS:
		# As in the original: a key present in the older sample is assumed
		# present in the newer one.
		if key in the_json_one:
			json_three[key] = the_json_two[key] - the_json_one[key]
	if 'cProcessorStats' in the_json_one:
		for each_key in the_json_two['cProcessorStats']:
			if 'cCpu' in each_key and 'TIME' in each_key:
				json_three['cProcessorStats'][each_key] = the_json_two['cProcessorStats'][each_key] - the_json_one['cProcessorStats'][each_key]
	return json_three
# Walk samples in time order, accumulating elapsed wall-clock time; once the
# accumulated gap reaches the requested interval, emit one delta against the
# last emitted baseline and reset the accumulator.
delta_json_array=[]
count = 0
first = json_array[0]
for i in range(1, len(json_array)):
	count += (json_array[i]["currentTime"] - json_array[i-1]["currentTime"])
	if count >= int(args.delta_interval_time):
		delta_json_array.append(file_subtraction(first, json_array[i]))
		count = 0
		first = json_array[i]
# NOTE(review): delta_name_array holds one name per INPUT file while
# delta_json_array usually has fewer entries (one per interval), so deltas are
# written under the first N input-derived names -- confirm intended.
for i in range(len(delta_json_array)):
	with open(delta_name_array[i], 'w') as fp:
		json.dump(delta_json_array[i], fp, sort_keys=True, indent=2)
| Python | 108 | 53.074074 | 131 | /Graphing/auto_generated_delta_script.py | 0.729966 | 0.728767 |
tanmayuw/ContainerProfiler | refs/heads/main | import psutil
import json
import argparse
from datetime import datetime
import re
import subprocess
import os.path
from os import path
#add the virtual level.
# Scaling constant for CPU times: psutil reports seconds, the profiler schema
# stores tick-like values; assumes USER_HZ == 100 -- TODO confirm.
CORRECTION_MULTIPLIER=100
# Memory values are scaled down from bytes -- presumably to match the original
# bash profiler's units; verify against consumers.
CORRECTION_MULTIPLIER_MEMORY=(1/1000)
parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')
parser.add_argument('output_dir', action='store', help='stores directory to where the files will be output to')
parser.add_argument("-v", "--vm_profiling", action="store_true", default=False, help='list of metrics to graph over')
parser.add_argument("-c", "--container_profiling", action="store_true", default=False, help='list of metrics to graph over')
parser.add_argument("-p", "--processor_profiling", action="store_true", default=False, help='list of metrics to graph over')
args= parser.parse_args()
output_dir = args.output_dir
# Selecting no level at all means profile every level.
if all(v is False for v in [args.vm_profiling, args.container_profiling, args.processor_profiling]):
    args.vm_profiling = True
    args.container_profiling=True
    args.processor_profiling=True
# One JSON sample file per invocation, named by timestamp to the second.
filename = datetime.now().strftime(output_dir+"/%Y_%m_%d_%H_%M_%S.json")
output_dict={}
def getContainerInfo():
    """Collect container-level metrics from cgroup v1, /proc and /etc files.

    Returns a dict of cumulative counters (CPU time, disk/network bytes) and
    gauges (memory usage, process count) for the container this profiler runs
    inside.  Assumes a cgroup-v1 hierarchy under /sys/fs/cgroup.
    """
    # Total container CPU time in nanoseconds (cumulative).
    with open("/sys/fs/cgroup/cpuacct/cpuacct.usage", "r") as cpuTime_file:
        cpuTime = int(cpuTime_file.readline())
    # Page-fault counters from the cgroup memory controller.
    with open("/sys/fs/cgroup/memory/memory.stat", "r") as container_mem_file:
        container_mem_stats = container_mem_file.read()
    cpgfault = int(re.findall(r'pgfault.*', container_mem_stats)[0].split()[1])
    cpgmajfault = int(re.findall(r'pgmajfault.*', container_mem_stats)[0].split()[1])
    # Aggregate user (field 1) and system (field 3) ticks from the 'cpu ' line.
    with open("/proc/stat", "r") as cpuinfo_file:
        cpuinfo_file_stats = cpuinfo_file.read()
    cCpuTimeUserMode = int(re.findall(r'cpu.*', cpuinfo_file_stats)[0].split()[1])
    cCpuTimeKernelMode = int(re.findall(r'cpu.*', cpuinfo_file_stats)[0].split()[3])
    # Per-core cumulative CPU time: one whitespace-separated column per core.
    with open("/sys/fs/cgroup/cpuacct/cpuacct.usage_percpu", "r") as per_cpu_file:
        per_cpu_times = per_cpu_file.readline().split()
    cProcessorDict = {}
    for core_index, core_time in enumerate(per_cpu_times):
        # Key shape 'cCpu<n>TIME' is what the delta scripts pattern-match on.
        # Fix: was 'cCpu${}TIME' -- a bash-template leftover that baked a
        # literal '$' into every per-core key.
        cProcessorDict["cCpu{}TIME".format(core_index)] = int(core_time)
    # blkio.sectors only exists on some kernels; treat absence as zero.
    cDiskSectorIO = 0
    if path.exists('/sys/fs/cgroup/blkio/blkio.sectors'):
        # Fix: the original passed the file OBJECT (not its text) to
        # re.findall and then summed strings; both raised TypeError.
        with open("/sys/fs/cgroup/blkio/blkio.sectors", "r") as sector_file:
            sector_text = sector_file.read()
        # NOTE(review): the r'cpu.*' pattern is inherited from the original;
        # blkio.sectors lines are 'MAJ:MIN count' -- confirm the pattern.
        cDiskSectorIO = sum(int(tok) for tok in re.findall(r'cpu.*', sector_text)[0].split())

    def _disk_major_minors():
        # MAJ:MIN identifiers of whole-disk devices, via `lsblk -a | grep disk`.
        p1 = subprocess.Popen(['lsblk', '-a'], stdout=subprocess.PIPE)
        p2 = subprocess.Popen(['grep', 'disk'], stdin=p1.stdout, stdout=subprocess.PIPE)
        o, e = p2.communicate()
        return [line.split()[1] for line in o.decode('UTF-8').split(sep='\n')[:-1]]

    def _blkio_bytes(direction):
        # Sum blkio throttle byte counters ('Read' or 'Write') over whole disks.
        with open("/sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes", "r") as f:
            stats = f.read()
        majors = _disk_major_minors()
        total = 0
        for entry in re.findall(r'.*' + direction + r'.*', stats):
            fields = entry.split()
            if fields[0] in majors:
                total += int(fields[2])
        return total

    # Disk byte counters are best-effort: missing tools/files leave zeros,
    # matching the original's try/except-pass behavior.
    cDiskReadBytes = 0
    cDiskWriteBytes = 0
    try:
        cDiskReadBytes = _blkio_bytes('Read')
    except Exception:
        pass
    try:
        cDiskWriteBytes = _blkio_bytes('Write')
    except Exception:
        pass
    # Network counters for eth0 only (containers normally expose one eth0);
    # other interface names leave zeros.
    with open("/proc/net/dev", "r") as cNetworkBytesFile:
        cNetworkBytesFileStats = cNetworkBytesFile.read()
    cNetworkBytesRecvd = 0
    cNetworkBytesSent = 0
    try:
        cNetworkBytesArr = re.findall(r'eth0.*', cNetworkBytesFileStats)[0].split()
        cNetworkBytesRecvd = int(cNetworkBytesArr[1])
        cNetworkBytesSent = int(cNetworkBytesArr[9])
    except Exception:
        pass
    # Memory gauges from the cgroup memory controller (bytes).
    with open("/sys/fs/cgroup/memory/memory.usage_in_bytes", "r") as f:
        cMemoryUsed = int(f.readline().rstrip('\n'))
    with open("/sys/fs/cgroup/memory/memory.max_usage_in_bytes", "r") as f:
        cMemoryMaxUsed = int(f.readline().rstrip('\n'))
    # Inside a container the hostname is the (short) container id.
    with open("/etc/hostname", "r") as cId_file:
        cId = cId_file.readline().rstrip('\n')
    # Task count minus 2 -- presumably discounting the profiler's own tasks;
    # TODO confirm the offset.
    with open("/sys/fs/cgroup/pids/tasks", "r") as tasks_file:
        cNumProcesses = sum(1 for line in tasks_file) - 2
    container_dict={
        "cCpuTime": cpuTime,
        "cNumProcessors": psutil.cpu_count(),
        "cPGFault": cpgfault,
        "cMajorPGFault": cpgmajfault,
        "cProcessorStats": cProcessorDict,
        "cCpuTimeUserMode": cCpuTimeUserMode,
        "cCpuTimeKernelMode": cCpuTimeKernelMode,
        "cDiskSectorIO": cDiskSectorIO,
        "cDiskReadBytes": cDiskReadBytes,
        "cDiskWriteBytes": cDiskWriteBytes ,
        "cNetworkBytesRecvd":cNetworkBytesRecvd,
        "cNetworkBytesSent": cNetworkBytesSent,
        "cMemoryUsed": cMemoryUsed,
        "cMemoryMaxUsed": cMemoryMaxUsed,
        "cId": cId,
        "cNumProcesses": cNumProcesses,
        "pMetricType": "Process level"
    }
    return container_dict
def getVmInfo():
    """Collect VM/host-level metrics via psutil plus /proc files.

    Returns a dict of cumulative counters and gauges; CPU times are scaled by
    CORRECTION_MULTIPLIER and memory values by CORRECTION_MULTIPLIER_MEMORY.
    """
    # psutil snapshots; the index-based access below follows psutil's
    # named-tuple field order on Linux -- TODO confirm per psutil version.
    cpu_info=psutil.cpu_times()
    net_info=psutil.net_io_counters(nowrap=True)
    cpu_info2=psutil.cpu_stats()
    disk_info=psutil.disk_io_counters()
    memory=psutil.virtual_memory()
    loadavg=psutil.getloadavg()
    cpu_freq=psutil.cpu_freq()
    # Host-wide page-fault counters from /proc/vmstat.
    vm_file = open("/proc/vmstat", "r")
    vm_file_stats=vm_file.read()#line().split()
    pgfault = int(re.findall(r'pgfault.*', vm_file_stats)[0].split()[1])
    pgmajfault = int(re.findall(r'pgmajfault.*', vm_file_stats)[0].split()[1])
    # CPU model string from the first 'model name' line of /proc/cpuinfo.
    cpuinfo_file= open("/proc/cpuinfo", "r")
    cpuinfo_file_stats=cpuinfo_file.read()
    vCpuType = re.findall(r'model name.*', cpuinfo_file_stats)[0].split(sep=": ")[1]
    # `uname -a` output with the trailing newline stripped.
    kernel_info=str(subprocess.Popen("uname -a", shell=True, stdout =subprocess.PIPE).communicate()[0][:-1], 'utf-8')
    # First whole-disk device name (e.g. 'sda') via `lsblk -nd | grep disk`.
    cmd1=['lsblk', '-nd', '--output', 'NAME,TYPE']
    cmd2=['grep','disk']
    p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE)
    p2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=subprocess.PIPE)
    o, e = p2.communicate()
    mounted_filesys=str(o.decode('UTF-8').split()[0])
    # Read/write completion counts for that disk from /proc/diskstats.
    # NOTE(review): split(sep=" ") is sensitive to space padding in
    # /proc/diskstats -- confirm field indices 1 and 5 on target kernels.
    vm_disk_file=open("/proc/diskstats", "r")
    vm_disk_file_stats=vm_disk_file.read()
    vDiskSucessfulReads=int(re.findall(rf"{mounted_filesys}.*", vm_disk_file_stats)[0].split(sep=" ")[1])
    vDiskSucessfulWrites=int(re.findall(rf"{mounted_filesys}.*", vm_disk_file_stats)[0].split(sep=" ")[5])
    vm_dict={
        "vMetricType" : "VM Level",
        "vKernelInfo" : kernel_info,
        "vCpuTime" : (cpu_info[0] + cpu_info[2]) *CORRECTION_MULTIPLIER ,
        "vDiskSectorReads" : disk_info[2]/512,
        "vDiskSectorWrites" : disk_info[3]/512,
        "vNetworkBytesRecvd" : net_info[1],
        "vNetworkBytesSent" : net_info[0],
        "vPgFault" : int(pgfault),
        "vMajorPageFault" : int(pgmajfault),
        "vCpuTimeUserMode" : cpu_info[0] * CORRECTION_MULTIPLIER,
        "vCpuTimeKernelMode" : cpu_info[2] * CORRECTION_MULTIPLIER,
        "vCpuIdleTime" : cpu_info[3]* CORRECTION_MULTIPLIER,
        "vCpuTimeIOWait" : cpu_info[4]* CORRECTION_MULTIPLIER,
        "vCpuTimeIntSrvc" : cpu_info[5]* CORRECTION_MULTIPLIER,
        "vCpuTimeSoftIntSrvc" : cpu_info[6] * CORRECTION_MULTIPLIER,
        "vCpuContextSwitches" : cpu_info2[0]* CORRECTION_MULTIPLIER,
        "vCpuNice" : cpu_info[1]* CORRECTION_MULTIPLIER,
        "vCpuSteal" : cpu_info[7]* CORRECTION_MULTIPLIER,
        "vBootTime" : psutil.boot_time(),
        "vDiskSuccessfulReads" : vDiskSucessfulReads,
        "vDiskMergedReads" : disk_info[6],
        "vDiskReadTime" : disk_info[4],
        "vDiskSuccessfulWrites" : vDiskSucessfulWrites,
        "vDiskMergedWrites" : disk_info[7],
        "vDiskWriteTime" : disk_info[5],
        "vMemoryTotal" : round(memory[0] * CORRECTION_MULTIPLIER_MEMORY),
        "vMemoryFree" : round(memory[4]* CORRECTION_MULTIPLIER_MEMORY),
        "vMemoryBuffers" : round(memory[7]* CORRECTION_MULTIPLIER_MEMORY),
        "vMemoryCached" : round(memory[8]* CORRECTION_MULTIPLIER_MEMORY),
        "vLoadAvg" : loadavg[0],
        "vId" : "unavailable",
        "vCpuType" : vCpuType,
        "vCpuMhz" : cpu_freq[0]
    }
    return vm_dict
def getProcInfo():
    """Collect per-process metrics via psutil.

    Returns a list of dicts, one per live process.  CPU times are scaled by
    CORRECTION_MULTIPLIER (seconds -> ticks, assuming 100Hz).
    """
    #need to get pPGFault/pMajorPGFault in a different verbosity level: maybe called MP for manual process
    #pResidentSetSize needs to be get in MP
    dictlist=[]
    for proc in psutil.process_iter():
        # Snapshot each psutil accessor once per process: the original called
        # cpu_times() five times and num_ctx_switches() twice, paying extra
        # syscalls and risking inconsistent values within a single record.
        cpu = proc.cpu_times()
        ctx = proc.num_ctx_switches()
        mem = proc.memory_info()
        curr_dict={
            "pId" : proc.pid,
            "pCmdline" : " ".join(proc.cmdline()),
            "pName" : proc.name(),
            "pNumThreads" : proc.num_threads(),
            "pCpuTimeUserMode" : cpu[0]* CORRECTION_MULTIPLIER,
            "pCpuTimeKernelMode" : cpu[1]* CORRECTION_MULTIPLIER,
            "pChildrenUserMode" : cpu[2]* CORRECTION_MULTIPLIER,
            "pChildrenKernelMode" : cpu[3]* CORRECTION_MULTIPLIER,
            #"pPGFault" : int(pidProcStat[9]),
            #"pMajorPGFault" : int(pidProcStat[11]),
            "pVoluntaryContextSwitches" : ctx[0],
            "pInvoluntaryContextSwitches" : ctx[1],
            # cpu_times()[4] is iowait on Linux psutil -- TODO confirm its use
            # as a block-I/O delay proxy.
            "pBlockIODelays" : cpu[4]* CORRECTION_MULTIPLIER,
            "pVirtualMemoryBytes" : mem[1]
            #"pResidentSetSize" : proc.memory_info()[0]
        }
        dictlist.append(curr_dict)
    return dictlist
# Sample timestamp (whole seconds since the epoch); the delta scripts key on it.
seconds_since_epoch = round(datetime.now().timestamp())
output_dict["currentTime"] = seconds_since_epoch #bad value.
# Each enabled level is gathered and timed; the wall-clock cost of gathering
# it is recorded alongside the metrics themselves.
if args.vm_profiling == True:
    time_start_VM=datetime.now()
    vm_info=getVmInfo()
    time_end_VM=datetime.now()
    VM_write_time=time_end_VM-time_start_VM
    output_dict.update(vm_info)
if args.container_profiling == True:
    time_start_container=datetime.now()
    container_info=getContainerInfo()
    time_end_container=datetime.now()
    container_write_time=time_end_container-time_start_container
    output_dict.update(container_info)
if args.processor_profiling == True:
    time_start_proc=datetime.now()
    procces_info=getProcInfo()
    time_end_proc=datetime.now()
    process_write_time=time_end_proc-time_start_proc
    output_dict["pProcesses"] = procces_info
if args.vm_profiling == True:
    output_dict["VM_Write_Time"] = VM_write_time.total_seconds()
if args.container_profiling == True:
    output_dict["Container_Write_Time"] = container_write_time.total_seconds()
if args.processor_profiling == True:
    output_dict["Process_Write_Time"] = process_write_time.total_seconds()
# One pretty-printed JSON sample per invocation.
with open(filename, 'w') as outfile:
    json.dump(output_dict, outfile, indent=4)
| Python | 322 | 32.114906 | 124 | /Profiler_Python/src/rudataall-psutil.py | 0.706869 | 0.694874 |
tanmayuw/ContainerProfiler | refs/heads/main | import argparse
import os
import sys
import json
import copy
#import ConfigParser
import pandas as pd
import time
import csv
import glob
import shutil
import re
#import path
from collections import namedtuple
def read_metrics_file(metrics):
    """Read a whitespace-separated metric list from the single file named in *metrics*.

    Expects ``metrics`` to be a one-element list holding a file path; returns
    the token list from the file's first line.  On a bad argument count or a
    missing file, prints an error and returns None (the error path of the
    original).  Fixes: the redundant ``f.close()`` inside the ``with`` block
    is gone, and the existence check the original left commented out is
    restored via ``os.path`` (this module imports ``os``, not ``path``).
    """
    if len(metrics) == 1 and os.path.exists(metrics[0]):
        # The whole metric list lives on the file's first line.
        with open(metrics[0], 'r') as f:
            metrics = f.readline().split()
        print(metrics)
        return metrics
    else:
        print("Error: Too many arguments or path does not exist")
def read_cmdline_metrics(metrics):
    """Metrics arrived pre-parsed on the command line; hand them back as-is."""
    return metrics
# --- CLI: folder of per-sample CSVs plus the metric names (or a file of them). ---
parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')
parser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')
parser.add_argument('metrics', type=str, nargs='*', help='list of metrics or file for metrics')
parser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')
args= parser.parse_args()
file_path = args.file_path
metrics = args.read_metrics(args.metrics)
# Recreate one output subfolder per metric so stale filtered CSVs never linger.
for i in range(0, len(metrics)):
    if os.path.exists('{}/{}'.format(file_path, metrics[i])):
        shutil.rmtree('{}/{}'.format(file_path, metrics[i]))
    if not os.path.exists('{}/{}'.format(file_path, metrics[i])):
        os.makedirs('{}/{}'.format(file_path, metrics[i]))
dirs= [i for i in os.listdir( file_path ) if i.endswith(".csv")]
dirs.sort()
# NOTE(review): used_count is never used below.
used_count = []
# Pass 1: per sample CSV, keep the rows whose command line mentions the metric.
for file_name in dirs:
    with open(file_path + '/' + file_name) as csv_file:
        data_frame = pd.read_csv(csv_file)
        data_frame.head()
        for i in range(0, len(metrics)):
            # NOTE(review): column is 'pCmdLine' here but the profiler emits
            # 'pCmdline' -- confirm which casing the CSVs actually carry.
            contains_metric = data_frame['pCmdLine'].astype(str).str.contains(metrics[i], na=False, flags=re.IGNORECASE)
            filtered = data_frame[contains_metric]
            filtered.head()
            if (len(filtered.index) > 1) :
                filtered = filtered.loc[:, ~filtered.columns.str.contains('^Unnamed')]
                filtered.to_csv('{}/{}/{}'.format(file_path, metrics[i], file_name))
# Pass 2: concatenate every CSV in file_path, sort by time, and write one
# aggregate per metric folder.
for i in range(0, len(metrics)):
    #path = "{}/{}".format(file_path, metrics[i])
    path = file_path
    all_files = glob.glob(path+ "/*.csv")
    li = []
    print(path)
    for filtered_file in all_files:
        df = pd.read_csv(filtered_file, index_col=None, header=0)
        li.append(df)
        print(filtered_file)
    frame = pd.concat(li, axis=0, ignore_index=True)
    frame = frame.sort_values(by='currentTime', ascending=True)
    frame = frame.loc[:, ~frame.columns.str.contains('^Unnamed: 0')]
    # NOTE(review): the result of this drop is discarded, so it is a no-op;
    # assign it back (or pass inplace=True) if the column should really go.
    frame.drop(frame.columns[0], axis=1)
    #frame= frame.groupby(['currentTime']).agg({
    #    'filename':'first', 'pBlockIODelays':'sum','pChildrenKernelMode':'sum', 'pChildrenUserMode':'sum','pCmdLine':'first', 'pCpuTimeUserMode':'sum', 'pId':'sum', 'pName':'first', 'pNonvoluntaryContextSwitches':'sum', 'pNumThreads':'sum', 'pResidentSetSize':'sum','pVirtualMemoryBytes': 'sum', 'pVoluntaryContextSwitches':'sum'})
    #frame = frame.groupby(['currentTime']).sum()
    #frame = frame.diff(axis=1, periods=1)
    # NOTE(review): discarded result again -- this row drop never takes effect.
    frame.drop(frame.index[0])
    frame['pCpuTime'] = frame['pCpuTimeUserMode'] + frame['pCpuTimeKernelMode']
    #print frame
    frame.to_csv('{}/{}/{}'.format(file_path, metrics[i], "agg_sum.csv"))
| Python | 97 | 41.340206 | 907 | /Graphing/process_filter.py | 0.541211 | 0.537564 |
tanmayuw/ContainerProfiler | refs/heads/main | #Creates a script based on graph_generation_config.ini to create a delta script to delta certain metrics, and avoids others.
#authors: David Perez and Tanmay Shah
# Code generator: emits auto_generated_delta_script.py, a standalone script
# that deltas cumulative metrics between consecutive JSON samples.  Which
# metrics get delta'd is driven by graph_generation_config.ini.
import argparse
import os
import json
import configparser
from collections import namedtuple
generated_script= open("auto_generated_delta_script.py","w")
# --- generated prologue: imports, CLI, output-folder reset, JSON loading ---
generated_script.write("import argparse\nimport os\nimport shutil\nimport sys\nimport json\nimport copy\nimport configparser\nfrom collections import namedtuple\n\n")
generated_script.write("parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')\n")
generated_script.write("parser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')\n")
generated_script.write("parser.add_argument('delta_interval_time', action='store', help='stores time interval of when to take delta sample')\n")
generated_script.write("args= parser.parse_args()\n")
generated_script.write("file_path = args.file_path\n")
generated_script.write("if os.path.exists(file_path + \'/delta_json\'):\n")
generated_script.write("\tshutil.rmtree(file_path + \'/delta_json\')\n")
generated_script.write("if not os.path.exists(file_path + '/delta_json'):\n")
generated_script.write("\tos.makedirs(file_path + '/delta_json')\n\n")
generated_script.write("json_array = []\n")
generated_script.write("delta_name_array = []\n")
generated_script.write("dirs= sorted([i for i in os.listdir( file_path ) if i.endswith(\".json\")])\n")
#generated_script.write("dirs.sort()\n")
generated_script.write("for file_name in dirs:\n")
generated_script.write("\twith open(file_path + '/' + file_name) as json_file: \n")
#generated_script.write("\t\tprint ('JSON FILES TANMAY:')\n")
generated_script.write("\t\tprint(json_file)\n")
generated_script.write("\t\ttry:\n")
generated_script.write("\t\t\tnew_json_object = json.load(json_file)\n")#, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\n")
generated_script.write("\t\t\tjson_array.append(new_json_object)\n")
generated_script.write("\t\t\tnew_name= ((file_path+'/delta_json/'+file_name).split('.json')[0] + '_delta.json')\n")
generated_script.write("\t\t\tdelta_name_array.append(new_name)\n\n")
generated_script.write("\t\texcept Exception as e:\n")
generated_script.write("\t\t\tprint (\"{} invalid file\".format(json_file))\n")
generated_script.write("\t\t\tpass\n")
config = configparser.ConfigParser()
# Keep option names case-sensitive: the default optionxform lower-cases keys,
# which would break the camelCase metric names in the config.
config.optionxform = str
config.read('graph_generation_config.ini')
#script generation
# --- generated file_subtraction(): one 'if key present: subtract' block per
# config entry flagged numeric_delta ---
generated_script.write("def file_subtraction(the_json_one, the_json_two):\n")
generated_script.write("\tjson_three = copy.deepcopy(the_json_two)\n")
#all common attributes across all verbos
for (each_key, each_val) in config.items('all'):
    if ( each_val == 'numeric_delta'): #and each_key.isdigit()):
        json_one = "the_json_one['" +each_key+"']"
        json_two = "the_json_two['" +each_key+"']"
        json_three = "json_three['" +each_key+"']"
        generated_script.write("\t" + json_three +"=" + json_two +'-' + json_one+"\n")
#check and process attributes only for CPU or VM
verbos = ['cpu_level','vm_level']
for vKey in verbos:
    for (each_key, each_val) in config.items(vKey):
        if ( each_val == 'numeric_delta'): #and each_key.isdigit()):
            generated_script.write("\tif ('" + each_key + "' in the_json_one.keys()):\n")
            json_one = "the_json_one['" +each_key+"']"
            json_two = "the_json_two['" +each_key+"']"
            json_three = "json_three['" +each_key+"']"
            generated_script.write("\t\t" + json_three +"=" + json_two +'-' + json_one+"\n")
# NOTE(review): config.get returns a string, so ANY non-empty value (even
# 'false') enables the per-core block -- confirm intended truthiness.
if (config.get('cprocessorstats','cCpu#TIME')):
    generated_script.write("\tif ('cProcessorStats' in the_json_one.keys()):\n")
    generated_script.write("\t\tfor (each_key) in the_json_two['cProcessorStats']:\n")
    generated_script.write("\t\t\tif ('cCpu' in each_key and 'TIME' in each_key):\n")
    generated_script.write("\t\t\t\tjson_three['cProcessorStats'][each_key] = the_json_two['cProcessorStats'][each_key] - the_json_one['cProcessorStats'][each_key]\n")
generated_script.write("\treturn json_three\n\n")
# --- generated main loop: accumulate elapsed time and emit interval deltas ---
generated_script.write("delta_json_array=[]\n")
generated_script.write("count = 0\n")
generated_script.write("first = json_array[0]\n")
generated_script.write("for i in range(1, len(json_array)):\n")
generated_script.write("\tcount += (json_array[i][\"currentTime\"] - json_array[i-1][\"currentTime\"])\n")
generated_script.write("\tif count >= int(args.delta_interval_time):\n")
generated_script.write("\t\tdelta_json_array.append(file_subtraction(first, json_array[i]))\n")
generated_script.write("\t\tcount = 0\n")
generated_script.write("\t\tfirst = json_array[i]\n")
generated_script.write("\n")
generated_script.write("for i in range(len(delta_json_array)):\n")
generated_script.write("\twith open(delta_name_array[i], 'w') as fp:\n")
generated_script.write("\t\tjson.dump(delta_json_array[i], fp, sort_keys=True, indent=2)\n")
| Python | 102 | 48.931374 | 167 | /Graphing/delta_json_generation.py | 0.678382 | 0.677008 |
tanmayuw/ContainerProfiler | refs/heads/main | from plotly.subplots import make_subplots
import random
import json
import os, sys
import pandas as pd
import subprocess
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import argparse
from os import path
import math
# NOTE(review): graphing_methods appears unused in this module.
graphing_methods=['scatter', 'bar']
# Base font sizes for titles, layout margins and axis ticks (plotly layout).
FONT_SIZE=26;
MARGIN_SIZE=20
TICKFONT_SIZE=20
def export_graphs_as_images(fig, file_name, title):
    """Save *fig* as <stem>_images/<title>.png, creating the folder on demand.

    The directory stem is *file_name* with everything after the first '.'
    stripped.
    """
    stem = file_name.split('.', 1)[0]
    out_dir = stem + "_images"
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    fig.write_image(out_dir + "/" + title + ".png")
    print("saved image: " + title + ".png to " + os.path.abspath(out_dir))
def read_metrics_file(metrics, data_frame):
    """Load the metric list from the one file path given in *metrics*.

    *data_frame* is unused here; it exists so this function is call-compatible
    with read_cmdline_metrics.  Prints an error and returns None unless
    *metrics* is exactly one existing path.
    """
    if len(metrics) != 1 or not path.exists(metrics[0]):
        print("Error: Too many arguments or path does not exist")
        return None
    # The whole metric list lives on the file's first line.
    with open(metrics[0], 'r') as handle:
        tokens = handle.readline().split()
    return tokens
def read_cmdline_metrics(metrics, data_frame):
    """Return the metric names given on the command line.

    With no explicit metrics, fall back to every *data_frame* column except
    the first (the time column).
    """
    if metrics:
        return metrics
    return list(data_frame.columns[1:])
def update_fig(figure, y_title, the_title):
    """Apply the shared export styling to *figure*: axis annotations placed as
    paper-relative labels, fonts, borders, and a horizontal legend below the
    plot area."""
    figure.update_layout(
        # Axis labels are paper-anchored annotations rather than axis titles,
        # so they survive the subplot layout used elsewhere in this module.
        annotations=[go.layout.Annotation(
            x=.5,
            y=-0.29,
            showarrow=False,
            text="Time (h)",
            xref="paper",
            yref="paper"
        ),
        go.layout.Annotation(
            x=-0.14,
            y=0.5,
            font=dict(
                family="Courier New, monospace",
                size=FONT_SIZE,
                color="#000000"
            ),
            showarrow=False,
            text=y_title,
            textangle=0,
            xref="paper",
            yref="paper"
        )
        ],
        #autosize=True,
        # Extra bottom margin leaves room for the below-plot legend.
        margin=dict(
            b=120,
        ),
        font=dict(
            family="Courier New, monospace",
            size=MARGIN_SIZE,
            color="#000000"
        ),
        showlegend=True
    )
    figure.update_xaxes(
        #ticktext=["end of split", "end of align", "end of merge"],
        #tickvals=["2000", "20000", "27500"],
        #ticktext=["split", "align", "merge"],
        #tickvals=["10", "2100", "20000"],
        domain=[0.03, 1],
        tickangle=45,
        showline=True, linewidth=3, linecolor='black', mirror=True,
        tickfont=dict(
            family='Courier New, monospace',
            size=TICKFONT_SIZE,
            color='black'
        )
    )
    figure.update_yaxes(showline=True, linewidth=3, linecolor='black', mirror=True)
    # Legend sits horizontally under the x axis label.
    figure.update_layout(legend_orientation="h")
    figure.update_layout(legend=dict(x=0, y=-.28))
    figure.update_layout(title = { 'text':the_title, 'x':.1, 'y':.91})
def normalize(data_frame):
    """Rescale counters in place: disk sectors -> bytes, CPU ns -> seconds."""
    # 512-byte sectors to bytes.
    data_frame["vDiskSectorWrites"] *= 512
    # cpuacct reports nanoseconds; convert to seconds.
    data_frame["cCpuTime"] /= 1000000000
def make_four(data_frame):
    """Build two 2x2 utilization dashboards (container view, then VM-vs-container
    view) and export each panel as a standalone PNG.

    Panels flagged in the apply-percent lists are normalized by the series max
    (vMemoryFree is inverted into used fraction); others plot raw values.
    """
    titles1=["Cpu Utilization", "Memory Utilization", "Network Utilization", "Disk Utilization"]
    ytitles=["% of CPU Utilization", "% Memory Usage Utilization", "# of Bytes recieved/sent", "# of Bytes Read/Written"]
    applypercent=[True, True, False, False]
    metrics1=["cCpuTime"]
    metrics2=["cMemoryUsed", "cMemoryMaxUsed"]
    metrics3=["cNetworkBytesRecvd", "cNetworkBytesSent"]
    metrics4=["cDiskReadBytes", "cDiskWriteBytes"]
    metricslist1 = [metrics1, metrics2, metrics3, metrics4]
    titles2=["CPU usage", "Memory Usage", "Network transfer", "Disk Uwrites"]
    ytitles2=["Percentage", "Percentage", "GB received","GB written"]
    applypercent2=[True, True, False, False]
    # The metricsN names are deliberately reused for the second dashboard.
    metrics1=["vCpuTime", "cCpuTime"]
    metrics2=["vMemoryFree", "cMemoryUsed"]
    metrics3=["vNetworkBytesRecvd", "cNetworkBytesRecvd"]
    metrics4=["cDiskSectorWrites", "vDiskSectorWrites"]
    metricslist2 = [metrics1, metrics2, metrics3, metrics4]
    full_metrics = [metricslist1, metricslist2]
    all_percents = [applypercent, applypercent2]
    fig = make_subplots(rows=2, cols=2)#, subplot_titles=titles)
    titles_all = [titles1, titles2]
    ytitles_all = [ytitles, ytitles2]
    folder_names=["Container_images", "VM_images"]
    num = 0
    for metrics in full_metrics:
        current_row = 1
        current_col = 1
        axiscounter=1
        count = 0
        for sublist in metrics:
            # Each panel is added to the combined subplot figure AND to a
            # standalone figure that is exported to disk.
            export_fig = go.Figure()
            for el in sublist:
                #the_max= data_frame[sublist].max().max()
                the_max= data_frame[el].max()
                if all_percents[num][count] == True:
                    if el == "vMemoryFree":
                        # Free memory is inverted into a used-fraction so it
                        # plots in the same direction as cMemoryUsed.
                        fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=1-(data_frame[el]/the_max), name=el, hoverinfo='x+y+name'), row=current_row, col=current_col)
                        export_fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=1-(data_frame[el]/the_max), name=el, hoverinfo='x+y+name'))
                    else:
                        fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el]/the_max, name=el, hoverinfo='x+y+name'), row=current_row, col=current_col)
                        export_fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el]/the_max, name=el, hoverinfo='x+y+name'))
                else:
                    fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el], name=el, hoverinfo='x+y+name'), row=current_row, col=current_col)
                    export_fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el], name=el, hoverinfo='x+y+name'))
                #fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el]),
            # Advance through the 2x2 grid left-to-right, top-to-bottom.
            current_col = current_col +1
            if (current_col == 3):
                current_col =1
                current_row +=1
            currentXAxis='xaxis{}'.format(axiscounter)
            currentYAxis='yaxis{}'.format(axiscounter)
            fig['layout'][currentXAxis].update(title="Time (h)")
            # NOTE(review): uses ytitles (dashboard 1 labels) for BOTH passes;
            # ytitles_all[num][count] was likely intended -- confirm.
            fig['layout'][currentYAxis].update(title=ytitles[count])
            axiscounter+=1
            update_fig(export_fig, ytitles_all[num][count], titles_all[num][count])
            count +=1
            # NOTE(review): folder_names[num] has no '{}' placeholder, so
            # .format(num) is a no-op -- confirm intended.
            export_graphs_as_images(export_fig, folder_names[num].format(num), str(count))
        num +=1
def makegraphs(metrics, dfs, percentage_flag, graph_title, x_title, y_title):
    """Plot every requested metric from every dataframe on one titled figure.

    When percentage_flag is set, all series are divided by the max over
    *metrics* in the FIRST dataframe only.  The figure is exported as a PNG
    (via export_graphs_as_images) and opened with fig.show().
    """
    start =0
    fig = go.Figure()
    fig.update_layout(
        title=go.layout.Title(
            text=graph_title,
            xref="paper",
            x=0,
            font=dict(
                family="Courier New, monospace",
                size=FONT_SIZE,
                color="#7f7f7f"
            )
        ),
        xaxis=go.layout.XAxis(
            title=go.layout.xaxis.Title(
                text=x_title,
                font=dict(
                    family="Courier New, monospace",
                    size=FONT_SIZE,
                    color="#7f7f7f"
                )
            )
        ),
        yaxis=go.layout.YAxis(
            title=go.layout.yaxis.Title(
                text=y_title,
                font=dict(
                    family="Courier New, monospace",
                    size=FONT_SIZE,
                    color="#7f7f7f"
                )
            )
        )
    )
    # Normalization denominator comes from the first dataframe only.
    df = dfs[0]
    the_max= df[metrics].max().max()
    for df in dfs:
        for x in metrics:
            # Skip metrics a given CSV does not carry.
            if x in list(df.columns.values):
                if percentage_flag == True:
                    fig.add_trace(go.Scatter(x=df['currentTime'], y=df[x]/the_max, name=x, hoverinfo='x+y+name'))
                else:
                    fig.add_trace(go.Scatter(x=df['currentTime'], y=df[x], name=x, hoverinfo='x+y+name', marker=dict(
                        color='Blue',
                        size=120,
                        line=dict(
                            color='Blue',
                            width=12
                        )
                    )))
    export_graphs_as_images(fig, graph_title, "temp3")
    fig.show()
parser = argparse.ArgumentParser(description="generates plotly graphs")
# NOTE(review): most help strings below are copy-pasted ('determines sampling
# size') and do not describe their option -- worth rewording.
parser.add_argument('-c', "--csv_file", action="store", help='determines sampling size')
parser.add_argument("-c2", "--csv_second", action="store", help='determines sampling size')
parser.add_argument("-s", "--sampling_interval", type=int, nargs='?', action="store", help='determines sampling size')
parser.add_argument("-t", "--title", action="store", help='determines sampling size')
parser.add_argument("-xt", "--x_title", action="store", help='determines sampling size')
parser.add_argument("-yt", "--y_title", action="store", help='determines sampling size')
parser.add_argument("-p", "--percentage", action="store_true", help='determines sampling size')
parser.add_argument('metrics', type=str, nargs='*', help='list of metrics to graph over')
parser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')
args= parser.parse_args()
#dataframe read into from cmdline
data_frame = pd.read_csv(args.csv_file)
data_frame.head()
# Rebase time to hours since the first sample.
data_frame['currentTime'] = (data_frame['currentTime'] - data_frame['currentTime'][0])/3600
data_frame.name=args.csv_file
dfs = []
dfs.append(data_frame)
if args.csv_second != None:
    data_frame = pd.read_csv(args.csv_second)
    data_frame.head()
    # NOTE(review): the second CSV is rebased but NOT divided by 3600, unlike
    # the first -- confirm the asymmetry is intended.
    data_frame['currentTime'] = data_frame['currentTime'] - data_frame['currentTime'][0]
    data_frame.name=args.csv_second
    dfs.append(data_frame)
#choosing which method to make the graphs
#preparing the x axis of time for all graphs
#obtains the graphs from cmdline, can have no input for every metric in the csv, n metrics space delimited, or a file if --infile tag included at the end
metrics = args.read_metrics(args.metrics, data_frame)
print(metrics)
#makegraphs(metrics, dfs, args.percentage, args.title, args.x_title, args.y_title)
normalize(data_frame);
make_four(data_frame)
| Python | 303 | 28.597361 | 184 | /Graphing/plotly_stack_graphs.py | 0.656445 | 0.638715 |
tanmayuw/ContainerProfiler | refs/heads/main | #Authors: David Perez and Tanmay Shah
import json
import os
import pandas as pd
import argparse
#usage: python csv_generation_2.py path_of_folder_with_json sampling_delta metrics(file or space delimited list, if file include --infile, leave blank for all metrics found in the json files.)
def read_metrics_file(metrics):
if (len(metrics) == 1 and path.exists(metrics[0])):
metrics_file= metrics[0]
with open(metrics_file, 'r') as f:
metrics= f.readline().split()
# print(metrics)
f.close()
return metrics
else:
print("Error: Too many arguments or path does not exist")
def read_cmdline_metrics(metrics):
return metrics
# vm_container dictionary to store the virtual machine and container data. Key is the filename and value is the virtual machine and container data.
vm_container = {}
#Parse for folder path, and metrics to add.
parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')
parser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')
parser.add_argument('sampling_delta', type=int, nargs='?', default=1, help='determines sampling size')
parser.add_argument('metrics', type=str, nargs='*', help='list of metrics or file for metrics')
parser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')
args= parser.parse_args()
file_path = args.file_path
metrics = args.read_metrics(args.metrics)
#currentTime is necessary to be included in metrics as it is used to create time series. We add it here incase its not already included
metrics.append('currentTime')
metrics = set(metrics)
dirs = os.listdir( file_path )
# processes dictionary to store process level data
processes = dict()
dirs= sorted([i for i in os.listdir( file_path ) if i.endswith(".json")])
for file in dirs:
with open(file_path+'/'+file) as f:
# Deserialize into python object
y = json.load(f)
# A dictionary which contains the value of vm_container dictionary
r = {}
# Check for any list or dictionary in y
# determines what is chosen out of the metrics.
#print metrics
for k in y:
if not (k == "pProcesses" or k == "cProcessorStats"):
if k in metrics or len(metrics) == 1:
r[k] = y[k]
if ("cProcessorStats" in y and "cNumProcessors" in y):
for k in y["cProcessorStats"]:
if (k in metrics or len(metrics) == 0):
r[k] = y["cProcessorStats"][k]
if ("pProcesses" in y):
totalProcesses = len(y["pProcesses"]) - 1
#print y["pProcesses"][len(y["pProcesses"]) - 1]
for k in y["pProcesses"][totalProcesses]:
if k == "pTime":
r["pTime"] = y["pProcesses"][totalProcesses]["pTime"]
# Loop through the process level data
for i in range(totalProcesses):
# A dictinary containing process level data
s = {"filename": file}
for k in y["pProcesses"][i]:
s[k] = y["pProcesses"][i][k]
s["currentTime"] = r["currentTime"]
# If the process id is already in the processes, append to the list of processes
pids = []
if y["pProcesses"][i]["pId"] in processes:
pids = processes[y["pProcesses"][i]["pId"]]
pids.append( s )
processes[y["pProcesses"][i]["pId"]] = pids
#write all metrics to csv file
vm_container[file] = r
#creates empty folder for process info
if not os.path.exists('./process_info/{}'.format(os.path.basename(os.path.normpath(file_path)))):
os.makedirs('./process_info/{}'.format(os.path.basename(os.path.normpath(file_path))))
for key, value in processes.items():
df1 = pd.DataFrame(value)
df1 = df1.sort_values(by='currentTime', ascending=True)
df1.to_csv("./process_info/{}/Pid, {}.csv".format(os.path.basename(os.path.normpath(file_path)),str(key)))
# Create a separate CSV files for each of the processes
# Dump dictionary to a JSON file
with open("vm_container.json","w") as f:
f.write(json.dumps(vm_container))
# Convert JSON to dataframe and convert it to CSV
df = pd.read_json("vm_container.json").T
df=df.iloc[::args.sampling_delta]
df.to_csv("vm_container.csv", sep=',')
# Convert JSON to dataframe and convert it to CSV
df = pd.read_json("vm_container.json").T
df=df.iloc[::args.sampling_delta]
df.to_csv("vm_container.tsv", sep='\t')
| Python | 121 | 38.090908 | 192 | /Graphing/csv_generation_2.py | 0.633904 | 0.631156 |
tanmayuw/ContainerProfiler | refs/heads/main | import argparse
import os
import sys
import json
import copy
import ConfigParser
import pandas as pd
import time
import os
import glob
import pandas as pd
from collections import namedtuple
parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')
parser.add_argument('file_path', action='store', help='')
args= parser.parse_args()
file_path = args.file_path
dirs= [i for i in os.listdir( file_path ) if i.endswith(".csv")]
dirs.sort()
dfObj = pd.DataFrame()
used_count = []
pcmd_list =[]
for file_name in dirs:
with open(file_path + '/' + file_name) as csv_file:
data_frame = pd.read_csv(csv_file)
data_frame.head()
value_counts= data_frame['pCmdLine'].value_counts()
#df = value_counts.rename_axis('unique_values').reset_index(name='counts')
df = pd.DataFrame(value_counts)
pcmd_list.append(df)
series=data_frame.median()
series = series.rename(file_name)
dfObj = dfObj.append(series)
used_count.append(len(data_frame.index))
total = pcmd_list[0]
for i in pcmd_list[1:]:
total = total.add(i, fill_value=0)
total = total.sort_values(by="pCmdLine", ascending=False)
total.to_csv("processes_used.csv", sep=',')
dfObj.insert(len(dfObj.columns) ,"Times Used", used_count)
dfObj= dfObj.sort_values(by="Times Used", ascending=False)
dfObj.index=dfObj["pId"]
dfObj = dfObj.loc[:, ~dfObj.columns.str.contains('^Unnamed')]
dfObj.to_csv("process_info.csv", sep=',')
| Python | 60 | 22.866667 | 92 | /Graphing/process_info_report.py | 0.712195 | 0.710105 |
tanmayuw/ContainerProfiler | refs/heads/main | #author: David Perez
from plotly.subplots import make_subplots
import random
import json
import os, sys
import pandas as pd
import subprocess
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import argparse
from os import path
import math
import shutil
from os.path import abspath
from subprocess import call
from distutils.dir_util import copy_tree
def read_metrics_file(metrics):
if (len(metrics) == 1 and path.exists(metrics[0])):
metrics_file= metrics[0]
with open(metrics_file, 'r') as f:
metrics= f.readline().split()
f.close()
return ' '.join(metrics)
else:
print("Error: Too many arguments or path does not exist")
def read_cmdline_metrics(metrics):
return ' '.join(metrics)
#give, x folders, give metrics, give smoothening delta,
parser = argparse.ArgumentParser(description="generates plotly graphs by giving folders, metrics, and delta smoothening value")
parser.add_argument('-f', "--folders", action="store", nargs='*', help='determines sampling size')
parser.add_argument("-s", "--sampling_interval", type=str, nargs='?', default=1, action="store", help='determines sampling size')
parser.add_argument("-m", "--metrics", action="store", nargs='*', default=[], help='list of metrics to graph over')
parser.add_argument("-d", "--dynamic_creation", action="store_true", default=False, help='list of metrics to graph over')
parser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')
args= parser.parse_args()
metrics = args.read_metrics(args.metrics)
#print(args.folders);
#print(args.sampling_interval);
print("making delta_json_gen script")
os.system("python delta_json_generation.py")
print("finished delta_json_gen script")
current_directory = os.getcwd()
final_directory = os.path.join(current_directory, r'graph_all_json')
if os.path.exists(final_directory):
shutil.rmtree(final_directory)
if not os.path.exists(final_directory):
os.makedirs(final_directory)
print("running delta_json_gen on each path given")
for path in args.folders:
path = os.path.expanduser(path)
os.system("python auto_generated_delta_script.py {} {}".format(path, args.sampling_interval))
copy_tree(path+"/delta_json", final_directory)
print("Finished running delta_json_gen on each path given")
print("Creating a csv file based on dela information created")
os.system("python csv_generation_2.py {} {} {}".format(final_directory, "1", metrics))
print("Finished Creating a csv file based on dela information created")
print("Starting Graphing process")
if (args.dynamic_creation) :
#print ("Tanmay METRICS HERE:")
#print (metrics)
os.system("python plotly_graph_generation.py {} {} -d".format("vm_container.csv", metrics))
else :
print ("Tanmay METRICS HERE:")
print (metrics)
os.system("python plotly_graph_generation.py {} {}".format("vm_container.csv", metrics))
print("Finished Graphing process")
| Python | 92 | 31.652174 | 184 | /Graphing/graph_all.py | 0.736439 | 0.734443 |
avinash-arjavalingam/262_project | refs/heads/main | from simulator.event_queue import EventQueue
from simulator.resource import *
from simulator.dag import Dag
from simulator.system import System
from workloads.toy.linear_dag import linear_dag_clockwork_data, linear_instance_list, linear_instance_placements
class SimpleSystem(System):
pools: Dict[str, ResourcePool]
def __init__(self,_events: EventQueue, _pools: Dict[str, ResourcePool]):
super().__init__(_events)
self.pools = _pools
self.dag_maps = {}
def schedule(self, curr_time, events, *args, **kwargs):
# First check for any completed functions
for name, pool in self.pools.items():
for resource in pool.get_all_resources():
completed = resource.remove_at_time(curr_time)
for (fid, tag) in completed:
assert tag in self.outstanding_requests, "Tag needs to map to an outstanding request"
self.outstanding_requests[tag] = (True, self.outstanding_requests[tag][1])
# Now process any new events
for (dag, input) in events:
# for linear_instance in linear_instance_list:
# print(linear_instance.id_res_map)
# print(linear_instance.running_time)
# print(linear_instance.running_cost)
# for price_instance in linear_instance_placements.price_list:
# print(price_instance.running_cost)
# for time_instance in linear_instance_placements.time_list:
# print(time_instance.running_time)
# sample_placement = (linear_instance_placements.get_sample_list(10000, 10000))[0]
# self.dag_maps = sample_placement.id_res_map
print(linear_dag_clockwork_data)
if linear_dag_clockwork_data[1][0] < 20 and linear_dag_clockwork_data[1][1] < 85:
self.dag_maps[dag.name] = 'STD_GPU'
elif linear_dag_clockwork_data[0][0] < 20 and linear_dag_clockwork_data[0][1] < 85:
self.dag_maps[dag.name] = 'STD_CPU'
else:
continue
# print(dag_maps)
# for sample_instance in linear_instance_placements.get_sample_list(10000, 10000):
# print(sample_instance.running_time)
# print(sample_instance.running_cost)
# print("Done")
# print("Hello")
dag.execute() # Need to do this to seal the DAG
self.outstanding_requests[self.__generate_tag(dag, curr_time)] = (True, dag)
# Now schedule functions
for tag, (flag, dag) in self.outstanding_requests.copy().items():
if flag:
if dag.has_next_function():
# Find which resource is faster
nxt = dag.peek_next_function()
# std_cpu = nxt.resources['STD_CPU']
# std_gpu = nxt.resources['STD_GPU']
# cpu_time = std_cpu['pre'].get_runtime() + std_cpu['exec'].get_runtime() + std_cpu['post'].get_runtime()
# gpu_time = std_gpu['pre'].get_runtime() + std_gpu['exec'].get_runtime() + std_gpu['post'].get_runtime()
# if cpu_time < gpu_time:
# pool = self.pools['STD_CPU_POOL']
# else:
# pool = self.pools['STD_GPU_POOL']
# print(self.dag_maps)
# print(nxt.unique_id)
if self.dag_maps[dag.name] == 'STD_GPU':
pool = self.pools['STD_GPU_POOL']
# print("GPU")
else:
pool = self.pools['STD_CPU_POOL']
# print("CPU")
# If there is a resource available, schedule it
result : Optional[Tuple[str, Resource]] = pool.find_first_available_resource(nxt, tag)
if result:
(name, rsrc) = result
rsrc.add_function(dag.next_function(), tag, curr_time)
self.outstanding_requests[tag] = (False, self.outstanding_requests[tag][1])
else:
# Remove if there is no next function
self.outstanding_requests.pop(tag)
def __generate_tag(self, dag: Dag, time: int):
return f"{dag.name}:{time}:{id(dag)}"
def __decode_tag(self, tag: str) -> Dag:
return self.outstanding_requests[tag] | Python | 86 | 41.290699 | 112 | /workloads/toy/simple_system.py | 0.672717 | 0.661991 |
avinash-arjavalingam/262_project | refs/heads/main | from simulator.dag import Dag, Function
from simulator.resource import ResourceType
from simulator.runtime import ConstantTime
from .constants import *
from random import randint, sample
from bisect import bisect
# linear_first = Function(
# unique_id='linear_first',
# resources= {
# 'STD_CPU' : {
# 'type' : ResourceType.CPU,
# 'space': 100.0, # Ignoring space this function requires on the CPU
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(3),
# 'post' : ConstantTime(0)
# },
# 'STD_GPU' : {
# 'type' : ResourceType.GPU,
# 'space': 100.0,
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(2),
# 'post' : ConstantTime(0)
# }
# }
# )
#
# linear_second = Function( # This function takes a long time to run on a CPU
# unique_id='linear_second',
# resources= {
# 'STD_CPU' : {
# 'type' : ResourceType.CPU,
# 'space': 100.0, # Ignoring space this function requires on the CPU
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(5),
# 'post' : ConstantTime(0)
# },
# 'STD_GPU' : {
# 'type' : ResourceType.GPU,
# 'space': 100.0,
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(1),
# 'post' : ConstantTime(0)
# }
# }
# )
#
# linear_third = Function( # This function takes a long time to run on a GPU
# unique_id='linear_third',
# resources= {
# 'STD_CPU' : {
# 'type' : ResourceType.CPU,
# 'space': 100.0, # Ignoring space this function requires on the CPU
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(1),
# 'post' : ConstantTime(0)
# },
# 'STD_GPU' : {
# 'type' : ResourceType.GPU,
# 'space': 100.0,
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(5),
# 'post' : ConstantTime(0)
# }
# }
# )
linear_first = Function(
unique_id='linear_first',
resources= {
'STD_CPU' : {
'type' : ResourceType.CPU,
'space': 100.0, # Ignoring space this function requires on the CPU
'pre' : ConstantTime(1),
'exec' : ConstantTime(3),
'post' : ConstantTime(0)
},
'STD_GPU' : {
'type' : ResourceType.GPU,
'space': 100.0,
'pre' : ConstantTime(1),
'exec' : ConstantTime(1),
'post' : ConstantTime(0)
}
}
)
linear_second = Function( # This function takes a long time to run on a CPU
unique_id='linear_second',
resources= {
'STD_CPU' : {
'type' : ResourceType.CPU,
'space': 100.0, # Ignoring space this function requires on the CPU
'pre' : ConstantTime(1),
'exec' : ConstantTime(5),
'post' : ConstantTime(0)
},
'STD_GPU' : {
'type' : ResourceType.GPU,
'space': 100.0,
'pre' : ConstantTime(1),
'exec' : ConstantTime(2),
'post' : ConstantTime(0)
}
}
)
linear_third = Function( # This function takes a long time to run on a GPU
unique_id='linear_third',
resources= {
'STD_CPU' : {
'type' : ResourceType.CPU,
'space': 100.0, # Ignoring space this function requires on the CPU
'pre' : ConstantTime(1),
'exec' : ConstantTime(8),
'post' : ConstantTime(0)
},
'STD_GPU' : {
'type' : ResourceType.GPU,
'space': 100.0,
'pre' : ConstantTime(1),
'exec' : ConstantTime(3),
'post' : ConstantTime(0)
}
}
)
# Add costs to functions
all_funs = [linear_first, linear_second, linear_third]
for f in all_funs:
for rsrc_name, specs in f.resources.items():
if rsrc_name == 'STD_CPU':
specs['cost'] = COST_PER_CPU_TIME * specs['exec'].get_runtime()
else:
specs['cost'] = COST_PER_GPU_TIME * specs['exec'].get_runtime()
linear_dag = Dag('linear', funs=[linear_first, linear_second, linear_third])
linear_dag.add_edge(linear_first, linear_second)
linear_dag.add_edge(linear_second, linear_third)
linear_dag.sanity_check()
def gen_clockwork(dag_functions):
dag_cpu_time = 0
dag_cpu_cost = 0
dag_gpu_time = 0
dag_gpu_cost = 0
for func in list(dag_functions):
dag_cpu = func.resources['STD_CPU']
dag_gpu = func.resources['STD_GPU']
dag_cpu_time += dag_cpu['pre'].get_runtime() + dag_cpu['exec'].get_runtime() + dag_cpu['post'].get_runtime()
dag_gpu_time += dag_gpu['pre'].get_runtime() + dag_gpu['exec'].get_runtime() + dag_gpu['post'].get_runtime()
dag_cpu_cost += dag_cpu['cost']
dag_gpu_cost += dag_gpu['cost']
return [[dag_cpu_time, dag_cpu_cost], [dag_gpu_time, dag_gpu_cost]]
linear_dag_clockwork_data = gen_clockwork(linear_dag.functions.values())
class DAGInstance:
def __init__(self, dag):
self.dag = dag
self.running_time = 0
self.running_cost = 0
# self.functions_per_resource = {}
self.id_res_map = {}
# self.id_max_map = {}
# for res in ["GPU", "CPU"]:
# self.functions_per_resource[res] = []
# def add_func_res(self, function, resource):
# func_tuple = (function.id, function.get_max_memory(resource))
# self.functions_per_resource[resource].append(func_tuple)
def copy_dag_instance(self):
new_dag_instance = DAGInstance(self.dag)
for id_one, res in list(self.id_res_map.items()):
new_dag_instance.id_res_map[id_one] = res
# for id_two, max_prev in self.id_max_map:
# new_dag_instance.id_max_map[id_two] = max_prev
# for i in range(len(self.functions_per_resource)):
# for func_tuple in self.functions_per_resource[i]:
# new_tuple = (func_tuple[0], func_tuple[1])
# new_dag_instance.functions_per_resource[i].append(new_tuple)
new_dag_instance.running_cost = self.running_cost
new_dag_instance.running_time = self.running_time
return new_dag_instance
def update_dag_instance(self, this_function, res):
self.id_res_map[this_function.unique_id] = res
# func_time = func.get_resource_runtime(res) + self.id_max_map[func.id]
# for root_next_func in func.next_funcs:
# next_max_time = 0
# if root_next_func.id in self.id_max_map:
# next_max_time = self.id_max_map[root_next_func.id]
# self.id_max_map[root_next_func.id] = max(func_time, next_max_time)
# self.running_time = max(self.running_time, func_time)
func_res = this_function.resources[res]
self.running_time = self.running_time + func_res['pre'].get_runtime() + func_res['exec'].get_runtime() + func_res['post'].get_runtime()
self.running_cost = self.running_cost + func_res['cost']
# self.add_func_res(func, res)
# self.id_max_map.pop(func.id, None)
resource_list = ['STD_CPU', 'STD_GPU']
def gen_dag_instances(dag):
dep_queue = dag
instance_list = []
root = dep_queue.pop(0)
for root_res in list(resource_list):
root_dag_instance = DAGInstance(dag)
root_dag_instance.id_res_map[root.unique_id] = root_res
# print(root_dag_instance.id_res_map[root.unique_id])
# for root_next_func in root.next_funcs:
# root_dag_instance.id_max_map[root_next_func.id] = root.get_resource_runtime(root_res)
root_func_res = root.resources[root_res]
root_dag_instance.running_time = root_func_res['pre'].get_runtime() + root_func_res['exec'].get_runtime() + root_func_res['post'].get_runtime()
root_dag_instance.running_cost = root_func_res['cost']
# root_dag_instance.add_func_res(root, root_res)
instance_list.append(root_dag_instance)
while len(dep_queue) > 0:
function = dep_queue.pop(0)
new_instance_list = []
for dag_instance in instance_list:
for res in list(resource_list):
new_dag_instance = dag_instance.copy_dag_instance()
new_dag_instance.update_dag_instance(function, res)
new_instance_list.append(new_dag_instance)
instance_list = new_instance_list
# for finished_dag_instance in instance_list:
# for func_res in list(finished_dag_instance.functions_per_resource.values()):
# sorted(func_res, key=lambda x: x[1])
return instance_list
def select_pareto_instances(instance_list):
pareto_list = []
for instance in instance_list:
pareto_add = True
for comp_instance in instance_list:
if not (instance is comp_instance):
if (comp_instance.running_time <= instance.running_time) and (comp_instance.running_cost <= instance.running_cost):
pareto_add = False
break
if pareto_add:
pareto_list.append(instance)
return pareto_list
linear_instance_list = select_pareto_instances(gen_dag_instances([linear_first, linear_second, linear_third]))
class DAGSelector:
def __init__(self, instance_list, sample_size):
self.price_list = sorted(instance_list, key=lambda x: x.running_cost)
self.time_list = sorted(instance_list, key=lambda x: x.running_time)
self.sample_size = int(max(min(sample_size, len(self.price_list)), 1))
def binary_find_index(self, value, this_list, type):
keys = []
if type == "price":
keys = [this_inst.running_cost for this_inst in this_list]
else:
keys = [this_inst.running_time for this_inst in this_list]
pos = (bisect(keys, value, 0, len(this_list)))
return pos
def get_sample_list(self, price_slo, time_slo):
sample_list = []
price_index = self.binary_find_index(price_slo, self.price_list, "price")
time_index = self.binary_find_index(time_slo, self.time_list, "cost")
if (price_index <= 0) or (time_index <= 0):
return []
end_index = len(self.price_list) - time_index
valid_size = price_index - end_index
if valid_size <= 0:
return []
valid_list = self.price_list[end_index:price_index]
min_size = min(self.sample_size, len(valid_list))
sample_list = sample(valid_list, min_size)
return sample_list
def get_placements(self, cluster, sample_instance):
function_place_map = {}
for res, res_list in list(sample_instance.functions_per_resource.items()):
res_nodes = cluster.nodes_by_res[res]
updated_nodes = []
for func_id, func_mem in res_list:
placed = False
done = False
while (not placed) and (not done):
if len(res_nodes) == 0:
done = True
elif func_mem <= res_nodes[0].memory:
function_place_map[func_id] = res_nodes[0].id
res_nodes[0].memory = res_nodes[0].memory - func_mem
placed = True
else:
popped_node = res_nodes.pop(0)
updated_nodes.append(popped_node)
if done:
break
if len(res_nodes) == 0:
cluster.nodes_by_res[res] = sorted(updated_nodes, key=lambda x: x.memory)
return {}
else:
res_nodes.extend(updated_nodes)
cluster.nodes_by_res[res] = sorted(res_nodes, key=lambda x: x.memory)
return function_place_map
linear_instance_placements = DAGSelector(linear_instance_list, 1) | Python | 322 | 30.667702 | 145 | /workloads/toy/linear_dag.py | 0.654472 | 0.643684 |
davew-msft/MLOps-E2E | refs/heads/master | import json
import numpy
from azureml.core.model import Model
import joblib
def init():
global LGBM_MODEL
# Load the model from file into a global object
model_path = Model.get_model_path(
model_name="driver_model")
LGBM_MODEL = joblib.load(model_path)
def run(raw_data, request_headers):
data = json.loads(raw_data)["data"]
data = numpy.array(data)
result = LGBM_MODEL.predict(data)
# Demonstrate how we can log custom data into the Application Insights
# traces collection.
# The 'X-Ms-Request-id' value is generated internally and can be used to
# correlate a log entry with the Application Insights requests collection.
# The HTTP 'traceparent' header may be set by the caller to implement
# distributed tracing (per the W3C Trace Context proposed specification)
# and can be used to correlate the request to external systems.
print(('{{"RequestId":"{0}", '
'"TraceParent":"{1}", '
'"NumberOfPredictions":{2}}}'
).format(
request_headers.get("X-Ms-Request-Id", ""),
request_headers.get("Traceparent", ""),
len(result)
))
return {"result": result.tolist()}
if __name__ == "__main__":
# Test scoring
init()
TEST_ROW = '{"data":[[0,1,8,1,0,0,1,0,0,0,0,0,0,0,12,1,0,0,0.5,0.3,0.610327781,7,1,-1,0,-1,1,1,1,2,1,65,1,0.316227766,0.669556409,0.352136337,3.464101615,0.1,0.8,0.6,1,1,6,3,6,2,9,1,1,1,12,0,1,1,0,0,1],[4,2,5,1,0,0,0,0,1,0,0,0,0,0,5,1,0,0,0.9,0.5,0.771362431,4,1,-1,0,0,11,1,1,0,1,103,1,0.316227766,0.60632002,0.358329457,2.828427125,0.4,0.5,0.4,3,3,8,4,10,2,7,2,0,3,10,0,0,1,1,0,1]]}' # NOQA: E501
PREDICTION = run(TEST_ROW, {})
print("Test result: ", PREDICTION)
| Python | 44 | 38.93182 | 403 | /Lab12/score.py | 0.610772 | 0.484175 |
davew-msft/MLOps-E2E | refs/heads/master | import argparse
import json
import urllib
import os
import numpy as np
import pandas as pd
import keras
from keras import models
from keras import layers
from keras import optimizers
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense
import azureml.core
from azureml.core import Run
from azureml.core.dataset import Dataset
from azureml.core.datastore import Datastore
from azureml.core.model import Model
print("Executing train.py")
print("As a data scientist, this is where I write my training code.")
print("Azure Machine Learning SDK version: {}".format(azureml.core.VERSION))
#-------------------------------------------------------------------
#
# Processing input arguments
#
#-------------------------------------------------------------------
parser = argparse.ArgumentParser("train")
parser.add_argument("--model_name", type=str, help="model name", dest="model_name", required=True)
parser.add_argument("--build_number", type=str, help="build number", dest="build_number", required=True)
args = parser.parse_args()
print("Argument 1: %s" % args.model_name)
print("Argument 2: %s" % args.build_number)
#-------------------------------------------------------------------
#
# Define internal variables
#
#-------------------------------------------------------------------
datasets_folder = './datasets'
# this is the URL to the CSV file containing the GloVe vectors
glove_url = ('https://quickstartsws9073123377.blob.core.windows.net/'
'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'
'quickstarts/connected-car-data/glove.6B.100d.txt')
glove_ds_name = 'glove_6B_100d'
glove_ds_description ='GloVe embeddings 6B 100d'
# this is the URL to the CSV file containing the connected car component descriptions
cardata_url = ('https://quickstartsws9073123377.blob.core.windows.net/'
'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'
'quickstarts/connected-car-data/connected-car_components.csv')
cardata_ds_name = 'connected_car_components'
cardata_ds_description = 'Connected car components data'
embedding_dim = 100
training_samples = 90000
validation_samples = 5000
max_words = 10000
run = Run.get_context()
ws = run.experiment.workspace
ds = Datastore.get_default(ws)
#-------------------------------------------------------------------
#
# Process GloVe embeddings dataset
#
#-------------------------------------------------------------------
# The GloVe embeddings dataset is static so we will only register it once with the workspace
print("Downloading GloVe embeddings...")
try:
glove_ds = Dataset.get_by_name(workspace=ws, name=glove_ds_name)
print('GloVe embeddings dataset already registered.')
except:
print('Registering GloVe embeddings dataset...')
glove_ds = Dataset.File.from_files(glove_url)
glove_ds.register(workspace=ws, name=glove_ds_name, description=glove_ds_description)
print('GloVe embeddings dataset successfully registered.')
file_paths = glove_ds.download(target_path=datasets_folder, overwrite=True)
glove_file_path = file_paths[0]
print("Download complete.")
#-------------------------------------------------------------------
#
# Process connected car components dataset
#
#-------------------------------------------------------------------
print('Processing connected car components dataset...')
# Download the current version of the dataset and save a snapshot in the datastore
# using the build number as the subfolder name
local_cardata_path = '{}/connected-car-components.csv'.format(datasets_folder)
ds_cardata_path = 'datasets/{}'.format(args.build_number)
urllib.request.urlretrieve(cardata_url, local_cardata_path)
ds.upload_files([local_cardata_path], target_path=ds_cardata_path, overwrite=True)
cardata_ds = Dataset.Tabular.from_delimited_files(path=[(ds, 'datasets/{}/connected-car-components.csv'.format(args.build_number))])
# For each run, register a new version of the dataset and tag it with the build number.
# This provides full traceability using a specific Azure DevOps build number.
cardata_ds.register(workspace=ws, name=cardata_ds_name, description=cardata_ds_description,
tags={"build_number": args.build_number}, create_new_version=True)
print('Connected car components dataset successfully registered.')
car_components_df = cardata_ds.to_pandas_dataframe()
components = car_components_df["text"].tolist()
labels = car_components_df["label"].tolist()
print("Processing car components data completed.")
#-------------------------------------------------------------------
#
# Use the Tokenizer from Keras to "learn" a vocabulary from the entire car components text
#
#-------------------------------------------------------------------
print("Tokenizing data...")
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(components)
sequences = tokenizer.texts_to_sequences(components)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=embedding_dim)
labels = np.asarray(labels)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
print("Tokenizing data complete.")
#-------------------------------------------------------------------
#
# Create training, validation, and testing data
#
#-------------------------------------------------------------------
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
x_train = data[:training_samples]
y_train = labels[:training_samples]
x_val = data[training_samples: training_samples + validation_samples]
y_val = labels[training_samples: training_samples + validation_samples]
x_test = data[training_samples + validation_samples:]
y_test = labels[training_samples + validation_samples:]
#-------------------------------------------------------------------
#
# Apply the vectors provided by GloVe to create a word embedding matrix
#
#-------------------------------------------------------------------
print("Applying GloVe vectors...")
embeddings_index = {}
f = open(glove_file_path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
if i < max_words:
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
print("Applying GloVe vectors completed.")
#-------------------------------------------------------------------
#
# Build and train the model
#
#-------------------------------------------------------------------
# Use Keras to define the structure of the deep neural network
print("Creating model structure...")
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=embedding_dim))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
# fix the weights for the first layer to those provided by the embedding matrix
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
print("Creating model structure completed.")
opt = optimizers.RMSprop(lr=0.1)
print("Training model...")
model.compile(optimizer=opt,
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(x_train, y_train,
epochs=3,
batch_size=32,
validation_data=(x_val, y_val))
print("Training model completed.")
print("Saving model files...")
# create a ./outputs/model folder in the compute target
# files saved in the "./outputs" folder are automatically uploaded into run history
os.makedirs('./outputs/model', exist_ok=True)
# save model
model.save('./outputs/model/model.h5')
print("model saved in ./outputs/model folder")
print("Saving model files completed.")
#-------------------------------------------------------------------
#
# Evaluate the model
#
#-------------------------------------------------------------------
print('Model evaluation will print the following metrics: ', model.metrics_names)
evaluation_metrics = model.evaluate(x_test, y_test)
print(evaluation_metrics)
run = Run.get_context()
run.log(model.metrics_names[0], evaluation_metrics[0], 'Model test data loss')
run.log(model.metrics_names[1], evaluation_metrics[1], 'Model test data accuracy')
#-------------------------------------------------------------------
#
# Register the model the model
#
#-------------------------------------------------------------------
os.chdir("./outputs/model")
# The registered model references the data set used to provide its training data
model_description = 'Deep learning model to classify the descriptions of car components as compliant or non-compliant.'
model = Model.register(
model_path='model.h5', # this points to a local file
model_name=args.model_name, # this is the name the model is registered as
tags={"type": "classification", "run_id": run.id, "build_number": args.build_number},
description=model_description,
workspace=run.experiment.workspace,
datasets=[('training data', cardata_ds), ('embedding data', glove_ds)])
print("Model registered: {} \nModel Description: {} \nModel Version: {}".format(model.name,
model.description, model.version))
| Python | 278 | 34.377697 | 132 | /scripts/train.py | 0.622471 | 0.610168 |
StrikerEureka/DLL | refs/heads/master | class Node :
def __init__(self, data) :
self.data = data
self.next = None
self.prev = None
class doublelinkedlist(object) :
    """Doubly linked list with an interactive (Indonesian-language) console menu.

    Maintains both ``head`` and ``tail``; every mutator keeps the two ends
    consistent (the original implementation left ``tail`` stale on several
    paths, which broke tampilreverse() and hapusbelakang()).
    """
    def __init__(self) :
        # Empty list: both ends are None.
        self.head = None
        self.tail = None
    def tambahbelakang(self, data) :
        """Append *data* at the back of the list."""
        new_node = Node(data)
        if self.head is None :
            self.head = new_node
        else :
            # O(1) append via the tail pointer (the original walked the list).
            new_node.prev = self.tail
            self.tail.next = new_node
        # Fix: the original never set tail when the list was empty.
        self.tail = new_node
        print("Data ditambahkan.")
        print("")
    def tambahdepan(self, data) :
        """Prepend *data* at the front of the list."""
        new_node = Node(data)
        if self.head is None :
            # Fix: the first element must also become the tail.
            self.tail = new_node
        else :
            self.head.prev = new_node
            new_node.next = self.head
        self.head = new_node
        print("Data ditambahkan.")
        print("")
    def tambahsetelah(self, key, data) :
        """Insert *data* immediately after every node whose data equals *key*."""
        inserted = False
        current_node = self.head
        while current_node is not None :
            if current_node.next is None and current_node.data == key :
                # Inserting after the tail is an ordinary append
                # (tambahbelakang prints the confirmation itself).
                self.tambahbelakang(data)
                return
            elif current_node.data == key :
                new_node = Node(data)
                nxt = current_node.next
                current_node.next = new_node
                new_node.prev = current_node
                new_node.next = nxt
                nxt.prev = new_node
                inserted = True
                # Fix: step onto the new node so the shared advance below
                # skips it; the original re-visited it and looped forever
                # whenever data == key.
                current_node = new_node
            current_node = current_node.next
        # Fix: only report success when the key was actually found.
        if inserted :
            print("Data ditambahkan.")
            print("")
    def tambahsebelum(self, key, data) :
        """Insert *data* immediately before every node whose data equals *key*."""
        inserted = False
        current_node = self.head
        while current_node is not None :
            if current_node.prev is None and current_node.data == key :
                # Inserting before the head is an ordinary prepend.
                self.tambahdepan(data)
                return
            elif current_node.data == key :
                new_node = Node(data)
                prev = current_node.prev
                prev.next = new_node
                new_node.prev = prev
                new_node.next = current_node
                current_node.prev = new_node
                inserted = True
            current_node = current_node.next
        # Fix: only report success when the key was actually found.
        if inserted :
            print("Data ditambahkan.")
            print("")
    def hapusdepan(self) :
        """Remove the front node (message only when the list is empty)."""
        if self.head is None :
            # Fix: the original also printed "Data dihapus." on an empty list.
            print ("Data masih kosong.")
            print("")
            return
        self.head = self.head.next
        if self.head is not None :
            self.head.prev = None
        else :
            # Fix: the list became empty, so the tail must be cleared too.
            self.tail = None
        print("Data dihapus.")
        print("")
    def hapusbelakang(self) :
        """Remove the back node (message only when the list is empty)."""
        if self.tail is None :
            print ("Data masih kosong.")
            print("")
            return
        self.tail = self.tail.prev
        if self.tail is not None :
            self.tail.next = None
        else :
            # Fix: the original could never delete the last remaining node
            # and returned early without printing the confirmation.
            self.head = None
        print("Data dihapus.")
        print("")
    def hapustarget (self, data) :
        """Remove the first node whose data equals *data*."""
        if self.head is None :
            print ("Data masih kosong.")
            return
        current_node = self.head
        # Fix: compare with ==/!= instead of identity ('is'), which is
        # unreliable for strings built at runtime (e.g. from input()).
        while current_node.data != data and current_node.next is not None :
            current_node = current_node.next
        if current_node.data != data :
            print ("Data tidak ditemukan.")
            return
        if current_node.prev is not None :
            current_node.prev.next = current_node.next
        else :
            self.head = current_node.next
        if current_node.next is not None :
            current_node.next.prev = current_node.prev
        else :
            self.tail = current_node.prev
        print("Data dihapus.")
        print("")
    def tampil(self) :
        """Print the list front-to-back in 'a -> b -> ' form."""
        print("Data : ")
        print("")
        current_node = self.head
        while current_node is not None :
            print (current_node.data, end=" -> ")
            current_node = current_node.next
    def tampilreverse(self) :
        """Print the list back-to-front, comma separated (relies on tail)."""
        current_node = self.tail
        while current_node is not None :
            print (current_node.data, end=", ")
            current_node = current_node.prev
    def menuUmum(self):
        """Interactive console menu; loops until an unrecognized choice."""
        pilih = "y"
        while ((pilih == "y") or (pilih == "Y")):
            # os.system('clear')
            print('Pilih menu yang anda inginkan')
            print('==============================')
            print('1. Tambah data di belakang')
            print('2. Tambah data di depan')
            print('3. Tambah data setelah data')
            print('4. Tambah data sebelum data')
            print('5. Hapus data di depan')
            print('6. Hapus data di belakang')
            print('7. Hapus data pilihan')
            print('8. Tampilkan data')
            pilihan = str(input("Masukkan Menu yang anda pilih : "))
            if(pilihan == "1"):
                node = str(input("Masukkan data : "))
                self.tambahbelakang(node)
            elif(pilihan == "2"):
                node = str(input("Masukkan data : "))
                self.tambahdepan(node)
            elif(pilihan == "3"):
                node = str(input("Masukkan data : "))
                node2 = str(input("Masukkan setelah : "))
                self.tambahsetelah(node2, node)
            elif(pilihan == "4"):
                node = str(input("Masukkan data : "))
                node2 = str(input("Masukkan sebelum : "))
                self.tambahsebelum(node2, node)
            elif(pilihan == "5"):
                self.hapusdepan()
            elif(pilihan == "6"):
                self.hapusbelakang()
            elif(pilihan == "7"):
                node = str(input("Masukkan data yang ingin dihapus : "))
                self.hapustarget(node)
            elif(pilihan == "8"):
                self.tampil()
                x = input("")
            else :
                pilih = "n"
if __name__ == "__main__" :
d = doublelinkedlist()
d.menuUmum()
| Python | 186 | 31.903225 | 79 | /Double Linked List.py | 0.487908 | 0.484641 |
jDiazPrieto/real_estate_website | refs/heads/master | # A module is basically a file containing a set of functions to include in your application. There are core python modules, modules you can install using the pip package manager (including Django) as well as custom modules
import datetime
import time
import camelcase
import validator
today = datetime.date.today()
print(today)
print(time.time())
camel = camelcase.CamelCase()
print(camel.hump("camelCASE"))
email = "testtest.com"
if validator.validate_email(email):
print("email is good")
else:
print("emal is fucked up") | Python | 19 | 27.157894 | 222 | /python_sandbox_starter/modules.py | 0.765918 | 0.765918 |
Mou97/safeSpace | refs/heads/master | import time
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim
import torch.nn as nn
from collections import OrderedDict
from PIL import Image
import seaborn as sns
import numpy as np
import pandas as pd
import json
# %%
import torch.nn as nn
class SentimentRNN(nn.Module):
    """Binary text classifier: Embedding -> stacked LSTM -> Dropout -> Linear
    -> Sigmoid, emitting one probability per batch element (the sigmoid
    output at the final time step).

    NOTE: attribute names (embedding/lstm/dropout/fc/sig) are part of the
    saved state_dict contract -- do not rename them.
    """
    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
        """Build the layers; drop_prob applies between LSTM layers, while the
        explicit Dropout layer is fixed at 0.3."""
        super(SentimentRNN, self).__init__()
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        # embedding and LSTM layers
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            dropout=drop_prob, batch_first=True)
        # dropout layer
        self.dropout = nn.Dropout(0.3)
        # linear and sigmoid layers
        self.fc = nn.Linear(hidden_dim, output_size)
        self.sig = nn.Sigmoid()
    def forward(self, x, hidden):
        """
        Perform a forward pass of our model on some input and hidden state.

        x is cast to long and is expected to be (batch, seq) token ids;
        returns (sig_out, hidden) where sig_out is the last-step probability
        per batch element.
        """
        batch_size = x.size(0)
        # embeddings and lstm_out
        x = x.long()
        embeds = self.embedding(x)
        lstm_out, hidden = self.lstm(embeds, hidden)
        # stack up lstm outputs
        lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
        # dropout and fully-connected layer
        out = self.dropout(lstm_out)
        out = self.fc(out)
        # sigmoid function
        sig_out = self.sig(out)
        # reshape to be batch_size first
        sig_out = sig_out.view(batch_size, -1)
        sig_out = sig_out[:, -1] # get last batch of labels
        # return last sigmoid output and hidden state
        return sig_out, hidden
    def init_hidden(self, batch_size):
        ''' Initializes hidden state '''
        # Create two new tensors with sizes n_layers x batch_size x hidden_dim,
        # initialized to zero, for hidden state and cell state of LSTM
        weight = next(self.parameters()).data
        hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
                  weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
        return hidden
# %%
checkpoint = torch.load('model_devfest_2019.json', map_location=lambda storage, loc: storage)
vocab_to_int = json.load( open( "vocab_to_int.json" ) )
# %%
net = SentimentRNN(7366, 1, 800, 300, 2)
net.load_state_dict(checkpoint)
net.eval()
# %%
from string import punctuation
def pad_features(reviews_ints, seq_length):
    """Left-pad (or truncate) each token-id list to exactly seq_length.

    Shorter reviews are right-aligned behind leading zeros; longer reviews
    keep only their first seq_length tokens (the original behavior).
    Returns an int ndarray of shape (len(reviews_ints), seq_length).
    """
    features = np.zeros((len(reviews_ints), seq_length), dtype=int)
    for i, row in enumerate(reviews_ints):
        if not row:
            # Fix: an empty review crashed before, because features[i, -0:]
            # selects the whole row while the right-hand side is empty.
            continue
        features[i, -len(row):] = np.array(row)[:seq_length]
    return features
def tokenize_review(test_review):
    """Turn one review string into a singleton list of token-id lists.

    Lowercases, strips punctuation, splits on whitespace and maps every word
    through the module-level vocab_to_int (KeyError on unknown words).
    """
    lowered = test_review.lower()
    cleaned = ''.join(ch for ch in lowered if ch not in punctuation)
    return [[vocab_to_int[word] for word in cleaned.split()]]
def predict(net, test_review, sequence_length=200):
    """Classify one review string with the trained SentimentRNN.

    Returns a (label, probability) tuple; probability is the raw sigmoid
    output of the network.
    """
    net.eval()
    test_ints = tokenize_review(test_review)
    features = pad_features(test_ints, sequence_length)
    feature_tensor = torch.from_numpy(features)
    batch_size = feature_tensor.size(0)
    h = net.init_hidden(batch_size)
    output, h = net(feature_tensor, h)
    pred = torch.round(output.squeeze())
    prob = output.squeeze().item()
    # Fix: the original returned *set* literals, so callers unpacking
    # "label, prob = ..." received the two values in arbitrary order.
    if pred.item() == 1:
        return ("no hate detected!", prob)
    else:
        return ("Hate speech detected.", prob)
def getOutput(model, speech, seq_length):
    """Classify *speech* with *model*; thin wrapper around predict().

    The original recomputed tokenization/padding here and then discarded the
    results before delegating, so this now simply delegates.
    """
    return predict(model, speech, seq_length)
# %%
speech = "please kill your self"
cls, probToNoHate =getOutput(net,speech,200)
print(cls)
print(probToNoHate)
| Python | 140 | 28.707144 | 100 | /source/forDeployment/script.py | 0.628606 | 0.620913 |
faraoman/MachineLearning | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Sun May 27 15:06:16 2018
@author: jyoti
"""
import numpy as np
import matplotlib.pyplot as plt
N = 100
D = 2
X = np.random.randn(N, D)
X[:50, :] = X[:50, :] - 2*np.ones((50, D)) #centered at -2
X[50:, :] = X[50:, :] + 2*np.ones((50, D)) #centered at +2
T = np.array([0]*50 + [1]*50) #setting forst 50 elements of array to 0 and next 50 to 1
ones = np.array([[1]*N]).T
Xb = np.concatenate((ones, X), axis = 1)
w = np.random.randn(D + 1)
def sigmoid(a):
    """Elementwise logistic function 1 / (1 + exp(-a))."""
    neg_exp = np.exp(-a)
    return 1 / (1 + neg_exp)
Y = sigmoid(Xb.dot(w))
def crossEntropyErrorFunction(T, Y):
    """Total (summed) binary cross-entropy of predictions Y in (0,1) against
    0/1 targets T."""
    E = 0
    # Fix: iterate over the actual input length instead of the module-level N,
    # so the function works for any T/Y pair.
    for i in range(len(T)):
        if T[i] == 1:
            E -= np.log(Y[i])
        else:
            E -= np.log(1 - Y[i])
    return E
learning_rate = 0.1
for i in range(100):
if i % 10 == 0:
print(crossEntropyErrorFunction(T, Y))
w += learning_rate*Xb.T.dot(T - Y)
Y = sigmoid(Xb.dot(w))
print("Final weight, w: ", w) | Python | 47 | 19.340425 | 87 | /LogisticRegression/LogisticRegressionWithGradientDescent.py | 0.53822 | 0.474346 |
faraoman/MachineLearning | refs/heads/master |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 9 13:01:51 2018
@author: jyoti
"""
import numpy as np
import matplotlib.pyplot as plt
from util import getData
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
def main():
    """Show one random sample image per expression class, looping until the
    user answers 'Y' to the quit prompt."""
    X, Y = getData(balance_ones = False)
    while(True):
        for i in range(7):
            # all samples of class i -- assumes every label 0..6 is present
            # (np.random.choice(0) raises for an absent class)
            x, y = X[Y == i], Y[Y == i]
            N = len(y)
            j = np.random.choice(N)
            # NOTE(review): assumes each flattened sample is a 48x48 image.
            plt.imshow(x[j].reshape(48, 48), cmap = 'gray')
            plt.title(labels[y[j]])
            plt.show()
        prompt = input("Quit the program? Y/N\n")
        if prompt == 'Y':
            break
if __name__ == '__main__':
main()
| Python | 31 | 21.645161 | 76 | /Projects/FacialExpressionRecognition/show_images.py | 0.482566 | 0.458856 |
faraoman/MachineLearning | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Mon May 28 10:59:55 2018
@author: j.dixit
"""
import numpy as np
import matplotlib.pyplot as plt
N = 100
D = 2
X = np.random.randn(N, D)
X[:50, :] = X[:50, :] - 2*np.ones((50, D)) #centered at -2
X[50:, :] = X[50:, :] + 2*np.ones((50, D)) #centered at +2
T = np.array([0]*50 + [1]*50) #setting forst 50 elements of array to 0 and next 50 to 1
ones = np.array([[1]*N]).T
Xb = np.concatenate((ones, X), axis = 1)
w = np.random.randn(D + 1)
Z = Xb.dot(w)
def sigmoid(a):
    """Squash the activation a into (0, 1) via the logistic curve."""
    denom = 1 + np.exp(-a)
    return 1 / denom
#def forward(X, w, b):
# return sigmoid(X.dot(w) + b)
Y = sigmoid(Z)
def crossEntropyErrorFunction(T, Y):
    """Total (summed) binary cross-entropy of predictions Y in (0,1) against
    0/1 targets T."""
    E = 0
    # Fix: use len(T) instead of the module-level N so the function is not
    # coupled to the script's globals.
    for i in range(len(T)):
        if T[i] == 1:
            E -= np.log(Y[i])
        else:
            E -= np.log(1 - Y[i])
    return E
crossEntropyError = crossEntropyErrorFunction(T, Y)
print("With random/normally distributed weights: ",crossEntropyError)
learning_rate = 0.1
L2 = 0.1
for i in range(100):
if i % 10 == 0:
print(crossEntropyErrorFunction(T, Y))
w += learning_rate*(np.dot((T-Y).T, Xb) - L2*w)
Y = sigmoid(Xb.dot(w))
print("Final w: ", w)
| Python | 54 | 20.314816 | 87 | /LogisticRegression/L2regularisation.py | 0.567708 | 0.511285 |
faraoman/MachineLearning | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Mon May 28 16:22:16 2018
@author: j.dixit
"""
import numpy as np
import matplotlib.pyplot as plt
N = 4
D = 2
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]
])
T = np.array([0, 1, 1, 0])
ones = np.array([[1]*N]).T
#plt.scatter(X[:, 0], X[:, 1], c=T)
#plt.show()
xy = np.matrix(X[:, 0]*X[:, 1]).T
Xb = np.array(np.concatenate((ones, xy, X), axis = 1))
w = np.random.rand(D + 2)
z = Xb.dot(w)
def sigmoid(z):
    """Logistic squashing of the score z into a probability in (0, 1)."""
    decay = np.exp(-z)
    return 1 / (1 + decay)
Y = sigmoid(z)
def cross_entropy(T, Y):
    """Total binary cross-entropy of predictions Y in (0,1) against 0/1
    targets T."""
    E = 0
    # Fix: iterate over the input length instead of the module-level N.
    for i in range(len(T)):
        if T[i] == 1:
            E -= np.log(Y[i])
        else:
            # Fix: the negative-class term was np.log(1 - np.log(Y[i]));
            # the correct cross-entropy term is log(1 - Y[i]).
            E -= np.log(1 - Y[i])
    return E
learning_rate = 0.0001
error = []
for i in range(5000):
e = cross_entropy(T, Y)
error.append(e)
if i % 100 == 0:
print(e)
w += learning_rate*(np.dot((T-Y).T, Xb) - 0.01*w)
Y = sigmoid(Xb.dot(w))
plt.plot(error)
plt.title("Cross-entropy")
print("Final w: ", w)
print("Final classification rate", 1-np.abs(T-np.round(Y)).sum()/N)
| Python | 64 | 16.109375 | 67 | /LogisticRegression/XOR.py | 0.49589 | 0.444749 |
faraoman/MachineLearning | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Tue May 29 22:07:08 2018
@author: jyoti
"""
import numpy as np #importing the numpy package with alias np
import matplotlib.pyplot as plt #importing the matplotlib.pyplot as plt
N = 50
D = 50
X = (np.random.random((N, D))-0.5)*10
w_dash = np.array([1, 0.5, -0.5] + [0]*(D-3))
Y = X.dot(w_dash) + np.random.randn(N)*0.5
Y[-1]+=30 #setting last element of Y as Y + 30
Y[-2]+=30 #setting second last element of Y as Y + 30
plt.scatter(X, Y)
plt.title('Relationship between Y and X[:, 1]')
plt.xlabel('X[:, 1]')
plt.ylabel('Y')
plt.show()
X = np.vstack([np.ones(N), X]).T #appending bias data points colummn to X
w_ml = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, Y)) #finding weights for maximum likelihood estimation
Y_ml = np.dot(X, w_ml)
plt.scatter(X[:,1], Y)
plt.plot(X[:,1],Y_ml, color='red')
plt.title('Graph of maximum likelihood method(Red line: predictions)')
plt.xlabel('X[:, 1]')
plt.ylabel('Y')
plt.show()
costs = []
w = np.random.randn(D)/np.sqrt(D)
L1_coeff = 5
learning_rate = 0.001
for t in range(500):
Yhat = X.dot(w)
delta = Yhat - Y
w = w - learning_rate*(X.T.dot(delta) + L1_coeff*np.sign(w))
meanSquareError = delta.dot(delta)/N
costs.append(meanSquareError)
w_map = w
Y_map = X.dot(w_map)
plt.scatter(X[:,1], Y)
plt.plot(X[:,1],Y_ml, color='red',label="maximum likelihood")
plt.plot(X[:,1],Y_map, color='green', label="map")
plt.title('Graph of MAP v/s ML method')
plt.legend()
plt.xlabel('X[:, 1]')
plt.ylabel('Y')
plt.show() | Python | 60 | 25.6 | 109 | /LinearRegression/L1reg.py | 0.608777 | 0.571787 |
faraoman/MachineLearning | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Sat May 26 19:13:44 2018
@author: jyoti
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
def get_data():
    """Load ecommerce_data.csv and return (X2, Y).

    X2 holds the D-1 numeric feature columns (columns 1 and 2 z-score
    normalized) followed by a 4-wide one-hot encoding of the final
    categorical column (values 0-3).
    """
    df = pd.read_csv("ecommerce_data.csv")
    # Fix: DataFrame.as_matrix() was removed from pandas; to_numpy() is the
    # supported equivalent.
    data = df.to_numpy()
    X = np.array(data[:, :-1])
    Y = np.array(data[:, -1])
    # z-score normalize the two continuous columns
    X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
    X[:, 2] = (X[:, 2] - X[:, 2].mean()) / X[:, 2].std()
    N, D = X.shape
    X2 = np.zeros((N, D + 3))
    # Fix: copy all D-1 non-categorical columns; the original copied only
    # D-2 of them, silently zeroing one feature.
    X2[:, 0:D - 1] = X[:, 0:D - 1]
    # one-hot encode the categorical column into the last four slots
    for n in range(N):
        t = int(X[n, D - 1])
        X2[n, t + (D - 1)] = 1
    # sanity check against a vectorized encoding
    Z = np.zeros((N, 4))
    Z[np.arange(N), X[:, D - 1].astype(np.int32)] = 1
    assert np.abs(X2[:, -4:] - Z).sum() < 10e-10
    return X2, Y
def get_binary_data():
    """Return only the class-0/1 rows of the e-commerce data, for binary
    logistic regression."""
    X, Y = get_data()
    keep = Y <= 1
    return X[keep], Y[keep]
X, Y = get_binary_data()
D = X.shape[1]
W = np.random.randn(D)
b = 0
def sigmoid(a):
    """Logistic function 1 / (1 + exp(-a)), applied elementwise."""
    falloff = np.exp(-a)
    return 1 / (1 + falloff)
def forward(x, w, b):
    """Model output P(y=1|x): sigmoid of the affine score x.w + b."""
    score = x.dot(w) + b
    return sigmoid(score)
P_Y_Given_X = forward(X, W, b)
predictions = np.round(P_Y_Given_X)
def classification_rate(Y, P):
    """Fraction of predictions P that equal the targets Y."""
    correct = Y == P
    return np.mean(correct)
print("Score: ", classification_rate(Y, predictions))
| Python | 61 | 19.721312 | 53 | /LogisticRegression/predict_logistic.py | 0.504348 | 0.458498 |
faraoman/MachineLearning | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Sun May 27 13:33:29 2018
@author: jyoti
"""
import numpy as np
import matplotlib.pyplot as plt
N = 100
D = 2
X = np.random.randn(N, D)
X[:50, :] = X[:50, :] - 2*np.ones((50, D)) #centered at -2
X[50:, :] = X[50:, :] + 2*np.ones((50, D)) #centered at +2
T = np.array([0]*50 + [1]*50) #setting forst 50 elements of array to 0 and next 50 to 1
ones = np.array([[1]*N]).T
Xb = np.concatenate((ones, X), axis = 1)
w = np.random.randn(D + 1)
Z = Xb.dot(w)
def sigmoid(a):
    """Map activation a onto (0, 1) with the logistic function."""
    denominator = 1 + np.exp(-a)
    return 1 / denominator
#def forward(X, w, b):
# return sigmoid(X.dot(w) + b)
Y = sigmoid(Z)
def crossEntropyErrorFunction(T, Y):
    """Total (summed) binary cross-entropy of predictions Y in (0,1) against
    0/1 targets T."""
    E = 0
    # Fix: use len(T) instead of the module-level N so the function is
    # self-contained.
    for i in range(len(T)):
        if T[i] == 1:
            E -= np.log(Y[i])
        else:
            E -= np.log(1 - Y[i])
    return E
crossEntropyError = crossEntropyErrorFunction(T, Y)
print("With random/normally distributed weights: ",crossEntropyError)
w = np.array([0, 4, 4])
Z = Xb.dot(w)
Y = sigmoid(Z)
crossEntropyError = crossEntropyErrorFunction(T, Y)
print("With calculated weights/closed form solution: ",crossEntropyError)
plt.scatter(X[:, 0], X[:, 1], c = T, s = 100, alpha = 0.5)
plt.title("Two Gaussian clouds and the discriminating line")
x_axis = np.linspace(-6, 6, 100)
y_axis = -x_axis
plt.plot(x_axis, y_axis)
plt.show() | Python | 58 | 21.620689 | 87 | /LogisticRegression/CrossEntropyErrorFunction.py | 0.604882 | 0.553013 |
faraoman/MachineLearning | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Sun May 27 15:21:54 2018
@author: jyoti
"""
# -*- coding: utf-8 -*-
"""
Created on Sat May 26 19:13:44 2018
@author: jyoti
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
def get_data():
    """Load ecommerce_data.csv and return (X2, Y).

    X2 holds the D-1 numeric feature columns (columns 1 and 2 z-score
    normalized) followed by a 4-wide one-hot encoding of the final
    categorical column (values 0-3).
    """
    df = pd.read_csv("ecommerce_data.csv")
    # Fix: DataFrame.as_matrix() was removed from pandas; to_numpy() is the
    # supported equivalent.
    data = df.to_numpy()
    X = np.array(data[:, :-1])
    Y = np.array(data[:, -1])
    # z-score normalize the two continuous columns
    X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
    X[:, 2] = (X[:, 2] - X[:, 2].mean()) / X[:, 2].std()
    N, D = X.shape
    X2 = np.zeros((N, D + 3))
    # Fix: copy all D-1 non-categorical columns; the original copied only
    # D-2 of them, silently zeroing one feature.
    X2[:, 0:D - 1] = X[:, 0:D - 1]
    # one-hot encode the categorical column into the last four slots
    for n in range(N):
        t = int(X[n, D - 1])
        X2[n, t + (D - 1)] = 1
    # sanity check against a vectorized encoding
    Z = np.zeros((N, 4))
    Z[np.arange(N), X[:, D - 1].astype(np.int32)] = 1
    assert np.abs(X2[:, -4:] - Z).sum() < 10e-10
    return X2, Y
def get_binary_data():
    """Keep only the rows whose target is 0 or 1 (binary subset)."""
    X, Y = get_data()
    binary_mask = Y <= 1
    return X[binary_mask], Y[binary_mask]
X, Y = get_binary_data()
X, Y = shuffle(X, Y)
X_train = X[:-100]
Y_train = Y[:-100]
X_test = X[-100:]
Y_test = Y[-100:]
D = X.shape[1]
N = X.shape[0]
w = np.random.randn(D)
b = 0
def sigmoid(a):
    """Elementwise logistic function: 1 / (1 + exp(-a))."""
    exp_neg = np.exp(-a)
    return 1 / (1 + exp_neg)
def forward(x, w, b):
    """Predicted probability for each row of x: sigmoid(x.w + b)."""
    activation = x.dot(w) + b
    return sigmoid(activation)
def classification_rate(Y, P):
    """Accuracy: mean of the elementwise Y == P indicator."""
    hits = Y == P
    return np.mean(hits)
def crossEntropyErrorFunction(T, Y):
    """Mean binary cross-entropy of predictions Y in (0,1) against 0/1
    targets T."""
    per_sample = T*np.log(Y) + (1 - T)*np.log(1 - Y)
    return -np.mean(per_sample)
train_costs = []
test_costs = []
learning_rate = 0.001
for i in range(10000):
pY_train = forward(X_train, w, b)
pY_test = forward(X_test, w, b)
ctrain = crossEntropyErrorFunction(Y_train, pY_train)
ctest = crossEntropyErrorFunction(Y_test, pY_test)
train_costs.append(ctrain)
test_costs.append(ctest)
w -= learning_rate*X_train.T.dot(pY_train - Y_train)
b -= learning_rate*(pY_train - Y_train).sum()
if i % 1000 == 0:
print(i, ctrain, ctest)
print("Final training classification rate: ", classification_rate(Y_train, np.round(pY_train)))
print("Final test classification rate: ", classification_rate(Y_test, np.round(pY_test)))
legend1, = plt.plot(train_costs, label="train cost")
legend2, = plt.plot(test_costs, label="test cost")
plt.legend([legend1, legend2])
plt.show() | Python | 101 | 21.514851 | 95 | /LogisticRegression/EcommerceProject.py | 0.560053 | 0.514298 |
faraoman/MachineLearning | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 10 17:55:24 2018
@author: jyoti
"""
from __future__ import division, print_function
from builtins import range
import numpy as np
import matplotlib.pyplot as plt
class LinearRegression(object):
    """Multiple linear regression (no bias term) fit by batch gradient
    descent."""
    def __init__(self):
        # Weights are created lazily by fit().
        pass
    def fit(self, X, Y, eta=10, epochs=2000):
        """Learn self.w from data X of shape (N, D) and targets Y of shape (N,).

        NOTE(review): the default eta=10 with an unnormalized full-batch
        gradient diverges on most data sets -- callers should pass a small
        learning rate; the default is kept only for interface compatibility.
        """
        N, D = X.shape
        self.w = np.random.randn(D)
        #self.b = 0
        for i in range(epochs):
            Yhat = self.predict(X)
            delta = Yhat - Y  # the error between predicted and actual output
            self.w = self.w - eta*(X.T.dot(delta))  # gradient descent step
        print("Final weights are ", self.w)
        #print("Final bias point is ", self.b)
        # Fix: the original printed the bound method object (self.costs)
        # instead of the final cost value.
        print("Final cost is ", self.costs(X, Y))
    def predict(self, X):
        """Return the predictions X.dot(w) for each row of X."""
        return X.dot(self.w)
    def costs(self, X, Y):
        """Sum of squared errors of the current weights on (X, Y)."""
        Yhat = self.predict(X)
        return (Yhat - Y).dot(Yhat - Y)
def main():
    """Load data_2d.csv (one 'x1,x2,y' triple per line) and fit the model.

    NOTE(review): the file handle from open() is never closed, and fit's
    default eta=10 will diverge on most data -- confirm before relying on
    this driver.
    """
    X = []
    Y = []
    for line in open("data_2d.csv"):
        x1, x2, y = line.split(",")
        X.append([float(x1), float(x2)])
        Y.append(float(y))
    X = np.array(X)
    Y = np.array(Y)
    model = LinearRegression()
    model.fit(X, Y)
    #prediction = model.predict()
if __name__ == '__main__':
main()
| Python | 63 | 21.492064 | 85 | /LinearRegression/TemplateCode.py | 0.505563 | 0.488178 |
faraoman/MachineLearning | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Tue May 29 21:54:38 2018
@author: jyoti
"""
from __future__ import print_function, division
from builtins import range
import numpy as np # importing numpy with alias np
import matplotlib.pyplot as plt # importing matplotlib.pyplot with alias plt
No_of_observations = 50
No_of_Dimensions = 50
X_input = (np.random.random((No_of_observations, No_of_Dimensions))-0.5)*10 #Generating 50x50 matrix forX with random values centered round 0.5
w_dash = np.array([1, 0.5, -0.5] + [0]*(No_of_Dimensions-3)) # Making first 3 features significant by setting w for them as non-zero and others zero
Y_output = X_input.dot(w_dash) + np.random.randn(No_of_observations)*0.5 #Setting Y = X.w + some random noise
costs = [] #Setting empty list for costs
w = np.random.randn(No_of_Dimensions)/np.sqrt(No_of_Dimensions) #Setting w to random values
L1_coeff = 5
learning_rate = 0.001
for t in range(500):
Yhat = X_input.dot(w)
delta = Yhat - Y_output #the error between predicted output and actual output
w = w - learning_rate*(X_input.T.dot(delta) + L1_coeff*np.sign(w)) #performing gradient descent for w
meanSquareError = delta.dot(delta)/No_of_observations #Finding mean square error
costs.append(meanSquareError) #Appending mse for each iteration in costs list
plt.plot(costs)
plt.title("Plot of costs of L1 Regularization")
plt.ylabel("Costs")
plt.show()
print("final w:", w) #The final w output. As you can see, first 3 w's are significant , the rest are very small
# plot our w vs true w
plt.plot(w_dash, label='true w')
plt.plot(w, label='w_map')
plt.legend()
plt.show() | Python | 43 | 37.255814 | 149 | /LinearRegression/L1regularisation.py | 0.711679 | 0.681874 |
def output_lable(n):
    """Map the classifier's 0/1 output to a display label.

    Returns None for any other value (matching the original behavior).
    The misspelled name is kept because callers use it.
    """
    if n == 1:
        return "Offensive "
    if n == 0:
        return "Not Offensive "
def manual_testing(news):
    """Vectorize *news* with the module-level tfidf_vect and return the raw
    prediction array from the module-level model.

    NOTE(review): everything after the first return is unreachable dead
    code; it looks like a word-by-word censoring routine that was meant to
    be a separate function (it calls manual_testing per word and joins the
    masked words) -- confirm intent before extracting or deleting it.
    """
    testing_news = {"text":[news]}
    new_def_test = pd.DataFrame(testing_news)
    new_def_test["text"] = new_def_test["text"]
    new_x_test = new_def_test["text"]
    new_xv_test = tfidf_vect.transform(new_x_test)
    pred_sgdc = model.predict(new_xv_test)
    return pred_sgdc
    words=news.split()
    words2 =[]
    for x in words:
        res=manual_testing(x)
        if res == 1:
            words2.append('****')
        else:
            words2.append(x)
    s=' '.join(words2)
    return s | Python | 26 | 20.807692 | 48 | /Chat_App/chat/jjj.py | 0.598662 | 0.586957
ddward/ansible | refs/heads/master | from db import insert, exists, select_one, update
from werkzeug.security import check_password_hash, generate_password_hash
import logging
import traceback
def create_user(username,password):
    """Insert a new user row with a case-normalized username and a salted
    password hash.

    Errors (e.g. duplicate username) are swallowed after logging, so the
    caller gets no failure signal.
    """
    try:
        formattedUsername = format_username(username)
        hashedPassword = generate_password_hash(password)
        insert( 'user', ('username', 'password'), (formattedUsername, hashedPassword))
    except Exception as e:
        logging.error(traceback.format_exc())
def user_exists(username):
    """Return True/False when a row with this (case-normalized) username
    exists; returns None after logging if the lookup itself fails."""
    try:
        return exists('user', 'username', format_username(username))
    except Exception:
        logging.error(traceback.format_exc())
        print("User existence check failed")
def get_user(username):
    """Fetch the (username, password_hash) row for a user, or None when the
    user is missing or the query fails (failure is logged)."""
    try:
        return select_one('user', ('username', 'password'),
                          'username', format_username(username))
    except Exception:
        logging.error(traceback.format_exc())
        print("Failed to get user")
def update_user(username,password,new_password):
    """Change a user's password after verifying the current one.

    Silently does nothing when the user does not exist or the old password
    is wrong (errors are only logged), matching the module's style.
    """
    try:
        formattedUsername = format_username(username)
        user = get_user(formattedUsername)
        # Fix: the original read user[1] *before* the None check, so a
        # missing user raised TypeError instead of being handled.
        if user is not None:
            user_password = user[1]
            if check_password_hash(user_password, password):
                newHashedPassword = generate_password_hash(new_password)
                update('user', {'password': newHashedPassword}, 'username', formattedUsername)
    except Exception:
        # Fix: bare except -> except Exception, so KeyboardInterrupt and
        # SystemExit are not swallowed.
        logging.error(traceback.format_exc())
def gen_default_user():
    """Interactively create the 'default' account from the console.

    Prompts (without echo) until two matching passwords of at least 8
    characters are entered.
    """
    # Fix: getpass was never imported in this module, so every call raised
    # NameError; import it locally to keep the module surface unchanged.
    from getpass import getpass
    while True:
        password = getpass(prompt='Create a password, at least 8 characters: ')
        password2 = getpass(prompt='Confirm password: ')
        if password == password2:
            if len(password) < 8:
                print('Password must be at least 8 characters.')
            else:
                break
        else:
            print('Passwords do not match')
    try:
        create_user('default', password)
    except Exception:
        # Fix: bare except -> except Exception.
        logging.error(traceback.format_exc())
def format_username(username):
    """Normalize a username for storage and lookup (accounts are
    case-insensitive)."""
    return username.lower()
ddward/ansible | refs/heads/master | import re
def sanitize(path):
    """Best-effort cleanup of a user-supplied relative path.

    Removes '..' pairs, collapses runs of slashes, then strips any leading
    slashes and dots so the result cannot point above the serving root.
    """
    # escape nasty double-dots
    path = path.replace('..', '')
    # then collapse any duplicate slashes
    path = re.sub('//+', '/', path)
    # then drop any leading slashes and dots
    return path.lstrip('/.')
ddward/ansible | refs/heads/master | from bs4 import BeautifulSoup
import getpass
import requests
import os
def pTest(attack_string, attack_url, password):
    """Path-traversal probe: log in, request attack_url + attack_string raw,
    and compare the response against the site root to judge whether the
    attack escaped the top directory.
    """
    payload = {'password': password}
    with requests.Session() as s:
        # authenticate first so the probe hits the protected listing
        p = s.post(attack_url + 'login', data=payload)
        # prepare the request manually so the attack string is appended
        # verbatim, bypassing requests' normal URL normalization
        r = requests.Request('GET', attack_url)
        prepared = s.prepare_request(r)
        prepared.url += attack_string
        response = s.send(prepared)
        print('Sending request with url:', prepared.url)
        #print('Request successful:', response.ok)
        if response.ok:
            soup = BeautifulSoup(response.text, 'html.parser')
            # baseline: what the unmodified root URL returns
            safeResponse = s.get(attack_url)
            soup2 = BeautifulSoup(safeResponse.text, 'html.parser')
            if (response.text == safeResponse.text):
                print("Attack Failed - Attack Led to Top Directory")
            else:
                # responses differ: dump the anchor hrefs of both pages for
                # manual comparison
                print("Attack may have succeded")
                print("Attack response tags:")
                for link in soup.find_all('a'):
                    print(link.get('href'))
                print('')
                print('Safe Output')
                print('')
                for link in soup2.find_all('a'):
                    print(link.get('href'))
        else:
            print('Attack Failed - No Such Directory')
def pWrap(attack_string):
    """Run one attack string against the module-level target URL with the
    configured password."""
    pTest(attack_string, ATTACK_URL, PASSWORD)
PASSWORD = os.getenv('PWRD')
ATTACK_URL ='http://127.0.0.1:5050/'
ATTACK_STRINGS = [
'../../../..',
'test/../.././.../',
'..',
'level1/../..',
'level1/../../',
'pwd'
]
if __name__ == '__main__':
if not PASSWORD:
PASSWORD = print('First set environment variable PWRD. (export PWRD=YOUR_PASSWORD)')
else:
for attack in ATTACK_STRINGS:
pWrap(attack)
| Python | 58 | 30.017241 | 92 | /penetrationTesting.py | 0.560311 | 0.54975 |
ddward/ansible | refs/heads/master | # build_dir.py
import os
def build_dir(curPath):
    """Return {entry_name: {"is_dir": bool, "size": bytes}} for the visible
    entries directly inside curPath.

    Entries whose names start with a dot (hidden files / shortcuts) are
    excluded. stat field reference:
    https://docs.python.org/2/library/stat.html
    """
    listing = {}
    with os.scandir(curPath) as entries:
        for item in entries:
            # skip hidden files and shortcuts
            if item.name.startswith('.'):
                continue
            info = item.stat()
            listing[item.name] = {"is_dir": item.is_dir(),
                                  "size": info.st_size}
    return listing
ddward/ansible | refs/heads/master | from getpass import getpass
import os
import sqlite3
from werkzeug.security import generate_password_hash
from flask import g
import traceback
import logging
path = os.getcwd()
DATABASE = os.path.join(path, 'ansible.db')
def init_db():
    """(Re)create the schema by executing schema.sql against the database.

    NOTE(review): `app` is neither defined nor imported in this module, so
    calling this as-is raises NameError -- it presumably expects the Flask
    app object to be in scope; confirm before use.
    """
    with app.app_context():
        db = sqlite3.connect(DATABASE)
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
def get_db(app):
    """Return the SQLite connection cached on flask.g, creating it inside an
    application context on first use."""
    with app.app_context():
        if 'db' not in g:
            g.db = sqlite3.connect(
                DATABASE,
                detect_types=sqlite3.PARSE_DECLTYPES
            )
            # rows behave like mappings: columns accessible by name
            g.db.row_factory = sqlite3.Row
        return g.db
def insert(table, columnTuple, valueTuple):
    """INSERT one row, e.g. insert('user', ('a', 'b'), (1, 2)).

    Table/column names are interpolated and must be trusted, code-supplied
    strings; the values are always bound as SQL parameters. Errors are
    swallowed after logging.
    """
    try:
        dbConnection = sqlite3.connect(DATABASE)
        columnTupleString = ', '.join(columnTuple)
        # Fix: the placeholder list was hard-coded to exactly two '?',
        # which broke any insert with a different number of columns.
        placeholders = ', '.join(['?'] * len(columnTuple))
        dbConnection.execute(
            'INSERT INTO ' + table + ' (' + columnTupleString + ') VALUES (' + placeholders + ')',
            (valueTuple)
        )
        dbConnection.commit()
    except Exception as e:
        logging.error(traceback.format_exc())
def select_one(table, return_columns, query_column, value):
    """Return the first row's `return_columns` where query_column == value,
    or None when nothing matches (or on error, which is only logged).

    Table/column names are interpolated and must be trusted, code-supplied
    strings; the value is bound as a SQL parameter.
    """
    try:
        dbConnection = sqlite3.connect(DATABASE)
        result = (dbConnection.execute(
            'SELECT ' + ', '.join(return_columns) + ' FROM ' + table + ' WHERE ' + query_column + '= (?) Limit 1',
            (value,)
        ).fetchone())
        return result
    except Exception as e:
        logging.error(traceback.format_exc())
        # NOTE(review): message appears copy-pasted from exists(); it prints
        # even for non-user queries.
        print("User existence check failed")
def exists(table,column,value):
    """True iff some row of `table` has `column` == value; None on error
    (which is only logged). Table/column names must be trusted strings."""
    try:
        connection = sqlite3.connect(DATABASE)
        row = connection.execute(
            'SELECT CASE WHEN EXISTS( SELECT 1 FROM ' + table + ' WHERE ' + column + '= (?)) THEN 1 ELSE 0 END',
            (value,)
        ).fetchone()
        return row[0] == 1
    except Exception:
        logging.error(traceback.format_exc())
def update(table, update_dict, query_column, query_value):
    """UPDATE rows where query_column == query_value, setting every
    field/value pair in update_dict.

    Table/column names are interpolated and must be trusted, code-supplied
    strings. Fix: the SET values are now bound as SQL parameters instead of
    being spliced into the statement, which both broke on values containing
    quotes and was a SQL-injection vector.
    """
    try:
        dbConnection = sqlite3.connect(DATABASE)
        set_clause = ', '.join(field + ' = ?' for field in update_dict)
        params = tuple(update_dict.values()) + (query_value,)
        result = (dbConnection.execute(
            'UPDATE ' + table + ' SET ' + set_clause + ' WHERE ' + query_column + '= (?)',
            params
        ).fetchone())
        dbConnection.commit()
        return result
    except Exception as e:
        logging.error(traceback.format_exc())
def build_set_statement(updated_field_dict):
    """Render a dict of string fields/values as a SQL SET clause, e.g.
    {"a": "x", "b": "y"} -> "a = 'x', b = 'y'".

    Values are spliced verbatim, so they must be trusted strings.
    """
    rendered = [field + " = '" + value + "'"
                for field, value in updated_field_dict.items()]
    return ', '.join(rendered)
| Python | 86 | 30.023256 | 111 | /db.py | 0.590109 | 0.584489 |
ddward/ansible | refs/heads/master | from cryptography.fernet import Fernet
import datetime
from flask import (flash, Flask, g, Markup, redirect, render_template, request,
send_from_directory, session, url_for)
import functools
import logging
import os
from secrets import token_urlsafe
import sqlite3
import sys
from werkzeug.utils import secure_filename
from werkzeug.security import check_password_hash, generate_password_hash
from build_dir import build_dir
import sanitize_path
from db import get_db
from user import create_user, user_exists, gen_default_user, get_user, update_user
import html
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
app = Flask(__name__)
app.config["SECRET_KEY"] = os.urandom(256) # TODO: change to environemnt variable
app.config["CRYPTO_KEY"] = Fernet.generate_key() # TODO put this somewhere where it wont update often possibly environmnet analize impact of changing.
path = os.getcwd()
database = os.path.join(path, 'ansible.db')
db = get_db(app)
def login_required(view):
    """View decorator: allow the request only when the session is marked
    authenticated, otherwise redirect to the login page."""
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if 'authenticated' in session:
            return view(**kwargs)
        return redirect(url_for('login'))
    return wrapped_view
@app.route('/', defaults={'loc': ""}, methods=('GET',))
@app.route('/<path:loc>', methods=('GET',))
@login_required
def ansible(loc):
    """Serve the cloud drive: directory listing for folders, file download
    for paths that carry an extension.

    `loc` is the user-controlled path below the 'cloud-drive' root.
    """
    logging.debug('made it here')
    # Fix: sanitize() *returns* the cleaned path; the original discarded the
    # result, leaving `loc` completely unsanitized (path traversal).
    loc = sanitize_path.sanitize(loc)
    # TODO: if loc is empty return the home directory for the node
    # TODO: support encrypted share links (/share/...) and authenticate the
    # requested directory against the session's top-level directory
    logging.debug(loc)
    currentDir = os.path.join('cloud-drive', loc) #update to be maliable for sharing
    currentPath = os.path.join(path, currentDir)
    logging.debug(currentDir)
    logging.debug(currentPath)
    fileExtension = os.path.splitext(currentPath)[1]
    if fileExtension:
        # a path with an extension is treated as a file download
        splitUrl = currentPath.rsplit('/', 1)
        localDir = splitUrl[0]
        filename = splitUrl[1]
        # NOTE(review): localDir is already absolute, so os.path.join simply
        # returns it here -- confirm this is intended.
        absPath = os.path.join(path, 'cloud-drive', localDir)
        return send_from_directory(directory=absPath, filename=filename)
    # otherwise render a listing of the directory's visible entries
    directoryDict = build_dir(currentPath)
    return render_template('index-alt.html', directory=directoryDict, curDir=loc)
@app.route("/login", methods=('GET', 'POST'))
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
error = None
user = get_user(username)
if user is not None:
user_password = user[1]
if not check_password_hash(user_password, password):
error = 'Incorrect password, please try again.'
else:
error = 'User not found'
if error is None:
session.clear()
session['authenticated'] = 'true'
session['user_id'] = token_urlsafe()
return redirect(url_for('ansible'))
flash(error)
return render_template('login.html')
@app.route("/signup", methods=('GET','POST'))
def signup():
if request.method == 'POST':
username = request.form['name']
password = request.form['password']
error = None
if not user_exists(username):
create_user(username,password)
else:
error = 'Username already exists.'
if error is None:
return redirect(url_for('login'))
flash(error)
return render_template('signup.html')
@app.route("/updatepassword", methods=('GET','POST'))
def update_password():
if request.method == 'POST':
username = request.form['username']
prev_password = request.form['password']
new_password = request.form['new_password']
verified_new_password = request.form['verify_new_password']
error = None
if(new_password == verified_new_password):
if user_exists(username):
update_user(username,prev_password,new_password)
else:
error = 'User doesnt exist.'
else:
error = 'Passwords do not match'
if error is None:
return redirect(url_for('login'))
flash(error)
return render_template('update-password.html')
@app.route("/logout", methods=('GET',))
def logout():
del session['authenticated']
return redirect(url_for('login'))
| Python | 155 | 30.148388 | 150 | /app.py | 0.651625 | 0.649762 |
Lucasgb7/Simulacao_Discreta | refs/heads/main | import numpy as np
from random import randrange, uniform
class Material():
Type = 0
Time = 0
Weight = 0
TimeStamp = 0
def __init__(self, Type):
self.Type = Type
def materialValues(self):
if self.Type == 0: # Material A
self.Weight = 200 # 200kg
self.Time = int(uniform(3,8)) # 5 +- 2 (uniforme)
elif self.Type == 1: # Material B
self.Weight = 100 # 100kg
self.Time = 6 # 6 (constante)
else: # Material C
self.Weight = 50 # 50kg
if randrange(100) <= 33:
self.Time = 2 # P(2) = 0.33
else:
self.Time = 3 # P(3) = 0.67
if __name__ == "__main__":
simulationTime = 60 # Tempo de simulacao (min)
totalWeight = 0 # Peso do elevador
i = 0 # Contador de minutos
averageTimeA = [] # Calcular tempo medio Mat A
averageTimeB = [] # Calcular tempo medio Mat B
movedMaterialC = 0 # Contagem de Material C
materialsLift = [] # Materiais dentro do elevador
materialsQueue = [] # Materiais na fila do elevador
while i < simulationTime:
print("\nTempo: ", int(i),"min")
mat = Material(randrange(3)) # Criando material (0~2)=(A~C)
mat.materialValues() # Definindo tempo e pesos
mat.TimeStamp = i # Definindo tempo que o material chegou
materialsQueue.append(mat) # Adicionando material na fila
print("MAT[",mat.Type,"]")
for m in materialsQueue: # Verifica a fila de materiais
if m.Weight + totalWeight <= 400: # Checa se pode entrar no elevador
if m.Type == 1:
averageTimeB.append(i - m.TimeStamp) # Monitora o material B
materialsQueue.remove(m)
materialsLift.append(m)
totalWeight += m.Weight
i = i + m.Time
if m.Type == 0: # Monitorar Material A
m.TimeStamp = i
elif m.Type == 2: # Monitorar Material C
movedMaterialC =+ 1
print("-----------------------------------")
waiting = []
queue = []
for m in materialsQueue:
queue.append(m.Type)
print("Fila:", queue)
lift = []
for m in materialsLift:
lift.append(m.Type)
print("Elevador:", lift)
print("Peso elevador:", totalWeight,"kg")
print("Tempo:", i,"min")
print("-----------------------------------")
if totalWeight == 400: # Chega no peso maximo
i = i + 4 # Tempo de subir, descarregar e descer
totalWeight = 0
for m in materialsLift:
if m.Type == 0:
averageTimeA.append((i - 1) - m.TimeStamp) # Monitora tempo total do Material A
materialsLift.clear() # Remove todos os itens do elevador
i += 1
print("\nTempo medio de transito Material A: ", sum(averageTimeA)/len(averageTimeA), "min")
print("Tempo medio de espera do Material B: ", sum(averageTimeB)/len(averageTimeB), "min")
print("Números de caixas de Material C: ", movedMaterialC) | Python | 90 | 40.900002 | 100 | /AV1/elevador.py | 0.448011 | 0.430504 |
Lucasgb7/Simulacao_Discreta | refs/heads/main | import numpy as np
from random import randrange
# gera o numero de clientes com base na probabilidade
def numberCustomers(value):
if value > 65:
return 8
elif value > 35 and value < 65:
return 10
elif value > 10 and value < 35:
return 12
else:
return 14
# gera o numero de duzias por cliente com base na probabilidade
def numberBagelsPerCustomer(value):
if value > 60:
return 1
elif value > 30 and value < 60:
return 2
elif value > 10 and value < 30:
return 3
else:
return 4
if __name__ == "__main__":
days = 15 # nº iteracoes
bagelCost = 3.8 # custo de fabrica da duzia de baguete
bagelPrice = 5.4 # preco da duzia de baguete
bagelsAverage = 0
for day in range(days):
print("\nDia ", day)
# Clientes
value = randrange(100)
customers = numberCustomers(value)
print("Nº Clientes: ", customers)
# Baguetes por cliente
value = randrange(100)
bagelsPerCustomer = numberBagelsPerCustomer(value)
print("Baguetes/Cliente: ", bagelsPerCustomer)
# Baguetes para assar
bagelsToCook = customers * bagelsPerCustomer
print("Baguetes para assar: ", bagelsToCook)
bagelsAverage += bagelsToCook
print("\n\nMedia de Baguetes: ", bagelsAverage/days) | Python | 47 | 28.319149 | 63 | /AV1/padaria.py | 0.616558 | 0.584604 |
Lucasgb7/Simulacao_Discreta | refs/heads/main | import numpy as np
from random import randrange
def draw(value, probability):
return int(np.random.choice(value, 1, replace=False, p=probability))
if __name__ == "__main__":
# Criando os vetores de valores e suas probabilidades
bearingLifeExpect = np.arange(1000, 2000, 100)
probabilityLifeExpect = np.array([0.1, 0.13, 0.25, 0.13, 0.09, 0.12, 0.02, 0.06, 0.05, 0.05])
waitingTimeArray = np.arange(5, 20, 5)
probabilityWaitingTime = [0.6, 0.3, 0.1]
simluationTime = 10000 # 10.000h
bearing = [0,0,0] # Rolamentos
changingTime = [20, 30, 40] # Tempo de troca = 1: 20, 2: 30, 3: 40
# Sorteia tempo de vida para os rolamentos
for i in range(len(bearing)):
bearing[i] = draw(bearingLifeExpect, probabilityLifeExpect)
t = 0 # Contador para o tempo de simulacao
brokenBearings = 0 # Numero de rolamentos quebrados
totalCost = 0 # Custo total da simulacao
commingEvent = []
exitEvent = []
print("--------------------------------\nDefina o numero de rolamentos a serem trocados: ")
print("[1]: Troca UM rolamento quando algum rolamento quebra.")
print("[2]: Troca TRÊS rolamentos quando algum rolamento quebra.")
option = int(input("> "))
print("--------------------------------")
if option == 1:
print("Simulação 1: Troca de UM rolamento por vez\n")
print("--------------------------------")
while t <= simluationTime:
for i in range(len(bearing)):
if bearing[i] == t: # Caso rolamento atinga a vida util
newTime = draw(bearingLifeExpect, probabilityLifeExpect) # Define um novo tempo de vida para o rolamento
print("---------------")
print("Rolamento[", i, "]")
print("Quebrou em: ", t, "h\tExpectativa de vida: ", bearing[i], "h")
print("Nova expectativa de vida: ", newTime, "h")
bearing[i] += newTime # Soma lifetime anterior com novo para posteriormente
brokenBearings += 1 # Incrementa o numero de rolamentos quebrados
if brokenBearings > 0: # Caso haja um rolamento quebrado
waitingTime = draw(waitingTimeArray, probabilityWaitingTime) # Atribui nova vida util
spentTime = changingTime[brokenBearings-1] # Pega o tempo gasto para consertar os bearing
cost = 5 * (waitingTime + spentTime) + spentTime + brokenBearings * 20 # Calcula o valor do concerto
totalCost += cost
print("Tempo concerto: ", spentTime,"\tTempo espera: ", waitingTime)
print("Custo concerto: ", cost, "R$\tCusto total: ", totalCost, "R$")
brokenBearings = 0
t += 100
elif option == 2:
print("Simulação 2: Troca de TRÊS rolamento por vez\n")
print("--------------------------------")
while t <= simluationTime:
for i in range(len(bearing)):
if bearing[i] == t:
newTime1 = draw(bearingLifeExpect, probabilityLifeExpect)
newTime2 = draw(bearingLifeExpect, probabilityLifeExpect)
newTime3 = draw(bearingLifeExpect, probabilityLifeExpect)
print("---------------")
print("Rolamento[1]:")
print("Quebrou em: ", t, "h\tExpectativa de vida: ", bearing[0], "h")
print("Nova expectativa de vida: ", newTime1, "h")
print("---------------")
print("Rolamento[2]:")
print("Quebrou em: ", t, "h\tExpectativa de vida: ", bearing[1], "h")
print("Nova expectativa de vida: ", newTime2, "h")
print("---------------")
print("Rolamento[3]:")
print("Quebrou em: ", t, "h\tExpectativa de vida: ", bearing[2], "h")
print("Nova expectativa de vida: ", newTime3, "h")
print("---------------")
bearing[0] += newTime1
bearing[1] += newTime2
bearing[2] += newTime3
waitingTime = draw(waitingTimeArray, probabilityWaitingTime)
spentTime = changingTime[2]
cost = 5 * (waitingTime +spentTime) + spentTime + 3 * 20
totalCost += cost
print("Tempo concerto: ", spentTime,"\tTempo espera: ", waitingTime)
print("Custo concerto: ", cost, "R$\tCusto total: ", totalCost, "R$")
t += 100 | Python | 101 | 47.58416 | 134 | /AV1/maquina.py | 0.498166 | 0.47289 |
Lucasgb7/Simulacao_Discreta | refs/heads/main | import matplotlib.pyplot as plt
import time
import qi2
# left XOR entre o cara do centro e da direita
def rule(array):
return array[0] ^ (array[1] or array[2])
# primeira linha do mosaico
def init(largura):
array = [0] * largura # inicio do mosaico, no começa inicializa com 1
# se for impar, coloca 1 no meio
if largura % 2:
array[largura // 2] = 1
else: # caso for par coloca so na metade (nao exata)
array.insert(largura//2, 1)
return array
def rule30(linhaAntiga):
largura = len(linhaAntiga)
linhaAntiga = [0] + linhaAntiga + [0] # ajustar com zeros na direita e esquerda da linha
novaLinha = []
for i in range(largura):
novaLinha.append( rule(linhaAntiga[i:i+3]) ) # coloca uma celula (1 ou 0)
return novaLinha
# usa largura e quantos bits vai utilizar pra fazer essa largura
def applyRule(largura, bits):
matriz = [init(largura)]
colunaCentro = []
colunaCentro.append(matriz[0][largura // 2])
while not matriz[-1][0]:
matriz.append(rule30(matriz[-1])) # executa a regra na ultima linha
colunaCentro.append(matriz[-1][largura // 2]) # atualiza o centro da matriz
return [matriz, colunaCentro[-bits:]]
def listToString(s):
# initialize an empty string
str1 = ""
# traverse in the string
for ele in s:
str1 += str(ele)
# return string
return str1
if __name__ == "__main__":
seed = int(str(time.time_ns())[14:17])
bits = 8
#start = time.time()
n = int(input("Número de iterações (n): "))
k = int(input("Número de categorias (k): "))
results = []
for i in range(n):
time.sleep(1)
result = applyRule((seed+bits)*2, bits)
rng = listToString(result[1])
rng = int(listToString(rng), 2)
print(rng)
results.append(rng)
#end = time.time()
'''
x2 = qi2.qi2Test(k, n, results)
print("================= RESULTADOS =================")
#print("Tempo de simulacao: ", end - start)
print("X²: ", x2)
print("Graus de Liberdade (GL):", k - 1)
print("Significância: 0.05")
''' | Python | 78 | 26.5 | 94 | /RNGs/role30_RNG.py | 0.588619 | 0.565765 |
Lucasgb7/Simulacao_Discreta | refs/heads/main | import qi2
def fbn(option, array, mod, k, j):
if option == 0:
result = (array[j-1] + array[k-1]) % mod
elif option == 1:
result = (array[j-1] - array[k-1]) % mod
elif option == 2:
result = (array[j-1] * array[k-1]) % mod
else:
result = (array[j-1] ^ array[k-1]) % mod
return result
seed = '123456789'
#j = int(input("J:"))
j = 1
#k = int(input("K:"))
k = 8
#mod = int(input("MOD:"))
mod = 1000
n = int(input("Numero de iteracoes:"))
categories = int(input("Numero de categorias: "))
results = []
array = []
for i in range(len(seed)):
array.append(int(seed))
print("0: '+' \n1: '-' \n2: '*' \n3: '^'")
option = int(input("Defina a operação: "))
for i in range(n):
result = fbn(option, array, mod, k, j)
print("Resultado: ", result)
array.remove(array[0])
array.append(result)
results.append(result)
x2 = qi2.qi2Test(categories, n, results)
print("================= RESULTADOS =================")
print("X^2: ", x2)
print("GL =", categories - 1)
print("Probabilidade = 0.05") | Python | 45 | 22.51111 | 55 | /RNGs/fibonacci_RNG.py | 0.550615 | 0.511826 |
Lucasgb7/Simulacao_Discreta | refs/heads/main | import time
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib.colors import NoNorm
import qi2
def squares(ctr, key):
y = x = ctr * key
z = y + key
two5 = np.uint64(32)
x = x * x + y; x = (x >> two5) | (x << two5)
x = x * x + z; x = (x >> two5) | (x << two5)
x = x * x + y; x = (x >> two5) | (x << two5)
return (x*x + z) >> two5
def draw(i):
nx = int(math.sqrt(i))
#print("tamanho da imagem", nx)
imagem = np.zeros((nx,nx), dtype=np.uint8)
#print("tam: ", i)
p = 0
ny = nx
for i in range(nx):
for j in range(ny):
imagem[i,j] = pixelvet[p]
#print(i, j, pixelvet[p])
p += 1
return imagem
if __name__ == "__main__":
np.seterr(all='ignore') # ignora erros de overflow, divisao/zero, underflow, etc...
key = np.uint64(0xf6235eca95b2c1e7)
#sum = np.uint64(0)
#pixelvet = []
#vetVal = []
n = np.uint64(input("Numero de iteracoes (n): "))
k = int(input("Numero de categorias (k): "))
gl = k - 1; print("Grau de Liberdade (GL): ", gl)
#p = float(input("Probabilidade de sucesso: "))
results = []
#start = time.time()
for i in range(n):
result = squares(np.uint64(i), key)
result = result / (2**32) # normaliza resultado de 32 bits
#print("[", i, "]:", result)
results.append(result)
#pixelvet.append(result)
#vetVal.append(result)
x2, intervals = qi2.qi2Test(k, n, results)
#end = time.time()
print("================= RESULTADOS =================")
#print("Media: ", hex(sum//n))
#print("Tempo de simulacao: ", end - start)
#pIndex = qi2.getProbabilityIndex(p)
#x2Max = qi2.table[gl-1][pIndex]
#print("x2Max: ", x2Max)
print("x2:" , x2)
qi2.histGraph(results, intervals)
'''
plt.figure("Graficos",figsize=(15,12))
plt.subplot(211)
imagem = draw(n)
plt.imshow(imagem, aspect="auto", cmap='gray', vmin=0, vmax=255,norm=NoNorm())
plt.axis("off")
plt.subplot(212)
plt.plot(vetVal, 'ro')
plt.grid(1)
plt.show()
''' | Python | 83 | 25.771084 | 99 | /Fast Counter-Based RNG/counterBasedRNG.py | 0.515083 | 0.484466 |
Lucasgb7/Simulacao_Discreta | refs/heads/main | import time
import numpy as np
import qi2
def xorShift(y):
y ^= np.uint32(y << 13)
y ^= np.uint32(y >> 17)
y ^= np.uint32(y << 15)
return y
if __name__ == "__main__":
np.seterr(all='ignore')
seed = 2463534242
y = np.uint32(seed)
#a, b, c = 13, 17, 15
#iteracoes = 1000
n = np.uint64(input("Numero de iteracoes (n): "))
k = int(input("Numero de categorias (k): "))
gl = k - 1; print("Grau de Liberdade (GL): ", gl)
p = float(input("Probabilidade de sucesso: "))
results = []
#start = time.time()
for i in range(n):
y = (xorShift(y))
aux = y / 4294967295 # normaliza resultado
#print("Valor: ", aux)
#print("y(", i, ") = ", aux)
results.append(aux)
#end = time.time()
x2, intervals = qi2.qi2Test(k, n, results)
print("================= RESULTADOS =================")
#print("Tempo de simulacao: ", end - start)
pIndex = qi2.getProbabilityIndex(p)
x2Max = qi2.table[gl-1][pIndex]
print("x2Max: ", x2Max)
print("x2:" , x2) | Python | 40 | 25.924999 | 59 | /RNGs/xorShift_RNG.py | 0.519517 | 0.464684 |
Lucasgb7/Simulacao_Discreta | refs/heads/main | import numpy as np
import time
import qi2
def wichmann(x, y, z):
x = 171 * (x % 177) - 2 * (x / 177)
y = 172 * (y % 177) - 35 * (y / 176)
z = 170 * (z % 178) - 63 * (z / 178)
if x < 0:
x = x + 30269
elif y < 0:
y = y + 30307
elif z < 0:
z + z + 30323
result = x/30269 + y/30307 + z/30323
result = result - int(result)
return result
if __name__ == "__main__":
np.seterr(all='ignore')
x = 1234
y = x + 1
z = y + 1
#iteracoes = 1000
n = np.uint64(input("Numero de iteracoes (n): "))
k = int(input("Numero de categorias (k): "))
gl = k - 1; print("Grau de Liberdade (GL): ", gl)
p = float(input("Probabilidade de sucesso: "))
results = []
#start = time.time()
for i in range(n):
w = wichmann(x, y, z)
y += 1
z += 2
print("w(", i, ") = ", y)
results.append(w)
#end = time.time()
x2, intervals = qi2.qi2Test(k, n, results)
print("================= RESULTADOS =================")
#print("Tempo de simulacao: ", end - start)
pIndex = qi2.getProbabilityIndex(p)
x2Max = qi2.table[gl-1][pIndex]
print("x2Max: ", x2Max)
print("x2:" , x2) | Python | 51 | 23.058823 | 59 | /RNGs/wichmann_RNG.py | 0.477161 | 0.402121 |
Lucasgb7/Simulacao_Discreta | refs/heads/main | import time
# John von Neumann's Generator
def JVN(x):
x = x ** 2
x = x / 100
x = x % 10000
return int(x)
# Linear Congruential Generator
def LCG(x):
return (a * x + c) % m
if __name__ == "__main__":
# seed = 322
simulationTime = 20
# x = int(input("Valor inicial [X0]: "))
x = 3
# m = int(input("Módulo [M], M>0: "))
m = 10
# a = int(input("Multiplicador [A], M>A>0: "))
a = 2
# c = int(input("Incrementador [C], M>=C>=0: "))
c = 0
start = time.time()
print(start)
for i in range(simulationTime):
# seed = JVN(seed)
# print("Semente: ", seed)
x = LCG(x)
print('X[', i, ']: ', x)
end = time.time()
print("Tempo para o cálculo:", end - start) | Python | 34 | 21.264706 | 52 | /RNGs/jvn_RNG.py | 0.478318 | 0.448095 |
MatheusLealAquino/meuCanal | refs/heads/master | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render, redirect, get_object_or_404
from conteudo.models import Video, Categoria
def exibir_catalogo(request):
categorias = Categoria.objects.all()
return render(request, 'conteudo/catalogo_videos.html', {'categorias': categorias})
def cadastro_video(request):
return render(request, 'conteudo/cadastro_video.html')
def editar_video(request):
return render(request, 'conteudo/editar_video.html')
def lista_categoria(request, id=None):
categorias = Categoria.objects.all()
if id != None:
videos_lista = Video.objects.all().filter(categoria_id=id)
else:
videos_lista = Video.objects.all()
paginator = Paginator(videos_lista, 3)
page = request.GET.get('page',1)
try:
videos = paginator.page(page)
except PageNotAnInteger:
videos = paginator.page(1)
except EmptyPage:
videos = paginator.page(paginator.num_pages)
return render(request, 'conteudo/lista_categoria.html', {'categorias': categorias, 'videos' : videos})
def exibir_video(request, id):
video = get_object_or_404(Video, id= id)
categorias = Categoria.objects.all()
return render(request, 'conteudo/player_video.html', {'video':video, 'categorias':categorias})
| Python | 38 | 34.052631 | 106 | /conteudo/views.py | 0.708709 | 0.701952 |
MatheusLealAquino/meuCanal | refs/heads/master | from django.urls import path
from conteudo import views
app_name = 'conteudo'
urlpatterns = [
path('', views.exibir_catalogo, name='catalogo'),
path('cadastro_video/', views.cadastro_video, name='cadastro_video'),
path('editar_video/<int:id>/', views.editar_video, name='editar_video'),
path('<int:id>/', views.exibir_video, name='exibir_video'),
path('categoria/', views.lista_categoria, name='listar_todas_categorias'),
path('categoria/<int:id>/', views.lista_categoria, name='lista_categoria'),
] | Python | 14 | 36.642857 | 79 | /conteudo/urls.py | 0.690114 | 0.690114 |
MatheusLealAquino/meuCanal | refs/heads/master | from django import forms
from conteudo.models import Video, Categoria
class VideoForm(forms.ModelForm):
error_messages = {
'campo invalido' : "Campo inválido"
}
class Meta:
model = Video
fields = ('video_id','categoria', 'nome', 'url', 'capa', 'visualizacao', 'nota', 'sinopse')
video_id = forms.CharField(widget=forms.HiddenInput(), required=False)
categoria = forms.ModelChoiceField(
error_messages={'required': 'Campo obrigatório', },
queryset=Categoria.objects.all().order_by(id),
empty_label='--- Selecionar a Categoria ---',
widget=forms.Select(attrs={'class': 'form-control form-control-sm'}),
required=True
)
nome = forms.CharField(
error_messages = {'required', 'Campo obrigatório',},
widget=forms.TextInput(attrs={'class': 'form-control form-control-sm', 'maxlength': '120'}),
required=True
)
| Python | 29 | 31.068966 | 100 | /conteudo/forms.py | 0.629431 | 0.626208 |
MatheusLealAquino/meuCanal | refs/heads/master | from django.db import models
class Categoria(models.Model):
nome = models.CharField(max_length=255, db_index=True)
slug = models.SlugField(max_length=200)
class Meta:
ordering = ('nome',)
verbose_name = 'categoria'
verbose_name_plural = 'categorias'
def __str__(self):
return self.nome
def videosCategoria(self):
return Video.objects.all().filter(categoria_id=self.id).order_by('-id')[:4]
class Video(models.Model):
categoria = models.ForeignKey(Categoria, on_delete=models.DO_NOTHING)
nome = models.CharField(max_length=255)
url = models.FileField(upload_to='conteudo/videos/')
capa = models.FileField(upload_to='conteudo/images/')
visualizacao = models.DecimalField(max_digits=10, decimal_places=1, default=0)
nota = models.FloatField(max_length=20)
sinopse = models.CharField(max_length=500)
class Meta:
ordering = ('nome',)
verbose_name = 'video'
verbose_name_plural = 'videos'
def __str__(self):
return self.nome | Python | 33 | 31.272728 | 83 | /conteudo/models.py | 0.650376 | 0.632519 |
MatheusLealAquino/meuCanal | refs/heads/master | from django.shortcuts import render
def pagina_inicial(request):
return render(request, 'index.html') | Python | 4 | 25.75 | 40 | /projeto/views.py | 0.773585 | 0.773585 |
MatheusLealAquino/meuCanal | refs/heads/master | from django.urls import path
from login import views
app_name = 'login'
urlpatterns = [
path('', views.pagina_login, name='pagina_login'),
] | Python | 8 | 17.375 | 54 | /login/urls.py | 0.69863 | 0.69863 |
MatheusLealAquino/meuCanal | refs/heads/master | from django.shortcuts import render
def pagina_login(request):
return render(request, 'login/pagina_login.html')
| Python | 4 | 28.5 | 53 | /login/views.py | 0.771186 | 0.771186 |
maryumraza/Walmart-Sales-Predictor | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 19:57:28 2020
@author: uni tech
"""
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import r2_score
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
# Initializing datasets
train=pd.read_csv('train.csv')
test=pd.read_csv('test_walmart.csv')
features=pd.read_csv('features.csv')
stores=pd.read_csv('stores.csv')
# Mergign train and features datasets
df= pd.merge(features, train, on=['Store', 'Date', 'IsHoliday'], how='inner')
# One Hot Encoding categorical data
one_hot=pd.get_dummies(stores['Type'])
stores=stores.drop('Type', axis=1)
stores = stores.join(one_hot)
df = pd.merge(df, stores, on=['Store'], how='inner')
# Separating date, month, and year from Date
df['Date']=pd.to_datetime(df['Date'])
df['year']=df['Date'].dt.year
df['month']=df['Date'].dt.month
del df['Date']
holiday= pd.get_dummies(df['IsHoliday'])
df= df.drop('IsHoliday', axis=1)
df= df.join(holiday)
# Fixing null values in markdown with the help of imputer class
se= SimpleImputer()
markdown= pd.DataFrame(se.fit_transform(df[['MarkDown1','MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5']]),columns=['MarkDown1','MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5'])
df= df.drop(['MarkDown1','MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5'], axis=1)
df = pd.concat([df,markdown], axis=1)
X = np.array(df.drop(columns='Weekly_Sales'))
y= np.array(df['Weekly_Sales']).reshape(-1,1)
# Normalizing inputs and outputs
scalar= preprocessing.MinMaxScaler()
X= scalar.fit_transform(X)
y= scalar.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Defining functions for regression
def linear_reg():
clf= LinearRegression()
return clf
def svm_reg():
clf= SVR(kernel='rbf', degree=3, gamma='scale')
return clf
def decision_tree():
clf=DecisionTreeRegressor(criterion='mse',splitter='best')
return clf
def random_forest():
clf= RandomForestRegressor(n_estimators=5, criterion='mse')
return clf
lr_ = linear_reg()
svm_ = svm_reg()
dt_ = decision_tree()
rf_ = random_forest()
models = [lr_ , dt_, svm_ , rf_]
for model in models:
y_train = y_train.ravel()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
score = r2_score(y_test, y_pred)
print(score)
| Python | 103 | 24.009708 | 183 | /walmart_sales.py | 0.671736 | 0.656899 |
lakerrenhu/reinforcement-learning-project | refs/heads/main | # valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import mdp, util
from learningAgents import ValueEstimationAgent
#import numpy as np
#import matplotlib.pyplot as plt
#import csv
class ValueIterationAgent(ValueEstimationAgent):
"""
* Please read learningAgents.py before reading this.*
A ValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs value iteration
for a given number of iterations using the supplied
discount factor.
"""
def __init__(self, mdp, discount = 0.9, iterations = 100):
"""
Your value iteration agent should take an mdp on
construction, run the indicated number of iterations
and then act according to the resulting policy.
Some useful mdp methods you will use:
mdp.getStates()
mdp.getPossibleActions(state)
mdp.getTransitionStatesAndProbs(state, action)
mdp.getReward(state, action, nextState)
mdp.isTerminal(state)
"""
self.mdp = mdp
self.discount = discount
self.iterations = iterations
self.values = util.Counter() # A Counter is a dict with default 0
# Write value iteration code here
"*** YOUR CODE HERE ***"
## three loops: for iteration,for states,for actions
j=0
tt=0
n=self.iterations
sset=self.mdp.getStates()
Vvalue=[]
#print(sset)
#print(len(sset))
def largest(array,N):
larg = array[0] # Initial value
i=1 # compare every element with
while i<N:
if array[i] > larg:
larg = array[i] # current max
i=i+1
return larg
while j<n: #iteration loop
self.values1=util.Counter()
ts=0
while ts<len(sset):#states loop
st=sset[ts]
#for st in sset:
dt=self.mdp.isTerminal(st)
Qvalue=[]
if dt==False:
#if st!='TERMINAL_STATE':
sets=self.mdp.getPossibleActions(st)
t=0
while t<len(sets):#action loop
tt=self.computeQValueFromValues(st, sets[t])
Qvalue.insert(len(Qvalue),tt)
t=t+1
#for t in sets:
#Qvalue.append(self.computeQValueFromValues(st, act))
# tt=self.computeQValueFromValues(st, t)
# Qvalue.insert(len(Qvalue),tt)
else:
Qvalue.insert(len(Qvalue),0)
larg=largest(Qvalue,len(Qvalue))
self.values1[st]=larg
#observe the evolution of V-value
if st==(0, 2):
#print(st)
#print(larg)
Vvalue.insert(len(Vvalue),larg) #visualize the evolution of V-value
ts=ts+1
self.values=self.values1
j=j+1
#check the stored V-value at state of (0,2)
#print(Vvalue)
# name of csv file
#filename = "Vvalues.csv"
# writing to csv file
# with open(filename, 'w') as csvfile:
# creating a csv writer object
# csvwriter = csv.writer(csvfile)
# writing the data rows
# csvwriter.writerows(Vvalue)
#compare the runtimes of two method
#plt.plot(range(1,len(Vvalue)+1), Vvalue, 'r--')
#plt.xlabel('the number of iteration')
#plt.ylabel('V-value')
#plt.title('The evolution of V-value at (0,2)')
#plt.text(5, 1.5, 'red: iterative method')
#plt.text(5, 1.3, 'green:direct method')
#plt.show()
def getValue(self, state):
"""
Return the value of the state (computed in __init__).
"""
return self.values[state]
def computeQValueFromValues(self, state, action):
"""
Compute the Q-value of action in state from the
value function stored in self.values.
"""
"*** YOUR CODE HERE ***"
#get the list of nextstate and prob from mdp.getTransitionStatesAndProbs(state, action)
#next_state_prob=self.mdp.getTransitionStatesAndProbs(state, action)
#store each transition result
Qvalue=[]
for next_state,prob in self.mdp.getTransitionStatesAndProbs(state, action):
Qvalue.insert(len(Qvalue),prob*(self.mdp.getReward(state, action, next_state)+
self.discount*self.values[next_state]))
return sum(Qvalue)
util.raiseNotDefined()
def computeActionFromValues(self, state):
"""
The policy is the best action in the given state
according to the values currently stored in self.values.
You may break ties any way you see fit. Note that if
there are no legal actions, which is the case at the
terminal state, you should return None.
"""
"*** YOUR CODE HERE ***"
dt=self.mdp.isTerminal(state)
tt=0
def largest(array,N): #find the largest one
larg = array[0] # Initial value
i=1
while i<N:
if array[i] > larg:
larg = array[i] # current max
i=i+1
#print ("Largest in given array is",maxm)
return larg
opt_policy= None
if dt==False:# if it's not terminal state
#acts=self.mdp.getPossibleActions(state)
Q_value=[]
#get all Qvalue
sets=self.mdp.getPossibleActions(state)
#print(len(sets))
#print(sets[0])
t1=0
while t1<len(sets):
tt=self.computeQValueFromValues(state, sets[t1])
Q_value.insert(len(Q_value),tt)
t1=t1+1
#get opt_policy=argmax(Qvalue)
t2=0
while t2<len(sets):
tt=self.computeQValueFromValues(state, sets[t2])
if tt==largest(Q_value,len(Q_value)):
opt_policy=sets[t2]
t2=t2+1
#for t in self.mdp.getPossibleActions(state):
# tt=self.computeQValueFromValues(state, t)
# if tt==largest(Q_value,len(Q_value)):
# opt_policy=t
return opt_policy
else:
return opt_policy
util.raiseNotDefined()
def getPolicy(self, state):
return self.computeActionFromValues(state)
def getAction(self, state):
"Returns the policy at the state (no exploration)."
return self.computeActionFromValues(state)
def getQValue(self, state, action):
return self.computeQValueFromValues(state, action)
| Python | 203 | 36.172413 | 95 | /valueIterationAgents.py | 0.549828 | 0.542274 |
lakerrenhu/reinforcement-learning-project | refs/heads/main | # analysis.py
# -----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
######################
# ANALYSIS QUESTIONS #
######################
# Set the given parameters to obtain the specified policies through
# value iteration.
def question2():
#the noise affects the prob of jumping into the pits and V values.
#the current settings of discount=0.9 noise=0.2 cannot lead the agent
#to crossing the bridge.If the noise decreases to be 0 or close to 0,
#then the search of agent is treated as a deterministic problem.
#the V values from left to right will be:[5.9, 6.56, 7.29, 8.1, 9].
#theoretically, the agent will cross the bridge from left to right.
answerDiscount = 0.9
answerNoise = 0
return answerDiscount, answerNoise
def question3a():
#if the living reward is a big penalty, the agent tends to end the game quickly
#small noise mean more likely to risk
answerDiscount = 1#0.9
answerNoise = 0.01#0.01
answerLivingReward = -5
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3b():
    """Prefer the close exit (+1) while avoiding the cliff.

    A low discount makes the nearby +1 worth more than the distant +10, and
    near-zero noise keeps the agent away from accidental falls.
    """
    # (discount, noise, living reward)
    return 0.2, 0.01, 0
# If not possible, return 'NOT POSSIBLE'
def question3c():
    """Prefer the distant exit (+10), risking the cliff.

    No living penalty lets the agent aim for the bigger reward; a moderate
    discount plus tiny noise makes the short cliff-side route optimal.
    """
    # (discount, noise, living reward)
    return 0.5, 0.01, 0
# If not possible, return 'NOT POSSIBLE'
def question3d():
    """Prefer the distant exit (+10) while avoiding the cliff.

    No discounting and a mild living penalty favor the +10 exit, while the
    larger noise makes the cliff-side shortcut too risky to take.
    """
    # (discount, noise, living reward)
    return 1, 0.3, -0.2
# If not possible, return 'NOT POSSIBLE'
def question3e():
    """Avoid both exits and the cliff.

    A huge living reward makes staying alive dominate any exit reward, so the
    agent never terminates the episode on purpose.
    """
    # (discount, noise, living reward)
    return 1, 0, 100
# If not possible, return 'NOT POSSIBLE'
def question6():
    """Epsilon/learning-rate answer for the bridge Q-learning question.

    Left unanswered in this solution: both values are None.
    """
    # (epsilon, learning rate)
    return None, None
# If not possible, return 'NOT POSSIBLE'
if __name__ == '__main__':
    # Reflect over this module and print the parameter tuple returned by every
    # question* function. NOTE: Python 2 print statements -- this file targets
    # the Python 2 interpreter used by the Berkeley Pacman projects.
    print 'Answers to analysis questions:'
    import analysis
    for q in [q for q in dir(analysis) if q.startswith('question')]:
        response = getattr(analysis, q)()
        print ' Question %s:\t%s' % (q, str(response))
| Python | 90 | 36.955555 | 83 | /analysis.py | 0.70082 | 0.678279 |
lakerrenhu/reinforcement-learning-project | refs/heads/main | # qlearningAgents.py
# ------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import *
from learningAgents import ReinforcementAgent
from featureExtractors import *
import random,util,math
#import csv
# Module-level log: QLearningAgent.computeValueFromQValues appends the list of
# Q-values observed at grid cell (1, 2) here so their evolution can be plotted.
Qvector=[]
class QLearningAgent(ReinforcementAgent):
    """
    Tabular Q-learning agent.

    Learns an action-value table Q(s, a) online from observed transitions via
        Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (r + gamma * max_a' Q(s', a'))

    Instance variables provided by ReinforcementAgent:
      - self.epsilon   exploration probability
      - self.alpha     learning rate
      - self.discount  discount factor (gamma)
    Legal actions come from self.getLegalActions(state); an empty action list
    marks a terminal state throughout this class.
    """
    def __init__(self, **args):
        """Initialize the Q-table; util.Counter defaults every entry to 0.0."""
        ReinforcementAgent.__init__(self, **args)
        self.Qvalues = util.Counter()
    def getQValue(self, state, action):
        """
        Return Q(state, action). Counter yields 0.0 for pairs that were never
        updated, which is exactly the required behavior for unseen states.
        """
        return self.Qvalues[(state, action)]
    def computeValueFromQValues(self, state):
        """
        Return max_a Q(state, a) over the legal actions, or 0.0 when there
        are no legal actions (terminal state).
        """
        actions = self.getLegalActions(state)
        if not actions:
            # Terminal state: value is 0.0 by definition.
            return 0
        qvals = [self.getQValue(state, a) for a in actions]
        if state == (1, 2):
            # Debug instrumentation kept from the original implementation:
            # record the Q-values of grid cell (1, 2) for later visualization.
            Qvector.append(qvals)
        return max(qvals)
    def computeActionFromQValues(self, state):
        """
        Return an action maximizing Q(state, a), or None when there are no
        legal actions (terminal state). Ties are broken by the order in which
        getLegalActions lists the actions.
        """
        actions = self.getLegalActions(state)
        if not actions:
            return None
        return max(actions, key=lambda a: self.getQValue(state, a))
    def getAction(self, state):
        """
        Epsilon-greedy action selection: with probability self.epsilon take a
        uniformly random legal action, otherwise the greedy one. Returns None
        when there are no legal actions.
        """
        legalActions = self.getLegalActions(state)
        if not legalActions:
            # BUG FIX: the original drew random.choice(...) from the action
            # list *before* checking for the terminal state, which raises
            # IndexError when the list is empty.
            return None
        if util.flipCoin(self.epsilon):
            return random.choice(legalActions)
        return self.computeActionFromQValues(state)
    def update(self, state, action, nextState, reward):
        """
        Q-learning update for one observed (state, action, nextState, reward)
        transition. Called by the framework; never call it directly.
        """
        sample = reward + self.discount * self.getValue(nextState)
        old = self.getQValue(state, action)
        self.Qvalues[(state, action)] = (1 - self.alpha) * old + self.alpha * sample
    def getPolicy(self, state):
        "Greedy policy: the Q-maximizing action (no exploration)."
        return self.computeActionFromQValues(state)
    def getValue(self, state):
        "State value under the greedy policy: max_a Q(state, a)."
        return self.computeValueFromQValues(state)
class PacmanQAgent(QLearningAgent):
    """A QLearningAgent with Pacman-friendly default hyperparameters."""
    def __init__(self, epsilon=0.05, gamma=0.8, alpha=0.2, numTraining=0, **args):
        """
        These default parameters can be overridden from the pacman.py command
        line, e.g. to change the exploration rate:
            python pacman.py -p PacmanQLearningAgent -a epsilon=0.1
        alpha       - learning rate
        epsilon     - exploration rate
        gamma       - discount factor
        numTraining - number of training episodes (no learning afterwards)
        """
        args.update(epsilon=epsilon, gamma=gamma, alpha=alpha, numTraining=numTraining)
        self.index = 0  # this agent always controls Pacman
        QLearningAgent.__init__(self, **args)
    def getAction(self, state):
        """
        Delegate to QLearningAgent.getAction, then inform the parent class of
        the action chosen for Pacman. Do not change or remove this method.
        """
        chosen = QLearningAgent.getAction(self, state)
        self.doAction(state, chosen)
        return chosen
class ApproximateQAgent(PacmanQAgent):
    """
    ApproximateQLearningAgent: Q-learning with linear function approximation
    over features produced by self.featExtractor.

    You should only have to overwrite getQValue and update; all other
    QLearningAgent functions should work as is.
    """
    def __init__(self, extractor='IdentityExtractor', **args):
        # Look up the feature-extractor class by name and instantiate it.
        self.featExtractor = util.lookup(extractor, globals())()
        PacmanQAgent.__init__(self, **args)
        # Feature weights; Counter defaults every weight to 0.0.
        self.weights = util.Counter()
    def getWeights(self):
        # Accessor used by the autograder/debugging code.
        return self.weights
    def getQValue(self, state, action):
        """
        Should return Q(state,action) = w * featureVector
        where * is the dotProduct operator.
        """
        "*** YOUR CODE HERE ***"
        # NOTE(review): not implemented -- placeholder from the project skeleton.
        util.raiseNotDefined()
    def update(self, state, action, nextState, reward):
        """
        Should update the feature weights based on the observed transition.
        """
        "*** YOUR CODE HERE ***"
        # NOTE(review): not implemented -- placeholder from the project skeleton.
        util.raiseNotDefined()
    def final(self, state):
        "Called at the end of each game."
        # call the super-class final method
        PacmanQAgent.final(self, state)
        # did we finish training?
        if self.episodesSoFar == self.numTraining:
            # you might want to print your weights here for debugging
            "*** YOUR CODE HERE ***"
            pass
| Python | 276 | 32.405796 | 93 | /qlearningAgents.py | 0.572994 | 0.56692 |
kanak3699/Visualizing-a-Decision-Tree | refs/heads/master |
# coding: utf-8
"""Train a DecisionTreeClassifier on the iris data set, hold out one example
of each class as a tiny test set, and export the fitted tree to iris.pdf.

Converted from a Jupyter notebook; requires scikit-learn, pydotplus and a
Graphviz installation for the PDF rendering step.
"""
# BUG FIX: `from sklearn.externals.six import StringIO` -- sklearn.externals.six
# was removed from scikit-learn (>= 0.23); the stdlib io.StringIO is the
# correct replacement.
from io import StringIO

import numpy as np
import pydot
import pydotplus
import graphviz
from sklearn import tree
from sklearn.datasets import load_iris

iris = load_iris()

# Inspect the raw data: feature/class names and the first example.
print(iris.feature_names)
print(iris.target_names)
print(iris.data[0])
print(iris.target[0])
for i in range(len(iris.target)):
    print("Example %d: label %s, features %s" % (i, iris.target[i], iris.data[i]))

# Hold out the first example of each of the three classes as the test set.
test_idx = [0, 50, 100]
train_target = np.delete(iris.target, test_idx)
train_data = np.delete(iris.data, test_idx, axis=0)
test_target = iris.target[test_idx]
test_data = iris.data[test_idx]

clf = tree.DecisionTreeClassifier()
clf.fit(train_data, train_target)

# The held-out labels should match the tree's predictions.
print(test_target)
print(clf.predict(test_data))

# Render the fitted tree to a PDF via the Graphviz dot format.
dot_data = StringIO()
tree.export_graphviz(clf,
                     out_file=dot_data,
                     feature_names=iris.feature_names,
                     class_names=iris.target_names,
                     filled=True, rounded=True,
                     impurity=False)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("iris.pdf")
| Python | 168 | 7.910714 | 82 | /Visualizing a Decision Tree.py | 0.57038 | 0.533022 |
TishkoffLab/TF_Binding_scores | refs/heads/master | import sys
from pandas import *
import numpy as np
import matplotlib
from matplotlib import pyplot
import random
from scipy.stats import norm
import os
from argparse import ArgumentParser
import pybedtools
import pdb
import math
import time
# Command-line interface: inputs for computing background H scores per TF.
parser = ArgumentParser()
# parser.add_argument("-i", "--input_genes", dest="input_gene_file",
#                     help="input file containing the list of TF names, one per row")
parser.add_argument("-m", "--matrix_loc", dest="matrix_loc",
                    help="full path of the folder that contains the PWM matrix files")
parser.add_argument("-o", "--outname", dest="outname",
                    help="the name of the file to save the sequence scores to")
parser.add_argument("-f", "--refchrmfastaloc", dest="ref_fasta_loc",
                    help="location of the reference fasta files (should just be a fasta per chromosome) to use for getting the reference sequences")
parser.add_argument("-b", "--bg_frac_file", dest="bgfrac_file",
                    help="file containing the background frequency of A/C/T/G, for each autosomal chromosome.")
parser.add_argument("-r", "--reps", dest="reps",
                    help="number of replicate background binding scores to generate")
parser.add_argument("-g", "--genomesize", dest="genome_size_file",
                    help="file containing the chromosme names and sizes, to pull random sequences from")
def read_JASPAR_transfac_pfms(infile):
    """Parse one JASPAR PFM file in TRANSFAC format.

    Returns a dict containing 'Matrix_ID' and 'Matrix_Name' (from the DE
    line), any CC-line key:value metadata, 'TF_len' (the motif length), and
    one entry per motif position (1-indexed int key -> {base: count + 1}).

    NOTE(review): every count is stored as float(v) + 1 -- an implicit
    pseudo-count that guarantees no zero entries downstream.
    """
    pfms_file = open(infile,'r')
    pfms_info = {}
    seq_len = 0
    for line in pfms_file:
        line = line.split('\n')[0]
        # Space-separated lines are metadata records (DE = id/name, CC = key:value).
        if(len(line.split(' ')) > 1):
            line = line.split(' ')
            if(line[0] == 'DE'):
                pfms_info['Matrix_ID'] = line[1]
                pfms_info['Matrix_Name'] = line[2]
                seq_len = 0
            elif(line[0] == 'CC'):
                temp = line[1].split(':')
                pfms_info[temp[0]] = temp[1]
                # First CC line after the matrix rows also records the motif length.
                if(seq_len > 0):
                    pfms_info['TF_len'] = seq_len
        # Tab-separated lines are the matrix: a PO header row, then one row per position.
        elif(len(line.split('\t')) > 1):
            line = line.split('\t')
            if(line[0] == 'PO'):
                curr_matorder = line[1:]
            else:
                curr_vals = {}
                for n,v in enumerate(line[1:]):
                    curr_vals[curr_matorder[n]] = float(v)+1  # +1 pseudo-count
                pfms_info[int(line[0])] = curr_vals
                seq_len = int(line[0])
        else:
            pass
    pfms_file.close()
    return pfms_info
def get_matrix_byTF(tfname, info_dicts):
    """Return the PWM for *tfname* as a list of per-position base-count dicts
    (positions 1..TF_len), or None (after printing a message) when no parsed
    matrix in *info_dicts* carries that name."""
    match = next((d for d in info_dicts if d['Matrix_Name'] == tfname), None)
    if match is None:
        print('Could not find a PWM for Transcription Factor {0}'.format(tfname))
        return None
    return [match[pos] for pos in range(1, match['TF_len'] + 1)]
def get_lnPWM_from_fracPWM(fracPWM, bgfreqs):
    """Turn a fractional PWM into a -log-odds PWM against the background
    base frequencies in *bgfreqs* (a single-row DataFrame with columns
    frac_A/frac_C/frac_G/frac_T)."""
    bg = {base: bgfreqs['frac_{0}'.format(base)].values[0] for base in 'ACTG'}
    return [
        {base: -np.log(float(entry[base]) / bg[base]) for base in 'ACTG'}
        for entry in fracPWM
    ]
def get_fracPWM_from_matrix(pwm):
    """Convert a count PWM into a fractional PWM.

    Each per-position dict of base counts becomes {base: count / total}; a
    zero count is mapped to the pseudo-fraction 1 / total so downstream log
    transforms never see a zero.

    BUG FIX: the original tested `if(f == 0.0)` where `f` is undefined inside
    this function (it silently resolved to the module-level filename loop
    variable at runtime), so the zero-count branch either raised NameError or
    never fired. The test now inspects the actual count.
    """
    fracPWM = []
    for en in pwm:
        curr_totcount = sum(float(x) for x in en.values())
        temp_matrix = {}
        for b in 'ACTG':
            count = float(en[b])
            if count == 0.0:
                temp_matrix[b] = 1 / curr_totcount  # pseudo-fraction for zero counts
            else:
                temp_matrix[b] = count / curr_totcount
        fracPWM.append(temp_matrix)
    return fracPWM
def get_matrix_counts(pwm):
    """Return the total base count at each PWM position, as floats."""
    return [sum(float(v) for v in entry.values()) for entry in pwm]
def get_matrix_scores(pwm,seq):
    """Return the per-position PWM values for *seq* (pwm[n][base] for each base).

    Returns None (after printing a message) when *seq* contains a letter
    outside A/C/G/T.

    NOTE(review): the bare except also catches IndexError when *seq* is longer
    than the PWM; because the base is then still in 'ACTG', that position is
    silently skipped instead of being reported -- confirm this is intended.
    """
    seqval_list = []
    for n,b in enumerate(seq):
        try:
            seqval_list.append(float(pwm[n][b]))
        except:
            if(b not in 'ACTG'):
                print('Sequence contains a letter, {0}, that is not A/C/G/T at position {1}'.format(b,n))
                return None
            else:
                continue
    return seqval_list
def calculate_bgH(seq,ln_pwm,bgfreqs):
    """Intended to return a background-adjusted H score for *seq*.

    NOTE(review): this looks like dead/broken code: compute_Y is not defined
    anywhere in this module, and get_matrix_scores returns a plain Python
    list, so `currscore_ln - Y` would raise TypeError. No caller in this file
    uses it -- confirm before relying on it.
    """
    currscore_ln = get_matrix_scores(ln_pwm,seq)
    Y = compute_Y(seq,bgfreqs)
    H = currscore_ln - Y
    return np.sum(H)
def get_random_bgseqs(slen,reps,fastaloc,chrmsizes,seqinfo_file=None):
    """Collect *reps* background sequences of length *slen*.

    If *seqinfo_file* is given, previously sampled coordinates are reloaded
    from it first; the remainder are sampled uniformly from autosomes chr1-22
    using the per-chromosome fastas under *fastaloc*. Only windows consisting
    purely of A/C/T/G are kept. Returns [[chrm, start, end, SEQ], ...].

    NOTE(review): the bare except around the sampling step retries forever,
    so e.g. a missing fasta file turns this into an infinite loop.
    """
    chrms_touse = list(chrmsizes['chrom'])  # NOTE(review): computed but unused
    bgseq_list = []
    # Re-use coordinates recorded by a previous run, if available.
    if(seqinfo_file is not None):
        genseqs_df = read_csv(seqinfo_file,delimiter='\t')
        for r,seq in genseqs_df.iterrows():
            chrmfile = '{0}/chr{1}.fa'.format(fastaloc,seq['Chrm'])
            curr_seq = pybedtools.BedTool.seq('chr{0}:{1}-{2}'.format(seq['Chrm'],seq['Start'],seq['End']),chrmfile)
            bgseq_list.append([seq['Chrm'],seq['Start'],seq['End'],curr_seq.upper()])
    # Top up with freshly sampled random windows until we have *reps* of them.
    while(len(bgseq_list) < int(reps)):
        is_valid_seq = True
        try:
            curr_chrm = random.randint(1,22)
            curr_start = random.randint(1,chrmsizes.loc[chrmsizes['chrom'] == 'chr{0}'.format(curr_chrm)]['size'].values[0])
            #TODO check for end being over the size of the chrm
            curr_end = curr_start + slen
            chrmfile = '{0}/chr{1}.fa'.format(fastaloc,curr_chrm)
            curr_seq = pybedtools.BedTool.seq('chr{0}:{1}-{2}'.format(curr_chrm,curr_start,curr_end),chrmfile).upper()
            # Reject windows containing N or any other non-ACTG character.
            for b in curr_seq:
                if(b not in 'ACTG'):
                    is_valid_seq = False
                    continue
            if(is_valid_seq):
                bgseq_list.append([curr_chrm,curr_start,curr_end,curr_seq])
        except:
            continue
    return bgseq_list
if __name__ == "__main__":
    args = parser.parse_args()
    bgfrac_df = read_csv(args.bgfrac_file,delimiter='\t')
    chrmsizes_df = read_csv(args.genome_size_file,delimiter='\t')
    transfac_matrix_list = os.listdir(args.matrix_loc)
    # Write the output header up front; per-TF rows are appended as they finish.
    outfile = open(args.outname,'w')
    outfile.write('Average Background H score for each TF. Number of replicates: {0}\nTF_name\tBG Z score\n'.format(args.reps))
    bg_H_by_TF = {}
    outfile.close()
    for f in transfac_matrix_list:
        start = time.time()
        curr_JASPARmatrix = read_JASPAR_transfac_pfms('{0}/{1}'.format(args.matrix_loc,f))
        print('starting calculation for TF {0}'.format(curr_JASPARmatrix['Matrix_Name']))
        # Flatten the per-position count dicts (keys 1..TF_len) into a list.
        curr_matrix = []
        for i in range(1,curr_JASPARmatrix['TF_len']+1):
            curr_matrix.append(curr_JASPARmatrix[i])
        # Prefer previously sampled background coordinates; fall back to fresh sampling.
        try:
            bgseqs = get_random_bgseqs(curr_JASPARmatrix['TF_len'],args.reps,args.ref_fasta_loc,chrmsizes_df,'bgseqs_info_v1/{0}.{1}reps.random_bgseq.info'.format(curr_JASPARmatrix['Matrix_Name'],args.reps))
        except:
            bgseqs = get_random_bgseqs(curr_JASPARmatrix['TF_len'],args.reps,args.ref_fasta_loc,chrmsizes_df)
        bg_H_list = []
        curr_fracPWM = get_fracPWM_from_matrix(curr_matrix)
        # Precompute one -log-odds PWM per autosome, since background base
        # frequencies differ per chromosome.
        curr_lnfracPWM_bychrm = []
        for n in range(1,23):
            bgfreqs_n = bgfrac_df.loc[bgfrac_df['Chrm'] == str(n)][['frac_A','frac_C','frac_G','frac_T']]
            curr_lnfracPWM_bychrm.append(get_lnPWM_from_fracPWM(curr_fracPWM,bgfreqs_n))
        print('starting to calculate H scores for bg seqs, for TF {0}'.format(curr_JASPARmatrix['Matrix_Name']))
        time_allseqs = []
        for s in bgseqs:
            seqstart = time.time()
            curr_seq = s[3]
            curr_lnfracPWM = curr_lnfracPWM_bychrm[s[0]-1]  # s[0] is the 1-based chromosome number
            curr_H = np.sum(get_matrix_scores(curr_lnfracPWM,curr_seq))
            bg_H_list.append(curr_H)
            seqend = time.time()
            time_allseqs.append(seqend-seqstart)
        print('finished H calculations for all bg seqs, average time taken = {0}'.format(np.average(time_allseqs)))
        # Record which background windows were used so later runs can re-use them.
        currTF_output_df = DataFrame(bgseqs,columns=['Chrm','Start','End','seq'])
        currTF_output_df.to_csv('bgseqs_info/{0}.{1}reps.random_bgseq.info'.format(curr_JASPARmatrix['Matrix_Name'],args.reps),sep='\t',columns=['Chrm','Start','End'],index=False)
        # Background partition-style score: Z = sum over replicates of exp(-H).
        curr_z = np.sum([math.exp(-x) for x in bg_H_list])
        outfile = open(args.outname,'a')
        outfile.write('{0}\t{1}\n'.format(curr_JASPARmatrix['Matrix_Name'],curr_z))
        outfile.close()
        end = time.time()
        print('Finished calculation of bg Z score for TF {0}; time taken = {1}'.format(curr_JASPARmatrix['Matrix_Name'],(end - start)))
| Python | 212 | 40.745281 | 207 | /generate_backgroundH_forTFs.py | 0.598533 | 0.589955 |
TishkoffLab/TF_Binding_scores | refs/heads/master | import sys
from pandas import *
import numpy as np
import matplotlib
from matplotlib import pyplot
import random
from scipy.stats import norm
import os
from argparse import ArgumentParser
import pybedtools
import pdb
import math
import time
# Command-line interface: inputs for scoring a sequence or a SNP against TF PWMs.
parser = ArgumentParser()
parser.add_argument("-i", "--input_genes", dest="input_gene_file",
                    help="input file containing the list of TF gene names, one per row")
parser.add_argument("-s", "--sequence", dest="sequence",
                    help="sequence to compute score for, A/C/T/G")
parser.add_argument("-t", "--tfname", dest="tf_tocheck",
                    help="name of a specfic transcription factor, or a file containing any number of TFs (one per line). If this argument is supplied, then the script only calculates the score for that TF. Must supply a sequence as well.")
parser.add_argument("-m", "--matrix_loc", dest="matrix_loc",
                    help="full path of the folder that contains the PWM matrix files")
parser.add_argument("-o", "--outname", dest="outname",
                    help="the name of the file to save the sequence scores to")
parser.add_argument("-r", "--refallele", dest="ref_al",
                    help="reference allele for the snp of interest, A/C/T/G")
parser.add_argument("-a", "--altallele", dest="alt_al",
                    help="alternate allele for the snp of interest, A/C/T/G")
parser.add_argument("-p", "--position", dest="position",
                    help="position, in bp, for the snp of interest")
parser.add_argument("-c", "--refchrmfasta", dest="ref_fasta_file",
                    help="reference fasta file (should just be a single chromosome) to use for getting the reference sequence")
parser.add_argument("-b", "--bg_frac_file", dest="bgfrac_file",
                    help="file containing the background frequency of A/C/T/G, for each autosomal chromosome.")
parser.add_argument("-z", "--bg_zscore_file", dest="bgZscore_file",
                    help="file containing the background Z scores for each TF, precalculated using a significant number of replicates")
parser.add_argument("-f", "--tf_cutoff", dest="tfpbind_cutoff",
                    help="the cutoff for significant pbinding score. If this is provided, the script will check the snp against all tfs and orientations, then save only the results that are above the threshold.")
#Reads in the JASPAR PWM file (transfac formatted)
# infile (str): the PWM file to read in
#Returns:
# pfms_info (dict): dictionary containig the information about the PWM, from the file (plus an entry with the length of the PWM)
def read_JASPAR_transfac_pfms(infile):
    """Parse one JASPAR PFM file in TRANSFAC format.

    Returns a dict containing 'Matrix_ID' and 'Matrix_Name' (from the DE
    line), any CC-line key:value metadata, 'TF_len' (the motif length), and
    one entry per motif position (1-indexed int key -> {base: count + 1}).

    NOTE(review): every count is stored as float(v) + 1 -- an implicit
    pseudo-count that guarantees no zero entries downstream.
    """
    pfms_file = open(infile,'r')
    pfms_info = {}
    seq_len = 0
    for line in pfms_file:
        line = line.split('\n')[0]
        # Space-separated lines are metadata records (DE = id/name, CC = key:value).
        if(len(line.split(' ')) > 1):
            line = line.split(' ')
            if(line[0] == 'DE'):
                pfms_info['Matrix_ID'] = line[1]
                pfms_info['Matrix_Name'] = line[2]
                seq_len = 0
            elif(line[0] == 'CC'):
                temp = line[1].split(':')
                pfms_info[temp[0]] = temp[1]
                # First CC line after the matrix rows also records the motif length.
                if(seq_len > 0):
                    pfms_info['TF_len'] = seq_len
        # Tab-separated lines are the matrix: a PO header row, then one row per position.
        elif(len(line.split('\t')) > 1):
            line = line.split('\t')
            if(line[0] == 'PO'):
                curr_matorder = line[1:]
            else:
                curr_vals = {}
                for n,v in enumerate(line[1:]):
                    curr_vals[curr_matorder[n]] = float(v)+1  # +1 pseudo-count
                pfms_info[int(line[0])] = curr_vals
                seq_len = int(line[0])
        else:
            pass
    pfms_file.close()
    return pfms_info
#Loops through the info_dicts list (of PWM matrix file info), and returns the PWM matrix dict for the given TF
#Inputs:
# tfname (str): name of the transcription factor
# info_dicts (list): made by looping over all the JASPAR matrix files; this is a list of all of those matrices as dicts
#Returns:
# matrix_dict_touse (dict): the dictionary containing the PWM for the given TF
def get_matrix_byTF(tfname, info_dicts):
    """Look up *tfname* among the parsed PWM dicts in *info_dicts* and return
    its matrix as a list of per-position base-count dicts (positions
    1..TF_len). Prints a message and returns None when no match exists."""
    for info in info_dicts:
        if info['Matrix_Name'] == tfname:
            return [info[pos] for pos in range(1, info['TF_len'] + 1)]
    print('Could not find a PWM for Transcription Factor {0}'.format(tfname))
    return None
#Given a matrix dict for a TF, containing the PWM of the counts for each base in the sequence, returns just the PWM with each position entry being recalculated as a fraction of the count at that position
#Inputs:
# matrix_dict (dict): the dictionary containing the PWM for a given TF, in addition to the other data about that TF
#Returns:
# PWM_dict (dict): a dicitonary where each key is a position relative to the TF (1-indexed) and each value is a dictionary with keys A/C/G/T and values equal to the raw count divided by the total counts for all four bases at that position.
def get_fracPWM_from_matrix(pwm):
    """Convert a count PWM into a fractional PWM.

    Each per-position dict of base counts becomes {base: count / total}; a
    zero count is mapped to the pseudo-fraction 1 / total so downstream log
    transforms never see a zero.

    BUG FIX: the original tested `if(f == 0.0)` where `f` is undefined inside
    this function (it silently resolved to the module-level loop variable at
    runtime), so the zero-count branch either raised NameError or never
    fired. The test now inspects the actual count.
    """
    fracPWM = []
    for en in pwm:
        curr_totcount = sum(float(x) for x in en.values())
        temp_matrix = {}
        for b in 'ACTG':
            count = float(en[b])
            if count == 0.0:
                temp_matrix[b] = 1 / curr_totcount  # pseudo-fraction for zero counts
            else:
                temp_matrix[b] = count / curr_totcount
        fracPWM.append(temp_matrix)
    return fracPWM
#Given a fractional PWM (made by get_fracPWM_from_matrix) and a set of background frequencies of the four bases, calculate a -log PWM
#Inputs:
# fracPWM (dict): PWM where each entry is a fraction of the counts for each base
# bgfreqs (DataFrame): a dataframe with a single row, containing columns frac_A/frac_C/frac_G/frac_T which has the fraction of the chromosome/genome corrisponding to that base
#Returns:
# lnPWM_dict (dict): PWM where each entry is the fracPWM entry, divided by the background base fraction, then taken the negative natural log of it
def get_lnPWM_from_fracPWM(fracPWM, bgfreqs):
    """Convert a fractional PWM into a -log-odds PWM against the background
    base frequencies in *bgfreqs* (a single-row DataFrame with columns
    frac_A/frac_C/frac_G/frac_T)."""
    background = {}
    for base in 'ACTG':
        background[base] = bgfreqs['frac_{0}'.format(base)].values[0]
    lnPWM = []
    for entry in fracPWM:
        lnPWM.append({base: -np.log(float(entry[base]) / background[base])
                      for base in 'ACTG'})
    return lnPWM
#For a given sequence, returns the complementary sequence
#Inputs:
# seq (str): sequence of A/C/T/G
#Returns:
# new_seq (str): sequence of A/C/T/G complementary to the original sequence
def get_complseq(seq):
    """Return the complement of *seq* (A<->T, C<->G), preserving order.

    Characters outside A/C/T/G are reported via print and omitted from the
    result (matching the original behavior)."""
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    out = []
    for base in seq:
        if base in pairs:
            out.append(pairs[base])
        else:
            print('Base pair not A/C/T/G! {0}'.format(base))
    return ''.join(out)
#Given a sequence, replaces the allele at a specified position with the given allele
#Inputs:
# fullseq (str): sequence of A/C/T/G
# position (int): position of the allele to be replaced, relative to the length of the input sequence (so it must be <= len(fullseq))
# allele (str): A/C/T/G, to replace the one at the position in fullseq
#Returns:
# new_seq (str): fullseq, with the new allele at the given position
def make_seq(fullseq, position, allele):
    """Return *fullseq* with *allele* substituted at index *position*, uppercased.

    *position* is a 0-based index relative to *fullseq*; a position outside
    the sequence leaves it unchanged (matching the original behavior).

    BUG FIX / cleanup: the original wrapped the loop in a try/except whose
    handler could never trigger for string inputs (no statement in the try
    body raises for a str *allele*) and whose non-ACTG check therefore never
    ran. The dead error handling has been removed; behavior for valid inputs
    is unchanged.
    """
    rebuilt = ''.join(allele if idx == position else base
                      for idx, base in enumerate(fullseq))
    return rebuilt.upper()
#For a given PWM and sequence length, return the sum of the counts for all four bases at each position
#Inputs:
# pwm (dict): position weight matrix, with or without the additional info from the matrix file
# seqlen (int): length of the sequence, so that we can loop over the PWM
#Returns:
# pos_counts (list): list of counts at each position
def get_matrix_counts(pwm):
    """Return the total base count at each PWM position, as floats."""
    totals = []
    for entry in pwm:
        totals.append(sum(map(float, entry.values())))
    return totals
#For a given PWM and sequence, calculates the score at each position
#Inputs:
# pwm (dict): position weight matrix, with or without the additional info from the matrix file
# seq (str): A/C/G/T sequence
#Returns:
# seqval_list (list): list of the values at each position given the specific base in seq
def get_matrix_scores(pwm,seq):
    """Return the per-position PWM values for *seq* (pwm[n][base] for each base).

    Returns None (after printing a message) when *seq* contains a letter
    outside A/C/G/T.

    NOTE(review): the bare except also catches IndexError when *seq* is longer
    than the PWM; because the base is then still in 'ACTG', that position is
    silently skipped instead of being reported -- confirm this is intended.
    """
    seqval_list = []
    for n,b in enumerate(seq):
        try:
            seqval_list.append(float(pwm[n][b]))
        except:
            if(b not in 'ACTG'):
                print('Sequence contains a letter, {0}, that is not A/C/G/T at position {1}'.format(b,n))
                return None
            else:
                continue
    return seqval_list
# def calculate_bindingP(hscore,bg_z_score,tf_name):
# return math.exp(-hscore)/bgZ_score
# def ge
# def calculate_normed_bindingP(hscore,bg_z_df,tf_name):
# try:
# bgZ_touse = bg_z_df.loc[bg_z_df['TF_name'] == tf_name]['BG Z score'].values[0]
# except:
# print('Could not find background Z score for TF {0}'.format(tf_name))
# return -1
# return math.exp(-hscore)/bgZ_touse
def get_revseq_foroutput(seq, pos, tf_len):
    """Return [reversed sequence, mirrored position] for reporting a motif hit
    found on the reversed strand in the forward coordinate system."""
    return [seq[::-1], tf_len - pos]
def get_seq_combos(seq,al,tf_len):
    """Enumerate every placement of the SNP within a TF motif window.

    *seq* is the reference context centered on the SNP (the caller builds it
    with length 2*tf_len-1, the SNP at index tf_len-1). For each offset num
    in 0..tf_len-1, a tf_len-long window starting at tf_len-num-1 is cut, so
    the SNP lands at window index num, and the allele *al* is substituted at
    that position.

    Returns two parallel lists, [[window, num], ...], for the given strand
    and for its complement (via get_complseq).
    """
    seqs_list = []
    complseqs_list = []
    for num in range(tf_len):
        newseq = ''  # NOTE(review): unused
        curr_ref_start_pos = tf_len-num-1
        curr_ref_end_pos = curr_ref_start_pos+tf_len
        curr_seq = seq[curr_ref_start_pos:curr_ref_end_pos]
        tempseq = ''
        for n,b in enumerate(curr_seq):
            # n+curr_ref_start_pos is the index in the full context; tf_len-1 is the SNP.
            if((n+curr_ref_start_pos) == (tf_len-1)):
                tempseq = ''.join([tempseq,al])
            else:
                tempseq = ''.join([tempseq,b])
        seqs_list.append([tempseq,num])
        complseqs_list.append([get_complseq(tempseq),num])
    return seqs_list,complseqs_list
def get_scoredict_entry(seq,pwm,lnfracPWM,bgZ_score,bgfreqs,tfname,tfpbind_cutoff=None,seq_pos=None):
    """Score *seq* against one TF's PWM and bundle the results in a dict.

    Computes the raw count score, the fraction-of-counts score, the energy
    H = sum of -log-odds values, and two binding probabilities derived from
    the precomputed background partition score *bgZ_score*:
      bindingP        = exp(-H) / bgZ_score
      bindingP_normed = exp(-H) / (bgZ_score + exp(-H))

    When *tfpbind_cutoff* is given, a dict (including 'motif_pos' = *seq_pos*)
    is returned only if bindingP passes the cutoff; otherwise None. Without a
    cutoff, the dict (without 'motif_pos') is always returned on success.

    NOTE(review): *bgfreqs* is accepted but never used, and the bare except
    collapses every failure mode into a printed message plus None.
    """
    curr_scoredict = None
    try:
        rawscore_list = get_matrix_scores(pwm,seq)
        pos_counts = get_matrix_counts(pwm)
        tot_count = sum(pos_counts)
        score_ln = get_matrix_scores(lnfracPWM,seq)
        curr_fracscore = sum([rawscore_list[x]/pos_counts[x] for x in range(len(pos_counts))])
        curr_H = np.sum(score_ln)
        curr_bindingp = math.exp(-curr_H)/bgZ_score
        curr_normed_bindingp = math.exp(-curr_H)/(bgZ_score+math.exp(-curr_H))
        if(tfpbind_cutoff is not None):
            if(curr_bindingp >= float(tfpbind_cutoff)):
                curr_scoredict = {'tf_name':tfname,'raw_score':sum(rawscore_list),'tf_len':len(pwm),'counts_perpos':min(pos_counts),
                'fraction_score':curr_fracscore,'H':curr_H,'bindingP':curr_bindingp,'bindingP_normed':curr_normed_bindingp,'sequence':seq,'motif_pos':seq_pos}
        else:
            curr_scoredict = {'tf_name':tfname,'raw_score':sum(rawscore_list),'tf_len':len(pwm),'counts_perpos':min(pos_counts),
            'fraction_score':curr_fracscore,'H':curr_H,'bindingP':curr_bindingp,'bindingP_normed':curr_normed_bindingp,'sequence':seq}
    except:
        print('Could not get score dict for given PWM from TF {0}! Check inputs.'.format(tfname))
        return None
    return curr_scoredict
if __name__ == "__main__":
args = parser.parse_args()
bgfrac_df = read_csv(args.bgfrac_file,delimiter='\t')
if(args.sequence is not None):
sequence = args.sequence
#Read in the matrix files and make dict entries for each one
transfac_matrix_list = os.listdir(args.matrix_loc)
infodicts_list = []
for f in transfac_matrix_list:
curr_infodict = read_JASPAR_transfac_pfms('{0}/{1}'.format(args.matrix_loc,f))
infodicts_list.append(curr_infodict)
score_dict_bytf ={}
tfs_to_check = []
if(args.tf_tocheck is not None):
try:
tf_list_file = open(args.tf_tocheck,'r')
for line in tf_list_file:
tfs_to_check.append(line.split('\n')[0])
tf_list_file.close()
except:
tfs_to_check.append(args.tf_tocheck)
else:
for i in infodicts_list:
tfs_to_check.append(i['Matrix_Name'])
# except:
# for i in infodicts_list:
# tfs_to_check.append(i['Matrix_Name'])
bg_z_df = read_csv(args.bgZscore_file,delimiter='\t',skiprows=1)
bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == 'Total'][['frac_A','frac_C','frac_G','frac_T']]
for tf in tfs_to_check:
try:
bgZ_score = bg_z_df.loc[bg_z_df['TF_name'] == tf]['BG Z score'].values[0]
except:
print('Could not find background Z score for TF {0}'.format(tf))
break
curr_matrix = get_matrix_byTF(tf,infodicts_list)
curr_fracPWM = get_fracPWM_from_matrix(curr_matrix)
curr_lnfracPWM = get_lnPWM_from_fracPWM(curr_fracPWM,bgfreqs)
curr_sd = get_scoredict_entry(sequence,curr_matrix,curr_lnfracPWM,bgZ_score,bgfreqs,tf)
if(curr_sd is not None):
score_dict_bytf[tf] = curr_sd
#Writing the PWM scores to the output file
outfile = open(args.outname,'w')
outfile.write('Scores for Given Transcription Factors for sequence {0}, as a fraction of the total count \nTF_Name\tPWM Fraction Score\tTF Length\tTF Counts per position\tH\tP binding\n'.format(sequence))
for tf,scores in sorted(score_dict_bytf.items(), key=lambda k_v: k_v[1]['bindingP'],reverse=True):
outfile.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n'.format(tf,scores['fraction_score'],scores['tf_len'],scores['counts_perpos'],scores['H'],scores['bindingP'],scores['bindingP_normed']))
outfile.close()
elif(args.tfpbind_cutoff is not None):
# pdb.set_trace()
tpbindcutoff = float(args.tfpbind_cutoff)
position = int(args.position.split(':')[1])
chromosome = int(args.position.split(':')[0])
#Read in the matrix files and make dict entries for each one
transfac_matrix_list = os.listdir(args.matrix_loc)
infodicts_list = []
for f in transfac_matrix_list:
curr_infodict = read_JASPAR_transfac_pfms('{0}/{1}'.format(args.matrix_loc,f))
infodicts_list.append(curr_infodict)
bg_z_df = read_csv(args.bgZscore_file,delimiter='\t',skiprows=1)
try:
bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == str(chromosome)][['frac_A','frac_C','frac_G','frac_T']]
except:
bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == 'Total'][['frac_A','frac_C','frac_G','frac_T']]
infodicts_list_touse = []
if(args.tf_tocheck is not None):
tfs_to_check = []
try:
tf_list_file = open(args.tf_tocheck,'r')
for line in tf_list_file:
tfs_to_check.append(line.split('\n')[0])
tf_list_file.close()
except:
tfs_to_check.append(args.tf_tocheck)
for i in infodicts_list:
if(i['Matrix_Name'] in tfs_to_check):
infodicts_list_touse.append(i)
else:
infodicts_list_touse = infodicts_list
#Creating the final dictionary containing the values for each TF, with the Ref/Alt alleles
sig_score_dicts = []
for i in infodicts_list_touse:
start = time.time()
curr_matrix = []
for n in range(1,i['TF_len']+1):
curr_matrix.append(i[n])
fracPWM = get_fracPWM_from_matrix(curr_matrix)
lnfracPWM = get_lnPWM_from_fracPWM(fracPWM,bgfreqs)
ref_full_forward_seq = pybedtools.BedTool.seq('chr{0}:{1}-{2}'.format(chromosome,(position-len(curr_matrix)+1),(position+len(curr_matrix)-1)),args.ref_fasta_file).upper()
ref_full_reverse_seq = ''.join(list(reversed(ref_full_forward_seq)))
curr_forward_ref_seqlist,curr_forward_compl_ref_seqlist = get_seq_combos(ref_full_forward_seq,args.ref_al,len(curr_matrix))
curr_reverse_ref_seqlist,curr_reverse_compl_ref_seqlist = get_seq_combos(ref_full_reverse_seq,args.ref_al,len(curr_matrix))
try:
bgZ_score = bg_z_df.loc[bg_z_df['TF_name'] == i['Matrix_Name']]['BG Z score'].values[0]
except:
print('Could not find background Z score for TF {0}'.format(tf_name))
break
for s in curr_forward_ref_seqlist:
curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])
if(curr_scoredict is not None):
curr_scoredict['orientation'] = '+'
curr_scoredict['direction'] = 'forward'
curr_scoredict['allele'] = 'ref'
sig_score_dicts.append(curr_scoredict)
for s in curr_forward_compl_ref_seqlist:
curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])
if(curr_scoredict is not None):
curr_scoredict['orientation'] = '-'
curr_scoredict['direction'] = 'forward'
curr_scoredict['allele'] = 'ref'
sig_score_dicts.append(curr_scoredict)
for s in curr_reverse_ref_seqlist:
curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])
if(curr_scoredict is not None):
curr_scoredict['orientation'] = '+'
curr_scoredict['direction'] = 'reverse'
curr_scoredict['allele'] = 'ref'
new_seq,new_pos = get_revseq_foroutput(curr_scoredict['sequence'],curr_scoredict['motif_pos'],curr_scoredict['tf_len']-1)
curr_scoredict['sequence'] = new_seq
curr_scoredict['motif_pos'] = new_pos
sig_score_dicts.append(curr_scoredict)
for s in curr_reverse_compl_ref_seqlist:
curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])
if(curr_scoredict is not None):
curr_scoredict['orientation'] = '-'
curr_scoredict['direction'] = 'reverse'
curr_scoredict['allele'] = 'ref'
new_seq,new_pos = get_revseq_foroutput(curr_scoredict['sequence'],curr_scoredict['motif_pos'],curr_scoredict['tf_len']-1)
curr_scoredict['sequence'] = new_seq
curr_scoredict['motif_pos'] = new_pos
sig_score_dicts.append(curr_scoredict)
curr_forward_alt_seqlist,curr_forward_compl_alt_seqlist = get_seq_combos(ref_full_forward_seq,args.alt_al,len(curr_matrix))
curr_reverse_alt_seqlist,curr_reverse_compl_alt_seqlist = get_seq_combos(ref_full_reverse_seq,args.alt_al,len(curr_matrix))
for s in curr_forward_alt_seqlist:
curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])
if(curr_scoredict is not None):
curr_scoredict['orientation'] = '+'
curr_scoredict['direction'] = 'forward'
curr_scoredict['allele'] = 'alt'
sig_score_dicts.append(curr_scoredict)
for s in curr_forward_compl_alt_seqlist:
curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])
if(curr_scoredict is not None):
curr_scoredict['orientation'] = '-'
curr_scoredict['direction'] = 'forward'
curr_scoredict['allele'] = 'alt'
sig_score_dicts.append(curr_scoredict)
for s in curr_reverse_alt_seqlist:
curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])
if(curr_scoredict is not None):
curr_scoredict['orientation'] = '+'
curr_scoredict['direction'] = 'reverse'
curr_scoredict['allele'] = 'alt'
new_seq,new_pos = get_revseq_foroutput(curr_scoredict['sequence'],curr_scoredict['motif_pos'],curr_scoredict['tf_len']-1)
curr_scoredict['sequence'] = new_seq
curr_scoredict['motif_pos'] = new_pos
sig_score_dicts.append(curr_scoredict)
for s in curr_reverse_compl_alt_seqlist:
curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])
if(curr_scoredict is not None):
curr_scoredict['orientation'] = '-'
curr_scoredict['direction'] = 'reverse'
curr_scoredict['allele'] = 'alt'
new_seq,new_pos = get_revseq_foroutput(curr_scoredict['sequence'],curr_scoredict['motif_pos'],curr_scoredict['tf_len']-1)
curr_scoredict['sequence'] = new_seq
curr_scoredict['motif_pos'] = new_pos
sig_score_dicts.append(curr_scoredict)
# pdb.set_trace()
end = time.time()
# print('time taken to calculate all sequences for tf {0} = {1}'.format(i['Matrix_Name'],(end-start)))
if(len(sig_score_dicts) > 0):
outfile = open(args.outname,'w')
outfile.write('Scores for all Transcription Factors above bindingP score {0}\nTF_Name\tPWM Fraction Score\tTF Length\tTF Counts per position\tH\tP binding\tAllele\tOrientation\tDirection\tPosition in Motif\tBinding Sequence\n'.format(args.tfpbind_cutoff))
for scores in sorted(sig_score_dicts, key=lambda k: k['bindingP'],reverse=True):
outfile.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}\n'.format(scores['tf_name'],scores['fraction_score'],scores['tf_len'],scores['counts_perpos'],scores['H'],scores['bindingP'],scores['allele'],scores['orientation'],scores['direction'],scores['motif_pos'],scores['sequence']))
outfile.close()
else:
position = int(args.position.split(':')[1])
chromosome = int(args.position.split(':')[0])
#Reading in the TF_genes file, made by the bash script, which has the TF names, positions, and strandedness
gene_df = read_csv('{0}'.format(args.input_gene_file),header=None,delimiter='\t')
gene_df.columns = ['pos_start','pos_end','tf_name','strand']
#Read in the matrix files and make dict entries for each one
transfac_matrix_list = os.listdir(args.matrix_loc)
infodicts_list = []
for f in transfac_matrix_list:
curr_infodict = read_JASPAR_transfac_pfms('{0}/{1}'.format(args.matrix_loc,f))
infodicts_list.append(curr_infodict)
#Getting the reference sequence that contains all of the TF genes within it, then add the new start/stop coordinates relative to full ref seq to the dataframe
ref_pos_end = max(gene_df['pos_end'])
ref_pos_start = min(gene_df['pos_start'])
ref_full_seq = pybedtools.BedTool.seq('chr{0}:{1}-{2}'.format(chromosome,ref_pos_start,ref_pos_end),args.ref_fasta_file)
updated_pos = (position-ref_pos_start)
gene_df['relative_start'] = gene_df['pos_start']-ref_pos_start
gene_df['relative_end'] = gene_df['pos_end']-ref_pos_start
bg_z_df = read_csv(args.bgZscore_file,delimiter='\t',skiprows=1)
# pdb.set_trace()
#Creating the final dictionary containing the values for each TF, with the Ref/Alt alleles
score_dict_bytf ={}
for i,g in gene_df.iterrows():
curr_relative_pos = abs(updated_pos-g['relative_start'])
curr_refseq = make_seq(ref_full_seq[g['relative_start']:(g['relative_end']+1)],curr_relative_pos,args.ref_al)
curr_altseq = make_seq(ref_full_seq[g['relative_start']:(g['relative_end']+1)],curr_relative_pos,args.alt_al)
if(g['strand'] == '-'):
curr_refseq = get_complseq(curr_refseq)
curr_altseq = get_complseq(curr_altseq)
curr_matrix = get_matrix_byTF(g['tf_name'],infodicts_list)
try:
bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == str(chromosome)][['frac_A','frac_C','frac_G','frac_T']]
except:
bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == 'Total'][['frac_A','frac_C','frac_G','frac_T']]
# pdb.set_trace()
curr_fracPWM = get_fracPWM_from_matrix(curr_matrix)
curr_lnfracPWM = get_lnPWM_from_fracPWM(curr_fracPWM,bgfreqs)
try:
bgZ_score = bg_z_df.loc[bg_z_df['TF_name'] == g['tf_name']]['BG Z score'].values[0]
except:
print('Could not find background Z score for TF {0}'.format(tf_name))
curr_scoredict_ref = get_scoredict_entry(curr_refseq,curr_matrix,curr_lnfracPWM,bgZ_score,bgfreqs,g['tf_name'])
curr_scoredict_alt = get_scoredict_entry(curr_altseq,curr_matrix,curr_lnfracPWM,bgZ_score,bgfreqs,g['tf_name'])
curr_scoredict = {'ref_fraction_score':curr_scoredict_ref['fraction_score'],'alt_fraction_score':curr_scoredict_alt['fraction_score'],
'tf_len':curr_scoredict_ref['tf_len'],'counts_perpos':curr_scoredict_ref['counts_perpos'],'H (REF)':curr_scoredict_ref['H'],'H (ALT)':curr_scoredict_alt['H'],
'BindingP (REF)':curr_scoredict_ref['bindingP'],'BindingP (ALT)':curr_scoredict_alt['bindingP']}
score_dict_bytf[g['tf_name']] = curr_scoredict
#Writing the PWM scores to the output file
outfile = open(args.outname,'w')
outfile.write('Scores for Transcription Factors Containing SNP at {0} on chromosome {1}, as a fraction of the total count \nTF_Name\tPWM Fraction Score (REF allele)\tPWM Fraction Score (ALT allele)\tTF Length\tTF Counts per position\tH (REF)\tH (ALT)\tBinding P (REF)\tBinding P (ALT)\n'.format(position,chromosome))
for tf,scores in sorted(score_dict_bytf.items(), key=lambda k_v: k_v[1]['alt_fraction_score'],reverse=True):
outfile.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\n'.format(tf,scores['ref_fraction_score'],scores['alt_fraction_score'],scores['tf_len'],
scores['counts_perpos'],scores['H (REF)'],scores['H (ALT)'],scores['BindingP (REF)'],scores['BindingP (ALT)']))
outfile.close()
| Python | 510 | 52.752941 | 324 | /get_PWMscores.py | 0.603121 | 0.598381 |
NQ31/scrapy_project | refs/heads/master | import scrapy
from qiubaipro.items import QiubaiproItem
class Test2Spider(scrapy.Spider):
    """Spider that scrapes author names and joke text from qiushibaike.com."""
    name = 'test2'
    # allowed_domains = ['https://www.qiushibaike.com/']
    start_urls = ['https://www.qiushibaike.com/']

    def parse(self, response):
        """Yield one QiubaiproItem per entry listed on the front page.

        The unused ``all_data`` accumulator and the dead dict-building code
        were removed: items are simply yielded to the pipeline.
        """
        li_list = response.xpath('//*[@id="content"]/div/div[2]/div/ul/li')
        for li in li_list:
            # [0].extract() raises IndexError if the node is missing; the
            # selectors below match every <li> on this page layout.
            name = li.xpath('./div/div/a/span/text()')[0].extract()
            text = li.xpath('./div/a/text()')[0].extract()
            item = QiubaiproItem()
            item['name'] = name
            item['text'] = text
            yield item
| Python | 24 | 32.125 | 75 | /qiubaipro/qiubaipro/spiders/test2.py | 0.496863 | 0.49059 |
NQ31/scrapy_project | refs/heads/master | import scrapy
from mzitu.items import MzituItem
class MziSpider(scrapy.Spider):
    """Spider that crawls mzitu.com gallery listings and yields one MzituItem
    per image page, carrying the referer URL each image request needs
    (the site rejects downloads without a matching referer header)."""
    name = 'mzi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.mzitu.com/']
    # Visit the first few listing pages (page 1 has no /page/N/ suffix).
    def parse(self, response):
        page_num=response.xpath('/html/body/div[2]/div[1]/div[3]/div/a[4]/text()').extract_first()
        for i in range(0,4):
            if i+1==1:
                url='https://www.mzitu.com/'
            else:
                url='https://www.mzitu.com/page/%s/'%(i+1)
            # print('第%s页 --'%i,url)
            yield scrapy.Request(url=url,callback=self.page_parse,meta={'ref':url})
    # Collect the URL of each gallery on one listing page.
    def page_parse(self,response):
        fef=response.meta['ref']  # listing-page URL, sent as the referer header
        li_list=response.xpath('//div[@class="postlist"]/ul/li')
        for li in li_list[0:10]:
            tuji_url=li.xpath('./a/@href').extract_first()
            tuji_title=li.xpath('./span[1]/a/text()').extract_first()
            yield scrapy.Request(url=tuji_url,headers={'referer':fef},callback=self.tuji_parse,meta={'tuji_url':tuji_url,'ref':tuji_url})
    # Read how many pages a gallery has and request each one.
    # NOTE(review): a single MzituItem instance is shared by every request in
    # the loop below, so later iterations overwrite 'img_referer' before
    # earlier responses arrive — verify this is intended.
    def tuji_parse(self,response):
        item=MzituItem()
        ref=response.meta['ref']
        tuji_url=response.meta['tuji_url']
        tuji_page_num=response.xpath('/html/body/div[2]/div[1]/div[4]/a[5]/span/text()').extract_first()
        for i in range(int(tuji_page_num)):
            if i+1==1:
                url=tuji_url
            else:
                url=tuji_url+'/%s'%(i+1)
            item['img_referer']=url
            # print('图集第%s页 -url--'%i,url)
            yield scrapy.Request(url=url,headers={'referer':ref},callback=self.img_parse,meta={'item':item})
    # Extract the image URL and gallery title (used as folder name) from a page.
    def img_parse(self,response):
        item=response.meta['item']
        img_url=response.xpath('/html/body/div[2]/div[1]/div[3]/p/a/img/@src').extract_first()
        img_path=response.xpath('/html/body/div[2]/div[1]/div[3]/p/a/img/@alt').extract_first()
        item['img_path']=img_path
        # print(img_url)
        item['img_url']=img_url
        # print(item['img_url'])
        # print(item['img_path'])
        yield item
| Python | 51 | 40.450981 | 137 | /mzitu/mzitu/spiders/mzi.py | 0.55881 | 0.546528 |
NQ31/scrapy_project | refs/heads/master | import scrapy
from pian.items import PianItem
class BizhiSpider(scrapy.Spider):
    """Spider that collects full-size wallpaper URLs from netbian.com."""
    name = 'bizhi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://www.netbian.com/meinv/']

    def parse(self,response):
        """Request the first five listing pages (page 1 has no index suffix).

        The previous version also extracted a total page count that was
        never used; that dead lookup was removed.
        """
        for i in range(5):
            if i+1==1:
                url='http://www.netbian.com/meinv/'
            else:
                url='http://www.netbian.com/meinv/index_%s.htm'%(i+1)
            yield scrapy.Request(url=url,callback=self.parse_page)

    def parse_page(self, response):
        """Yield one request per wallpaper detail page on a listing page."""
        item = PianItem()
        li_list=response.xpath('//div[@class="list"]/ul/li')
        # Current listing page number, used as the download sub-folder name.
        page=response.xpath('//*[@id="main"]/div[4]/b/text()').extract_first()
        item['mulu']='第%s页'%(page)
        for li in li_list:
            try:
                geren_url='http://www.netbian.com'+li.xpath('./a/@href').extract_first()
            except TypeError:
                # Ad/filler <li> entries have no link: extract_first() returns
                # None and the concatenation raises TypeError; skip them.
                continue
            yield scrapy.Request(url=geren_url, callback=self.parse_detail,meta={'item':item})

    def parse_detail(self,response):
        """Extract the full-size image URL from one wallpaper detail page."""
        item = response.meta['item']
        img_url=response.xpath('//div[@class="pic"]/p/a/img/@src').extract_first()
        item['url']=img_url
        yield item
| Python | 37 | 35.567566 | 94 | /pian/pian/spiders/bizhi.py | 0.553585 | 0.548411 |
NQ31/scrapy_project | refs/heads/master | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
#导入相应的模块
from scrapy.pipelines.images import ImagesPipeline
import scrapy
from . import settings
import os
# class PianPipeline:
# def process_item(self, item, spider):
# return item
class PianImgPipeline(ImagesPipeline):
    """Image pipeline that stores each wallpaper under a per-page sub-folder."""

    def get_media_requests(self, item, info):
        # Request the image; the item travels in meta so file_path() can read
        # the destination folder from it.
        print('开始下载')
        return scrapy.Request(item['url'],meta={'item':item})

    def file_path(self, request, response=None, info=None):
        """Return the storage path, relative to IMAGES_STORE, for one image."""
        item=request.meta['item']
        wenjianjia=item['mulu']  # per-page sub-folder name
        # IMAGES_STORE is the root configured in settings; do not prefix it
        # with './' or an extra folder with the root's name gets created.
        img_source=settings.IMAGES_STORE
        img_path = os.path.join(img_source, wenjianjia)
        # exist_ok avoids the check-then-create race the old exists()/makedirs()
        # pair had when several downloads run concurrently.
        os.makedirs(img_path, exist_ok=True)
        # Name the file after the last URL path segment.
        file_name = request.url.split('/')[-1]
        # Scrapy expects the returned path to be relative to IMAGES_STORE.
        return os.path.join(wenjianjia, file_name)

    def item_completed(self, results, item, info):
        print('下载完成')
        return item
NQ31/scrapy_project | refs/heads/master | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.pipelines.images import ImagesPipeline
import scrapy
from . import settings
import os
# class MzituPipeline:
# def process_item(self, item, spider):
# return item
class myPipeline(ImagesPipeline):
    """Image pipeline that saves each image under its gallery's folder and
    sends the anti-hotlinking referer header with every download."""

    def get_media_requests(self, item, info):
        print('下载开始')
        # mzitu.com rejects image requests without a matching referer header.
        return scrapy.Request(item['img_url'],headers={'referer':item['img_referer']},meta={'item':item})

    def file_path(self, request, response=None, info=None):
        """Return the storage path, relative to IMAGES_STORE, for one image."""
        item=request.meta['item']
        floder=item['img_path']  # gallery title, used as the sub-folder name
        source_path=settings.IMAGES_STORE
        img_path=os.path.join(source_path,floder)
        # exist_ok avoids the check-then-create race the old exists()/makedirs()
        # pair had when several downloads run concurrently.
        os.makedirs(img_path, exist_ok=True)
        # Name the file after the last URL path segment.
        img_name = request.url.split('/')[-1]
        img_file_path=os.path.join(floder,img_name)
        print(img_file_path)
        return img_file_path

    def item_completed(self, results, item, info):
        print('下载结束')
        return item
tagplay/django-uuid-pk | refs/heads/master | import os
# Minimal Django settings used only by the django_uuid_pk test suite.
SITE_ID = 1
STATIC_URL = '/static/'
# Throwaway key for tests only — never reuse in production.
SECRET_KEY =';pkj;lkj;lkjh;lkj;oi'
# Database backend selected via the DBENGINE environment variable:
# 'pg', 'mysql', or anything else for the sqlite default.
db = os.environ.get('DBENGINE', None)
if db == 'pg':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'django_uuid_pk',
            'HOST': '127.0.0.1',
            'PORT': '',
            'USER': 'postgres',
            'PASSWORD': '',
            'OPTIONS': {
                'autocommit': True, # same value for all versions of django (is the default in 1.6)
            }}}
elif db == 'mysql':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'django_uuid_pk',
            'HOST': '127.0.0.1',
            'PORT': '',
            'USER': 'aa',
            'PASSWORD': ''}}
else:
    # Default backend: file-based sqlite database in the working directory.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'django_uuid_pk.sqlite',
            'HOST': '',
            'PORT': ''}}
INSTALLED_APPS = ('django.contrib.auth',
                  'django.contrib.contenttypes',
                  'django.contrib.sessions',
                  'django.contrib.sites',
                  'django_uuid_pk.tests')
ALLOWED_HOSTS = ('127.0.0.1',)
# Console-only logging so test output stays self-contained.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(levelname)-8s: %(asctime)s %(name)10s: %(funcName)40s %(message)s'
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
}
| Python | 59 | 26.288136 | 99 | /django_uuid_pk/tests/settings.py | 0.452174 | 0.434161 |
tagplay/django-uuid-pk | refs/heads/master | import os
import sys
from django.conf import settings
def pytest_configure(config):
    """pytest hook: point Django at the test settings before tests import models."""
    if not settings.configured:
        os.environ['DJANGO_SETTINGS_MODULE'] = 'django_uuid_pk.tests.settings'
def runtests(args=None):
    """Run the package's test suite with pytest and exit with its status code.

    When no positional (non-flag) argument is present after the program name,
    the bundled test directory is appended as the default target.
    """
    import pytest

    if not args:
        args = []
    positional = any(a for a in args[1:] if not a.startswith('-'))
    if not positional:
        args.append('django_uuid_pk/tests')
    sys.exit(pytest.main(args))
if __name__ == '__main__':
    # Allow invoking this conftest directly as a test entry point.
    runtests(sys.argv)
| Python | 25 | 17.84 | 78 | /conftest.py | 0.63482 | 0.632696 |
tagplay/django-uuid-pk | refs/heads/master | # from __future__ import absolute_import
# from .tests import *
# from .models import *
| Python | 3 | 28.333334 | 40 | /django_uuid_pk/tests/__init__.py | 0.693182 | 0.693182 |
tagplay/django-uuid-pk | refs/heads/master | import uuid
from django.db import models
from django_uuid_pk.fields import UUIDField
class ModelUUIDField(models.Model):
    """Model exercising every supported UUID version (1, 3, 4 and 5).

    Versions 3 and 5 are name-based and therefore require a namespace.
    """
    uuid1 = UUIDField(version=1, auto=True)
    uuid3 = UUIDField(namespace=uuid.NAMESPACE_URL, version=3, auto=True)
    uuid4 = UUIDField(version=4, auto=True)
    uuid5 = UUIDField(namespace=uuid.NAMESPACE_URL, version=5, auto=True)
class AutoUUIDFieldModel(models.Model):
    """Model whose UUID is generated automatically on save."""
    uuid = UUIDField(auto=True)
class ManualUUIDFieldModel(models.Model):
    """Model whose UUID must be supplied by the caller (auto=False)."""
    uuid = UUIDField(auto=False)
class NamespaceUUIDFieldModel(models.Model):
    """Model with an automatic name-based (version 5) UUID."""
    uuid = UUIDField(auto=True, namespace=uuid.NAMESPACE_URL, version=5)
class BrokenNamespaceUUIDFieldModel(models.Model):
    """Deliberately broken model: 'lala' is not a valid namespace UUID, so
    creating an instance is expected to raise ValueError (see tests)."""
    uuid = UUIDField(auto=True, namespace='lala', version=5)
class PrimaryKeyUUIDFieldModel(models.Model):
    """Model that uses a UUIDField as its primary key."""
    uuid = UUIDField(primary_key=True)
    #char = models.CharField(max_length=10, null=True)
class BrokenPrimaryKeyUUIDFieldModel(models.Model):
    """Model used to verify that a failed save() resets the UUID primary key.

    The ``unique`` column makes it easy to force an IntegrityError on save.
    """
    uuid = UUIDField(primary_key=True)
    unique = models.IntegerField(unique=True)

    def __repr__(self):
        # __repr__ must return a str; the previous implementation returned a
        # dict literal, which made repr() raise TypeError at runtime.
        return '%s(uuid=%r, unique=%r)' % (type(self).__name__, self.uuid, self.unique)
| Python | 37 | 27.81081 | 73 | /django_uuid_pk/tests/models.py | 0.743902 | 0.732645 |
tagplay/django-uuid-pk | refs/heads/master | import json
import uuid
from django.core.serializers import serialize
from django.db import IntegrityError
from django.test import TestCase
import pytest
from django_uuid_pk.fields import StringUUID
from django_uuid_pk.tests.models import (AutoUUIDFieldModel, ManualUUIDFieldModel, NamespaceUUIDFieldModel,
BrokenNamespaceUUIDFieldModel, PrimaryKeyUUIDFieldModel,
BrokenPrimaryKeyUUIDFieldModel, ModelUUIDField)
def assertJSON(data):
    """Assert that *data* parses as JSON.

    ``json.loads`` already raises ValueError (JSONDecodeError) on invalid
    input, so the previous ``try/except ValueError: raise`` wrapper was a
    no-op and has been removed.
    """
    json.loads(data)
@pytest.mark.django_db
class UUIDFieldTestCase(TestCase):
    """End-to-end tests for UUIDField: auto/manual values, namespaces,
    primary-key behaviour and serialization.

    The deprecated ``assertEquals`` alias was replaced by ``assertEqual``.
    """

    def test_protocols(self):
        # Every field version must come back as a real uuid.UUID instance.
        obj = ModelUUIDField.objects.create()
        self.assertTrue(isinstance(obj.uuid1, uuid.UUID))
        self.assertTrue(isinstance(obj.uuid3, uuid.UUID))
        self.assertTrue(isinstance(obj.uuid4, uuid.UUID))
        self.assertTrue(isinstance(obj.uuid5, uuid.UUID))

    def test_auto_uuid4(self):
        obj = AutoUUIDFieldModel.objects.create()
        self.assertTrue(obj.uuid)
        self.assertEqual(len(obj.uuid), 32)
        #self.assertTrue(isinstance(obj.uuid, uuid.UUID))
        self.assertEqual(obj.uuid.version, 4)

    def test_raises_exception(self):
        # auto=False means the caller must supply a value.
        self.assertRaises(IntegrityError, ManualUUIDFieldModel.objects.create)

    def test_manual(self):
        obj = ManualUUIDFieldModel.objects.create(uuid=uuid.uuid4())
        self.assertTrue(obj)
        self.assertEqual(len(obj.uuid), 32)
        #self.assertTrue(isinstance(obj.uuid, uuid.UUID))
        self.assertEqual(obj.uuid.version, 4)

    def test_namespace(self):
        obj = NamespaceUUIDFieldModel.objects.create()
        self.assertTrue(obj)
        self.assertEqual(len(obj.uuid), 32)
        #self.assertTrue(isinstance(obj.uuid, uuid.UUID))
        self.assertEqual(obj.uuid.version, 5)

    def test_broken_namespace(self):
        # namespace='lala' is not a UUID, so creation must fail.
        self.assertRaises(ValueError, BrokenNamespaceUUIDFieldModel.objects.create)

    def test_wrongvalue(self):
        obj = PrimaryKeyUUIDFieldModel.objects.create()
        with pytest.raises(ValueError):
            obj.uuid = 1

    def test_assign1(self):
        obj = PrimaryKeyUUIDFieldModel.objects.create()
        obj.uuid = uuid.UUID('5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4')
        obj.save()
        assert str(obj.uuid) == '5b27d1bde7c346f3aaf211e4d32f60d4'
        #assert obj.uuid == '5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4'
        assert obj.uuid == uuid.UUID('5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4')

    def test_assign2(self):
        # Plain strings are accepted and normalised to the dashless form.
        obj = PrimaryKeyUUIDFieldModel.objects.create()
        obj.uuid = '5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4'
        obj.save()
        assert str(obj.uuid) == '5b27d1bde7c346f3aaf211e4d32f60d4'

    def test_primary_key(self):
        obj = PrimaryKeyUUIDFieldModel.objects.create()
        assert obj.pk
        obj = PrimaryKeyUUIDFieldModel()
        assert not obj.pk
        # The primary key must be reset if save() fails.
        BrokenPrimaryKeyUUIDFieldModel.objects.create(unique=1)
        obj = BrokenPrimaryKeyUUIDFieldModel(unique=1)
        with pytest.raises(IntegrityError):
            obj.save()
        assert not obj.pk

    def test_serialize(self):
        obj = PrimaryKeyUUIDFieldModel.objects.create()
        obj.uuid = uuid.UUID("2e9280cfdc8e42bdbf0afa3043acaa7e")
        obj.save()
        serialized = serialize('json', PrimaryKeyUUIDFieldModel.objects.all())
        assertJSON(serialized)

    #def test_json(self):
    #    obj = PrimaryKeyUUIDFieldModel.objects.create()
    #    obj.save()
    #    serialized = json.dumps(obj)
    #    assertJSON(serialized)
        #deserialized = json.loads(serialized, object_hook=registry.object_hook)
        #
        #print 111, deserialized
        #
        #assert PrimaryKeyUUIDField(**deserialized).uuid == obj.uuid
paolapilar/juegos | refs/heads/master |
import pygame
import base
class Apple( base.Entity ) :
    """A collectable apple occupying one grid cell."""

    def __init__( self, i, j, cellSize, canvasWidth, canvasHeight ) :
        super( Apple, self ).__init__( i, j, 1, 1, cellSize, canvasWidth, canvasHeight )
        self._color = ( 255, 255, 0 )
        self._alive = True

    def draw( self, canvas ) :
        # Draw the apple as a filled square centred on (x, y).
        _left = self._x - 0.5 * self._cellSize
        _top = self._y - 0.5 * self._cellSize
        pygame.draw.rect( canvas, self._color, ( _left, _top, self._w, self._h ) )
paolapilar/juegos | refs/heads/master |
import pygame
import base
from collections import deque
class SnakePart( base.Entity ) :
    """One segment of the snake's body."""

    def __init__( self, i, j, color, cellSize, canvasWidth, canvasHeight ) :
        super( SnakePart, self ).__init__( i, j, 1, 1, cellSize, canvasWidth, canvasHeight )
        self.color = color
        # Previously occupied grid cell; the segment behind follows into it.
        self.lasti = i
        self.lastj = j

    def draw( self, canvas ) :
        _left = self._x - 0.5 * self._cellSize
        _top = self._y - 0.5 * self._cellSize
        pygame.draw.rect( canvas, self.color, ( _left, _top, self._w, self._h ) )
class Snake( base.Entity ) :
    """Player-controlled snake: a list of SnakePart segments whose head moves
    one grid cell per step and whose body follows cell by cell."""
    def __init__( self, i, j, cellSize, canvasWidth, canvasHeight ) :
        super( Snake, self ).__init__( i, j, 1, 1, cellSize, canvasWidth, canvasHeight )
        self._bodyParts = [ SnakePart( i, j, ( 50, 50, 50 ), cellSize, canvasWidth, canvasHeight ) ]
        self._speed = 800.
        self._direction = 'left'
        # Distance accumulated since the last cell step (pixels).
        self._displacement = 0.0
        self._frameTime = 0.001
        self._nx = int( canvasWidth / cellSize )
        self._ny = int( canvasHeight / cellSize )
        self._alive = True
    def alive( self ) :
        return self._alive
    def head( self ) :
        return self._bodyParts[0]
    def tail( self ) :
        return self._bodyParts[-1]
    def setDirection( self, direction ) :
        # With more than one segment, reversing into yourself is not allowed.
        if len( self._bodyParts ) > 1 :
            # check whether the requested direction is the exact opposite
            if ( self._direction == 'left' and direction == 'right' or
                 self._direction == 'right' and direction == 'left' or
                 self._direction == 'up' and direction == 'down' or
                 self._direction == 'down' and direction == 'up' ) :
                # keep the same direction
                self._direction = self._direction
            else :
                # change the direction
                self._direction = direction
        else :
            self._direction = direction
    def grow( self ) :
        # Append a new segment at the tail's previously occupied cell.
        _i = self.tail().lasti
        _j = self.tail().lastj
        _newPart = SnakePart( _i, _j,
                              ( 50, 50, 50 ),
                              self._cellSize,
                              self._canvasWidth,
                              self._canvasHeight )
        self._bodyParts.append( _newPart )
    def update( self ) :
        """Advance the snake one frame; wraps at the canvas edges and flags
        death on self-collision."""
        self._displacement = self._displacement + self._speed * self._frameTime
        if self._displacement > self._cellSize :
            self.head().lasti = self.head().i
            self.head().lastj = self.head().j
            # move one cell in the current direction
            if self._direction == 'up' :
                self.head().j += 1
            elif self._direction == 'down' :
                self.head().j -= 1
            elif self._direction == 'right' :
                self.head().i += 1
            elif self._direction == 'left' :
                self.head().i -= 1
            # Each body segment steps into the cell its predecessor just left.
            for k in range( 1, len( self._bodyParts ) ) :
                self._bodyParts[k].lasti = self._bodyParts[k].i
                self._bodyParts[k].lastj = self._bodyParts[k].j
                self._bodyParts[k].i = self._bodyParts[k-1].lasti
                self._bodyParts[k].j = self._bodyParts[k-1].lastj
            # reset the accumulator
            self._displacement = 0.0
        # NOTE(review): wrap-around compares against hard-coded 800x600
        # instead of the stored canvas size — confirm intended.
        if self.head()._x > 800. and self._direction == 'right' :
            self.head().i = 0
        if self.head()._x < 0. and self._direction == 'left' :
            self.head().i = self._nx
        if self.head()._y > 600. and self._direction == 'down' :
            self.head().j = self._ny
        if self.head()._y < 0. and self._direction == 'up' :
            self.head().j = 0
        for k in range( len( self._bodyParts ) ) :
            self._bodyParts[k].update()
        # Dying when the head overlaps any body segment.
        for i in range( 1, len( self._bodyParts ) ) :
            if self.head().hit( self._bodyParts[i] ):
                self._alive = False
    def draw( self, canvas ) :
        for k in range( len( self._bodyParts ) ) :
            self._bodyParts[k].draw( canvas )
##        # equivalent way of iterating
##        for bodyPart in self._bodyParts :
##            bodyPart.draw( canvas )
paolapilar/juegos | refs/heads/master |
import pygame
import world
class Text( object ) :
    """A pre-rendered text label centred at a fixed canvas position."""

    def __init__( self, x, y, message, size, color ) :
        super( Text, self ).__init__()
        self._message = message
        self._textFont = pygame.font.Font( None, size )
        # Render once up front; draw() only blits the cached surface.
        self._textSurface = self._textFont.render( message, True, color )
        self._textRect = self._textSurface.get_rect()
        self._textRect.center = ( x, y )

    def draw( self, canvas ) :
        canvas.blit( self._textSurface, self._textRect )
class Screen( object ) :
    """Base class for a game screen: a background color plus text labels."""

    def __init__( self, canvas, backgroundColor ) :
        super( Screen, self ).__init__()
        self._canvas = canvas
        self._backgroundColor = backgroundColor
        self._texts = []
        self._keys = None

    def setKeys( self, keys ) :
        # Latest key-state dict from the main loop.
        self._keys = keys

    def addText( self, text ) :
        self._texts.append( text )

    def draw( self ) :
        # Background first, then every registered label on top.
        self._canvas.fill( self._backgroundColor )
        for _text in self._texts :
            _text.draw( self._canvas )

    def update( self ) :
        # Subclasses override this with per-frame logic.
        pass
class MenuScreen( Screen ) :
    """Start-up screen: yellow background with the game title and a PLAY prompt."""
    def __init__( self, canvas ) :
        super( MenuScreen, self ).__init__( canvas, ( 255, 255, 0 ) )
        self._textTitle = Text( 100, 100, 'SNAKE', 50, ( 0, 0, 0 ) )
        self._textPlay = Text( 100, 400, 'PLAY', 40, ( 255, 255, 255 ) )
        self.addText( self._textTitle )
        self.addText( self._textPlay )
class GameOverScreen( Screen ) :
    """Black game-over screen with a continue prompt."""
    def __init__( self, canvas ) :
        super( GameOverScreen, self ).__init__( canvas, ( 0, 0, 0 ) )
        self._textGameOver = Text( 100, 100, 'GAME OVER :(', 50, ( 255, 0, 255 ) )
        self._textContinue = Text( 100, 400, 'Continue???', 40, ( 255, 255, 255 ) )
        self.addText( self._textGameOver )
        self.addText( self._textContinue )
class GameScreen( Screen ) :
    """Screen that owns and renders the game world itself."""

    def __init__( self, canvas, canvasWidth, canvasHeight ) :
        super( GameScreen, self ).__init__( canvas, ( 255, 255, 255 ) )
        self._world = world.World( 40, canvasWidth, canvasHeight )

    def draw( self ) :
        # Base class clears the background, then the world is drawn on top.
        super( GameScreen, self ).draw()
        self._world.draw( self._canvas )

    def update( self ) :
        self._world.setKeys( self._keys )
        self._world.update()

    def lose( self ) :
        return self._world.lose()

    def win( self ) :
        return self._world.win()
paolapilar/juegos | refs/heads/master |
import math
import random
import pygame
from base import Entity
from snake import Snake
from collectables import Apple
class Obstacle( Entity ) :
    """A red, impassable cell (or span of cells) on the board."""

    def __init__( self, i, j, di, dj, cellSize, canvasWidth, canvasHeight ) :
        super( Obstacle, self ).__init__( i, j, di, dj, cellSize, canvasWidth, canvasHeight )
        self._color = ( 255, 0, 0 )

    def draw( self, canvas ) :
        _left = self._x - 0.5 * self._cellSize
        _top = self._y - 0.5 * self._cellSize
        pygame.draw.rect( canvas, self._color, ( _left, _top, self._w, self._h ) )
class World( object ) :
    def __init__( self, cellSize, canvasWidth, canvasHeight, level = 1 ) :
        """Build the board: snake at the centre, level-dependent obstacles,
        walls and one initial apple."""
        super( World, self ).__init__()
        self._cellSize = cellSize
        self._canvasWidth = canvasWidth
        self._canvasHeight = canvasHeight
        self._level = level
        # Board dimensions in cells.
        self._nx = int( self._canvasWidth / self._cellSize )
        self._ny = int( self._canvasHeight / self._cellSize )
        self._maxLives = 4
        self._numLives = 4
        self._snake = Snake( int( self._nx / 2. ),
                             int( self._ny / 2. ),
                             self._cellSize,
                             self._canvasWidth,
                             self._canvasHeight )
        self._gameWin = False
        self._gameOver = False
        self._keys = None
        self._points = 0
        self._font = pygame.font.Font( None, 40 )
        self._obstacles = []
        # Cells no apple may spawn on (filled from the obstacles below).
        self._occupied = []
        self._apples = []
        self._createObstacles()
        self._createWalls()
        for obstacle in self._obstacles :
            self._occupied.append( ( obstacle.i, obstacle.j ) )
        self._createApples( 1 )
        # Snake speed scales with the level (levels 2 and 3 share a speed).
        if self._level == 1 :
            self._snake._speed = 800.
        elif self._level == 2 :
            self._snake._speed = 2100.
        elif self._level == 3 :
            self._snake._speed = 2100.
def _createObstacles( self ) :
if self._level == 1 :
return
elif self._level == 2 :
while len( self._obstacles ) < 5 :
_i = random.randint(0, self._nx)
_j = random.randint(0, self._ny)
if _i == int( self._nx / 2 ) and _j == int( self._ny / 2 ) :
continue
self._obstacles.append( Obstacle( _i, _j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
elif self._level == 3 :
while len( self._obstacles ) < 10 :
_i = random.randint(0, self._nx)
_j = random.randint(0, self._ny)
if _i == int( self._nx / 2 ) and _j == int( self._ny / 2 ) :
continue
self._obstacles.append( Obstacle( _i, _j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
def _createWalls( self ) :
if self._level == 1 :
return
elif self._level == 2 :
for i in range( self._nx ) :
self._obstacles.append( Obstacle( i, 0, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
self._obstacles.append( Obstacle( i, self._ny - 1, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
for j in range( self._ny ) :
self._obstacles.append( Obstacle( 0, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
self._obstacles.append( Obstacle( self._nx - 1, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
elif self._level == 3 :
for i in range( self._nx ) :
if i == int( self._nx / 2 ) :
continue
self._obstacles.append( Obstacle( i, 0, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
self._obstacles.append( Obstacle( i, self._ny - 1, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
for j in range( self._ny ) :
if j == int( self._ny / 2 ) :
continue
self._obstacles.append( Obstacle( 0, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
self._obstacles.append( Obstacle( self._nx - 1, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )
    def _createApples( self, maxApples = 20 ) :
        """Spawn apples on random free cells until maxApples exist.

        Only obstacle cells are in self._occupied — snake cells are not
        checked, so an apple can spawn under the snake (TODO confirm intent).
        """
        while True :
            # Keep a one-cell margin away from the borders.
            _i = random.randint( 2, self._nx - 2 )
            _j = random.randint( 2, self._ny - 2 )
            _canCreate = True
            for _occupiedPosition in self._occupied :
                _ioccupied = _occupiedPosition[0]
                _joccupied = _occupiedPosition[1]
                if _i == _ioccupied and _j == _joccupied :
                    _canCreate = False
                    break
            if _canCreate :
                self._apples.append( Apple( _i, _j, self._cellSize, self._canvasWidth, self._canvasHeight ) )
            if len( self._apples ) >= maxApples :
                break
    def setKeys( self, keys ) :
        """Receive the current key-state dict from the main loop (keys like 'up'/'down' mapping to booleans)."""
        self._keys = keys
def restart( self ) :
self._points = 0
self._snake = Snake( int( self._nx / 2. ),
int( self._ny / 2. ),
self._cellSize,
self._canvasWidth,
self._canvasHeight )
if self._level == 1 :
self._snake._speed = 800.
elif self._level == 2 :
self._snake._speed = 2100.
elif self._level == 3 :
self._snake._speed = 2100.
self._apples = []
self._obstacles = []
self._occupied = []
self._createObstacles()
self._createWalls()
for obstacle in self._obstacles :
self._occupied.append( ( obstacle.i, obstacle.j ) )
self._createApples( 1 )
def _drawGrid( self, canvas ) :
for i in range( self._nx ) :
xline = ( i + 1 ) * self._cellSize
pygame.draw.line( canvas,
( 0, 0, 0 ),
( xline, 0 ),
( xline, self._canvasHeight ),
1 )
for j in range( self._ny ) :
yline = ( j + 1 ) * self._cellSize
pygame.draw.line( canvas,
( 0, 0, 0 ),
( 0, yline ),
( self._canvasWidth, yline ),
1 )
def _drawScore( self, canvas ) :
_textSurface = self._font.render( 'Puntaje: %d - Vidas: %d' % ( self._points, self._numLives ),
True,
( 0, 0, 255 ) )
_textSurface.get_rect().center = ( 30, 30 )
canvas.blit( _textSurface, _textSurface.get_rect() )
def draw( self, canvas ) :
self._drawGrid( canvas )
self._snake.draw( canvas )
for obstacle in self._obstacles :
obstacle.draw( canvas )
for apple in self._apples :
apple.draw( canvas )
self._drawScore( canvas )
    def update( self ) :
        """Advance one game tick: steer the snake from the key state, resolve
        obstacle collisions (costing a life), let the snake eat apples, and
        handle level promotion / win / game-over transitions.
        """
        # steer from the most recent key-state dict supplied via setKeys()
        if self._keys :
            if self._keys['up'] == True :
                self._snake.setDirection( 'up' )
            elif self._keys['down'] == True :
                self._snake.setDirection( 'down' )
            elif self._keys['right'] == True :
                self._snake.setDirection( 'right' )
            elif self._keys['left'] == True :
                self._snake.setDirection( 'left' )
        self._snake.update()
        # hitting any obstacle kills the snake this tick
        for obstacle in self._obstacles :
            obstacle.update()
            if self._snake.head().hit( obstacle ) :
                self._snake._alive = False
        if not self._snake.alive() :
            self._numLives = self._numLives - 1
            if self._numLives >= 1 :
                self.restart()
            else :
                self._gameOver = True
            return
        # range(len(...)) is evaluated once, so an apple appended by
        # _createApples() below is not visited until the next tick.
        # NOTE(review): restart() inside this loop replaces self._apples while
        # iterating by index — looks safe for the 1-apple case; confirm.
        for i in range( len( self._apples ) ) :
            self._apples[i].update()
            if self._snake.head().hit( self._apples[i] ) :
                self._apples[i]._alive = False
                self._snake.grow()
                self._points = self._points + 1
                self._createApples( 1 )
                # level progression: 5 points -> level 2, 10 -> level 3,
                # 15 points on level 3 -> win; lives reset to 4 on promotion
                if self._level == 1 and self._points >= 5 :
                    self._level = 2
                    self._numLives = 4
                    self._points = 0
                    self.restart()
                elif self._level == 2 and self._points >= 10 :
                    self._level = 3
                    self._numLives = 4
                    self._points = 0
                    self.restart()
                elif self._level == 3 and self._points >= 15 :
                    self._gameWin = True
                    return
        # drop apples that were eaten this tick
        _newApples = []
        for apple in self._apples :
            if apple._alive :
                _newApples.append( apple )
        self._apples = _newApples
    def lose( self ) :
        """Return True once the player has run out of lives (set in update())."""
        return self._gameOver
def win( self ) :
return self._gameWin | Python | 266 | 33.759399 | 130 | /world.py | 0.465607 | 0.450032 |
paolapilar/juegos | refs/heads/master |
import pygame
import random
import time
from snake import Snake
from collectables import Apple
import screen
class Game :
    """Top-level application: owns the pygame window, polls input events,
    and delegates per-frame logic to the active screen object (menu /
    game / game-over), switching screens on win, lose, Enter or Escape.
    """
    def __init__( self ) :
        """Create the 800x600 window and start on the menu screen."""
        pygame.init()
        self._canvasWidth = 800
        self._canvasHeight = 600
        self._canvas = pygame.display.set_mode( ( self._canvasWidth, self._canvasHeight ) )
        self._gameExit = False
        # latest pressed/released state of the keys the screens care about
        self._keys = { 'up' : False,
                       'down' : False,
                       'right' : False,
                       'left' : False,
                       'enter' : False,
                       'escape' : False }
        self._screen = screen.MenuScreen( self._canvas )
        self._screenName = 'menu'
    def _getEvents( self ) :
        """Drain the pygame event queue into self._keys / self._gameExit."""
        for event in pygame.event.get() :
            if event.type == pygame.QUIT :
                self._gameExit = True
            elif event.type == pygame.KEYDOWN :
                if event.key == pygame.K_UP :
                    self._keys['up'] = True
                elif event.key == pygame.K_DOWN :
                    self._keys['down'] = True
                elif event.key == pygame.K_RIGHT :
                    self._keys['right'] = True
                elif event.key == pygame.K_LEFT :
                    self._keys['left'] = True
                elif event.key == pygame.K_RETURN :
                    self._keys['enter'] = True
                elif event.key == pygame.K_ESCAPE :
                    self._keys['escape'] = True
            elif event.type == pygame.KEYUP :
                if event.key == pygame.K_UP :
                    self._keys['up'] = False
                elif event.key == pygame.K_DOWN :
                    self._keys['down'] = False
                elif event.key == pygame.K_RIGHT :
                    self._keys['right'] = False
                elif event.key == pygame.K_LEFT :
                    self._keys['left'] = False
                elif event.key == pygame.K_RETURN :
                    self._keys['enter'] = False
                elif event.key == pygame.K_ESCAPE :
                    self._keys['escape'] = False
    def _updateScreen( self ) :
        """Tick the active screen, then apply the screen-transition rules."""
        self._screen.setKeys( self._keys )
        self._screen.update()
        self._screen.draw()
        # transitions: menu --enter--> game; game --lose--> gameover;
        # game --win--> menu; gameover --enter--> game / --escape--> menu
        if self._screenName == 'menu' and self._keys['enter'] == True :
            self._screen = screen.GameScreen( self._canvas, self._canvasWidth, self._canvasHeight )
            self._screenName = 'game'
        elif self._screenName == 'game' and self._screen.lose() :
            self._screen = screen.GameOverScreen( self._canvas )
            self._screenName = 'gameover'
        elif self._screenName == 'game' and self._screen.win() :
            self._screen = screen.MenuScreen( self._canvas )
            self._screenName = 'menu'
        elif self._screenName == 'gameover' and self._keys['enter'] == True :
            self._screen = screen.GameScreen( self._canvas, self._canvasWidth, self._canvasHeight )
            self._screenName = 'game'
        elif self._screenName == 'gameover' and self._keys['escape'] == True :
            self._screen = screen.MenuScreen( self._canvas )
            self._screenName = 'menu'
    def run( self ) :
        """Main loop: poll events and tick/draw the active screen until quit."""
        while not self._gameExit :
            self._getEvents()
            self._updateScreen()
            # update the display
            pygame.display.update()
            # short sleep so the loop does not spin at 100% CPU
            time.sleep( 0.001 )
if __name__ == '__main__' :
_game = Game()
_game.run() | Python | 97 | 35.257732 | 99 | /main.py | 0.493603 | 0.490759 |
paolapilar/juegos | refs/heads/master |
import math
def grid2screen( i, j, cellSize, canvasWidth, canvasHeight ) :
    """Map a grid cell (i, j) to the pixel coordinates of its centre.

    The y axis is flipped: grid row 0 sits at the bottom of the canvas.
    ``canvasWidth`` is unused but kept for signature symmetry with
    ``screen2grid``.
    """
    _screenX = ( i + 0.5 ) * cellSize
    _screenY = canvasHeight - ( j + 0.5 ) * cellSize
    return _screenX, _screenY
def screen2grid( x, y, cellSize, canvasWidth, canvasHeight ) :
i = math.floor( x / cellSize - 0.5 )
j = math.floor( ( canvasHeight - y ) / cellSize - 0.5 )
return i, j | Python | 11 | 30.818182 | 62 | /utils.py | 0.6 | 0.571429 |
paolapilar/juegos | refs/heads/master |
import math
import utils
class Entity( object ) :
    def __init__( self, i, j, di, dj, cellSize, canvasWidth, canvasHeight ) :
        """Grid-anchored entity: (i, j) is the anchor cell, (di, dj) the
        size in cells; screen coordinates derive from utils.grid2screen
        (y axis flipped relative to the grid).
        """
        super( Entity, self ).__init__()
        self.i = i
        self.j = j
        self._cellSize = cellSize
        self._canvasWidth = canvasWidth
        self._canvasHeight = canvasHeight
        self._di = di
        self._dj = dj
        # pixel position of the anchor cell's centre
        self._x, self._y = utils.grid2screen( i, j, cellSize, canvasWidth, canvasHeight )
        # bounding-box size in pixels
        self._w = di * cellSize
        self._h = dj * cellSize
        # collision centre; the extra half-cell shift applies only to even
        # spans — NOTE(review): the 0.0 offset for odd spans is correct for
        # 1x1 entities but unverified for larger odd sizes.
        self._xc = self._x + self._cellSize * ( math.floor( ( self._di - 1 ) / 2. ) + 0.5 if self._di % 2 == 0 else 0.0 )
        self._yc = self._y + self._cellSize * ( math.floor( ( self._dj - 1 ) / 2. ) + 0.5 if self._dj % 2 == 0 else 0.0 )
    # --- pixel-space accessors (values computed in __init__ / update) ---
    def x( self ) :
        # anchor-cell centre x in pixels
        return self._x
    def y( self ) :
        # anchor-cell centre y in pixels (y axis flipped vs grid)
        return self._y
    def xc( self ) :
        # collision-centre x, consumed by hit()
        return self._xc
    def yc( self ) :
        # collision-centre y, consumed by hit()
        return self._yc
    def w( self ) :
        # width in pixels (di * cellSize)
        return self._w
    def h( self ) :
        # height in pixels (dj * cellSize)
        return self._h
    def update( self ) :
        """Recompute screen-space position from the current (i, j) grid cell.

        Call after mutating ``self.i`` / ``self.j``; mirrors the
        centre-offset computation in __init__.
        """
        self._x, self._y = utils.grid2screen( self.i, self.j,
                                              self._cellSize,
                                              self._canvasWidth,
                                              self._canvasHeight )
        # collision centre, duplicated from __init__ (see the note there
        # about the even/odd span offset)
        self._xc = self._x + self._cellSize * ( math.floor( ( self._di - 1 ) / 2. ) + 0.5 if self._di % 2 == 0 else 0.0 )
        self._yc = self._y + self._cellSize * ( math.floor( ( self._dj - 1 ) / 2. ) + 0.5 if self._dj % 2 == 0 else 0.0 )
def hit( self, other ) :
_dx = abs( self._xc - other.xc() )
_dy = abs( self._yc - other.yc() )
if ( _dx < ( self._w / 2. ) + ( other.w() / 2. ) and
_dy < ( self._h / 2. ) + ( other.h() / 2. ) ) :
return True
else :
return False | Python | 60 | 30.200001 | 121 | /base.py | 0.453526 | 0.433226 |
JoanJaraBosch/web-personal-django | refs/heads/master | # Generated by Django 2.2.4 on 2019-08-22 20:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portafolio', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='project',
name='createdDate',
field=models.DateTimeField(auto_now_add=True, verbose_name='Data de creació'),
),
migrations.AlterField(
model_name='project',
name='description',
field=models.TextField(verbose_name='Desccripció'),
),
migrations.AlterField(
model_name='project',
name='image',
field=models.ImageField(upload_to='projects', verbose_name='Imatge'),
),
migrations.AlterField(
model_name='project',
name='title',
field=models.CharField(max_length=200, verbose_name='Títol'),
),
migrations.AlterField(
model_name='project',
name='updatedDate',
field=models.DateTimeField(auto_now=True, verbose_name='Data dactualització'),
),
]
| Python | 38 | 29.394737 | 90 | /webpersonal/portafolio/migrations/0002_auto_20190822_2247.py | 0.568831 | 0.549784 |
JoanJaraBosch/web-personal-django | refs/heads/master | from django.db import models
# Create your models here.
class Project(models.Model):
    """A portfolio project shown on the personal site (field labels are in Catalan)."""
    # short display name of the project
    title = models.CharField(max_length=200, verbose_name = 'Títol')
    # optional external link with more information
    moreinfo = models.URLField(null=True, blank=True,verbose_name = 'Mes Informació')
    # free-text description
    description = models.TextField(verbose_name = 'Desccripció')
    # cover image, stored under MEDIA_ROOT/projects
    image = models.ImageField(verbose_name = 'Imatge', upload_to = 'projects')
    # timestamps maintained automatically by Django
    createdDate = models.DateTimeField(auto_now_add=True, verbose_name = 'Data de creació')
    updatedDate = models.DateTimeField(auto_now=True, verbose_name = 'Data dactualització')
    class Meta:
        # admin display names and newest-first default ordering
        verbose_name = 'Projecte'
        verbose_name_plural = 'Projectes'
        ordering = ["-createdDate"]
def __str__(self):
return self.title | Python | 18 | 40.555557 | 91 | /webpersonal/portafolio/models.py | 0.690763 | 0.686747 |
JoanJaraBosch/web-personal-django | refs/heads/master | # Generated by Django 2.2.4 on 2019-08-22 20:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portafolio', '0004_auto_20190822_2251'),
]
operations = [
migrations.AlterField(
model_name='project',
name='description',
field=models.TextField(verbose_name='Desccripció'),
),
migrations.AlterField(
model_name='project',
name='moreinfo',
field=models.CharField(max_length=200, verbose_name='Mes Informació'),
),
]
| Python | 23 | 24.913044 | 82 | /webpersonal/portafolio/migrations/0005_auto_20190822_2252.py | 0.587248 | 0.530201 |
JoanJaraBosch/web-personal-django | refs/heads/master | # Generated by Django 2.2.4 on 2019-08-22 20:50
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('portafolio', '0002_auto_20190822_2247'),
]
operations = [
migrations.AddField(
model_name='project',
name='moreinfo',
field=models.TextField(default=django.utils.timezone.now, verbose_name='Mes Informació'),
preserve_default=False,
),
]
| Python | 20 | 24.299999 | 101 | /webpersonal/portafolio/migrations/0003_project_moreinfo.py | 0.626482 | 0.565217 |
JoanJaraBosch/web-personal-django | refs/heads/master | # Generated by Django 2.2.4 on 2019-08-22 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portafolio', '0003_project_moreinfo'),
]
operations = [
migrations.AlterField(
model_name='project',
name='description',
field=models.CharField(max_length=200, verbose_name='Desccripció'),
),
]
| Python | 18 | 22.444445 | 79 | /webpersonal/portafolio/migrations/0004_auto_20190822_2251.py | 0.609005 | 0.556872 |
birkin/ezb_dbprx | refs/heads/master | # -*- coding: utf-8 -*-
import datetime, json, os
import flask
from ezb_dbprx.config import settings
from ezb_dbprx.utils import logger_setup, db_handler
from flask.ext.basicauth import BasicAuth # http://flask-basicauth.readthedocs.org/en/latest/
## setup
app = flask.Flask(__name__)
log = logger_setup.setup_logger()
#
app.config['BASIC_AUTH_USERNAME'] = settings.BASIC_AUTH_USERNAME
app.config['BASIC_AUTH_PASSWORD'] = settings.BASIC_AUTH_PASSWORD
basic_auth = BasicAuth(app)
## experimentation ##
@app.route( '/hello1/', methods=['GET'] )
def hi_a():
""" Tests simple json response return. """
return flask.jsonify( {'hello': 'world'} )
@app.route( '/hello2/', methods=['GET'] )
def hi_b():
""" Tests logging. """
log.info( u'hi there' )
return flask.jsonify( {'hello': 'world2'} )
@app.route( '/basic_auth/', methods=['GET'] )
@basic_auth.required
def try_basic_auth():
""" Tests basic-auth. """
log.info( u'in proxy_app.try_basic_auth()' )
return flask.jsonify( {'got': 'through'} )
@app.route( '/forbidden/', methods=['GET'] )
def try_forbidden():
""" Tests forbidden response. """
log.debug( u'in proxy_app.try_forbidden()' )
return flask.abort( 403 )
@app.route( '/post_test/', methods=['POST'] )
def handle_post():
""" Tests perceiving params response return. """
value_a = flask.request.form['key_a'].strip()
return flask.jsonify( {u'key_a': value_a} )
## real work ##
@app.route( u'/my_ip/', methods=['GET'] )
def show_ip():
    """ Echoes the caller's IP address as json.
        Originally a connectivity test; still handy for debugging. """
    client_ip = flask.request.remote_addr
    log.debug( u'in proxy_app.show_ip(); remote_addr, `%s`' % client_ip )
    return flask.jsonify( {u'client_ip': client_ip} )
@app.route( u'/search_new_request/', methods=['GET'] )
@basic_auth.required
def search():
    """ Searches for new requests via db_handler.search_new_request().

        Basic-auth is enforced by the decorator; on top of that the caller's
        IP must appear in settings.LEGIT_IPS or a 403 is returned. The json
        response carries the result list plus request-type and timestamp. """
    client_ip = flask.request.remote_addr
    # IP whitelist check in addition to basic-auth
    if not client_ip in settings.LEGIT_IPS.keys():
        log.debug( u'- in proxy_app.search_new_request(); client_ip `%s` not in LEGIT_IPS; returning forbidden' % client_ip )
        return flask.abort( 403 )
    db = db_handler.DB_Handler( log )
    result_list = db.search_new_request()
    return_dict = {
        u'request_type': u'search_new_request',
        u'datetime': unicode( datetime.datetime.now() ),
        u'result': result_list }
    return flask.jsonify( return_dict )
@app.route( u'/update_request_status/', methods=['POST'] )
@basic_auth.required
def update_request_status():
""" Updates db request status. """
log.debug( u'- in proxy_app.update_request_status(); starting' )
client_ip = flask.request.remote_addr
log.debug( u'- in proxy_app.update_request_status(); client_ip, `%s`' % client_ip )
if not client_ip in settings.LEGIT_IPS.keys():
log.debug( u'- in proxy_app.update_request_status(); returning forbidden' )
return flask.abort( 403 )
log.debug( u'- in proxy_app; update_request_status(); ip legit' )
log.debug( u'- in proxy_app; update_request_status(); flask.request.form.keys(), %s' % sorted(flask.request.form.keys()) )
db_id = flask.request.form[u'db_id'] # flask will return a '400 - Bad Request' if getting a value fails
status = flask.request.form[u'status']
try:
assert status in [ u'in_process', u'processed' ] # never changing it to its original 'not_yet_processed'
assert db_id.isdigit()
except Exception as e:
log.error( u'- in proxy_app; update_request_status(); params grabbed; keys good but value(s) bad; db_id, `%s`; status, `%s`' % (db_id, status) )
return flask.abort( 400, u'Bad data.' )
log.debug( u'- in proxy_app; update_request_status(); params grabbed & data is valid' )
db = db_handler.DB_Handler( log )
result_dict = db.update_request_status( db_id, status )
assert result_dict.keys() == [ u'status_update_result' ]
return_dict = {
u'request_type': u'update_request_status',
u'db_id': db_id,
u'requested_new_status': status,
u'datetime': unicode( datetime.datetime.now() ),
u'result': result_dict[ u'status_update_result' ]
}
return flask.jsonify( return_dict )
@app.route( u'/add_history_note/', methods=['POST'] )
@basic_auth.required
def add_history_note():
    """ Adds a history-table entry for the posted db_id and returns json.

        Bug fix: the original body read `result dbh.add_history_note( db_id )`,
        which is a SyntaxError, references the nonexistent name `dbh` (the
        local is `db_h`), and calls a method DB_Handler does not define —
        its method is add_history_entry(). """
    log.debug( u'- in proxy_app.add_history_note(); starting' )
    if not flask.request.remote_addr in settings.LEGIT_IPS.keys():
        log.debug( u'- in proxy_app.add_history_note(); returning forbidden for ip, `%s`' % flask.request.remote_addr )
        return flask.abort( 403 )
    ( db_id, db_h ) = ( flask.request.form[u'db_id'], db_handler.DB_Handler(log) )  # flask will return a '400 - Bad Request' if getting a value fails
    result = db_h.add_history_entry( db_id )  # fixed: was `result dbh.add_history_note( db_id )`
    return_dict = {
        u'request_type': u'add_history_note', u'db_id': db_id,
        u'datetime': unicode( datetime.datetime.now() ), u'result': result }
    return flask.jsonify( return_dict )
# if __name__ == '__main__':
# if os.getenv('DEVBOX') == 'true':
# app.run( host='0.0.0.0', debug=True )
# else:
# app.run()
| Python | 141 | 35.716312 | 152 | /proxy_app.py | 0.630481 | 0.624879 |
birkin/ezb_dbprx | refs/heads/master | # -*- coding: utf-8 -*-
""" Handles db connection and executes sql. """
import datetime, json, os, pprint, random, sys
import MySQLdb
from ezb_dbprx.config import settings
class DB_Handler(object):
def __init__(self, file_logger ):
""" Sets up basics. """
self.db_host = settings.DB_HOST
self.db_port = settings.DB_PORT
self.db_username = settings.DB_USERNAME
self.db_password = settings.DB_PASSWORD
self.db_name = settings.DB_NAME
self.connection_object = None # populated during queries
self.cursor_object = None # populated during queries
self.file_logger = file_logger
self.key_mapper = { # converts database fields into more generic keys
u'alt_edition': u'preference_alternate_edition', # needed?
u'barcode': u'patron_barcode',
u'bibno': u'item_bib_number', # needed?
u'created': u'db_create_date',
u'email': u'patron_email',
u'eppn': u'patron_shib_eppn',
u'firstname': u'patron_name_first',
u'group': u'patron_shib_group',
u'id': u'db_id',
u'isbn': u'item_isbn',
u'lastname': u'patron_name_last',
u'loc': u'libary_location', # needed?
u'name': u'patron_name_firstlast',
u'patronId': u'patron_id', # needed?
u'pref': u'preference_quick', # needed?
u'request_status': u'db_request_status',
u'sfxurl': u'item_openurl',
u'staffnote': u'staff_note',
u'title': u'item_title',
u'volumes': u'item_volumes',
u'wc_accession': u'item_worldcat_id'
}
## execute_sql() ##
def execute_sql(self, sql):
""" Executes sql; returns tuple of row-dicts.
Example return data: ( {row1field1key: row1field1value, row1field2key: row1field2value}, {row2field1key: row2field1value, row2field2key: row2field2value} )
Called by self.search_new_request(), self.update_request_status(), and self.update_history_note() """
try:
self._setup_db_connection()
if not self.cursor_object:
return
self.cursor_object.execute( sql )
dict_list = self.cursor_object.fetchall() # really a tuple of row-dicts
dict_list = self._unicodify_resultset( dict_list )
return dict_list
except Exception as e:
message = u'in db_handler.execute_sql(); error: %s' % unicode( repr(e).decode(u'utf8', u'replace') )
self.file_logger.error( message )
return None
finally:
self._close_db_connection()
def _setup_db_connection( self ):
""" Sets up connection; populates instance attributes.
Called by execute_sql() """
self.file_logger.debug( u'in db_handler._setup_db_connection(); starting' )
try:
self.connection_object = MySQLdb.connect(
host=self.db_host, port=self.db_port, user=self.db_username, passwd=self.db_password, db=self.db_name )
self.file_logger.debug( u'in db_handler._setup_db_connection(); connection-object set' )
self.cursor_object = self.connection_object.cursor(MySQLdb.cursors.DictCursor)
return
except Exception as e:
message = u'in db_handler._setup_db_connection(); error: %s' % unicode( repr(e).decode(u'utf8', u'replace') )
self.file_logger.error( message )
def _unicodify_resultset( self, dict_list ):
""" Takes tuple of row-dicts;
Makes true list and ensures all keys and values are unicode;
Returns list of type-corrected dicts.
Called by execute_sql() """
result_list = []
for row_dict in dict_list:
new_row_dict = {}
for key,value in row_dict.items():
if type(value) == datetime.datetime:
value = unicode(value)
new_row_dict[ unicode(key) ] = unicode(value)
result_list.append( new_row_dict )
return result_list
def _close_db_connection( self ):
""" Closes db connection.
Called by execute_sql() """
try:
self.cursor_object.close()
self.connection_object.close()
return
except Exception as e:
message = u'in db_handler._close_db_connection(); error: %s' % unicode( repr(e).decode(u'utf8', u'replace') )
self.file_logger.error( message )
## search_new_request() ##
def search_new_request( self ):
""" Returns json string of list of dicts on find, empty-list on no-find.
Called by: proxy_app.search_new_request() """
sql = settings.SEARCH_SQL
self.file_logger.debug( u'in db_handler.search_new_request; sql, %s' % sql )
raw_dict_list = self.execute_sql( sql )
self.file_logger.debug( u'in db_handler.search_new_request; raw_dict_list, %s' % raw_dict_list )
return_val = []
if raw_dict_list:
if len( raw_dict_list ) > 0:
return_val = self._massage_raw_data( raw_dict_list )
return return_val
def _massage_raw_data( self, raw_dict_list ):
""" Makes keys more generic.
Returns list of updated dicts
Called by search_new_request() .
Possible TODO: add None to self.key_mapper if item isn't needed; test for that here and don't return it. """
updated_list = []
for entry in raw_dict_list:
massaged_dict = {}
for (key, value) in entry.items():
new_key = self.key_mapper[key]
massaged_dict[new_key] = value
updated_list.append( massaged_dict )
return updated_list
## update_request_status ##
def update_request_status( self, db_id, status ):
""" Updates request table status field.
Called by proxy_app.update_request_status() """
## update the status
update_sql = settings.UPDATE_REQUEST_STATUS_SQL_PATTERN % ( status, db_id )
self.file_logger.debug( u'in db_handler.update_request_status(); update_sql, %s' % update_sql )
try:
self.execute_sql( update_sql )
except Exception as e:
self.file_logger.error( u'in db_handler.update_request_status(); problem executing update; exception: %s' % e )
return { u'status_update_result': u'status_update_failed_on_exception' }
## confirm the update was successful
confirmation_sql = settings.CONFIRM_REQUEST_STATUS_SQL_PATTERN % db_id
self.file_logger.debug( u'in db_handler.update_request_status(); confirmation_sql, %s' % confirmation_sql )
try:
result_dict_list = self.execute_sql( confirmation_sql )
self.file_logger.debug( u'in db_handler.update_request_status; result_dict_list, %s' % result_dict_list )
if result_dict_list[0][u'request_status'] == status:
return { u'status_update_result': u'status_updated' }
else:
return { u'status_update_result': u'status_confirmation_failed' }
except Exception as e:
self.file_logger.error( u'in db_handler.update_request_status(); problem executing confirmation; exception: %s' % e )
return { u'status_update_result': u'status_confirmation_failed_on_exception' }
## add history note ##
def add_history_entry( self, request_id ):
""" Creates history table record.
Called by proxy_app.add_history_note() """
add_history_sql = settings.CREATE_HISTORY_ENTRY_PATTERN % request_id
self.file_logger.debug( u'in db_handler.add_history_entry(); add_history_sql, %s' % add_history_sql )
result = self.execute_sql( sql )
self.file_logger.debug( u'in db_handler.add_history_entry(); result, `%s`' % result )
return
# end class DB_Handler()
| Python | 174 | 45.155174 | 167 | /utils/db_handler.py | 0.588147 | 0.585408 |
birkin/ezb_dbprx | refs/heads/master | # -*- coding: utf-8 -*-
""" Handles log setup. """
import logging, os
import logging.handlers
from ezb_dbprx.config import settings
def setup_logger():
    """ Returns the shared 'ezb_dbprx' logger, writing to a rotating file.

        Improvement: ``logging.getLogger`` returns the same logger object on
        every call, so each call to setup_logger() used to attach one more
        RotatingFileHandler, duplicating every subsequent log line. The
        handler is now attached only once. """
    logger = logging.getLogger( u'ezb_dbprx' )
    level_dict = { u'debug': logging.DEBUG, u'info':logging.INFO }
    logger.setLevel( level_dict[settings.LOG_LEVEL] )
    if not logger.handlers:  # attach the file handler only on the first call
        filename = u'%s/ezb_dbprx.log' % settings.LOG_DIR
        formatter = logging.Formatter( u'[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s' )
        file_handler = logging.handlers.RotatingFileHandler( filename, maxBytes=(5*1024*1024), backupCount=1 )
        file_handler.setFormatter( formatter )
        logger.addHandler( file_handler )
    return logger
| Python | 20 | 35.75 | 106 | /utils/logger_setup.py | 0.680272 | 0.665306 |
birkin/ezb_dbprx | refs/heads/master | # -*- coding: utf-8 -*-
import json, os
## db access
DB_HOST = unicode( os.environ.get(u'ezb_dbprx__DB_HOST') )
DB_PORT = int( unicode(os.environ.get(u'ezb_dbprx__DB_PORT')) )
DB_USERNAME = unicode( os.environ.get( u'ezb_dbprx__DB_USERNAME') )
DB_PASSWORD = unicode( os.environ.get(u'ezb_dbprx__DB_PASSWORD') )
DB_NAME = unicode( os.environ.get( u'ezb_dbprx__DB_NAME') )
## db sql
SEARCH_SQL = unicode( os.environ.get( u'ezb_dbprx__SEARCH_SQL') ) # for db_handler.DB_Handler.search_new_request()
UPDATE_REQUEST_STATUS_SQL_PATTERN = unicode( os.environ.get( u'ezb_dbprx__UPDATE_REQUEST_STATUS_SQL_PATTERN') ) # for db_handler.DB_Handler.update_request_status()
CONFIRM_REQUEST_STATUS_SQL_PATTERN = unicode( os.environ.get( u'ezb_dbprx__CONFIRM_REQUEST_STATUS_SQL_PATTERN') ) # for db_handler.DB_Handler.update_request_status()
CREATE_HISTORY_ENTRY_PATTERN = unicode( os.environ.get( u'ezb_dbprx__CREATE_HISTORY_ENTRY_SQL_PATTERN') ) # for db_handler.DB_Handler.add_history_entry()
## file-logger
LOG_DIR = unicode( os.environ.get(u'ezb_dbprx__LOG_DIR') )
LOG_LEVEL = unicode( os.environ.get(u'ezb_dbprx__LOG_LEVEL') )
## basic auth
BASIC_AUTH_USERNAME = unicode( os.environ.get(u'ezb_dbprx__BASIC_AUTH_USERNAME') )
BASIC_AUTH_PASSWORD = unicode( os.environ.get(u'ezb_dbprx__BASIC_AUTH_PASSWORD') )
## other
LEGIT_IPS = json.loads( unicode(os.environ.get(u'ezb_dbprx__LEGIT_IPS')) )
# end
| Python | 31 | 44.161289 | 166 | /config/settings.py | 0.711429 | 0.710714 |
byambaa1982/combine_tables | refs/heads/master | import pandas as pd
import numpy as np
# ------- Read CSV data ----------
# stop=pd.read_csv('Arkiv/stops.txt')
stop_times=pd.read_csv('Arkiv/stop_times.txt')
# calendar=pd.read_csv('Arkiv/calendar.txt')
calendar_dates=pd.read_csv('Arkiv/calendar_dates.txt')
trips=pd.read_csv('Arkiv/trips.txt')
# ----------Conditional Subset ----------
new_calendar_dates=calendar_dates[(calendar_dates.date>20200225 )& (calendar_dates.date<20200301)]
#----------- remove useless columns from calendar data----------------------------
new_csv=stop_times.iloc[0:,0:5]
# ---------------Merge them on service_id and make a new column named "unique_trip_id"
a=trips
b=new_calendar_dates
c=pd.merge(a, b, on='service_id', how='left')
c['unique_trip_id']=c.index+1
e=stop_times
f=pd.merge(c, e, on='trip_id', how='left')
df=f
# result['unique_trip_id'] = result.groupby(['trip_id','end_date']).ngroup()
# result=result.sort_values(by=['unique_trip_id', 'stop_sequence'])
# unique_trip_id=1
# new=[]
# for i in range(0,len(my_list)-1):
# if my_list[i] == my_list[i+1]:
# new.append(unique_trip_id)
# else:
# unique_trip_id+=1
# new.append(unique_trip_id)
# -------- Make int into string and combine two column on new columns-------
df['unique_trip_id']=df['unique_trip_id'].map(lambda x: x+1)
df['first']=df['unique_trip_id'].map(lambda x: str(x))
df['second']=df['stop_sequence'].map(lambda x: str(x))
df['first_date']=df['start_date'].map(lambda x: str(x))
df['second_date']=df['end_date'].map(lambda x: str(x))
df['unique_sub_trip_id']= df[['first', 'second']].apply(lambda x: '.'.join(x), axis=1)
df['arrival_time']= df[['second_date', 'arrival_time']].apply(lambda x: ' '.join(x), axis=1)
df['departure_time']= df[['first_date', 'departure_time']].apply(lambda x: ' '.join(x), axis=1)
# --------- Rerange data ---------------
df=df[['unique_trip_id','unique_sub_trip_id','trip_id','stop_id','stop_sequence','arrival_time','departure_time']]
unique_trip_id_list=df.unique_trip_id.unique().tolist()
# For each trip, shift arrival_time / stop_sequence up by one row so every row
# pairs a departure stop with the *next* stop's arrival, then write the result.
df_list = []
for trip_id in unique_trip_id_list:
    # .copy() fixes the pandas SettingWithCopyWarning: df.loc[...] yields a
    # slice whose column assignments are not guaranteed to stick otherwise.
    trip_rows = df.loc[df['unique_trip_id'] == trip_id].copy()
    trip_rows['arrival_time'] = trip_rows['arrival_time'].shift(-1)
    trip_rows['stop_sequence'] = trip_rows['stop_sequence'].shift(-1)
    df_list.append(trip_rows)
final_result = pd.concat(df_list)
final_result.to_csv('result.csv')
| Python | 75 | 29.719999 | 114 | /main.py | 0.631076 | 0.615017 |
byambaa1982/combine_tables | refs/heads/master | import pandas as pd
import numpy as np
stop=pd.read_csv('Arkiv/stops.txt')
stop_times=pd.read_csv('Arkiv/stop_times.txt')
calendar=pd.read_csv('Arkiv/calendar.txt')
calendar_dates=pd.read_csv('Arkiv/calendar_dates.txt')
trips=pd.read_csv('Arkiv/trips.txt')
print(stop.shape)
print(stop_times.shape)
print(calendar.shape)
print(calendar_dates.shape)
print(trips.shape)
# ----------Conditional Subset ----------
new_calendar_dates=calendar_dates[(calendar_dates.date>20200225 )& (calendar_dates.date<20200301)]
print(new_calendar_dates.date.min())
print(new_calendar_dates.date.max())
print(new_calendar_dates.shape)
trips=trips.iloc[0:,1:3]
print(trips.head())
print(trips.shape) | Python | 27 | 24.407408 | 98 | /test.py | 0.741606 | 0.713869 |
darshanime/scrapy-tutorials | refs/heads/master | from scrapy import Item, Field
class CardekhoItem(Item):
title = Field()
price = Field()
distance = Field() | Python | 6 | 19.166666 | 30 | /cardekho/cardekho/items.py | 0.658333 | 0.658333 |
darshanime/scrapy-tutorials | refs/heads/master | from housing.items import HousingItemBuy
from scrapy import Spider
from scrapy.http.request import Request
#To parse the JSON received
import json
class HousingSpider(Spider):
name = "housing"
allowed_domains = ["housing.com"]
custom_settings = {'USER_AGENT' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}
def start_requests(self):
#We have 1080 pages to fetch
for count in range(1,1081):
print "Getting page : %s" %count
yield Request("https://buy.housing.com/api/v1/buy/index/filter?poly=f97f947ffae6408ac295&results_per_page=30&p=" + str(count) + "&resale_total_count=30045&np_total_count=2329", self.parse_buy)
def parse_buy(self, response):
#Since the response is purely JSON
text = response.body
#Parsing it using the builtin json utility
parsed_json = json.loads(text)
#For each entry, we will store all the information we defined earlier in items.py
#The parsed json can be read as a dict. Examining the JSON, we can easily navigate
#to where we have the data we need.
for iter in range(30):
item = HousingItemBuy()
item['ad_price'] = parsed_json["hits"][iter]["formatted_price"]
item['ad_url'] = parsed_json["hits"][iter]["inventory_canonical_url"]
item['ad_title'] = parsed_json["hits"][iter]["title"]
item['ad_coordinates'] = parsed_json["hits"][iter]["location_coordinates"]
item['ad_date_added'] = parsed_json["hits"][iter]["date_added"]
item['ad_area'] = parsed_json["hits"][iter]["inventory_configs"][0]["area"]
item['ad_bedrooms'] = parsed_json["hits"][iter]["inventory_configs"][0]["number_of_bedrooms"]
item['ad_toilets'] = parsed_json["hits"][iter]["inventory_configs"][0]["number_of_toilets"]
item['ad_contact_persons_number'] = parsed_json["hits"][iter]["contact_persons_info"][0]["contact_no"]
item['ad_contact_persons_id'] = parsed_json["hits"][iter]["contact_persons_info"][0]["profile_id"]
item['ad_contact_persons_name'] = parsed_json["hits"][iter]["contact_persons_info"][0]["name"]
#Some entries do not have the ad_city/ad_locality variable.
try:
item['ad_city'] = parsed_json["hits"][iter]["display_city"][0]
except :
item['ad_city'] = "None given"
try:
item['ad_locality'] = parsed_json["hits"][iter]["display_city"][1]
except :
item['ad_locality'] = "None given"
item['ad_gas_pipeline'] = parsed_json["hits"][iter]["inventory_amenities"]["has_gas_pipeline"]
item['ad_lift'] = parsed_json["hits"][iter]["inventory_amenities"]["has_lift"]
item['ad_parking'] = parsed_json["hits"][iter]["inventory_amenities"]["has_parking"]
item['ad_gym'] = parsed_json["hits"][iter]["inventory_amenities"]["has_gym"]
item['ad_swimming_pool'] = parsed_json["hits"][iter]["inventory_amenities"]["has_swimming_pool"]
item['ad_id'] = parsed_json["hits"][iter]["id"]
yield item | Python | 66 | 49.89394 | 204 | /housing/housing/spiders/housing_spider.py | 0.587254 | 0.566409 |
darshanime/scrapy-tutorials | refs/heads/master | from scrapy import Item, Field
class HousingItemBuy(Item):
ad_id = Field()
ad_title = Field()
ad_price = Field()
ad_area = Field()
ad_url = Field()
ad_date_added = Field()
ad_coordinates = Field()
ad_bedrooms = Field()
ad_toilets = Field()
ad_gas_pipeline = Field()
ad_lift = Field()
ad_parking = Field()
ad_gym = Field()
ad_swimming_pool = Field()
ad_city = Field()
ad_locality = Field()
ad_contact_persons_name = Field()
ad_contact_persons_number = Field()
ad_contact_persons_id = Field()
count = Field() | Python | 23 | 26.086956 | 59 | /housing/housing/items.py | 0.559486 | 0.559486 |
darshanime/scrapy-tutorials | refs/heads/master | from cardekho.items import CardekhoItem
from scrapy import Spider
from scrapy.http.request import Request
class CardekhoSpider(Spider):
    """Spider that scrapes used-car listings in Mumbai from cardekho.com.

    Yields one CardekhoItem (title, price, odometer reading) per listing
    row found on each of the paginated result pages.
    """
    name = "cardekho"
    # NOTE(review): allowed_domains normally holds bare domain names
    # ("www.cardekho.com"), not full URLs -- confirm offsite filtering works.
    allowed_domains = ["http://www.cardekho.com"]
    start_urls = ["http://www.cardekho.com/used-cars+in+mumbai-all/"]
    #This is to not get redirected by CarDekho. We are identifying ourselves as a web-browser.
    custom_settings = {'USER_AGENT' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}
    def start_requests(self):
        """Queue every paginated listing page explicitly."""
        #There are 162 pages, we are asking Scrapy to get us all of them.
        for i in range(162):
            yield Request("http://www.cardekho.com/used-cars+in+mumbai-all/" + str(i), self.parse)
    def parse(self, response):
        """Extract one item per <li> row of the results list."""
        for sel in response.xpath('/html/body/main/div/div[2]/div[2]/div[9]/form/ul/li'):
            item = CardekhoItem()
            # .extract() returns a list of matched strings, not a scalar.
            item ['title'] = sel.xpath('div[1]/div[2]/div[1]/a/text()').extract()
            item ['price'] = sel.xpath('div[1]/div[3]/div[1]/text()').extract()
            item ['distance'] = sel.xpath('div[1]/div[2]/div[3]/ul/li[1]/div[2]/span/text()').extract()
yield item | Python | 24 | 50.958332 | 162 | /cardekho/cardekho/spiders/cardekho_spider.py | 0.62199 | 0.58427 |
darshanime/scrapy-tutorials | refs/heads/master | from scrapy.spiders import BaseSpider
from scrapy101.items import Scrapy101Item
class Scrapy101Spider(BaseSpider):
    """Spider that extracts top-level category titles from dmoz.org.

    NOTE(review): BaseSpider is a long-deprecated alias of scrapy.Spider,
    and the trailing ``print item['title']`` line is Python-2 syntax --
    this spider only runs under Python 2 / old Scrapy.
    """
    name = "dmoz"
    # NOTE(review): allowed_domains entries are usually bare domains
    # without a trailing slash ("dmoz.org") -- confirm.
    allowed_domains = ["dmoz.org/"]
    start_urls = ["http://www.dmoz.org/"]
    def parse(self, response):
        """Walk the category <div>s; each <span> holds one linked title."""
        for div in response.xpath('/html/body/div[3]/div[3]/div[1]/div'):
            for entry in div.xpath('span'):
                item = Scrapy101Item()
                # .extract() yields a list of matched text nodes.
                item['title'] = entry.xpath('a/text()').extract()
print item['title'] | Python | 14 | 35.214287 | 73 | /scrapy 101/scrapy101/spiders/dmoz.py | 0.592885 | 0.563241 |
darshanime/scrapy-tutorials | refs/heads/master | from scrapy import Item, Field
class Scrapy101Item(Item):
    """Container item holding a single scraped DMOZ category title."""
title = Field() | Python | 4 | 18.25 | 30 | /scrapy 101/scrapy101/items.py | 0.75 | 0.710526 |
prakashpatil1430/Fashionproject | refs/heads/main | # Generated by Django 3.2.6 on 2021-09-25 07:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen Product.category to the two-letter choice set
    covering Tshirts, Watches, Perfumes and Shoes."""
    dependencies = [
        ('fashion', '0002_cart_orderplaced_product'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='category',
            field=models.CharField(choices=[('TS', 'Tshirts'), ('W', 'Watches'), ('P', 'Perfumes'), ('S', 'Shoes')], max_length=2),
        ),
    ]
| Python | 18 | 25.444445 | 131 | /fashion/migrations/0003_alter_product_category.py | 0.573529 | 0.531513 |
prakashpatil1430/Fashionproject | refs/heads/main |
from django.urls import path
# from.views import address,add_to_cart,mobile,checkout,orders,ProductView,ProductDetailView,CustomerRegistrationView,ProfileView,show_cart,laptop,fashion_top,fashion_bottom,gym_product,home_decor,plus_cart,minus_cart,remove_cart,payment_done,orders
from django.conf import settings
from django.conf.urls.static import static
# from django.contrib.auth import views as auth_views
from fashion.views import HomeView,perfume_view,product_view,shoes_view,watch_view,tshirt_view,ProductDetailView,add_to_cart,CustomerRegistrationView,ProfileView,address,show_cart,remove_cart,checkout,orders
from django.contrib.auth import views as auth_views
from .forms import LoginForm,MyPasswordChangeForm
# ,MyPasswordResetForm,MySetPasswordForm
# URL routes for the fashion app; media URLs are appended for development use.
urlpatterns = [
    # Home / catalogue
    path('',HomeView.as_view(),name='home'),
    path('alldata/',product_view,name="alldata"),
    # Category listings; the optional <slug:data> selects a brand/price filter.
    path('perfume/',perfume_view,name="perfume"),
    path('perfume/<slug:data>/',perfume_view,name="perfume"),
    path('watches/',watch_view,name="watches"),
    path('watches/<slug:data>/',watch_view,name="watches"),
    path('tshirts/',tshirt_view,name="tshirts"),
    path('tshirts/<slug:data>/',tshirt_view,name="tshirts"),
    path('shoes/',shoes_view,name="shoes"),
    path('shoes/<slug:data>/',shoes_view,name="shoes"),
    path('product-detail/<int:pk>',ProductDetailView.as_view(),name="product-detail"),
    # Cart management
    path('add-to-cart/',add_to_cart,name="add-to-cart"),
    path('cart/',show_cart,name='cart'),
    path('removecart/<int:pk>/',remove_cart,name='removecart'),
    # Account / profile
    path('profile/',ProfileView.as_view(),name="profile"),
    path('address/',address,name="address"),
    path('orders/',orders,name="orders"),
    # NOTE(review): "regestration" is misspelled, but changing the URL would
    # break existing links/templates -- kept as-is.
    path('regestration/',CustomerRegistrationView.as_view(),name="customerregistration"),
    # Authentication (Django built-in views with app templates)
    path('login/', auth_views.LoginView.as_view(template_name='fashion/login.html',authentication_form=LoginForm), name='login'),
    path('logout/', auth_views.LogoutView.as_view(next_page='login') ,name='logout'),
    path('passwordchange/',auth_views.PasswordChangeView.as_view(template_name='fashion/passwordchange.html',form_class=MyPasswordChangeForm,success_url='/passwordchangedone/'),name="passwordchange"),
    path('passwordchangedone/', auth_views.PasswordChangeDoneView.as_view(template_name='fashion/passwordchangedone.html'), name='passwordchangedone'),
    # Checkout
    path('checkout/',checkout,name='checkout'),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| Python | 38 | 64.263161 | 249 | /fashion/urls.py | 0.744458 | 0.744458 |
prakashpatil1430/Fashionproject | refs/heads/main | from django.shortcuts import render
from django.views import View
from .models import Product, Customer, Cart, OrderPlaced
from django.shortcuts import render, redirect, HttpResponse
from .forms import CustomerRegistrationForm, CustomerProfileForm
from django.contrib import messages
from django.db.models import Q
# Create your views here.
class HomeView(View):
    """Landing page: lists every product plus a cart-size badge."""

    def get(self, request):
        """Render the catalogue; totalitem is 0 for anonymous users."""
        all_product = Product.objects.all()
        totalitem = 0
        if request.user.is_authenticated:
            # count() runs in the database instead of fetching every row
            # just to call len() on the result.
            totalitem = Cart.objects.filter(user=request.user).count()
        return render(request, 'fashion/index2.html', context={'data': all_product, 'totalitem': totalitem})
def product_view(request, data=None):
    """Render the full product catalogue (the ``data`` filter is unused)."""
    context = {'data': Product.objects.all()}
    return render(request, 'fashion/index2.html', context)
class ProductDetailView(View):
    """Detail page for one product, flagging whether it is already carted."""

    def get(self, request, pk):
        """Render product *pk* with cart badge and in-cart indicator.

        Raises Product.DoesNotExist (HTTP 500) for an unknown pk, matching
        the previous behaviour.
        """
        totalitem = 0
        product = Product.objects.get(pk=pk)
        item_already_in_cart = False
        if request.user.is_authenticated:
            # count() avoids materialising every cart row just for len().
            totalitem = Cart.objects.filter(user=request.user).count()
            item_already_in_cart = Cart.objects.filter(
                Q(product=product.id) & Q(user=request.user)).exists()
        return render(request, 'fashion/productdetail.html', {'product': product, 'totalitem': totalitem, 'item_already_in_cart': item_already_in_cart})
def perfume_view(request, data=None):
    """List perfumes, optionally filtered by price band.

    ``data`` may be None (all perfumes), 'Below1000' or 'Above1000'.
    Any other slug now falls back to the unfiltered queryset instead of
    raising UnboundLocalError as the old if/elif chain did.
    """
    all_product = Product.objects.all()
    totalitem = 0
    if request.user.is_authenticated:
        # count() counts in the database instead of len() over all rows.
        totalitem = Cart.objects.filter(user=request.user).count()
    perfume = Product.objects.filter(category='P')
    if data == 'Below1000':
        perfume = perfume.filter(discounted_price__lt=1000)
    elif data == 'Above1000':
        perfume = perfume.filter(discounted_price__gt=1000)
    return render(request, 'fashion/index2.html', {'perfume': perfume, 'totalitem': totalitem, 'data': all_product})
def tshirt_view(request, data=None):
    """List t-shirts, optionally filtered by brand slug or price band.

    ``data`` may be None, 'm-tshirt', 'w-tshirt', 'Below1000' or
    'Above1000'. Unknown slugs fall back to the unfiltered queryset
    instead of raising UnboundLocalError like the old if/elif chain.
    """
    all_product = Product.objects.all()
    totalitem = 0
    if request.user.is_authenticated:
        totalitem = Cart.objects.filter(user=request.user).count()
    tshirts = Product.objects.filter(category='TS')
    if data in ('m-tshirt', 'w-tshirt'):
        tshirts = tshirts.filter(brand=data)
    elif data == 'Below1000':
        tshirts = tshirts.filter(discounted_price__lt=1000)
    elif data == 'Above1000':
        tshirts = tshirts.filter(discounted_price__gt=1000)
    return render(request, 'fashion/index2.html', {'tshirts': tshirts, 'totalitem': totalitem, 'data': all_product})
def watch_view(request, data=None):
    """List watches, optionally filtered by brand slug or price band.

    BUGFIX: the 'w-match' branch previously assigned the filtered queryset
    to ``tshirts`` and then rendered ``watches``, crashing with
    UnboundLocalError. Unknown slugs now also fall back to the unfiltered
    queryset instead of crashing.

    NOTE(review): the slug literally is 'w-match' in the old code --
    presumably a typo for 'w-watch'; kept as-is to match existing URLs.
    """
    all_product = Product.objects.all()
    totalitem = 0
    if request.user.is_authenticated:
        totalitem = Cart.objects.filter(user=request.user).count()
    watches = Product.objects.filter(category='W')
    if data in ('m-watch', 'w-match'):
        watches = watches.filter(brand=data)
    elif data == 'Below1000':
        watches = watches.filter(discounted_price__lt=1000)
    elif data == 'Above1000':
        watches = watches.filter(discounted_price__gt=1000)
    return render(request, 'fashion/index2.html', {'watches': watches, 'totalitem': totalitem, 'data': all_product})
def shoes_view(request, data=None):
    """List shoes, optionally filtered by brand slug or price band.

    ``data`` may be None, 'man-shoes', 'women-shoes', 'Below1000' or
    'Above1000'. Unknown slugs fall back to the unfiltered queryset
    instead of raising UnboundLocalError like the old if/elif chain.
    """
    all_product = Product.objects.all()
    totalitem = 0
    if request.user.is_authenticated:
        totalitem = Cart.objects.filter(user=request.user).count()
    shoes = Product.objects.filter(category='S')
    if data in ('man-shoes', 'women-shoes'):
        shoes = shoes.filter(brand=data)
    elif data == 'Above1000':
        shoes = shoes.filter(discounted_price__gt=1000)
    elif data == 'Below1000':
        shoes = shoes.filter(discounted_price__lt=1000)
    return render(request, 'fashion/index2.html', {'shoes': shoes, 'totalitem': totalitem, 'data': all_product})
def add_to_cart(request):
    """Add the product given by ``?prod_id=...`` to the current user's cart.

    Anonymous visitors are redirected to the login page.
    """
    if not request.user.is_authenticated:
        return redirect('/login')
    product = Product.objects.get(id=request.GET.get('prod_id'))
    Cart(user=request.user, product=product).save()
    return redirect('/cart')
def remove_cart(request, pk):
    """Delete the cart row for product *pk* belonging to the current user."""
    product = Product.objects.get(pk=pk)
    cart_entry = Cart.objects.get(Q(product=product) & Q(user=request.user))
    cart_entry.delete()
    return redirect('/cart')
class CustomerRegistrationView(View):
    """Sign-up page backed by CustomerRegistrationForm."""

    def get(self, request):
        """Show an empty registration form."""
        return render(request, 'fashion/customer_reg.html',
                      {'form': CustomerRegistrationForm()})

    def post(self, request):
        """Validate and persist the submitted registration; re-render the
        same template so form errors (if any) are displayed."""
        form = CustomerRegistrationForm(request.POST)
        if form.is_valid():
            messages.success(
                request, 'Congratulations!! Registered Successfully.')
            form.save()
        return render(request, 'fashion/customer_reg.html', {'form': form})
class ProfileView(View):
    """Profile tab: lets the logged-in user save a delivery address."""

    def get(self, request):
        """Show an empty profile/address form with the cart badge."""
        totalitem = 0
        if request.user.is_authenticated:
            # count() counts in the database instead of len() over all rows.
            totalitem = Cart.objects.filter(user=request.user).count()
        form = CustomerProfileForm()
        return render(request, 'fashion/profile.html', {'form': form, 'active': 'btn-primary', 'totalitem': totalitem})

    def post(self, request):
        """Persist the submitted address as a new Customer row.

        On an invalid form the template is re-rendered with the bound form
        so validation errors are shown.
        """
        totalitem = 0
        if request.user.is_authenticated:
            totalitem = Cart.objects.filter(user=request.user).count()
        form = CustomerProfileForm(request.POST)
        if form.is_valid():
            cleaned = form.cleaned_data
            reg = Customer(user=request.user, name=cleaned['name'],
                           locality=cleaned['locality'], city=cleaned['city'],
                           state=cleaned['state'], zipcode=cleaned['zipcode'])
            reg.save()
            messages.success(
                request, 'Congratulations!! Profile Updated Successfully.')
        return render(request, 'fashion/profile.html', {'form': form, 'active': 'btn-primary', 'totalitem': totalitem})
def checkout(request):
    """Checkout page: saved addresses plus cart totals (flat 70 shipping).

    PERF: the old code scanned ``Cart.objects.all()`` and filtered by user
    in Python; this version sums over the user's own queryset.
    """
    if not request.user.is_authenticated:
        return redirect('/login')
    user = request.user
    addr = Customer.objects.filter(user=user)
    cart_items = Cart.objects.filter(user=user)
    amount = 0.0
    shipping_amount = 70
    total_amount = 0.0
    for item in cart_items:
        amount += item.quantity * item.product.discounted_price
    if cart_items:
        # Shipping is only charged when the cart is non-empty.
        total_amount = amount + shipping_amount
    return render(request, 'fashion/checkout.html', {'addr': addr, 'cart_items': cart_items, 'total_amount': total_amount})
def address(request):
    """Profile "address" tab: lists the user's saved Customer addresses."""
    totalitem = 0
    if request.user.is_authenticated:
        # count() counts in the database instead of len() over all rows.
        totalitem = Cart.objects.filter(user=request.user).count()
    addr = Customer.objects.filter(user=request.user)
    return render(request, 'fashion/address.html', {'addr': addr, 'active': 'btn-primary', 'totalitem': totalitem})
def show_cart(request):
    """Cart page: line items plus totals, or the empty-cart template.

    PERF: the old code scanned ``Cart.objects.all()`` and filtered by user
    in Python; this version iterates the user's own queryset once.
    """
    if not request.user.is_authenticated:
        return redirect('/login')
    user = request.user
    cart = Cart.objects.filter(user=user)
    amount = 0.0
    shipping_amount = 70
    if not cart:
        return render(request, 'fashion/emptycart.html')
    for item in cart:
        amount += item.quantity * item.product.discounted_price
    total_amount = amount + shipping_amount
    return render(request, 'fashion/addtocart.html', {'carts': cart, 'amount': amount, 'total_amount': total_amount})
def orders(request):
    """Convert every cart row into an OrderPlaced row, then show orders.

    BUGFIX: the old version returned the redirect from *inside* the loop,
    so each request converted only a single cart item and bounced back to
    /orders repeatedly until the cart drained.
    """
    user = request.user
    customer = Customer.objects.get(id=user.id)
    cart_rows = Cart.objects.filter(user=user)
    if cart_rows:
        for row in cart_rows:
            OrderPlaced(user=user, customer=customer,
                        product=row.product, quantity=row.quantity).save()
            row.delete()
        # Redirect once, after the whole cart has been converted.
        return redirect("/orders")
    op = OrderPlaced.objects.filter(user=request.user)
    return render(request, 'fashion/orders.html', {'order_placed': op})
| Python | 243 | 37.271606 | 152 | /fashion/views.py | 0.635269 | 0.625484 |
001001matheus001001/Minecraft-python | refs/heads/master | # Conectar ao Minecraft
# Connect to the running Minecraft game through the mcpi API.
from mcpi.minecraft import Minecraft
mc = Minecraft.create()
# Ask the user for the target 3D coordinates.
# NOTE(review): in Python 3 input() returns strings; setPos presumably
# expects numeric coordinates, so this script looks written for
# Python 2's evaluating input() -- confirm.
x = input("localização desejada para x ")
y = input("localização desejada para y ")
z = input("localização desejada para z ")
# Teleport the player to the requested position.
mc.player.setPos(x, y, z)
print("Fim de locomoção ->", x, y ,z) | Python | 15 | 21.6 | 41 | /teleportpreciso.py | 0.715976 | 0.713018 |
001001matheus001001/Minecraft-python | refs/heads/master | # Conectar ao Minecraft
# Connect to the running Minecraft game through the mcpi API.
from mcpi.minecraft import Minecraft
mc = Minecraft.create()
# Ask for the block type id and the 3D coordinates where to place it.
# NOTE(review): in Python 3 input() returns strings; setBlock presumably
# expects numeric arguments, so this script looks written for Python 2 --
# confirm.
bloco = input("Numero do bloco desejado:")
x = input("localização desejada para: x ")
y = input("localização desejada para: y ")
z = input("localização desejada para: z ")
# Place a single block of the requested type at (x, y, z).
mc.setBlock(x, y, z, bloco)
print("Fim de script obs: cria 1 bloco por vez ! ") | Python | 15 | 23.866667 | 51 | /CriaBlocos.py | 0.712366 | 0.706989 |
JiriPapousek/facebook-analysis | refs/heads/master | from os import listdir
import matplotlib.pyplot as plt
import pylab
import operator
import numpy as np
import sys
import calendar
def clear_data(first_tag, last_lag, text):
    """Extract the substring between *first_tag* and *last_lag* in *text*.

    Returns an ``(inner, remainder)`` tuple where ``inner`` is the text
    between the first occurrence of the two tags and ``remainder`` is
    everything after the closing tag. Both tags are assumed to be present.
    """
    start = text.find(first_tag) + len(first_tag)
    stop = text.find(last_lag)
    remainder = text[stop + len(last_lag):]
    return text[start:stop], remainder
def messages_to_a_list():
    """Parse every exported conversation HTML file into a nested list.

    Reads ``<export dir>/messages/*`` where the export dir is ``sys.argv[1]``.
    Returns a list of ``[participants, messages]`` pairs: ``participants``
    is a list of names from the page title, ``messages`` is a list of
    ``[sender, timestamp, text]`` triples.
    """
    chat_names = listdir(sys.argv[1] + "/messages")
    final_list = []
    for chat_name in chat_names:
        # NOTE(review): `file` shadows the Python-2 builtin of the same name.
        file = open(sys.argv[1] + "/messages/" + chat_name)
        conversation = file.read()
        file.close()
        # Split into one chunk per message; chunk 0 is the page header.
        conversation = conversation.split("<div class=\"message\">")
        # The title reads "Konverzace s <name>[,<name>...]".
        people_in_conversation, conversation[0] = clear_data(
            "Konverzace s ", "</title>", conversation[0])
        people_in_conversation = people_in_conversation.split(",")
        final_list.append([people_in_conversation, []])
        conversation.pop(0)
        for message in conversation:
            """
            Finds a name of user who sent the message, time of sending and
            message itself, afterwards it gets rid of all tags around and
            appends the result as a new value to the list.
            """
            # NOTE(review): the local `clear_time` below shadows the
            # module-level clear_time() function (harmless here, confusing).
            clear_name, message = clear_data(
                "<span class=\"user\">", "</span>", message)
            clear_time, message = clear_data(
                "<span class=\"meta\">", "</span>", message)
            clear_text, message = clear_data("<p>", "</p>", message)
            final_list[len(final_list) -
                       1][1].append([clear_name, clear_time, clear_text])
    return final_list
def identify_the_owner():
    """Return the full name of the account owner.

    Parses the <h1> heading of the exported profile page
    ``<export dir>/index.htm`` (export dir comes from ``sys.argv[1]``).
    Uses a context manager so the file handle is closed even on error.
    """
    with open(sys.argv[1] + "/index.htm") as profile_file:
        profile = profile_file.read()
    result, profile = clear_data("<h1>", "</h1>", profile)
    return result
def count_sent_vs_received_messsages(data, number_of_results):
    """Count sent vs. received messages for each one-to-one conversation.

    Returns three parallel lists (conversation names, sent counts,
    received counts) for the *number_of_results* busiest conversations.

    BUGFIX: the per-message loop used to sit *outside* the one-to-one
    check, so group-chat messages were credited to the previous one-to-one
    conversation (or crashed with IndexError on an empty list).
    PERF: identify_the_owner() is now called once instead of re-reading
    index.htm for every single message.
    """
    owner = identify_the_owner()
    final = []
    for conversation in data:
        if len(conversation[0]) == 1:
            final.append([conversation[0], 0, 0, 0])
            for message in conversation[1]:
                final[len(final) - 1][3] += 1
                if message[0] == owner:
                    final[len(final) - 1][1] += 1
                else:
                    final[len(final) - 1][2] += 1
    # Sort by total message count, busiest first (same tie order as before).
    final = sorted(final, key=operator.itemgetter(3))[::-1]
    names = []
    my_messages = []
    others_messages = []
    for i in range(number_of_results):
        names.append(final[i][0][0])
        my_messages.append(final[i][1])
        others_messages.append(final[i][2])
    print(names)
    print(my_messages)
    print(others_messages)
    return names, my_messages, others_messages
def show_sent_vs_received_messages(data, number_of_results):
    """Bar-chart the top *number_of_results* one-to-one conversations,
    with paired bars for sent vs. received messages. Saves the figure to
    ``sent_vs_received.png`` and shows it interactively."""
    result = count_sent_vs_received_messsages(data, number_of_results)
    names = result[0]
    my_messages = result[1]
    others_messages = result[2]
    plt.figure(figsize=(10, 6))
    plt.title("Sent and received messages in the most used conversations")
    # Two bar series, offset by the 0.4 bar width so they sit side by side.
    plt.bar(
        np.arange(len(my_messages)),
        my_messages,
        width=0.4,
        align='edge',
        alpha=0.7,
        color='r',
        label="Sent messages")
    plt.bar(
        np.arange(len(others_messages)) + 0.4,
        others_messages,
        width=0.4,
        align='edge',
        alpha=0.7,
        color='b',
        label="Received messages")
    plt.legend()
    plt.xticks(np.arange(len(names))+0.4, names, rotation=90)
    plt.ylabel("Number of messages")
    plt.xlim(0, number_of_results)
    plt.tight_layout()
    pylab.savefig("sent_vs_received.png")
    plt.show()
def count_word(data, word, person):
    """Count (and echo) every message by *person* containing *word*.

    Each matching message is printed as "<participants> <timestamp> <text>";
    the total number of matches is returned.
    """
    matches = 0
    for conversation in data:
        participants = conversation[0]
        for message in conversation[1]:
            sender, stamp, text = message[0], message[1], message[2]
            if word in text and sender == person:
                print("{} {} {}".format(str(participants), stamp, text))
                matches += 1
    return matches
def clear_time(raw):
    """Parse a Czech timestamp such as ``"5. ledna 2017 v 9:30"``.

    Returns ``[hours, minutes, day, month, year]``; *month* stays the
    Czech month name as a string (it is used as a dict key downstream).

    The parameter was renamed from ``str`` so it no longer shadows the
    builtin; all in-file callers pass the argument positionally.
    """
    colon = raw.find(":")
    minutes = int(raw[colon + 1:colon + 3])
    # Hours may be one or two digits; inspect the char two places before ':'.
    if raw[colon - 2] != " ":
        hours = int(raw[colon - 2:colon])
    else:
        hours = int(raw[colon - 1:colon])
    day = int(raw[0:raw.find(".")])
    month = raw.split(" ")[1]
    year = int(raw.split(" ")[2])
    return [hours, minutes, day, month, year]
def count_messages_throughout_a_day(data):
    """Tally sent vs. received messages per minute of day (one-to-one chats).

    Returns two 1440-element lists indexed by minute-of-day.
    PERF: identify_the_owner() is now called once instead of re-reading
    index.htm for every single message.
    """
    owner = identify_the_owner()
    my_daily_messages = [0] * 60 * 24
    others_daily_messages = [0] * 60 * 24
    for conversation in data:
        if len(conversation[0]) == 1:
            for message in conversation[1]:
                t = clear_time(message[1])
                minute_of_day = 60 * t[0] + t[1]
                if message[0] == owner:
                    my_daily_messages[minute_of_day] += 1
                else:
                    others_daily_messages[minute_of_day] += 1
    return my_daily_messages, others_daily_messages
def show_messages_throughout_a_day(data):
    """Line-chart sent vs. received messages for every minute of the day.
    Saves the figure to ``messages_throughout_a_day.png`` and shows it."""
    result = count_messages_throughout_a_day(data)
    my_daily_messages = result[0]
    others_daily_messages = result[1]
    plt.figure(figsize=(10, 6))
    plt.title("Sent and received messages throughout a day")
    # NOTE(review): ylabel is set twice in this function; the second call
    # below wins -- this first one is redundant.
    plt.ylabel("Number of messages")
    plt.plot(
        np.arange(len(my_daily_messages)),
        my_daily_messages,
        color='b',
        alpha=0.7,
        label="Sent messages")
    plt.plot(
        np.arange(len(others_daily_messages)),
        others_daily_messages,
        color='r',
        alpha=0.7,
        label="Received messages")
    plt.legend(loc='upper left')
    # X-axis tick labels every 3 hours (180 minutes).
    times = [
        "0:00",
        "3:00",
        "6:00",
        "9:00",
        "12:00",
        "15:00",
        "18:00",
        "21:00"]
    plt.xticks([180 * i for i in range(8)], times)
    plt.ylabel("Number of messages")
    plt.xlim(0, 1440)
    plt.tight_layout()
    pylab.savefig("messages_throughout_a_day.png")
    plt.show()
def count_men_vs_women(data):
    """Split one-to-one message counts by the counterpart's guessed gender.

    The heuristic is the Czech female surname suffix "ová".
    Returns ``(sent_to_men, sent_to_women, received_from_men,
    received_from_women)``.

    PERF: identify_the_owner() is now called once (it used to re-read
    index.htm per message), and the suffix test is hoisted per
    conversation via str.endswith.
    """
    owner = identify_the_owner()
    sent_to_women = 0
    sent_to_men = 0
    received_from_women = 0
    received_from_men = 0
    for conversation in data:
        if len(conversation[0]) == 1:
            name = conversation[0][0]
            is_woman = name.endswith("ová")
            for message in conversation[1]:
                if message[0] == owner:
                    if is_woman:
                        sent_to_women += 1
                    else:
                        sent_to_men += 1
                else:
                    if is_woman:
                        received_from_women += 1
                    else:
                        received_from_men += 1
    return sent_to_men, sent_to_women, received_from_men, received_from_women
def show_men_vs_women(data):
    """Bar-chart sent vs. received message totals split by counterpart
    gender. Saves the figure to ``men_vs_women.png`` and shows it."""
    result = count_men_vs_women(data)
    sent_to_men = result[0]
    sent_to_women = result[1]
    received_from_men = result[2]
    received_from_women = result[3]
    plt.figure(figsize=(10, 6))
    plt.title("Exchanged messages with men and women")
    # Paired bars: sent (red) next to received (blue), offset by bar width.
    plt.bar(np.arange(2), [sent_to_men, sent_to_women],
            color='r', width=0.4, alpha=0.7, label="Sent messages")
    plt.bar(np.arange(2) + 0.40, [received_from_men, received_from_women],
            color='b', width=0.4, alpha=0.7, label="Received messages")
    plt.legend(loc='upper left')
    plt.xticks(np.arange(2)+0.2, ["Men", "Women"])
    plt.ylabel("Number of messages")
    pylab.savefig("men_vs_women.png")
    plt.show()
def count_who_starts_conversation(data, number_of_results):
    """Estimate, per one-to-one conversation, who opens conversations.

    A message counts as an "opening" when it starts a new day after a gap
    (different month/year or a jump of more than one day), or when it
    contains one of the Czech greetings below on a same-day exchange.
    Returns (names, openings_by_me, openings_by_them) for the
    *number_of_results* conversations with the most openings.

    NOTE(review): identify_the_owner() re-reads index.htm for every
    candidate message, and ``conversation[1] = conversation[1][::-1]``
    mutates the caller's *data* in place (messages end up reversed).
    """
    final = []
    list_of_greetings = [
        "zdravíčko",
        "ahoj",
        "čau",
        "čus",
        "nazdar",
        "nazdárek",
        "dobrý den"]
    for conversation in data:
        if len(conversation[0]) == 1:
            final.append([conversation[0][0], 0, 0, 0])
            # Reverse to chronological order (export is newest-first).
            conversation[1] = conversation[1][::-1]
            previous_message = conversation[1][0]
            previous_time = clear_time(previous_message[1])
            for i in range(1, len(conversation[1])):
                message = conversation[1][i]
                time = clear_time(message[1])
                if time[2] != previous_time[2]:
                    # New day: count as an opening only after a real gap.
                    if time[3] != previous_time[3] or time[4] != previous_time[4] or (
                            time[2] - previous_time[2]) != 1:
                        if message[0] == identify_the_owner():
                            final[len(final) - 1][1] += 1
                        else:
                            final[len(final) - 1][2] += 1
                        final[len(final) - 1][3] += 1
                else:
                    # Same day: a greeting word also marks an opening.
                    for greeting in list_of_greetings:
                        if message[2].lower().find(greeting) != -1:
                            if message[0] == identify_the_owner():
                                final[len(final) - 1][1] += 1
                            else:
                                final[len(final) - 1][2] += 1
                            final[len(final) - 1][3] += 1
                previous_time = time
    # Sort by total openings, busiest first.
    final = sorted(final, key=operator.itemgetter(3))[::-1]
    names = []
    me = []
    them = []
    for i in range(number_of_results):
        names.append(final[i][0] + " ")
        me.append(final[i][1])
        them.append(final[i][2])
    return names, me, them
def show_who_starts_conversation(data, number_of_results):
    """Bar-chart who opens conversations (me vs. the other person) for the
    top conversations. Saves ``who_starts_the_conversation.png``."""
    result = count_who_starts_conversation(data, number_of_results)
    names = result[0]
    me = result[1]
    them = result[2]
    plt.figure(figsize=(10, 6))
    plt.title("Who starts the conversation first")
    # Paired bars offset by the 0.4 bar width.
    plt.bar(
        np.arange(len(me)),
        me,
        width=0.4,
        align="edge",
        alpha=0.7,
        color="r",
        label="Me")
    plt.bar(
        np.arange(len(them)) + 0.4,
        them,
        width=0.4,
        align="edge",
        alpha=0.7,
        color="b",
        label="Other person")
    plt.legend()
    plt.xticks(np.arange(len(names))+0.4, names, rotation=90)
    plt.ylabel("Number of openings")
    plt.xlim(0, number_of_results)
    plt.tight_layout()
    pylab.savefig("who_starts_the_conversation.png")
    plt.show()
def count_msgs_throughout_a_year(data, year):
    """Count sent and received messages per month of *year*.

    Returns ``(my_messages, others_messages)``: two 12-element lists
    ordered January..December. Czech month names are used as dict keys
    because clear_time() keeps the month as text.

    BUGFIX: the first message of a month used to initialise its counter
    to 0 instead of 1, undercounting every month by one.
    PERF: identify_the_owner() is now called once instead of re-reading
    index.htm for every message.
    """
    owner = identify_the_owner()
    months_my_messages = {}
    months_others_messages = {}
    for conversation in data:
        for message in conversation[1]:
            time = clear_time(message[1])
            if time[4] == year:
                if message[0] == owner:
                    months_my_messages[time[3]] = months_my_messages.get(time[3], 0) + 1
                else:
                    months_others_messages[time[3]] = months_others_messages.get(time[3], 0) + 1
    my_messages = []
    others_messages = []
    months = [
        "leden",
        "únor",
        "březen",
        "duben",
        "květen",
        "červen",
        "červenec",
        "srpen",
        "září",
        "říjen",
        "listopad",
        "prosinec"]
    for month in months:
        my_messages.append(months_my_messages.get(month, 0))
        others_messages.append(months_others_messages.get(month, 0))
    return my_messages, others_messages
def show_msgs_throughout_a_year(data):
    """Line-chart sent (solid) and received (dashed) messages per month
    for the years 2014-2017, one color per year. Saves the figure to
    ``msgs_throughout_by_month.png`` and shows it."""
    sent = []
    received = []
    for year in [i + 2014 for i in range(4)]:
        result = count_msgs_throughout_a_year(data, year)
        sent.append(result[0])
        received.append(result[1])
    colors = ["r", "b", "g", "m"]
    plt.figure(figsize=(10, 6))
    plt.title("Sent and received messages by month in last years")
    # Keep one solid-line handle per year for the colour legend.
    color_lines = []
    for i in range(4):
        color_lines.append(plt.plot(
            np.arange(12),
            sent[i],
            ls="solid",
            color=colors[i],
            alpha=0.8,
            label=str(2014 + i))[0])
        plt.plot(
            np.arange(12),
            received[i],
            ls="dashed",
            color=colors[i],
            alpha=0.8)
    # Black dummy lines drive the second (linestyle) legend.
    black_lines = []
    black_lines.append(plt.plot([], [], color="#000000", ls="solid")[0])
    black_lines.append(plt.plot([], [], color="#000000", ls="dashed")[0])
    colors_legend = plt.legend(color_lines,
                               [str(i + 2014) for i in range(4)],
                               loc="upper left")
    plt.legend(black_lines,
               ["Sent messages","Received messages"],
               loc="upper right")
    # Re-attach the first legend, which the second plt.legend() replaced.
    plt.gca().add_artist(colors_legend)
    plt.xticks(np.arange(13) - 1, calendar.month_name, rotation=70)
    plt.xlim(0, 11)
    plt.ylabel("Number of messages")
    plt.tight_layout()
    pylab.savefig("msgs_throughout_by_month.png")
    plt.show()
# Entry point: parse the export once, then render every chart from the
# same parsed data. PERF: the old code called messages_to_a_list() five
# times, re-reading and re-parsing every conversation file per chart.
# (count_who_starts_conversation reverses message lists in place, but all
# downstream counters are order-independent, so sharing is safe.)
parsed_messages = messages_to_a_list()
show_msgs_throughout_a_year(parsed_messages)
show_who_starts_conversation(parsed_messages, 15)
show_men_vs_women(parsed_messages)
show_sent_vs_received_messages(parsed_messages, 15)
show_messages_throughout_a_day(parsed_messages)
sdotson/udacity-machine-learning-nanodegree | refs/heads/master | # third party imports
import argparse
import json
# local imports
from model import predict, load_checkpoint
from utils import determine_device
from validation import validate_predict_args
# CLI defaults
TOP_K_DEFAULT = 1

# configure argument parser
# BUGFIX: the description previously read "Trains model and saves
# checkpoint", copy-pasted from train.py; this script classifies an image.
parser = argparse.ArgumentParser(
    description="Predicts the class of an image using a saved model checkpoint"
)
parser.add_argument("image_path", help="the path for the image you wish to classify")
parser.add_argument("checkpoint", help="the model checkpoint you would like to use")
parser.add_argument("--category_names", help="JSON file mapping category ids to display names")
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--top_k", type=int, default=TOP_K_DEFAULT)

# parse and validate args
args = parser.parse_args()
validate_predict_args(args)

# Optional mapping from category id to a human-readable name.
cat_to_name = None
if args.category_names:
    with open(args.category_names, "r") as f:
        cat_to_name = json.load(f)

# use gpu if available and requested in args
device = determine_device(args.gpu)
print("Using device {}...".format(device.type))

print("Loading checkpoint...")
model = load_checkpoint(args.checkpoint, device)

print("Predicting class for image...")
chart_data = predict(args.image_path, model, device, cat_to_name, args.top_k)

print("Printing chart of classes and probabilities...")
print(chart_data)
| Python | 42 | 30.095238 | 85 | /classifying-flowers/predict.py | 0.751149 | 0.750383 |
sdotson/udacity-machine-learning-nanodegree | refs/heads/master | from os import path
import torch
from torchvision import models
# validates train.py args
def validate_train_args(args):
    """Validate CLI arguments for train.py.

    Warns (without failing) when --gpu is requested but CUDA is missing,
    and raises ValueError for a nonexistent data or save directory.
    """
    # check cuda; `not x` instead of the `== False` anti-idiom
    if args.gpu and not torch.cuda.is_available():
        # we don't want to throw sand in the user's face
        # but let them know we are falling back to CPU
        print("GPU is not enabled for this device, falling back to CPU")

    # check data_directory existence
    if not path.exists(args.data_directory):
        raise ValueError(
            "data directory does not exist: {}".format(args.data_directory)
        )

    # check save_dir existence (optional argument)
    if args.save_dir and not path.exists(args.save_dir):
        raise ValueError("save directory does not exist: {}".format(args.save_dir))
# validates predict.py args
def validate_predict_args(args):
    """Validate CLI arguments for predict.py.

    Warns (without failing) when --gpu is requested but CUDA is missing,
    and raises ValueError for a nonexistent image path, checkpoint, or
    category-names file.
    """
    # check cuda; `not x` instead of the `== False` anti-idiom
    if args.gpu and not torch.cuda.is_available():
        # we don't want to throw sand in the user's face
        # but let them know we are falling back to CPU
        print("GPU is not enabled for this device, falling back to CPU")

    # check image_path existence
    if not path.exists(args.image_path):
        raise ValueError("image path does not exist: {}".format(args.image_path))

    # check checkpoint existence
    if not path.exists(args.checkpoint):
        raise ValueError("checkpoint does not exist: {}".format(args.checkpoint))

    # check category names existence (optional argument)
    if args.category_names and not path.exists(args.category_names):
        raise ValueError(
            "category names does not exist: {}".format(args.category_names)
        )
| Python | 44 | 36.06818 | 83 | /classifying-flowers/validation.py | 0.667075 | 0.667075 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.