seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
19423963567 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 19:44:34 2018
@author: whockei1
"""
import numpy as np, matplotlib.pyplot as plt, random, json, pickle, datetime, copy, socket, math
from scipy.stats import sem
import matplotlib.colors as colors
from scipy.ndimage import gaussian_filter as gauss # for smoothing ratemaps
import sys, os, csv
import utility_fx as util
import ratterdam_ParseBehavior as Parse
import ratterdam_Defaults as Def
import ratterdam_CoreDataStructures as Core
import ratterdam_DataFiltering as Filt
def poolTrials(unit, alley, labels, txt):
    """
    Collect the 1D ratemaps of every visit to `alley` whose label matches
    `txt`.  `labels` may be the real texture labels or a shuffled copy made
    before this call.  No subsampling / group balancing happens here (that
    is done afterwards).

    Returns (idx, rms): the matching visit indices and a stacked array of
    their ratemaps.
    """
    indices = []
    maps = []
    for i, visit in enumerate(unit.alleys[alley]):
        if labels[i] != txt:
            continue
        ratemap = visit['ratemap1d']
        # Replace NaN/inf with finite values, but only for real ndarrays.
        if type(ratemap) is np.ndarray:
            ratemap = np.nan_to_num(ratemap)
        maps.append(ratemap)
        indices.append(i)
    return indices, np.asarray(maps)
def computeTestStatistic_Diffs(groupX, groupY):
    """
    Average each stack of single-trial ratemaps down to a summary trace
    (NaN/inf entries are masked out of the mean) and return the bin-wise
    difference of the two averages.
    """
    meanX = np.ma.masked_invalid(groupX).mean(axis=0)  # ignores inf and nan
    meanY = np.ma.masked_invalid(groupY).mean(axis=0)  # ignores inf and nan
    return meanX - meanY
def computeTestStatistic_AUCDiffs(groupX, groupY):
    """
    Average each stack of single-trial ratemaps to summary traces (NaN/inf
    masked out) and return the absolute difference of the areas under the
    two averages (Simpson's rule).

    Fix: the original called `scipy.integrate.simps` without ever binding
    the `scipy` name (only sem/gaussian_filter are imported at file level),
    so this raised NameError at call time; `simps` is also removed in
    modern SciPy in favor of `simpson`.
    """
    from scipy.integrate import simpson  # local import fixes the missing binding
    maskX = np.ma.masked_invalid(groupX)
    avgX = maskX.mean(axis=0)  # ignores inf and nan
    maskY = np.ma.masked_invalid(groupY)
    avgY = maskY.mean(axis=0)  # ignores inf and nan
    diffauc = np.abs(simpson(avgX) - simpson(avgY))
    return diffauc
def getLabels(unit, alley):
    """
    Return the actual stimulus (texture) label of each visit to `alley`,
    in visit order.
    """
    return [visit['metadata']['stimulus'] for visit in unit.alleys[alley]]
def genSingleNullStat(unit, alley, txtX, txtY, labels):
    """
    DEPRECATED - superseded by the array-style generation in genNNulls().
    Build one null test statistic: permute the trial labels once, re-pool
    the two texture groups, and return their bin-wise mean difference.
    """
    permuted = np.random.permutation(labels)
    _, rmsX = poolTrials(unit, alley, permuted, txtX)
    _, rmsY = poolTrials(unit, alley, permuted, txtY)
    return computeTestStatistic_Diffs(rmsX, rmsY)
def genRealStat(unit, alley, txtX, txtY):
    """
    Compute the observed test statistic: the bin-wise mean difference
    between the two texture groups under the real (unshuffled) labels.
    """
    realLabels = getLabels(unit, alley)
    _, rmsX = poolTrials(unit, alley, realLabels, txtX)
    _, rmsY = poolTrials(unit, alley, realLabels, txtY)
    return computeTestStatistic_Diffs(rmsX, rmsY)
def computeBandThresh(nulls, alpha, side):
    '''
    Given an (n_nulls x n_bins) array of null test-statistic traces, find
    the ordinate at each bin that admits alpha/2 of the nulls on the given
    side ('upper' or 'lower').  Returns a 1D array: the significance band.

    Fix: an unrecognized `side` used to fall through and raise a confusing
    NameError further down; it now raises ValueError immediately.
    '''
    if side == 'upper':
        isReversed = True
    elif side == 'lower':
        isReversed = False
    else:
        raise ValueError(f"side must be 'upper' or 'lower', got {side!r}")
    # Explicitly +1 to the cutoff count here and -1 when indexing below to
    # keep clear where the threshold sits under 0-indexing.
    propNull = int(((alpha / 2) * len(nulls)) + 1)
    significanceBand = []
    for point in range(len(nulls[0])):
        nullOrdinates = nulls[:, point]
        sortedVals = sorted(nullOrdinates, reverse=isReversed)
        significanceBand.append(sortedVals[propNull - 1])
    return np.asarray(significanceBand)
def computeGlobalCrossings(nulls, lowerBand, upperBand):
    """
    For each null trace, test whether it escapes the supplied significance
    bands *anywhere* along the alley; return the escaping fraction (the
    observed p-value of a crossing occurring by chance).
    """
    nCross = 0
    for probe in nulls:
        if np.any((probe > upperBand) | (probe < lowerBand)):
            nCross += 1
    return nCross / len(nulls)
def global_FWER_alpha(nulls, unit, alpha=0.05):
    """
    Calculate the global, FWER-corrected bands: walk an increasingly strict
    grid of pointwise alphas until the fraction of null traces that cross
    the resulting bands anywhere drops below the unit's FWER target.

    Returns (selected pointwise alpha, lower band, upper band); all None
    when no alpha on the grid satisfies the criterion.
    """
    # unit.acorr is a proportion (decimal), not an integer list cutoff.
    FWERtarget = unit.acorr
    # Start at 0.017 because that is the largest the adjusted p-value
    # could be: 0.05 / (3 * 1).
    for adjustedAlpha in np.linspace(0.017, 1e-4, 50):
        lowerBand = computeBandThresh(nulls, adjustedAlpha, 'lower')
        upperBand = computeBandThresh(nulls, adjustedAlpha, 'upper')
        if computeGlobalCrossings(nulls, lowerBand, upperBand) < FWERtarget:
            return adjustedAlpha, lowerBand, upperBand
    return None, None, None
def shuffleArray(array, field_idx):
    """In place: within every row of `array`, randomly permute the values
    sitting at the bin positions given by `field_idx`."""
    for row in array:
        row[field_idx] = np.random.permutation(row[field_idx])
def consecutive(data, stepsize=1):
    """Split a 1D array into runs of consecutive values (neighbors differing
    by exactly `stepsize`); returns a list of sub-arrays."""
    breakPoints = np.where(np.diff(data) != stepsize)[0] + 1
    return np.split(data, breakPoints)
def findField(rms, sthresh=3, rthresh=0.2):
    """
    Identify a firing field: `sthresh` or more contiguous bins of the
    trial-averaged ratemap at or above `rthresh` of the map's max.

    Returns (field, field_idx): a bool and the concatenated bin indices of
    every qualifying run, or (False, None) when no run is long enough.

    Fix: the bare `except:` (which also swallowed unrelated errors such as
    KeyboardInterrupt) is narrowed to the ValueError that np.concatenate
    raises on an empty sequence.
    """
    mean = np.nanmean(rms, axis=0)
    fi = np.where(mean >= (rthresh * np.nanmax(mean)))[0]
    runs = [run for run in consecutive(fi) if len(run) >= sthresh]
    try:
        # np.concatenate raises ValueError on an empty list -> no field.
        field_idx = np.concatenate(runs)
    except ValueError:
        return False, None
    return True, field_idx
def genNNulls(n, rms, labels, txtX, txtY):
    """
    Generates n null test statistics, hard coded
    now to be the binwise diff of avg(txtA) - avg(txtB)
    Returns np array nXl where l is length of 1d RM in bins
    """
    shuffpos = False # toggle to shuffle bins within field
    # Row width comes from the project-wide bin count (Def.singleAlleyBins).
    nulls = np.empty((0,Def.singleAlleyBins[0]-1)) # by convention long dim is first
    if shuffpos:
        # Only shuffle within the detected field; fall back to plain label
        # shuffling when no field is found.
        result, field_idx = findField(rms)
        if result == False: # no good field
            shuffpos = False
    rmsshuffle = copy.deepcopy(rms)
    for i in range(n):
        # Each iteration: permute labels, split the (possibly bin-shuffled)
        # ratemaps into the two texture groups, take the mean difference.
        shufflabels = np.random.permutation(labels)
        if shuffpos:
            shuffleArray(rmsshuffle, field_idx) # shuffle in place within rows
        srmsX, srmsY = rmsshuffle[np.where(shufflabels==txtX)[0],:], rmsshuffle[np.where(shufflabels==txtY)[0],:]
        null = computeTestStatistic_Diffs(srmsX, srmsY)
        # NOTE(review): vstack inside the loop is O(n^2) in copies;
        # preallocating an (n, bins) array would be faster for large n.
        nulls = np.vstack((nulls, null))
    return nulls
def makeRMS(unit, alley):
    """
    Create array of 1d ratemaps each row is a visit
    return array and label array of txt present
    """
    # Width comes from the project-wide bin count; every visit's
    # 'ratemap1d' must match it for the vstack below to succeed.
    rms = np.empty((0, Def.singleAlleyBins[0]-1))
    labels = np.empty((0))
    for visit in unit.alleys[alley]:
        rms = np.vstack((rms, visit['ratemap1d']))
        labels = np.hstack((labels, visit['metadata']['stimulus']))
    return rms, labels
def unitPermutationTest_SinglePair(unit, alley, txtX, txtY, nnulls, plot=False, returnInfo=True):
    """
    Wrapper function for global_FWER_alpha() that plots results.

    Runs the full permutation test for one texture pair at one alley:
    builds nnulls null statistics, finds the FWER-corrected global bands,
    and locates where the observed statistic escapes them.

    Returns (globalCrossings, pointwiseCrossings, bounds, stat); the first
    two are None when no alpha satisfied the FWER criterion or the
    statistic never crossed the global band.
    NOTE(review): only returns when returnInfo is truthy; with
    returnInfo=False the function implicitly returns None.
    """
    rms, labels = makeRMS(unit, alley)
    nulls = genNNulls(nnulls,rms,labels,txtX,txtY)
    FWERalphaSelected, glowerBand, gupperBand = global_FWER_alpha(nulls, unit)
    if FWERalphaSelected == None:
        # No alpha on the search grid satisfied the FWER criterion;
        # null out every output.
        glowerBand, gupperBand, pwAlphaLower, pwAlphaUpper = None, None, None, None
        globalCrossings, pointwiseCrossings, bounds, stat = None, None, None, None
    else:
        stat = genRealStat(unit, alley, txtX, txtY)
        #Below, calculate the pw alpha bc significantly modulated regions are defined
        # as those that pass the global band somewhere but then their extent is defined
        # as the whole region where they pass the pointwise band. See Buzsaki paper.
        pwAlphaUpper, pwAlphaLower = computeBandThresh(nulls, 0.05, 'upper'), computeBandThresh(nulls, 0.05, 'lower')
        globalCrossings = np.where(np.logical_or(stat > gupperBand, stat < glowerBand))[0]
        if globalCrossings.shape[0] > 0:
            pointwiseCrossings = np.where(np.logical_or(stat > pwAlphaUpper, stat < pwAlphaLower))[0]
        else:
            globalCrossings, pointwiseCrossings = None, None
    if plot:
        # NOTE(review): plotting assumes the test ran (stat/bands not
        # None); with a failed FWER search this would plot None traces.
        plt.plot(nulls.T, 'k', alpha=0.4)
        plt.plot(stat,'g')
        plt.xlabel("Linearized Position, Long Axis of Alley")
        plt.ylabel("Difference in Firing Rate")
        plt.title(f"Permutation Test Results for Texture {txtX} vs {txtY} on Alley {alley}")
        for band, style in zip([glowerBand, gupperBand, pwAlphaLower, pwAlphaUpper], ['r', 'r', 'r--', 'r--']):
            plt.plot(band, style)
    if returnInfo:
        # Bounds are packed (globalLower, globalUpper, pwLower, pwUpper).
        bounds = glowerBand, gupperBand, pwAlphaLower, pwAlphaUpper
        return globalCrossings, pointwiseCrossings, bounds, stat
def permutationResultsLogger(d, fname):
    """
    Write the crossings dict d (alley -> pair -> {'global', 'pointwise'})
    to '<fname>.csv'.  '_PASS' is appended to the filename when any entry
    holds a real result (i.e. is not the 'XXX' placeholder).

    Fixes: the redundant f.close() inside the `with` block is removed, and
    the file is opened with newline='' as the csv module documentation
    requires for csv.writer.
    """
    doesPass = False
    for alley in [1, 3, 5, 7, 8, 10, 11, 16, 17]:
        for pair in ["AB", "BC", "CA"]:
            for crossType in ["global", "pointwise"]:
                if d[alley][pair][crossType] != 'XXX':
                    doesPass = True
    savename = fname + "_PASS" if doesPass else fname
    with open(savename + '.csv', "w", newline='') as f:
        w = csv.writer(f, delimiter=' ')
        for alley in [1, 3, 5, 7, 8, 10, 11, 16, 17]:
            w.writerow([alley])
            for pair in ["AB", "BC", "CA"]:
                w.writerow([pair])
                for crossType in ["global", "pointwise"]:
                    w.writerow([crossType, d[alley][pair][crossType]])
def unitPermutationTest_AllPairsAllAlleys(unit, nnulls,fpath, logger=True, plot='sepFile'):
    """
    Wrapper function to complete permutation tests for a unit
    across all alleys and all pairwise stim (A,B,C) combinations
    Pointwise p-value is set to 0.05
    Global p-value is set to 0.00098 (0.05/(3*17))
    All perm tests can be saved to a file for later use, depending on option:
    Plots will be in a 17x3 grid where each row is an alley 1-17
    and each column is a test stat in order AB, BC, CA
    plot = False -> Do not plot
    plot = sepFile -> Plot all test results to it's own file in the fpath dir
    plot = addFile -> Do not save as this plot is an addon to another file's
    plotting routines (which will save the file itself)
    """
    if plot != False:
        fig, axes = plt.subplots(9, 3, figsize=(12,12), dpi=200) #bigger plot, bigger dpi
    pairs = ["AB", "BC", "CA"]
    # NOTE(review): `stamp` is a module-level global set in the __main__
    # block; calling this function without it defined raises NameError.
    fname = fpath + f"{stamp}_{unit.name}_{Def.singleAlleyBins[0]-1}bins_{Def.smoothing_1d_sigma}smooth_{Def.includeRewards}R_{Def.velocity_filter_thresh}vfilt_permutationResults"
    # 'XXX' is the placeholder meaning "no significant crossings recorded".
    crossings = {i:{pair:{'global':"XXX", 'pointwise':"XXX"} for pair in pairs} for i in [1,3,5,7,8,10,11,16,17]}
    axCounter = 0
    for alley in unit.validAlleys:
        print(f"Running Permutation test in alley {alley}")
        for pair in pairs:
            # The 2-char pair string splits into the two texture labels.
            txtX, txtY = pair[0], pair[1]
            globalCrossings, pointwiseCrossings, bounds, stat = unitPermutationTest_SinglePair(unit, alley, txtX, txtY, nnulls,
                                                                                              plot=False, returnInfo=True)
            if globalCrossings is not None:
                crossings[alley][pair]['global'] = globalCrossings
                crossings[alley][pair]['pointwise'] = pointwiseCrossings
            conditionName = unit.name + "_" + str(alley) + "_" + pair
            if plot != False and bounds[0] is not None:
                # the plot keyword will tell plotting fx whether to save sep or leave live for sep file to save
                plotPermutationResults(unit, bounds, stat, conditionName, globalCrossings, pointwiseCrossings, fig.axes[axCounter])
            axCounter += 1 # increment to get the next subplot next iteration.
    plt.suptitle(f"Permutation Test Results for {unit.name}")
    if logger == True:
        permutationResultsLogger(crossings, fname)
    if plot == 'sepFile':
        # just in case this is buggy in future: when sep script is saving the fpath var is ''
        plt.savefig(fname + ".svg")
        plt.close()
    elif plot == 'addFile':
        pass # just to be explicit that if another script is saving this plot
             # to its own set of plots (e.g the ratemap routine) then leave open
def plotPermutationResults(unit, bounds, stat, conditionName, globalCrossings, pointwiseCrossings, ax):
    """
    If the observed test statistic passes the test, plot bounds.
    Plot test statistic and original linear ratemaps on top
    Does not save, that is done in the wrapper fx for all pairs/alleys (or
    in sep script calling it)
    """
    colorLookup = {'A':'r', 'B':'b', 'C': 'g'} # keep color coordination
    # Get the real traces. Should refactor so I don't need to do this here and in test itself.
    # conditionName is "<unitname>_<alley>_<pair>"; the 2-char pair string
    # unpacks into the two texture labels.
    txtX, txtY = conditionName.split("_")[2]
    alley = int(conditionName.split("_")[1])
    labels = getLabels(unit, alley)
    _, rmsX = poolTrials(unit, alley, labels, txtX)
    _, rmsY = poolTrials(unit, alley, labels, txtY)
    traceX, traceY = np.mean(rmsX, axis=0), np.mean(rmsY, axis=0)
    # NOTE(review): the producer packs bounds as (globalLower, globalUpper,
    # pwLower, pwUpper); the names below unpack in the opposite order.
    # fill_between is symmetric in its two band arguments so the plot is
    # still correct, but the naming should be reconciled.
    g_upper, g_lower, pw_upper, pw_lower = bounds
    ax.fill_between(range(len(g_upper)), g_upper, g_lower, color='cornflowerblue')
    ax.fill_between(range(len(pw_upper)), pw_upper, pw_lower, color='darkblue')
    ax.plot(stat, 'k')
    ax.plot(traceX, colorLookup[txtX])
    ax.plot(traceY, colorLookup[txtY])
    # Were plotting all test results so if it failed, no crossings to highlight
    if globalCrossings is not None:
        ax.scatter(globalCrossings, stat[globalCrossings], c='cornflowerblue', marker='^')
        ax.scatter(pointwiseCrossings, stat[pointwiseCrossings], c='darkblue', marker='^')
    # Red title marks a condition with significant global crossings.
    if globalCrossings is not None:
        ax.set_title(f"{conditionName.split('_')[1:]}", color='r')
    else:
        ax.set_title(f"{conditionName.split('_')[1:]}", color='k')
if __name__ == '__main__':
    # Session/animal selection and I/O locations.
    rat = "R886"
    expCode = "BRD1"
    datafile = f"E:\\Ratterdam\\{rat}\\{rat}{expCode}\\"
    # NOTE(review): single backslash before {rat} below (works only because
    # '\R' is not a recognized escape) -- inconsistent with datafile above.
    fpath = f"E:\\Ratterdam\{rat}\\permutation_tests\\{expCode}\\"
    # Module-level global; also read by unitPermutationTest_AllPairsAllAlleys.
    stamp = util.genTimestamp()
    alleyTracking, alleyVisits, txtVisits, p_sess, ts_sess = Parse.getDaysBehavioralData(datafile, expCode)
    if not os.path.isdir(fpath):
        os.mkdir(fpath)
    print(expCode)
    # Walk the session directory and run the permutation battery on every
    # non-old, defined maze-1 cluster file.
    for subdir, dirs, fs in os.walk(datafile):
        for f in fs:
            if 'cl-maze1' in f and 'OLD' not in f and 'Undefined' not in f:
                clustname = subdir[subdir.index("TT"):] + "\\" + f
                unit = Core.UnitData(clustname, datafile, expCode, Def.alleyBounds, alleyVisits, txtVisits, p_sess, ts_sess)
                unit.loadData_raw()
                validalleys = []  # NOTE(review): never used afterwards
                # NOTE(review): the loop variable `a` is unused, so
                # checkInclusion runs with identical arguments on every
                # iteration (and the full test may run 9 times per unit);
                # confirm whether the alley was meant to be passed in.
                for a in [16, 17, 3, 1, 5, 7, 8, 10, 11]:
                    valid, acorr, alleys = util.checkInclusion(unit, 3)
                    if valid:
                        print(clustname)
                        unit.acorr = acorr
                        unit.validAlleys = alleys
                        unitPermutationTest_AllPairsAllAlleys(unit, 1000, fpath)
                    else:
                        print(f"{clustname} not run")
| whock3/ratterdam | Beltway_Project/ratterdam_PermutationTests.py | ratterdam_PermutationTests.py | py | 16,496 | python | en | code | 0 | github-code | 36 |
35603667971 | import pika
import ssl
import json
class Dev:
    """Small RabbitMQ (AmazonMQ) publisher used by the chat module.

    SECURITY(review): the broker username/password are hard-coded in the
    AMQPS URL below -- these credentials should be moved to environment
    variables / a secrets store and the exposed password rotated.
    """
    def __init__(self):
        # TLS 1.2 with ECDHE+AESGCM cipher suites (ECDSA excluded).
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        ssl_context.set_ciphers('ECDHE+AESGCM:!ECDSA')
        url = f"amqps://ryanl:842684265santos@b-b86d75fd-5111-4c3c-b62c-b999e666760a.mq.us-east-1.amazonaws.com:5671"
        parameters = pika.URLParameters(url)
        parameters.ssl_options = pika.SSLOptions(context=ssl_context)
        conexão = pika.BlockingConnection(parameters)
        # Channel used by send(); opened once per instance.
        self.canal = conexão.channel()
    def send(self, nome: str, logo: str, message: str, hora: str):
        # Serialize the chat payload and publish it persistently
        # (delivery_mode=2) to the 'chat' exchange.
        mensagem = json.dumps(
            {"nome": nome, "logo": logo, "hora": hora, "mensagem": message})
        self.canal.basic_publish(exchange='chat', body=mensagem, routing_key='tag_mensagem',
                                 properties=pika.BasicProperties(delivery_mode=2))
        # NOTE(review): closing the channel here makes the instance
        # single-use; a second send() on the same Dev will fail.
        self.canal.close()
# cliente = Dev()
# cliente.send("fredekel", "java", "Boa tio, ficou show de bola!", "18:43")
| ryanbsdeveloper/opensource-chat | modules/chat/dev.py | dev.py | py | 993 | python | en | code | 2 | github-code | 36 |
26771615241 | #!/usr/bin/python3
""" Display the id of a Github user using Github's API """
import requests
import sys
def get_hub():
    """ Fetch the authenticated GitHub user's id and print it.
    Credentials come from the command line: argv[1] is the username,
    argv[2] the personal access token used as the password. """
    resp = requests.get("https://api.github.com/user",
                        auth=(sys.argv[1], sys.argv[2]))
    print(resp.json().get('id'))
if __name__ == '__main__':
    # Run only when executed as a script, not on import.
    get_hub()
| Alouie412/holbertonschool-higher_level_programming | 0x11-python-network_1/10-my_github.py | 10-my_github.py | py | 385 | python | en | code | 0 | github-code | 36 |
18832494510 | from collections import defaultdict
class Solution:
    def isAnagram(self, s: str, t: str) -> bool:
        """Return True iff t is an anagram of s (same characters with the
        same multiplicities)."""
        counts = defaultdict(int)
        for ch in s:
            counts[ch] += 1
        for ch in t:
            counts[ch] -= 1
        # Anagrams leave every character's net count at exactly zero.
        return all(v == 0 for v in counts.values())
# Quick manual check: expected output is True then False.
s = Solution()
print(s.isAnagram("anagram", "nagaram"))
print(s.isAnagram("rat", "car"))
def process_basic_information(model, data):
    """Copy the basic statistics of a Gurobi model into the `data` dict,
    keyed by attribute name: coefficient/bound/RHS ranges, nonzero counts,
    constraint/variable counts, and the model name."""
    _ATTRIBUTES = (
        "MinCoeff", "MaxCoeff", "MinBound", "MaxBound", "MinRHS", "MaxRHS",
        "MaxQCCoeff", "MinQCCoeff", "MaxQCLCoeff", "MinQCLCoeff",
        "MaxQCRHS", "MinQCRHS", "NumNZs", "DNumNZs", "NumQNZs", "NumQCNZs",
        "NumConstrs", "NumQConstrs", "NumSOS", "NumGenConstrs",
        "MinObjCoeff", "MaxObjCoeff", "MaxQObjCoeff", "MinQObjCoeff",
        "NumVars", "NumIntVars", "NumBinVars", "NumPWLObjVars", "ModelName",
    )
    for attr in _ATTRIBUTES:
        data[attr] = getattr(model, attr)
| Gurobi/gurobi-modelanalyzer | src/gurobi_modelanalyzer/basic_analyzer.py | basic_analyzer.py | py | 1,218 | python | en | code | 11 | github-code | 36 |
39151761897 | import io
import json
import logging
from fdk import response
def handler(ctx, data: io.BytesIO = None):
    """OCI/Fn function entry point: greet the caller by name.

    Parses {"name": ...} from the request body; falls back to "World" when
    the body is missing or not valid JSON.  Returns a JSON fdk response.

    Fix: the original caught `(Exception, ValueError)` -- ValueError is
    already a subclass of Exception, so the tuple was redundant; the catch
    set is unchanged.
    """
    name = "World"
    try:
        body = json.loads(data.getvalue())
        name = body.get("name")
    except Exception as ex:
        logging.getLogger().info('error parsing json payload: ' + str(ex))
    logging.getLogger().info("Inside Python Hello World function")
    return response.Response(
        ctx, response_data=json.dumps(
            {"message": "Hello {0}".format(name)}),
        headers={"Content-Type": "application/json"}
    )
| wlloyduw/SAAF | jupyter_workspace/platforms/oracle/hello_world/func.py | func.py | py | 576 | python | en | code | 25 | github-code | 36 |
22460147801 | import zipper
import arcpy
try:
    # Inputs from the ArcGIS script-tool dialog.
    # NOTE(review): `dir` and `zipfile` shadow a builtin / stdlib module
    # name in this script's scope.
    dir = arcpy.GetParameterAsText(0)       # directory holding shapefiles
    zipfile = arcpy.GetParameterAsText(1)   # output .zip path
    mode = arcpy.GetParameterAsText(2)      # zip-file write mode
    shape_zipper = zipper.ShapefileZipper() # Create Class Instance
    result = shape_zipper.zip_shapefile_directory(input_dir=dir, output_zipfile=zipfile, zip_file_mode=mode)
    if len(result) > 0:
        results = str(result)
        arcpy.SetParameterAsText(3, results)
        arcpy.AddMessage("!!!!!!!!!!!!!\n@@ SUCCESS @@\n!!!!!!!!!!!!!\nResult: " + results)
    else:
        # NOTE(review): a bare `raise` with no active exception raises
        # RuntimeError here, which the outer `except` then reports -- it
        # works, but an explicit exception would be clearer.
        raise
except:
    arcpy.AddError("ZIP FILES NOT CREATED!")
import sys
sys.stdin = open('단조.txt')  # redirect stdin to the local test-input file
T = int(input())  # number of test cases
for tc in range(1, T+1):
    N = int(input())  # count of numbers on the next line
    data = list(map(int, input().split()))
    tmp = []     # all pairwise products
    lis = []     # NOTE(review): unused
    a = 2        # NOTE(review): unused
    result = -1  # stays -1 when no monotone product exists
    # Collect the product of every unordered pair.
    for i in range(len(data)):
        for j in range(1+i, len(data)):
            tmp.append(data[i]*data[j])
    # Keep products whose decimal digits never decrease left-to-right.
    for i in tmp:
        tmp2 = str(i)
        chk = 0
        for j in range(len(tmp2)-1):
            if tmp2[j] > tmp2[j+1]: # a digit drop -> not monotone
                chk = 1
                break
        if not chk:
            result = i
    # NOTE(review): `result` ends up as the LAST qualifying product in
    # generation order, not the maximum -- if the task asks for the
    # largest monotone product, this should take max() instead. Verify.
    print('#{} {}'.format(tc, result))
75001876582 | # Given a list of student grades in the format:
# records - "[name]: [grade]"
# find the student with the highest avg grade
# all students have different avgs
# no spaces in names
# each grade is an int
# output = "John"
def solution(records):
    """Return the name of the student with the highest average grade.

    Each record is "<name>: <grade>".  Names contain no spaces, grades are
    ints, and averages are guaranteed distinct.  Returns "" when there are
    no records.
    """
    # Accumulate per-student totals and entry counts, in first-seen order.
    totals = {}
    counts = {}
    for record in records:
        name, grade = record.split(": ")
        totals[name] = totals.get(name, 0) + int(grade)
        counts[name] = counts.get(name, 0) + 1
    # Pick the student whose average strictly beats the best seen so far.
    best_name = ""
    best_avg = 0
    for name, total in totals.items():
        avg = total / counts[name]
        if avg > best_avg:
            best_name = name
            best_avg = avg
    return best_name
# Smoke checks: expected output is "John" then "Kate".
print(solution(["John: 5", "Michael: 4", "Ruby: 2", "Ruby: 5", "Michael: 5"])) # "John"
print(solution(["Kate: 5", "Kate: 5", "Maria: 2", "John: 5", "Michael: 4", "John: 4"])) # "Kate"
| stkirk/algorithm-practice | assessments/db2_gradebook.py | db2_gradebook.py | py | 2,002 | python | en | code | 0 | github-code | 36 |
10222533594 | '''
Created on Nov 04, 2015 5:49:26 PM
@author: cx
what I do:
i parse the freebase dump lines readed by FreebaseDumpReader
what's my input:
what's my output:
'''
import json
class FreebaseDumpParserC(object):
    """Parses Freebase RDF dump triples (as read by FreebaseDumpReader).

    Each object is represented as lvCol: a list of 3-column triples
    [subject, edge, target].  The methods below extract names, aliases,
    descriptions, types, Wikipedia links, and neighboring mids.
    """
    def __init__(self):
        # Full RDF edge URIs used to pick out specific predicates.
        self.TypeEdge = "<http://rdf.freebase.com/ns/type.object.type>"
        self.DespEdge = "<http://rdf.freebase.com/ns/common.topic.description>"
        self.NameEdge = "<http://www.w3.org/2000/01/rdf-schema#label>"
        self.AliasEdge = "<http://rdf.freebase.com/ns/common.topic.alias>"
        self.NotableEdge = "<http://rdf.freebase.com/ns/common.topic.notable_types>"
        self.InstanceEdge = "<http://rdf.freebase.com/ns/type.type.instance>"
        self.lWikiUrlEdge = ["<http://rdf.freebase.com/ns/common.topic.topic_equivalent_webpage>","<http://rdf.freebase.com/ns/common.topic.topical_webpage>"]
        self.WikiEnIdEdge = '<http://rdf.freebase.com/key/wikipedia.en_id>'
    @staticmethod
    def GetObjId(lvCol):
        """Return the object's mid (/m/... or /en/...) from its first
        triple's subject column, or "" when there are no triples."""
        if lvCol == []:
            return ""
        return FreebaseDumpParserC.GetIdForCol(lvCol[0][0])
    @staticmethod
    def DiscardPrefix(col):
        """Strip the <http://.../> URI wrapper and return the trailing
        identifier with dots converted to slashes (e.g. '/type/object')."""
        if len(col) < 2:
            return col
        # Only URI-wrapped columns are rewritten; everything else passes through.
        if (col[0] != '<') | (col[len(col) - 1] !=">"):
            return col
        mid = col.strip("<").strip(">")
        vCol = mid.split("/")
        target = vCol[len(vCol)-1]
        return '/' + target.replace('.','/')
        #return target
    @staticmethod
    def GetIdForCol(col):
        """Return the column's value as a Freebase id when it looks like a
        /m/ or /en/ identifier, else ""."""
        target = FreebaseDumpParserC.DiscardPrefix(col)
        if len(target) < 2:
            return ""
        if (target[:len('/m/')] == "/m/") | (target[:len('/en/')]=='/en/'):
            return target
        return ""
    @staticmethod
    def FetchTargetsWithEdge(lvCol,Edge):
        '''
        fetch col with edge (obj edge col)
        '''
        lTar = []
        for vCol in lvCol:
            if vCol[1] == Edge:
                lTar.append(vCol[2])
        return lTar
    @staticmethod
    def FetchPairWithEdge(lvCol, Edge):
        """Like FetchTargetsWithEdge, but return (subject, target) pairs."""
        lTar = []
        for vCol in lvCol:
            if vCol[1] == Edge:
                lTar.append((vCol[0], vCol[2]))
        return lTar
    @staticmethod
    def FetchPairStringWithEdge(lvCol, Edge):
        """Same as FetchPairWithEdge but keep only pairs where both members
        are quoted string literals."""
        lTar = FreebaseDumpParserC.FetchPairWithEdge(lvCol, Edge)
        lStr = []
        for (mid, wiki) in lTar:
            if (not FreebaseDumpParserC.IsString(mid)) or (not FreebaseDumpParserC.IsString(wiki)):
                continue
            lStr.append((mid, wiki))
        return lStr
    def FetchWikiPair(self, lvCol):
        """(subject, target) pairs along the English-Wikipedia-id edge."""
        return self.FetchPairWithEdge(lvCol, self.WikiEnIdEdge)
    @staticmethod
    def FetchTargetStringWithEdge(lvCol,Edge):
        '''
        same, but only look for english strings
        '''
        lTar = FreebaseDumpParserC.FetchTargetsWithEdge(lvCol, Edge)
        # print 'curent obj:%s' %(json.dumps(lvCol))
        # print 'edge [%s] get targets [%s]' %(Edge,json.dumps(lTar))
        lStr = []
        for tar in lTar:
            if not FreebaseDumpParserC.IsString(tar):
                continue
            text,tag = FreebaseDumpParserC.SegLanguageTag(tar)
            # Keep untagged literals and explicit-English ones.
            if (tag == "") | (tag == 'en'):
                lStr.append(text)
        # print 'get text [%s]' %(json.dumps(lStr))
        return lStr
    def GetField(self,lvCol,field):
        """Dispatch on a case-insensitive field name (Name/Desp/Alias);
        raises NotImplementedError for anything else."""
        if field.title() == 'Name':
            return self.GetName(lvCol)
        if field.title() == 'Desp':
            return self.GetDesp(lvCol)
        if field.title() == 'Alias':
            return '\n'.join(self.GetAlias(lvCol))
        raise NotImplementedError
    def GetName(self,lvCol):
        """First English label of the object, or ""."""
        lStr = self.FetchTargetStringWithEdge(lvCol, self.NameEdge)
        if [] == lStr:
            return ""
        return lStr[0]
    def GetAlias(self,lvCol):
        """All English aliases of the object (list of strings)."""
        return self.FetchTargetStringWithEdge(lvCol, self.AliasEdge)
    def GetDesp(self,lvCol):
        """All English descriptions, newline-joined into one string."""
        return '\n'.join(self.FetchTargetStringWithEdge(lvCol, self.DespEdge))
    def GetWikiId(self,lvCol):
        """First English Wikipedia page id, or ""."""
        lWikiId = self.FetchTargetStringWithEdge(lvCol, self.WikiEnIdEdge)
        if [] == lWikiId:
            return ""
        return lWikiId[0]
    def GetNeighbor(self,lvCol):
        """[edge, mid] pairs for every triple whose target is a Freebase id."""
        lNeighbor = []
        for vCol in lvCol:
            NeighborId = self.GetIdForCol(vCol[2])
            if "" != NeighborId:
                NeighborEdge = self.DiscardPrefix(vCol[1])
                lNeighbor.append([NeighborEdge,NeighborId])
        return lNeighbor
    def GetWikiUrl(self,lvCol):
        """English Wikipedia URLs found on the topic-webpage edges,
        with the angle-bracket wrapper stripped."""
        lWikiUrl = []
        for edge in self.lWikiUrlEdge:
            lTar = self.FetchTargetsWithEdge(lvCol, edge)
            # if [] != lTar:
            #     print 'wiki target %s' %(json.dumps(lTar))
            for tar in lTar:
                if not 'http' in tar:
                    continue
                if not 'en.wikipedia' in tar:
                    continue
                lWikiUrl.append(tar.strip('<').strip('>'))
        # if [] != lWikiUrl:
        #     print 'wikiurl: %s' %(json.dumps(lWikiUrl))
        return lWikiUrl
    def GetType(self,lvCol):
        """All type ids attached via the type.object.type edge."""
        lTar = self.FetchTargetsWithEdge(lvCol, self.TypeEdge)
        lType = []
        for tar in lTar:
            Type = self.DiscardPrefix(tar)
            # if '/common' == Type[:len('/common')]:
            #     continue
            lType.append(Type)
        return lType
    def GetNotable(self,lvCol):
        """The first notable type, or ""."""
        lTar = self.FetchTargetsWithEdge(lvCol, self.NotableEdge)
        if [] == lTar:
            return ""
        return self.DiscardPrefix(lTar[0])
    @staticmethod
    def IsString(s):
        """True when s is a quoted RDF literal, including @lang-tagged
        literals like "text"@en."""
        if s[0] != '\"':
            return False
        if s[-1] == '\"':
            return True
        vCol = s.split('@')
        if vCol[0][-1] == '\"':
            return True
        return False
    @staticmethod
    def SegLanguageTag(s):
        """Split a literal into (text, language tag); tag is "" when no
        @lang suffix is present."""
        vCol = s.split("@")
        lang = ""
        text = vCol[0].strip('"')
        if (len(vCol) >= 2):
            lang = vCol[1]
        return text,lang
| xiaozhuyfk/AMA | query_processor/FreebaseDumpParser.py | FreebaseDumpParser.py | py | 6,212 | python | en | code | 0 | github-code | 36 |
39416108196 | #genral tree implementation
class Tree:
    """Binary search tree (student exercise).

    Unusually, values larger than a node go to the LEFT child (see insert).
    Several methods are incomplete -- see the NOTE(review) comments.
    """
    Root=None      # root Node; set in __init__
    toSearch=None  # NOTE(review): unused
    locy=0         # NOTE(review): unused
    def __init__(self):
        # Prompts interactively for the root value on construction.
        self.Root=Node(int(input("enter the value of root node")))
    def insert(self,value,i):
        # i is the subtree root currently being examined.
        if self.Root==None:
            self.Root=Node(value)
            return
        while True:
            print(i.data,value)
            if value>i.data:
                if i.left==None:
                    print("aaya")
                    i.left=Node(value)
                    return
                else:
                    self.insert(value,i.left)
                    return
            else :
                print("ldd")
                if i.right==None:
                    i.right=Node(value)
                    return
                else:
                    # NOTE(review): recurses into i.left here even though
                    # this is the right-child branch -- almost certainly
                    # should be i.right. Verify against intended ordering.
                    self.insert(value,i.left)
                    return
    def delete(self,delloc):
        # NOTE(review): incomplete -- self.getAddress is not defined
        # anywhere, so calling delete raises AttributeError.
        previousnode=self.getAddress(delloc[:len(delloc)-1])
    def display(self,childnode):
        # Pre-order-ish print of the subtree.
        # NOTE(review): when childnode.right is None this returns before
        # ever visiting the left subtree, so left descendants are skipped.
        print(childnode.data)
        if childnode.right!=None:
            self.display(childnode.right)
        else:
            return childnode
        if childnode.left!=None:
            self.display(childnode.left)
        else:
            return childnode
class Node:
    """A single tree node: payload plus left/right child links."""
    def __init__(self, value):
        self.data = value  # payload (larger values go left in this Tree)
        self.left = None   # left child
        self.right = None  # right child
# Simple interactive menu driving the Tree above.
t=Tree()
while True:
    option=int(input("1.insert\n2.delete\n3.display\n4.exit"))
    if option==1:
        data=int(input("enter data to insert"))
        t.insert(data,t.Root)
    if option ==2:
        # NOTE(review): delete is not implemented; this only prints the
        # root's children.
        print(t.Root.right,t.Root.left)
        #t.delete(int(input("enter the node location")))
    if option==3:
        t.display(t.Root)
    if option==4:
        exit()
21491336327 |
import tensorflow as tf
import random
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data
#from cell import ConvLSTMCell
timesteps=28
batch_size=128
total_step=10000
class Minist(object):
    """Container for MNIST experiment hyper-parameters (28x28 inputs,
    10 classes)."""
    def __init__(self, timesteps=0, batch_size=0,total_step=0, learning_rate=1):
        self.timesteps = timesteps          # sequence length fed to the RNN
        self.num_input = 28                 # pixels per row / per timestep
        self.num_class = 10                 # digit classes
        self.batch_size = batch_size
        self.total_step = total_step
        self.learning_rate =learning_rate
def set_archit():
    """Randomly draw an 8-layer architecture from {conv, lstm, pool, fc}.

    Constraints enforced:
      * the first layer is never 'pool' (fix: the original re-drew only
        once, so 'pool' could still slip through on the second draw);
      * the last layer is always 'fc';
      * 'pool' never directly follows 'lstm' (it is replaced by 'lstm').

    Prints each chosen layer and returns the list of 8 layer names.
    """
    my_list = ['conv', 'lstm', 'pool', 'fc']
    new_list = []
    secure_random = random.SystemRandom()
    for i in range(8):
        x = secure_random.choice(my_list)
        # Re-draw until the first layer is not a pooling layer.
        while i == 0 and x == 'pool':
            x = secure_random.choice(my_list)
        if i == 7 and x != 'fc':
            x = 'fc'
        # A pool layer cannot directly consume an LSTM's sequence output.
        if x == 'pool' and i > 0 and new_list[i - 1] == 'lstm':
            x = 'lstm'
        print(x)
        new_list.append(x)
    return new_list
def conv_net(x, reuse, nb_filter, size_kernel):
    """First conv layer: reshape flat MNIST vectors to NHWC (28x28x1)
    images and apply a ReLU conv2d.
    NOTE(review): `reuse` is accepted but unused -- the variable_scope
    that consumed it is commented out."""
    #with tf.variable_scope('ConvNet', reuse=reuse):
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    conv1 = tf.layers.conv2d(x, nb_filter, size_kernel, activation=tf.nn.relu)
    return conv1
def conv_nethidden(lastlayer,seq_len, nb_filter, size_kernel,LSTM=True):
    """Hidden conv layer.  With LSTM=True the incoming flat tensor is first
    reshaped to (batch, seq_len, width/seq_len, 1) so a conv2d can follow
    an LSTM; with LSTM=False the input is convolved as-is.
    Assumes the flat width is divisible by seq_len -- TODO confirm."""
    # with tf.variable_scope('ConvNetH', reuse=reuse):
    if LSTM:
        size=lastlayer.get_shape().as_list()
        print(size[1])
        lastlayer=tf.reshape(lastlayer,shape=[-1,seq_len,int(size[1]/seq_len),1])
        conv1 = tf.layers.conv2d(lastlayer, nb_filter, size_kernel, activation=tf.nn.relu)
    else:
        conv1 = tf.layers.conv2d(lastlayer, nb_filter, size_kernel, activation=tf.nn.relu)
    return conv1
def LSTMlayer(x,seq_len,lstm_size,_weights,_biases):
    """Static LSTM over seq_len steps; every step's output is projected
    with the supplied weights/biases and the projections are concatenated.
    Returns (raw step outputs, final states, concatenated projections)."""
    x = tf.unstack(x, seq_len, 1)  # split (batch, seq, feat) into seq tensors
    print(len(x))
    lstm = rnn.BasicLSTMCell(lstm_size,forget_bias=1.0)
    outputs, states = rnn.static_rnn(lstm, x, dtype=tf.float32)
    print(len(outputs))
    transformed_outputs = [tf.matmul(output, _weights['out']) + _biases['out'] for output in outputs]
    final= tf.concat(axis=1, values=transformed_outputs)
    return outputs, states,final
def from_conv_TO_lstm(net,lstm_size):
    """Reshape a flat dense output to (batch, steps, lstm_size) and run a
    dynamic LSTM over it.  Returns (all outputs, states, last-step output).
    Assumes the flat width is divisible by lstm_size -- TODO confirm."""
    #nn=tf.reshape(net,[-1,8,128])
    print ('net shape',net.get_shape())
    x=net.get_shape().as_list()
    print(x)
    r=int(x[1]/lstm_size)  # number of timesteps carved out of the flat width
    nn=tf.reshape(net,[-1,r,lstm_size])
    lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    outputs, states = tf.nn.dynamic_rnn(lstm, nn, dtype=tf.float32)
    # Transpose to (time, batch, features) so [-1] grabs the last step.
    val = tf.transpose(outputs, [1, 0, 2])
    lstm_last_output = val[-1]
    return outputs, states,lstm_last_output
#timesteps,num_hidden
def LSTM_conv(X,weights,biases,seq_len,lstm_size,nb_filter,size_kernel):
    """LSTM -> conv pipeline: run the static LSTM, then convolve over its
    concatenated per-step projections (reshaped to an image inside
    conv_nethidden via LSTM=True)."""
    x, y, z = LSTMlayer(X, seq_len=seq_len, lstm_size=lstm_size,_weights=weights,_biases=biases)
    conv = conv_nethidden(lastlayer=z,seq_len=seq_len, nb_filter=nb_filter, size_kernel=size_kernel, LSTM=True)
    return conv
def CONV_lstm(X,timesteps, weights,biases,nb_filter,size_kernel,lstm_size):
    """CONV -> pool -> dense -> LSTM pipeline.  Returns the LSTM outputs,
    states, and the projected logits of the last timestep."""
    #transition between CONV- pool-dense--lstm
    conv1=conv_net(X,reuse=False,nb_filter=nb_filter,size_kernel=size_kernel)
    pool = tf.layers.max_pooling2d(conv1, 2, 2)
    flat = tf.contrib.layers.flatten(pool)
    dense1 = tf.layers.dense(inputs=flat, units=1024)
    outputs,states,lstm_last_output=from_conv_TO_lstm(dense1,lstm_size=lstm_size)
    final = tf.matmul(lstm_last_output, weights['out']) + biases['out']
    return outputs,states, final
#mnit=Minist(timesteps,batch_size,total_step,learning_rate=0.001)
def main(total_step=10000 ,batch_size=128,timesteps=28,lstm_size=128,nb_filter=32,size_kernel=[5,5]):
    """Build one of three MNIST architectures (conv/conv/pool/fc/fc,
    lstm->conv, or conv->lstm) and train it with SGD, printing loss and
    accuracy every 200 steps.
    NOTE(review): size_kernel=[5,5] is a mutable default argument; it is
    never mutated here, but a tuple would be safer."""
    mnit=Minist(timesteps,batch_size,total_step,learning_rate=0.001)
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
    X = tf.placeholder(tf.float32, [None, mnit.timesteps, mnit.num_input])
    Y = tf.placeholder(tf.float32, [None, mnit.num_class])
    list_archi = ['conv', 'conv', 'pool', 'fc', 'fc']
    if len(list_archi)>1:
        num_hidden=len(list_archi)-1
    else:
        num_hidden=0
    # NOTE(review): weights['out'] is shaped [num_hidden, num_class] where
    # num_hidden = len(list_archi)-1; it is matmul'd against LSTM outputs
    # in the lstm branches -- confirm the intended first dimension.
    weights = {
        'out': tf.Variable(tf.random_normal([num_hidden, mnit.num_class]))}
    biases = {
        'out': tf.Variable(tf.random_normal([mnit.num_class]))}
    #print(mnit.timesteps)
    #print( mnit.num_input)
    #list_archi=set_archit()
    #list_archi=['conv','lstm']
    # NOTE(review): list_archi is reassigned here, overriding the earlier
    # value (and the commented-out random architecture).
    list_archi=['conv', 'conv', 'pool', 'fc', 'fc']
    if list_archi==['conv', 'conv', 'pool', 'fc', 'fc']:
        conv1=conv_net(X, reuse=False, nb_filter=24, size_kernel=[5,5])
        size=conv1.get_shape().as_list()
        conv2=conv_nethidden(conv1, seq_len=timesteps, nb_filter=32, size_kernel=[3,3], LSTM=False)
        pool = tf.layers.max_pooling2d(conv2, 2, 2)
        print('pool', pool.get_shape().as_list())
        flat = tf.contrib.layers.flatten(pool)
        logits = tf.layers.dense(inputs=flat, units=1024)
        logits2 = tf.layers.dense(inputs=logits, units=10)
    if list_archi==['lstm', 'conv']:
        # première LSTM-CONV (first variant: LSTM then conv)
        conv = LSTM_conv(X, weights, biases, timesteps, lstm_size, nb_filter, size_kernel)
        pool = tf.layers.max_pooling2d(conv, 2, 2)
        flat = tf.contrib.layers.flatten(pool) # essential to move to fully connected
        logits = tf.layers.dense(inputs=flat, units=1024)
        logits2 = tf.layers.dense(inputs=logits, units=10)
    if list_archi==['conv','lstm']:
        # deuxième CONV_LSTM (second variant: conv then LSTM)
        x,y,logits2=CONV_lstm(X, timesteps, weights, biases, nb_filter, size_kernel,lstm_size)
    prediction = tf.nn.softmax(logits2)
    # NOTE(review): softmax is applied above and then AGAIN inside
    # softmax_cross_entropy_with_logits (which expects raw logits) -- the
    # loss should almost certainly be fed logits2 directly.
    loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(loss_op)
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for step in range(1, total_step + 1):
            batch_x, batch_y = mnist.train.next_batch(mnit.batch_size)
            batch_x = batch_x.reshape((mnit.batch_size,mnit.timesteps, mnit.num_input))
            sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
            if step % 200 == 0 or step == 1:
                loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                    Y: batch_y})
                print("Step " + str(step) + ", Minibatch Loss= " + \
                      "{:.4f}".format(loss) + ", Training Accuracy= " + \
                      "{:.3f}".format(acc))
# Kick off training with the default hyper-parameters.
main()
9411931518 | # Primary game file
import sys, pygame
from pygame.locals import *
display_surf = pygame.display.set_mode((800, 600))
pygame.display.set_caption('Hello Pygame World!')
def run():
    """This allows for the running of the game from outside the package"""
    print("Started trying to run")
    # main game loop
    # Poll the pygame event queue forever; exit the process cleanly when the
    # window-close (QUIT) event arrives.
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                # Shut pygame down before terminating the interpreter.
                pygame.quit()
                sys.exit() | mlansari/ShellShockClone | ShellShockClone/game.py | game.py | py | 460 | python | en | code | 0 | github-code | 36
12369223367 | """Add viewed column to batch_job
Revision ID: b23863a37642
Revises: 72a8672de06b
Create Date: 2018-12-31 17:13:54.564192
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b23863a37642'
down_revision = '72a8672de06b'
branch_labels = None
depends_on = None
def upgrade():
    """Add the boolean ``viewed`` column to the ``batch_job`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # batch_alter_table rebuilds the table, which keeps this migration
    # working on back-ends with limited ALTER TABLE support (e.g. SQLite).
    with op.batch_alter_table("batch_job") as batch_op:
        # server_default='1' marks every pre-existing row as already viewed,
        # required because the new column is NOT NULL.
        batch_op.add_column(sa.Column('viewed', sa.Boolean(), nullable=False, server_default='1'))
    # ### end Alembic commands ###
def downgrade():
    """Remove the ``viewed`` column added by ``upgrade``."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("batch_job") as batch_op:
        batch_op.drop_column('viewed')
    # ### end Alembic commands ###
| golharam/NGS360-FlaskApp | migrations/versions/b23863a37642_add_viewed_column_to_batch_job.py | b23863a37642_add_viewed_column_to_batch_job.py | py | 802 | python | en | code | 3 | github-code | 36 |
39112326593 | import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from transformers import AutoTokenizer
from sklearn.model_selection import train_test_split
'''
Read the data from a pre-processed CADEC dataset and process them into a format compatible with BERT
'''
class DataProcessor():
    """
    Loads the data from a pre-processed CADEC named-entity dataset and creates a BERT dataset
    """
    def __init__(self, filename, model, seed, batch_size = 32, max_length = 512):
        # Set the device
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
        # Initialize attribute variables
        self.max_length = max_length
        self.filename = filename
        self.seed = seed # For test and train split
        self.model = model
        self.batch_size = batch_size
        # Initialize tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model)
        print('Parsing the data file...')
        # Obtain sentences and labels
        self.tokens, self.labels = self.sentence_parser()
        # Split sentences if their associated wordpiece encoding is longer than max_length
        self.split_tokens, self.split_labels = [], []
        for tok, lab in zip(self.tokens, self.labels):
            split_tok, split_lab = self.split_sentences(tok, lab)
            self.split_tokens.extend(split_tok)
            self.split_labels.extend(split_lab)
        # Create ids for labels and split into training and test set
        self.label2id, self.id2label = self.get_label_encoding_dict() # Initialize mapping of labels to ids
        # Split the dataset into 0.8 training and 0.2 test
        self.tokens_train, self.tokens_test, self.labels_train, self.labels_test = train_test_split(self.split_tokens, self.split_labels, test_size=0.20, random_state=self.seed)
        # Split the training set into 0.875 training and 0.125 validation (0.7 and 0.1 of total dataset, respectively)
        self.tokens_train, self.tokens_val, self.labels_train, self.labels_val = train_test_split(self.tokens_train, self.labels_train, test_size=0.125, random_state=self.seed)
        print('Tokenize sentences...')
        # Tokenize for BERT
        # Training set
        self.tokenized_input_train = self.tokenizer(self.tokens_train, truncation=True, is_split_into_words=True,
                                                    add_special_tokens=True, padding=True)
        self.tokenized_input_train = self.add_word_ids(self.tokenized_input_train)
        self.train_tags = self.get_bert_labels(self.tokenized_input_train, self.labels_train)
        # NOTE(review): len(...['input_ids']) is the NUMBER of training
        # sequences, not the padded sequence length, yet it is used below as
        # max_length for the val/test tokenizer calls -- confirm intent.
        self.train_max_length = len(self.tokenized_input_train['input_ids']) # The length of the longest training message
        # Validation set
        self.tokenized_input_val = self.tokenizer(self.tokens_val, truncation=True, is_split_into_words=True,
                                                  add_special_tokens=True, padding=True, max_length = self.train_max_length)
        self.tokenized_input_val = self.add_word_ids(self.tokenized_input_val)
        self.val_tags = self.get_bert_labels(self.tokenized_input_val, self.labels_val)
        # Test set
        self.tokenized_input_test = self.tokenizer(self.tokens_test, truncation=True, is_split_into_words=True,
                                                   add_special_tokens=True, padding=True, max_length = self.train_max_length)
        self.tokenized_input_test = self.add_word_ids(self.tokenized_input_test)
        self.test_tags = self.get_bert_labels(self.tokenized_input_test, self.labels_test)
        print('Preparing the dataset...')
        # Prepare the data so it is compatible with torch
        self.y_train = torch.tensor(self.train_tags).to(self.device)
        self.y_val = torch.tensor(self.val_tags).to(self.device)
        self.y_test = torch.tensor(self.test_tags).to(self.device)
        self.train_dataloader = self.create_data_loaders(self.tokenized_input_train, self.y_train)
        self.val_dataloader = self.create_data_loaders(self.tokenized_input_val, self.y_val)
        self.test_dataloader = self.create_data_loaders(self.tokenized_input_test, self.y_test)
    def sentence_parser(self):
        '''
        Read the content of filename and parses it into labels and tokens
        :return: tokens and labels: two lists containing the tokens and the labels in the dataset
        '''
        with open(self.filename, 'r') as f:
            data_raw = f.read()
        # Sentences are separated by blank lines; each line is "token<TAB>label".
        sentences = [sent.split('\n') for sent in data_raw.split('\n\n')[:-1]] # Read the sentences
        tokens = [[pair.split('\t')[0] for pair in sent] for sent in sentences] # Collect labels and tokens
        labels = [[pair.split('\t')[1] for pair in sent] for sent in sentences]
        # The 'Finding' entity class is discarded: its BIO tags are re-mapped to 'O'.
        labels = [[lab if lab not in ('I-Finding', 'B-Finding') else 'O' for lab in sent] for sent in labels]
        return tokens, labels
    def split_sentences(self, sentence, labels):
        '''
        Read the tokenized sentences and split them if they are longer than a maximum length (by default, 512)
        :param: An input tokenized sentence
        :param: The labels corresponding to the tokenized sentence
        :return: The tokenized sentence
        '''
        # The BERT encoding of the period token
        period_tok = '.'
        # Recursion takes place only if the split has to be performed
        if len(self.tokenizer.encode(sentence, is_split_into_words=True)) > self.max_length:
            idx_half = len(sentence)//2
            # Dictionary with position associated to how far each period (if any) is from the middle of the sentence
            period_offsets = {pos: abs(idx_half - pos) for pos in range(len(sentence)) if sentence[pos] == period_tok}
            if period_offsets != {}:
                # If there is a period, sort period locations based on the distance from the central point
                period_offsets_sorted = sorted(period_offsets.items(), key=lambda x: x[1])
                split_point = period_offsets_sorted[0][0] # The period location closest to the centre of the sequence
            else:
                # If there is no period, take the middle index
                split_point = idx_half
            # Define the splits based on the found splitting point
            sent1, sent2 = sentence[:split_point+1], sentence[split_point+1:]
            lab1, lab2 = labels[:split_point+1], labels[split_point+1:]
            split1, split2 = self.split_sentences(sent1, lab1), self.split_sentences(sent2, lab2) # Recursive call
            return split1[0]+split2[0], split1[1]+split2[1] # Compose lists of sub-lists of split sentences
        else:
            # Base case: the sentence fits, return it wrapped in a list.
            return [sentence], [labels]
    def train_test_split(self, test_size):
        '''
        Splits the dataset into training and test observations
        :return: Training and test data and labels
        '''
        X_train, X_test, y_train, y_test = train_test_split(self.split_tokens, self.split_labels, test_size=test_size,
                                                            random_state=self.seed)
        return X_train, X_test, y_train, y_test
    def get_label_encoding_dict(self):
        '''
        Given the training data, associate each distinct label to an id
        :return: lab2id: a dictionary mapping unique labels to ids
        '''
        labels = [] # list of unique labels
        for sent in self.labels:
            for label in sent:
                if label not in labels and label != 'O':
                    labels.append(label)
        # Sort labels by the first letter after B- and I- in the BIO tag
        labels = ['O'] + sorted(labels, key=lambda x: x[2:])
        # 'O' always receives id 0; id2lab is simply the positional list.
        lab2id = {lab: id for lab, id in zip(labels, range(len(labels)))}
        id2lab = labels
        return lab2id, id2lab
    def add_word_ids(self, tokenized_data):
        """
        Adds to the tokenized object the original word ids of the token to reconstruct from wordpiece
        :param tokenized_data: A dictionary object of tokenized data
        :return: The same tokenized data with the word ids for each sentence
        """
        word_ids = []
        for i in range(len(tokenized_data['input_ids'])):
            batch_word_id = tokenized_data.word_ids(batch_index=i)
            # Convert Nones to 0 and augment all IDs by 1 (used when we create tensors)
            batch_word_id = [i+1 if i!=None else 0 for i in batch_word_id]
            word_ids.append(batch_word_id)
        tokenized_data['word_ids'] = word_ids
        return tokenized_data
    def get_bert_labels(self, tokenized_words, labels):
        '''
        Align labels with the pre-processed token sequences
        :return: A list of label sequences for sentences
        '''
        labels_bert = []
        for i, label in enumerate(labels): # Loop over token sentences
            # Map each tokenized word to its ID in the original sentence
            word_ids = tokenized_words.word_ids(batch_index=i)
            # Contains the label ids for a sentence
            label_ids = []
            for word_idx in word_ids:
                # Special tokens ([CLS], [SEP], [PAD]) have word_idx None and
                # are mapped to the 'O' label here (NOT the usual -100 ignore
                # index), so they do contribute to the loss.
                if word_idx is None:
                    label_ids.append(self.label2id['O']) # Assign the O label to the special characters
                # If a word is broken by wordpiece, just add as many labels as word chunk
                else:
                    label_ids.append(self.label2id[label[word_idx]])
            labels_bert.append(label_ids)
        return labels_bert
    def create_data_loaders(self, bert_ds, labels):
        '''
        Create a dataset compatible with torch
        :param bert_ds: A tokenized object containing both input_ids and mask ids
        :param labels: The label sequence associated to the tokens
        :return: A torch DataLoader object
        '''
        # Create the DataLoader for our training set
        # So now only use the inputs, not the original data anymore
        data = TensorDataset(torch.tensor(bert_ds['input_ids']), torch.tensor(bert_ds['attention_mask']), labels,
                             torch.tensor(bert_ds['word_ids']))
        sampler = RandomSampler(data)
        # For each data loader we need the data, a sampler and a batch size
        data_loader = DataLoader(dataset=data, sampler=sampler, batch_size=self.batch_size)
        return data_loader
| allepalma/Text-mining-project | bert_data_creation.py | bert_data_creation.py | py | 10,521 | python | en | code | 0 | github-code | 36 |
3146953868 | from setuptools import setup, find_packages
from os import path
DIR = path.abspath(path.dirname(__file__))
# NOTE(review): this module-level `description` is never used -- setup()
# below passes its own literal description and uses the README for
# long_description. Consider removing or wiring it in.
description = """SharePy will handle authentication for your SharePoint Online/O365 site, allowing
you to make straightforward HTTP requests from Python. It extends the commonly used Requests module,
meaning that returned objects are familliar, easy to work with and well documented."""
# The README is rendered as the long description on the PyPI project page.
with open(path.join(DIR, './README.md')) as f:
    long_description = f.read()
setup(
    name='sharepy',
    version='2.0.0',
    description='Simple SharePoint Online authentication for Python',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='sharepoint online authentication',
    author='Jonathan Holvey',
    author_email='jonathan.holvey@outlook.com',
    url='https://github.com/JonathanHolvey/sharepy',
    project_urls={
        'Issues': 'https://github.com/JonathanHolvey/sharepy/issues',
    },
    license='GPLv3',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Internet',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 3',
    ],
    # src-layout: packages live under ./src.
    # NOTE(review): setuptools examples use 'src' rather than './src' here;
    # './src' appears to work but verify on all target platforms.
    packages=find_packages('./src'),
    package_dir={'': './src'},
    package_data={'sharepy.auth.templates': ['*']},
    python_requires='>=3.6, <4',
    install_requires=['requests>=2,<3']
)
| JonathanHolvey/sharepy | setup.py | setup.py | py | 1,452 | python | en | code | 165 | github-code | 36 |
30695862500 |
def solve(feet):
    """Convert a length in feet to inches, miles and yards.

    The values are printed (inches, miles, yards -- in that order, matching
    the original behaviour) and also returned as a tuple so callers can use
    them programmatically.

    :param feet: length in feet (int or float)
    :return: tuple ``(inch, mile, yard)``
    """
    # 1 foot = 12 inches
    inch = feet * 12
    # 1 mile = 5280 feet
    mile = feet / 5280
    # BUG FIX: 1 yard = 3 feet, so yards = feet / 3.  The original computed
    # mile * 3, which is wrong (a mile is 1760 yards, not 3).
    yard = feet / 3
    print(inch)
    print(mile)
    print(yard)
    return inch, mile, yard
solve(int(input("Enter foot to convert to inch, yard and mile: "))) | nooruddin-rahmani/python-tasks | 15-Distance_Units.py | 15-Distance_Units.py | py | 276 | python | en | code | 1 | github-code | 36 |
27768921922 | import pandas as pd
from bs4 import BeautifulSoup
import requests
import random
import time
url='https://www.tianyancha.com/search?base=bj'
# Desktop Chrome user agent so the site serves the normal HTML page.
headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
response = requests.get(url,headers=headers)
response.encoding='utf8'
soup = BeautifulSoup(response.text,'lxml')
html = soup.prettify()
# print(html)
# DataFrame columns: company, status, main tags, legal representative,
# registered capital, founding date, e-mail, address.
columns=['公司','状态','主要标签','法人','注册资本','成立日期','邮箱','地址']
item = soup.find(name = 'div',class_='result-list')
result_list = item.find_all('div','search-item sv-search-company')
# print(result_list[5])
# Probe a single result (index 5) to work out the selectors used below.
name = result_list[5].find('a','name select-none').string
status = result_list[5].find('div','tag-common -normal-bg').string
label = result_list[5].find('div','tag-list').text if result_list[5].find('div','tag-list')!=None else None
legal = result_list[5].find('a','legalPersonName link-click').text
capital = result_list[5].find('div','title -narrow text-ellipsis').find('span').text
build = result_list[5].find('div','title text-ellipsis').find('span').text
email = result_list[5].find_all('div','contact row')[0].find_all('div','col')[1].find_all('span')[1].text
address= result_list[5].find_all('div','contact row')[1].find('div','col').find_all('span')[1].text
# There are three possible cases for the number of "contact row" blocks: 0, 1 or 2
# 0: both email and address are None
# 1: the single row is always the address
# 2: the first row holds phone and e-mail (take the e-mail); the second row is the address
# Complex cases are handled by the function below
def getContract(html):
    """Extract ``(email, address)`` from one search-result node.

    ``html`` is a bs4 element for a single company entry.  The number of
    "contact row" blocks decides the layout: 0 -> (None, None); 1 -> the
    single row is the address; 2 -> the first row holds the contact info
    (the e-mail is taken) and the second row is the address.  For any
    other count the function falls through and implicitly returns None.
    """
    print('----------')
    email=None
    address = None
    contract_list = html.find_all('div','contact row')
    num =len(contract_list)
    print(contract_list)
    if num==0:
        return (email,address)
    if num==1:
        # Single row: always the address; take the last span's text.
        address =contract_list[0].find('div','col').find_all('span')[-1].text
        print(email,address)
        return (email,address)
    elif num==2:
        # First row: phone + e-mail (e-mail is the second span of the last col).
        email = contract_list[0].find_all('div','col')[-1].find_all('span')[1].text if len(contract_list[0].find_all('div','col')) !=0 else None
        address = contract_list[1].find('div','col').find_all('span')[-1].text
        print(email,address)
        return (email,address)
print('===========')
print(name,status,label,legal,capital,build,email,address)
data = []
# NOTE: these lists shadow the single-result probe variables of the same
# names assigned above; from here on they accumulate one value per company.
name= []
status= []
label= []
legal= []
capital= []
build= []
email= []
address= []
for i in result_list:
    # print(i)
    # Fields that may be missing are guarded with inline conditionals.
    i_name=i.find('a','name select-none').string
    i_status=i.find('div','tag-common -normal-bg').string if i.find('div','tag-common -normal-bg')!=None else None
    i_label=i.find('div','tag-list').text if i.find('div','tag-list')!=None else None
    i_legal =i.find('a','legalPersonName link-click').text if i.find('a','legalPersonName link-click') !=None else None
    i_capital=i.find('div','title -narrow text-ellipsis').find('span').text
    i_build=i.find('div','title text-ellipsis').find('span').text
    i_email,i_address = getContract(i)
    print(i_name,i_status,i_label,i_legal,i_capital,i_build,i_email,i_address)
    name.append(i_name)
    status.append(i_status)
    label.append(i_label)
    legal.append(i_legal)
    capital.append(i_capital)
    build.append(i_build)
    email.append(i_email)
    address.append(i_address)
# Re-assemble the parallel lists into one row per company.
for i in range(len(name)):
    data.append([name[i],status[i],label[i],legal[i],capital[i],build[i],email[i],address[i]])
df = pd.DataFrame(data = data ,columns=columns)
print(df)
import pymysql
# NOTE(review): credentials are hard-coded; move them to configuration
# before using this outside a lab environment.
conn = pymysql.connect(host='192.168.10.108',
                       user='root',
                       password='123456',
                       db='dangdang',
                       # BUG FIX: PyMySQL expects MySQL charset names such as
                       # 'utf8'/'utf8mb4'; the Python codec name 'utf-8' is
                       # invalid and makes connect() fail.
                       charset='utf8',
                       cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
# Drop the table first (if it exists) so it can be re-created.
print('============#先删除表,后创建表================')
# IF EXISTS keeps the script from crashing when the table is absent.
cursor.execute('drop table if exists emp')
| kshsky/PycharmProjects | case/crawler/TianYanCha.py | TianYanCha.py | py | 3,711 | python | en | code | 0 | github-code | 36 |
2791422043 | import numpy as np
GRAV = -9.8  # gravitational acceleration along z (m/s^2)
# Per-axis factors applied to the velocity when the ball reaches table
# height: tangential (x, y) speed is damped, vertical (z) velocity flips.
R_COEF_TABLE = np.array([0.73, 0.73, -0.92])  # restitution coefficient of table


class Tracker:
    """Abstract interface for 3-D ball trackers; see LinearKF below."""

    # Latest prediction, if a subclass chooses to store one.
    prediction = None

    def __init__(self, dt):
        raise NotImplementedError()

    def update(self, z_measured, dt):
        raise NotImplementedError()

    def get_state(self, dt=0.0):
        raise NotImplementedError()


class LinearKF(Tracker):
    """Linear Kalman filter for a ballistic ball with a table-bounce model.

    State vector (7x1 column): [x, y, z, vx, vy, vz, az].  The filter
    initialises itself from the first 10 valid position samples (velocities
    estimated with a linear fit at an assumed 120 Hz rate), rejects outliers
    with a chi-square-style validation gate, and schedules a re-initialisation
    after more than 4 consecutive rejections.
    """

    _transition_matrix = None
    _state_post = None
    _state_pre = None
    _measurement_matrix = None
    _measurement_noise_cov = None
    _process_noise_cov = None
    _error_cov_post = None
    _error_cov_pre = None
    _must_reset = True
    _accumulated_initialization_samples = np.zeros((3, 10))
    _number_of_accumulated_initialization_samples = 0
    _rejected_samples = 0

    def __init__(self, dt):
        # BUG FIX: the accumulation buffer was only a class attribute, so two
        # LinearKF instances would have shared (and corrupted) the same array.
        # All mutable per-filter state is now (re)bound on the instance.
        self._accumulated_initialization_samples = np.zeros((3, 10))
        self._number_of_accumulated_initialization_samples = 0
        self._rejected_samples = 0
        self._must_reset = True
        self._upd_transition_matrix(dt)
        # Process noise: small on position/az, larger on velocity.
        self._process_noise_cov = np.array([[0.001, 0, 0, 0, 0, 0, 0],
                                            [0, 0.001, 0, 0, 0, 0, 0],
                                            [0, 0, 0.001, 0, 0, 0, 0],
                                            [0, 0, 0, 0.01, 0, 0, 0],
                                            [0, 0, 0, 0, 0.01, 0, 0],
                                            [0, 0, 0, 0, 0, 0.01, 0],
                                            [0, 0, 0, 0, 0, 0, 0.001]])
        # Posterior covariance: confident in position, uncertain in velocity.
        self._error_cov_post = np.array([[0.0001, 0, 0, 0, 0, 0, 0],
                                         [0, 0.0001, 0, 0, 0, 0, 0],
                                         [0, 0, 0.0001, 0, 0, 0, 0],
                                         [0, 0, 0, 10, 0, 0, 0],
                                         [0, 0, 0, 0, 10, 0, 0],
                                         [0, 0, 0, 0, 0, 10, 0],
                                         [0, 0, 0, 0, 0, 0, 0.001]])
        self._error_cov_pre = np.array([[0.0001, 0, 0, 0, 0, 0, 0],
                                        [0, 0.0001, 0, 0, 0, 0, 0],
                                        [0, 0, 0.0001, 0, 0, 0, 0],
                                        [0, 0, 0, 1, 0, 0, 0],
                                        [0, 0, 0, 0, 1, 0, 0],
                                        [0, 0, 0, 0, 0, 1, 0],
                                        [0, 0, 0, 0, 0, 0, 0.001]])
        # Measurement noise: 5 mm standard deviation per measured coordinate.
        self._measurement_noise_cov = (0.005 ** 2) * np.identity(3)
        # Only position (first three state components) is measured.
        self._measurement_matrix = np.zeros((3, 7))
        self._measurement_matrix[0:3, :3] = np.identity(3)
        self._state_post = np.array([[1.0, 1.0, 1.0, 0, 0, 0, GRAV]]).transpose()
        self._state_pre = np.array([[1.0, 1.0, 1.0, 0, 0, 0, GRAV]]).transpose()

    def _upd_transition_matrix(self, dt):
        # Constant velocity in x/y; constant acceleration (az) along z.
        self._transition_matrix = np.array([[1, 0, 0, dt, 0, 0, 0],
                                            [0, 1, 0, 0, dt, 0, 0],
                                            [0, 0, 1, 0, 0, dt, 0.5 * dt * dt],
                                            [0, 0, 0, 1, 0, 0, 0],
                                            [0, 0, 0, 0, 1, 0, 0],
                                            [0, 0, 0, 0, 0, 1, dt],
                                            [0, 0, 0, 0, 0, 0, 1]])

    def set_state_pre_post(self, x, y, z, vx, vy, vz, az):
        """Overwrite both the prior and posterior state vectors in place."""
        self._state_pre[:, 0] = self._state_post[:, 0] = (x, y, z, vx, vy, vz, az)

    def _predict(self):
        """Propagate state and covariance one step, applying the bounce model."""
        res = self._transition_matrix.dot(self._state_post)
        if res[2, 0] <= 0.025:
            # Predicted height at/below table level (2.5 cm): bounce.
            res[3:6, 0] = R_COEF_TABLE * res[3:6, 0]
        self._state_pre = res
        self._error_cov_pre = self._transition_matrix.dot(self._error_cov_post).dot(
            self._transition_matrix.transpose()) + self._process_noise_cov

    def update(self, z_measured, dt):
        """Ingest one [x, y, z] position measurement.

        Returns the squared Mahalanobis residual on a successful update,
        np.nan when the measurement is invalid after initialisation, and
        None while the filter is still initialising or when the sample is
        rejected by the validation gate.
        """
        # Check if the measurement is a finite 1x3 position.
        z_measured = np.array(z_measured, ndmin=2)
        valid_z_measured = np.isfinite(z_measured).all() and z_measured.shape == (1, 3)
        # Reset phase: accumulate samples following a new ball launch.
        if self._must_reset and valid_z_measured:
            self._accumulated_initialization_samples[
                :, self._number_of_accumulated_initialization_samples] = z_measured
            self._number_of_accumulated_initialization_samples += 1
            if self._number_of_accumulated_initialization_samples > 9:
                # Ten samples collected: estimate the initial velocity with a
                # linear fit over the (assumed 120 Hz) sample times.
                dts = np.arange(0.0, 10 * (1.0 / 120.0), 1.0 / 120.0)
                px = np.polyfit(dts, self._accumulated_initialization_samples[0, :], 1)
                py = np.polyfit(dts, self._accumulated_initialization_samples[1, :], 1)
                pz = np.polyfit(dts, self._accumulated_initialization_samples[2, :], 1)
                self.set_state_pre_post(z_measured[0, 0], z_measured[0, 1], z_measured[0, 2],
                                        px[0], py[0], pz[0], GRAV)
                self._must_reset = False
                self._number_of_accumulated_initialization_samples = 0
            else:
                return None
        # KF prediction step.
        self._upd_transition_matrix(dt)
        self._predict()
        if not valid_z_measured:
            return np.nan
        else:
            # Measurement update.
            Z = z_measured
            y = Z.transpose() - self._measurement_matrix.dot(self._state_pre)
            S = self._measurement_matrix.dot(self._error_cov_pre.dot(self._measurement_matrix.transpose())) \
                + self._measurement_noise_cov
            # Validation gate on the squared Mahalanobis distance of the
            # residual (best done outside the tracker, kept here as-is).
            g = 10
            if y.T.dot(np.linalg.inv(S).dot(y)) > g ** 2:
                self._rejected_samples += 1
                if self._rejected_samples > 4:  # more than 4 outliers: reset on next sample
                    self._must_reset = True
                # Sample not used: the posterior is just the prediction.
                self._state_post = self._state_pre
                self._error_cov_post = self._error_cov_pre
                return None
            else:
                # Sample accepted: standard Kalman gain update.
                self._rejected_samples = 0
                K = self._error_cov_pre.dot(self._measurement_matrix.transpose().dot(np.linalg.inv(S)))
                self._state_post = self._state_pre + (K.dot(y))
                self._error_cov_post = (
                    np.identity(self._error_cov_pre.shape[0]) - (K.dot(self._measurement_matrix))).dot(
                    self._error_cov_pre)
                return y.T.dot(np.linalg.inv(S).dot(y))

    def get_state(self, dt=0.0):
        """Return the current state, optionally propagated dt seconds ahead.

        Before initialisation a NaN-filled array is returned.  With dt == 0
        the posterior state (shape (7,)) is returned; with dt > 0 the prior
        is propagated forward in fixed 1/120 s steps (shape (7, 1)).
        """
        if self._must_reset:
            nans = np.empty(self._state_post.shape)
            # np.nan: np.NaN was removed in NumPy 2.0.
            nans.fill(np.nan)
            return nans
        if dt == 0.0:
            return self._state_post[:, 0]
        else:
            time_step = 1.0 / 120.0
            rc_table = R_COEF_TABLE
            state_pre = self._state_pre
            self._upd_transition_matrix(time_step)
            for _ in range(int(dt / time_step)):
                res = self._transition_matrix.dot(state_pre)
                if res[2, 0] <= 0.025:
                    # Apply the table bounce during look-ahead as well.
                    res[3:6, 0] = rc_table * res[3:6, 0]
                state_pre = res
            # BUG FIX: the original returned `res`, which is unbound when
            # dt < time_step (zero loop iterations) and raised NameError;
            # `state_pre` is identical after >= 1 iteration and falls back
            # to the prior state otherwise.
            return state_pre
if __name__ == '__main__':
    # Smoke test: the filter returns NaNs until it has accumulated ten
    # valid samples, so these calls only exercise the API surface.
    KF = LinearKF(1.0 / 120)
    x, y, z, vx, vy, vz, az = KF.get_state()
    KF.update([0.1, 0.1, 0.1], 1.0 / 120)
    # BUG FIX: the class has no predict_ahead() method (the original call
    # raised AttributeError); look-ahead is done via get_state(dt).
    KF.get_state(0.25)
    x, y, z, vx, vy, vz, az = KF.get_state()
| carlos-cardoso/robot-skills | kalman_tracker/src/python_tracker.py | python_tracker.py | py | 7,735 | python | en | code | 23 | github-code | 36 |
39430381428 | import unittest
from helpers import FakeReader, a_wait
import grole
class TestEncoding(unittest.TestCase):
    """Tests for grole.Request payload decoding (body/json helpers)."""
    def setUp(self):
        # A request whose raw payload is a small JSON document.
        self.req = grole.Request()
        self.req.data = b'{"foo": "bar"}'
    def test_body(self):
        # body() should decode the raw bytes to str.
        self.assertEqual(self.req.body(), '{"foo": "bar"}')
    def test_json(self):
        # json() should parse the payload into Python objects.
        self.assertEqual(self.req.json(), {'foo': 'bar'})
class TestReading(unittest.TestCase):
    """Tests for grole.Request stream parsing (_readline/_buffer_body/_read)."""
    def setUp(self):
        self.req = grole.Request()
    def test_readline_returns_data(self):
        # _readline should return exactly one CRLF-terminated line.
        reader = FakeReader(b'foo\r\nnope')
        line = a_wait(self.req._readline(reader))
        self.assertEqual(line, b'foo\r\n')
    def test_readline_raises_eof(self):
        # An empty stream must raise EOFError rather than return b''.
        reader = FakeReader(b'')
        with self.assertRaises(EOFError):
            line = a_wait(self.req._readline(reader))
    def test_buffer_body_content_len_0(self):
        # Content-Length 0: nothing is consumed, data stays empty.
        reader = FakeReader(b'foo')
        self.req.headers = {'Content-Length': 0 }
        self.req.data = b''
        a_wait(self.req._buffer_body(reader))
        self.assertEqual(b'', self.req.data)
    def test_buffer_body_content(self):
        # Only Content-Length bytes are buffered, extra input is left alone.
        reader = FakeReader(b'foobar')
        self.req.headers = {'Content-Length': 3 }
        self.req.data = b''
        a_wait(self.req._buffer_body(reader))
        self.assertEqual(b'foo', self.req.data)
    def test_buffer_body_not_enough_data(self):
        # A truncated body (fewer bytes than Content-Length) raises EOFError.
        reader = FakeReader(b'foo')
        self.req.headers = {'Content-Length': 4 }
        self.req.data = b''
        with self.assertRaises(EOFError):
            a_wait(self.req._buffer_body(reader))
    def test_header(self):
        # Full request line + headers: method, version, path, query string
        # (valueless keys map to None) and headers must all be parsed.
        header = b'\r\n'.join([b'GET /foo?bar=baz&spam=eggs&chips HTTP/1.1',
                               b'foo: bar',
                               b'']) + b'\r\n'
        a_wait(self.req._read(FakeReader(header)))
        self.assertEqual(self.req.method, 'GET')
        self.assertEqual(self.req.version, 'HTTP/1.1')
        self.assertEqual(self.req.path, '/foo')
        self.assertEqual(self.req.query, {'bar': 'baz', 'spam': 'eggs', 'chips': None})
        self.assertEqual(self.req.headers, {'foo': 'bar'})
        self.assertEqual(self.req.data, b'')
if __name__ == '__main__':
unittest.main()
| witchard/grole | test/test_request.py | test_request.py | py | 2,226 | python | en | code | 5 | github-code | 36 |
6817402374 | from keras.applications.vgg16 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
# models
from keras.applications.vgg16 import VGG16
from keras.models import Model
# clustering and dimension reduction
# from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
# for everything else
import numpy as np
# import pandas as pd
import pickle
datagen = ImageDataGenerator()
"""
# path to DataGen folder
# DataGen folder must contain two folders inside with name test and train
with each folder containing folders having different image types
# DataGen/train -->airplanes,bikes,cars,faces folders
# DataGen/test -->airplanes,bikes,cars,faces folders
"""
home_path = r'D:\sem1_2021\DIP\assinments\Assignment05\Images\DataGen'
print("getting data using ImageDataGenerator")
train_data = datagen.flow_from_directory(
    directory=home_path + r'/train/',
    target_size=(224,224), # resize to this size to the size required fo VGG16
    color_mode="rgb", # for coloured images
    batch_size=1, # number of images to extract from folder for every batch
    class_mode="binary", # classes to predict (single class classifier)
)
test_data = datagen.flow_from_directory(
    directory=home_path + r'/test/',
    target_size=(224,224), # resize to this size to the size required fo VGG16
    color_mode="rgb", # for coloured images
    batch_size=1, # number of images to extract from folder for every batch
    class_mode="binary",
)
model = VGG16()
# Truncate VGG16 at its second-to-last layer to obtain a feature extractor.
model = Model(inputs = model.inputs, outputs = model.layers[-2].output) #taking features from the secondlast layer of VGG16
def extract_features(file, model):
    """Run one preprocessed image batch through the truncated VGG16.

    :param file: image array already shaped for VGG16 input
    :param model: the feature-extractor Model built above
    :return: the feature vector predicted by the model
    """
    imgx = preprocess_input(file) #reshaped_img
    # get the feature vector
    features = model.predict(imgx, use_multiprocessing=True)
    return features
data = {}
p = r'D:\sem1_2021\DIP\assinments\Assignment05\Images\except'
print("exracting features of train/test image using VGG")
features_train = [] #array containg features of each image
labels_train = [] #array containg label(class of img)
i=0
for i in range(120): # 120 is number of traing images
    print("train" ,i)
    # extract the features and update the dictionary
    batchX, batchY = train_data.next() # batchx contains the image aray of particular index
    try: # batchy contains the label number present in train_data from DataGen operation
        feat = extract_features(batchX,model) #getting features of particular image from VGG model
        labels_train.append(batchY)
        features_train.append(feat)
    # error handling / can ignore
    # NOTE(review): the bare except silently swallows any failure and dumps
    # the (empty) `data` dict; consider catching a specific exception.
    except:
        with open(p,'wb') as file:
            pickle.dump(data,file)
# similar as train_data operation
features_test = []
labels_test = []
i=0
for i in range(80):
    print("test",i)
    # try to extract the features and update the dictionary
    batchX, batchY = test_data.next()
    try:
        feat = extract_features(batchX,model)
        labels_test.append(batchY)
        features_test.append(feat)
    # if something fails, save the extracted features as a pickle file (optional)
    except:
        with open(p,'wb') as file:
            pickle.dump(data,file)
features_train = np.array(features_train)
labels_train = np.array(labels_train)
features_test = np.array(features_test)
labels_test = np.array(labels_test)
# reshape so that there are 120 and 80 respective samples of 4096 vectors
features_train = features_train.reshape(-1,4096)
# print(features_train.shape)
features_test = features_test.reshape(-1,4096)
# reduce the amount of dimensions in the feature vector by extracting most dependent featues only using PCA
print("PCA_TRAIN")
pca = PCA(n_components=40, random_state=78) #4096 to 40 features for easy computation by our KNN
pca.fit(features_train)
x_train = pca.transform(features_train)
# NOTE(review): test features are reduced with a PCA fitted on the test set
# itself; normally the train-fitted PCA would be reused here -- confirm.
print("PCA_TEST")
pca = PCA(n_components=40, random_state=78)
pca.fit(features_test)
x_test = pca.transform(features_test)
print("KNN_MODEL")
training_data = np.column_stack((x_train,labels_train)) #merging the two arrays to one to pass to KNN function
testing_data = np.column_stack((x_test,labels_test))
def EUC_DIST(v1, v2):
    """Return the Euclidean distance between two feature vectors.

    Every component of ``v1`` is compared against the corresponding
    component of ``v2``; any extra trailing entries of ``v2`` (e.g. an
    appended class label, as passed by Predict) are ignored.
    """
    v1, v2 = np.array(v1), np.array(v2)
    # BUG FIX: the original iterated range(len(v1) - 1), silently dropping
    # the last feature from the distance even though callers already strip
    # the label column from v1; sum over every component of v1 instead.
    diff = v1 - v2[:len(v1)]
    return np.sqrt(np.sum(diff * diff))
def Predict(k,train_data,test_instance): # k = number of nearest neighb ,train_data = whole train array , test = only one single test image and its label
    """k-nearest-neighbour vote for the class of one test instance.

    Each row of ``train_data`` is [features..., label]; the label column is
    stripped before the distance computation.  Ties between classes are
    resolved in favour of the class encountered first among the nearest
    neighbours (sorted() is stable and dicts preserve insertion order).
    """
    distances = [] #array containing euc dist of test image with every training image respectively
    for i in range(len(train_data)):
        dist = EUC_DIST(train_data[i][:-1], test_instance)
        distances.append((train_data[i],dist))
    distances.sort(key=lambda x: x[1]) #sorting with least distance on top
    neighbors = []
    for i in range(k):
        neighbors.append(distances[i][0]) #contain array of labels of image with least euc dist to test image
    # Tally the class labels (last column) of the k nearest rows.
    classes = {}
    for i in range(len(neighbors)):
        response = neighbors[i][-1]
        if response in classes:
            classes[response] += 1
        else:
            classes[response] = 1
    sorted_classes = sorted(classes.items() , key = lambda x: x[1],reverse = True )
    return sorted_classes[0][0] #return the predicted class/label of test img
def Eval_Acc(y_data, y_pred):
    """Accuracy (%) of the predictions against the ground truth.

    The true class of each row of ``y_data`` is stored in its last column;
    a prediction counts as a hit when it equals that label.
    """
    hits = sum(1 for truth, guess in zip(y_data, y_pred) if truth[-1] == guess)
    return (hits / len(y_pred)) * 100
y_pred = [] #array containg KNN predicted labels/class of each image in test_data
# Classify every test sample with k=2 nearest neighbours, then report
# the overall accuracy percentage.
for i in range(len(testing_data)):
    y_pred.append(Predict(2,training_data, testing_data[i]))
print(Eval_Acc(testing_data, y_pred))
| AnmolGarg98/KNN_image-classification | KNN_VGG16_pretrained_features.py | KNN_VGG16_pretrained_features.py | py | 6,453 | python | en | code | 0 | github-code | 36 |
73349078504 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the Car, Driver and Ride tables."""
    dependencies = [
        # Resolves to the project's configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('brand', models.CharField(max_length=64, null=True)),
                ('model', models.CharField(max_length=64, null=True)),
                ('color', models.CharField(max_length=64)),
                ('reg_number', models.CharField(unique=True, max_length=16)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Driver',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # unique=True on both FKs makes this an effective one-to-one
                # link between an account, its driver record and a car.
                ('account', models.ForeignKey(to=settings.AUTH_USER_MODEL, unique=True)),
                ('car', models.ForeignKey(to='TaxiService.Car', unique=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Ride',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('fromAddress', models.CharField(max_length=256)),
                ('toAddress', models.CharField(max_length=256)),
                ('date', models.DateTimeField()),
                ('car', models.ForeignKey(to='TaxiService.Car')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| IlyaSergeev/taxi_service | TaxiService/migrations/0001_initial.py | 0001_initial.py | py | 1,872 | python | en | code | 0 | github-code | 36 |
18050194084 | # food restaurant delivery
order ={
    "client": "John Doe",
    "item": "Salad",
    "quantity":8,
    "price":15.00
    }
order["total"]=order["price"]*order["quantity"]
if order["quantity"]>7:
    order["price"]*=0.8 #offer 20% discount for orders from 8 pcs
    # Recompute the total with the discounted unit price.
    order["total"]=order["price"]*order["quantity"]
    print ("\nYou've got 20% discount")
# NOTE(review): the comparison below is case-sensitive, so "yes"/"YES"
# falls through to the self-pickup branch -- confirm that is intended.
delivery_request = input("\nDo you need delivery? Yes/No : ")
if delivery_request=="Yes" and order["total"]>300:
    # Orders above 300 get free delivery.
    print(f"\nYou've got free delivery. You have to pay {order['total']}")
elif delivery_request=="Yes":
    # Otherwise a flat delivery fee is added and an itemised bill printed.
    order["delivery_cost"]=50
    print (f'''You have to pay: \n{order["item"]:10}:{order["total"]:9}
    Delivery : {order["delivery_cost"]:8.1f}
    Total    : {order["total"]+order["delivery_cost"]:8}''', sep="")
else:
    print(f"""\nYou have to pick up the order yourself.
    You have to pay {order['total']}
    """)
| jvasea1990/dictionaries | food restaurant delivery.py | food restaurant delivery.py | py | 895 | python | en | code | 0 | github-code | 36 |
9360337388 | import fire
from flask import Flask, jsonify, request
from flask_cors import CORS
from flask_restful import Resource, Api
from graph_knn import load_entity_knn, load_multi_knn
class Knn(Resource):
    """REST endpoint returning the k nearest neighbours of an entity."""

    def __init__(self, **kwargs):
        self.knn = kwargs['knn']

    def post(self):
        """Answer a POST with entity/relation/k/direction fields as JSON."""
        payload = request.get_json(force=True)
        uris, dists, names = self.knn.find_entity_knn(
            payload["entity"],
            payload["relation"],
            int(payload["k"]),
            payload["direction"],
        )
        neighbours = []
        for uri, dist, name in zip(uris, dists, names):
            neighbours.append({'uri': uri, 'dist': float(dist), 'name': name})
        return jsonify(neighbours)
class EntitySearch(Resource):
    """Case-insensitive substring search over the entity-name dictionary."""

    def __init__(self, **kwargs):
        self.ent_dict_name = kwargs['ent_dict_name']

    def post(self):
        """Return a paginated list of entities whose name contains the query."""
        body = request.get_json(force=True)
        needle = body["query"].lower()
        limit = int(body["limit"])
        offset = int(body["offset"])
        matches = [
            {'value': uri, 'label': name}
            for uri, name in self.ent_dict_name.items()
            if needle in name.lower()
        ]
        page = matches[offset:(offset + limit)]
        return jsonify({'result': page, 'size': len(matches)})
class RelationSearch(Resource):
    """Case-insensitive substring search over relation URIs (label == URI)."""

    def __init__(self, **kwargs):
        self.rel_dict_uri = kwargs['rel_dict_uri']

    def post(self):
        """Return a paginated list of relation URIs containing the query."""
        body = request.get_json(force=True)
        needle = body["query"].lower()
        limit = int(body["limit"])
        offset = int(body["offset"])
        matches = [
            {'value': uri, 'label': uri}
            for uri in self.rel_dict_uri
            if needle in uri.lower()
        ]
        page = matches[offset:(offset + limit)]
        return jsonify({'result': page, 'size': len(matches)})
class IndexedEntitySearch(Resource):
    """Entity search backed by an index; labels show `name count uri`."""

    def __init__(self, **kwargs):
        self.entity_index = kwargs['entity_index']

    def post(self):
        """Query the index and return a paginated result list."""
        body = request.get_json(force=True)
        limit = int(body["limit"])
        offset = int(body["offset"])
        matches = []
        for entity in self.entity_index.find_entity(body["query"]):
            label = f'{entity.name} {entity.count} {entity.uri}'
            matches.append({'value': entity.uri, 'label': label})
        page = matches[offset:(offset + limit)]
        return jsonify({'result': page, 'size': len(matches)})
class IndexedRelationSearch(Resource):
    """Paginated index-backed search endpoint.

    NOTE(review): despite its name, this class is identical to
    IndexedEntitySearch — it takes an ``entity_index`` kwarg and calls
    ``find_entity``. It looks like a copy-paste that was never adapted to a
    relation index; launch_api_multi registers RelationSearch instead, so
    this class is currently unused. Confirm before relying on it.
    """

    def __init__(self, **kwargs):
        self.entity_index = kwargs['entity_index']

    def post(self):
        """Query the (entity) index and return a paginated result list."""
        json_data = request.get_json(force=True)
        query = json_data["query"]
        limit = int(json_data["limit"])
        offset = int(json_data["offset"])
        result = [{'value': entity.uri, 'label': f'{entity.name} {entity.count} {entity.uri}'}
                  for entity in self.entity_index.find_entity(query)]
        filtered = result[offset:(offset + limit)]
        response = {'result': filtered, 'size': len(result)}
        return jsonify(response)
def launch_api(ent_path, rel_path, dict_path, name_dict_path):
    """Serve the single-embedding kNN API (dict-based search) on port 5006.

    Registers /knn, /knn-entity-search and /knn-relation-search on a Flask
    app with CORS enabled.
    """
    app = Flask(__name__)
    api = Api(app)
    knn = load_entity_knn(ent_path, rel_path, dict_path, name_dict_path)
    api.add_resource(Knn, "/knn", resource_class_kwargs={'knn': knn})
    api.add_resource(EntitySearch, "/knn-entity-search",
                     resource_class_kwargs={'ent_dict_name': knn.ent_dict_name})
    api.add_resource(RelationSearch, "/knn-relation-search",
                     resource_class_kwargs={'rel_dict_uri': knn.rel_dict_uri})
    CORS(app)
    app.run(host="0.0.0.0", port="5006")
def launch_api_multi(ent_paths, rel_path, entity_name_file, relation_name_file, port):
    """Serve the kNN API backed by multiple embedding files on *port*.

    NOTE(review): relation search is still served by the dict-based
    RelationSearch class (fed ``relation_index.uri_to_entity``); the
    IndexedRelationSearch class defined in this module is never registered.
    """
    app = Flask(__name__)
    api = Api(app)
    knn = load_multi_knn(ent_paths, rel_path, entity_name_file, relation_name_file)
    api.add_resource(Knn, "/knn", resource_class_kwargs={'knn': knn})
    api.add_resource(IndexedEntitySearch, "/knn-entity-search",
                     resource_class_kwargs={'entity_index': knn.entity_index})
    api.add_resource(RelationSearch, "/knn-relation-search",
                     resource_class_kwargs={'rel_dict_uri': knn.relation_index.uri_to_entity})
    CORS(app)
    app.run(host="0.0.0.0", port=port)


if __name__ == "__main__":
    # Expose launch_api_multi as the CLI entry point via python-fire.
    fire.Fire(launch_api_multi)
| graph-embeddings/pbg-helper | knn-graph-viewer/back/api.py | api.py | py | 4,457 | python | en | code | 21 | github-code | 36 |
def read_polynomial(num_variables, degree):
    """
    Reads a multilinear polynomial from the user.

    Args:
        num_variables (int): The number of variables in the polynomial.
        degree (int): The degree of the polynomial.
            NOTE(review): `degree` is never used — coefficients for all
            2**num_variables monomials are requested regardless.

    Returns:
        A list containing the coefficients of the monomials in the polynomial.
    """
    num_monomials = 2 ** num_variables
    polynomial = [0] * num_monomials
    for i in range(num_monomials):
        # Decode monomial i: bit `var` of i tells whether variable `var`
        # appears (multilinear, so each power is 0 or 1).
        monomial = []
        for var in range(num_variables):
            power = (i >> var) & 1
            monomial.append(power)
        coeff = input(f"Enter the coefficient for the monomial {tuple(monomial)}: ")
        polynomial[i] = int(coeff)
    return polynomial
def read_partial_assignment(num_variables):
    """
    Reads a partial assignment from the user.

    NOTE(review): despite the name, every variable 0..num_variables-1 is
    prompted for, so the returned assignment is always total.

    Args:
        num_variables (int): The number of variables in the polynomial.

    Returns:
        A dictionary containing the partial assignment of variables.
    """
    partial_assignment = {var: 0 for var in range(num_variables)}
    for var in range(num_variables):
        val = input(f"Enter the assignment for variable {var}: ")
        partial_assignment[var] = int(val)
    return partial_assignment
def restricted_polynomial(polynomial, partial_assignment):
    """Restrict a multilinear polynomial over F2 by a variable assignment.

    Keeps coefficient i only when, for every assigned variable, the
    variable's bit in monomial i agrees with the assignment; all other
    coefficients become 0.

    Args:
        polynomial: coefficients indexed by monomial bitmask.
        partial_assignment: mapping variable index -> assigned bit.

    Returns:
        A new coefficient list of length 2 ** len(partial_assignment).
    """
    n_vars = len(partial_assignment)
    size = 2 ** n_vars
    out = [0] * size
    for idx in range(size):
        keep = True
        for var in range(n_vars):
            bit = (idx >> var) & 1
            if var in partial_assignment and partial_assignment[var] != bit:
                keep = False
                break
        coeff = polynomial[idx]
        if keep and coeff != 0:
            out[idx] = coeff
    return out
if __name__ == "__main__":
    # Interactive driver: read a polynomial and an assignment from stdin,
    # then print the restricted polynomial.
    num_variables = int(input("Enter the number of variables in the polynomial: "))
    degree = int(input("Enter the degree of the polynomial: "))
    polynomial = read_polynomial(num_variables, degree)
    print(f"Polynomial: {polynomial}")
    partial_assignment = read_partial_assignment(num_variables)
    print(f"Partial assignment: {partial_assignment}")
    restricted_poly = restricted_polynomial(polynomial, partial_assignment)
    print(f"Restricted polynomial: {restricted_poly}")
| shivamsinoliyainfinity/Polynomial_restrictor | trial2.py | trial2.py | py | 2,771 | python | en | code | 0 | github-code | 36 |
10579988765 | #a = 4678678678
#b = 4678678678
#import numpy as np
#a = np.int64(a)
#b = np.int64(b)
#c = a + b
#print(2**32 - 1)
def shuffle_seed(array):
    """Shuffle *array* with a freshly drawn 32-bit seed.

    Draws a random seed, reseeds NumPy's global RNG with it, and returns
    ``(shuffled_copy, seed)`` so the permutation is reproducible.
    """
    import numpy as np
    seed = np.random.randint(0, 4294967296, dtype=np.int64)
    np.random.seed(seed)
    shuffled = np.random.permutation(array)
    return shuffled, seed
# Demo: shuffle a small list and show the (shuffled array, seed) pair.
array = [1, 2, 3, 4, 5]
print(shuffle_seed(array))
# (array([1, 3, 2, 4, 5]), 2332342819)
#shuffle_seed(array)
# (array([4, 5, 2, 3, 1]), 4155165971) | lightarum/my_first_project | project_0/test.py | test.py | py | 467 | python | en | code | 4 | github-code | 36 |
#!/usr/bin/env python
# coding: utf-8
# Notebook-exported analysis of the Korean national fitness-test dataset.
# In[7]:
import pandas as pd
body_df = pd.read_csv('./body.csv')
# In[8]:
# Q1. Compute the mean of (systolic max BP - diastolic min BP) over all records.
# In[24]:
result = (body_df['수축기혈압(최고) : mmHg']-body_df['이완기혈압(최저) : mmHg']).mean()
print(result)
# In[9]:
# Q2. Compute the mean height of people aged 50-59.
# In[32]:
average_height = body_df[(body_df['측정나이']<60)&(body_df['측정나이']>=50)].iloc[:,3].mean()
print(average_height)
# In[33]:
# Q3. Bucket ages into decades (20-29 -> 20s,
# 30-39 -> 30s, etc.) and count people per age band.
# In[38]:
body_df['연령대'] = body_df.측정나이 //10 * 10
body_df['연령대'].value_counts()
# In[39]:
# Q4. For men, compute the absolute difference between the mean body-fat
# percentage of grade A and grade D.
# In[46]:
import numpy as np
A_grade = body_df[(body_df.측정회원성별 == 'M') & (body_df.등급 == 'A')].iloc[:,5].mean()
D_grade = body_df[(body_df.측정회원성별 == 'M') & (body_df.등급 == 'D')].iloc[:,5].mean()
np.abs(A_grade - D_grade)
# In[12]:
# Q5. Same as Q4 but for women.
# In[47]:
import numpy as np
A_grade = body_df[(body_df.측정회원성별 == 'F') & (body_df.등급 == 'A')].iloc[:,5].mean()
D_grade = body_df[(body_df.측정회원성별 == 'F') & (body_df.등급 == 'D')].iloc[:,5].mean()
np.abs(A_grade - D_grade)
# In[13]:
# Q6. BMI is weight (kg) divided by height (m) squared. Add a bmi column
# and compute the mean BMI for men and women.
# In[62]:
height_squared = (body_df['신장 : cm']/100)**2 # height is in cm, so divide by 100 to get metres
bmi = body_df['체중 : kg']/height_squared
body_df['bmi'] = bmi
male_average = body_df[body_df['측정회원성별'] == 'M'].bmi.mean()
female_average = body_df[body_df['측정회원성별'] == 'F'].bmi.mean()
print('남성 평균:', male_average)
print('여성 평균:', female_average)
# In[14]:
# Q7. Compute the mean weight of people whose body-fat percentage exceeds
# their BMI.
# NOTE(review): the code below actually compares bmi against weight
# ('체중 : kg'), not the body-fat column — confirm which was intended.
# In[68]:
answer = body_df[(body_df['bmi']<body_df['체중 : kg'])]['체중 : kg'].mean()
print(answer)
# In[15]:
# Q8. Compute the difference between male and female mean grip strength.
# In[76]:
import numpy as np
import pandas as pd
male_average_grip = body_df[body_df.측정회원성별 == 'M']['악력D : kg'].mean()
female_average_grip = body_df[body_df.측정회원성별 == 'F']['악력D : kg'].mean()
np.abs(male_average_grip - female_average_grip)
### or, equivalently
result = body_df.groupby('측정회원성별')['악력D : kg'].mean()
np.abs(result.M - result.F)
# In[16]:
# Q9. Compute the difference between male and female mean cross sit-up counts.
# In[77]:
result1 = body_df.groupby('측정회원성별')['교차윗몸일으키기 : 회'].mean()
np.abs(result1.M - result1.F)
# In[78]:
# end of file
| polkmn222/Statistic-Python | 0622/대한민국 체력장 데이터.py | 대한민국 체력장 데이터.py | py | 3,145 | python | ko | code | 0 | github-code | 36 |
73118844584 | import socket
# English -> Russian lookup used by the echo-translate server below.
my_dict = {"python": "питон", "very": "очень", "like": "нравится"}
class TcpServer:
    """Single-threaded TCP server translating known English words to Russian.

    For each connection it reads one message, replaces every
    whitespace-separated word found in ``my_dict`` with its translation
    (unknown words become ``"(NOT IN THE DICT)"``) and sends the result back.
    """

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self._socket = None
        # Bug fix: the attribute was misspelled "_runnning" and never
        # consulted by run()'s accept loop.
        self._running = False

    @staticmethod
    def _translate(text):
        """Translate *text* word by word; a non-empty result keeps the
        original trailing-space format of the reply."""
        parts = []
        for word in text.split():
            parts.append(my_dict.get(word, "(NOT IN THE DICT)"))
        if not parts:
            return ""
        return " ".join(parts) + " "

    def run(self):
        """Bind, listen and serve clients until stop() clears the flag or
        the process is interrupted."""
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind((self.host, self.port))
        self._socket.listen(5)
        self._running = True
        print('Server is up')
        # Bug fix: the loop was `while True`, ignoring the running flag that
        # stop() sets.
        while self._running:
            conn, addr = self._socket.accept()
            with conn:
                print(f"К серверу подключился {addr}")
                data = conn.recv(1024)
                conn.send(self._translate(data.decode()).encode())

    def stop(self):
        """Clear the running flag and close the listening socket."""
        self._running = False
        self._socket.close()
        print('Server is down')
if __name__ == '__main__':
    # Run the translate server on localhost until Ctrl+C, then shut down.
    srv = TcpServer(host='127.0.0.1', port=5555)
    try:
        srv.run()
    except KeyboardInterrupt:
        srv.stop()
10256295781 | import ast
import json
import cv2
from deepface import DeepFace
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import UserPassesTestMixin
from django.db import connections
from django.db.utils import ProgrammingError
from django.http import HttpRequest
from django.http.response import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.utils.autoreload import logger
from django.views import View
from django.views.decorators.http import require_http_methods
from app.models import User, Theme, Task, TaskGroup, Grade
from app.utils import dictfetchall
class LoginView(View):
    """Render the login form and authenticate users by email/password."""

    def get(self, request):
        """Show the form, or skip straight to the target page when already
        authenticated."""
        if not request.user.is_authenticated:
            return render(request, 'login.html', context={"head": "Login, please!"})
        return redirect(request.GET.get("next", '/home/'))

    def post(self, request):
        """Log the user in when credentials match, otherwise re-render the form."""
        credentials = {
            "email": request.POST["email"],
            "password": request.POST["password"],
        }
        user = User.objects.filter(**credentials).first()
        if user is None:
            return render(request, 'login.html', context={"head": "user not found"})
        login(request, user)
        return redirect(request.GET.get("next", '/home/'))
@require_http_methods(["GET"])
def start_page(request):
    """Site entry point: always bounce to the login page."""
    return redirect("/login/")
@require_http_methods(["GET"])
@login_required(login_url='/login/')
def home(request):
    """Render the home page listing the user's themes with totals.

    For each theme that has at least one task group, shows its description,
    the summed task time, the maximum achievable grade (sum of coefficients)
    and whether any grade exists for its tasks.
    """
    themes = Theme.objects.filter(user=request.user)
    themes_list = list()
    # NOTE(review): this loop issues several queries per theme (N+1 pattern);
    # prefetch_related would likely help — confirm before optimising.
    for theme in themes:
        if not theme.taskgroup_set.all():
            continue
        # Only the first task group of a theme is considered.
        task_group = theme.taskgroup_set.all()[0]
        themes_list.append(
            {
                "description": theme.description,
                "time": sum([task.time for task in task_group.task_set.all()]),
                "max_grade": sum([task.coefficient for task in task_group.task_set.all()]),
                # "complete" here means: at least one grade exists for any task.
                "is_complete": bool(Grade.objects.filter(task__in=task_group.task_set.all())),
                "id": theme.id
            }
        )
    return render(request, 'home.html', context={
        "themes": themes_list
    })
@require_http_methods(["GET"])
@login_required(login_url='/login/')
def my_grades(request):
    """Render the grades page: one row per theme the user has passed.

    Themes without a task group or without grades are skipped.
    """
    themes = Theme.objects.filter(user=request.user)
    themes_list = list()
    for theme in themes:
        grades = Grade.theme_is_passed(theme, request.user)
        if not theme.taskgroup_set.all() or not grades:
            continue
        # Only the first task group of a theme is considered.
        task_group = theme.taskgroup_set.all()[0]
        themes_list.append(
            {
                "description": theme.description,
                "my_grade": sum([grade.final_score for grade in grades]),
                "max_grade": sum([task.coefficient for task in task_group.task_set.all()]),
                "id": theme.id
            }
        )
    return render(request, 'grades.html', context={
        "themes": themes_list
    })
class CustomAuthMixin(UserPassesTestMixin):
    # Base for all permission mixins in this module: a failed test_func()
    # redirects the user to the login page.
    login_url = '/login/'
class SuperUserAuthMixin(CustomAuthMixin):
    """Restrict a view to Django superusers."""

    def test_func(self):
        """Gate used by UserPassesTestMixin: True only for superusers."""
        return bool(self.request.user.is_superuser)
class ThemeView(CustomAuthMixin, View):
    """Theme intro page: description, total time and a start/results button."""

    def test_func(self):
        """Allow access only when the theme (id from the URL) belongs to the user."""
        if not self.request.user.is_authenticated:
            return False
        allow_themes = list(Theme.objects.filter(user=self.request.user).values_list('id', flat=True))
        # NOTE(review): parsing the id out of the URL string is fragile;
        # self.kwargs['theme_id'] would be the robust source — confirm URLconf.
        theme_id = self.request.build_absolute_uri().split('/')[-2]
        return int(theme_id) in allow_themes

    def get(self, request, theme_id):
        """Render theme.html; the button label depends on whether grades exist."""
        theme = Theme.objects.get(pk=theme_id)
        task_group = TaskGroup.objects.filter(theme=theme).first()
        grades = Grade.theme_is_passed(theme, request.user)
        return render(request, "theme.html", context={
            "description": theme.description,
            "time": sum([task.time for task in task_group.task_set.all()]),
            "start_link": task_group.id,
            "button_desc": "Результати" if grades else "Почати",
            "subject_title": task_group.subject_area.title,
            "subject_image": task_group.subject_area.schema
        })
class TaskGroupView(CustomAuthMixin, View):
    """Present the tasks of a task group to a user who owns its theme."""

    def setup(self, request, *args, **kwargs):
        # Bug fix: args/kwargs were forwarded as two positional values
        # (`super().setup(request, args, kwargs)`), which stored the tuple
        # and dict themselves in self.args and left self.kwargs empty.
        super().setup(request, *args, **kwargs)
        self.task_group = TaskGroup.objects.filter(pk=kwargs["task_group_id"]).first()

    def test_func(self):
        """Allow access only when the task group belongs to one of the
        requesting user's themes."""
        if not self.request.user.is_authenticated:
            return False
        theme = Theme.objects.filter(user=self.request.user)
        self.task_groupes = TaskGroup.objects.filter(theme__in=theme).values_list('id', flat=True)
        allow_task_group = list(self.task_groupes)
        # NOTE(review): deriving the id from the URL string is fragile;
        # self.kwargs["task_group_id"] would be more robust — confirm URLconf.
        task_group_id = self.request.build_absolute_uri().split('/')[-2]
        return int(task_group_id) in allow_task_group

    def get(self, request, task_group_id):
        """Render the task page with all tasks of the group."""
        # if Grade.objects.filter(task__in=Task.objects.filter(task_group=self.task_group)):
        #     return redirect(f'/grade_theme/{self.task_group.theme.id}', self.request)
        tasks = [
            {
                "id": task.id,
                "description": task.description,
            } for task in Task.objects.filter(task_group=self.task_group)
        ]
        return render(request, 'task.html', context={
            "tasks": tasks,
            "id": tasks[0]["id"],
            "subject_title": self.task_group.subject_area.title,
            "subject_img": self.task_group.subject_area.schema
        })

    def post(self, request, task_group_id):
        """Return the same task list as JSON."""
        tasks = [
            {
                "id": task.id,
                "description": task.description,
            } for task in Task.objects.filter(task_group=self.task_group)
        ]
        return JsonResponse({"tasks": tasks})
@require_http_methods(["GET"])
def logout_view(request):
    """Log the current user out and return to the login page."""
    logout(request)
    # Fix: `request` was mistakenly passed as an extra positional argument to
    # redirect(); for a literal path it was silently ignored, but it is
    # misleading and would break a named-URL redirect.
    return redirect('/login')
class VerifyImage(SuperUserAuthMixin, View):
    """Compare a webcam snapshot (base64 data-URL in POST) with the user's avatar."""

    def post(self, request: HttpRequest):
        # Strip the data-URL header; [22:] assumes a fixed-length
        # "data:image/...;base64," prefix — TODO confirm for all image types.
        img = bytes(request.POST["img"][22:], 'utf-8')
        # NOTE(review): a single shared file path means concurrent requests
        # overwrite each other's snapshot.
        with open("app/avatars/current_image.jpg", "wb") as fh:
            import base64
            fh.write(base64.decodebytes(img))
        try:
            data = DeepFace.verify(
                img1_path="app/avatars/current_image.jpg",
                img2_path=request.user.avatar,
                model_name='ArcFace'
            )
        except ValueError as e:
            # Presumably raised when no face is detected — treat as "not
            # verified"; confirm against the DeepFace API.
            print(e)
            data = dict()
            data["verified"] = False
        if data["verified"]:
            return HttpResponse('verified', status=200)
        return HttpResponse('not verified', status=400)
class GetImage(SuperUserAuthMixin, View):
    """Capture a frame from the default webcam and store it on disk."""

    def get(self, request):
        """Grab up to 10 frames; each successful read overwrites the same file,
        so the last good frame wins."""
        camera = cv2.VideoCapture(0)
        import os
        try:
            os.remove("app/avatars/img_from_opencv.jpg")
        except OSError:
            # Bug fix: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; only filesystem errors (e.g. the file not
            # existing yet) should be ignored here.
            pass
        for i in range(10):
            return_value, image = camera.read()
            if return_value:
                cv2.imwrite('app/avatars/img_from_opencv.jpg', image)
        del camera
        cv2.destroyAllWindows()
        return HttpResponse('Image successfully saved on app/avatars/img_from_opencv.jpg')
class CheckSyntaxOfTask(View):
    """Dry-run a user's SQL against the 'postgres_trade' DB to validate it."""

    def post(self, request):
        # NOTE(review): this executes raw user-provided SQL; acceptable only
        # if 'postgres_trade' is a dedicated sandbox connection — confirm.
        # The cursor is never closed explicitly.
        user_cursor = connections['postgres_trade'].cursor()
        try:
            user_cursor.execute(request.POST['script'])
            dictfetchall(user_cursor)
            return JsonResponse({"msg": "OK"})
        except ProgrammingError as ex:
            logger.error(f'DB Error: {ex}')
            return JsonResponse({'error': str(ex)}, status=400)
class TaskView(CustomAuthMixin, View):
    """Return the description of a single task owned by the requesting user."""

    def test_func(self):
        """Permit only authenticated users whose themes include the task."""
        user = self.request.user
        if not user.is_authenticated:
            return False
        task = Task.objects.get(pk=self.request.POST["task_id"])
        return user in task.task_group.theme.user.all()

    def post(self, request):
        """Look the task up by id and serialise its description."""
        task = Task.objects.get(pk=self.request.POST["task_id"])
        return JsonResponse({"description": task.description})
class GradeTask(CustomAuthMixin, View):
    """GET: fetch an existing grade; POST: grade a submitted SQL script."""

    def test_func(self):
        """Permit only users whose themes include the referenced task.

        The task id arrives as POST["task"] for submissions and
        GET["task_id"] for lookups.
        """
        if not self.request.user.is_authenticated:
            return False
        if self.request.method == 'POST':
            theme = Task.objects.get(pk=self.request.POST["task"]).task_group.theme
        else:
            theme = Task.objects.get(pk=self.request.GET["task_id"]).task_group.theme
        return self.request.user in theme.user.all()

    def get(self, request):
        """Return the stored grade, task description and the user's script."""
        grade = Grade.objects.get(task_id=request.GET["task_id"], user=request.user)
        return JsonResponse({
            "description": grade.task.description,
            "user_script": grade.user_script,
            "grade": grade.get_grade_json()
        })

    def post(self, request):
        """Execute the user's script and the reference script, then grade.

        Grading criteria set on the Grade object: the script runs, every
        required keyword is used, the row count matches, the output matches.
        NOTE(review): both cursors are left unclosed, and errors go to
        print() instead of the module logger.
        """
        user_cursor = connections['postgres_trade'].cursor()
        correct_cursor = connections['postgres_trade'].cursor()
        task = Task.objects.get(pk=self.request.POST["task"])
        grade = Grade.find_or_create(user=self.request.user, task=task)
        user_script = request.POST['script']
        correct_cursor.execute(task.correct_script)
        correct_result = dictfetchall(correct_cursor)
        try:
            user_cursor.execute(user_script)
            user_result = dictfetchall(user_cursor)
            grade.user_script = user_script
            # Every required keyword must appear verbatim in the script.
            for keyword in task.key_words.all():
                if user_script.find(keyword.word) == -1:
                    grade.keywords_are_used = False
                    break
            if len(user_result) == len(correct_result):
                grade.is_same_count_of_lines = True
            if user_result == correct_result:
                grade.is_same_output = True
        except ProgrammingError as e:
            # Invalid SQL: the script neither works nor earns keyword credit.
            print(e)
            grade.is_work = False
            grade.keywords_are_used = False
        grade.set_final_score()
        return JsonResponse({"msg": "OK"})
class FinishTheme(CustomAuthMixin, View):
    """Mark every unanswered task of a task group as not done for the user."""

    def test_func(self):
        """Only owners of the task group's theme may finish it."""
        requester = self.request.user
        if not requester.is_authenticated:
            return False
        group = TaskGroup.objects.get(pk=self.request.POST["task_group"])
        return requester in group.theme.user.all()

    def post(self, request):
        """Zero out the grade of every task the user never submitted a script for."""
        group = TaskGroup.objects.get(pk=self.request.POST["task_group"])
        for task in group.task_set.all():
            grade = Grade.find_or_create(request.user, task)
            if not grade.user_script:
                grade.set_not_done()
        return JsonResponse({"msg": "OK"})
class GradeTheme(CustomAuthMixin, View):
    """Theme results page: per-task grades once passed, intro page otherwise."""

    def test_func(self):
        """Only users assigned to the theme (id parsed from the URL) may view it."""
        if not self.request.user.is_authenticated:
            return False
        allow_themes = list(Theme.objects.filter(user=self.request.user).values_list('id', flat=True))
        theme_id = self.request.build_absolute_uri().split('/')[-2]
        return int(theme_id) in allow_themes

    def get(self, request, theme_id):
        """Render theme_passed.html when grades exist, else the intro page."""
        theme = Theme.objects.get(pk=theme_id)
        task_group = TaskGroup.objects.filter(theme=theme).first()
        grades = Grade.theme_is_passed(theme, request.user)
        if grades:
            current_grade = sum([grade.final_score for grade in grades])
            return render(request, "theme_passed.html", context={
                "tasks": [
                    {
                        "id": task.id,
                        "description": task.description,
                        "grade": Grade.objects.get(task=task, user=request.user).final_score
                    } for task in task_group.task_set.all()
                ],
                "current_grade": current_grade,
                "max_grade": len(grades),
                # Bug fix: the threshold was `len(grades) / 0.6`, which is
                # *larger* than the maximum grade shown above; a 60% pass
                # threshold (`* 0.6`) is the evident intent.
                "complete": current_grade > len(grades) * 0.6
            })
        return render(request, "theme.html", context={
            "description": theme.description,
            "time": sum([task.time for task in task_group.task_set.all()]),
            "start_link": task_group.id,
            "subject_title": task_group.subject_area.title,
            "subject_image": task_group.subject_area.schema
        })
| lekarus/SQLQueries | web_app/app/views.py | views.py | py | 12,185 | python | en | code | 0 | github-code | 36 |
21414967137 | from colour import Color
import cv2 as cv2
import numpy as np
# Interactive demo: pick a colour by name, then highlight pixels of that
# colour family in color.png using an HSV hue-range mask.
a= input('enter a color=')
b = Color(a)
# NOTE(review): .hsl yields HSL fractions in [0, 1]; scaling by 255 and then
# running them through a BGR->HSV conversion is dubious, but kept as-is.
c = b.hsl
d = tuple(255*x for x in c)
print(d)
print(list(d))
img = cv2.imread('color.png')
hsl1 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
green = np.uint8([[list(d)]])
hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
print (hsv_green)
f = hsv_green[0][0][0]
if f>10:
    lower = np.array([f-10,50,50], np.uint8)
else:
    # Bug fix: the dtype was inside the value list
    # (np.array([f,100,100, np.uint8])), producing a 4-element object array
    # and an invalid lower bound for cv2.inRange.
    lower = np.array([f,100,100], np.uint8)
upper = np.array([f+10,255,255], np.uint8)
colors = cv2.inRange(hsl1, lower,upper)
res = cv2.bitwise_and(img, img, mask = colors)
cv2.imshow('original', img)
cv2.imshow(a, res)
cv2.waitKey(0)
| DESK-webdev/team_webdev | img_pros/star_4.py | star_4.py | py | 657 | python | en | code | 0 | github-code | 36 |
12982777466 | import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter1d
from scipy.ndimage.morphology import distance_transform_edt
from scipy.stats import linregress
from skimage.future import graph
from skimage.measure import regionprops
from sklearn.linear_model import LinearRegression
def get_myo_offset(idx, tf, n=70):
    """Myosin-weighted mean distance from cell *idx* within an *n*-px ring.

    Uses the module-level `segmentation` and `myosin` arrays at frame *tf*:
    sum(myosin * distance) / sum(myosin) over the ring outside the cell.
    """
    outside = segmentation[tf] != idx
    dist = distance_transform_edt(outside)
    ring_dist = dist * (dist <= n) * outside
    ring_mask = ring_dist > 0
    ring_myo = myosin[tf] * ring_mask
    dist_weighted_myo = myosin[tf] * ring_dist
    return np.sum(dist_weighted_myo) / np.sum(ring_myo)
def get_myo_around(idx, tf, n=10, exclude=None, cut=None):
    """Mean myosin concentration in an *n*-px ring around cell *idx* at *tf*.

    When *exclude*/*cut* are given, part of the ring is zeroed out by
    cut_doughnut (cut: 'h'/'v' split line, exclude: 'in'/'out' half).
    The divisor 0.0148 is presumably the pixel area in physical units —
    TODO confirm.
    """
    no_cell_mask = segmentation[tf] != idx
    dist_tr = distance_transform_edt(no_cell_mask)
    mask_around = (dist_tr <= n) * no_cell_mask
    if exclude is not None:
        assert cut is not None
        # Bug fix (clarity): cut_doughnut mutates its first argument in
        # place; the old code bound the return value to `myo_around`, a name
        # immediately overwritten below, and only worked by accident of that
        # mutation. Rebind the mask explicitly instead.
        mask_around = cut_doughnut(mask_around, np.invert(no_cell_mask), cut, exclude)
    myo_around = myosin[tf] * mask_around
    return np.sum(myo_around) / (np.sum(mask_around) * 0.0148)
def show_myo(idx, tf, n=70):
    """Open a napari viewer showing cell *idx*'s contour, an *n*-px ring
    contour, and the myosin signal inside the cell and in the ring."""
    # Bug fix: napari was used but never imported anywhere in this module,
    # so the call below raised NameError. Imported locally to keep the
    # heavyweight GUI dependency out of the analysis-only code paths.
    import napari

    no_cell_mask = segmentation[tf] != idx
    cell_mask = segmentation[tf] == idx
    dist_tr = distance_transform_edt(no_cell_mask)
    cell_countour = (dist_tr <= 2) * no_cell_mask
    myo_countour = (dist_tr < n + 1) * (dist_tr > n - 1)
    mask_around = (dist_tr <= n) * no_cell_mask
    myo_around = myosin[tf] * mask_around
    myo_in = myosin[tf] * cell_mask
    viewer = napari.Viewer()
    viewer.add_image(cell_countour + myo_countour, blending='additive')
    viewer.add_image(myo_around + myo_in, blending='additive')
def cut_doughnut(myo_mask, cell_mask, line='h', excl='in'):
    """Zero out part of *myo_mask* in place, relative to the cell's bbox.

    line='h' operates on rows, line='v' on columns; excl='in' clears the
    band spanned by the bounding box, excl='out' clears everything outside
    it. Returns the (mutated) mask.
    """
    r0, c0, r1, c1 = regionprops(cell_mask.astype(int))[0]['bbox']
    if line == 'h':
        if excl == 'in':
            myo_mask[r0:r1] = 0
        elif excl == 'out':
            myo_mask[:r0] = 0
            myo_mask[r1:] = 0
    elif line == 'v':
        if excl == 'in':
            myo_mask[:, c0:c1] = 0
        elif excl == 'out':
            myo_mask[:, :c0] = 0
            myo_mask[:, c1:] = 0
    return myo_mask
def get_myo_in(idx, tf):
    """Mean myosin concentration inside cell *idx* at frame *tf*.

    The divisor 0.0148 is presumably the pixel area in physical units —
    TODO confirm.
    """
    inside = segmentation[tf] == idx
    total_myo = np.sum(myosin[tf] * inside)
    return total_myo / (np.sum(inside) * 0.0148)
def get_area(idx, tf):
    """Return the pixel area of cell *idx* in segmentation frame *tf*."""
    return np.sum(segmentation[tf] == idx)
def smooth(values, sigma=3, tolerance=0.1):
    """Despike, Gaussian-smooth and trim a 1-D sequence.

    Any interior value whose ratio to its neighbours' mean falls outside
    (1 - tolerance, 1 + tolerance) is replaced by that mean (sequentially,
    so earlier replacements feed later checks). The result is filtered with
    gaussian_filter1d and returned without its first and last elements.
    """
    arr = np.array(values)
    upper, lower = 1 + tolerance, 1 - tolerance
    for i in range(1, len(arr) - 1):
        neigh_mean = (arr[i - 1] + arr[i + 1]) / 2
        if not lower < (arr[i] / neigh_mean) < upper:
            arr[i] = neigh_mean
    smoothed = gaussian_filter1d(arr, sigma=sigma)
    return smoothed[1:-1]
def get_size_and_myo_dict(myo_s=3, area_s=3):
    """Build per-cell time series of myosin concentration, area and offset.

    For every non-background label tracked for at least 5 frames, computes
    smoothed series (smooth() drops the first/last point, hence tps[1:-1])
    and returns three dicts keyed by cell id, each mapping frame -> value:
    (myosin concentration, area, myosin offset).
    """
    all_myo_conc = {}
    all_sizes = {}
    all_offsets = {}
    # NOTE(review): idx2row is never filled or returned — dead variable.
    idx2row = {}
    for idx in np.unique(segmentation):
        if idx == 0: continue
        # Frames in which this cell label is present.
        tps = [tp for tp, segm_tp in enumerate(segmentation) if (idx in segm_tp)]
        if len(tps) < 5: continue
        myo = [get_myo_in(idx, tp) for tp in tps]
        myo = smooth(myo, sigma=myo_s, tolerance=1)
        offset = [get_myo_offset(idx, tp) for tp in tps]
        offset = smooth(offset, sigma=area_s, tolerance=1)
        area = [get_area(idx, tp) for tp in tps]
        area = smooth(area, sigma=area_s, tolerance=0.1)
        all_myo_conc[idx] = {t: m for t, m in zip(tps[1:-1], myo)}
        all_sizes[idx] = {t: s for t, s in zip(tps[1:-1], area)}
        all_offsets[idx] = {t: o for t, o in zip(tps[1:-1], offset)}
    return all_myo_conc, all_sizes, all_offsets
def get_myo_time_points(myo_conc, sizes, offs, ex=None, plane=None):
    """Collect per-frame feature rows for every tracked cell.

    For each consecutive frame pair (tp, tp+1) present for a cell, records
    [relative size change, cellular myosin, neighbourhood myosin (70-px
    ring, optionally cut by ex/plane), myosin offset, cell id, frame].

    Returns:
        np.ndarray of shape (n_points, 6).
    """
    points_list = []
    for idx in myo_conc.keys():
        tps = myo_conc[idx].keys()
        for tp in range(min(tps), max(tps) - 1):
            # Skip gaps in the track.
            if tp not in tps or tp+1 not in tps: continue
            size_change = sizes[idx][tp + 1] / sizes[idx][tp]
            cell_myo = myo_conc[idx][tp]
            nbr_myo = get_myo_around(idx, tp, 70, ex, plane)
            offset = offs[idx][tp]
            points_list.append([size_change, cell_myo, nbr_myo, offset, idx, tp])
    return np.array(points_list)
def train_regr(data):
    """Shuffle *data* in place, fit linear regression on one half and return
    the R^2 score on the other half.

    Columns 1:3 (cellular and neighbourhood myosin) are the features,
    column 0 (relative size change) is the target.
    NOTE: shuffles the caller's array in place.
    """
    np.random.shuffle(data)
    half = len(data) // 2
    features, labels = data[:, 1:3], data[:, 0]
    # Bug fix: `normalize=True` was deprecated in scikit-learn 1.0 and
    # removed in 1.2 (TypeError on modern versions). Plain OLS predictions
    # and R^2 are invariant to feature scaling, so it can simply be dropped.
    linear_regr = LinearRegression()
    linear_regr.fit(features[:half], labels[:half])
    return linear_regr.score(features[half:], labels[half:])
def get_best_regr(data, n=100):
    """Run *n* randomised train/test splits and print max and mean R^2."""
    scores = []
    for _ in range(n):
        scores.append(train_regr(data))
    print("Max accuracy is", np.max(scores))
    print("Mean accuracy is", np.mean(scores))
# Load the embryo recording (myosin signal + cell segmentation), dropping
# the first and last 3 frames, then build the per-cell feature table and
# report the regression accuracy over 400 random splits.
data_h5 = '/home/zinchenk/work/drosophila_emryo_cells/data/img5_new.h5'
with h5py.File(data_h5, 'r') as f:
    myosin = f['myosin'][3:-3]
    segmentation = f['segmentation'][3:-3]
myo, area, offsets = get_size_and_myo_dict(myo_s=3, area_s=3)
to_plot = get_myo_time_points(myo, area, offsets)
get_best_regr(to_plot, 400)
# Figure export. Column meaning in to_plot: 0 = relative size change,
# 1 = cellular myosin, 2 = neighbourhood myosin, 3 = myosin offset.
fp = '/home/zinchenk/work/drosophila_emryo_cells/imgs/revision_svg/'
## the loglog plot
fig, ax = plt.subplots()
plt.scatter(to_plot[:, 1], to_plot[:, 2], c=to_plot[:, 0], cmap='RdYlBu', vmin=0.9, vmax=1.1, s=20)
ax.vlines([80000, 100000], 24000, 220000, linestyles='dotted')
ax.hlines([24000, 220000], 80000, 100000, linestyles='dotted')
plt.xlabel("[cellular myosin]", size=35)
plt.ylabel("[surrounding myosin]", size=35)
#plt.title('Embryo 5', size=35)
ax.tick_params(length=15, width=3)
ax.tick_params(length=8, width=1, which='minor')
plt.xticks(fontsize=35)
plt.yticks(fontsize=35)
plt.loglog()
cb = plt.colorbar()
for t in cb.ax.get_yticklabels():
    t.set_fontsize(35)
figure = plt.gcf()
figure.set_size_inches(16, 12)
plt.savefig(fp + 'fig3j.svg', format='svg')
# the zoom in plot colored by size
# Restrict to the cellular-myosin band marked by the dotted box above.
plot_cutout = to_plot[(80000 < to_plot[:, 1]) & (to_plot[:, 1] < 100000)]
slope, intercept, rvalue, _, _ = linregress(plot_cutout[:, 0], plot_cutout[:, 2])
y = intercept + slope * plot_cutout[:, 0]
fig, ax = plt.subplots()
ax.plot(plot_cutout[:, 0], y, 'red', label='linear fit')
ax.scatter(plot_cutout[:, 0], plot_cutout[:, 2], s=200, c='tab:grey')
plt.xlabel("Relative size change", size=35)
plt.ylabel("Surrounding myosin", size=35)
plt.text(1.03, 40000, "Correlation={:.4f}".format(rvalue), size=35)
plt.legend(loc='upper left', fontsize=35)
ax.tick_params(length=15, width=3)
plt.xticks(fontsize=35)
plt.yticks(fontsize=35)
figure = plt.gcf()
figure.set_size_inches(16, 16)
plt.savefig(fp + 'fig3k.svg', format='svg')
# the ratio vs size change plot
# Split points into expanding (>1.5% growth), constricting (>1.5% shrink)
# and roughly unchanged cells.
exp = to_plot[np.where(to_plot[:, 0] > 1.015)]
constr = to_plot[np.where(to_plot[:, 0] < 0.985)]
middle = to_plot[np.where((to_plot[:, 0] >= 0.985) & (to_plot[:, 0] <= 1.015))]
fig, ax = plt.subplots()
ax.scatter(exp[:, 1] / exp[:, 2], exp[:, 0], c='tab:blue')
ax.scatter(constr[:, 1] / constr[:, 2], constr[:, 0], c='tab:red')
ax.scatter(middle[:, 1] / middle[:, 2], middle[:, 0], c='y')
ax.hlines(1, 0.4, 4.9, color='black')
ax.vlines(1, 0.83, 1.10, color='black')
[tick.label.set_fontsize(25) for tick in ax.xaxis.get_major_ticks()]
[tick.label.set_fontsize(25) for tick in ax.yaxis.get_major_ticks()]
plt.xlabel("cellular/neighbourhood myosin ratio", size=35)
plt.ylabel("relative size change", size=35)
#plt.title('Embryo 5', size=35)
#plt.legend(loc='lower right', fontsize=25)
plt.show()
sm_range = np.arange(0.25, 5.25, 0.125)
fig, ax = plt.subplots()
plt.hist(exp[:, 1] / exp[:, 2], bins=sm_range, density=True, histtype='bar', label='Expanding', color='tab:blue', alpha=0.6)
plt.hist(constr[:, 1] / constr[:, 2], bins=sm_range, density=True, histtype='bar', label='Constricting', color='tab:red', alpha=0.6)
plt.ylabel('probability density', size=35)
plt.xlabel('cellular/neighbourhood myosin ratio', size=35)
plt.legend(loc='upper right', fontsize=25)
[tick.label.set_fontsize(25) for tick in ax.xaxis.get_major_ticks()]
[tick.label.set_fontsize(25) for tick in ax.yaxis.get_major_ticks()]
#plt.title('Embryo 5', size=35)
plt.show()
# the offset vs myo in
# NOTE(review): 0.1217 looks like a pixel-to-physical-unit scale factor for
# the offset axis — confirm.
fig, ax = plt.subplots()
plt.scatter(to_plot[:, 1], to_plot[:, 3] * 0.1217, c=to_plot[:, 0], cmap='RdYlBu', vmin=0.9, vmax=1.1, s=20)
plt.xscale('log')
plt.xlabel("[cellular myosin]", size=35)
plt.ylabel("Myosin offset in the neighbourhood", size=35)
cb = plt.colorbar()
ax.tick_params(length=15, width=3)
ax.tick_params(length=8, width=1, which='minor')
plt.xticks(fontsize=35)
plt.yticks(fontsize=35)
for t in cb.ax.get_yticklabels():
    t.set_fontsize(35)
figure = plt.gcf()
figure.set_size_inches(16, 12)
plt.savefig(fp + 'fig3i.svg', format='svg')
plt.show()
| kreshuklab/drosophila_embryo_cells | scripts/predict_fate.py | predict_fate.py | py | 8,815 | python | en | code | 0 | github-code | 36 |
25715720301 | from functools import partial
from typing import Dict, Callable
from squirrel.driver.msgpack import MessagepackDriver
from squirrel.serialization import MessagepackSerializer
from squirrel.store import SquirrelStore
from squirrel.iterstream import IterableSource, Composable
import numpy as np
# Split bookkeeping: SPLIT_* are 25/50/80/90% prefixes of N_SAMPLES
# (assumed per-generator dataset size — TODO confirm). Samples with any
# value above MAX_VALUE are dropped; N_SHARD samples go into each shard.
N_SAMPLES = 2500
MAX_VALUE = 10.0
SPLIT_25 = int(N_SAMPLES * 0.25)
SPLIT_50 = int(N_SAMPLES * 0.5)
SPLIT_80 = int(N_SAMPLES * 0.8)
SPLIT_90 = int(N_SAMPLES * 0.9)
N_SHARD = 100
def update_range_dict(range_dict: Dict, name: str, value: np.array, op: Callable = np.maximum) -> None:
    """Fold *value* into ``range_dict[name]`` using *op* (default: element-wise max).

    The first sighting of *name* simply stores *value*; later calls combine
    the stored extreme with the new one.
    """
    if name not in range_dict:
        range_dict[name] = value
    else:
        range_dict[name] = op(value, range_dict[name])
def unify_range_dicts(range_dict1: Dict, range_dict2: Dict, op: Callable = np.maximum) -> Dict:
    """Combine two range dicts key-wise with *op* (default: element-wise max).

    Iterates the keys of *range_dict1*; both dicts must contain every such key.
    """
    return {name: op(range_dict1[name], range_dict2[name]) for name in range_dict1}
def map_update_ranges(sample: Dict, range_dict: Dict) -> Dict:
    """Record per-feature absolute maxima of one sample into *range_dict*.

    Updates the "x_range"/"y_range" entries from data_x/data_y and returns
    the sample unchanged (so it can be used inside a stream .map()).
    """
    for field, range_name in (("data_x", "x_range"), ("data_y", "y_range")):
        column_max = np.amax(np.abs(sample[field]), axis=0)
        update_range_dict(range_dict, range_name, column_max)
    return sample
def get_range_dict(base_url: str, split: str) -> Dict:
    """Get maximums and minimums for normalization.

    Streams every sample of ``<base_url>/<split>`` once, accumulating the
    per-feature absolute maxima into the returned dict (keys "x_range" and
    "y_range" — see map_update_ranges).
    """
    range_dict = {}
    it = MessagepackDriver(f"{base_url}/{split}").get_iter()
    # .join() drains the stream purely for the side effect on range_dict.
    it.map(partial(map_update_ranges, range_dict=range_dict)).tqdm().join()
    return range_dict
def save_shard(it: Composable, store: SquirrelStore) -> None:
    """Save set of shards.

    Materialises the (already batched) iterable and writes it to the store
    as a single shard.
    """
    store.set(value=list(it))
def scale(sample: Dict, range_dict: Dict) -> Dict:
    """Normalise data_x/data_y by their per-feature extremes.

    Extremes are clipped away from zero (min 1e-6) to avoid division by
    zero; edge_index is passed through untouched.
    """
    def _norm(values, extremes):
        safe = np.clip(extremes, a_min=0.000001, a_max=None)
        return values / safe.reshape(1, -1)

    return {
        "data_x": _norm(sample["data_x"], range_dict["x_range"]),
        "data_y": _norm(sample["data_y"], range_dict["y_range"]),
        "edge_index": sample["edge_index"],
    }
def filter_max(sample: Dict) -> bool:
    """Predicate: keep only samples whose data_x and data_y stay within MAX_VALUE."""
    within_limits = (
        sample["data_x"].max() <= MAX_VALUE
        and sample["data_y"].max() <= MAX_VALUE
    )
    return within_limits
def save_stream(
    it: Composable, output_url: str, split: str, range_dict: Dict = None, filter_outliers: bool = True
) -> None:
    """Scale, filter outliers and save composable as shards.

    No-op when `it` is None (absent split slot). When `range_dict` is given
    the stream is normalized first, so outlier filtering acts on the *scaled*
    values. Shards of N_SHARD samples are written to `{output_url}/{split}`;
    the final partial batch is kept (drop_last_if_not_full=False).
    """
    if it is None:
        return
    store = SquirrelStore(f"{output_url}/{split}", serializer=MessagepackSerializer())
    if range_dict is not None:
        it = it.map(partial(scale, range_dict=range_dict))
    if filter_outliers:
        it = it.filter(filter_max)
    it.batched(N_SHARD, drop_last_if_not_full=False).map(partial(save_shard, store=store)).tqdm().join()
def iterate_source_data(fem_generator: str):
    """Yield split tuples (TRAIN1, VAL1, TRAIN2, VAL2, TEST1, TEST2) per mesh generator.

    One tuple is yielded per routed collection; all slots except the target
    one are None:
      * "u_mesh"                       -> TEST2 (held-out shape, streamed lazily)
      * "u_mesh_extra" / "u_mesh_rand" -> skipped entirely (no yield)
      * other "*_extra"                -> TEST1 (first 25% only)
      * other "*_rand"                 -> TRAIN2 / VAL2 (80/20 split)
      * remaining generators           -> TRAIN1 / VAL1 (80/20 split)

    Fixes: the local variable `iter` shadowed the builtin (renamed to
    `stream`), and the `-> None` annotation was wrong for a generator
    function (removed).
    """
    mesh_generators = [
        "square",
        "disk",
        "cylinder",
        "l_mesh",
        "u_mesh",
        "square_extra",
        "disk_extra",
        "cylinder_extra",
        "l_mesh_extra",
        "u_mesh_extra",
        "square_rand",
        "disk_rand",
        "cylinder_rand",
        "l_mesh_rand",
        "u_mesh_rand",
    ]
    for mesh_g in mesh_generators:
        key = f"{fem_generator}_{mesh_g}"
        path = f"gs://squirrel-core-public-data/gnn_bvp_solver/{key}"
        stream = MessagepackDriver(path).get_iter()
        print("GENERATING:", fem_generator, mesh_g)
        if mesh_g.startswith("u_mesh"):
            if mesh_g == "u_mesh":
                # test set 2 -- slot order: TRAIN1, VAL1, TRAIN2, VAL2, TEST1, TEST2
                yield None, None, None, None, None, stream
            # u_mesh_extra / u_mesh_rand intentionally fall through without yielding
        else:
            # all but U-mesh
            if mesh_g.endswith("extra"):
                all_data = stream.tqdm().collect()
                # test set 1
                yield None, None, None, None, IterableSource(all_data[:SPLIT_25]), None
            elif mesh_g.endswith("rand"):
                all_data = stream.tqdm().collect()
                # train/val set 2
                yield None, None, IterableSource(all_data[:SPLIT_80]), IterableSource(all_data[SPLIT_80:]), None, None
            else:
                all_data = stream.tqdm().collect()
                # train/val set 1
                yield IterableSource(all_data[:SPLIT_80]), IterableSource(all_data[SPLIT_80:]), None, None, None, None
def scale_and_store(in_split: str, out_split: str, range_dict: Dict, base_url_in: str, base_url_out: str) -> None:
    """Normalize one stream and save it.

    Reads `{base_url_in}/{in_split}`, applies `scale` with `range_dict`
    (plus default outlier filtering) via `save_stream`, and writes shards
    to `{base_url_out}/{out_split}`.
    """
    it = MessagepackDriver(f"{base_url_in}/{in_split}").get_iter()
    save_stream(it, base_url_out, out_split, range_dict)
def main(fem_generator: str, out_url: str) -> None:
    """Generate raw (unnormalized) splits for a single generator and save
    each non-empty slot as `raw_<split>` shards under `out_url`."""
    split_names = ("train1", "val1", "train2", "val2", "test1", "test2")
    for streams in iterate_source_data(fem_generator):
        print("saving splits")
        for split_name, stream in zip(split_names, streams):
            print(split_name)
            save_stream(stream, out_url, f"raw_{split_name}")
        print("moving on")
def main_scale(in_url: str, out_url: str) -> None:
    """Apply normalization to generated data.

    Extrema are computed on the two *training* splits only and the merged
    range dict is reused for every split, so val/test are scaled with the
    same factors as train.
    NOTE(review): the "ma"/"no_ma" output naming is not explained here --
    presumably mesh-augmented vs. not; confirm upstream.
    """
    range_dict1 = get_range_dict(in_url, "raw_train1")
    range_dict2 = get_range_dict(in_url, "raw_train2")
    range_dict = unify_range_dicts(range_dict1, range_dict2)
    print("unnormalized ranges: ", range_dict)
    print("scale and store")
    print("train")
    scale_and_store("raw_train1", "norm_train_no_ma", range_dict, in_url, out_url)
    scale_and_store("raw_train2", "norm_train_ma", range_dict, in_url, out_url)
    print("val")
    scale_and_store("raw_val1", "norm_val_no_ma", range_dict, in_url, out_url)
    scale_and_store("raw_val2", "norm_val_ma", range_dict, in_url, out_url)
    print("test1")
    scale_and_store("raw_test1", "norm_test_sup", range_dict, in_url, out_url)
    print("test2")
    scale_and_store("raw_test2", "norm_test_shape", range_dict, in_url, out_url)
def process(generator_key: str) -> None:
    """Process data from a single fem generator"""
    base_url_gs = f"gs://squirrel-core-public-data/gnn_bvp_solver/{generator_key}"
    base_url = f"data/{generator_key}"  # store intermediate results locally
    main(generator_key, base_url)  # step 1: raw splits -> local disk
    main_scale(base_url, base_url_gs)  # step 2: normalized splits -> GCS bucket
if __name__ == "__main__":
    # Build and upload splits for each physics generator in turn.
    for label_g in ["ElectricsRandomChargeGenerator", "MagneticsRandomCurrentGenerator", "ElasticityFixedLineGenerator"]:
        process(label_g)
| merantix-momentum/gnn-bvp-solver | gnn_bvp_solver/preprocessing/split_and_normalize.py | split_and_normalize.py | py | 7,373 | python | en | code | 12 | github-code | 36 |
43362574911 | from collections import deque
def bfs_shortest_path(adj_matrix, src, dest):
    """Return the number of edges on a shortest src->dest path in an
    unweighted graph given as an adjacency matrix, or -1 if unreachable.

    Fix: the vertex count is now derived from the matrix itself instead of
    relying on a script-level global `n`, so the function also works when
    imported or called before the global is defined.
    """
    n = len(adj_matrix)
    dist = [float('inf')] * n
    dist[src] = 0
    q = deque()
    q.append(src)
    while q:
        curr = q.popleft()
        if curr == dest:
            return dist[dest]
        for neighbor in range(n):
            # dist == inf marks "not visited yet"
            if adj_matrix[curr][neighbor] and dist[neighbor] == float('inf'):
                dist[neighbor] = dist[curr] + 1
                q.append(neighbor)
    return -1
# Read the vertex count, the n x n adjacency matrix (one row per line),
# and a 1-based (src, dest) pair from stdin.
n = int(input())
matrix = [tuple(map(int, input().split())) for _ in range(n)]
src, dest = map(int, input().split())
print(bfs_shortest_path(matrix, src-1, dest-1)) | slayzerg01/yandex-training-3.0 | 36/task36.py | task36.py | py | 635 | python | en | code | 0 | github-code | 36 |
22356028668 | import pickle
import json
import sys
from sklearn.feature_extraction.text import CountVectorizer
# Loading the saved model
# NOTE(review): pickle.load assumes these files are trusted; the open()
# handles are never closed (leaked until GC) and the absolute Windows
# paths make the script machine-specific.
loaded_model = pickle.load(open('C:/Users/abhis/OneDrive/Desktop/UsingSpawn/logreg_model.pkl', 'rb'))
# Loading the CountVectorizer vocabulary
loaded_vec = CountVectorizer(vocabulary=pickle.load(open('C:/Users/abhis/OneDrive/Desktop/UsingSpawn/count_vector.pkl', 'rb')))
loaded_tfidf = pickle.load(open('C:/Users/abhis/OneDrive/Desktop/UsingSpawn/tfidf.pkl', 'rb'))
# Defining the target names
# Index i is the label for the class index the model predicts.
target_names = ["Bank Account services", "Credit card or prepaid card", "Others", "Theft/Dispute Reporting", "Mortgage/Loan"]
def make_prediction(input_data):
    """Classify one complaint and return {'prediction': <category name>}.

    `input_data` must be a dict with a 'text' key. The text is vectorized
    with the saved CountVectorizer, re-weighted by the saved TF-IDF
    transformer, and classified by the loaded logistic-regression model;
    the predicted class index is mapped to its human-readable label.
    """
    complaint_text = input_data['text']
    counts = loaded_vec.transform([complaint_text])
    tfidf_features = loaded_tfidf.transform(counts)
    predicted_index = loaded_model.predict(tfidf_features)[0]
    return {'prediction': target_names[predicted_index]}
if __name__ == '__main__':
    # Receive input data from the command line
    # (expects a JSON object as argv[1], e.g. '{"text": "..."}')
    input_data = json.loads(sys.argv[1])
    # Make a prediction
    prediction = make_prediction(input_data)
    # Output the prediction as a JSON string
    print(json.dumps(prediction))
| abtyagi15/Automatic-Ticket-Classification | classify.py | classify.py | py | 1,499 | python | en | code | 0 | github-code | 36 |
37012746527 | from collections import deque
from typing import List
class Solution:
    @staticmethod
    def maxSlidingWindow(nums: List[int], k: int) -> List[int]:
        """Return the maximum of every length-k sliding window of `nums`.

        Classic monotonic-deque algorithm: the deque holds (value, index)
        pairs with strictly decreasing values, so the front is always the
        current window's maximum. O(n) time, O(k) extra space.

        Fix: the bare `raise ValueError()` now carries a message.
        """
        if not nums or len(nums) < k:
            raise ValueError("nums must be non-empty and contain at least k elements")
        window = deque()
        res = []
        for i in range(len(nums)):
            # Evict the front once its index has slid out of the window.
            while window and (i - k) >= window[0][1]:
                window.popleft()
            # Evict smaller tail values: they can never become a maximum.
            while window and (nums[i] >= window[-1][0]):
                window.pop()
            window.append((nums[i], i))
            if window and i >= k - 1:
                res.append(window[0][0])
        return res
# Checking in console (smoke test when the file is run directly)
if __name__ == '__main__':
    Instant = Solution()
    Solve = Instant.maxSlidingWindow(nums = [1,3,-1,-3,5,3,6,7], k = 3 )
    # nums = [1,3,-1,-3,5,3,6,7], k = 3 -> [3,3,5,5,6,7]
    # nums = [1], k = 1 -> [1]
    print(Solve)
# # Alternative method:
# class Solution:
# def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
# if not nums or len(nums) < k:
# raise ValueError()
#
# n = len(nums)
# left, right = [0] * (n + 1), [0] * (n + 1)
# left[-1], right[-1] = float('-inf'), float('-inf')
#
# for i,j in zip(range(0, n), reversed(range(0, n))):
# left[i] = nums[i] if i % k == 0 else max(left[i-1], nums[i])
# right[j] = nums[j] if (j + 1) % k == 0 else max(right[j+1], nums[j])
#
# res = []
# for i in range(n - k + 1):
# res.append(max(left[i + k - 1], right[i]))
#
# return res
| Manu87DS/Solutions-To-Problems | LeetCode/Python Solutions/Sliding Window Maximum/sliding.py | sliding.py | py | 1,559 | python | en | code | null | github-code | 36 |
22292471973 | '''
동빈나 럭키 스트레이트
입력예제 123402 답 LUCKY
7755 답 READY
'''
# Lucky straight: print 'LUCKY' when the digit sum of the left half of the
# number equals the digit sum of the right half, otherwise 'READY'.
# Solution 1: compare the two halves' digit sums directly.
s=input()
n=len(s)
left=s[:n//2]
right=s[n//2:]
left_sum=sum([int(i) for i in left])
right_sum=sum([int(i) for i in right])
if left_sum==right_sum:
    print('LUCKY')
else:
    print('READY')
# Reference answer (Korean comment translated: "답" = "answer"): one running
# total -- add left-half digits, subtract right-half digits; zero = balanced.
n=input()
x=len(n)
summary=0
for i in range(x//2):
    summary+=int(n[i])
for i in range(x//2,x):
    summary-=int(n[i])
if summary==0:
print('LUCKY')
else:
print('READY') | 98hyun/algorithm | implement/b_20.py | b_20.py | py | 472 | python | en | code | 0 | github-code | 36 |
17952993717 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 14 13:57:29 2019
@author: Witold Klimczyk
# ICEM
foil = Airfoil(filein = r'E:\propeller\mh_airofils\mh117/mh117.txt', t = 0.001, chord = 0.2)
foil.runFluent(15,.2,1)#
# XFOIL
foil2 = Airfoil(ftype = 'XFOIL', filein = r'E:\AIRFOIL\airfoils/naca0012.txt', t = 0.001, chord = 0.2)
# x,y
X = pd.read_csv(f'http://airfoiltools.com/airfoil/seligdatfile?airfoil={foilname}-il')
X.to_csv(r'E:\AIRFOIL\temp.csv', header = False, index = False)
X = np.loadtxt(r'E:\AIRFOIL\temp.csv')
foil = Airfoil( 'XFOIL', r'E:\AIRFOIL\temp.csv')
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
import subprocess
import os
from subprocess import run, PIPE
import gc
import pandas as pd
from urllib.error import HTTPError
from fluentScheme import generateScheme
from icemScheme import generateICEMScheme
class Airfoil():
    def __init__(self, ftype = 'ICEM', filein = None, x = None, y = None, T_req = None, camber = None,
                 chord = None, beta = None, z = 0, fileoutICEM = None, t = 0, dx = 0, dy = 0, split = False, origin = 0,
                 camb = False, r_LE = None, verbose = False, workingdir = r'E:\AIRFOIL', xfoildir = None):
        """
        inputs:
            - ftype/name: 'ICEM' / 'XFOIL' / 'XY', specifies type of airofil input data or its name to download from airfoiltools
            - filein: '.txt' file with points coords (can be non-txt)
            - chord: dimensionless chord
            - beta: originally used for propeller pitch, for wing stands for twist
            - z: specifies third coordinae used for 3d wing stacking
            - fileoutICEM: full path and name for ICEM output file format, no extension (only name)
            - TEcut: specifies location of vertical cut
            - t: float: te thickness
            - T_req: maximum thickness to match required absolute thickness
            - origin: float: used to keep particular airfoil poitn in center, e.g. origin = .25 keeps quarter chord in center
            - camb: True/False: if we want to scael camber with thickness
            - workingdir: specify if other than current
            - xfoildir: contains xfoil.exe and uses this directory to save .txt files, if not given assumes it is in folder XFOIL under the same directory as current working folder
        attributes:
            - x: x-coords
            - y: y-coords
            - z: z-coords
        """
        # NOTE(review): forcing a GC pass on every construction looks like a
        # leftover workaround -- confirm it is still needed.
        gc.collect()
        self.camber = camber
        self.chord = chord
        self.z = z
        self.filein = filein
        # NOTE(review): .strip('\\python') strips *characters*, not a suffix,
        # and `!= None` should be `is not None` -- flagged, not changed here.
        self.workingdir = workingdir if workingdir != None else os.getcwd().strip('\\python')
        print('workingdir {}'.format(self.workingdir))
        self.xfoildir = self.workingdir + '/XFOIL/'
        self.filebuffer = self.xfoildir + '/xfoilairfoil.txt'
        self.filecp = self.xfoildir + '/xfoilairfoilcp.txt'
        self.fileCptex = self.xfoildir + '/xfoilairfoilcptex.txt'
        self.camber_t = self.xfoildir + '/camber_t.txt'
        self.xfoilpath = self.xfoildir + '/xfoil.exe'
        # directories to check before analysis
        self.fileFig = self.workingdir + '/saved_plots/airfoil'
        self.meshin = self.workingdir + '/mesh/fluent.msh'
        self.meshDir = self.workingdir + '/mesh/'
        self.fileoutICEM = self.workingdir+'/mesh/foilICEM' if fileoutICEM is None else fileoutICEM
        self.fluentdir = self.workingdir + '/fluent/'
        self.ftype = ftype
        self.verbose = verbose
        # NOTE(review): this overwrites the `camber` argument stored above --
        # the constructor parameter is effectively unused.
        self.camber = None
        self.thickness = None
        self.split = False
        self.t = t
        if ftype == 'ICEM':
            self.readICEM()
        elif ftype == 'XFOIL':
            self.readXFOIL()
            self.saveXFOIL()
        elif ftype == 'XY':
            self.x = x
            self.y = y
        else:
            # Fallback: treat `ftype` as an airfoil name and download it from
            # airfoiltools.com.
            # NOTE(review): hard-coded E:\AIRFOIL\temp.csv ignores workingdir.
            try:
                X = pd.read_csv(f'http://airfoiltools.com/airfoil/seligdatfile?airfoil={ftype}-il')
                X.to_csv(r'E:\AIRFOIL\temp.csv', header = False, index = False)
                X = np.loadtxt(r'E:\AIRFOIL\temp.csv')
                self.x = X[:,0]
                self.y = X[:,1]
                self.z = self.z
            except HTTPError:
                print('error reading airofil from web')
                return None
        # chord scaling
        if chord is None:
            self.chord = np.max(self.x) - np.min(self.x)
            if self.verbose:
                print('evaluated chord is {:.2f}'.format(self.chord))
        else:
            self.chord = np.max(self.x) - np.min(self.x)
            self.scale_XFOIL(chord/np.max(self.x))
            self.saveXFOIL()
            if self.verbose:
                print('scaled airfoil to desired chord {:.3f}'.format(self.chord))
        self.x1 = None
        self.x2 = None
        self.y1 = None
        self.y2 = None
        self.z1 = z
        self.z2 = z
        # imposing required thickness
        if T_req is not None:
            self.thicken(T_req, camb)
        # cut TE
        if t > 0:
            self.cutTE_XFOIL(t, r = .5)
        if r_LE is not None:
            r_LE_current = self.LEradius()[0]
            if r_LE > r_LE_current:
                print('modifying LE radius')
                factor = r_LE / r_LE_current
                self.modify_XFOIL(1,1,factor)
                print('LE factor = {:.1f}'.format(factor))
        # twisting airfoil to required beta
        if beta is not None:
            self.rotate_XFOIL(beta, origin)
        # translating airofil to match required origin
        self.translate_XFOIL(dx - origin * self.chord, dy)
        # setting split after all airofil modifications
        self.split = split
        if self.split:
            self.splitCurve()
def readICEM(self):
X = np.loadtxt(self.filein, delimiter = '\t', skiprows = 1)
self.x = X[:,0]
self.y = X[:,1]
self.z = self.z
def readXFOIL(self, file = None):
if file is None:
X = np.loadtxt(self.filein, skiprows = 1)
else:
X = np.loadtxt(file, skiprows = 1)
self.x = X[:,0]
self.y = X[:,1]
self.z = self.z
self.filein = self.filebuffer
def saveXFOIL(self):
""" saves airfoil coords to .txt file with specified path
"""
# close trailing edge
# save coords to file
if not self.split:
with open(self.filebuffer, "w") as text_file:
print("airfoil", file = text_file)
for i in range(len(self.x)):
print(" {} {}".format(self.x[i], self.y[i]), file=text_file)
else:
with open(self.filebuffer, "w") as text_file:
print("airfoil", file = text_file)
for i in range(len(self.x2)-1,0,-1):
print(" {} {}".format(self.x2[i], self.y2[i]), file=text_file)
for i in range(len(self.x1)):
print(" {} {}".format(self.x1[i], self.y1[i]), file=text_file)
###
### =================== GEOMETRY SECTION ==========================
###
    def cutTE_XFOIL(self, t = .005, r = 0.5):
        """Open the trailing edge to a finite gap using XFOIL's GDES/TGAP.

        The current coordinates are written to the buffer file, piped
        through xfoil.exe (load -> pane -> gdes tgap t r -> save) and read
        back, so self.x / self.y are updated in place.

        t: trailing-edge gap thickness
        r: blending radius (second TGAP argument)
        """
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        airfoilOUT = self.filebuffer
        # XFOIL is driven through stdin; the blank lines back out of menus.
        command = 'load ' + airfoilIN + '\npane\ngdes\ntgap '+ '{} {}'.format(t,r) + '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
        run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
        if self.verbose:
            print('succesfully modified TE using xfoil')
        self.readXFOIL(airfoilOUT)
    def modify_XFOIL(self, thicken = 1, camber = 1, LE_radius=1):
        """Scale thickness, camber and leading-edge radius via XFOIL GDES.

        thicken / camber: multiplicative factors applied with TFAC.
        LE_radius: factor applied with LERA (second LERA argument fixed
        at 0.2). Values below 1 shrink, above 1 grow; 1 means no change.
        The modified coordinates are read back into self.x / self.y.
        """
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        airfoilOUT = self.filebuffer
        command = 'load ' + airfoilIN + '\npane\ngdes\ntfac '+ '{} {}'.format(thicken, camber) +'\nlera {} {}'.format(LE_radius, .2)+ '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
        p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
        if self.verbose:
            print('modified thickness scaled by {}'.format(thicken))
        self.readXFOIL(airfoilOUT)
def thicken(self, req_T, camb = False):
""" modifies thickness to required value of maximum thickness
can also modify camber of airfoil
"""
self.findCamberThickness()
factor = req_T/(self.t_max*self.chord)
print(f'{factor}')
if camb==True:
camb = factor
self.modify_XFOIL(thicken = factor, camber = camb)
else:
camb = 1
self.modify_XFOIL(thicken = factor, camber = camb)
if self.verbose:
print('modified thickness to desired value, i.e. {:.3f}, by a factor of {:.2f}'.format(req_T, factor))
    def scale_XFOIL(self, factor = 1):
        """Scale the whole airfoil geometry by `factor` via XFOIL GDES/SCAL
        and update self.chord accordingly. The before/after chord values are
        always printed (not gated by self.verbose)."""
        print('chord before modification: {:.3f}'.format(self.chord))
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        airfoilOUT = self.filebuffer
        command = 'load ' + airfoilIN + '\npane\ngdes\nscal '+ '{}'.format(factor) + '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
        p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
        if self.verbose:
            print('modified chord by factor {}'.format(factor))
        self.readXFOIL(airfoilOUT)
        self.chord *= factor
        print('chord after modification: {:.3f}'.format(self.chord))
    def translate_XFOIL(self, dx = 0, dy = 0):
        """Translate the airfoil by (dx, dy) via XFOIL GDES/TRAN and reload
        the shifted coordinates into self.x / self.y."""
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        airfoilOUT = self.filebuffer
        command = 'load ' + airfoilIN + '\npane\ngdes\ntran '+ '{} {}'.format(dx, dy) + '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
        p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
        if self.verbose:
            print('airfoil translated by {:.3f} in x and {:.3f} in y'.format(dx, dy))
        self.readXFOIL(airfoilOUT)
def rotate_XFOIL(self, angle = 0, origin = 0):
""" rotates airfoil using xfoil by specified angle in degrees, around (0,0), positive angle moves TE down
"""
if origin is not 0:
self.translate_XFOIL(dx = -origin*self.chord)
self.saveXFOIL()
airfoilIN = self.filebuffer
airfoilOUT = self.filebuffer
command = 'load ' + airfoilIN + '\npane\ngdes\nadeg '+ '{}'.format(angle) + '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
if self.verbose:
print('airfoil rotated by {:.2f}'.format(angle))
self.readXFOIL(airfoilOUT)
if origin is not 0:
self.translate_XFOIL(dx = origin*self.chord )
    def findCamberThickness(self, plot = False, tex = False, name = ''):
        """Extract camber and thickness distributions via XFOIL (GDES/CAMB/WRTC).

        Populates:
            self.camber    - (N, 2) array: x, camber-line y
            self.thickness - (N, 2) array: x, half-thickness (plotted +/- below)
            self.t_max     - maximum full thickness (2 * max half-thickness)
        plot: draw both distributions; tex: dump them to .txt files under
        wing3d/tex-plots for external LaTeX plotting.
        """
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        airfoilOUT = self.filebuffer
        command = 'load ' + airfoilIN + '\npane\ngdes\ntcpl\ncamb\nwrtc\n{}\n\n\nquit\n'.format(self.camber_t)
        p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
        if p.returncode ==2:
            # NOTE(review): success detected via returncode == 2 -- looks
            # specific to this xfoil build; confirm on the installed binary.
            if self.verbose:
                print('found camber and thickness distributions')
        X = np.loadtxt(self.camber_t, skiprows = 1)
        self.camber = X[:,:2]
        self.thickness = X[:,2:]
        self.readXFOIL(airfoilOUT)
        self.t_max = 2* np.max(self.thickness[:,1])
        if plot:
            plt.figure(figsize = (6,2),dpi = 200)
            plt.rc('text', usetex=True)
            plt.rc('font', family='serif')
            plt.plot(self.camber[:,0], self.camber[:,1], 'k-',linewidth = 1.2, label = 'camber')
            plt.plot(self.thickness[:,0], self.thickness[:,1], 'k--',linewidth = 1.2, label = 'thickness')
            plt.plot(self.thickness[:,0], -self.thickness[:,1], 'k--',linewidth = 1.2)
            plt.xlabel(r'$x/c$')
            plt.ylabel(r'$y/c$',fontsize=12)
            plt.title(r"{}".format('camber and thickness distributions'), fontsize=12)
            # plt.subplots_adjust(top=0.8)
            plt.axis('equal')
            plt.legend()
            plt.tight_layout()
            plt.grid('major', linewidth = .2)
            plt.savefig(self.fileFig+'ct', dpi = 1000)
            plt.show()
        if tex:
            camberdir = self.workingdir + r'\wing3d\tex-plots\{}camber.txt'.format(name)
            thicknessdir = self.workingdir + r'\wing3d\tex-plots\{}thickness.txt'.format(name)
            np.savetxt(camberdir, self.camber)
            np.savetxt(thicknessdir, self.thickness)
def t_x(self, x=None):
""" finds thickness at specified x-position, x is x/c (i.e. between 0-1)
self.t_x(0.5) returns thickness at x/c = 0.5
if no argument passed, returns max thickness
"""
self.findCamberThickness()
i = 0
if x is None:
return self.t_max
for i in range(len(self.thickness[:,0])):
if self.thickness[i,0] > x :
return 2*self.thickness[i,1]
if self.verbose:
print('invalid argument')
    def LEradius(self, plot = False, dpi = 500, saveFig = False):
        """Estimate the leading-edge radius.

        Fits circles through triples of points around the front-most point
        (via sympy) and keeps the smallest radius found; returns (r, x, y)
        with the circle centre. `plot` draws a close-up of the LE circle.

        NOTE(review): np.abs() is applied to the circle *centre* coordinates
        below, forcing (x, y) into the first quadrant -- wrong for centres
        with negative y. Also the inner `k` counter is never reset, so only
        the first `j` iteration actually scans `k`, and if no circle beats
        the initial r = 1 the returned x/y are unbound (NameError).
        """
        def findCircle(P1, P2, P3):
            # Solve the three circle equations symbolically for centre (a, b)
            # and radius r2.
            import sympy as sym
            a, b, r2 = sym.symbols('a, b, r2')
            e1 = sym.Eq((P1[0]-a)**2+(P1[1]-b)**2, r2**2)
            e2 = sym.Eq((P2[0]-a)**2+(P2[1]-b)**2, r2**2)
            e3 = sym.Eq((P3[0]-a)**2+(P3[1]-b)**2, r2**2)
            solution = sym.solve([e1, e2, e3], (a, b, r2))
            r = float(np.abs(solution[0][2]))
            x = float(np.abs(solution[0][0]))
            y = float(np.abs(solution[0][1]))
            return x,y,r
        i = np.where(self.x == min(self.x))[0][0]
        # find several circles around LE
        r = 1
        j = 1
        k = 1
        while j<5:
            while k<5:
                x_temp,y_temp,r_temp = findCircle( [self.x[i-j], self.y[i-j]], [self.x[i], self.y[i]], [self.x[i+k], self.y[i+k]] )
                if r_temp<r:
                    r = r_temp
                    x = x_temp
                    y = y_temp
                k+=1
            j+=1
        if plot:
            an = np.linspace(0, 2*np.pi, 100)
            plt.figure(dpi = dpi)
            plt.rc('text', usetex=True)
            plt.rc('font', family='serif')
            plt.plot(self.x, self.y, 'ko-', linewidth = 1.4)
            plt.plot([x],[y],'ro')
            plt.plot(r*np.cos(an)+x, r*np.sin(an)+y, 'r-', linewidth = 1.4)
            plt.title(r"{}".format('leading edge radius close up'), fontsize=12)
            plt.axis('equal')
            plt.ylim(-r, r*3.5)
            if saveFig:
                plt.savefig(self.fileFig, dpi = 1000)
            plt.show()
            # Second, axis-limited close-up figure (hard-coded title).
            fig, ax = plt.subplots(dpi = 500)
            ax.plot(self.x, self.y, 'ko-', linewidth = 1.4)
            ax.plot([x],[y],'ro')
            ax.plot(r*np.cos(an)+x, r*np.sin(an)+y, 'r-', linewidth = 1.4)
            ax.set_xlim(-r, r*3.5)
            ax.set_ylim(-r*2, r*2)
            ax.set_title('mh117: R=2')
            ax.set_aspect(1.0)
            ax.grid(which='major', linewidth = 0.2)
            plt.show()
        return r, x , y
def saveICEM(self, airfoilfile = None):
""" saves points in icem format, either as a single curve of splits to upper and lower (recommended) """
if self.y[1]>self.y[-1]:
self.x = np.flip(self.x, axis = 0)
self.y = np.flip(self.y, axis = 0)
if airfoilfile is not None:
self.fileoutICEM = airfoilfile
if not self.split:
self.zs = np.ones(len(self.x))*self.z
self.fileoutICEM += '.txt'
with open( self.fileoutICEM, 'w') as f:
f.write('{}\t{}\n'.format(len(self.x), 1))
for i in range(len(self.x)):
f.write('{}\t{}\t{}\n'.format(self.x[i]*1000, self.y[i]*1000, self.zs[i]*1000) )
else:
self.z1 = np.ones(len(self.x1))*self.z
self.z2 = np.ones(len(self.x2))*self.z
with open( self.fileoutICEM + '.0.txt', 'w') as f:
f.write('{}\t{}\n'.format(len(self.x1), 1))
for i in range(len(self.x1)):
f.write('{}\t{}\t{}\n'.format(self.x1[i]*1000, self.z1[i]*1000, self.y1[i]*1000) )
with open( self.fileoutICEM + '.1.txt', 'w') as f:
f.write('{}\t{}\n'.format(len(self.x2), 1))
for i in range(len(self.x2)):
f.write('{}\t{}\t{}\n'.format(self.x2[i]*1000, self.z2[i]*1000, self.y2[i]*1000) )
    def saveSW(self, airfoilfile):
        """Save points in SolidWorks curve format: coordinates in mm,
        columns x, z, y, no header line.

        Writes `<airfoilfile>.txt`, or `<airfoilfile>.0.txt` / `.1.txt`
        for the two halves when the curve is split.
        """
        if not self.split:
            self.zs = np.ones(len(self.x))*self.z
            airfoilfile += '.txt'
            with open( airfoilfile, 'w') as f:
                for i in range(len(self.x)):
                    f.write('{}\t{}\t{}\n'.format(self.x[i]*1000, self.zs[i]*1000, self.y[i]*1000) )
        else:
            self.z1 = np.ones(len(self.x1))*self.z
            self.z2 = np.ones(len(self.x2))*self.z
            with open( airfoilfile + '.0.txt', 'w') as f:
                for i in range(len(self.x1)):
                    f.write('{}\t{}\t{}\n'.format(self.x1[i]*1000, self.z1[i]*1000, self.y1[i]*1000) )
            with open( airfoilfile + '.1.txt', 'w') as f:
                for i in range(len(self.x2)):
                    f.write('{}\t{}\t{}\n'.format(self.x2[i]*1000, self.z2[i]*1000, self.y2[i]*1000) )
###
### =================== ANALYSIS SECTION ==========================
###
    def runXFOIL(self, cl=.2, alfa = None, re=1e6, m =.2, n_crit = 6, iters = 500, cp = False):
        """Run one viscous XFOIL analysis point on the current airfoil.

        Prescribes either lift coefficient `cl` (default) or, when given,
        angle of attack `alfa`. Returns (alfa, Cd, Cm, Cl) parsed from the
        tail of xfoil's stdout. With cp=True only the surface-pressure
        distribution is written to self.filecp and 0 is returned.
        NOTE(review): `m` (Mach) is accepted but never sent to xfoil --
        it is only forwarded on retries.
        """
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        if alfa is None:
            S = cl
            s = 'cl'
            if self.verbose:
                print('running XFOIL for: cl={}'.format(cl))
        else:
            S = alfa
            s = 'a'
            if self.verbose:
                print('running XFOIL for: aoa={}'.format(alfa))
        if not cp:
            commands = 'load ' + airfoilIN + '\npane\noper\nvpar\nn {}\n\nvisc {}'.format(n_crit, re) + '\niter '+str(iters)+'\n{} {}'.format(s, S) + '\n\nquit\n'
            p = run([self.xfoilpath], stdout=PIPE,
                    input=commands, encoding='ascii')
        else:
            commands = 'load ' + airfoilIN + '\npane\noper\nvpar\nn {}\n\nvisc {}'.format(n_crit, re) + '\niter '+str(iters)+'\n{} {} '.format(s, S) + '\ncpwr\n{}\n\nquit\n'.format(self.filecp)
            p = run([self.xfoilpath], stdout=PIPE,
                    input=commands, encoding='ascii')
            return 0
        try:
            # Parse coefficients from fixed byte offsets at the end of
            # stdout -- fragile; tied to this xfoil build's output layout.
            alfa = float(p.stdout[-130:-118])
            Cl = float(p.stdout[-112:-106])
            Cd = float(p.stdout[-78:-69])
            Cm = float(p.stdout[-94:-86])
            print(alfa,Cl,Cd,Cm)
        except ValueError:
            if self.verbose:
                print('error running xfoil, try slighlty different cl/alpha') # the reason is xfoil may not converge for this particular condition but in general it converges
            # Retry with a slightly perturbed condition.
            # NOTE(review): recurses without a depth limit -- a condition
            # that never converges recurses until RecursionError.
            if alfa is None:
                alfa, Cd, Cm, Cl = self.runXFOIL(cl = 1.01*cl, re = re, m = m, n_crit = n_crit, iters = iters)
            else:
                alfa, Cd, Cm, Cl = self.runXFOIL(alfa = .01+alfa, re = re, m = m, n_crit = n_crit, iters = iters)
            #return 1, 1, 1, 1
        return alfa, Cd, Cm, Cl
    def runPolar(self, a0=-4, a1=8, re=1e6, m=.2, n_crit = 6, plot = False):
        """Sweep angle of attack from a0 to a1 (exclusive, 1-degree steps)
        and return (alfas, cds, cms, cls) from successive runXFOIL calls.

        NOTE(review): the np.delete below trims `alfas` only (where
        cds == 1), so any removed point leaves `alfas` shorter than
        cds/cls and the plots would fail on mismatched lengths. With the
        current runXFOIL (its `return 1,1,1,1` is commented out) cds is
        never 1, so the delete is vestigial -- confirm intent.
        """
        alfas = np.zeros(a1-a0)
        cds = np.zeros(a1-a0)
        cls = np.zeros(a1-a0)
        cms = np.zeros(a1-a0)
        i=0
        for aoa in np.arange(a0,a1,1):
            alfas[i], cds[i], cms[i], cls[i] = self.runXFOIL(alfa = aoa, re= re, m = m, n_crit = n_crit)
            i+=1
        alfas = np.delete(alfas, np.where(cds == 1))
        print (alfas)
        if plot:
            plt.figure()
            plt.plot(alfas, cls, 'o-')
            plt.xlabel(r'$ \alpha [^\circ]$')
            plt.ylabel(r'$C_L$')
            plt.show()
            plt.figure()
            plt.plot(alfas, cds, 'o-')
            plt.xlabel(r'$ \alpha[^\circ]$')
            plt.ylabel(r'$C_D$')
            plt.show()
        return alfas, cds, cms, cls
    def plotCp(self, outputtex = False, dpi = 200, name = None, saveFig = False, airfoil= True, alfa = None):
        """Plot the -Cp distribution last written by runXFOIL(cp=True).

        Reads self.filecp (3 header rows; column 0 = x, column 2 = Cp,
        column 1 presumably y -- unused here). When `airfoil` is True the
        section outline is sketched below the curve for context. `alfa` is
        only used to annotate the title; `outputtex` dumps the raw data to
        self.fileCptex for LaTeX plotting.
        """
        X = np.loadtxt(self.filecp, skiprows = 3)
        x = X[:,0]
        cp = X[:,2]
        if outputtex:
            np.savetxt(self.fileCptex, X)
        if name is None:
            if alfa is not None:
                name = '$C_p$ distribution at $\alpha = {}$'.format(alfa)
            else:
                name = '$C_p$ distribution'
        plt.figure(figsize = (6,4),dpi = dpi)
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
        plt.plot(x, -cp, 'k-',linewidth = 1)
        if airfoil:
            # Scaled/offset sketch of the section under the Cp curve.
            plt.plot(self.x/self.chord, self.y/self.chord*3-np.max(cp), 'k-',linewidth = 1)
        plt.xlabel(r'$x/c$',fontsize=12)
        plt.ylabel(r'$-C_p$',fontsize=12)
        plt.title(r"{}".format(name), fontsize=12)
        plt.subplots_adjust(top=0.8)
        # plt.axis('equal')
        plt.grid(which='major', linewidth = 0.2)
        plt.tight_layout()
        # plt.grid(True)
        if saveFig:
            plt.savefig(self.fileFig, dpi = 1000)
        plt.show()
def runFluent(self, alfa, mach, chord,
rho = 1.225, T = 300, viscosity = 1.78e-5,
name = 'airfoil', path = None, ID = 0,
mesh = 'o', y1 = 0.01, n_r = 120, n_le = 30, n_top = 120,
model = 'kw-sst', intermittency = False, lowre = False, polar = False,
onlymesh = False, onlyfluent = False, mshin = None, meshunits = 'mm',
tt = 1, farfieldnames = ['farfield'], outletnames = [], interiornames = ['int_fluid']
):
"""
chord used to scale mesh in fluent and use for coefficients
if using auto o-mesh, generate airfoil with unit chord and scale mesh to required value
static method: can be applied for given mesh, without airfoil initialization
"""
if path is None:
path = self.workingdir + r'\fluent'
import time
start = time.time()
# begin with structured mesh generation
#
import subprocess
def subprocess_cmd(command):
process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
# print(proc_stdout)
return proc_stdout
if not onlyfluent:
self.saveICEM(self.fileoutICEM)
ICEMrun ='"C:\\Program Files\\ANSYS Inc\\v194\\icemcfd\\win64_amd\\bin\\icemcfd" -script'
# pick mesh replay file to generate mesh
if mesh == 'o':
meshrpl = self.meshDir + 'omesh.rpl'
# ICEMscr = r'"E:\propeller\python\wing3d\rpl42.rpl"'
# ICEMscr = r'"E:\propeller\python\wing3d\omesh\omesh.rpl"'
ICEMscr = f'"{meshrpl}"'
elif mesh == 'unstructured':
ICEMscr = r'"C:\Users\wk5521\Documents\ICEM\airfoil replays\mesh_output.rpl"'
generateICEMScheme( y1 = y1, n_r = n_r, n_le = n_le, n_top = n_top, file = meshrpl)
ICEM = ICEMrun + ' ' + ICEMscr
subprocess_cmd(ICEM)
# now having the mesh, run shceme generation, hence fluent
if onlymesh:
print('finished mesh')
return 0
fluentjournal = self.workingdir + '/fluent/journal.txt'
casename = f'foil,{model},{alfa},{mach},{chord},{self.t}'
if polar:
casename = f'foil,{model},{mach},{chord},{self.t}'
if lowre:
casename+=',lowre'
if intermittency:
casename+= 'inter'
meshin = mshin if mshin is not None else self.meshin
generateScheme(filename = fluentjournal,
casename = casename,
chord = chord,
viscosity = viscosity,
T=T,
alfa = alfa,
mach = mach,
meshin = meshin,
meshunits = meshunits,
farfieldnames = farfieldnames,
outletnames = outletnames,
interiornames = interiornames,
path = self.fluentdir,
model = model,
intermittency = intermittency,
lowre = lowre,
polar = polar,
tt =tt
)
FLUENTrun = '"C:\\Program Files\\ANSYS Inc\\v194\\fluent\\ntbin\\win64\\fluent.exe" 2d -t8 -wait -i'
FLUENT = FLUENTrun + ' '+ '"{}"'.format(fluentjournal)
subprocess_cmd(FLUENT)
end = time.time()
showresult = False
if showresult:
result = np.loadtxt('{}/reports/{}.out'.format(self.fluentdir, casename), skiprows = 100)
result = result[-10:]
result = np.mean(result, axis = 0)
lift = result[1]
drag = result[2]
moment = result[3]
duration = end - start
print('mesh size: {}, lift: {:.4f}, drag: {:.6f}, duration: {}'.format(2*(n_le+n_top)*n_r , lift , drag , duration))
return 2*(n_le+n_top)*n_r , lift , drag
def splitCurve(self):
""" splits curve into two curves at leading edge by front-most point """
i_min = np.where(self.x == np.amin(self.x))[0][0]
self.split = True
self.x1 = self.x[:i_min+1]
self.y1 = self.y[:i_min+1]
self.x2 = self.x[i_min:]
self.y2 = self.y[i_min:]
self.z1 = np.ones(len(self.x1))*self.z
self.z2 = np.ones(len(self.x2))*self.z
    def qpropData(self, m, re, n = 12, n_crit = 5):
        """Fit QPROP airfoil-coefficient inputs from an XFOIL alpha sweep.

        Runs alpha = -6..5 deg (12 points) and returns
        (cl0, clalfa, cd0, clcd0, cd2u, cd2l) for the QPROP input file.
        NOTE(review): the arrays are sized by `n` but the loop always fills
        12 entries and the fit indices (cls[6], cls[4], cls[-1]) assume
        exactly n == 12 -- other values break the fit or crash.
        """
        # collect some data for range of angles of attack
        alfas = np.zeros(n)
        cds = np.zeros(n)
        cms = np.zeros(n)
        cls = np.zeros(n)
        j = 0
        for i in range(-6, 6, 1):
            self.cutTE_XFOIL(t = 0.005, r = .3)
            alfas[j], cds[j], cms[j], cls[j] = self.runXFOIL(alfa = i, re = re, m = m, iters = 1000, n_crit = n_crit)
            j+=1
        cl0 = cls[6]
        clalfa = (cls[-1] - cls[4]) / np.radians(7)
        # now begin drag section
        from scipy.optimize import minimize, fmin
        cd0 = cds.min()
        # Locate the first alpha at which the minimum drag occurs.
        for index in range(len(cds)):
            if cds[index] == cd0:
                break
        clcd0 = cls[index]
        def merit(x):
            # Least-difference fit of the upper (higher-cl) drag parabola.
            merit = np.abs( cds[index + 1] + cds[index + 2] - (cd0 + x * (cls[index + 1] - clcd0 )**2 + cd0 + x * ( cls[index + 2] - clcd0 )**2 ) )
            return merit
        result = fmin(merit, .1)
        cd2u = result[0]
        def merit2(x, *args):
            # Same fit on the lower (lower-cl) side of the drag bucket.
            return np.abs( cds[args[0] - 1] + cds[args[0] - 2] - (cd0 + x * (cls[args[0] - 1] - clcd0 )**2 + cd0 + x * ( cls[args[0] - 2] - clcd0 )**2 ) )
        result2 = minimize(merit2, .05, args = (index))
        cd2l = result2.x[0]
        print('cl0, clalfa, cd0, clcd0 = {:.3f} {:.3f} {:.3f} {:.3f}'.format( cl0, clalfa, cd0, clcd0))
        return cl0, clalfa, cd0, clcd0, cd2u, cd2l
    def plotAirfoil(self, name=None, saveFig = False, dpi = 200, tex = False , nametex = ''):
        """Plot the chord-normalized airfoil (both halves when split).

        saveFig: also write the figure to self.fileFig.
        tex: additionally dump x/c, y/c pairs to a .txt file under
        wing3d/tex-plots for external LaTeX plotting.
        """
        if name is None:
            name = 'airfoil'
        plt.figure(figsize = (6,2),dpi = dpi)
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
        if self.split:
            plt.plot(self.x1/self.chord, self.y1/self.chord, 'k-',linewidth = 1.2)
            plt.plot(self.x2/self.chord, self.y2/self.chord, 'k-',linewidth = 1.2)
        else:
            plt.plot(self.x/self.chord, self.y/self.chord, 'k-',linewidth = 1.2)
        plt.xlabel(r'$x/c$',fontsize=12)
        plt.ylabel(r'$y/c$',fontsize=12)
        plt.title(r"{}".format(name), fontsize=12)
        plt.subplots_adjust(top=0.8)
        plt.axis('equal')
        plt.grid(which='major', linewidth = 0.2)
        plt.tight_layout()
        if saveFig:
            plt.savefig(self.fileFig, dpi = 1000)
        plt.show()
        if tex:
            X = np.append((self.x/self.chord).reshape(-1,1), (self.y/self.chord).reshape(-1,1), axis = 1 )
            savedir = self.workingdir + r'\wing3d\tex-plots\{}airfoil.txt'.format(nametex)
            np.savetxt(savedir, X)
| Witekklim/propellerDesign | airfoil.py | airfoil.py | py | 30,685 | python | en | code | 1 | github-code | 36 |
41068970621 | import matplotlib.pyplot as plt
import random
import matplotlib
from matplotlib import font_manager
import numpy as np
# Set the figure size and resolution.
plt.figure(figsize=(20, 8), dpi=80)
# Set a font that can render Chinese axis labels.
my_font = font_manager.FontProperties(
    fname='/System/Library/Fonts/Hiragino Sans GB.ttc')
# Generate the data: one temperature sample per minute for two hours.
x = range(0, 120)
random.seed(10)  # fix the random seed so every run produces the same data
y = [random.randint(20, 35) for i in range(120)]
# Plot the series.
plt.plot(x, y)
# Build the x-axis tick labels ("10:MM" for the first hour, "11:MM" for the second).
_xticks_lables = ['10点{}分'.format(i) for i in x if i<60 ]
_xticks_lables += ['11点{}分'.format(i-60) for i in x if i>=60]
# Subsample ticks and labels with the same stride so they stay aligned;
# rotation=45 tilts the labels for readability.
plt.xticks(x[::3], _xticks_lables[::3],
           rotation=45, fontproperties=my_font)
# Axis descriptions and title (time / temperature in Celsius).
plt.xlabel('时间', fontproperties=my_font)
plt.ylabel('温度 单位(℃)', fontproperties=my_font)
plt.title('10点到11点每分钟的气温变化情况', fontproperties=my_font)
# Show the figure.
plt.show()
'''
Problem :- Vaccine Production
Platform :- Codechef
Link :- https://www.codechef.com/DEC20B/problems/VACCINE1
Problem statement :- Increasing COVID cases have created panic amongst the people of Chefland,
so the government is starting to push for production of a vaccine.
It has to report to the media about the exact date when vaccines will be available.
There are two companies which are producing vaccines for COVID.
Company A starts producing vaccines on day D1 and it can produce V1 vaccines per day.
Company B starts producing vaccines on day D2 and it can produce V2 vaccines per day.
Currently, we are on day 1. We need a total of P vaccines. How many days are required to produce enough vaccines?
Formally, find the smallest integer d such that we have enough vaccines at the end of the day d.
Example-1:
Input : 1 2 1 3 14
Output : 3
Example-2:
Input : 5 4 2 10 100
Output : 9
'''
def solve(d1, v1, d2, v2, p):
    """Return the smallest day d such that at least p vaccines exist
    by the end of day d.

    Company A produces v1 vaccines/day starting on day d1; company B
    produces v2/day starting on day d2.

    The original script shadowed the builtin ``sum`` and mixed the logic
    with stdin I/O; the computation is now a pure, testable function.
    """
    total = 0
    days = 0
    # Simulate day by day until both companies are producing (or p is met).
    for day in range(1, max(d1, d2) + 1):
        if total >= p:
            break
        if day >= d1:
            total += v1
        if day >= d2:
            total += v2
        days += 1
    # From here on both companies produce every day, so advance in fixed
    # increments of (v1 + v2) instead of checking start days.
    while total < p:
        days += 1
        total += (v1 + v2)
    return days


if __name__ == '__main__':
    # Guarded so the module can be imported without blocking on stdin.
    d1, v1, d2, v2, p = list(map(int, input().split(' ')))
    print(solve(d1, v1, d2, v2, p))
import sys, os, argparse, yaml
from datasets.config.config import data_analysis_parameters
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cbook as cbook
def analysis_kitti(args):
    """Plot the mean optical-flow magnitude over a KITTI flow dataset.

    Reads every 16-bit flow PNG in ``args.flow_path_occ``, decodes it
    with the KITTI convention, and shows a log-scaled heat map of the
    validity-weighted mean flow magnitude.
    """
    # Load the data
    flow_volume = []
    masks = []
    height, width = args.height , args.width
    for flow_path in os.listdir(args.flow_path_occ):
        flow = cv.resize(cv.imread(os.path.join(args.flow_path_occ, flow_path), cv.IMREAD_ANYCOLOR | cv.IMREAD_ANYDEPTH), (width, height))
        # KITTI flow encoding: value/64 - 512 == (value - 2**15) / 64.
        u = flow[:,:,2]/64.0 - 512
        v = flow[:,:,1]/64.0 - 512
        mask = flow[:,:,0]
        mag = np.sqrt(u**2 + v**2)
        flow_volume.append(mag)
        # Tiny epsilon keeps np.average from failing where all weights are 0.
        masks.append(mask + 0.00000001)
    flow_volume = np.array(flow_volume)
    # NOTE(review): this unweighted mean is immediately overwritten by the
    # weighted average below; the assignment appears to be dead code.
    mean = np.mean(flow_volume,axis=0)
    # standard_deviation = np.std(flow_volume,axis=0)
    masks = np.array(masks)
    mean = np.average(flow_volume,weights = masks,axis=0)
    # add plot and colorbar
    fig, ax = plt.subplots(1,1)
    mean_flow_plot = ax.imshow(mean,cmap='Blues',norm=colors.LogNorm(vmin=mean.min()+ 0.01, vmax=mean.max()+0.0000001))
    fig.colorbar(mean_flow_plot, ax=ax)
    # ax[1].imshow(standard_deviation,cmap='rainbow')
    plt.show()
def analysis_vkitti(args):
    """Plot the mean optical-flow magnitude over a Virtual KITTI dataset.

    Same idea as :func:`analysis_kitti` but with the VKITTI 16-bit flow
    encoding, which maps raw pixel values to the range [0, 2).
    """
    flow_volume = []
    masks = []
    height, width = args.height , args.width
    for flow_path in os.listdir(args.flow_path_occ):
        flow = cv.resize(cv.imread(os.path.join(args.flow_path_occ, flow_path), cv.IMREAD_ANYCOLOR | cv.IMREAD_ANYDEPTH), (width, height))
        u = flow[:,:,2]
        v = flow[:,:,1]
        # Normalise the raw 16-bit values; the commented-out -0.5 shift and
        # pixel scaling suggest an alternative decoding was tried -- TODO confirm
        # which convention this dataset actually uses.
        u = 2*(u/(2**16)) #- 0.5
        v = 2*(v/(2**16)) #- 0.5
        # u = u*(width - 1)
        # v = v*(height - 1)
        mask = flow[:,:,0]
        print(min(u.flatten()),max(u.flatten()))
        mag = np.sqrt(u**2 + v**2)
        flow_volume.append(mag)
        # NOTE(review): unlike analysis_kitti, no epsilon is added here, so
        # np.average raises if a pixel's weights are all zero.
        masks.append(mask)
    flow_volume = np.array(flow_volume)
    masks = np.array(masks)
    mean = np.average(flow_volume,weights = masks,axis=0)
    # standard_deviation = np.std(flow_volume,axis=0)
    # add plot and colorbar
    fig, ax = plt.subplots(1,1)
    mean_flow_plot = ax.imshow(mean,cmap='Blues',norm=colors.LogNorm(vmin=mean.min() + 0.01, vmax=mean.max()))
    fig.colorbar(mean_flow_plot, ax=ax)
    # ax[1].imshow(standard_deviation,cmap='rainbow')
    plt.show()
# Load dataset and the parameters to analyse from the config file
parser = argparse.ArgumentParser()
parser.add_argument('-config', help="configuration file *.yml", type=str, required=False, default='data_analysis/config/vkitti.yml')
parser.add_argument('-dataset', help="dataset", type=str, required=False, default="vkitti")
analysis_args = parser.parse_args()
# Load the configuration file arguments
args = data_analysis_parameters(analysis_args.dataset, analysis_args.config)
# NOTE(review): analysis_vkitti is invoked unconditionally, even when
# -dataset selects kitti; analysis_kitti above is currently never called.
analysis_vkitti(args)
import json
import requests
import random
def filmes_assistidos_json():
    """Load and return each user's watched-movies data from disk."""
    with open('../DadosJSON/filmesAssistidos.json', 'r') as arquivo:
        return json.load(arquivo)
def preferencias_json():
    """Load and return each user's genre-preference data from disk."""
    with open('../DadosJSON/preferencias.json', 'r') as arquivo:
        return json.load(arquivo)
def filmes_generos_json():
    """Load and return the genre-to-movies index from disk."""
    with open('../DadosJSON/filmesGenero.json', 'r') as arquivo:
        return json.load(arquivo)
def links_imdb_json():
    """Load and return the MovieLens-to-IMDB id mapping from disk."""
    with open('../DadosJSON/linksImdb.json', 'r') as arquivo:
        return json.load(arquivo)
def verificados_json():
    """Load and return the list of already-checked IMDB ids from disk."""
    with open('../DadosJSON/verificados.json', 'r') as arquivo:
        return json.load(arquivo)
def recomendados_json():
    """Load and return the per-user recommendation lists from disk."""
    with open('../DadosJSON/recomendacao.json', 'r') as arquivo:
        return json.load(arquivo)
def salva_verificados(imdbid):
    """Record ``imdbid`` as already-checked and persist the list to disk."""
    verificados.append(imdbid)
    with open('../DadosJSON/verificados.json', 'w') as destino:
        json.dump(verificados, destino)
# Module-level caches of all persisted JSON state, loaded once at import.
filmesAssistidos_users = filmes_assistidos_json()
preferencias_users = preferencias_json()
filmesPorGenero = filmes_generos_json()
linksImdb = links_imdb_json()
verificados = verificados_json()
def request(imdbid, s):
    """Look up ``imdbid`` on TMDB's /find endpoint and return field ``s``.

    Movie results are preferred; when there are none, the first TV
    result is used instead.

    Bug fix: the original returned from a ``finally`` block, which
    silently swallowed any exception raised in the ``except`` branch
    (e.g. when both result lists are empty) and replaced it with a
    confusing lookup on the raw response dict.  The return now happens
    on the normal path so real failures propagate to the caller.
    """
    # NOTE(review): the API key is hard-coded in source; consider moving
    # it to configuration or an environment variable.
    url = requests.get(f"https://api.themoviedb.org/3/find/{imdbid}?api_key=254c6407feb51fd7f478ec3e6b1abc23"
                       "&language=en-US&external_source=imdb_id")
    data = url.json()
    try:
        data = data['movie_results'][0]
    except IndexError:
        data = data['tv_results'][0]
    return data[s]
def retorna_imdbid(movieid):
    """Translate a MovieLens movieId into its IMDB id (None if unknown)."""
    for registro in linksImdb:
        if registro['movieId'] == movieid:
            return registro['imdbId']
def salva_recomendacoes(lista, userid, rod):
    """Append ``lista`` to the user's recommendation list and persist it.

    On round 1 (``rod == 1``) a fresh empty slot is appended first;
    this assumes users are processed in ascending userid order so the
    new slot's index equals ``userid`` -- TODO confirm against main().
    """
    if rod == 1:
        recomendacao_users.append([])
        recomendacao_users[userid].extend(lista)
    else:
        recomendacao_users[userid].extend(lista)
    with open('../DadosJSON/recomendacao.json', 'w') as f:
        json.dump(recomendacao_users, f)
# Per-user recommendation lists (index = userid), persisted across runs.
recomendacao_users = recomendados_json()
def recomendacao(userid, rod):
    """Pick and persist recommendations for ``userid`` in round ``rod``.

    Round 1/2: 3 titles from the user's first/second top genre.
    Round 3: 2 titles from a random top genre.
    Round 4 (and any other value): 1 title from the user's other genres.
    Candidates already watched, already recommended, or previously
    rejected (``verificados``) are skipped.
    """
    recomendados = []
    if rod == 1:
        count = 3
        genero = preferencias_users[userid]['topGeneros'][0]
    elif rod == 2:
        count = 3
        genero = preferencias_users[userid]['topGeneros'][1]
    elif rod == 3:
        count = 2
        genero = random.choice(preferencias_users[userid]['topGeneros'])
    else:
        count = 1
        genero = random.choice(preferencias_users[userid]['outros'])
    assistidos = filmesAssistidos_users[userid]['filmes']
    while count > 0:
        movieid = random.choice(filmesPorGenero[0][genero])
        imdbid = retorna_imdbid(movieid)
        pop = request(imdbid, "popularity")
        # NOTE(review): ``30.000`` is the float 30.0 in Python; if the
        # intent was a thousands-separated 30000, this popularity filter
        # rejects far more titles than expected -- confirm.
        if imdbid not in verificados and pop < 30.000:
            salva_verificados(imdbid)
            continue
        elif imdbid in verificados:
            continue
        # NOTE(review): ``recomendados`` stores imdbids but is checked for
        # ``movieid`` membership, so this duplicate check never matches.
        if movieid not in assistidos and movieid not in recomendados:
            if rod > 1:
                if movieid not in recomendacao_users[userid]:
                    recomendados.append(imdbid)
                    print('..')
                    count -= 1
            else:
                print('..')
                recomendados.append(imdbid)
                count -= 1
    salva_recomendacoes(recomendados, userid, rod)
def main():
    """Build the full 10-item recommendation list for the first 10 users.

    Bug fix: the original retried by calling itself from a ``finally``
    block, which recursed unconditionally (even on success) until the
    interpreter hit RecursionError, and used a bare ``except`` that also
    swallowed KeyboardInterrupt.  This version retries in a loop, stops
    once all 10 users have a list, and catches only ``Exception``.
    """
    while len(recomendacao_users) < 10:
        try:
            for userid in range(len(recomendacao_users), 10):
                recomendacao(userid, 1)
                print('--')
                recomendacao(userid, 2)
                print('---')
                recomendacao(userid, 3)
                print('-----')
                recomendacao(userid, 4)
                print('-------')
                recomendacao(userid, 4)
                print('||||||||||')
        except Exception:
            print('erro')
            # Drop a partially-built recommendation list so the next
            # attempt regenerates that user from scratch.
            if recomendacao_users and len(recomendacao_users[-1]) < 10:
                recomendacao_users.pop()
                with open('../DadosJSON/recomendacao.json', 'w') as f:
                    json.dump(recomendacao_users, f)
main()
import sqlite3
import click
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
    """Return the request-scoped SQLite connection, opening it lazily."""
    if 'db' in g:
        return g.db
    conn = sqlite3.connect(
        current_app.config['DATABASE'],
        detect_types=sqlite3.PARSE_DECLTYPES
    )
    # Rows behave like dicts (column access by name).
    conn.row_factory = sqlite3.Row
    g.db = conn
    return g.db
def close_db(e=None):
    """Close this request's database connection, if one was opened."""
    conn = g.pop('db', None)
    if conn is not None:
        conn.close()
def init_db():
    """Create the database tables defined in schema.sql."""
    conn = get_db()
    with current_app.open_resource('schema.sql') as schema_file:
        conn.executescript(schema_file.read().decode('utf8'))
@click.command('init-db')
@with_appcontext
def init_db_command():
    """Clear existing data and create new tables (``flask init-db``)."""
    init_db()
    click.echo('Initialized the database.')
def init_app(app):
    """Register database teardown and the init-db CLI command on the app."""
    app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
import configparser
import os
class AWSAnmeldung():
    """Reads AWS credentials for one profile from a user's
    ``/home/<user>/.aws/credentials`` file.

    Attributes ``aws_access_key_id``, ``aws_secret_access_key`` and
    ``region_name`` hold the values of the selected profile section.
    """

    def __init__(self, benutzer, account):
        """benutzer: login name whose home dir holds .aws/credentials;
        account: profile (section) name inside that file."""
        self.benutzer = benutzer
        self.account = account
        configName = "credentials"
        configPfad = os.path.join("/", "home", self.benutzer, ".aws", configName)
        self.config = configparser.ConfigParser()
        self.config.read(configPfad)
        self.aws_access_key_id = self.leseEintrag(account, "aws_access_key_id")
        self.aws_secret_access_key = self.leseEintrag(account, "aws_secret_access_key")
        self.region_name = self.leseEintrag(account, "region_name")

    def leseEintrag(self, auswahl, zeile):
        """Return entry ``zeile`` from section ``auswahl``.

        Bug fix: the original called ``self.config.get`` twice and
        discarded the first result; a single lookup suffices.
        """
        return self.config.get(auswahl, zeile)
if __name__ == '__main__':
test = AWSAnmeldung("studium","default")
print(test.aws_secret_access_key,test.aws_access_key_id)
    print(test.leseEintrag("default","aws_access_key_id"))
import tempfile
import unittest
import pytest
from os import environ
from os.path import join, isdir, getmtime
from time import time
from selenium.webdriver.common.timeouts import Timeouts
from selenium.common.exceptions import TimeoutException
from tbselenium import common as cm
from tbselenium.test import TBB_PATH
from tbselenium.test.fixtures import TBDriverFixture
from selenium.webdriver.common.utils import free_port
from tbselenium.utils import is_busy
class TBDriverTest(unittest.TestCase):
    """Functional tests for TorBrowserDriver: Tor connectivity, onion
    service access, PATH handling and page-load timeouts."""
    def setUp(self):
        self.tb_driver = TBDriverFixture(TBB_PATH)
    def tearDown(self):
        self.tb_driver.quit()
    def test_should_load_check_tpo(self):
        """check.torproject.org must confirm traffic is routed via Tor."""
        congrats = "Congratulations. This browser is configured to use Tor."
        self.tb_driver.load_url_ensure(cm.CHECK_TPO_URL)
        status = self.tb_driver.find_element_by("h1.on")
        self.assertEqual(status.text, congrats)
    def test_should_load_hidden_service(self):
        """A v3 onion service should load and report the expected title."""
        # https://support.torproject.org/onionservices/v2-deprecation/index.html
        TPO_V3_ONION_URL = "http://2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion/"  # noqa
        self.tb_driver.load_url_ensure(TPO_V3_ONION_URL, wait_for_page_body=True)
        self.assertEqual(
            'Tor Project | Anonymity Online',
            self.tb_driver.title)
    def test_should_check_environ_in_prepend(self):
        """The TBB browser dir must appear exactly once in PATH, even after
        a second driver is created."""
        self.tb_driver.quit()
        self.tb_driver = TBDriverFixture(TBB_PATH)
        paths = environ["PATH"].split(':')
        tbbpath_count = paths.count(self.tb_driver.tbb_browser_dir)
        self.assertEqual(tbbpath_count, 1)
    def test_should_set_timeouts(self):
        """A tiny page-load timeout should abort the load promptly."""
        LOW_PAGE_LOAD_LIMIT = 0.05
        self.tb_driver.timeouts = Timeouts(page_load=LOW_PAGE_LOAD_LIMIT)
        timed_out = False
        t_before_load = time()
        try:
            self.tb_driver.load_url(cm.CHECK_TPO_URL)
        except TimeoutException:
            timed_out = True
        finally:
            t_spent = time() - t_before_load
            # delta=1 tolerates driver startup overhead around the timeout.
            self.assertAlmostEqual(t_spent, LOW_PAGE_LOAD_LIMIT, delta=1)
        assert timed_out
class TBDriverCleanUp(unittest.TestCase):
    """Verify driver.quit() releases its process and temp resources."""
    def setUp(self):
        self.tb_driver = TBDriverFixture(TBB_PATH)
    def test_should_terminate_geckodriver_process_on_quit(self):
        """quit() must terminate the geckodriver child process."""
        driver = self.tb_driver
        geckodriver_process = driver.service.process
        # poll() is None while the process is still running.
        self.assertEqual(geckodriver_process.poll(), None)
        driver.quit()
        self.assertNotEqual(geckodriver_process.poll(), None)
    def test_should_remove_profile_dirs_on_quit(self):
        """quit() must delete the temporary browser profile directory."""
        temp_profile_dir = self.tb_driver.temp_profile_dir
        self.assertTrue(isdir(temp_profile_dir))
        self.tb_driver.quit()
        self.assertFalse(isdir(temp_profile_dir))
class TBDriverTorDataDir(unittest.TestCase):
    """Verify that a custom tor_data_dir leaves the bundled one untouched."""
    TOR_DATA_PATH = join(TBB_PATH, cm.DEFAULT_TOR_DATA_PATH)
    @pytest.mark.skipif(cm.TRAVIS, reason="Requires Tor bootstrap,"
                        "unreliable on Travis")
    def test_temp_tor_data_dir(self):
        """Tor data directory in TBB should not be modified if
        we use a separate tor_data_dir.
        """
        tmp_dir = tempfile.mkdtemp()
        # Compare the directory's mtime before and after a full session.
        mod_time_before = getmtime(self.TOR_DATA_PATH)
        with TBDriverFixture(TBB_PATH, tor_data_dir=tmp_dir) as driver:
            driver.load_url_ensure(cm.CHECK_TPO_URL)
        mod_time_after = getmtime(self.TOR_DATA_PATH)
        self.assertEqual(mod_time_before, mod_time_after)
class TBDriverProfile(unittest.TestCase):
    """Verify custom profile directories are honoured and the bundled
    TBB profile stays unmodified."""
    TBB_PROFILE_PATH = join(TBB_PATH, cm.DEFAULT_TBB_PROFILE_PATH)
    def test_custom_profile_and_tbb_path(self):
        """Make sure we use the right profile directory when the TBB
        path and profile path is provided.
        """
        tmp_dir = tempfile.mkdtemp()
        mod_time_before = getmtime(self.TBB_PROFILE_PATH)
        with TBDriverFixture(
                TBB_PATH, tbb_profile_path=tmp_dir,
                use_custom_profile=True) as driver:
            assert isdir(tmp_dir)
            assert driver.temp_profile_dir == tmp_dir
            driver.load_url_ensure(cm.CHECK_TPO_URL)
        # Bundled profile must be untouched (mtime unchanged).
        mod_time_after = getmtime(self.TBB_PROFILE_PATH)
        self.assertEqual(mod_time_before, mod_time_after)
    def test_custom_profile_and_binary(self):
        """Make sure we use the right directory when a binary
        and profile is provided.
        """
        tmp_dir = tempfile.mkdtemp()
        fx_binary = join(TBB_PATH, cm.DEFAULT_TBB_FX_BINARY_PATH)
        mod_time_before = getmtime(self.TBB_PROFILE_PATH)
        with TBDriverFixture(
                tbb_fx_binary_path=fx_binary, tbb_profile_path=tmp_dir,
                use_custom_profile=True) as driver:
            assert isdir(tmp_dir)
            assert driver.temp_profile_dir == tmp_dir
            driver.load_url_ensure(cm.CHECK_TPO_URL)
        mod_time_after = getmtime(self.TBB_PROFILE_PATH)
        self.assertEqual(mod_time_before, mod_time_after)
class TBDriverCustomGeckoDriverPort(unittest.TestCase):
    """Verify a caller-supplied geckodriver port is used and released."""
    def test_should_accept_custom_geckodriver_port(self):
        """Make sure we accept a custom port number to run geckodriver on."""
        random_port = free_port()
        with TBDriverFixture(TBB_PATH, geckodriver_port=random_port) as driver:
            driver.load_url_ensure(cm.ABOUT_TOR_URL)
            self.assertTrue(is_busy(random_port))  # check if the port is used
        # check if the port is closed after we quit
        self.assertFalse(is_busy(random_port))
if __name__ == "__main__":
unittest.main()
from random import randrange
class Game:
    """A tiny text-mode golf game: each hole is 230m away with par 5."""

    def init(self):
        """Reset per-session state.

        Kept as ``init`` (not ``__init__``) because ``start()`` calls it
        explicitly before the menu loop.
        """
        self.distance = 230
        self.shots = 0
        self.running = True
        self.club = False
        self.choice = False

    def set_username(self):
        """Prompt for and store the player's username; returns it."""
        self.username = input('welcome to niggaboy golf. Enter your username: ')
        return self.username

    def main_menu(self):
        """Show the main menu, read a choice and dispatch it."""
        print(f'welcome to niggaboy golf {self.username}\n')
        print("(I)nstructions\n(P)lay golf\n(Q)uit")
        self.choice = input("Choice: ").lower()
        self.handle_choice()

    def handle_choice(self):
        """Act on the stored menu choice: instructions, quit or play."""
        if self.choice == "i":
            print("This is a simple golf game in which each hole is 230m game away with par 5.You are able to choose from 3 clubs, the Driver, Iron or Putter. The Driverwill hit around 100m, the Iron around 30m and the Putter around 10m. Theputter is best used very close to the hole.")
        elif self.choice == "q":
            print(f"Farewell and thanks for playing {self.username}")
            self.running = False
        elif self.choice == "p":
            self.play()

    def play(self):
        """Play one hole until the remaining distance reaches 0."""
        # Bug fix: reset the hole before each round. Previously the
        # distance stayed at 0 after the first hole, so replaying ended
        # immediately and the old shot count carried over.
        self.distance = 230
        self.shots = 0
        while self.distance:
            self.set_club()
            self.swing()
        if self.shots > 5:
            # Bug fix: report the actual strokes over par, not a fixed 5.
            print(f"Clunk... After {self.shots} hits, the ball is in the hole!\n Disappointing. You are {self.shots - 5} over par.")
        elif self.shots < 5:
            # Bug fix: under-par is par(5) minus shots, not shots - 1.
            print(f"Clunk... After {self.shots} hits, the ball is in the hole!\n Congratulations, you are {5 - self.shots} under par.")
        else:
            print(f"Clunk... After {self.shots} hits, the ball is in the hole! And that’s par.")

    def swing(self):
        """Take one shot with the selected club; invalid clubs waste a shot."""
        if not self.club in ['d', 'p', 'i']:
            self.shots += 1
            print(f"Invalid club selection = air swing :( Your shot went 0m. You are {self.distance} from the hole, after {self.shots} shot/s")
        else:
            if self.club == "d":
                average_distance = 100
            elif self.club == "i":
                average_distance = 30
            else:  # 'p'
                average_distance = 10
            self.swing_club(average_distance)

    def swing_club(self, average_distance):
        """Hit the ball a random 80%-120% of the club's average distance."""
        # Bug fix: randrange rejects float bounds on modern Python
        # (TypeError since 3.12); int() preserves the original spread.
        shot_distance = randrange(int(average_distance * 0.80), int(average_distance * 1.20))
        # abs() keeps the distance non-negative when we overshoot the hole.
        self.distance = abs(self.distance - shot_distance)
        self.shots += 1
        print(f"Your shot went {shot_distance}m.\n You are {self.distance}m from the hole, after {self.shots} shot/s.")

    def set_club(self):
        """Prompt for a club and store the lower-cased selection."""
        print("Club selection: press D for driver Avg 100m, I for Iron Avg 30m, P for Putter Avg 10m\n")
        self.club = input("choose a club: ").lower()

    def start(self):
        """Main loop: ask for a username and show the menu until quit."""
        self.init()
        while self.running:
            self.set_username()
            self.main_menu()
game = Game()
game.start()
'''
deleteLater() # 在代码执行完之后删除对象
'''
################################
# PyQt5中文网 - PyQt5全套视频教程 #
# https://www.PyQt5.cn/ #
# 主讲: 村长 #
################################
from PyQt5.Qt import *
import sys
class Window(QWidget):
    """Demo window showing QObject parent/child ownership and how
    ``deleteLater()`` differs from dropping a Python reference."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("父子关系")
        self.resize(600, 500)
        self.func_list()
    def func_list(self):
        self.func()
    def func(self):
        obj1 = QObject()
        self.obj1 = obj1  # keep a reference on self so obj1 outlives this method (stored on the heap)
        obj2 = QObject()
        obj3 = QObject()
        # Build the ownership chain obj1 -> obj2 -> obj3.
        obj2.setParent(obj1)
        obj3.setParent(obj2)
        print(obj1)
        print(obj2)
        print(obj3)
        obj1.destroyed.connect(lambda :print('obj1被释放'))
        obj2.destroyed.connect(lambda :print('obj2被释放'))
        obj3.destroyed.connect(lambda :print('obj3被释放'))
        #del obj2  # would delete only the local (stack) reference; the C++ object on the heap survives via its parent
        print(obj2.deleteLater())  # deleteLater: schedules deletion of the underlying heap object
        # deleteLater only takes effect after control returns to the event
        # loop, so obj2 still shows up among obj1's children here.
        print(obj1.children())
        # Example with widgets:
        label1 = QLabel(self)
        label1.setText('label1')
        label1.move(50,50)
        label1.setStyleSheet('background-color:green')
        label2 = QLabel(self)
        label2.setText('label2')
        label2.move(100, 100)
        label2.setStyleSheet('background-color:green')
        label3 = QLabel(self)
        label3.setText('label3')
        label3.move(150, 150)
        label3.setStyleSheet('background-color:green')
        #label2.deleteLater()
        # del only drops the Python name; label2 is kept alive by its
        # parent widget (self), so it still gets shown.
        del label2
if __name__ == '__main__':
app = QApplication(sys.argv)
window = Window()
window.show()
    sys.exit(app.exec_())
# Given the coordinates of two rectilinear rectangles in a 2D plane, return the total area covered by the two rectangles.
# The first rectangle is defined by its bottom-left corner (ax1, ay1) and its top-right corner (ax2, ay2).
# The second rectangle is defined by its bottom-left corner (bx1, by1) and its top-right corner (bx2, by2).
# Example 1:
# Rectangle Area
# Input: ax1 = -3, ay1 = 0, ax2 = 3, ay2 = 4, bx1 = 0, by1 = -1, bx2 = 9, by2 = 2
# Output: 45
# Example 2:
# Input: ax1 = -2, ay1 = -2, ax2 = 2, ay2 = 2, bx1 = -2, by1 = -2, bx2 = 2, by2 = 2
# Output: 16
# Constraints:
# -104 <= ax1, ay1, ax2, ay2, bx1, by1, bx2, by2 <= 104
class Solution:
    def computeArea(self, ax1: int, ay1: int, ax2: int, ay2: int, bx1: int, by1: int, bx2: int, by2: int) -> int:
        """Return the total area covered by two axis-aligned rectangles
        (sum of both areas minus their overlap)."""
        first_area = (ax2 - ax1) * (ay2 - ay1)
        second_area = (bx2 - bx1) * (by2 - by1)
        # Overlap extent along each axis, clamped to zero when disjoint.
        overlap_w = max(0, min(ax2, bx2) - max(ax1, bx1))
        overlap_h = max(0, min(ay2, by2) - max(ay1, by1))
        return first_area + second_area - overlap_w * overlap_h
import sys
import sqlite3
from orders_management import*
#menu for managing customer
class order_menu():
    """Console menu for creating, updating, viewing and deleting orders.

    Persistence is delegated to an ``orders_manage`` instance
    (``self.active_detail``); this class only handles user interaction
    plus two direct read helpers against pharmacy_database.db.
    """

    def __init__(self):
        self.running = None
        self.active_detail = orders_manage()

    def run_menu(self, choice):
        """Dispatch one menu action: 1=insert, 2=update, 3=list all,
        4=search, 5=delete."""
        if choice == 1:
            order_date = input("please enter the order date: ")
            order_size = input("please enter the size of the order: ")
            values = (order_date, order_size)
            self.active_detail.insert_order_data(values)
        elif choice == 2:
            # Renamed from ``id`` to avoid shadowing the builtin.
            order_id = input("please enter the id of the product you wish to change: ")
            choice = self.get_answers()
            if choice == 1:
                order_date = input("please enter the date of the order: ")
                value = (order_date, order_id)
                self.active_detail.update_order_date(value)
            elif choice == 2:
                order_size = input("please enter the new size of the order: ")
                value = (order_size, order_id)
                self.active_detail.update_order_size(value)
            elif choice == 3:
                order_date = input("please enter the date of the order: ")
                order_size = input("please enter the new size of the order: ")
                value = (order_date, order_size, order_id)
                self.active_detail.update_order_sizedate(value)
        elif choice == 3:
            order = self.active_detail.order_data()
            print(order)
        elif choice == 4:
            done = False
            while not done:
                print("would you like to search by order_num or by order_date: ", end="")
                choices = input()
                choices = choices.lower()
                if choices in ["order_num", "order num", "order number", "order_number"]:
                    print("please enter the order number you wish to view: ", end="")
                    order_num = input()
                    rename = self.active_detail.display_order_data(order_num)
                    print(rename)
                    done = True
                elif choices in ["order_date", "order date"]:
                    print("please enter the customer id you wish to view: ", end="")
                    name = input()
                    rename = self.active_detail.display_order_data(name)
                    print(rename)
                    done = True
                else:
                    print("please enter a valid choice")
                    done = False
        elif choice == 5:
            choice = input("which id do you want to delete: ")
            self.active_detail.delete_order_data(choice)

    def get_order_date(self, id):
        """Return the (OrderDate,) row for the given order number.

        Bug fix: the original fetched the row but never returned it,
        so callers always received None.
        """
        with sqlite3.connect("pharmacy_database.db") as db:
            cursor = db.cursor()
            cursor.execute("select OrderDate from Orders where OrderNum=?", (id,))
            Product = cursor.fetchone()
            return Product

    def get_order_size(self, id):
        """Return the (OrderSize,) row for the given order number."""
        with sqlite3.connect("pharmacy_database.db") as db:
            cursor = db.cursor()
            cursor.execute("select OrderSize from Orders where OrderNum=?", (id,))
            Product = cursor.fetchone()
            return Product

    def get_answers(self):
        """Prompt for which field(s) to update and return 1, 2 or 3.

        Bug fix: on non-numeric input the original recursed but discarded
        the recursive result and then hit an UnboundLocalError on
        ``return choice``; the recursive result is now returned.
        """
        print("what do you want to update?")
        print()
        print("1.order_date")
        print("2.order_size")
        print("3.update all")
        print("what is your choice: ", end="")
        try:
            choice = int(input())
        except ValueError:
            print()
            return self.get_answers()
        return choice
from controller import Robot, Motor, DistanceSensor
import numpy as np
from collections import deque
# import opencv
import cv2 as cv
MAX_SPEED = 47.6
WHEEL_RADIUS = 21
INF = float('inf')
class ChaseFoodState:
    """Steer toward detected food (green); finished once we bump into it."""
    def __init__(self, r):
        self.r=r
    def check_transition(self):
        if self.r.has_bumped:
            # if we bump to the food we are done
            print("donete")
            self.r.stop()
    def tick(self):
        # compute food angle
        food_angle = self.r.get_food_angle(2000)
        if food_angle == "none":
            print("we lost food")
            self.r.state = WallFollowState(self.r)
        # NOTE(review): no early return above, so when food_angle is
        # "none" the final ``else`` still drives the robot forward once.
        # turn to food
        if food_angle == "left":
            print("turning left")
            self.r.turn_left(.2*MAX_SPEED, 20)
        elif food_angle == "right":
            print("turning right")
            self.r.turn_right(.2*MAX_SPEED, 20)
        else:
            print("moving forward")
            self.r.move_forward(.2*MAX_SPEED, 500)
        # force sensors update
        self.r.update_sensors(bump_th=250, color_th=4000)
        # check transitions
        self.check_transition()
class WallFollowState:
    """Default behaviour: hug the nearest wall until food, an enemy or
    danger is detected."""
    def __init__(self, r):
        self.r=r
        # Side of the wall currently followed; passed to follow_wall().
        # NOTE(review): follow_wall returns the detected side but the
        # return value is never stored back here -- confirm intent.
        self.current_wall = "straight"
    def check_transition(self):
        if self.r.has_food:
            print("going to chase food")
            self.r.state = ChaseFoodState(self.r)
        elif self.r.has_enemy:
            print("going to avoid enemy")
            self.r.state = AvoidEnemyState(self.r)
        elif self.r.has_danger:
            print("going to avoid danger")
            self.r.state = AvoidDangerState(self.r)
    def tick(self):
        # just follow wall
        self.r.follow_wall(self.current_wall)
        # check transitions (sensors are updated regullary)
        self.check_transition()
class AvoidDangerState:
    """Back away fast from danger (red) until it is out of sight."""
    def __init__(self, r):
        self.r=r
    def check_transitions(self):
        if self.r.has_food:
            print("going to chase food")
            self.r.state = ChaseFoodState(self.r)
        elif self.r.has_enemy:
            print("going to avoid enemy")
            self.r.state = AvoidEnemyState(self.r)
        if not self.r.has_danger:
            print("going to wall follow")
            self.r.state = WallFollowState(self.r)
    def tick(self):
        # move fast backwards and turn back
        self.r.turn_back(0.5*MAX_SPEED, 20)
        # force sensors update
        self.r.update_sensors(bump_th=250, color_th=2500)
        # check transitions
        self.check_transitions()
class AvoidEnemyState:
    """Retreat slowly from an enemy (blue) until it is out of sight."""
    def __init__(self, r):
        self.r=r
    def check_transitions(self):
        if self.r.has_food:
            print("going to chase food")
            self.r.state = ChaseFoodState(self.r)
        elif self.r.has_danger:
            print("going to avoid danger")
            self.r.state = AvoidDangerState(self.r)
        if not self.r.has_enemy:
            print("going to wall follow")
            self.r.state = WallFollowState(self.r)
    def tick(self):
        # move slowly backwards and turn left
        print("avoiding enemy")
        self.r.move_backward_turn(0.25*MAX_SPEED, 200)
        print("avoiding enemy done")
        # force sensors update
        self.r.update_sensors(bump_th=250, color_th=4000)
        # check transitions
        self.check_transitions()
class FixBumpState:
    """Recover from a wall collision by backing off and turning around,
    repeating until the bump sensors clear."""
    def __init__(self, r):
        self.r=r
    def check_transition(self):
        if not self.r.has_bumped:
            print("going to wall follow")
            self.r.state = WallFollowState(self.r)
    def tick(self):
        # move backwards and turn back. If we still are bumping
        # repeat the process
        self.r.move_backward(MAX_SPEED, 100)
        self.r.turn_back(MAX_SPEED, 4)
        # force sensors update
        print("fixing bump")
        self.r.update_sensors(bump_th=250, color_th=4000)
        self.check_transition()
class KheperaBot:
    """State-machine controller for a Khepera IV robot in Webots.

    Wraps the Webots ``Robot`` node, its infrared sensors, camera and
    wheel motors; exposes the movement primitives and sensor-event flags
    (``has_bumped``/``has_enemy``/``has_food``/``has_danger``) that the
    state classes above consume.
    """
    def __init__(self):
        self.robot = Robot()
        # Simulation timestep in ms; also reused as a sentinel (-1) to stop.
        self.ts = int(self.robot.getBasicTimeStep())
        # Counter for the (mostly disabled) debug-image dumps.
        self.pic_idx = 0
        self.sensors = {
            "left": self.robot.getDevice("left infrared sensor"),
            "right": self.robot.getDevice("right infrared sensor"),
            "front": self.robot.getDevice("front infrared sensor"),
            "front left": self.robot.getDevice("front left infrared sensor"),
            "front right": self.robot.getDevice("front right infrared sensor"),
            "camera": self.robot.getDevice("camera")
        }
        self.motors={
            "left wheel": self.robot.getDevice("left wheel motor"),
            "right wheel": self.robot.getDevice("right wheel motor")
        }
        self.init_sensors()
        self.init_motors()
        self.has_bumped = False # bump = the robot has ran into a wall
        self.has_enemy = False # enemy = the robot found something blue
        self.has_food = False # food = the robot found something green
        self.has_danger = False # danger = the robot found something red
        self.state = WallFollowState(self)
    # initialization
    def init_sensors(self):
        # init sensors -> enable them by timestep
        for sensor in self.sensors.values():
            sensor.enable(self.ts)
    def init_motors(self):
        # init motors -> set position to inf and velocity to 0
        for motor in self.motors.values():
            motor.setPosition(float('inf'))
            motor.setVelocity(0)
    # movements
    def move_forward(self, velocity, ammount):
        # move forward -> set velocity both wheels the same value
        # NOTE(review): unlike the other primitives this steps by
        # ``ammount`` directly, not ``self.ts*ammount`` -- confirm intent.
        self.motors["left wheel"].setVelocity(velocity)
        self.motors["right wheel"].setVelocity(velocity)
        self.robot.step(ammount)
    def move_backward(self, velocity, ammount):
        # move backward -> set velocity both wheels the same value but negative
        self.motors["left wheel"].setVelocity(-velocity)
        self.motors["right wheel"].setVelocity(-velocity)
        self.robot.step(self.ts*ammount)
    def move_backward_turn(self, velocity, ammount):
        # move backward and turn: 75% of the time straight back, then 25%
        # with the right wheel slowed to curve away.
        self.motors["left wheel"].setVelocity(-velocity)
        self.motors["right wheel"].setVelocity(-velocity)
        self.robot.step(int(0.75*self.ts*ammount))
        self.motors["left wheel"].setVelocity(-velocity)
        self.motors["right wheel"].setVelocity(-0.25*velocity)
        self.robot.step(int(0.25*self.ts*ammount))
    def turn_left(self, velocity, ammount=2):
        # turn left -> set velocity left wheel to 0 and right wheel to velocity
        self.motors["left wheel"].setVelocity(0)
        self.motors["right wheel"].setVelocity(velocity)
        self.robot.step(self.ts*ammount)
    def turn_right(self, velocity, ammount=2):
        # turn right -> set velocity left wheel to velocity and right wheel to 0
        self.motors["left wheel"].setVelocity(velocity)
        self.motors["right wheel"].setVelocity(0)
        self.robot.step(self.ts*ammount)
    def turn_back(self, velocity, ammount):
        # NOTE(review): despite the name and the comment below, this issues
        # the same wheel command as turn_left (left=0, right=velocity);
        # it also clears the danger flag afterwards -- confirm intent.
        # turn_back -> set velocity both wheels to negative velocity
        self.motors["left wheel"].setVelocity(0)
        self.motors["right wheel"].setVelocity(velocity)
        self.robot.step(self.ts*ammount)
        self.has_danger=False
    def stop(self):
        # stop -> set velocity both wheels to 0
        self.motors["left wheel"].setVelocity(0)
        self.motors["right wheel"].setVelocity(0)
        self.robot.step(self.ts)
        # Sentinel: main_loop's ``robot.step(self.ts)`` condition ends the
        # control loop once ts is -1 (presumably -- TODO confirm with the
        # Webots Robot.step semantics).
        self.ts = -1
        return
    def follow_wall(self, w=None, threshold=150):
        """Proportional wall-following step; returns which side the wall
        was sensed on ("left"/"right") or "straight" if none nearby."""
        # Slow down as the front sensor reading (proximity) grows.
        speed_offset = 0.3 * (MAX_SPEED - 0.03 * self.sensors["front"].getValue())
        fl, fr = self.sensors["front left"].getValue(), self.sensors["front right"].getValue()
        l, r = self.sensors["left"].getValue(), self.sensors["right"].getValue()
        delta_r, delta_l = 0.02, 0.02
        # if we loose our wall turn HARDER
        if w=="right" and r<threshold and fr<threshold and l<threshold and fl<threshold:
            delta_l=2*delta_l
        if w=="left" and l<threshold and fl<threshold and r<threshold and fr<threshold:
            delta_r=2*delta_r
        speed_delta = delta_l * fl - delta_r * fr
        self.motors["left wheel"].setVelocity(speed_offset + speed_delta)
        self.motors["right wheel"].setVelocity(speed_offset - speed_delta)
        if max(fl,l)<threshold and max(fr,r)<threshold:
            return "straight"
        return "left" if max(fl, l)>max(fr, r) else "right"
    # sensors
    def process_camera(self):
        # process image camera and returns an array of the number
        # of red, green and blue pixels
        w,h = self.sensors["camera"].getWidth(), self.sensors["camera"].getHeight()
        img = self.sensors["camera"].getImage()
        image_array = np.array(self.sensors["camera"].getImageArray(), dtype=np.uint8)
        image_array = cv.resize(image_array, (h//2, w//2))
        # take only center of image
        image_w, image_h = image_array.shape[0], image_array.shape[1]
        delta_size = 100
        image_array = image_array[image_w//2-delta_size:image_w//2+delta_size, image_h//2-delta_size:image_h//2+delta_size]
        # rotate image -90 degrees
        image_array = cv.rotate(image_array, cv.ROTATE_90_CLOCKWISE)
        # flip image
        image_array = cv.flip(image_array, 1)
        # save image as rgb
        if self.pic_idx%3==0 and False:
            print("save image")
            image_rgb = cv.cvtColor(image_array, cv.COLOR_BGR2RGB)
            cv.imwrite("image"+str(self.pic_idx)+".png", image_rgb)
        # remove white pixels
        #image_array[image_array.all() > 100] = 0
        # save red channel
        # NOTE(review): the three channel variables below are numpy views
        # into image_array, so the in-place thresholding of one channel can
        # interact with the later cross-channel masking -- confirm intended.
        red_channel = image_array[:,:,0]
        red_channel[red_channel < 175] = 0
        red_channel[red_channel > 0] = 255
        # save green channel
        green_channel = image_array[:,:,1]
        green_channel[green_channel < 150] = 0
        green_channel[green_channel > 0] = 255
        # save blue channel
        blue_channel = image_array[:,:,2]
        blue_channel[blue_channel < 150] = 0
        blue_channel[blue_channel > 0] = 255
        # save image channels
        if self.pic_idx%3==0 and False:
            cv.imwrite("red"+str(self.pic_idx)+".png", red_channel)
            cv.imwrite("green"+str(self.pic_idx)+".png", green_channel)
            cv.imwrite("blue"+str(self.pic_idx)+".png", blue_channel)
            self.pic_idx += 1
        # Zero out pixels that fired in more than one channel so each pixel
        # counts toward at most one colour.
        blue_channel[green_channel > 0] = 0
        blue_channel[red_channel > 0] = 0
        green_channel[blue_channel > 0] = 0
        green_channel[red_channel > 0] = 0
        red_channel[blue_channel > 0] = 0
        red_channel[green_channel > 0] = 0
        red_px = np.count_nonzero(red_channel)
        # count food pixels by summing left third, center third and right third
        green_px_left = np.count_nonzero(green_channel[:, :green_channel.shape[1]//3])
        green_px_center = np.count_nonzero(green_channel[:, green_channel.shape[1]//3:green_channel.shape[1]//3*2])
        green_px_right = np.count_nonzero(green_channel[:, green_channel.shape[1]//3:])
        # NOTE(review): the total excludes the center third -- confirm.
        green_px = green_px_left+green_px_right
        blue_px = np.count_nonzero(blue_channel)
        return red_px, green_px, blue_px, (green_px_left, green_px_center, green_px_right)
    def get_food_angle(self, th):
        # get food position by counting pixels
        r, g, b, (gl, gc, gr) = self.process_camera()
        print("-> Food:",gl, gc, gr)
        if gl<th and gr<th and gc<th:
            return "none"
        if gl>gr and gl>gc:
            return "left"
        elif gl<gr and gr>gc:
            return "right"
        else:
            return "center"
    def update_sensors(self, bump_th=1000, color_th=15000):
        """Refresh the has_bumped/has_enemy/has_food/has_danger flags from
        the infrared sensors and camera pixel counts."""
        bump_left_val = self.sensors["left"].getValue()
        bump_right_val = self.sensors["right"].getValue()
        bump_front_val = self.sensors["front"].getValue()
        print("-> Bumpers values:",bump_left_val, bump_right_val, bump_front_val)
        bump_left = self.sensors["left"].getValue() > bump_th
        bump_right = self.sensors["right"].getValue() > bump_th
        bump_front = self.sensors["front"].getValue() > bump_th
        self.has_bumped = bump_left or bump_right or bump_front
        print("-> Bumpers:",bump_left, bump_right, bump_front)
        r, g, b, _ = self.process_camera()
        # A colour is "seen" only when it dominates: its count exceeds the
        # threshold while the other two stay below it.
        negative_th = color_th
        self.has_enemy = r > color_th and g < negative_th and b < negative_th
        self.has_food = g > color_th and r < negative_th and b < negative_th
        self.has_danger = b > color_th and r < negative_th and g < negative_th
        print("-> colors (RGB):",r,g,b)
        print("-> Enemy, Food or Danger:",self.has_enemy, self.has_food, self.has_danger)
    def main_loop(self):
        """Run the controller until the simulator stops: roughly once per
        simulated second, refresh sensors and tick the current state."""
        while self.robot.step(self.ts) != -1:
            if self.robot.getTime() % 1 <= self.ts / 500:
                self.update_sensors(bump_th=250, color_th=2550)
                self.state.tick()
robot = KheperaBot()
robot.main_loop() | Polifack/Subsummed-Architecture-Webots | controllers/khepera4_controller/khepera4_controller.py | khepera4_controller.py | py | 13,750 | python | en | code | 0 | github-code | 36 |
19389873266 | import sqlite3
# FIX: removed a stray bare `insert()` call that executed at import time,
# before the function below was defined and with no arguments -- it raised
# NameError and aborted the script.
def insert(cur, name, adress, phone, email):
    """Insert one contact row into the Contact table.

    Args:
        cur: an open sqlite3 cursor on a database with a Contact table.
        name: contact name -- expected to be unique, so a duplicate insert fails.
        adress, phone, email: remaining column values.

    Prints a success or failure message instead of raising.
    """
    try:
        cur.execute(''' INSERT INTO Contact (name,adress,phone,email)
                VALUES (?,?,?,?)
                ''',(name,adress,phone,email))
        print('Sucess: The contact:',(name,adress,phone,email),'has been added to the database')
    # FIX: catch only sqlite errors (e.g. UNIQUE constraint violations).
    # The original bare `except:` also swallowed unrelated bugs such as
    # NameError/TypeError and even KeyboardInterrupt.
    except sqlite3.Error:
        print('Failed: The contact name already exists.... ')
def update(cur,name):
    """Interactively update an existing contact's adress/phone/email.

    Looks the contact up by *name*; if found, prompts on stdin for the new
    field values and writes them back. Prints the outcome either way; the
    caller is responsible for committing the connection.
    """
    cur.execute('SELECT * FROM Contact WHERE name= ?',(name,))
    row=cur.fetchone()
    if row is None:
        print("Failed: Contact doesn't exist in the database")
    else:
        print("Contact found please enter new informations")
        adress=input('Enter new adress: ')
        phone=input('Enter new phone: ')
        email=input('Enter new email: ')
        cur.execute('''UPDATE Contact SET adress= ?,
                phone= ?,
                email= ?
                WHERE name= ? ''',(adress,phone,email,name))
        print("Sucess: Contact has been updated")
# --- Smoke-test script: exercises insert() and update() against db.sqlite ---
conn=sqlite3.connect('db.sqlite')
cur=conn.cursor()
email=''
#email=input('Enter your email')
print('Your email is',email)
name=input ('Enter Name: ')
adress='TestAdress'
email='TestEmail'
phone='TestPhone'
# FIX: arguments were passed as (..., email, phone) while insert() is
# declared as insert(cur, name, adress, phone, email), so the phone and
# email values were stored in each other's columns.
insert(cur,name,adress,phone,email)
conn.commit()
cur.execute('SELECT * FROM Contact WHERE name= ?',(name,))
row=cur.fetchone()
if row is None:
    print('Contact not found in the database')
else:
    # Column order in the Contact table is (name, adress, phone, email);
    # FIX: the labels below now match it (row[2] is the phone, row[3] the email).
    print('Contact found')
    print('\tName:',row[0])
    print('\tAdress:',row[1])
    print('\tPhone:',row[2])
    print('\tEmail:',row[3])
conn.commit()
print('------UDPATE TESTING ---------')
name=input('Enter name: ')
update(cur,name)
conn.commit()
| Mysticboi/Contact_Database | test.py | test.py | py | 1,625 | python | en | code | 0 | github-code | 36 |
28066573272 | n = int(input())
# Greedy "ATM" problem: serve people in ascending service time so the total
# waiting time (the sum of prefix sums) is minimised.
data = list(map(int, input().split()))  # per-person service times (n values)
data.sort()  # shortest jobs first minimises cumulative waiting
rest = 0  # running prefix sum of the times already served
# NOTE(review): `sum` shadows the builtin; renaming would also require
# changing the final print statement that follows this block.
sum = 0
if n == 1:
    # Single person: their waiting time is just their own service time.
    # NOTE(review): the unconditional print(sum) after this block still runs
    # in this branch and emits an extra 0 -- confirm that is intended.
    print(data[0])
else:
    for i in range(n):
        # Person i waits for everyone before them (rest) plus themselves.
        sum += data[i] + rest
        rest += data[i]
print(sum) | hwanginbeom/algorithm_study | 1.algorithm_question/1.greedy/1. ATM_Seonyeong.py | 1. ATM_Seonyeong.py | py | 198 | python | en | code | 3 | github-code | 36 |
43041165646 | import logging
import os
import snyk
# Set up logger
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("LOG_LEVEL", default="INFO"))
def get_org_admins(org):
    """
    Return the members of *org* holding the admin role.

    :param org: Snyk organisation object exposing ``name`` and ``members``
    :return: the admin members, as returned by ``org.members.filter``
    """
    logger.debug("Getting list of admins from %s", org.name)
    admins = org.members.filter(role="admin")
    return admins
class SnykApiFacade:
    """Thin facade over the Snyk REST APIs used by the slackbot.

    Holds two clients: a low-level client (``client_ll``) pinned to the
    api.snyk.io/api/v1 endpoint for calls only available there, and the
    default high-level pysnyk client (``client_hl``) for organisation
    lookups. All methods log failures and return a sentinel instead of
    raising, so callers never see provider exceptions.
    """

    def __init__(self, settings):
        """Build the API clients from *settings*.

        The API token is read from the environment variable whose name is
        given by ``settings.config("snyk_token_env_var_name")``.
        """
        token = os.getenv(settings.config("snyk_token_env_var_name"))
        self.settings = settings
        self.client_ll = snyk.SnykClient(
            token, version="2022-08-12", url="https://api.snyk.io/api/v1"
        )
        self.client_hl = snyk.SnykClient(token)

    def create_organisation(self, name):
        """
        Try to create a new Snyk organisation with the given name under the
        group configured in the settings file.

        :param name: the name of the org to create
        :return: the parsed JSON response from the API, or False on any error
        """
        try:
            return self.client_ll.post(
                "/org", {"name": name, "groupId": self.settings.config("snyk_group_id")}
            ).json()
        except Exception as error:
            logger.error(
                "Unable to create organisation, API call threw error %s", str(error)
            )
            return False

    def org_name_exists(self, name):
        """
        Because multiple orgs may share a name within Snyk, check manually
        whether the given name is already taken.

        :param name: the org name (generated from user input)
        :return: a truthy list of matching org ids if any exist, False otherwise
        """
        logger.debug("Checking if org %s already exists", name)
        orgs = self.client_hl.organizations.filter(
            name=name
        )  # TODO: Filter by group ID here too
        if orgs:
            return [x.id for x in orgs]
        return False

    def get_user(self, email_address):
        """
        Look up a member of the configured Snyk group by email address.

        :param email_address: the email address of the user to look up
        :return: a dict describing the user if found, None otherwise
        """
        try:
            logger.debug("Checking if user %s exists in Snyk", email_address)
            result = self.client_ll.get(
                f"/group/{self.settings.config('snyk_group_id')}/members"
            ).json()
            for user in result:
                if user.get("email") == email_address:
                    return user
        except Exception as error:
            logger.error(
                "Error checking if user %s exists in Snyk - API threw error %s",
                email_address,
                str(error),
            )
        return None

    def add_user_to_org(self, org_id, user_id):
        """
        Add a user to the specified organisation with the admin role
        (the role is hard-coded below).

        :param org_id: the org ID to add the user to
        :param user_id: the Snyk user ID of the user to add
        :return: True if the addition succeeded, False otherwise
        """
        try:
            logger.debug("Adding user %s to org %s", user_id, org_id)
            self.client_ll.post(
                f"/group/{self.settings.config('snyk_group_id')}/org/{org_id}/members",
                {"userId": user_id, "role": "admin"},
            ).json()
            return True
        except Exception as error:
            logger.error(
                "Error adding user %s to org %s - API threw error %s",
                user_id,
                org_id,
                str(error),
            )
            return False

    def get_org_from_name(self, org_name):
        """
        Look up an org by name and return the first matching org object.

        :param org_name: the org name to look for
        :return: the org object, or None on no match or API error
        """
        try:
            logger.debug("Looking up org %s by name", org_name)
            found_org = self.client_hl.organizations.filter(name=org_name)[0]
            return found_org
        except Exception as error:
            logger.error(
                "Error getting org %s by name - API threw error %s",
                org_name,
                str(error),
            )
            return None
| snyk-playground/snyk-org-slackbot | snyk_slackbot/api.py | api.py | py | 4,589 | python | en | code | 0 | github-code | 36 |
16046372668 | """Package Metadata."""
import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
# Package metadata for the bank-of-england distribution.
setup(
    name="bank-of-england",
    version="0.0.1",
    description="Retrieve data from the Bank of England's Statistical Interactive Database (IADB)",
    long_description=README,  # rendered on the PyPI project page
    long_description_content_type="text/markdown",
    url="https://github.com/ronaldocpontes/bank-of-england",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Operating System :: OS Independent",
    ],
    package_dir={"": "src"},  # src-layout: sources live under src/
    include_package_data=True,
    package_data={"": ["data/*.*"],},  # ship the bundled data files
    py_modules=["bank_of_england"],
    install_requires=["pandas", "requests"],
    extras_require={"dev": ["pytest", "tox"]},  # `pip install .[dev]` for test tooling
)
| ronaldocpontes/bank-of-england | setup.py | setup.py | py | 1,019 | python | en | code | 2 | github-code | 36 |
11602597643 | import streamlit as st
import time
import re
import chardet
import pandas as pd
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.patches import ConnectionPatch
from functools import wraps
from shapely.geometry import Point
def main():
    """Streamlit entry point: wires layer selection, input-mode choice and
    the region-division pipeline together."""
    # Counters persisted across Streamlit reruns via session state:
    # `run` counts completed divisions, `layer_selector` records whether the
    # layer-selection form was ever confirmed.
    if 'run' not in st.session_state:
        st.session_state.run = 0
    if 'layer_selector' not in st.session_state:
        st.session_state.layer_selector = 0
    st.title('区域划分工具')
    region_dict = read_layer_and_check('qgis')
    selected_name, submit = layer_selector(region_dict)
    input_mode = input_mode_selector()
    # Run the pipeline only after the user confirmed a layer selection once.
    if st.session_state.layer_selector:
        if input_mode == '文本输入':
            run_manual_input(region_dict, selected_name)
        else:
            run_file_input(region_dict, selected_name)
    st.write('执行次数为:', st.session_state.run)
@st.cache
def read_layer_and_check(geofolder):
    """Load the layer-index CSV and every GIS layer it lists.

    Reads ``./<geofolder>/图层信息.csv`` (mapping field name -> layer name),
    opens each listed layer file and verifies the field exists in it.

    Returns:
        dict mapping field name -> [layer name, GeoDataFrame] on success;
        implicitly None when the index CSV cannot be read (error shown).
    """
    try:
        dictionary = dict(pd.read_csv(f'.//{geofolder}//图层信息.csv', encoding='gb18030').loc[:, ['字段名称', '图层名称']].values)
        key_list = dictionary.keys()
        # 'mapinfo' folders hold shapefiles; everything else is GeoPackage.
        file_extension = 'shp' if geofolder == 'mapinfo' else 'gpkg'
        for index, name in enumerate(key_list):
            gdf = gpd.read_file(f'.//{geofolder}//{dictionary[name]}.{file_extension}', encoding='utf-8')
            if name not in list(gdf):
                st.error(f'图层字段<{name}>不在图层<{dictionary[name]}.{file_extension}>中')
            else:
                # Replace the plain layer name with [name, GeoDataFrame];
                # the setdefault below always finds the list just assigned.
                dictionary[name] = [dictionary[name]]
                dictionary.setdefault(name, []).append(gdf)
        return dictionary
    except IOError:
        st.error(f'找不到图层信息')
def layer_selector(region_dictionary):
    """Render the layer-selection form and a preview plot.

    Args:
        region_dictionary: field name -> [layer name, GeoDataFrame] map
            from read_layer_and_check().
    Returns:
        (selected field names, whether the form submit button was pressed)
    """
    st.header('1、图层展示')
    with st.form(key='selector'):
        # st.subheader('图层信息选择')
        region_name = st.multiselect(
            "请选择图层",
            region_dictionary.keys(),
            default=['区县', '三方区域', '规划区域'],
        )
        # Confirming the form bumps the session-state counter that gates main().
        submit = st.form_submit_button(label='确认', on_click=layer_selector_counter)
    figure = layer_ploting(region_dictionary, region_name, 3)
    if region_name:
        name_list = '、'.join(region_name)
        st.write(f'选择的图层为:{name_list}')
        st.pyplot(figure)
    return region_name, submit
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def layer_ploting(region_dictionary, region_name, fig_cols):
    """Plot each selected layer in a grid of subplots, *fig_cols* wide.

    Returns a matplotlib Figure (an empty, axis-less one when no layer is
    selected). Axis extents are hard-coded — presumably the project's
    coverage area; confirm before reusing elsewhere.
    """
    plt.rcParams['font.size'] = 5
    num_fig = len(region_name)
    if num_fig > 0:
        nrows = (num_fig - 1) // fig_cols + 1
        fig, ax = plt.subplots(nrows, fig_cols, figsize=(3 * fig_cols, 3 * nrows))
        for i, field_name in enumerate(region_name):
            geo_df = region_dictionary[field_name][1]
            # plt.subplots returns a 1-D axes array when nrows == 1.
            if nrows == 1:
                ax_i = ax[i]
            else:
                ax_rows, ax_cols = i // fig_cols, i % fig_cols
                ax_i = ax[ax_rows][ax_cols]
            ax_i.set_xlim(119.1, 120.3)
            ax_i.set_ylim(31.1, 32.1)
            geo_df.plot(ax=ax_i, column=field_name, cmap='Spectral')
        # Hide the axes of the unused trailing subplot slots.
        mod_num = num_fig % fig_cols
        if mod_num != 0:
            if nrows == 1:
                for n in range(mod_num, fig_cols):
                    ax[n].axis('off')
            else:
                for n in range(mod_num, fig_cols):
                    ax[nrows - 1][n].axis('off')
    else:
        fig, ax = plt.subplots()
        ax.axis('off')
    # st.write("Cache miss: layer_ploting")
    return fig
def input_mode_selector():
    """Render the sidebar radio widget and return the chosen input mode."""
    st.header('2、数据选择')
    st.sidebar.header('输入模式选择')
    mode_options = ('文件导入', '文本输入')
    chosen_mode = st.sidebar.radio(
        '请选择输入方式',
        mode_options,
        help='首次执行请先在图层选择处点击确认。'
    )
    return chosen_mode
def run_manual_input(region_dictionary, region_name):
    """Text-input workflow: parse pasted coordinates, run the division and
    show table/map output plus a download button."""
    st.write('数据选择模式:文本输入')
    input_text = st.sidebar.text_input(
        '输入经纬度',
        value='例如:119.934 31.8528 119.939 31.84',
        help='输入经纬度数据,可直接复制粘贴excel表格中的经度、纬度2列数据'
    )
    df_source = text_to_df(input_text)
    st.write('数据源:')
    st.table(df_source)
    # st.stop() halts this rerun until the button is pressed.
    if not st.sidebar.button('执行区域划分'):
        st.stop()
    else:
        st.sidebar.header('输出结果')
        result = region_division(df_source, region_dictionary, region_name)
        st.header('3、输出表格')
        st.table(result)
        st.header('4、地图展示')
        # st.map expects lon/lat column names.
        st.map(result.rename(columns={'经度': 'lon', '纬度': 'lat'}))
        st.sidebar.header('数据下载')
        name_list = '、'.join(region_name)
        st.sidebar.download_button(
            label='下载结果',
            data=ouput(result),
            file_name=f'区域划分结果-{name_list}.csv',
            mime='text/csv',
        )
def text_to_df(text):
    """Parse longitude/latitude pairs out of free text into a DataFrame.

    Accepts pairs separated by whitespace or commas, e.g.
    "119.934 31.8528 119.939 31.84". Longitudes must start with 11x/12x and
    latitudes with 31/32 (the app's coverage area).

    Returns:
        DataFrame with '经度'/'纬度' float columns, or implicitly None after
        showing a Streamlit error when nothing matches.
    """
    # FIX: the decimal points are now escaped (\.) -- the original pattern's
    # bare '.' matched ANY character, so strings like "119X934 31Y8528"
    # were wrongly accepted as coordinates.
    search_result = re.findall(r'(?P<lon>1[12][0-9]\.\d+)[\s,,]*(?P<lat>3[12]\.\d+)', text)
    if search_result:
        point = {}
        for lon_lat in search_result:
            point.setdefault('经度', []).append(float(lon_lat[0]))
            point.setdefault('纬度', []).append(float(lon_lat[1]))
        return pd.DataFrame(data=point)
    else:
        st.error('输入格式错误')
def run_file_input(region_dictionary, region_name):
    """File-upload workflow: read an uploaded table, run the division and
    show previews, summary charts and download buttons."""
    st.write('数据选择模式:文件导入')
    file_obj = st.sidebar.file_uploader(
        '上传一个表格',
        type=['csv', 'xlsx', 'xls'],
        help='上传文件格式为csv、xlsx、xls,需包含表头为经度、纬度的2列数据',
    )
    if file_obj:
        # Clean the data and run the region division.
        df_source = read_df(file_obj)
        if df_source is None:
            st.stop()
        st.sidebar.header('输出结果')
        result = region_division(df_source, region_dictionary, region_name)
        # Preview of the source data (default row count rounded to 5s).
        render_rows = 10 if df_source.shape[0] >= 10 else df_source.shape[0] // 5 * 5
        rows = st.sidebar.slider(
            '选择数据源显示行数',
            0, 50, render_rows, 5
        )
        st.write(f'数据源(前{rows}行):')
        st.dataframe(df_source.head(rows))
        # Random sample of the result.
        st.header('3、输出表格')
        sample_rows = st.sidebar.slider(
            '选择结果采样行数',
            0, 50, render_rows, 5
        )
        st.write(f'随机采样{sample_rows}行:')
        df_sample = result.sample(sample_rows)
        st.dataframe(df_sample)
        # Visualise the per-layer summaries.
        st.header('4、统计图表')
        summary, rail_data = reslut_summary(result, region_name)
        fig_list = summary_ploting(summary, rail_data)
        for figure in fig_list:
            st.pyplot(figure)
        # Download buttons: full detail and per-layer counts.
        st.sidebar.header('数据下载')
        name_list = '、'.join(region_name)
        st.sidebar.download_button(
            label='下载明细结果',
            data=ouput(result),
            file_name=f'区域划分结果-{name_list}.csv',
            mime='text/csv',
            help='区域划分的明细数据',
        )
        st.sidebar.download_button(
            label='下载统计结果',
            data=output_summary(summary),
            file_name=f'区域划分统计结果-{name_list}.csv',
            mime='text/csv',
            help='统计每个图层各个区域的数量',
        )
def time_costing(step):
    """Decorator factory: report to the Streamlit sidebar which layers were
    processed and how long *step* took.

    NOTE(review): assumes the wrapped function's THIRD positional argument
    (args[2]) is the layer name or list of names -- confirm before reusing
    on functions with a different signature.
    """
    def func_name(func):
        @wraps(func)
        def core(*args, **kwargs):
            start = time.time()
            res = func(*args, **kwargs)
            region_name = args[2]
            # Normalise a single name to a list for the join below.
            if isinstance(region_name, str):
                region_name = [region_name]
            elif isinstance(region_name, list):
                pass
            st.sidebar.write('、'.join(region_name) + '已划分')
            st.sidebar.write(f'{step}耗时:{float(time.time() - start):.3f}秒')
            return res
        return core
    return func_name
# Dtype maps for the longitude/latitude columns: read as strings first so
# they can be cleaned (whitespace, '/'-separated values), then cast to float.
LONLAT_STR_FORMAT = {'经度': 'string', '纬度': 'string'}
LONLAT_FLOAT_FORMAT = {'经度': 'float64', '纬度': 'float64'}
def df_clean(df):
    """Validate that *df* has the lon/lat columns, then clean them and cast
    to float. Shows errors (and implicitly returns None) otherwise."""
    required = {'经度', '纬度'}
    if required.issubset(set(df.columns)):
        return df.pipe(clean_lotlan).astype(LONLAT_FLOAT_FORMAT)
    st.error('当前表格格式错误')
    st.sidebar.error('当前表格格式错误')
def clean_lotlan(df_cell):
    """Normalise the lon/lat columns: strip all whitespace, and explode rows
    whose coordinates hold several '/'-separated values into one row each.
    """
    # Cast both coordinate columns to string and drop any whitespace.
    for col_name in list(df_cell.loc[:, ['经度', '纬度']]):
        df_cell[col_name] = df_cell.astype({col_name: 'string'})[col_name].str.replace(r'\s', '', regex=True)
    # Boolean mask of rows whose longitude packs multiple '/'-separated values.
    df_cell_split_list = df_cell['经度'].str.contains('/')
    df_cell_split = df_cell[df_cell_split_list]
    if not df_cell_split.empty:
        df_comb = pd.DataFrame([], index=df_cell_split.index)
        # Split each coordinate column on '/' and stack so every value gets
        # its own row; the original index is kept so halves can be re-joined.
        for col_name in list(df_cell_split.loc[:, ['经度', '纬度']]):
            df_comb = pd.concat([df_comb, (df_cell_split[col_name].str.split('/', expand=True)
                .stack().reset_index(level=1).rename(columns={0: col_name}))], axis=1)
        # NOTE(review): iloc[:, :3] assumes the non-coordinate columns to keep
        # are exactly the first three -- confirm against the input schema.
        df_cell = pd.concat([df_cell[~df_cell_split_list],
            df_cell_split.iloc[:, :3].join(df_comb.drop(['level_1'], axis=1))]).reset_index(drop=True)
    return df_cell
@st.cache(suppress_st_warning=True)
def read_df(file):
    """Read an uploaded csv/xlsx/xls into a DataFrame, sniffing the text
    encoding of CSVs with chardet.

    Returns:
        DataFrame on success; None (after a Streamlit error) for unknown
        encodings or extensions.
    """
    # FIX: take the LAST dot-separated part so names like "my.data.csv"
    # resolve to "csv" instead of "data".
    f_ext = file.name.split('.')[-1]
    df = None
    if f_ext == 'csv':
        # Sniff the encoding from the first line, then rewind for the real
        # read. Names are lower-cased with '-' replaced by '_'
        # (e.g. "UTF-8" -> "utf_8").
        encode = str.lower(chardet.detect(file.readline())["encoding"]).replace('-', '_')
        file.seek(0)
        # FIX: the branches below compared against the un-normalised names
        # 'utf-8' and 'iso-8859-1', which can never match after the
        # replace('-', '_') above -- plain UTF-8 CSVs fell through to the
        # "unknown encoding" error. 'ascii' is a subset of UTF-8.
        if encode in ('utf_8', 'ascii'):
            df = pd_read(file, f_ext, 'utf-8')
        elif encode == 'gb2312':
            # chardet often under-reports GBK/GB18030 as GB2312; try supersets.
            try:
                df = pd_read(file, f_ext, 'gbk')
            except UnicodeDecodeError:
                df = pd_read(file, f_ext, 'gb18030')
        elif encode == 'utf_8_sig':
            df = pd_read(file, f_ext, 'utf_8_sig')
        elif encode == "iso_8859_1":
            # Latin-1 is a common misdetection for GBK-encoded Chinese text.
            df = pd_read(file, f_ext, 'gbk')
        else:
            st.error('文件编码错误')
    elif f_ext in ['xlsx', 'xls']:
        df = pd_read(file, f_ext)
    else:
        st.error('文件格式错误')
    # st.write("Cache miss:read_df")
    return df
def pd_read(file, extension, encode_n=None):
    """Dispatch to the pandas reader matching *extension*, keeping the
    lon/lat columns as strings; shows a Streamlit error on failure."""
    try:
        if extension == 'csv':
            frame = pd.read_csv(file, dtype=LONLAT_STR_FORMAT, encoding=encode_n, low_memory=False)
            return frame
        if extension in ('xlsx', 'xls'):
            return pd.read_excel(file, dtype=LONLAT_STR_FORMAT)
        st.error('文件格式错误')
    except ValueError:
        st.error('文件读取错误')
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
@time_costing('区域划分')
def region_division(df, region_dictionary, region_name):
    """Tag every lon/lat row of *df* with the region it falls in, for each
    selected layer, via a spatial join in the EPSG:2381 projected CRS.

    Duplicate coordinates are joined once and merged back, so repeated
    points don't pay for repeated spatial joins.
    """
    lanlot_cols = ['经度', '纬度']
    df = df_clean(df)
    # Accept a single layer name or a list of names.
    if isinstance(region_name, str):
        region_name = [region_name]
    elif isinstance(region_name, list):
        pass
    else:
        st.error('错误:区域名称错误')
    df_dropdu = df.drop_duplicates(subset=lanlot_cols).reset_index(drop=True)
    my_bar = st.sidebar.progress(0)
    for index, name in enumerate(region_name):
        gdf_region = region_dictionary[name][1]
        # NOTE(review): both branches of this ternary are identical -- the
        # intent was probably set_crs when crs is None; confirm.
        gdf_region = gdf_region.to_crs('EPSG:2381') if gdf_region.crs is None else gdf_region.to_crs('EPSG:2381')
        lanlot = gpd.GeoSeries([Point(x, y) for x, y in zip(df_dropdu[lanlot_cols[0]], df_dropdu[lanlot_cols[1]])])
        # Points are declared WGS-84, reprojected, then joined to the layer
        # polygons; 'index' keeps the link back to df_dropdu rows.
        lanlot_region = gpd.sjoin(lanlot.reset_index().rename(columns={0: 'geometry'})
                                  .set_crs('epsg:4326').to_crs('EPSG:2381'), gdf_region.loc[:, [name, 'geometry']])
        df_dropdu = df_dropdu.join(lanlot_region.set_index('index').loc[:, name])
        my_bar.progress((index + 1) / len(region_name))
    df = df.merge(df_dropdu.loc[:, lanlot_cols + region_name], how='left', on=lanlot_cols)
    # st.write("Cache miss: region_division")
    run_counter()
    return df
def run_counter():
    # Bump the persisted count of completed region-division runs.
    st.session_state.run += 1
def layer_selector_counter():
    # Record that the layer-selection form was confirmed at least once.
    st.session_state.layer_selector += 1
def ouput(df):
    """Serialize *df* to CSV bytes (UTF-8 with BOM, Excel-friendly), no index."""
    csv_text = df.to_csv(index=False)
    return csv_text.encode('utf-8-sig')
def output_summary(summary):
    """Concatenate the per-layer summary tables side by side and serialize
    them to CSV bytes (UTF-8 with BOM), no index."""
    combined = pd.DataFrame([])
    for frame in summary.values():
        combined = pd.concat([combined, frame], axis=1)
    return combined.to_csv(index=False).encode('utf-8-sig')
@st.cache(suppress_st_warning=True)
def reslut_summary(df, region_name):
    """Count rows per region value for each selected layer, in a fixed
    display order; the rail-surroundings table is split out separately.

    NOTE(review): counts the 'ECGI' column, so the uploaded table is assumed
    to have one -- confirm against the expected input schema.

    Returns:
        (dict of layer name -> ordered count DataFrame, rail DataFrame or None)
    """
    # Fill unmatched points with each layer's "outside" label.
    for name in region_name:
        if name == '规划区域':
            df['规划区域'] = df['规划区域'].fillna('农村')
        elif name == '网格区域':
            df['网格区域'] = df['网格区域'].fillna('网格外')
        elif name == '高铁周边':
            df['高铁周边'] = df['高铁周边'].fillna('铁路外')
        else:
            df[name] = df[name].fillna('其他')
    # Fixed presentation order for each layer's categories.
    county_order = ['天宁', '钟楼', '武进', '新北', '经开', '金坛', '溧阳', '其他']
    third_party_order = ['华星', '华苏-武进', '华苏-金坛', '华苏-溧阳', '其他']
    planning_region_order = ['主城区', '一般城区', '县城', '乡镇', '农村']
    grid_order = ['网格内', '网格边界200米', '网格外']
    rail_surrounding_order = ['京沪周边500米', '京沪周边1.5公里', '沪宁周边500米', '沪宁周边1.5公里', '宁杭周边500米', '宁杭周边1.5公里', '铁路外']
    tag_order = ['主城区', '县城', '其他']
    name_list = ['区县', '三方区域', '规划区域', '网格区域', '高铁周边', '标签区域']
    order_list = [county_order, third_party_order, planning_region_order, grid_order, rail_surrounding_order, tag_order]
    region_order_dict = dict(zip(name_list, order_list))
    summary = {}
    for name in region_name:
        # Sort via a throwaway ordered-categorical column, then drop it.
        summary[name] = (df.groupby(name)['ECGI'].count().reset_index(name='数量')
                         .assign(temp=lambda x: x[name].astype('category').cat.set_categories(region_order_dict[name]))
                         .sort_values(by=['temp'], ignore_index=True).drop('temp', axis=1))
    # Rail data is charted separately (bar-of-pie) by summary_ploting.
    rail_data = summary.pop('高铁周边') if summary.get('高铁周边') is not None else None
    # st.write("Cache miss: reslut_summary")
    return summary, rail_data
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def summary_ploting(summary, rail_data):
    """Build the summary figures: nested donut charts for the ordinary
    layers (two per subplot) and a bar-of-pie composite for the rail data.

    Returns a list of matplotlib Figures (possibly empty).
    """
    # SimHei so the Chinese labels render.
    plt.rcParams['font.family'] = 'sans-serif'
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    fig_list = []
    region_name = list(summary.keys())
    num_name = len(region_name)
    nrows = (num_name - 1) // 4 + 1
    if num_name > 0:
        fig, ax = plt.subplots(nrows, 2, figsize=(10, 4.8 * nrows))
        # Draw nested donut charts two layers at a time (rail data excluded).
        for index in range(0, num_name, 2):
            name_1 = region_name[index]
            name_2 = region_name[index + 1] if index < num_name - 1 else None
            if nrows == 1:
                ax_i = ax[index // 2]
            else:
                ax_rows, ax_cols = index // 2 // 2, index // 2 % 2
                ax_i = ax[ax_rows][ax_cols]
            if name_2 is not None:
                size = 0.3
                labels_1, vals_1 = summary[name_1][name_1].to_list(), summary[name_1]['数量'].values
                labels_2, vals_2 = summary[name_2][name_2].to_list(), summary[name_2]['数量'].values
                num_label_1, num_label_2 = len(labels_1), len(labels_2)
                cmap = plt.get_cmap("tab20c")
                # The layer with fewer categories goes on the outer ring.
                if num_label_1 <= num_label_2:
                    outer_labels, outer_vals = labels_1, vals_1
                    inner_labels, inner_vals = labels_2, vals_2
                    outer_colors = cmap(tab20c_color_array(num_label_1, 'outer'))
                    inner_colors = cmap(tab20c_color_array(num_label_2, 'inner'))
                else:
                    outer_labels, outer_vals = labels_2, vals_2
                    inner_labels, inner_vals = labels_1, vals_1
                    outer_colors = cmap(tab20c_color_array(num_label_2, 'outer'))
                    inner_colors = cmap(tab20c_color_array(num_label_1, 'inner'))
                wedges1, texts1, autotexts1 = ax_i.pie(
                    inner_vals, radius=1 - size, labels=inner_labels, colors=inner_colors,
                    autopct=lambda pct: pct_func(pct, inner_vals), pctdistance=0.75, labeldistance=0.3,
                    startangle=90, wedgeprops=dict(width=size, edgecolor='w')
                )
                wedges2, texts2, autotexts2 = ax_i.pie(
                    outer_vals, radius=1, labels=outer_labels, colors=outer_colors,
                    autopct=lambda pct: pct_func(pct, outer_vals), pctdistance=0.85,
                    startangle=90, wedgeprops=dict(width=size, edgecolor='w')
                )
                plt.setp(autotexts1, size=10, weight="bold", color="w")
                plt.setp(autotexts2, size=10, weight="bold", color="w")
                plt.setp(texts1, size=10, color="k")
                plt.setp(texts2, size=10, color="k")
                ax_i.set(aspect="equal")
            else:
                # A single leftover layer gets a plain pie chart.
                labels_1, vals_1 = summary[name_1][name_1].to_list(), summary[name_1]['数量'].values
                num_label_1 = len(labels_1)
                cmap = plt.get_cmap("tab20c")
                outer_colors = cmap(tab20c_color_array(num_label_1, 'inner'))
                wedges, texts, autotexts = ax_i.pie(vals_1, radius=1, labels=labels_1, colors=outer_colors,
                                                    autopct=lambda pct: pct_func(pct, vals_1), startangle=90)
                plt.setp(autotexts, size=10, weight="bold", color="w")
                plt.setp(texts, size=10, weight="bold", color="k")
                ax_i.set(aspect="equal")
        plt.axis('off')
        fig_list.append(fig)
    # Bar-of-pie style composite chart for the rail data.
    if rail_data is not None:
        fig = plt.figure(figsize=(10, 4.8))
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122)
        fig.subplots_adjust(wspace=0)
        merged_label = ['高铁周边', '铁路外']
        df_rail = rail_data.query('高铁周边 != "铁路外"')
        merged_val = [df_rail['数量'].sum(), rail_data.query('高铁周边 == "铁路外"')['数量'].sum()]
        # Rotate so the exploded wedge faces the detail pie on the right.
        angle = -180 * merged_val[0] / merged_val[1]
        explode = [0.1, 0]
        cmap = plt.get_cmap("tab20c")
        merged_colors = cmap([4, 0])
        wedges1, texts1, autotexts1 = ax1.pie(merged_val, radius=1, labels=merged_label, colors=merged_colors,
                                              autopct=lambda pct: pct_func(pct, merged_val),
                                              startangle=angle, explode=explode)
        plt.setp(autotexts1, size=10, weight="bold", color="w")
        plt.setp(texts1, size=12, color="k")
        detail_label, detail_val = df_rail['高铁周边'].to_list(), df_rail['数量'].values
        num_label = len(detail_label)
        cmap = plt.get_cmap("tab20c")
        detail_colors = cmap(tab20c_color_array(num_label, 'inner'))
        r2 = 0.8
        wedges2, texts2, autotexts2 = ax2.pie(detail_val, radius=r2, labels=detail_label, colors=detail_colors,
                                              autopct=lambda pct: pct_func(pct, detail_val),
                                              startangle=90, counterclock=False)
        plt.setp(autotexts2, size=10, weight="bold", color="w")
        plt.setp(texts2, size=10, color="k")
        # Geometry of the exploded wedge, used to anchor the connector lines.
        theta1 = ax1.patches[0].theta1
        theta2 = ax1.patches[0].theta2
        center = ax1.patches[0].center
        r = ax1.patches[0].r
        width = 0.2
        # Connector line from the wedge's upper edge.
        x = r * np.cos(np.pi / 180 * theta2) + center[0]
        y = r * np.sin(np.pi / 180 * theta2) + center[1]
        con_a = ConnectionPatch(xyA=(-width / 2, r2), xyB=(x, y), coordsA='data', coordsB='data', axesA=ax2, axesB=ax1)
        # Connector line from the wedge's lower edge.
        x = r * np.cos(np.pi / 180 * theta1) + center[0]
        y = r * np.sin(np.pi / 180 * theta1) + center[1]
        con_b = ConnectionPatch(xyA=(-width / 2, -r2), xyB=(x, y), coordsA='data', coordsB='data', axesA=ax2, axesB=ax1)
        for con in [con_a, con_b]:
            con.set_linewidth(1)  # connector line width
            # NOTE(review): this ASSIGNS over the set_color method instead of
            # calling it, so the colour is never applied (default is used).
            con.set_color = ([0, 0, 0])  # connector line colour
            ax2.add_artist(con)  # attach the connector to the axes
        fig_list.append(fig)
    else:
        pass
    # st.write("Cache miss: summary_ploting")
    return fig_list
def pct_func(pct, allvals):
    """Pie-chart autopct helper: absolute count on one line, percent below."""
    total = np.sum(allvals)
    absolute = int(round(pct / 100. * total))
    return f"{absolute:d}\n{pct:.1f}%"
def tab20c_color_array(num_label, outer_or_inner):
    """Pick sorted tab20c colormap indices for a donut ring.

    'outer' rings use the darkest shade of each hue family (stepping through
    families, one shade per layer of 5 labels); 'inner' rings start from the
    two lighter shades and step further for every 10 labels.
    """
    indices = np.empty((0, 5))
    base = np.arange(5) * 4
    if outer_or_inner == 'outer':
        layer_count = (num_label - 1) // 5 + 1
        for layer in range(layer_count):
            indices = np.append(indices, base + layer)
        indices = np.sort(indices).astype(int)
    elif outer_or_inner == 'inner':
        layer_count = (num_label - 1) // 10 + 1
        for layer in range(layer_count):
            if layer == 0:
                indices = np.append(base + 1, base + 2)
            else:
                indices = np.append(indices, base + layer + 2)
    return np.sort(indices)
if __name__ == "__main__":
main()
| spiritdncyer/region-divsion-streamlit | demo-regionDiv.py | demo-regionDiv.py | py | 21,686 | python | en | code | 0 | github-code | 36 |
74928155943 | from tkinter import *
master = Tk()
cv_width = 300
cv_height = 300
def diagonal_square(i, a): # i = represents the squares position on a diagonal line; a = size of the square
canvas.create_rectangle(i*a, i*a, a+i*a, a+i*a, fill = "purple")
canvas = Canvas(width=cv_width, height=cv_height)
canvas.pack()
def diagonal_squares(num_of_boxes, square_size):
for j in range(1, num_of_boxes+1):
diagonal_square(j,square_size)
diagonal_squares(19,11)
master.mainloop()
| greenfox-zerda-lasers/tamasc | week-04/day-3/litte_squares.py | litte_squares.py | py | 486 | python | en | code | 0 | github-code | 36 |
73087528423 | # -*- coding: utf-8 -*-
# @Author: ahmedkammorah
# @Date: 2019-04-04 15:54:42
# @Last Modified by: Ahmed kammorah
# @Last Modified time: 2019-04-08 22:58:45
from enum import Enum
import json
from MainService.main.email_provider_connector import RESPONSE_STATE
from MainService.main.ak_ep_services import AKEmailServices, AKProviderService,SERVICE_STATUS, logger
class EmailMessage(object):
def __init__(self, to_emails, from_email, subject, body):
if to_emails == None or from_email == None:
return None
if len(to_emails) == 0 or len(from_email) == 0:
return None
self._to_emails = to_emails
self._from_email = from_email
self._subject = subject
self._body = body
@property
def to_emails(self):
return self._to_emails
@property
def from_email(self):
return self._from_email
@property
def subject(self):
return self._subject
@property
def body(self):
return self._body
def __str__(self):
return 'Eamil for subject:{} from_email:{} to_emails:{} \nbody:{}'.format(self.subject, self.from_email, self.to_emails, self.body)
def build_sparkpost_msg(self):
data = {
"recipients": [
],
"content": {
"from": {
"email": "ahmedkammorah@trendship.net",
"name": ""
},
"subject": "",
"html": "<html><body> </body></html>",
"text": ""
}
}
# data['content']['from']['email'] = self.from_email
data['content']['from']['name'] = self.from_email
data['content']['subject'] = self.subject
data['content']['html'] = self.body
data['content']['text'] = self.body
for em in self.to_emails:
newRec = {
"address": em
}
data['recipients'].append(newRec)
return json.dumps(data)
class AKMainEmailService(AKEmailServices):
"""The Main Email service Class
Attributes:
redis_util: instance of the redis util to be manger of the commancation with redis
service_provider_list: List of email provider names
services: map of all avaiable and registered service
"""
def __init__(self):
"""Intiialize the Main Email service with regestering all service providers"""
super().__init__()
def _pick_service(self):
"""Picking the first operational service provider
Args:
Returns:
AKProviderService instance of the first running provider
OR None if there is no up and running provider
"""
logger.debug('Start picking one of the running service provider service ')
for ser_name in self.service_provider_list:
status = self.redis_util.get_ser_status(ser_name)
print(status)
print(SERVICE_STATUS.UP.value)
if status == SERVICE_STATUS.UP.value:
return self.services.get(ser_name, AKProviderService(ser_name))
logger.error("No Service Provider is up right now")
return None
def send_email(self, email_message:EmailMessage):
""" Sending Email messgae by picking the first avaliblae running email service Provider
Args:
email_message: full email email_message
Returns:
response to user
"""
if email_message == None:
logger.error("Can't send Empty or null Email")
return
logger.info('Start the process of Sending Eamil email_message')
email_ser = self._pick_service()
if email_ser == None:
logger.error("No Email Service Provider up and running to Use ")
# TODO: fire slack event to notify the dev team
# TODO: add this request to a queue for next run when there is service to use
return
logger.info("Start using email provider {} for sending email".format(email_ser.name))
email_connector = email_ser.connector
res_status, response = email_connector.send_email(email_message)
if res_status == RESPONSE_STATE.OK:
logger.info("Successfully sending the email by {}".format(email_ser.name))
return (res_status, 'success send the email')
elif res_status == RESPONSE_STATE.USER_ERROR:
logger.error("User email_message related error: {} when sending email by: {} provider".format(response, email_ser.name))
return (res_status, response)
elif res_status == RESPONSE_STATE.SERVICE_ERROR:
# Fail over start use different provider
logger.error("Email Service provider {} is down for now".format(email_ser.name))
email_ser.status = SERVICE_STATUS.DOWN
self.redis_util.set_ser_status(email_ser)
return self.send_email(email_message)
elif res_status == RESPONSE_STATE.OVERRATE_ERROR:
# Fail over start use different provider
logger.error("Email Service provider {} is overlimt for now".format(email_ser.name))
email_ser.status = SERVICE_STATUS.OVERLIMIT
self.redis_util.set_ser_status(email_ser)
return self.send_email(email_message)
elif res_status == RESPONSE_STATE.REQUEST_ERROR:
logger.error("Request related error: {} when sending by: {} provider".format(response, email_ser.name))
# TODO: Notify dev team with this error by slack or push it to error topic in kafka
return
elif res_status == RESPONSE_STATE.OTHER_ERROR:
logger.error("unidentified error: {} when use provider {}".format(response, email_ser.name))
return
return
if __name__ == "__main__":
ak = AKMainEmailService()
| AhmedKammorah/AKEmailService | MainService/main/ak_main_email_service.py | ak_main_email_service.py | py | 5,942 | python | en | code | 0 | github-code | 36 |
955800912 | pkgname = "python-snowballstemmer"
pkgver = "2.2.0"
pkgrel = 0
build_style = "python_module"
hostmakedepends = ["python-setuptools"]
depends = ["python"]
pkgdesc = "Snowball stemming library collection for Python"
maintainer = "q66 <q66@chimera-linux.org>"
license = "BSD-3-Clause"
url = "https://github.com/shibukawa/snowball_py"
source = f"$(PYPI_SITE)/s/snowballstemmer/snowballstemmer-{pkgver}.tar.gz"
sha256 = "09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"
def post_install(self):
self.install_license("COPYING")
| chimera-linux/cports | main/python-snowballstemmer/template.py | template.py | py | 544 | python | en | code | 119 | github-code | 36 |
24938208176 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: tietuku.py
# modified: 2019-03-30
"""
贴图库 api 类
"""
__all__ = [
"TietukuClient",
]
import os
import time
from io import BytesIO
from .base import BaseClient
from .utils import get_links_cache_json, save_links_cache_json
from ..utils.log import cout
from ..utils.funcs import xMD5, xSHA1
from ..utils.meta import Singleton
from ..utils.decorator import cached_property
from ..const import TIETUKU_TOKEN, TIETUKU_AID, TIETUKU_CACHE_EXPIRED, TIETUKU_LINKS_CACHE_JSON
from ..exceptions import TietukuUploadError
class TietukuClient(BaseClient, metaclass=Singleton):
    """
    Tietuku image-hosting API client (singleton).
    """

    def __init__(self):
        super().__init__()
        # Disk-backed cache of previously uploaded links, keyed by image SHA1.
        self._imgLinks = get_links_cache_json(TIETUKU_LINKS_CACHE_JSON)

    def upload(self, filename, imgBytes):
        """
        Image upload interface.

        Args:
            filename    str      image file name
            imgBytes    bytes    raw bytes of the image
        Return:
            links       dict     external-link info for this file
                        {
                            'url': image URL
                            'md5': image MD5
                            'sha1': image SHA1
                            'expire_time': Unix time/s when the cached link expires
                        }
        Raise:
            TietukuUploadError   upload failed; for an error reply the
                                 'code' field identifies the failure type
        -------------------------------------------------
        JSON returned on success:
            {
                "width": 1280,
                "height": 711,
                "type": "jpg",
                "size": 24640,
                "ubburl": "[img]http://i1.bvimg.com/656554/0cf57e9173c0acaf.jpg[/img]",
                "linkurl": "http://i1.bvimg.com/656554/0cf57e9173c0acaf.jpg",
                "htmlurl": "<img src='http://i1.bvimg.com/656554/0cf57e9173c0acaf.jpg' />",
                "markdown": "",
                "s_url": "http://i1.bvimg.com/656554/0cf57e9173c0acafs.jpg",
                "t_url": "http://i1.bvimg.com/656554/0cf57e9173c0acaft.jpg",
                "findurl": "7cbf06538e66e772"
            }
        JSON returned on failure; look up 'code' for the error type, the
        human-readable message is in 'info':
            {
                "code": "4511",
                "info": "\u76f8\u518c\u4e0d\u5b58\u5728\u6216\u5df2\u7ecf\u5220\u9664"
            }
        """
        imgMD5 = xMD5(imgBytes)
        imgSHA1 = xSHA1(imgBytes)

        links = self._imgLinks.get(imgSHA1)

        # Serve from cache while the cached entry has not expired yet.
        if links is not None and links['expire_time'] > time.time():
            cout.info('Get image %s from cache' % filename)
            return links
        else:
            cout.info('uploading image %s' % filename)

            # Remote object key: content SHA1 + the original file extension.
            key = "{basename}{ext}".format(
                basename=imgSHA1,
                ext=os.path.splitext(filename)[1]
            )

            r = self._post('http://up.imgapi.com/',
                    data={
                        'Token': TIETUKU_TOKEN,
                        'deadline': int(time.time() + 60),  # parameter required by the API; purpose unclear
                        'aid': TIETUKU_AID,
                        'from': 'file',  # either 'file' or 'web': upload from local bytes vs. fetch a URL
                    },
                    files={
                        'file': (key, BytesIO(imgBytes)),
                    }
                )

            respJson = r.json()
            # Error replies carry a 'code' field; success replies do not.
            if "code" in respJson:
                raise TietukuUploadError("[%s] %s" % ( respJson['code'], respJson['info'] ) )

            links = {
                "url": respJson['linkurl'],
                # "o_url": respJson['linkurl'], # original image
                # "s_url": respJson['s_url'],   # display-size image
                # "t_url": respJson['t_url'],   # thumbnail
                "md5": imgMD5,
                "sha1": imgSHA1,
                "expire_time": int(time.time() + TIETUKU_CACHE_EXPIRED)  # used to validate cache freshness
            }

            # Persist the refreshed cache immediately.
            self._imgLinks[imgSHA1] = links
            save_links_cache_json(TIETUKU_LINKS_CACHE_JSON, self._imgLinks)

            return links
| pkuyouth/pkuyouth-html-coder | htmlcoder/core/client/tietuku.py | tietuku.py | py | 4,184 | python | en | code | 5 | github-code | 36 |
7870451227 | from datetime import datetime
import requests
from bs4 import BeautifulSoup
from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import \
AbstractGetBinDataClass
# import the wonderful Beautiful Soup and the URL grabber
class CouncilClass(AbstractGetBinDataClass):
    """
    Concrete classes have to implement all abstract operations of the
    base class. They can also override some operations with a default
    implementation.
    """

    def parse_data(self, page: str, **kwargs) -> dict:
        """Scrape next bin-collection dates for a Wigan property.

        kwargs:
            uprn      property UPRN (zero-padded to 12 chars below)
            postcode  property postcode
        Returns a dict of results (see NOTE on schema below).
        """
        # Get and check UPRN
        user_uprn = kwargs.get("uprn")
        check_uprn(user_uprn)
        user_uprn = user_uprn.zfill(
            12
        )  # Wigan is expecting 12 character UPRN or else it falls over, expects 0 padded UPRNS at the start for any that aren't 12 chars
        user_postcode = kwargs.get("postcode")
        check_postcode(user_postcode)

        # Start a new session to walk through the form
        requests.packages.urllib3.disable_warnings()
        s = requests.session()

        # Get our initial session running
        response = s.get("https://apps.wigan.gov.uk/MyNeighbourhood/")
        soup = BeautifulSoup(response.text, features="html.parser")
        soup.prettify()

        # Grab the ASP variables needed to continue
        # (ASP.NET WebForms requires echoing these hidden fields back.)
        payload = {
            "__VIEWSTATE": (soup.find("input", {"id": "__VIEWSTATE"}).get("value")),
            "__VIEWSTATEGENERATOR": (
                soup.find("input", {"id": "__VIEWSTATEGENERATOR"}).get("value")
            ),
            "__EVENTVALIDATION": (
                soup.find("input", {"id": "__EVENTVALIDATION"}).get("value")
            ),
            "ctl00$ContentPlaceHolder1$txtPostcode": (user_postcode),
            "ctl00$ContentPlaceHolder1$btnPostcodeSearch": ("Search"),
        }

        # Use the above to get to the next page with address selection
        response = s.post("https://apps.wigan.gov.uk/MyNeighbourhood/", payload)
        soup = BeautifulSoup(response.text, features="html.parser")
        soup.prettify()

        # Load the new variables that are constant and can't be gotten from the page
        payload = {
            "__EVENTTARGET": ("ctl00$ContentPlaceHolder1$lstAddresses"),
            "__EVENTARGUMENT": (""),
            "__LASTFOCUS": (""),
            "__VIEWSTATE": (soup.find("input", {"id": "__VIEWSTATE"}).get("value")),
            "__VIEWSTATEGENERATOR": (
                soup.find("input", {"id": "__VIEWSTATEGENERATOR"}).get("value")
            ),
            "__EVENTVALIDATION": (
                soup.find("input", {"id": "__EVENTVALIDATION"}).get("value")
            ),
            "ctl00$ContentPlaceHolder1$txtPostcode": (user_postcode),
            "ctl00$ContentPlaceHolder1$lstAddresses": ("UPRN" + user_uprn),
        }

        # Get the final page with the actual dates
        response = s.post("https://apps.wigan.gov.uk/MyNeighbourhood/", payload)
        soup = BeautifulSoup(response.text, features="html.parser")
        soup.prettify()

        # NOTE(review): the "bins" list below is initialized but never
        # appended to; results are stored as flat {bin_type: date} keys
        # instead.  Verify against the project's expected
        # {"bins": [{"type": ..., "collectionDate": ...}]} schema.
        data = {"bins": []}

        # Get the dates.
        for bins in soup.find_all("div", {"class": "BinsRecycling"}):
            bin_type = bins.find("h2").text
            binCollection = bins.find("div", {"class": "dateWrapper-next"}).get_text(
                strip=True
            )
            # Strip ordinal suffixes ("1st", "2nd", ...) before parsing.
            # `re` is assumed to come from the wildcard common import -- confirm.
            binData = datetime.strptime(
                re.sub(r"(\d)(st|nd|rd|th)", r"\1", binCollection), "%A%d%b%Y"
            )
            # NOTE(review): strptime raises on mismatch, so binData is always
            # truthy here -- this guard never filters anything.
            if binData:
                data[bin_type] = binData.strftime(date_format)

        return data
| robbrad/UKBinCollectionData | uk_bin_collection/uk_bin_collection/councils/WiganBoroughCouncil.py | WiganBoroughCouncil.py | py | 3,612 | python | en | code | 51 | github-code | 36 |
16442806200 | # from utils.txt_file_ops import *
from utils.Database_conn import *
from object.base_class import *
from loguru import logger
from tabulate import tabulate
from datetime import datetime
class Subject:
    """Console-menu CRUD manager for the `subject` table.

    Public interface: display_menu().  All database work goes through a
    SQLConnector connection opened in the constructor.
    """

    def __init__(self, sub_id='', sub_name=''):
        self.__sub_id = sub_id
        self.__sub_name = sub_name
        db_obj = SQLConnector()
        self.__db_conn = db_obj.create_connection()
        self.__db_cursor = self.__db_conn.cursor()

    def display_menu(self):
        """Run the interactive menu loop until the user chooses 0 (exit)."""
        while True:
            print("--------------------------------")
            print("PLEASE SELECT A FUNCTION")
            print("1. ADD NEW SUBJECT")
            print("2. UPDATE SUBJECT")
            print("3. DELETE SUBJECT")
            print("4. FIND SUBJECT")
            print("5. SHOW ALL SUBJECTS")
            print("0. EXIT")
            key = input("ENTER YOUR CHOICE: ")
            if key == '1':
                self.__add_data()
            elif key == '2':
                self.__update_data()
            elif key == '3':
                self.__delete_data()
            elif key == '4':
                self.__search_data()
            elif key == '5':
                self.__get_data()
            elif key == '0':
                print("EXITING...")
                return
            else:
                print("INVALID CHOICE")
                print("PLEASE TRY AGAIN")

    def __get_data(self):
        """Print all subjects as an ID/NAME table."""
        self.__db_cursor.execute("SELECT * FROM subject")
        result = self.__db_cursor.fetchall()
        # BUG FIX: the original iterated enumerate(result), so it tabulated
        # (row_index, row_tuple) pairs instead of the id/name columns.
        sub_list = [[row[0], row[1]] for row in result]
        print(tabulate(sub_list, headers=['ID', 'NAME']))

    def __input_sub_info(self):
        """Prompt for the subject name and store it on the instance."""
        self.__sub_name = input("SUBJECT NAME: ")

    def __add_data(self):
        """Insert a new subject row (id is auto-generated by the DB)."""
        print("--INPUT SUBJECT INFORMATION--")
        self.__input_sub_info()
        sql_cmd = "INSERT INTO subject (subject_name) VALUES (%s)"
        self.__db_cursor.execute(sql_cmd, (self.__sub_name,))
        self.__db_conn.commit()
        logger.info("SUBJECT ADDED SUCCESSFULLY")

    def __update_data(self):
        """Rename one subject identified by its id."""
        print("--UPDATE SUBJECT INFORMATION--")
        sub_ID_input = input("ENTER SUBJECT ID: ")
        self.__input_sub_info()
        sql_cmd = "UPDATE subject SET subject_name = %s WHERE subject_id = %s"
        self.__db_cursor.execute(sql_cmd, (self.__sub_name, sub_ID_input))
        self.__db_conn.commit()
        # BUG FIX: commit() returns None, so the original's
        # `if self.__db_conn.commit():` always took the same branch, and the
        # surrounding `while True` never terminated.  PEP 249 cursor.rowcount
        # tells us whether a row actually matched the given id.
        if self.__db_cursor.rowcount:
            logger.info("UPDATE SUBJECT SUCCESSFULLY!")
        else:
            logger.error("UPDATE SUBJECT FAILED!")
            print('ID NOT FOUND')

    def __delete_data(self):
        """Delete one subject identified by its id."""
        print("--DELETE SUBJECT--")
        sub_ID_input = input("ENTER SUBJECT ID: ")
        sql_cmd = "DELETE FROM subject WHERE subject_id = %s"
        self.__db_cursor.execute(sql_cmd, [sub_ID_input])
        self.__db_conn.commit()
        # Same commit()/rowcount fix and loop-termination fix as update.
        if self.__db_cursor.rowcount:
            logger.info("DELETE SUBJECT SUCCESSFULLY!")
        else:
            logger.error("DELETE SUBJECT FAILED!")
            print('ID NOT FOUND')

    def __search_data(self):
        """Sub-menu: find subjects by id or by name; 0 goes back."""
        print("--FIND SUBJECT INFORMATION--")
        while True:
            print("1. FIND SUBJECT BY ID")
            print("2. FIND SUBJECT BY NAME")
            print("0. BACK")
            key = input("Enter your choice: ")
            if key == '1':
                self.__search_sub_byID()
            elif key == '2':
                self.__search_sub_byName()
            elif key == '0':
                print("You have exited the program")
                return
            else:
                print("Invalid choice")
                print("Please try again")

    def __search_sub_byID(self):
        """Print the subject matching a given id, if any."""
        print("--FIND SUBJECT INFORMATION--")
        sub_ID_input = input("ENTER SUBJECT ID: ")
        # BUG FIX: the original queried the unrelated `students` table and
        # looped forever with no exit condition.
        sql_cmd = "SELECT * FROM subject WHERE subject_id = %s"
        self.__db_cursor.execute(sql_cmd, [sub_ID_input])
        results = self.__db_cursor.fetchall()
        if not results:
            print('ID NOT FOUND')
            return
        for row in results:
            print("--SUBJECT INFORMATION--")
            print(f"SUBJECT ID: {row[0]}")
            print(f"SUBJECT NAME: {row[1]}")

    def __search_sub_byName(self):
        """Print all subjects matching a given name, if any."""
        print("--FIND SUBJECT INFORMATION--")
        sub_name_input = input("ENTER SUBJECT NAME: ")
        # BUG FIX: original queried `students` and indexed the row tuple with
        # the input *string* (row[sub_name_input]), which raises TypeError.
        sql_cmd = "SELECT * FROM subject WHERE subject_name = %s"
        self.__db_cursor.execute(sql_cmd, [sub_name_input])
        results = self.__db_cursor.fetchall()
        if not results:
            print('SUB NOT FOUND')
            return
        for row in results:
            print("--SUBJECT INFORMATION--")
            print(f"SUBJECT ID: {row[0]}")
            print(f"SUBJECT NAME: {row[1]}")
| thanhtugn/python_core_thanhtugn | Lesson_14/object/subject.py | subject.py | py | 5,166 | python | en | code | 1 | github-code | 36 |
5877766545 | # -*- coding:utf-8 -*-
"""
说明:
这里实现了单篇文章和专栏的爬取。
article 根据article_id发起网络请求,返回的json文件中包含文章的基本信息和文章主体内容,解析文章的基本信息生成一个msg
字典对象,再将文章主体解析成BeautifulSoup对象,连同msg字典一起交给document模块下的Article解析并保存成markdown文件。
根据专栏id获得专栏下的文章所有文章id后,逐一看成是单一的文章,由article爬取。
"""
from zhihu_spider.util import net, document
from zhihu_spider.util import const
import re
import os
from zhihu_spider.util import timer
from bs4 import BeautifulSoup
import zhihu_spider
__all__ = ['article', 'articles']
TIME_LIMIT_FLAG = False
def articles(column_id, time_limit, topic_limit, save_path):
    """Crawl all qualifying articles of a zhihu column.

    Args:
        column_id    column identifier used by the zhihu API
        time_limit   age cutoff forwarded to article(); falsy means
                     "fetch all history"
        topic_limit  collection of topic names to keep (empty keeps all)
        save_path    directory of previous results; when it already contains
                     files, only recent articles are fetched (incremental run)
    Return:
        list of article dicts as produced by article()
    """
    global TIME_LIMIT_FLAG
    # Incremental run: cap the number of ids fetched.  The *7 factor assumes
    # at most ~7 posts per time unit -- heuristic from the original author.
    if bool(time_limit) and os.path.exists(save_path) and bool(os.listdir(save_path)):
        num_limit = (int(time_limit) + 1) * 7
    else:
        num_limit = 0
        time_limit = 0
    articles_list = articles_id(column_id, num_limit)
    # Per-id retry counter for transient request failures.
    request_times = {i: 0 for i in articles_list}
    ars = []
    while len(articles_list) != 0:
        article_id = articles_list.pop(0)
        try:
            ar = article(article_id, topic_limit, time_limit)
            if ar:
                ars.append(ar)
        except ValueError:
            # Transient failure: requeue this id, at most 5 attempts.
            if request_times.get(article_id) < 5:
                articles_list.append(article_id)
                # BUG FIX: the original did `request_times[articles_id] += 1`,
                # keying the dict with the *function object* articles_id
                # (KeyError) instead of the current article id.
                request_times[article_id] += 1
        except IndexError:
            # Title did not match the paper-digest pattern; not our article.
            continue
        timer.random_sleep(end=zhihu_spider.SLEEP)
        if TIME_LIMIT_FLAG:
            break
    # Report ids that exhausted all retries.
    for article_id, times in request_times.items():
        if times >= 5:
            print(net.article_spider_url(article_id))
    return ars
def articles_id(column_id, num_limit):
    """Collect the article ids of a column by paging through the column API.

    Args:
        column_id  zhihu column id
        num_limit  0 means no cap; otherwise stop paging once more than
                   num_limit ids were gathered, and truncate to num_limit
    Return:
        list of article-id strings, in the order returned by the API
    Raise:
        ValueError when a page request returned no response
    """
    article_list = list()
    offset = zhihu_spider.Controller()
    while not offset.is_end():
        response = net.column_spider(column_id, offset.next_offset(), limit=100)
        if response is None:
            raise ValueError('Response is None')
        content = response.text
        # Scrape totals and ids with regexes on the raw JSON text rather
        # than parsing it as JSON.
        totals = re.search(r'"totals":\W(\d+)', content).group(1)
        offset.totals = int(totals)
        article_id_list = re.findall(r'"id":\W(\d+)', content)
        offset.increase(len(article_id_list))
        article_list.extend(article_id_list)
        article_id_list.clear()
        # Throttle between page requests.
        timer.random_sleep(end=zhihu_spider.SLEEP)
        if bool(num_limit) and len(article_list) > num_limit:
            offset.to_stop()
    if num_limit:
        article_list = article_list[:num_limit]
    return article_list
def article(article_id, topic_limit, time_limit):
    """Fetch a single article and reduce it to a small dict.

    Returns {'topic', 'create_date', 'content'} for a qualifying
    paper-digest post, or None when filtered out by topic_limit /
    time_limit.  Side effect: sets the module-level TIME_LIMIT_FLAG when
    the post is older than time_limit (units per timer.time_diff --
    presumably days; confirm).
    Raise:
        ValueError  when the HTTP request returned no response
        IndexError  when the title does not match the paper-digest pattern
                    (the caller treats this as "skip this article")
    """
    global TIME_LIMIT_FLAG
    response = net.article_spider(article_id)
    if response is not None:
        response_json = response.json()
        # findall() is empty for non-digest titles -> IndexError (see docstring).
        topic = re.findall(r'(\w*?)每?日?论文速递', response_json['title'])[0]
        create_date = timer.timestamp_to_date(response_json['created'])
        time_diff = timer.time_diff(create_date)
        # Too old: flag the crawl loop to stop (ids arrive newest-first --
        # presumably; confirm against articles_id ordering).
        if bool(time_limit) and time_diff > int(time_limit):
            TIME_LIMIT_FLAG = True
            return
        elif len(topic_limit) > 0 and topic not in topic_limit:
            return
        content = BeautifulSoup(response_json['content'], 'lxml').body
        article_dict = {'topic': topic,
                        'create_date': create_date,
                        'content': str(content.contents)}
        return article_dict
    else:
        raise ValueError('Response is None')
def article_msg(content):
    """Build a document.Meta descriptor from a raw article JSON dict."""
    author_info = content['author']
    meta_kwargs = {
        'author': author_info['name'],
        'author_avatar_url': author_info['avatar_url'],
        'author_page': const.AUTHOR_PAGE_URL.format(author_info['url_token']),
        'title': content['title'],
        'original_url': const.ARTICLE_URL.format(content['id']),
        'created_date': timer.timestamp_to_date(content['created']),
        'background': content['image_url'],
    }
    return document.Meta(**meta_kwargs)
70571115625 | import unittest
import logging
import pdb
import random
def binary_search_recursive(l, value, low=0, high=None):
    """Return True if value is in the sorted list l (searching l[low:high+1]).

    high=None means "search to the end of the list".
    """
    if high is None:
        high = len(l) - 1
    if high < low:
        return False
    # BUG FIX: use floor division.  Under Python 3 the original
    # `(low + high) / 2` produced a float, which is not a valid list index.
    middle = (low + high) // 2
    if value < l[middle]:
        return binary_search_recursive(l, value, low, middle - 1)
    elif value > l[middle]:
        return binary_search_recursive(l, value, middle + 1, high)
    else:
        return True
def binary_search_iterative(l, value):
    """Return True if value is in the sorted list l (iterative search)."""
    lo, hi = 0, len(l) - 1
    while lo <= hi:
        # BUG FIX: use floor division.  Under Python 3 the original
        # `(hi + lo) / 2` produced a float, which is not a valid list index.
        middle = (hi + lo) // 2
        if value > l[middle]:
            lo = middle + 1
        elif value < l[middle]:
            hi = middle - 1
        else:
            return True
    return False
class TestSearch(unittest.TestCase):
    """Randomized cross-check of both binary-search implementations."""

    def setUp(self):
        # Build ten random sorted lists of up to 19 unique values below 100.
        self.lists = []
        for _ in range(10):
            size = random.randrange(20)
            self.lists.append(sorted(random.sample(range(100), size)))

    def test_search(self):
        for current in self.lists:
            probe = random.randrange(100)
            if probe in current:
                self.assertTrue(binary_search_iterative(current, probe))
                self.assertTrue(binary_search_recursive(current, probe))
            else:
                self.assertFalse(binary_search_iterative(current, probe))
                self.assertFalse(binary_search_recursive(current, probe))
if __name__ == '__main__':
    #pdb.run("binary_search_iterative(2, [3, 4, 5])")
    # Run the randomized unit tests when executed as a script.
    unittest.main()
| charlax/IntroductionToAlgorithms | Chapter2/exercise-2.3-4.py | exercise-2.3-4.py | py | 1,869 | python | en | code | 4 | github-code | 36 |
74289510182 | import re # regex
from animius.Utils import sentence_to_index
class Parse:
    """Static helpers for loading and tokenizing chatbot training corpora."""

    @staticmethod
    def cornell_cleanup(sentence):
        """Lower-case a sentence, strip HTML tags and CR/LF characters."""
        # clean up html tags
        sentence = re.sub(r'<.*?>', '', sentence.lower())
        # clean up \n and \r
        return sentence.replace('\n', '').replace('\r', '')

    @staticmethod
    def load_cornell(path_conversations, path_lines):
        """Load the Cornell Movie-Dialogs corpus.

        Args:
            path_conversations  movie_conversations.txt (fields separated
                                by " +++$+++ ", last field a list of line ids)
            path_lines          movie_lines.txt (line id, character, movie,
                                ..., utterance)
        Return:
            (questions, responses): parallel lists where responses[i] is the
            line that follows questions[i] within a conversation.
        """
        movie_lines = {}

        # NOTE(review): files are opened without close()/with -- relies on
        # interpreter cleanup.
        lines_file = open(path_lines, 'r', encoding="iso-8859-1")
        for line in lines_file:
            line = line.split(" +++$+++ ")
            line_number = line[0]
            character = line[1]
            movie = line[2]
            sentence = line[-1]
            if movie not in movie_lines:
                movie_lines[movie] = {}
            # Index every utterance by movie + line id for the second pass.
            movie_lines[movie][line_number] = (character, sentence)

        questions = []
        responses = []

        conversations_file = open(path_conversations, 'r', encoding="iso-8859-1")
        for line in conversations_file:
            line = line.split(" +++$+++ ")
            movie = line[2]
            line_numbers = []
            # Last field looks like "['L1', 'L2', ...]": strip brackets and
            # the quotes around each id.
            for num in line[3][1:-2].split(", "):
                line_numbers.append(num[1:-1])

            # (A historical alternative that re-grouped consecutive lines per
            # speaker was removed here: the Cornell data set already places
            # the lines of the same character together.)

            # Each adjacent pair of lines becomes one (question, response).
            for i in range(len(line_numbers) - 1):
                questions.append(Parse.cornell_cleanup(movie_lines[movie][line_numbers[i]][1]))
                responses.append(Parse.cornell_cleanup(movie_lines[movie][line_numbers[i + 1]][1]))

        return questions, responses

    # Used for Marsan-Ma-zz/chat_corpus
    # can also be adopted for any other file w/ a sentence on each line
    @staticmethod
    def load_twitter(path):
        """Split a file of alternating lines into (inputs, outputs) lists."""
        lines_x = []
        lines_y = []
        lines = open(path, 'r', encoding='utf-8')
        # Odd lines are prompts (x), even lines their replies (y).
        is_x = True
        for line in lines:
            if is_x:
                lines_x.append(line.lower())
            else:
                lines_y.append(line.lower())
            is_x = not is_x

        return lines_x, lines_y

    @staticmethod
    def split_sentence(sentence):
        """Tokenize a sentence into words and punctuation marks.

        Apostrophes are removed first, so "don't" becomes "dont".
        """
        # collect independent words
        result = re.findall(r"[\w]+|[.,!?;\"\']", sentence.replace('\'', ''))
        return result

    @staticmethod
    def split_data(data):
        """Tokenize every sentence in `data`; returns a list of token lists."""
        result = []
        for line in data:
            result.append(Parse.split_sentence(line))
        return result

    @staticmethod
    def data_to_index(data_x, data_y, word_to_index, max_seq=20):
        """Convert token sequences into padded index sequences.

        Return:
            (x, y, x_length, y_length, y_target) where y_target is y shifted
            left by one position with <EOS> appended (decoder target).
        Unknown-token counts from sentence_to_index are discarded.
        """
        x, x_length, x_unk = sentence_to_index(data_x, word_to_index, max_seq=max_seq, go=True, eos=True)
        y, y_length, y_unk = sentence_to_index(data_y, word_to_index, max_seq=max_seq, go=True, eos=True)
        y_target = y[1:]
        y_target.append(word_to_index["<EOS>"])
        return x, y, x_length, y_length, y_target
| gundamMC/animius | animius/Chatbot/ParseData.py | ParseData.py | py | 4,106 | python | en | code | 16 | github-code | 36 |
43051538216 | import numpy as np
import networkx as nx
import random as pr
import matplotlib.pyplot as pl
import pp
import time
import copy
import sys
import os
import PIL
from Tkinter import *
import tkFileDialog
import tkSimpleDialog
import tkMessageBox
from fomite_ABM import *
from math import *
from PIL import Image
from PIL import ImageTk
global image1
global image2
def vp_start_gui():
    '''Starting point when module is the main routine.'''
    global val, w, root, top, mod, dummy1, dummy2, dummy3, dummy4
    global parameters
    # mod selects which generated figure Button5 shows (0 = none yet).
    mod = 0
    # All model parameters start zeroed; filled in by but3Press() from the GUI.
    parameters = {'contactRateHH':0.0, 'contactRateHF':0.0, 'pickupFr':0.0, 'transferFr':0.0, 'faceTouchRate':0.0, 'infProb':0.0, 'washRate':0.0, 'incubationRate':0.0, 'recoveryRate':0.0, 'sheddingRate':0.0, 'shedding':0.0, 'dieOff':0.0, 'deconFreq':None, 'dayLength':0.0}
    root = Tk()
    top = New_Toplevel_1 (root)
    root.protocol('WM_DELETE_WINDOW',lambda: close())
    # Placeholder (empty) figure files so close() can always close and
    # remove them -- presumably overwritten by the simulation's plots;
    # confirm against gen().
    dummy1 = open('fig1.png', 'w')
    dummy2 = open('fig2.png', 'w')
    dummy3 = open('fig3.png', 'w')
    dummy4 = open('fig4.png', 'w')
    root.resizable(width=False, height=False)
    root.mainloop()
def close():
    """Window-close handler: release the figure file handles, delete the
    figure files, then tear down the Tk root."""
    for handle in (dummy1, dummy2, dummy3, dummy4):
        handle.close()
    for figure_name in ('fig1.png', 'fig2.png', 'fig3.png', 'fig4.png'):
        os.remove(figure_name)
    root.destroy()
class New_Toplevel_1:
    """Main SIWR window: 14 labelled parameter entries, action buttons and
    a large image button (Button5) that displays generated figures."""

    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        _bgcolor = '#d9d9d9'  # X11 color: 'gray85'
        _fgcolor = '#000000'  # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#d9d9d9' # X11 color: 'gray85'
        font10 = "-family {DejaVu Sans Mono} -size 15 -weight normal " \
            "-slant roman -underline 0 -overstrike 1"
        font11 = "-family {DejaVu Sans Mono} -size 15 -weight bold " \
            "-slant roman -underline 0 -overstrike 0"
        font9 = "-family {DejaVu Sans Mono} -size 15 -weight normal " \
            "-slant roman -underline 0 -overstrike 0"
        # Module-level defaults shown in the Days / Agents entries.
        global days, agents
        days = 10
        agents = 20
        top.geometry("1031x593+89+80")
        top.title('Maize & Blue SIWR v2.11')
        top.configure(background="#135bd9")
        top.configure(highlightcolor="black")
        top.configure(cursor='pencil')

        # --- Parameter labels (left column) ---
        self.Label1 = Label(top)
        self.Label1.place(relx=0.01, rely=0.03, height=18, width=126)
        self.Label1.configure(activebackground="#135bd9")
        self.Label1.configure(activeforeground="white")
        self.Label1.configure(background="#135bd9")
        self.Label1.configure(text='''Contact Rate HH''')

        self.Label15 = Label(top)
        self.Label15.place(relx=0.03, rely=0.07, height=18, width=126)
        self.Label15.configure(activebackground="#f9f9f9")
        self.Label15.configure(background="#135bd9")
        self.Label15.configure(text='''Contact Rate HF''')

        self.Label14 = Label(top)
        self.Label14.place(relx=-.01, rely=0.11, height=18, width=126)
        self.Label14.configure(activebackground="#f9f9f9")
        self.Label14.configure(background="#135bd9")
        self.Label14.configure(text='''Pickup FR''')

        self.Label5 = Label(top)
        self.Label5.place(relx=0.015, rely=0.15, height=18, width=126)
        self.Label5.configure(activebackground="#f9f9f9")
        self.Label5.configure(background="#135bd9")
        self.Label5.configure(text='''Transfer FR''')

        self.Label4 = Label(top)
        self.Label4.place(relx=0.01, rely=0.19, height=18, width=126)
        self.Label4.configure(activebackground="#f9f9f9")
        self.Label4.configure(background="#135bd9")
        self.Label4.configure(text='''Face Touch Rate''')

        self.Label6 = Label(top)
        self.Label6.place(relx=0.008, rely=0.23, height=18, width=126)
        self.Label6.configure(activebackground="#f9f9f9")
        self.Label6.configure(background="#135bd9")
        self.Label6.configure(text='''INF Prob''')

        self.Label7 = Label(top)
        self.Label7.place(relx=-.01, rely=0.27, height=18, width=126)
        self.Label7.configure(activebackground="#f9f9f9")
        self.Label7.configure(background="#135bd9")
        self.Label7.configure(text='''Wash Rate''')

        self.Label8 = Label(top)
        self.Label8.place(relx=0.03, rely=0.31, height=18, width=126)
        self.Label8.configure(activebackground="#f9f9f9")
        self.Label8.configure(background="#135bd9")
        self.Label8.configure(text='''Incubation Rate''')

        self.Label9 = Label(top)
        self.Label9.place(relx=0.003, rely=0.35, height=18, width=126)
        self.Label9.configure(activebackground="#f9f9f9")
        self.Label9.configure(background="#135bd9")
        self.Label9.configure(text='''Recovery Rate''')

        self.Label10 = Label(top)
        self.Label10.place(relx=0.027, rely=0.39, height=18, width=126)
        self.Label10.configure(activebackground="#f9f9f9")
        self.Label10.configure(background="#135bd9")
        self.Label10.configure(text='''Shedding Rate''')

        self.Label11 = Label(top)
        self.Label11.place(relx=-.01, rely=0.43, height=18, width=126)
        self.Label11.configure(activebackground="#f9f9f9")
        self.Label11.configure(background="#135bd9")
        self.Label11.configure(text='''Shedding''')

        self.Label12 = Label(top)
        self.Label12.place(relx=0.00, rely=0.47, height=18, width=126)
        self.Label12.configure(activebackground="#f9f9f9")
        self.Label12.configure(background="#135bd9")
        self.Label12.configure(text='''Dieoff''')

        self.Label3 = Label(top)
        self.Label3.place(relx=-.003, rely=0.51, height=18, width=126)
        self.Label3.configure(activebackground="#f9f9f9")
        self.Label3.configure(background="#135bd9")
        self.Label3.configure(text='''Decon Freq''')

        self.Label13 = Label(top)
        self.Label13.place(relx=0.018, rely=0.55, height=18, width=126)
        self.Label13.configure(activebackground="#f9f9f9")
        self.Label13.configure(background="#135bd9")
        self.Label13.configure(text='''Day Length''')

        # --- Parameter entry fields (read in order by take()) ---
        self.Entry1 = Entry(top)
        self.Entry1.place(relx=0.17, rely=0.03, relheight=0.03, relwidth=0.14)
        self.Entry1.configure(background="white")
        self.Entry1.configure(font="TkFixedFont")
        self.Entry1.configure(selectbackground="#c4c4c4")

        self.Entry2 = Entry(top)
        self.Entry2.place(relx=0.19, rely=0.07, relheight=0.03, relwidth=0.14)
        self.Entry2.configure(background="white")
        self.Entry2.configure(font="TkFixedFont")
        self.Entry2.configure(selectbackground="#c4c4c4")

        self.Entry3 = Entry(top)
        self.Entry3.place(relx=0.17, rely=0.11, relheight=0.03, relwidth=0.14)
        self.Entry3.configure(background="white")
        self.Entry3.configure(font="TkFixedFont")
        self.Entry3.configure(selectbackground="#c4c4c4")

        self.Entry4 = Entry(top)
        self.Entry4.place(relx=0.19, rely=0.15, relheight=0.03, relwidth=0.14)
        self.Entry4.configure(background="white")
        self.Entry4.configure(font="TkFixedFont")
        self.Entry4.configure(selectbackground="#c4c4c4")

        self.Entry5 = Entry(top)
        self.Entry5.place(relx=0.17, rely=0.19, relheight=0.03, relwidth=0.14)
        self.Entry5.configure(background="white")
        self.Entry5.configure(font="TkFixedFont")
        self.Entry5.configure(selectbackground="#c4c4c4")

        self.Entry6 = Entry(top)
        self.Entry6.place(relx=0.19, rely=0.23, relheight=0.03, relwidth=0.14)
        self.Entry6.configure(background="white")
        self.Entry6.configure(font="TkFixedFont")
        self.Entry6.configure(selectbackground="#c4c4c4")

        self.Entry7 = Entry(top)
        self.Entry7.place(relx=0.17, rely=0.27, relheight=0.03, relwidth=0.14)
        self.Entry7.configure(background="white")
        self.Entry7.configure(font="TkFixedFont")
        self.Entry7.configure(selectbackground="#c4c4c4")

        self.Entry8 = Entry(top)
        self.Entry8.place(relx=0.19, rely=0.31, relheight=0.03, relwidth=0.14)
        self.Entry8.configure(background="white")
        self.Entry8.configure(font="TkFixedFont")
        self.Entry8.configure(selectbackground="#c4c4c4")

        self.Entry9 = Entry(top)
        self.Entry9.place(relx=0.17, rely=0.35, relheight=0.03, relwidth=0.14)
        self.Entry9.configure(background="white")
        self.Entry9.configure(font="TkFixedFont")
        self.Entry9.configure(selectbackground="#c4c4c4")

        self.Entry10 = Entry(top)
        self.Entry10.place(relx=0.19, rely=0.39, relheight=0.03, relwidth=0.14)
        self.Entry10.configure(background="white")
        self.Entry10.configure(font="TkFixedFont")
        self.Entry10.configure(selectbackground="#c4c4c4")

        self.Entry11 = Entry(top)
        self.Entry11.place(relx=0.17, rely=0.43, relheight=0.03, relwidth=0.14)
        self.Entry11.configure(background="white")
        self.Entry11.configure(font="TkFixedFont")
        self.Entry11.configure(selectbackground="#c4c4c4")

        self.Entry12 = Entry(top)
        self.Entry12.place(relx=0.19, rely=0.47, relheight=0.03, relwidth=0.14)
        self.Entry12.configure(background="white")
        self.Entry12.configure(font="TkFixedFont")
        self.Entry12.configure(selectbackground="#c4c4c4")

        self.Entry13 = Entry(top)
        self.Entry13.place(relx=0.17, rely=0.51, relheight=0.03, relwidth=0.14)
        self.Entry13.configure(background="white")
        self.Entry13.configure(font="TkFixedFont")
        self.Entry13.configure(selectbackground="#c4c4c4")

        self.Entry14 = Entry(top)
        self.Entry14.place(relx=0.19, rely=0.55, relheight=0.03, relwidth=0.14)
        self.Entry14.configure(background="white")
        self.Entry14.configure(font="TkFixedFont")
        self.Entry14.configure(selectbackground="#c4c4c4")

        # --- Action buttons; handlers are module-level butNPress functions ---
        self.Button1 = Button(top)
        self.Button1.place(relx=0.02, rely=0.65, height=26, width=157)
        self.Button1.configure(activebackground="#d9d9d9")
        self.Button1.configure(background="#d9d938")
        self.Button1.configure(font=font9)
        self.Button1.configure(text='''Save''')
        self.Button1.configure(cursor='crosshair')
        self.Button1.configure(command=lambda: but1Press())

        self.Button2 = Button(top)
        self.Button2.place(relx=0.18, rely=0.65, height=26, width=157)
        self.Button2.configure(activebackground="#d9d9d9")
        self.Button2.configure(background="#d9d938")
        self.Button2.configure(font=font9)
        self.Button2.configure(text='''Load''')
        self.Button2.configure(cursor='crosshair')
        self.Button2.configure(command=lambda: but2Press())

        self.Button3 = Button(top)
        self.Button3.place(relx=0.02, rely=0.71, height=26, width=157)
        self.Button3.configure(activebackground="#d9d9d9")
        self.Button3.configure(background="#d9d938")
        self.Button3.configure(font=font11)
        self.Button3.configure(text='''Generate''')
        self.Button3.configure(cursor='crosshair')
        self.Button3.configure(command=lambda: but3Press())

        self.Button4 = Button(top)
        self.Button4.place(relx=0.18, rely=0.71, height=26, width=157)
        self.Button4.configure(activebackground="#d9d9d9")
        self.Button4.configure(background="#d9d938")
        self.Button4.configure(font=font10)
        self.Button4.configure(text='''Clear''')
        self.Button4.configure(cursor='crosshair')
        self.Button4.configure(command=lambda: but4Press())

        self.Button6 = Button(top)
        self.Button6.place(relx=0.02, rely=0.80, height=26, width=322)
        self.Button6.configure(activebackground="#d9d9d9")
        self.Button6.configure(background="#d9d938")
        self.Button6.configure(font=font9)
        self.Button6.configure(text='''Economic Analysis''')
        self.Button6.configure(cursor='crosshair')
        self.Button6.configure(command=lambda: but6Press())

        self.Button7 = Button(top)
        self.Button7.place(relx=0.02, rely=0.86, height=26, width=322)
        self.Button7.configure(activebackground="#d9d9d9")
        self.Button7.configure(background="#d9d938")
        self.Button7.configure(font=font9)
        self.Button7.configure(text='''Curve Interpolation''')
        self.Button7.configure(cursor='crosshair')
        self.Button7.configure(command=lambda: but7Press())

        self.Button8 = Button(top)
        self.Button8.place(relx=0.02, rely=0.92, height=26, width=322)
        self.Button8.configure(activebackground="#d9d9d9")
        self.Button8.configure(background="#d9d938")
        self.Button8.configure(font=font9)
        self.Button8.configure(text='''Oppa Gangnam Style''')
        self.Button8.configure(cursor='crosshair')
        self.Button8.configure(command=lambda: but8Press())

        # --- Run-length controls (Days / Agents) ---
        self.Label2 = Label(top)
        self.Label2.place(relx=0.4, rely=0.03, height=18, width=33)
        self.Label2.configure(activebackground="#f9f9f9")
        self.Label2.configure(background="#135bd9")
        self.Label2.configure(text='''Days''')

        self.Entry15 = Entry(top)
        self.Entry15.place(relx=0.44, rely=0.03, relheight=0.03, relwidth=0.14)
        self.Entry15.configure(background="white")
        self.Entry15.configure(font="TkFixedFont")
        self.Entry15.configure(selectbackground="#c4c4c4")
        self.Entry15.insert(0,days)

        self.Label16 = Label(top)
        self.Label16.place(relx=0.6, rely=0.03, height=18, width=51)
        self.Label16.configure(activebackground="#f9f9f9")
        self.Label16.configure(background="#135bd9")
        self.Label16.configure(text='''Agents''')

        self.Entry16 = Entry(top)
        self.Entry16.place(relx=0.656, rely=0.03, relheight=0.03, relwidth=0.14)
        self.Entry16.configure(background="white")
        self.Entry16.configure(font="TkFixedFont")
        self.Entry16.configure(selectbackground="#c4c4c4")
        self.Entry16.insert(0,agents)

        # Large image button used as the figure display area (see but5Press).
        self.Button5 = Button(top)
        self.Button5.place(relx=0.4, rely=0.12, height=486, width=587)
        self.Button5.configure(activebackground="#d9d9d9")
        self.Button5.configure(state=ACTIVE)
        self.Button5.configure(cursor='exchange')
        self.Button5.configure(command=lambda: but5Press())

    def take(self):
        """Read the 14 parameter entries (as strings) into self.entries and
        refresh the module-level days/agents counts from Entry15/Entry16."""
        global days, agents
        self.entries = []
        self.entries.append(self.Entry1.get())
        self.entries.append(self.Entry2.get())
        self.entries.append(self.Entry3.get())
        self.entries.append(self.Entry4.get())
        self.entries.append(self.Entry5.get())
        self.entries.append(self.Entry6.get())
        self.entries.append(self.Entry7.get())
        self.entries.append(self.Entry8.get())
        self.entries.append(self.Entry9.get())
        self.entries.append(self.Entry10.get())
        self.entries.append(self.Entry11.get())
        self.entries.append(self.Entry12.get())
        self.entries.append(self.Entry13.get())
        self.entries.append(self.Entry14.get())
        days = int(self.Entry15.get())
        agents = int(self.Entry16.get())

    def give(self, vals=[]):
        """Populate the 14 parameter entries from vals (list of strings),
        in the same order take() reads them."""
        # NOTE(review): debug print left in by the author.
        print(vals)
        self.Entry1.insert(0,vals[0])
        self.Entry2.insert(0,vals[1])
        self.Entry3.insert(0,vals[2])
        self.Entry4.insert(0,vals[3])
        self.Entry5.insert(0,vals[4])
        self.Entry6.insert(0,vals[5])
        self.Entry7.insert(0,vals[6])
        self.Entry8.insert(0,vals[7])
        self.Entry9.insert(0,vals[8])
        self.Entry10.insert(0,vals[9])
        self.Entry11.insert(0,vals[10])
        self.Entry12.insert(0,vals[11])
        self.Entry13.insert(0,vals[12])
        self.Entry14.insert(0,vals[13])

    def _set_out(self, val, agents):
        # Store simulation output totals -- presumably read back by the
        # economic-analysis window; confirm against fomite_ABM_econGUI.
        self._total = val
        self._agents = agents
def but1Press():
    """Save handler: write the current parameter entries to a user-named
    .siwr file (space-separated, in take() order)."""
    dialog = tkSimpleDialog.askstring('SIWR Input', 'Input a file name:')
    # BUG FIX: askstring returns None when the dialog is cancelled; the
    # original then crashed on `dialog += '.siwr'`.
    if not dialog:
        return
    dialog += '.siwr'
    top.take()
    # `with` guarantees the file is flushed and closed (the original leaked
    # the handle).
    with open(dialog, 'w') as out:
        for x in top.entries:
            out.write(x)
            out.write(' ')
def but2Press():
    """Load handler: read a space-separated .siwr file chosen by the user
    and fill the parameter entries from it."""
    name = tkFileDialog.askopenfilename()
    # BUG FIX: askopenfilename returns '' (or an empty tuple) on cancel;
    # the original then crashed inside open().
    if not name:
        return
    # `with` guarantees the handle is closed (the original leaked it).
    with open(name, 'r') as out:
        params = out.read().split()
    top.give(params)
def but3Press():
    """Copy the GUI entries into the global ``parameters`` dict and run
    the model via gen().

    Fixes: the twelve copy-pasted assignments are replaced by a
    data-driven loop, and the dead commented-out except clause is removed.
    """
    global parameters
    top.take()
    # Entry index -> parameter name, in widget order (entries 0-11).
    keys = ('contactRateHH', 'contactRateHF', 'pickupFr', 'transferFr',
            'faceTouchRate', 'infProb', 'washRate', 'incubationRate',
            'recoveryRate', 'sheddingRate', 'shedding', 'dieOff')
    for i, key in enumerate(keys):
        parameters[key] = float(top.entries[i])
    # A decontamination frequency of 0 means "never decontaminate".
    decon = float(top.entries[12])
    parameters['deconFreq'] = decon if decon != 0 else None
    parameters['dayLength'] = float(top.entries[13])
    gen()
def but4Press():
    """Clear all fourteen parameter entry widgets."""
    for idx in range(1, 15):
        getattr(top, 'Entry{}'.format(idx)).delete(0, END)
def but5Press():
    """Toggle the preview button between the two rendered figures.

    mod 1 currently shows fig1 and switches to fig2; mod 2 does the
    reverse.  Any other value leaves the display untouched.
    """
    global mod
    if mod == 1:
        top.Button5.configure(image=image2)
    elif mod == 2:
        top.Button5.configure(image=image1)
    else:
        return
    mod = 3 - mod
def but6Press():
    # Launch the economics sub-GUI on top of the current window.
    # Imported lazily here, presumably to avoid a circular import at
    # module load time -- TODO confirm against fomite_ABM_econGUI.
    from fomite_ABM_econGUI import vp_start_econgui
    vp_start_econgui(top)
def but7Press():
    #polynomial interpolation lagrange
    # Interpolate the last run's epidemic curves onto a fine grid with a
    # Lagrange polynomial and re-render the preview figures.
    from Numericals import lagrange_interpolation
    from matplotlib.pylab import arange
    # NOTE(review): the bare except below swallows *every* error
    # (including bugs such as NameError) behind the "No Curve" warning.
    try:
        discretization_range = arange(0,days-1,.01)
        incubating_out = []
        symptomatic_out = []
        # Output columns: [-1] = day, [2] = incubating, [3] = symptomatic,
        # [4] = fomite contamination (matches the plots in gen()).
        xvals = [x[-1] for x in complete_output]
        inyvals = [x[2] for x in complete_output]
        symyvals = [x[3] for x in complete_output]
        conyvals = [x[4] for x in complete_output]
        incubating_out = lagrange_interpolation(discretization_range, xvals, inyvals)
        symptomatic_out = lagrange_interpolation(discretization_range, xvals, symyvals)
        contamination_out = lagrange_interpolation(discretization_range, xvals, conyvals)
        print(xvals)
        print(incubating_out)
        global image1, image2, mod
        pl.clf()
        pl.plot(discretization_range,symptomatic_out,label='Symptomatic')
        pl.plot(discretization_range,incubating_out,label='Incubating')
        pl.legend()
        pl.ylabel('Population')
        pl.xlabel('Days')
        pl.savefig('fig1')
        # NOTE(review): no clf() between the two saves, so fig2 also
        # contains the symptomatic/incubating curves -- confirm intended.
        pl.plot(discretization_range,contamination_out, label=None)
        pl.ylabel('Fomite contamination')
        pl.xlabel('Days')
        pl.legend().remove()
        pl.savefig('fig2')
        pl.clf()
        # Resize both figures to the preview button's pixel size.
        # NOTE(review): PIL.Image.ANTIALIAS is deprecated/removed in
        # Pillow >= 10 (use Image.LANCZOS) -- confirm the pinned Pillow.
        img = Image.open('fig1.png')
        img = img.resize((587,486), PIL.Image.ANTIALIAS)
        img.save('fig1.png')
        img = Image.open('fig2.png')
        img = img.resize((587,486), PIL.Image.ANTIALIAS)
        img.save('fig2.png')
        image1 = ImageTk.PhotoImage(file='fig1.png')
        image2 = ImageTk.PhotoImage(file='fig2.png')
        mod = 1
        top.Button5.configure(image=image1)
    except:
        tkMessageBox.showwarning("Warning!","No Curve to Interpolate!")
def but8Press():
    # Load a sick-child-care TSV and convert its cases into model agents.
    print('gangnam style')  # NOTE(review): leftover debug print.
    #retrieve TSV and integrate to model
    name = tkFileDialog.askopenfilename()
    from sickchildcare_parser import cases_to_agents
    # Arguments ('all', 'e', 5) -- semantics defined by
    # sickchildcare_parser.cases_to_agents; TODO confirm.
    # NOTE(review): this rebinds a local `agents`, it does NOT update the
    # global `agents` used by gen() (no `global` declaration here).
    agents = cases_to_agents(name, 'all', 'e', 5)
    print(agents)
    for i in agents:
        print(i.data)
def gen():
    """Build the contact network and agent population, run the fomite ABM,
    and render the resulting epidemic curves into the GUI preview.

    Reads the module globals ``parameters``, ``days`` and ``agents``
    (populated by take()/but3Press()) and sets ``complete_output``,
    ``image1``, ``image2`` and ``mod``.
    """
    from fomite_ABM import Agent, Fomite
    ### A bunch of crap to test run the model
    agentList = []
    fomite = Fomite(id='1f')
    nAgents = agents  # agent count entered in Entry16
    for i in range(nAgents):
        agentList.append(Agent(id=i))
    # Seed the outbreak: agent 1 starts in state 3 and carries contamination.
    agentList[1].state = 3
    #agentList[1].recoveryTime = 7
    agentList[1].contamination = 500
    ## This matrix assumes one fomite that everybody touches
    G = nx.complete_graph(nAgents)
    #print G.edges()
    # NOTE(review): (graph, name, value) is the networkx 1.x signature of
    # set_node_attributes; networkx >= 2.0 expects (graph, values, name)
    # -- confirm the pinned networkx version.
    nx.set_node_attributes(G,'bipartite',1)
    G.add_node(fomite.id,bipartite=0)
    # Connect every agent to the single shared fomite node '1f'.
    for i in range(nAgents):
        G.add_edge(i,'1f')
    #print G.neighbors(1)
    #param = parameters.values()
    #print('param', len(param))
    print(parameters)
    print(days)
    print(agents)
    # Deep-copy so the model cannot mutate the GUI's parameter dict.
    param = copy.deepcopy(parameters)
    #print globals()
    #reformatted parameters as dictionary for retrieval
    #GUI generation
    ### parallelized multiple runs
    '''
    servers = ('local',)
    jobServer = pp.Server(ppservers=servers)
    print 'active nodes', jobServer.get_active_nodes()
    mList = [Model(copy.deepcopy(agentList),[copy.deepcopy(fomite)],28,G,param) for i in range(200)]
    output = []
    start = time.time()
    jobs = [jobServer.submit(run_model,args=(m,),modules=('numpy as np','networkx as nx','random as pr')) for m in mList]
    for job in jobs:
        output.append(job())
    print 'time elapsed', time.time()-start
    output = np.array(output)
    avgOutput = np.mean(output,axis=0)
    stdOutput = np.std(output,axis=0)
    upperBound = avgOutput + stdOutput
    lowerBound = avgOutput - stdOutput
    days = avgOutput[:,-1]
    pl.plot(days,avgOutput[:,3],'b',lw=4,label='Symptomatic')
    pl.fill_between(days,lowerBound[:,3],upperBound[:,3],facecolor='b',lw=0,alpha=0.5)
    pl.plot(days,avgOutput[:,2],'g',lw=4,label='Incubating')
    pl.fill_between(days,lowerBound[:,2],upperBound[:,2],facecolor='g',lw=0,alpha=0.5)
    pl.legend(loc=0)
    pl.ylabel('Symptomatic')
    pl.xlabel('Days')
    pl.ylim(ymin=0)
    pl.figure()
    pl.plot(days,avgOutput[:,4],color='r',lw=4)
    pl.fill_between(days,lowerBound[:,4],upperBound[:,4],facecolor='r',lw=0,alpha=0.5)
    pl.ylabel('Fomite contamination')
    pl.xlabel('Days')
    pl.ylim(ymin=0)
    pl.show()
    '''
    # Model is presumably imported at the top of this file -- TODO confirm.
    m = Model(agentList,[fomite,],days,G,param)
    #print m.contactPairs.edges()
    m.run()
    global complete_output
    complete_output = m.output
    # NOTE(review): despite the original "safe copy by value" comment,
    # this hands the GUI a *reference* to complete_output, not a copy.
    top._set_out(complete_output, agentList)
    out = np.array(complete_output)
    #print out[:,2]
    # Columns: [-1] day, [3] symptomatic, [2] incubating, [4] contamination.
    pl.plot(out[:,-1],out[:,3],label='Symptomatic')
    pl.plot(out[:,-1],out[:,2],label='Incubating')
    pl.legend()
    pl.ylabel('Population')
    pl.xlabel('Days')
    pl.savefig('fig1')
    # NOTE(review): no clf() before this plot, so fig2 also contains the
    # population curves -- confirm intended.
    pl.plot(out[:,-1],out[:,4], label=None)
    pl.ylabel('Fomite contamination')
    pl.xlabel('Days')
    pl.legend().remove()
    pl.savefig('fig2')
    pl.clf()
    global image1
    global image2
    global mod
    mod = 1
    # Resize both saved figures to the preview button's pixel size.
    # NOTE(review): PIL.Image.ANTIALIAS is removed in Pillow >= 10.
    img = Image.open('fig1.png')
    img = img.resize((587,486), PIL.Image.ANTIALIAS)
    img.save('fig1.png')
    img = Image.open('fig2.png')
    img = img.resize((587,486), PIL.Image.ANTIALIAS)
    img.save('fig2.png')
    image1 = ImageTk.PhotoImage(file='fig1.png')
    image2 = ImageTk.PhotoImage(file='fig2.png')
    top.Button5.configure(image=image1)
    #print 'fomite contamination', m.fomite.contamination
    #for a in m.agentList:
    #    print 'state', a.state
    #    print 'contamination', a.contamination
    #for a in m.agentList:
    #    print a.neighbors
# Entry point: build the Tk window and start the event loop.
if __name__ == '__main__':
    vp_start_gui()
| malhayashi/childcarefomites | fomite_ABM_GUI.py | fomite_ABM_GUI.py | py | 23,870 | python | en | code | 0 | github-code | 36 |
19715634890 | import networkx as nx
from networkx.algorithms import isomorphism
import argparse
import pickle
from tqdm import tqdm
"""get nx graphs from remapped_fp_file"""
def get_single_subgraph_nx(frequent_subgraph_lines):
    '''
    Build a networkx DiGraph from one gSpan-format subgraph record.

    frequent_subgraph_lines: list of text lines; the first is the header
    (graph id at token 2, support at token 4), followed by
    "v <id> <label>" vertex lines and "e <from> <to> <label>" edge lines.

    Fixes: the header was split twice and the `support` local was never
    used; the header is now parsed once and the dead binding removed.
    '''
    header = frequent_subgraph_lines[0].strip().split(' ')
    graph_id = header[2]
    # header[4] holds the support count; it is not needed for the graph.
    graph_nx = nx.DiGraph(name=graph_id)
    for line in frequent_subgraph_lines[1:]:
        parsed_line = line.strip().split(' ')
        if line.startswith('v'):
            node_id = parsed_line[1]
            node_type = parsed_line[2]
            graph_nx.add_node(node_id, type=node_type)
        elif line.startswith('e'):
            node_from = parsed_line[1]
            node_to = parsed_line[2]
            edge_type = parsed_line[3]
            graph_nx.add_edge(node_from, node_to, type=edge_type)
    return graph_nx
def get_subgraphs_nx(remapped_fp_file):
    """Parse a remapped gSpan output file into a list of nx.DiGraph objects.

    Records are separated by blank lines.

    Fix: a file whose last record is not followed by a blank line used to
    silently drop that record; the final group is now flushed after the
    read loop.
    """
    fsg_lines_list = []
    fsg_lines = []
    with open(remapped_fp_file) as f:
        for line in f:
            if line == '\n' and fsg_lines != []:
                fsg_lines_list.append(fsg_lines)
                fsg_lines = []
            else:
                fsg_lines.append(line)
    if fsg_lines:
        fsg_lines_list.append(fsg_lines)
    nx_subgraphs = []
    for fsg_ls in fsg_lines_list:
        nx_subgraphs.append(get_single_subgraph_nx(fsg_ls))
    return nx_subgraphs
"""set up node matcher and edge matcher"""
NODE_MATCHER_FUNC = isomorphism.categorical_node_match("type",None)
EDGE_MATCHER_FUNC = isomorphism.categorical_edge_match("type",None)
def is_subgraph(g, G):
    """Return True iff *g* is (type-)isomorphic to some subgraph of *G*."""
    matcher = isomorphism.DiGraphMatcher(
        G, g, node_match=NODE_MATCHER_FUNC, edge_match=EDGE_MATCHER_FUNC)
    return matcher.subgraph_is_isomorphic()
def sort_nx_graphs(nx_graphs, order='increasing'):
    """Return *nx_graphs* sorted by node count.

    order: 'increasing' or 'decreasing'; anything else raises
    NotImplementedError (same contract as before).
    """
    if order not in ('increasing', 'decreasing'):
        raise NotImplementedError
    return sorted(nx_graphs,
                  key=lambda graph: graph.number_of_nodes(),
                  reverse=(order == 'decreasing'))
def filter_mined_nx_subgraphs(nx_subgraphs, save_path=None):
    '''Drop every mined subgraph that is contained in a larger mined one.

    Graphs are scanned in increasing node-count order; a graph is kept
    only if no later (larger or equal) graph contains it.  Optionally
    pickles the survivors to *save_path*.

    Fix: the original loop ran over range(len-1), so the final (largest)
    graph -- which can never be contained in another -- was silently
    dropped from the result.
    '''
    sorted_nx_graphs = sort_nx_graphs(nx_subgraphs, order='increasing')
    filtered_nx_graphs = []
    for i in tqdm(range(len(sorted_nx_graphs) - 1)):
        g = sorted_nx_graphs[i]
        filtered_nx_graphs.append(g)
        for j in range(i + 1, len(sorted_nx_graphs)):
            G = sorted_nx_graphs[j]
            if is_subgraph(g, G):
                filtered_nx_graphs.pop()
                break
    # The largest graph is always maximal; keep it.
    if sorted_nx_graphs:
        filtered_nx_graphs.append(sorted_nx_graphs[-1])
    if save_path is not None:
        write_graphs(filtered_nx_graphs, save_path)
        print('write graphs to :', save_path)
    return filtered_nx_graphs
def write_graphs(nx_subgraphs, output_pickle_path):
    """Serialize the list of graphs to *output_pickle_path* via pickle."""
    with open(output_pickle_path, 'wb') as handle:
        pickle.dump(nx_subgraphs, handle)
def load_graphs(input_pickle_path):
    """Deserialize and return the object pickled at *input_pickle_path*."""
    with open(input_pickle_path, 'rb') as handle:
        return pickle.load(handle)
'''arg parser'''
if __name__ == "__main__":
    # Fix: the ArgumentParser construction was commented out, leaving
    # `parser` undefined and raising NameError on the first add_argument.
    parser = argparse.ArgumentParser(description='filter mined subgraphs')
    parser.add_argument('-i', '--input_fp_file', help='input remapped fp file path', required=True)
    parser.add_argument('-o', '--output_path', help='write filtered graphs in pickle format', required=False, default="")
    args = vars(parser.parse_args())
    remapped_fp_path = args['input_fp_file']
    save_path = args['output_path']
    '''usage'''
    nx_subgraphs = get_subgraphs_nx(remapped_fp_path)
    print('original frequent subgraph number: ', len(nx_subgraphs))
    filtered_nx_subgraphs = filter_mined_nx_subgraphs(nx_subgraphs)
    print('filtered frequent subgraph number: ', len(filtered_nx_subgraphs))
    # An empty output path means "do not save".
    if save_path != "":
        write_graphs(filtered_nx_subgraphs, save_path)
| MikeWangWZHL/Schema_Composition | gSpan_official/gSpan6/filter_mined_graph.py | filter_mined_graph.py | py | 4,396 | python | en | code | 1 | github-code | 36 |
2723556029 | #!/usr/bin/python3
import images
import os
import time
import random
# Replace RPG starter project with this code when new instructions are live
# Splash screen: show the title art for five seconds, then clear the terminal.
images.title()
time.sleep(5)
os.system("clear")
# Charges remaining for each burnable metal (the 'burn' command sets the
# chosen metal's counter to 3).
steel_count = 0
iron_count = 0
tin_count = 0  # tin is currently unused (see the commented-out burn branch)
pewter_count = 0
def showInstructions():
    """Print the game banner and the list of available player commands."""
    # print a main menu and the commands
    print('''
Mistborn (A Brandon Sanderson fan-fiction Text-Based RPG)
========
Commands:
go [direction]
get [item]
burn [metal that was ingested]: one word command
push [if steel burned]
pull [if iron burned]
boost [if pewter burned]: one word command
drink [from an item]
back [to go back to previous room]
''')
def backstory():
    """Print the opening narrative, then wait for the player to press Enter."""
    print("""For a thousand years the ash fell and no flowers bloomed. For a thousand years the Skaa slaved in misery and lived in fear. For a thousand years the Lord Ruler, the "Sliver of Infinity," reigned with absolute power and ultimate terror, divinely invincible. Then, when hope was so long lost that not even its memory remained, a terribly scarred, heart-broken half-Skaa rediscovered it in the depths of the Lord Ruler's most hellish prison. Kelsier "snapped" and found in himself the powers of a Mistborn. A brilliant thief and natural leader, he turned his talents to the ultimate caper, with the Lord Ruler himself as the mark.
Kelsier recruited the underworld's elite, the smartest and most trustworthy allomancers, each of whom shares one of his many powers, and all of whom relish a high-stakes challenge. Only then does he reveal his ultimate dream, not just the greatest heist in history, but the downfall of the divine despot.
But even with the best criminal crew ever assembled, Kel's plan looks more like the ultimate long shot, until luck brings a ragged girl named Vin into his life.
You are Vin, and find yourself locked away after a mission gone horribly wrong. You wake up in a cell.
"I need to get out."
""")
    input("Press Enter to continue...")
def showStatus():
    """Describe the current room, its contents, and the player's inventory."""
    print('---------------------------')
    print(f'You are in the {currentRoom}')
    print(rooms[currentRoom]['description'])
    # print the current inventory
    print(f'Inventory : {inventory}')
    # print an item if there is one
    if "item" in rooms[currentRoom]:
        print(f"You see a {rooms[currentRoom]['item']}")
    print("Type help if you need instructions again.")
    print("---------------------------")
# an inventory, which is initially empty
inventory = []
# The three Allomantic metals gained by drinking trace-metal water.
metals = ['steel', 'iron', 'pewter']
# A dictionary linking a room to other rooms.
# Per-room keys used by the game loop:
#   description - text shown by showStatus()
#   exits       - direction -> room name
#   item        - loot the player can 'get' (deleted once taken)
#   metal       - metal object that steel pushes / iron pulls act on
#   person      - occupant who blocks movement until 'clear' is True
#   locked      - True while a door bars the exits
#   clear       - True when the room holds no active threat
#   death       - entering a room with 'death' ends the game
rooms = {
    'Cell': {
        'description': 'Thick steel bars block your exit to the south.\nThrough the bars is a sleeping guard and beyond him is a staircase leading up.\nIn your cell is a bucket to do your business in, and a straw mat.\nStarving you might be on your captor\'s agenda but they did leave you a cup of water.',
        'exits': {
            'south': 'Guard\'s Room'},
        'item': 'cup',
        'locked': True,
        'metal': 'bars',
        'clear': True
    },
    'Guard\'s Room': {
        'description': 'A simple desk for a guard to set his dinner, stairs leading up, and the guard occupy the room.\n A key ring hangs on the guard\'s belt.',
        'exits': {
            'north': 'Cell',
            'up': 'Stairs'
        },
        'person': 'guard',
        'item': 'keys',
        'metal': 'keys',
        'locked': False,
        'clear': False
    },
    'Stairs': {
        'description': 'A single guard carries a tray of food down the stairs, and you catch them unaware.\nA bag hangs from the belt of the guard. \nYou need to keep going up.',
        'exits': {
            'up': 'Hall',
            'down': 'Guard\'s Room'},
        'item': 'coins',
        'person': 'guard',
        'metal': 'coins',
        'clear': False,
        'locked': False,
    },
    'Hall': {
        'description': 'An empty hallway running east to west.\n You see bright lights illuminating the end of the hall to the east.\nThe west is dimly lit by slow burning candles.\n With two guards taken care of below, you are aware it\'s not going to be easy to escape.',
        'exits': {
            'east': 'Grand Hall',
            'west': 'Guard Tower',
            'down': 'Stairs'
        },
        'clear': True,
        'locked': False
    },
    'Grand Hall': {
        'description': 'A gleaming crystal chandelier hangs in the middle of the Grand Hall, but that is not the first thing you notice.\n50 of the top noblemen and women of Luthadel and surrounding Scadriel cities.\nBarring the doors to the keep are four Obligators, spikes piercing their eyes, and yes, they notice you.',
        'death': 'obligator',
        'clear': False,
        'locked': False,
    },
    'Guard Tower': {
        'description': 'A wooden door blocks the way into the tower.\nIf I can get to a window up high, I can escape.',
        'exits': {
            'up': 'Landing',
            'east': 'Hall'},
        'clear': True,
        'locked': True
    },
    'Landing': {
        'description': 'Halfway up the tower a door opens up to a landing. \nThe room\'s furniture is arranged in an office style and at a desk is large, bald beast of a man. He looks up in shock. Clearly he was not expecting you to be out of your cell. He calls over to you as he readies himself to subdue you. "You should have stayed in the cell.\nNow, the pain you feel will leave you wishing you were dead. When I\'m through with you, the Obligators downstairs will be happy to take you to the Ministry of Steel. \nThey\'ll start with spikes driven through your wrists, and then work their way up.\n\nNow! Feel the pain, you little half-breed!"',
        'exits': {
            'up': 'Tower Window',
            # NOTE(review): 'down' maps to 'Landing' itself; probably
            # intended to be 'Guard Tower'.
            'down': 'Landing'},
        'metal': 'vial',
        'item': 'vial',
        'person': 'Captain of the Guard',
        'clear': False
    },
    'Tower Window': {
        # NOTE(review): no 'exits' here -- the window is the end of the map.
        'description': 'The city of Luthadel sprawls outward with torches illuminating the street corners and the mists crawling to fill the night air.',
        'clear': True
    }
}
# Start the player in the Cell (the original comment said "Hall").
currentRoom = 'Cell'
previousRoom = ''
backstory()
showInstructions()
# Main game loop: read one command per turn, dispatch on its first word,
# mutate the world state, then check win/lose conditions.
#
# Fixes applied in this block:
#   * `steel_count - 1` / `iron_count - 1` / `pewter_count - 1` were
#     no-op expressions; they now actually decrement the metal charges.
#   * `inventory += ['item']['coins']` raised TypeError; replaced with
#     inventory.append('coins'), and its guard `'coins' in
#     rooms[currentRoom]` (a key test that could never be true) now
#     checks the room's 'item' value.
#   * One-word commands ("push", "get", ...) no longer raise IndexError
#     on move[1]; missing 'locked'/'person'/'metal' room keys no longer
#     raise KeyError.
#   * Dead commented-out code removed.
while True:
    showStatus()

    # get the player's next 'move'; .split() below turns e.g. 'go east'
    # into the list ['go', 'east']
    move = ''
    while move == '':
        move = input('>')
    # split at the first space only, so item names may contain spaces
    # ('get golden key' -> ["get", "golden key"])
    move = move.lower().split(" ", 1)
    os.system('clear')

    if move[0] == "back":
        # If there is a previous room, swap back to it
        if previousRoom:
            currentRoom, previousRoom = previousRoom, currentRoom
            print(f"You go back to {currentRoom}")
        else:
            print("You can't go back any further")

    # Check if the player wants to move to a new room
    if move[0] == 'go':
        # a hostile occupant blocks movement until the room is cleared
        if 'person' in rooms[currentRoom] and not rooms[currentRoom]['clear']:
            print(
                f"The {rooms[currentRoom]['person']} in the {currentRoom} will not let you get by so easily.")
        else:
            if 'locked' in rooms[currentRoom] and rooms[currentRoom]['locked'] == True:
                print(f"The door is locked. You must find another way or find a key.")
                if 'keys' in inventory:
                    print("You use the keys to unlock the door.")
                    rooms[currentRoom]['locked'] = False
                    # not removing keys because they can be used as a weapon.
                else:
                    print("You do not have the keys. But that won't hold you back for long. Kelsier said even the water we drink has trace metals that might be worth burning.")
            elif len(move) > 1 and move[1] in rooms[currentRoom]["exits"]:
                currentRoom = rooms[currentRoom]["exits"][move[1]]
            else:
                print("You can't go that way!")

    # pick up an item from the room
    if move[0] == 'get':
        if rooms[currentRoom]['clear'] == False:
            # .get() fallbacks avoid KeyError in rooms without person/item
            print(
                f"The {rooms[currentRoom].get('person', 'occupant')} in the {currentRoom} will not let you get the {rooms[currentRoom].get('item', 'anything')} so easily")
        elif "item" in rooms[currentRoom] and len(move) > 1 and move[1] in rooms[currentRoom]['item']:
            inventory += [move[1]]
            print(move[1] + ' taken!')
            # the item leaves the room once taken
            del rooms[currentRoom]['item']
        else:
            print('Can\'t get ' + (move[1] if len(move) > 1 else 'that') + '!')

    # if user forgets the commands
    if move[0] == 'help':
        showInstructions()

    # drinking grants the three burnable metals
    if move[0] == 'drink':
        if 'cup' in inventory:
            inventory.remove('cup')
            inventory += metals
            print("You sense a pool of power within.")
        elif 'vial' in inventory:
            inventory.remove('vial')
            inventory += metals
            print("You sense a pool of power within.")
        else:
            print("You don\'t have anything to drink!")

    # burning a metal grants three charges of its power
    if move[0] == 'burn':
        metalchoice = input("Which metal will you burn?\n")
        if 'steel' in inventory and metalchoice == 'steel':
            print("You can feel that the metal around pulses, and blue lines shoot out from you to each metal object in the room. You can now push metal away from you.")
            steel_count = 3
            print(
                f"Blue lines extend to objects in the room. You can push the {rooms[currentRoom].get('metal', 'metal around you')}.")
        elif 'iron' in inventory and metalchoice == 'iron':
            print("You can feel that the metal around pulses, and blue lines shoot out from you to each metal object in the room. You can now pull metal to you.")
            iron_count = 3
            print(
                f"Blue lines extend to objects in the room. You can pull the {rooms[currentRoom].get('metal', 'metal around you')}.")
        elif 'pewter' in inventory and metalchoice == 'pewter':
            print("You feel your body tighten with strength. You feel like you can take a hit from a hammer, break bones with your bare hands, or balance atop a lightpost. You can now boost your physical prowess.")
            pewter_count = 3
        # Tin is not useful in my scenario so I'm removing it. Could be used
        # in future iterations if more stealth is desired.
        else:
            print("You have not ingested any metals. Find some. Your fate depends on it!")

    # Allomancy actions
    if move[0] == 'push':
        steel_count -= 1  # each push attempt consumes a steel charge
        if 'coins' in inventory and len(move) > 1 and 'person' in rooms[currentRoom] and steel_count > 0:
            print(
                f"A nifty little Kelsier taught you. You push the {move[1]} with ferocious velocity. The {rooms[currentRoom]['person']} is torn to shreds.")
            rooms[currentRoom]['clear'] = True
            rooms[currentRoom]['item'] = move[1]
            rooms[currentRoom]['description'] = 'Any threats have been neutralized. You need to move before anyone checks this room.'
            inventory.remove('coins')
        elif 'keys' in inventory and len(move) > 1 and 'person' in rooms[currentRoom] and steel_count > 0:
            bodypart = ['head', 'chest', 'leg', 'throat']
            randpart = random.choice(bodypart)
            print(
                f" The keys blur with such speed they cut through any living thing. The {rooms[currentRoom]['person']} has key-sized hole through their {randpart}.")
            rooms[currentRoom]['clear'] = True
            inventory.remove('keys')
            rooms[currentRoom]['item'] = move[1]
            rooms[currentRoom]['description'] = 'Any threats have been neutralized. You need to move before anyone checks this room.'
        elif 'metal' in rooms[currentRoom] and steel_count > 0:
            print(
                f"Blue lines illuminate {rooms[currentRoom]['metal']}. You push! The {rooms[currentRoom]['metal']} bursts forward and sends you back into the wall behind you.")
            # .get(): not every metal-bearing room defines 'locked'
            if rooms[currentRoom].get('locked') == True and rooms[currentRoom]['metal'] == 'bars':
                rooms[currentRoom]['locked'] = False
                print(
                    "You have created your own key. The guard wakes with a start and lunges toward you.")
            elif 'exits' in rooms[currentRoom] and len(move) > 1 and move[1] in rooms[currentRoom]['exits']:
                previousRoom = currentRoom
                currentRoom = rooms[currentRoom]['exits'][move[1]]
                print(f"You enter the {currentRoom}.")
            else:
                print("You can't go that way!")
        else:
            print("You need more steel. Find some!\n(You might try to burn the metal if you have it in your inventory.)")

    if move[0] == 'pull':
        iron_count -= 1  # each pull attempt consumes an iron charge
        if rooms[currentRoom].get('item') == 'coins' and len(move) > 1 and iron_count > 0:
            print(
                f"You pull the bag of coins to your hand. These might be useful.")
            inventory.append('coins')
            del rooms[currentRoom]['item']
        elif 'metal' in rooms[currentRoom] and iron_count > 0:
            # .get(): the room's item may already have been taken
            if rooms[currentRoom]['metal'] == rooms[currentRoom].get('item'):
                print(
                    f"You pocket the {rooms[currentRoom]['item']}, and think of devious ways to use it.")
                inventory.append(rooms[currentRoom]['item'])
                del rooms[currentRoom]['item']
            elif rooms[currentRoom].get('locked') == True and rooms[currentRoom]['metal'] == 'bars':
                rooms[currentRoom]['locked'] = False
                print(
                    f"Blue lines illuminate {rooms[currentRoom]['metal']}. You pull! The {rooms[currentRoom]['metal']} breaks free of the hinges and comes straight toward you You dodge just in time, and raise a clatter.")
                print(
                    "You have created your own key. The guard wakes with a start and lunges toward you.")
            elif 'exits' in rooms[currentRoom] and len(move) > 1 and move[1] in rooms[currentRoom]['exits']:
                previousRoom = currentRoom
                currentRoom = rooms[currentRoom]['exits'][move[1]]
                print(f"You enter the {currentRoom}.")
            else:
                print("You can't go that way!")
        else:
            print("You need more iron. Find some!\n(You might try to burn the metal if you have it in your inventory.)")

    if move[0] == 'boost':
        pewter_count -= 1  # each boost consumes a pewter charge
        if 'person' in rooms[currentRoom] and pewter_count > 0:
            get_wrecked = ['jugular', 'skull', 'limbs']
            pewterdeath = random.choice(get_wrecked)
            rooms[currentRoom]['clear'] = True
            rooms[currentRoom]['description'] = 'Any threats have been neutralized. You need to move before anyone checks this room.'
            if pewterdeath == 'jugular':
                print(
                    f" The {rooms[currentRoom]['person']} gasps when they see the 'little girl' in front of them shirk off any blows that would have maimed any average person.\n You laugh under your breath and send a quick jab to their throat, causing them to splutter, gasping for their last breaths.")
            elif pewterdeath == 'skull':
                print(
                    f" You flip over the {rooms[currentRoom]['person']} and smashing their skull with a pewter enhanced fist.\n Good night, tough guy.")
            else:
                print(
                    f"A series of blows from kicks and jabs to the {rooms[currentRoom]['person']} limbs leave them broken but still breathing.\n You slap the {rooms[currentRoom]['person']} which put them to sleep. Life is more than they deserve.")
        else:
            print("You need more pewter. Find some!\n(You might try to burn the metal if you have it in your inventory.)")

    # Define how a player can win
    if currentRoom == 'Tower Window' and 'steel' in inventory and 'coins' in inventory:
        print('You leap out of the window, like Kelsier taught you, tossing a coin down below. You push off the coin, sending you skyward, and soar out into the mists... YOU WIN!')
        images.ending()
        break
    elif currentRoom == 'Tower Window' and 'steel' in inventory and 'keys' in inventory:
        print('You leap out of the window, like Kelsier taught you, tossing the cell\'s keys towards the cobblestone streets below. Keys were indeed a means to escape. You trace the blue lines in your sight and steel push out into the mists... YOU WIN!')
        images.ending()
        break
    # Entering a room guarded by Obligators ends the game
    elif rooms.get(currentRoom, {}).get('death') == 'obligator':
        print('An Obligator found you and easily detains you. You are but a flea compared to the Lord Ruler and his Obligators... GAME OVER!')
        images.obligator()
        break

print("If you enjoyed the story so far, check out the series Mistborn by Brandon Sanderson.")
| chadkellum/mycode | project2rpg.py | project2rpg.py | py | 18,358 | python | en | code | 0 | github-code | 36 |
952551392 | pkgname = "libice"
pkgver = "1.1.1"
pkgrel = 0
build_style = "gnu_configure"
hostmakedepends = [
"pkgconf",
"automake",
"libtool",
"xorg-util-macros",
"xtrans",
]
makedepends = ["xorgproto", "xtrans"]
pkgdesc = "Inter Client Exchange (ICE) library for X"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT"
url = "https://xorg.freedesktop.org"
source = f"$(XORG_SITE)/lib/libICE-{pkgver}.tar.gz"
sha256 = "04fbd34a11ba08b9df2e3cdb2055c2e3c1c51b3257f683d7fcf42dabcf8e1210"
def post_install(self):
    # MIT-licensed: ship the license text with the package.
    self.install_license("COPYING")
@subpackage("libice-devel")
def _devel(self):
return self.default_devel()
| chimera-linux/cports | main/libice/template.py | template.py | py | 641 | python | en | code | 119 | github-code | 36 |
15452563301 | from base import VisBase
from helper import get_heat_map
import os
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import torch
import torch.nn.functional as F
import math
import numpy as np
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1 import ImageGrid
# Directory containing this script; all figures are saved beneath it.
VIS_ROOT = os.path.dirname(os.path.realpath(__file__))
class ProjVis(VisBase):
    """Visualizer for projection structure features and relation heat maps
    across several experiment variants (softmax/sparsemax, with and
    without positional encoding).  Data loading and the plot_* primitives
    come from VisBase.
    """
    def __init__(self,
                 exp,
                 **kwargs):
        """Set up visualization defaults for experiment *exp*.

        exp: experiment identifier forwarded to VisBase.
        kwargs: additional options forwarded to VisBase.
        """
        super(ProjVis, self).__init__(exp, **kwargs)
        self.save_each = False  # also save each individual panel to disk
        self.show = True  # show composite grids interactively instead of saving
        self.batch_id = 0  # batch index passed to feed()
        self.target_id = 0  # target index passed to feed()
        self.center_tf = (23, 23)  # (time, freq) center used in saved file names
        self.rect_color = 'yellow'  # rectangle color (presumably used by plot_spec_rect in VisBase -- TODO confirm)
def center_scan(self):
half_width = 21
half_height = 21
max_t = 250
max_f = 128
t_grid = np.linspace(half_width + 2, max_t - half_width - 2, num=6)
f_grid = np.linspace(half_height + 2, max_f - half_height - 2, num=5)
center_list = []
for t in t_grid:
for f in f_grid:
center_tf = (int(t), int(f))
center_list.append(center_tf)
return center_list
    def fig_structure_grid(self):
        """Render a 1x5 panel -- spectrogram plus four structure-feature
        maps (softmax/sparsemax, each with and without positional
        encoding) -- sharing a single colorbar; show or save depending on
        ``self.show``.
        """
        fig = plt.figure(figsize=(7, 1.5))
        grid = ImageGrid(fig, 111,
                         nrows_ncols=(1, 5),
                         axes_pad=0.07,
                         share_all=True,
                         cbar_mode='single',
                         label_mode='L')
        # Each fig_* helper reloads its experiment variant, feeds the
        # sample, and draws into the given axis.
        im1 = self.fig_spec(ax=grid[0])
        im2 = self.fig_entropy_softmax(ax=grid[1])
        im3 = self.fig_pos_entropy_softmax(ax=grid[2])
        im4 = self.fig_entropy_sparsemax(ax=grid[3])
        im5 = self.fig_pos_entropy_sparsemax(ax=grid[4])
        # NOTE(review): im1, im2 and im5 are never used; the colorbar is
        # driven by im4 (sparsemax) when its value range is wide enough,
        # otherwise by im3.
        max_val = im4.get_array().max()
        min_val = im4.get_array().min()
        if max_val > 0.2 and max_val - min_val > 0.1:
            # Pull the top tick down slightly so the label stays inside.
            max_val = round(max_val - 0.1, 1)
            min_val = round(min_val, 1)
            plt.colorbar(im4, cax=grid.cbar_axes[0], ticks=[min_val, max_val])
            grid.cbar_axes[0].set_yticklabels([min_val, str(max_val)])
        else:
            plt.colorbar(im3, cax=grid.cbar_axes[0])
        # plt.colorbar(im3, cax=grid.cbar_axes[0])
        fontsz = 12
        grid[0].set_xlabel('(a) spectrogram', fontsize=fontsz, labelpad=6.2)
        grid[1].set_xlabel(r'(b) $\tilde{\mathbf{h}}$', fontsize=fontsz)
        grid[2].set_xlabel(r'(c) $\tilde{\mathbf{h}}^\dag$', fontsize=fontsz)
        grid[3].set_xlabel(r'(d) $\bar{\mathbf{h}}$', fontsize=fontsz)
        grid[4].set_xlabel(r'(e) $\bar{\mathbf{h}}^\dag$', fontsize=fontsz)
        grid[0].get_xaxis().set_ticks([])
        if self.show:
            # fig.suptitle('{}_structure_grid_b{}.png'.format(self.label, str(self.batch_id)))
            plt.show()
        else:
            fig.savefig('{}/{}/{}_structure.png'.format(VIS_ROOT, self.label, self.label))
    def fig_relation_grid(self, suffix=None):
        """Render spectrogram + relation maps (with/without positional
        encoding) side by side with one shared colorbar.

        suffix: optional tag appended to the saved file name.
        """
        fig = plt.figure(figsize=(4.8, 1.8))
        grid = ImageGrid(fig, 111,
                         nrows_ncols=(1, 3),
                         axes_pad=0.07,
                         share_all=True,
                         cbar_mode='single',
                         label_mode='L'
                         )
        # NOTE(review): fig_spec_rect is called twice on the same axis;
        # the second call looks redundant.
        self.fig_spec_rect(ax=grid[0])
        self.fig_spec_rect(ax=grid[0])
        im1 = self.fig_relation(ax=grid[1])
        im2 = self.fig_pos_relation(ax=grid[2])
        fontsz = 12
        grid[0].set_xlabel('(a) spectrogram', fontsize=fontsz)
        grid[1].set_xlabel(r'(b) $\mathbf{E}_i$', fontsize=fontsz)
        grid[2].set_xlabel(r'(c) $\mathbf{E}_i^{\dag}$', fontsize=fontsz)
        grid[0].get_xaxis().set_ticks([])
        # Pick a colorbar source whose data is not all-zero; fall back to
        # a fixed [0, 1] scale when both maps are empty.
        if im1.get_array().max() == 0. and im2.get_array().max() == 0.:
            import matplotlib.colors
            norm = matplotlib.colors.Normalize(vmax=1., vmin=0.)
            plt.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap='jet'), cax=grid.cbar_axes[0])
        elif im1.get_array().max() == 0.:
            plt.colorbar(im2, cax=grid.cbar_axes[0])
        elif im2.get_array().max() == 0.:
            plt.colorbar(im1, cax=grid.cbar_axes[0])
        else:
            plt.colorbar(im1, cax=grid.cbar_axes[0])
        if self.show:
            # fig.suptitle('{}_relation_grid_b{}_{}.png'.format(self.label, self.batch_id, str(self.center_tf)))
            plt.show()
        else:
            if suffix is not None:
                fig.savefig('{}/{}/{}_relation_grid_{}.png'.format(VIS_ROOT,
                                                                   self.label,
                                                                   self.label,
                                                                   str(suffix)))
            else:
                fig.savefig('{}/{}/{}_relation_grid.png'.format(VIS_ROOT,
                                                                self.label,
                                                                self.label))
def fig_spec(self, ax=None):
if not ax:
fig, ax = plt.subplots()
self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
folder = '{}/{}'.format(VIS_ROOT, self.label)
if not os.path.exists(folder):
os.makedirs(folder)
self.plot_spec(ax=ax)
if self.save_each:
fig.savefig('{}/{}/{}_spec.png'.format(VIS_ROOT, self.label, self.label))
else:
pass
# plt.show()
def fig_spec_rect(self, ax=None):
if not ax:
fig, ax = plt.subplots()
self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
folder = '{}/{}'.format(VIS_ROOT, self.label)
if not os.path.exists(folder):
os.makedirs(folder)
self.plot_spec_rect(ax)
if self.save_each:
fig.savefig('{}/{}/{}_spec_rect.png'.format(VIS_ROOT, self.label, self.label))
else:
pass
# plt.show()
def fig_relation(self, ax=None):
self.reload(exp="esc-folds-rblock",
r_structure_type="zero", softmax_type="softmax")
self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
folder = '{}/{}'.format(VIS_ROOT, self.label)
if not os.path.exists(folder):
os.makedirs(folder)
if not ax:
fig, ax = plt.subplots()
im = self.plot_relation_heatmap(ax=ax)
if self.save_each:
fig.savefig('{}/{}/{}_relation.png'.format(VIS_ROOT, self.label, self.label))
else:
pass
# plt.show()
return im
def fig_pos_relation(self, ax=None):
self.reload(exp="esc-folds-rblock-pe",
r_structure_type="zero", softmax_type="softmax")
self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
folder = '{}/{}'.format(VIS_ROOT, self.label)
if not os.path.exists(folder):
os.makedirs(folder)
if not ax:
fig, ax = plt.subplots()
im = self.plot_relation_heatmap(ax=ax)
if self.save_each:
fig.savefig('{}/{}/{}_pos_relation.png'.format(VIS_ROOT, self.label, self.label))
else:
pass
# plt.show()
return im
def fig_entropy_softmax(self, ax=None):
    """Plot the structure feature from the minus-entropy / softmax checkpoint and return the image handle.

    :param ax: optional target axes; a new figure/axes pair is created when omitted.
    """
    self.reload(exp="esc-folds-rblock",
                r_structure_type="minus_entropy", softmax_type="softmax")
    self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
    folder = '{}/{}'.format(VIS_ROOT, self.label)
    if not os.path.exists(folder):
        os.makedirs(folder)
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure  # fix: `fig` was unbound when a caller supplied `ax`
    im = self.plot_structure_feat(ax)
    if self.save_each:
        fig.savefig('{}/{}/{}_entropy_softmax.png'.format(VIS_ROOT, self.label, self.label))
    return im
def fig_entropy_sparsemax(self, ax=None):
    """Plot the structure feature from the minus-entropy / sparsemax checkpoint and return the image handle.

    :param ax: optional target axes; a new figure/axes pair is created when omitted.
    """
    self.reload(exp="esc-folds-rblock",
                r_structure_type="minus_entropy", softmax_type="sparsemax")
    self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
    folder = '{}/{}'.format(VIS_ROOT, self.label)
    if not os.path.exists(folder):
        os.makedirs(folder)
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure  # fix: `fig` was unbound when a caller supplied `ax`
    im = self.plot_structure_feat(ax)
    if self.save_each:
        fig.savefig('{}/{}/{}_entropy_sparsemax.png'.format(VIS_ROOT, self.label, self.label))
    return im
def fig_pos_entropy_softmax(self, ax=None):
    """Plot the structure feature from the position-encoded minus-entropy / softmax checkpoint.

    :param ax: optional target axes; a new figure/axes pair is created when omitted.
    """
    self.reload(exp="esc-folds-rblock-pe",
                r_structure_type="minus_entropy", softmax_type="softmax")
    self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
    folder = '{}/{}'.format(VIS_ROOT, self.label)
    if not os.path.exists(folder):
        os.makedirs(folder)
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure  # fix: `fig` was unbound when a caller supplied `ax`
    im = self.plot_structure_feat(ax)
    if self.save_each:
        fig.savefig('{}/{}/{}_pos_entropy_softmax.png'.format(VIS_ROOT, self.label, self.label))
    return im
def fig_pos_entropy_sparsemax(self, ax=None):
    """Plot the structure feature from the position-encoded minus-entropy / sparsemax checkpoint.

    :param ax: optional target axes; a new figure/axes pair is created when omitted.
    """
    self.reload(exp="esc-folds-rblock-pe",
                r_structure_type="minus_entropy", softmax_type="sparsemax")
    self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
    folder = '{}/{}'.format(VIS_ROOT, self.label)
    if not os.path.exists(folder):
        os.makedirs(folder)
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure  # fix: `fig` was unbound when a caller supplied `ax`
    im = self.plot_structure_feat(ax)
    if self.save_each:
        fig.savefig('{}/{}/{}_pos_entropy_sparsemax.png'.format(VIS_ROOT, self.label, self.label))
    return im
def plot_spec(self, ax):
    """Draw the raw spectrogram on *ax*, low frequencies at the bottom."""
    ax.imshow(self.spec, cmap='magma', origin='lower')
def plot_spec_rect(self, ax):
    """Draw the spectrogram on *ax* and overlay the query rectangle."""
    ax.imshow(self.spec, cmap='magma', origin='lower')
    self.plot_rect(ax)
def plot_rect(self, ax, text=None):
    """Draw a fixed 43x43 box centred on self.center_tf, optionally labelled.

    :param ax: axes to draw on.
    :param text: optional math label. Originally only 'p' and 'q' were
        recognized (with identical code paths); any string is now rendered
        as $text$, which is backward compatible.
    """
    width = 43
    height = 43
    lower_left = (self.center_tf[0] - math.floor(width / 2),
                  self.center_tf[1] - math.floor(height / 2))
    rect = Rectangle(xy=lower_left, width=width, height=height, linewidth=1,
                     edgecolor=self.rect_color, facecolor='none')
    ax.add_patch(rect)
    # ax.scatter(self.center_tf[0], self.center_tf[1], s=10, marker='x', c=self.rect_color)
    if text is not None:
        ax.text(self.center_tf[0] - 10, self.center_tf[1] - 8,
                r'${}$'.format(text), fontsize=10, color=self.rect_color)
def plot_relation_heatmap(self, ax, fig=None, alpha=1.):
    """Overlay the relation heat-map, upsampled to spectrogram resolution, plus the query box.

    :param ax: axes to draw on.
    :param fig: unused; kept for call-site compatibility.
    :param alpha: image transparency passed straight to imshow.
    :returns: the AxesImage handle from imshow.
    """
    fsz, tsz = self.spec.shape
    # get_heat_map is defined elsewhere in this module; presumably it returns
    # a 4-D (N, C, H, W) array suitable for F.interpolate -- TODO confirm.
    heat_map = get_heat_map(self.spec, nl_map=self.nl_map, center_tf=self.center_tf)
    # (F, T)
    heat_map = F.interpolate(torch.from_numpy(heat_map),
                             size=(fsz, tsz),
                             mode='bicubic').squeeze()
    # Bicubic interpolation can undershoot below zero; clamp before display.
    heat_map = heat_map.clamp_(min=0.).numpy()
    # alpha, multiply heat_map by alpha
    im = ax.imshow(heat_map, cmap='jet', alpha=alpha, origin='lower')
    self.plot_rect(ax)
    return im
def plot_structure_feat(self, ax, fig=None, alpha=1.):
    """Upsample the structure/relation feature to spectrogram size and draw it.

    :param ax: axes to draw on.
    :param fig: unused; kept for call-site compatibility.
    :param alpha: image transparency passed straight to imshow.
    :returns: the AxesImage handle from imshow.
    """
    freq_bins, time_frames = self.spec.shape
    feat = torch.from_numpy(self.relation_feat)
    feat = F.interpolate(feat, size=(freq_bins, time_frames), mode='bicubic')
    feat = feat.squeeze()
    feat.clamp_(min=0., max=1.)
    return ax.imshow(feat.numpy(), cmap='bwr', origin='lower', alpha=alpha)
def add_colorbar(self, ax):
    """Attach a colorbar in a narrow axes appended to the right of *ax*.

    NOTE(review): plt.colorbar is called without an explicit mappable, so it
    relies on the current image -- verify an imshow happened just before.
    """
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(cax=cax)
def plot_relation(vis):
    """Render relation grids for the first 50 targets of batch 0, saving audio per target.

    :param vis: a ProjVis instance; its batch/target/center state is mutated in place.
    """
    vis.batch_id = 0
    for l in range(50):
        vis.target_id = l
        for i, center_tf in enumerate(vis.center_scan()):
            vis.center_tf = center_tf
            vis.fig_relation_grid(suffix=i)
        # NOTE(review): source indentation was lost in extraction; the audio
        # dump is assumed to be per-target (inside this loop) -- confirm.
        audio_path = "{}/{}/{}.wav".format(VIS_ROOT, vis.label, vis.label)
        vis.save_audio(wav_path=audio_path)
def plot_structure(vis):
    """Render structure grids for the first 50 targets of batch 0.

    :param vis: a ProjVis instance; its batch/target state is mutated in place.
    """
    vis.batch_id = 0
    for target in range(50):
        vis.target_id = target
        vis.fig_structure_grid()
if __name__ == '__main__':
    # Pin the run to the second GPU before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    vis = ProjVis(exp="esc-folds-rblock",
                  ckpt_prefix="Run029")
    # Publication-quality figures: high DPI, LaTeX text rendering.
    plt.rcParams['figure.dpi'] = 300
    plt.rcParams['text.usetex'] = True
    # NOTE(review): matplotlib's usual family name is 'Times New Roman' --
    # confirm 'Times Roman' resolves on the target machine.
    plt.rc('font', family='Times Roman')
    vis.show = False  # save figures instead of displaying them
    plot_relation(vis)
    plot_structure(vis)
| hackerekcah/ESRelation | vis_proj/vis_proj.py | vis_proj.py | py | 13,438 | python | en | code | 0 | github-code | 36 |
11045304899 | from datetime import datetime, timedelta
from functools import reduce
from django import db
from django.conf import settings
from django.db import models
from django.db.models import Sum
from django.contrib.auth import get_user_model
from jalali_date import date2jalali
from dmo.models import DmoDay, Dmo
User = get_user_model()
class TodoListManager(models.Manager):
    """Manager for TodoList: lazy per-day creation plus bulk date moves."""

    def get_todo_list(self, user, date):
        """Return the user's list for *date*, creating it (and its DMO items) on demand."""
        instance, _created = self.get_or_create(user=user, date=date)
        self._attach_dmo_items(instance)
        return instance

    def _attach_dmo_items(self, todo_list):
        """Mirror the user's monthly DMO goals into the list, skipping titles already present."""
        jalali = date2jalali(todo_list.date)
        monthly_dmos = Dmo.objects.filter(
            user=todo_list.user, year=jalali.year, month=jalali.month)
        for dmo in monthly_dmos:
            already_present = todo_list.items.filter(title=dmo.goal).exists()
            if not already_present:
                todo_list.items.create(title=dmo.goal)

    def move_lists_to_today(self):
        """Bulk-update every list in this queryset to today's date."""
        self.update(date=datetime.now())

    def move_lists_to_date(self, date):
        """Bulk-update every list in this queryset to *date*."""
        self.update(date=date)
class TodoListItemManager(models.Manager):
    """Manager for TodoListItem rows."""

    def move_tasks_to_today_list(self):
        """Move every item in this queryset onto its single owner's list for today.

        :raises Exception: when the queryset spans more than one user.
        """
        users = self.values('todo_list__user')
        if len(users) > 1:
            raise Exception('Multiple users found.')
        user = users[0]['todo_list__user']
        # Fix: TodoListManager exposes get_todo_list(user, date); get_today()
        # never existed and raised AttributeError.
        today_list = TodoList.objects.get_todo_list(user, datetime.now())
        self.update(todo_list=today_list)

    def add_item(self, title, desc, user, date=None, status=None):
        """Create an item on the user's list for *date* (today by default).

        Fixes: the parameter was misspelled 'stauts' (so the body's `status`
        raised NameError); the default status lives on TodoListItem, not
        TodoList; and the create() kwarg 'desceription' did not match the
        model field 'desc'.
        """
        if not date:
            date = datetime.now()
        if not status:
            status = TodoListItem.Statuses.PENDING
        todo_list = TodoList.objects.get_todo_list(user, date)
        self.create(todo_list=todo_list, title=title, desc=desc, status=status)
class TodoList(models.Model):
    """A user's todo list for a single calendar date (one list per user per day)."""
    date = models.DateField()
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='کاربر',
                             related_name='todo_lists')
    updated_at = models.DateTimeField(auto_now=True, verbose_name='آخرین ویرایش')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='ایجاد شده در')

    objects = TodoListManager()

    class Meta:
        verbose_name = 'Todo لیست'
        verbose_name_plural = 'Todo لیست'
        # Enforces the one-list-per-user-per-day invariant at the DB level.
        unique_together = ('date', 'user', )

    def __str__(self):
        return f'{self.user} - {self.date}'

    def move_list_to_date(self, to_date, commit=True):
        """Re-date this list; pass commit=False to defer the save."""
        self.date = to_date
        if commit:
            self.save()
class TodoListItem(models.Model):
    """A single task on a TodoList; optionally mirrors a DMO day and carries time tracks."""

    class Statuses(models.IntegerChoices):
        PENDING = 0, 'در انتظار انجام'
        DONE = 100, 'انجام شد'
        NOT_DONE = 200, 'انجام نشد'

    todo_list = models.ForeignKey(TodoList, verbose_name='Todo', related_name='items',
                                  on_delete=models.CASCADE)
    title = models.CharField(max_length=255, verbose_name='عنوان')
    desc = models.TextField(verbose_name='توضیحات', blank=True)
    status = models.IntegerField(verbose_name='وضعیت', choices=Statuses.choices,
                                 default=Statuses.PENDING)
    dmo_day = models.OneToOneField(DmoDay, on_delete=models.CASCADE, verbose_name='دمو',
                                   related_name='todo_list_item', null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, verbose_name='آخرین ویرایش')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='ایجاد شده در')

    objects = TodoListItemManager()

    class Meta:
        verbose_name = 'آیتم Todo لیست'
        verbose_name_plural = 'آیتم Todo لیست'

    def __str__(self):
        return self.title

    def change_status(self, status: Statuses, commit=True):
        """Set the status; pass commit=False to defer the save."""
        self.status = status
        if commit:
            self.save()

    def done_task(self, commit=True):
        """Mark DONE and propagate completion to the matching DMO day, if any."""
        self.status = self.Statuses.DONE
        jalali_date = date2jalali(self.todo_list.date)
        dmo = Dmo.objects.filter(user=self.todo_list.user, goal=self.title,
                                 year=jalali_date.year, month=jalali_date.month
                                 ).first()
        if dmo:
            dmo.complete(jalali_date.day, done=True)
        if commit:
            self.save()

    def undone_task(self, commit=True):
        """Mark NOT_DONE and propagate to the matching DMO day, if any."""
        # NOTE(review): this model declares no `end_datetime` field -- the next
        # line sets a stray, never-persisted attribute; it looks meant for
        # TodoListItemTimeTrack. Confirm and remove.
        self.end_datetime = datetime.now()
        self.status = self.Statuses.NOT_DONE
        jalali_date = date2jalali(self.todo_list.date)
        dmo = Dmo.objects.filter(user=self.todo_list.user, goal=self.title,
                                 year=jalali_date.year, month=jalali_date.month
                                 ).first()
        if dmo:
            dmo.complete(jalali_date.day, done=False)
        if commit:
            self.save()

    def start_task(self):
        """Open a new time track.

        :raises Exception: when an unfinished time track already exists.
        """
        if self.time_tracks.filter(end_datetime__isnull=True).exists():
            raise Exception('Task is already started.')
        TodoListItemTimeTrack.objects.create(
            item=self,
            start_datetime=datetime.now(),
            end_datetime=None
        )

    def finish_task(self):
        """Close every open time track at the current moment."""
        now = datetime.now()
        self.time_tracks.filter(end_datetime=None).update(end_datetime=now)

    def toggle_start_stop(self):
        """Stop the running time track if one exists; otherwise start a new one."""
        started_tracks = self.time_tracks.filter(end_datetime__isnull=True)
        if started_tracks.exists():
            started_tracks.update(end_datetime=datetime.now())
            return
        TodoListItemTimeTrack.objects.create(
            item=self,
            start_datetime=datetime.now(),
            end_datetime=None
        )

    def get_total_time_seconds(self):
        """Sum the durations of all closed time tracks, in seconds."""
        # db aggregation doesn't work for some databases, so it's safer to use python
        time_tracks = self.time_tracks.filter(end_datetime__isnull=False).values('start_datetime', 'end_datetime')
        durations = [time['end_datetime'] - time['start_datetime'] for time in time_tracks]
        # NOTE(review): `.seconds` drops whole days from the summed timedelta;
        # `.total_seconds()` may be what was intended -- confirm.
        return reduce(lambda a, b: a+b, durations, timedelta(seconds=0)).seconds

    def get_last_ongoing_time_track(self):
        """Return the most recent open time track, or None."""
        return self.time_tracks.filter(end_datetime__isnull=True).last()
class TodoListItemTimeTrack(models.Model):
    """One start/stop interval of work on a TodoListItem; open tracks have end_datetime=None."""
    item = models.ForeignKey(TodoListItem, on_delete=models.CASCADE, verbose_name='آیتم',
                             related_name='time_tracks')
    start_datetime = models.DateTimeField(verbose_name='شروع', null=True, blank=True)
    end_datetime = models.DateTimeField(verbose_name='پایان', null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, verbose_name='آخرین ویرایش')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='ایجاد شده در')

    class Meta:
        verbose_name = 'Todo لیست زمان'
        verbose_name_plural = 'Todo لیست زمان'

    def __str__(self):
        return f'{self.item}'
| mohsen-hassani-org/teamche | todo_list/models.py | models.py | py | 6,959 | python | en | code | 0 | github-code | 36 |
35048984657 | import os
# Directory containing the captured video files
video_directory = "captured_video"


def rename_files(directory):
    """Rename every .png in *directory* to a zero-padded sequential name (000000.png, ...).

    Files are processed in alphabetical order so the numbering is
    deterministic; non-PNG files are left untouched.

    :param directory: path to the directory to process.
    """
    if not os.path.exists(directory):
        print(f"Directory '{directory}' does not exist.")
        return

    file_list = sorted(os.listdir(directory))  # deterministic ordering
    count = 0
    for filename in file_list:
        if not filename.endswith(".png"):  # only rename the captured frames
            continue
        new_filename = f"{count:06d}.png"  # leading zeros keep lexicographic order
        old_path = os.path.join(directory, filename)
        new_path = os.path.join(directory, new_filename)
        try:
            if old_path != new_path:
                os.rename(old_path, new_path)
            # Fix: the messages previously printed the literal '(unknown)'
            # instead of the file actually being renamed.
            print(f"Renamed '{filename}' to '{new_filename}'")
            count += 1
        except Exception as e:
            print(f"Error renaming '{filename}': {e}")


if __name__ == "__main__":
    # Guarded so importing this module no longer renames files as a side effect.
    rename_files(video_directory)
| LandonDoyle7599/CS5510-Assignment2 | renameFiles.py | renameFiles.py | py | 1,064 | python | en | code | 0 | github-code | 36 |
37738454658 | __docformat__ = 'restructuredtext en'
from collections import OrderedDict
import six
from six import string_types
from geoid.util import isimplify
from geoid.civick import GVid
from geoid import parse_to_gvid
from dateutil import parser
from sqlalchemy import event
from sqlalchemy import Column as SAColumn, Integer, UniqueConstraint
from sqlalchemy import String, ForeignKey
from sqlalchemy.orm import relationship, object_session, backref
from ambry.identity import ObjectNumber, PartialPartitionName, PartitionIdentity
from ambry.orm.columnstat import ColumnStat
from ambry.orm.dataset import Dataset
from ambry.util import Constant
import logging
from ambry.util import get_logger
logger = get_logger(__name__)
# logger.setLevel(logging.DEBUG)
from . import Base, MutationDict, MutationList, JSONEncodedObj, BigIntegerType
class PartitionDisplay(object):
    """Helper object to select what to display for titles and descriptions."""

    def __init__(self, p):
        self._p = p

        desc_used = False

        # Title preference: explicit title, then table description, then vname.
        self.title = self._p.title
        self.description = ''

        if not self.title:
            self.title = self._p.table.description
            desc_used = True

        if not self.title:
            self.title = self._p.vname

        if not desc_used:
            # Normalize to exactly one trailing period.
            self.description = self._p.description.strip('.') + '.' if self._p.description else ''

        self.notes = self._p.notes

    @property
    def geo_description(self):
        """Return a description of the geographic extents, using the largest scale
        space and grain coverages."""
        sc = self._p.space_coverage
        gc = self._p.grain_coverage

        if sc and gc:
            if parse_to_gvid(gc[0]).level == 'state' and parse_to_gvid(sc[0]).level == 'state':
                return parse_to_gvid(sc[0]).geo_name
            else:
                return ("{} in {}".format(
                    parse_to_gvid(gc[0]).level_plural.title(),
                    parse_to_gvid(sc[0]).geo_name))
        elif sc:
            return parse_to_gvid(sc[0]).geo_name.title()
        elif gc:
            # Fix: this branch previously tested `sc` (always false here, so it
            # was unreachable) while reading gc[0]; grain-only coverage now works.
            return parse_to_gvid(gc[0]).level_plural.title()
        else:
            return ''

    @property
    def time_description(self):
        """String description of the year or year range."""
        tc = [t for t in self._p.time_coverage if t]

        if not tc:
            return ''

        mn = min(tc)
        mx = max(tc)

        if not mn and not mx:
            return ''
        elif mn == mx:
            return mn
        else:
            return "{} to {}".format(mn, mx)

    @property
    def sub_description(self):
        """Time and space description, suffixed with the row count."""
        gd = self.geo_description
        td = self.time_description

        if gd and td:
            return '{}, {}. {} Rows.'.format(gd, td, self._p.count)
        elif gd:
            return '{}. {} Rows.'.format(gd, self._p.count)
        elif td:
            return '{}. {} Rows.'.format(td, self._p.count)
        else:
            return '{} Rows.'.format(self._p.count)
class Partition(Base):
__tablename__ = 'partitions'
STATES = Constant()
STATES.SYNCED = 'synced'
STATES.CLEANING = 'cleaning'
STATES.CLEANED = 'cleaned'
STATES.PREPARING = 'preparing'
STATES.PREPARED = 'prepared'
STATES.BUILDING = 'building'
STATES.BUILT = 'built'
STATES.COALESCING = 'coalescing'
STATES.COALESCED = 'coalesced'
STATES.ERROR = 'error'
STATES.FINALIZING = 'finalizing'
STATES.FINALIZED = 'finalized'
STATES.INSTALLING = 'installing'
STATES.INSTALLED = 'installed'
TYPE = Constant
TYPE.SEGMENT = 's'
TYPE.UNION = 'u'
sequence_id = SAColumn('p_sequence_id', Integer)
vid = SAColumn('p_vid', String(16), primary_key=True, nullable=False)
id = SAColumn('p_id', String(13), nullable=False)
d_vid = SAColumn('p_d_vid', String(13), ForeignKey('datasets.d_vid'), nullable=False, index=True)
t_vid = SAColumn('p_t_vid', String(15), ForeignKey('tables.t_vid'), nullable=False, index=True)
name = SAColumn('p_name', String(200), nullable=False, index=True)
vname = SAColumn('p_vname', String(200), unique=True, nullable=False, index=True)
fqname = SAColumn('p_fqname', String(200), unique=True, nullable=False, index=True)
title = SAColumn('p_title', String())
description = SAColumn('p_description', String())
notes = SAColumn('p_notes', String())
cache_key = SAColumn('p_cache_key', String(200), unique=True, nullable=False, index=True)
parent_vid = SAColumn('p_p_vid', String(16), ForeignKey('partitions.p_vid'), nullable=True, index=True)
ref = SAColumn('p_ref', String(16), index=True,
doc='VID reference to an eariler version to use instead of this one.')
type = SAColumn('p_type', String(20), default=TYPE.UNION,
doc='u - normal partition, s - segment')
table_name = SAColumn('p_table_name', String(50))
time = SAColumn('p_time', String(20)) # FIXME: add helptext
space = SAColumn('p_space', String(50))
grain = SAColumn('p_grain', String(50))
variant = SAColumn('p_variant', String(50))
format = SAColumn('p_format', String(50))
segment = SAColumn('p_segment', Integer,
doc='Part of a larger partition. segment_id is usually also a source ds_id')
epsg = SAColumn('p_epsg', Integer, doc='EPSG SRID for the reference system of a geographic dataset. ')
# The partition could hold data that is considered a dimension -- if multiple datasets
# were joined, that dimension would be a dimension column, but it only has a single
# value in each partition.
# That could be part of the name, or it could be declared in a table, with a single value for all of the
# rows in a partition.
min_id = SAColumn('p_min_id', BigIntegerType)
max_id = SAColumn('p_max_id', BigIntegerType)
count = SAColumn('p_count', Integer)
state = SAColumn('p_state', String(50))
data = SAColumn('p_data', MutationDict.as_mutable(JSONEncodedObj))
space_coverage = SAColumn('p_scov', MutationList.as_mutable(JSONEncodedObj))
time_coverage = SAColumn('p_tcov', MutationList.as_mutable(JSONEncodedObj))
grain_coverage = SAColumn('p_gcov', MutationList.as_mutable(JSONEncodedObj))
installed = SAColumn('p_installed', String(100))
_location = SAColumn('p_location', String(100)) # Location of the data file
__table_args__ = (
# ForeignKeyConstraint( [d_vid, d_location], ['datasets.d_vid','datasets.d_location']),
UniqueConstraint('p_sequence_id', 'p_d_vid', name='_uc_partitions_1'),
)
# For the primary table for the partition. There is one per partition, but a table
# can be primary in multiple partitions.
table = relationship('Table', backref='partitions', foreign_keys='Partition.t_vid')
stats = relationship(ColumnStat, backref='partition', cascade='all, delete, delete-orphan')
children = relationship('Partition', backref=backref('parent', remote_side=[vid]), cascade='all')
_bundle = None # Set when returned from a bundle.
_datafile = None # TODO: Unused variable.
_datafile_writer = None # TODO: Unused variable.
_stats_dict = None
@property
def identity(self):
    """Return this partition information as a PartitionId."""
    if self.dataset is None:
        # The relationship will be null until the object is committed
        s = object_session(self)
        # NOTE(review): this class declares d_vid, not d_id, and Dataset's
        # column is d_vid -- confirm this branch is ever exercised.
        ds = s.query(Dataset).filter(Dataset.id_ == self.d_id).one()
    else:
        ds = self.dataset

    # Partition-level name components; dataset fields are merged in below.
    d = {
        'id': self.id,
        'vid': self.vid,
        'name': self.name,
        'vname': self.vname,
        'ref': self.ref,
        'space': self.space,
        'time': self.time,
        'table': self.table_name,
        'grain': self.grain,
        'variant': self.variant,
        'segment': self.segment,
        'format': self.format if self.format else 'db'
    }

    return PartitionIdentity.from_dict(dict(list(ds.dict.items()) + list(d.items())))
@property
def display(self):
    """Return an accessor object to get display titles and descriptions."""
    return PartitionDisplay(self)
@property
def bundle(self):
    """The owning bundle, or None until set externally (e.g. Bundle.wrap_partition)."""
    return self._bundle  # Set externally, such as Bundle.wrap_partition
@property
def is_segment(self):
    """True when this partition is a segment ('s') rather than a union partition."""
    return self.type == self.TYPE.SEGMENT
@property
def headers(self):
    """Column names of the partition's primary table, in schema order."""
    return [c.name for c in self.table.columns]
def __repr__(self):
    """Debug representation showing the vid and versioned name."""
    return '<partition: {} {}>'.format(self.vid, self.vname)
def set_stats(self, stats):
    """Replace this partition's ColumnStat rows from a {column_name: stats} mapping.

    :param stats: mapping of column name -> stats object exposing a .dict.
    """
    self.stats[:] = []  # Delete existing stats

    for c in self.table.columns:

        if c.name not in stats:
            continue

        d = stats[c.name].dict
        # 'name' and 'flags' are not ColumnStat constructor fields.
        del d['name']
        del d['flags']
        cs = ColumnStat(p_vid=self.vid, d_vid=self.d_vid, c_vid=c.vid, **d)
        self.stats.append(cs)
def parse_gvid_or_place(self, gvid_or_place):
    """Parse a GVid string, falling back to a full-text place-name search.

    :param gvid_or_place: a GVid string or a human-readable place name.
    :returns: a GVid, or None when the place name cannot be resolved.
    """
    try:
        return parse_to_gvid(gvid_or_place)
    except KeyError:
        # Not a GVid; try resolving it as a place name via the library search.
        places = list(self._bundle._library.search.search_identifiers(gvid_or_place))

        if not places:
            err_msg = "Failed to find space identifier '{}' in full " \
                      "text identifier search for partition '{}'" \
                .format(gvid_or_place, str(self.identity))
            self._bundle.error(err_msg)
            return None

        return parse_to_gvid(places[0].vid)
def set_coverage(self, stats):
    """Extract time, space and grain coverage from the stats and store them on the partition.

    :param stats: mapping of column name -> stats object (as produced by run_stats).
    """
    from ambry.util.datestimes import expand_to_years

    scov = set()    # space coverage: GVid values
    tcov = set()    # time coverage: integer years
    grains = set()  # grain coverage: summarized GVids

    def summarize_maybe(gvid):
        # Best-effort GVid summary; any parse failure becomes None.
        try:
            return parse_to_gvid(gvid).summarize()
        except:
            return None

    def simplifiy_maybe(values, column):
        # Parse each gvid, warning (not failing) on bad values, then simplify the set.
        parsed = []

        for gvid in values:
            # The gvid should not be a st
            if gvid is None or gvid == 'None':
                continue
            try:
                parsed.append(parse_to_gvid(gvid))
            except ValueError as e:
                if self._bundle:
                    self._bundle.warn("While analyzing geo coverage in final partition stage, " +
                                      "Failed to parse gvid '{}' in {}.{}: {}"
                                      .format(str(gvid), column.table.name, column.name, e))
        try:
            return isimplify(parsed)
        except:
            return None

    def int_maybe(year):
        # Coerce to int, returning None for unparseable years.
        try:
            return int(year)
        except:
            return None

    # Harvest coverage from the per-column stats.
    for c in self.table.columns:

        if c.name not in stats:
            continue

        try:
            if stats[c.name].is_gvid or stats[c.name].is_geoid:
                scov |= set(x for x in simplifiy_maybe(stats[c.name].uniques, c))
                grains |= set(summarize_maybe(gvid) for gvid in stats[c.name].uniques)
            elif stats[c.name].is_year:
                tcov |= set(int_maybe(x) for x in stats[c.name].uniques)
            elif stats[c.name].is_date:
                # The fuzzy=True argument allows ignoring the '-' char in dates produced by .isoformat()
                try:
                    tcov |= set(parser.parse(x, fuzzy=True).year if isinstance(x, string_types) else x.year for x in
                                stats[c.name].uniques)
                except ValueError:
                    pass
        except Exception as e:
            self._bundle.error("Failed to set coverage for column '{}', partition '{}': {}"
                               .format(c.name, self.identity.vname, e))
            raise

    # Space Coverage

    if 'source_data' in self.data:
        for source_name, source in list(self.data['source_data'].items()):
            scov.add(self.parse_gvid_or_place(source['space']))

    if self.identity.space:  # And from the partition name
        try:
            scov.add(self.parse_gvid_or_place(self.identity.space))
        except ValueError:
            # Couldn't parse the space as a GVid
            pass

    # For geo_coverage, only includes the higher level summary levels, counties, states,
    # places and urban areas.
    self.space_coverage = sorted([str(x) for x in scov if bool(x) and x.sl
                                  in (10, 40, 50, 60, 160, 400)])

    #
    # Time Coverage

    # From the source
    # If there was a time value in the source that this partition was created from, then
    # add it to the years.
    if 'source_data' in self.data:
        for source_name, source in list(self.data['source_data'].items()):
            if 'time' in source:
                for year in expand_to_years(source['time']):
                    if year:
                        tcov.add(year)

    # From the partition name
    if self.identity.name.time:
        for year in expand_to_years(self.identity.name.time):
            if year:
                tcov.add(year)

    self.time_coverage = [t for t in tcov if t]

    #
    # Grains

    if 'source_data' in self.data:
        for source_name, source in list(self.data['source_data'].items()):
            if 'grain' in source:
                grains.add(source['grain'])

    self.grain_coverage = sorted(str(g) for g in grains if g)
@property
def dict(self):
    """A dict that holds key/values for all of the properties in the
    object, with simple fields from ``data`` merged into the top level.

    :return: dict of column values plus non-conflicting ``data`` entries.
    """
    d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs
         if p.key not in ('table', 'dataset', '_codes', 'stats', 'data', 'process_records')}

    if self.data:
        # Copy data fields into top level dict, but don't overwrite existing values.
        for k, v in six.iteritems(self.data):
            if k not in d and k not in ('table', 'stats', '_codes', 'data'):
                d[k] = v

    return d
@property
def detail_dict(self):
    """A more detailed dict that includes the descriptions, sub descriptions, table
    and columns."""
    d = self.dict

    def aug_col(c):
        # Column dict augmented with its per-partition stats.
        d = c.dict
        d['stats'] = [s.dict for s in c.stats]
        return d

    d['table'] = self.table.dict
    d['table']['columns'] = [aug_col(c) for c in self.table.columns]

    return d
@property
def stats_dict(self):
    """Per-column stats as an attribute-accessible mapping; cached after first build."""

    class Bunch(object):
        """Dict and object access to properties"""

        def __init__(self, o):
            self.__dict__.update(o)

        def __str__(self):
            return str(self.__dict__)

        def __repr__(self):
            return repr(self.__dict__)

        def keys(self):
            return list(self.__dict__.keys())

        def items(self):
            return list(self.__dict__.items())

        def iteritems(self):
            return iter(self.__dict__.items())

        def __getitem__(self, k):
            if k in self.__dict__:
                return self.__dict__[k]
            else:
                # Unknown column: return an empty stat rather than raising.
                from . import ColumnStat
                return ColumnStat(hist=[])

    if not self._stats_dict:
        cols = {s.column.name: Bunch(s.dict) for s in self.stats}
        self._stats_dict = Bunch(cols)

    return self._stats_dict
def build_sample(self):
    """Store up to 20 roughly evenly spaced rows of the partition table in record.data['sample']."""
    name = self.table.name

    count = int(
        self.database.connection.execute('SELECT count(*) FROM "{}"'.format(name)).fetchone()[0])

    # Fix: under Python 3 `/` is float division, which interpolated a
    # fractional modulus into the SQL below (e.g. `id % 12.3`); integer
    # division restores the intended every-Nth-row sampling.
    skip = count // 20

    if count > 100:
        sql = 'SELECT * FROM "{}" WHERE id % {} = 0 LIMIT 20'.format(name, skip)
    else:
        sql = 'SELECT * FROM "{}" LIMIT 20'.format(name)

    sample = []

    for j, row in enumerate(self.database.connection.execute(sql)):
        sample.append(list(row.values()))

    self.record.data['sample'] = sample

    s = self.bundle.database.session
    s.merge(self.record)
    s.commit()
@property
def row(self):
    """CSV-friendly ordered mapping of the partition's user-facing fields."""
    # Use an Ordered Dict to make it friendly to creating CSV files.

    # Internal/derived fields that should not appear in exported rows.
    SKIP_KEYS = [
        'sequence_id', 'vid', 'id', 'd_vid', 't_vid', 'min_key', 'max_key',
        'installed', 'ref', 'count', 'state', 'data', 'space_coverage',
        'time_coverage', 'grain_coverage', 'name', 'vname', 'fqname', 'cache_key'
    ]

    d = OrderedDict([('table', self.table.name)] +
                    [(p.key, getattr(self, p.key)) for p in self.__mapper__.attrs
                     if p.key not in SKIP_KEYS])
    return d
def update(self, **kwargs):
    """Copy matching keyword values onto existing attributes of this partition.

    Unknown keys are ignored; 'table' is dropped because, in
    source_schema.csv, it is the name of the table, not the object.
    """
    kwargs.pop('table', None)

    for attr, value in kwargs.items():
        if hasattr(self, attr):
            setattr(self, attr, value)
def finalize(self, ps=None):
    """Write column metadata and stats into the MPR file and mark the partition finalized.

    :param ps: optional progress session used for status updates.
    """
    self.state = self.STATES.FINALIZING

    # Write the stats for this partition back into the partition
    with self.datafile.writer as w:
        for i, c in enumerate(self.table.columns, 1):
            wc = w.column(i)
            # Writer columns are 1-indexed and must line up with the schema.
            assert wc.pos == c.sequence_id, (c.name, wc.pos, c.sequence_id)
            wc.name = c.name
            wc.description = c.description
            wc.type = c.python_type.__name__
        self.count = w.n_rows
        w.finalize()

    if self.type == self.TYPE.UNION:
        # NOTE(review): ps defaults to None but is used unconditionally here --
        # confirm callers always pass ps when finalizing UNION partitions.
        ps.update('Running stats ', state='running')
        stats = self.datafile.run_stats()
        self.set_stats(stats)
        self.set_coverage(stats)

    self._location = 'build'

    self.title = PartitionDisplay(self).title
    self.description = PartitionDisplay(self).description

    self.state = self.STATES.FINALIZED
# =============
# These methods are a bit non-cohesive, since they require the _bundle value to be set, which is
# set externally, when the object is retured from a bundle.
def clean(self):
    """Remove all built files and return the partition to a newly-created state"""
    # NOTE(review): `datafile` returns an MPRowsFile object (or raises), so the
    # truthiness test here is effectively always True -- confirm intent.
    if self.datafile:
        self.datafile.remove()
@property
def location(self):
    """Return a system path for the data file when it resolves locally, else the raw location string."""
    base_location = self._location

    if not base_location:
        return None

    if self._bundle.build_fs.exists(base_location):
        # NOTE(review): pyfilesystem spells this `hassyspath` -- confirm
        # `hashsyspath` exists on this fs implementation.
        if self._bundle.build_fs.hashsyspath(base_location):
            return self._bundle.build_fs.getsyspath(base_location)

    return base_location
@location.setter
def location(self, v):
    # Stored verbatim; resolution to a system path happens in the getter.
    self._location = v
@property
def datafile(self):
    """Return the partition's MPR data file, preferring a local copy over the remote."""
    from ambry.exc import NotFoundError

    if self.is_local:
        # Use the local version, if it exists
        logger.debug('datafile: Using local datafile {}'.format(self.vname))
        return self.local_datafile
    else:
        # If it doesn't try to get the remote.
        try:
            logger.debug('datafile: Using remote datafile {}'.format(self.vname))
            return self.remote_datafile
        except NotFoundError:
            # If the remote doesnt exist, return the local, so the caller can call exists() on it,
            # get its path, etc.
            # NOTE(review): local_datafile raises ambry.orm.exc.NotFoundError
            # while this clause catches ambry.exc.NotFoundError -- verify the
            # two exception types are related.
            return self.local_datafile
@property
def local_datafile(self):
    """Return the datafile for this partition, from the build directory, the remote, or the warehouse"""
    from ambry_sources import MPRowsFile
    from fs.errors import ResourceNotFoundError
    from ambry.orm.exc import NotFoundError

    try:
        return MPRowsFile(self._bundle.build_fs, self.cache_key)
    except ResourceNotFoundError:
        # NOTE(review): MPRowsFile construction is unlikely to raise this;
        # existence is normally checked via .exists -- confirm this handler fires.
        raise NotFoundError(
            'Could not locate data file for partition {} (local)'.format(self.identity.fqname))
@property
def remote(self):
    """
    Return the remote for this partition.

    :raises NotFoundError: when the owning dataset records no remote name.
    :return: the library remote object.
    """
    from ambry.exc import NotFoundError

    ds = self.dataset

    if 'remote_name' not in ds.data:
        raise NotFoundError('Could not determine remote for partition: {}'.format(self.identity.fqname))

    return self._bundle.library.remote(ds.data['remote_name'])
@property
def remote_datafile(self):
    """Return the MPR data file for this partition from its remote.

    :raises NotFoundError: when the file or remote resource is missing.
    """
    from fs.errors import ResourceNotFoundError
    from ambry.exc import AccessError, NotFoundError
    from boto.exception import S3ResponseError

    try:
        from ambry_sources import MPRowsFile

        remote = self.remote

        datafile = MPRowsFile(remote.fs, self.cache_key)

        if not datafile.exists:
            raise NotFoundError(
                'Could not locate data file for partition {} from remote {} : file does not exist'
                .format(self.identity.fqname, remote))

    except ResourceNotFoundError as e:
        raise NotFoundError('Could not locate data file for partition {} (remote): {}'
                            .format(self.identity.fqname, e))
    except S3ResponseError as e:
        # HACK. It looks like we get the response error with an access problem when
        # we have access to S3, but the file doesn't exist.
        # NOTE(review): `remote` is unbound here if self.remote itself raised
        # S3ResponseError -- confirm that path cannot happen.
        raise NotFoundError("Can't access MPR file for {} in remote {}".format(self.cache_key, remote.fs))

    return datafile
@property
def is_local(self):
    """Return true is the partition file is local"""
    from ambry.orm.exc import NotFoundError
    try:
        if self.local_datafile.exists:
            return True
    except NotFoundError:
        # No local file record at all; treat as not local.
        pass

    return False
def localize(self, ps=None):
    """Copy a non-local partition file to the local build directory.

    :param ps: optional progress session for reporting download progress.
    :returns: self, for chaining.
    """
    from filelock import FileLock
    from ambry.util import ensure_dir_exists
    from ambry_sources import MPRowsFile
    from fs.errors import ResourceNotFoundError

    if self.is_local:
        return

    local = self._bundle.build_fs
    b = self._bundle.library.bundle(self.identity.as_dataset().vid)
    remote = self._bundle.library.remote(b)

    # File lock serializes concurrent localizations of the same partition
    # across processes.
    lock_path = local.getsyspath(self.cache_key + '.lock')
    ensure_dir_exists(lock_path)
    lock = FileLock(lock_path)

    if ps:
        ps.add_update(message='Localizing {}'.format(self.identity.name),
                      partition=self,
                      item_type='bytes',
                      state='downloading')

    if ps:
        def progress(bts):
            # Throttled callback: only every 32nd invocation posts an update.
            if ps.rec.item_total is None:
                ps.rec.item_count = 0
            if not ps.rec.data:
                ps.rec.data = {}  # Should not need to do this.
                # NOTE(review): this early return skips the rest of the first
                # progress callback -- confirm that is intended.
                return self
            item_count = ps.rec.item_count + bts
            ps.rec.data['updates'] = ps.rec.data.get('updates', 0) + 1
            if ps.rec.data['updates'] % 32 == 1:
                ps.update(message='Localizing {}'.format(self.identity.name),
                          item_count=item_count)
    else:
        from ambry.bundle.process import call_interval

        @call_interval(5)
        def progress(bts):
            self._bundle.log("Localizing {}. {} bytes downloaded".format(self.vname, bts))

    def exception_cb(e):
        raise e

    with lock:
        # FIXME! This won't work with remote ( http) API, only FS ( s3:, file:)
        # Re-check under the lock: another process may have localized it already.
        if self.is_local:
            return self

        try:
            with remote.fs.open(self.cache_key + MPRowsFile.EXTENSION, 'rb') as f:
                event = local.setcontents_async(self.cache_key + MPRowsFile.EXTENSION,
                                                f,
                                                progress_callback=progress,
                                                error_callback=exception_cb)
                event.wait()

            if ps:
                ps.update_done()

        except ResourceNotFoundError as e:
            from ambry.orm.exc import NotFoundError
            raise NotFoundError("Failed to get MPRfile '{}' from {}: {} "
                                .format(self.cache_key, remote.fs, e))

    return self
@property
def reader(self):
    """The reader for the datafile.

    :raises NotFoundError: when the underlying MPR file cannot be found.
    """
    # Fix: the docstring above was previously a bare string statement placed
    # after the imports, so it was never attached to the property.
    from ambry.orm.exc import NotFoundError
    from fs.errors import ResourceNotFoundError

    try:
        return self.datafile.reader
    except ResourceNotFoundError:
        raise NotFoundError("Failed to find partition file, '{}' "
                            .format(self.datafile.path))
def select(self, predicate=None, headers=None):
    """
    Select rows from the reader using a predicate to select rows and and itemgetter to return a
    subset of elements.

    :param predicate: If defined, a callable that is called for each row, and if it returns true, the
        row is included in the output.
    :param headers: If defined, a list or tuple of header names to return from each row.
    :return: iterable of results

    WARNING: This routine works from the reader iterator, which returns RowProxy objects. RowProxy objects
    are reused, so if you construct a list directly from the output from this method, the list will have
    multiple copies of a single RowProxy, which will have as an inner row the last result row. If you will
    be directly constructing a list, use a getter that extracts the inner row, or which converts the RowProxy
    to a dict:

        list(s.datafile.select(lambda r: r.stusab == 'CA', lambda r: r.dict ))

    """

    # FIXME; in Python 3, use yield from
    with self.reader as r:
        for row in r.select(predicate, headers):
            yield row
def __iter__(self):
""" Iterator over the partition, returning RowProxy objects.
:return: a generator
"""
with self.reader as r:
for row in r:
yield row
@property
def analysis(self):
"""Return an AnalysisPartition proxy, which wraps this partition to provide acess to
dataframes, shapely shapes and other analysis services"""
if isinstance(self, PartitionProxy):
return AnalysisPartition(self._obj)
else:
return AnalysisPartition(self)
@property
def measuredim(self):
"""Return a MeasureDimension proxy, which wraps the partition to provide access to
columns in terms of measures and dimensions"""
if isinstance(self, PartitionProxy):
return MeasureDimensionPartition(self._obj)
else:
return MeasureDimensionPartition(self)
# ============================
    def update_id(self, sequence_id=None):
        """Alter the sequence id, and all of the names and ids derived from it. This
        often needs to be done after an IntegrityError in a multiprocessing run"""

        if sequence_id:
            self.sequence_id = sequence_id

        # Force re-derivation: vid/id must be recomputed from the (possibly new)
        # sequence id even if they are already set.
        self._set_ids(force=True)

        # _update_names() asserts self.dataset, so only call it when one is attached.
        if self.dataset:
            self._update_names()
def _set_ids(self, force=False):
if not self.sequence_id:
from .exc import DatabaseError
raise DatabaseError('Sequence ID must be set before insertion')
if not self.vid or force:
assert bool(self.d_vid)
assert bool(self.sequence_id)
don = ObjectNumber.parse(self.d_vid)
assert don.revision
on = don.as_partition(self.sequence_id)
self.vid = str(on.rev(don.revision))
self.id = str(on.rev(None))
if not self.data:
self.data = {}
def _update_names(self):
"""Update the derived names"""
d = dict(
table=self.table_name,
time=self.time,
space=self.space,
grain=self.grain,
variant=self.variant,
segment=self.segment
)
assert self.dataset
name = PartialPartitionName(**d).promote(self.dataset.identity.name)
self.name = str(name.name)
self.vname = str(name.vname)
self.cache_key = name.cache_key
self.fqname = str(self.identity.fqname)
@staticmethod
def before_insert(mapper, conn, target):
"""event.listen method for Sqlalchemy to set the sequence for this
object and create an ObjectNumber value for the id_"""
target._set_ids()
if target.name and target.vname and target.cache_key and target.fqname and not target.dataset:
return
Partition.before_update(mapper, conn, target)
    @staticmethod
    def before_update(mapper, conn, target):
        """event.listen method for Sqlalchemy; refresh the derived names before an UPDATE."""
        target._update_names()
    @staticmethod
    def before_delete(mapper, conn, target):
        """event.listen method for Sqlalchemy; no action is required on delete."""
        pass
# Wire the Partition lifecycle hooks into SQLAlchemy's event system.
event.listen(Partition, 'before_insert', Partition.before_insert)
event.listen(Partition, 'before_update', Partition.before_update)
event.listen(Partition, 'before_delete', Partition.before_delete)
class PartitionProxy(object):
    """Transparent proxy that forwards attribute access to a wrapped object.

    Special (dunder) methods are forwarded explicitly below, because Python looks
    them up on the class rather than going through ``__getattr__``.
    """
    __slots__ = ["_obj", "__weakref__"]

    def __init__(self, obj):
        object.__setattr__(self, "_obj", obj)

    #
    # proxying (special cases)
    #

    def __getattr__(self, name):
        # object.__getattribute__ bypasses our own proxy machinery.
        target = object.__getattribute__(self, "_obj")
        return getattr(target, name)

    def __delattr__(self, name):
        target = object.__getattribute__(self, "_obj")
        delattr(target, name)

    def __setattr__(self, name, value):
        target = object.__getattribute__(self, "_obj")
        setattr(target, name, value)

    def __nonzero__(self):
        # Python 2 truthiness hook; Python 3 would consult __bool__ instead.
        return bool(object.__getattribute__(self, "_obj"))

    def __str__(self):
        target = object.__getattribute__(self, "_obj")
        return "<{}: {}>".format(type(self), str(target))

    def __repr__(self):
        target = object.__getattribute__(self, "_obj")
        return "<{}: {}>".format(type(self), repr(target))

    def __iter__(self):
        return iter(object.__getattribute__(self, "_obj"))
class AnalysisPartition(PartitionProxy):
    """A subclass of Partition with methods designed for analysis with Pandas. It is produced from
    the partition's ``analysis`` property."""

    def dataframe(self, predicate=None, filtered_columns=None, columns=None, df_class=None):
        """Return the partition as a Pandas dataframe.

        :param predicate: If defined, a callable that is called for each row, and if it returns true,
            the row is included in the output.
        :param filtered_columns: If defined, a dict of column names and associated values. Only rows
            where all of the named columns have the given values are returned. Setting this argument
            overwrites any value set for the predicate.
        :param columns: A list or tuple of column names to return
        :param df_class: Dataframe class to construct; defaults to AmbryDataFrame.
        :return: Pandas dataframe
        """
        from operator import itemgetter
        from ambry.pands import AmbryDataFrame

        df_class = df_class or AmbryDataFrame

        if columns:
            ig = itemgetter(*columns)
        else:
            ig = None
            columns = self.table.header

        if filtered_columns:

            def maybe_quote(v):
                # Quote strings so they survive being embedded in the eval'd source.
                from six import string_types
                if isinstance(v, string_types):
                    return '"{}"'.format(v)
                else:
                    return v

            # NOTE(review): builds Python source from column names/values and evals it.
            # Only acceptable because the inputs come from the bundle's own schema,
            # not from untrusted end users — confirm that assumption holds for callers.
            code = ' and '.join("row.{} == {}".format(k, maybe_quote(v))
                                for k, v in filtered_columns.items())

            predicate = eval('lambda row: {}'.format(code))

        if predicate:

            def yielder():
                for row in self.reader:
                    if predicate(row):
                        if ig:
                            yield ig(row)
                        else:
                            yield row.dict

            df = df_class(yielder(), columns=columns, partition=self.measuredim)
            return df
        else:
            if ig:
                # BUGFIX: this branch previously always yielded the full row
                # (row.values()) while passing the requested column subset to the
                # dataframe constructor, producing a row-width/column-count mismatch
                # whenever `columns` was given without a predicate. Mirror the
                # predicate branch and honor the requested subset and order.
                def yielder():
                    for row in self.reader:
                        yield ig(row)

                return df_class(yielder(), columns=columns, partition=self.measuredim)

            def yielder():
                for row in self.reader:
                    yield row.values()

            # Put column names in header order
            columns = [c for c in self.table.header if c in columns]

            return df_class(yielder(), columns=columns, partition=self.measuredim)

    def geoframe(self, simplify=None, predicate=None, crs=None, epsg=None):
        """Return a geopandas dataframe.

        :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
        :param predicate: A single-argument function to select which records to include in the output.
        :param crs: Coordinate reference system information
        :param epsg: Specify the CRS as an EPSG number.
        :return: A Geopandas GeoDataFrame
        """
        import geopandas
        from shapely.wkt import loads
        from fiona.crs import from_epsg

        # Fall back to the partition's own EPSG code when neither was given.
        if crs is None and epsg is None and self.epsg is not None:
            epsg = self.epsg

        if crs is None:
            try:
                crs = from_epsg(epsg)
            except TypeError:
                raise TypeError('Must set either crs or epsg for output.')

        df = self.dataframe(predicate=predicate)
        geometry = df['geometry']

        # Geometry is stored as WKT text; parse (and optionally simplify) each shape.
        if simplify:
            s = geometry.apply(lambda x: loads(x).simplify(simplify))
        else:
            s = geometry.apply(lambda x: loads(x))

        df['geometry'] = geopandas.GeoSeries(s)

        return geopandas.GeoDataFrame(df, crs=crs, geometry='geometry')

    def shapes(self, simplify=None, predicate=None):
        """Return geodata as a list of Shapely shapes.

        :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
        :param predicate: A single-argument function to select which records to include in the output.
        :return: A list of Shapely objects
        """
        from shapely.wkt import loads

        if not predicate:
            predicate = lambda row: True

        if simplify:
            return [loads(row.geometry).simplify(simplify) for row in self if predicate(row)]
        else:
            return [loads(row.geometry) for row in self if predicate(row)]

    def patches(self, basemap, simplify=None, predicate=None, args_f=None, **kwargs):
        """Return geodata as a list of Matplotlib patches.

        :param basemap: A mpl_toolkits.basemap.Basemap
        :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
        :param predicate: A single-argument function to select which records to include in the output.
        :param args_f: A function that takes a row and returns a dict of additional args for the Patch constructor
        :param kwargs: Additional args to be passed to the descartes Patch constructor
        :return: A list of patch objects
        """
        from descartes import PolygonPatch
        from shapely.wkt import loads
        from shapely.ops import transform

        if not predicate:
            predicate = lambda row: True

        def map_xform(x, y, z=None):
            # Project lon/lat coordinates into the basemap's projection.
            return basemap(x, y)

        def make_patch(shape, row):
            args = dict(kwargs.items())
            if args_f:
                args.update(args_f(row))

            return PolygonPatch(transform(map_xform, shape), **args)

        def yield_patches(row):
            if simplify:
                shape = loads(row.geometry).simplify(simplify)
            else:
                shape = loads(row.geometry)

            # MultiPolygons must be split; PolygonPatch handles single polygons only.
            if shape.geom_type == 'MultiPolygon':
                for subshape in shape.geoms:
                    yield make_patch(subshape, row)
            else:
                yield make_patch(shape, row)

        return [patch for row in self if predicate(row)
                for patch in yield_patches(row)]
class MeasureDimensionPartition(PartitionProxy):
    """A partition proxy for accessing measures and dimensions. When returning a column, it returns
    a PartitionColumn, which proxies the table column while adding partition specific functions."""

    def __init__(self, obj):
        super(MeasureDimensionPartition, self).__init__(obj)
        # NOTE: assignment goes through the proxy's __setattr__, so this is stored
        # on the wrapped partition object, not on the proxy itself.
        self.filters = {}

    def column(self, c_name):
        """Return the named table column wrapped in a PartitionColumn."""
        return PartitionColumn(self.table.column(c_name), self)

    @property
    def columns(self):
        """Iterate over all columns"""
        return [PartitionColumn(c, self) for c in self.table.columns]

    @property
    def primary_columns(self):
        """Iterate over the primary columns, columns which do not have a parent"""
        return [c for c in self.columns if not c.parent]

    @property
    def dimensions(self):
        """Iterate over all dimensions"""
        from ambry.valuetype.core import ROLE
        return [c for c in self.columns if c.role == ROLE.DIMENSION]

    @property
    def primary_dimensions(self):
        """Iterate over the primary dimensions: columns which do not have a parent and have a
        cardinality greater than 1"""
        from ambry.valuetype.core import ROLE
        return [c for c in self.columns
                if not c.parent and c.role == ROLE.DIMENSION and c.pstats.nuniques > 1]

    @property
    def measures(self):
        """Iterate over all measures"""
        from ambry.valuetype.core import ROLE
        return [c for c in self.columns if c.role == ROLE.MEASURE]

    def measure(self, vid):
        """Return a measure, given its vid or another reference"""
        from ambry.orm import Column
        if isinstance(vid, PartitionColumn):
            return vid
        elif isinstance(vid, Column):
            # BUGFIX: PartitionColumn requires the partition argument; calling it
            # with only the column raised a TypeError.
            return PartitionColumn(vid, self)
        else:
            return PartitionColumn(self.table.column(vid), self)

    def dimension(self, vid):
        """Return a dimension, given its vid or another reference"""
        from ambry.orm import Column
        if isinstance(vid, PartitionColumn):
            return vid
        elif isinstance(vid, Column):
            # BUGFIX: PartitionColumn requires the partition argument; calling it
            # with only the column raised a TypeError.
            return PartitionColumn(vid, self)
        else:
            return PartitionColumn(self.table.column(vid), self)

    @property
    def primary_measures(self):
        """Iterate over the primary measures, columns which do not have a parent"""
        return [c for c in self.measures if not c.parent]

    @property
    def dict(self):
        d = self.detail_dict
        d['dimension_sets'] = self.enumerate_dimension_sets()
        return d

    def dataframe(self, measure, p_dim, s_dim=None, filters=None, df_class=None):
        """Return a dataframe with a subset of the columns of the partition, including a measure and
        one or two dimensions. For dimensions that have labels, the labels are included.

        The returned dataframe will have extra properties to describe the conversion:

        * plot_axes: List of dimension names for the first and second axis
        * labels: The names of the label columns for the axes
        * filtered: The `filters` dict
        * floating: The names of primary dimensions that are not axes nor filtered

        There is also an iterator, `rows`, which returns the header and then all of the rows.

        :param measure: The column names of one or more measures
        :param p_dim: The primary dimension. This will be the index of the dataframe.
        :param s_dim: a secondary dimension. The returned frame will be unstacked on this dimension
        :param filters: A dict of column names, mapped to a column value, indicating rows to select. A
            row that passes the filter must have the values for all given rows; the entries are ANDed.
        :param df_class: Dataframe class to construct.
        :return: a Dataframe, with extra properties
        """
        import numpy as np

        # BUGFIX: `filters` used to default to a shared mutable {}.
        if filters is None:
            filters = {}

        measure = self.measure(measure)
        p_dim = self.dimension(p_dim)

        assert p_dim

        if s_dim:
            s_dim = self.dimension(s_dim)

        columns = set([measure.name, p_dim.name])

        if p_dim.label:
            # For geographic datasets, also need the gvid
            if p_dim.geoid:
                columns.add(p_dim.geoid.name)
            columns.add(p_dim.label.name)

        if s_dim:
            columns.add(s_dim.name)
            if s_dim.label:
                columns.add(s_dim.label.name)

        def maybe_quote(v):
            # Quote strings so they survive being embedded in the eval'd source.
            from six import string_types
            if isinstance(v, string_types):
                return '"{}"'.format(v)
            else:
                return v

        # Create the predicate to filter out the filtered dimensions
        if filters:
            selected_filters = []
            for k, v in filters.items():
                if isinstance(v, dict):
                    # The filter is actually the whole set of possible options, so
                    # just select the first one.
                    # BUGFIX: dict.keys() is not subscriptable on Python 3.
                    v = list(v.keys())[0]
                selected_filters.append("row.{} == {}".format(k, maybe_quote(v)))
            code = ' and '.join(selected_filters)
            predicate = eval('lambda row: {}'.format(code))
        else:
            code = None

            def predicate(row):
                return True

        df = self.analysis.dataframe(predicate, columns=columns, df_class=df_class)

        if df is None or df.empty or len(df) == 0:
            return None

        # So we can track how many records were aggregated into each output row
        df['_count'] = 1

        def aggregate_string(x):
            return ', '.join(set(str(e) for e in x))

        agg = {
            '_count': 'count',
        }

        for col_name in columns:
            c = self.column(col_name)

            # The primary and secondary dimensions are put into the index by groupby
            if c.name == p_dim.name or (s_dim and c.name == s_dim.name):
                continue

            # FIXME! This will only work if the child is only one level from the parent. Should
            # have an accessor for the top level.
            if c.parent and (c.parent == p_dim.name or (s_dim and c.parent == s_dim.name)):
                continue

            if c.is_measure:
                agg[c.name] = np.mean

            if c.is_dimension:
                agg[c.name] = aggregate_string

        plot_axes = [p_dim.name]

        if s_dim:
            plot_axes.append(s_dim.name)

        df = df.groupby(list(columns - set([measure.name]))).agg(agg).reset_index()

        df._metadata = ['plot_axes', 'filtered', 'floating', 'labels', 'dimension_set', 'measure']
        df.plot_axes = [c for c in plot_axes]
        df.filtered = filters

        # Dimensions that are not specified as axes nor filtered
        df.floating = list(set(c.name for c in self.primary_dimensions) -
                           set(df.filtered.keys()) -
                           set(df.plot_axes))

        df.labels = [self.column(c).label.name if self.column(c).label else c for c in df.plot_axes]
        df.dimension_set = self.dimension_set(p_dim, s_dim=s_dim)
        df.measure = measure.name

        def rows(self):
            yield ['id'] + list(df.columns)
            for t in df.itertuples():
                yield list(t)

        # Really should not do this, but I don't want to re-build the dataframe with another
        # class
        df.__class__.rows = property(rows)

        return df

    def dimension_set(self, p_dim, s_dim=None, dimensions=None, extant=None):
        """Return a dict that describes the combination of one or two dimensions, for a plot.

        :param p_dim: the primary dimension
        :param s_dim: an optional secondary dimension
        :param dimensions: candidate dimensions; defaults to the primary dimensions
        :param extant: set of keys already produced, used to suppress duplicates
        :return: a dict describing the dimension set, or None if the set is a duplicate or invalid
        """
        # BUGFIX: `extant` used to default to a single shared mutable set(), so keys
        # accumulated across unrelated calls and a repeat call (e.g. from dataframe())
        # wrongly returned None.
        if extant is None:
            extant = set()

        if not dimensions:
            dimensions = self.primary_dimensions

        key = p_dim.name
        if s_dim:
            key += '/' + s_dim.name

        # Ignore if the key already exists or the primary and secondary dims are the same
        if key in extant or p_dim == s_dim:
            return

        # Don't allow geography to be a secondary dimension. It must either be a primary dimension
        # ( to make a map ) or a filter, or a small-multiple
        if s_dim and s_dim.valuetype_class.is_geo():
            return

        extant.add(key)

        filtered = {}
        for d in dimensions:
            if d != p_dim and d != s_dim:
                # Materialize the view so the result is a plain list on Python 3 too.
                filtered[d.name] = list(d.pstats.uvalues.keys())

        if p_dim.valuetype_class.is_time():
            value_type = 'time'
            chart_type = 'line'
        elif p_dim.valuetype_class.is_geo():
            value_type = 'geo'
            chart_type = 'map'
        else:
            value_type = 'general'
            chart_type = 'bar'

        return dict(
            key=key,
            p_dim=p_dim.name,
            p_dim_type=value_type,
            p_label=p_dim.label_or_self.name,
            s_dim=s_dim.name if s_dim else None,
            s_label=s_dim.label_or_self.name if s_dim else None,
            filters=filtered,
            chart_type=chart_type
        )

    def enumerate_dimension_sets(self):
        """Enumerate every valid single- and two-dimension combination for plotting."""
        dimension_sets = {}
        dimensions = self.primary_dimensions
        extant = set()

        for d1 in dimensions:
            ds = self.dimension_set(d1, None, dimensions, extant)
            if ds:
                dimension_sets[ds['key']] = ds

        for d1 in dimensions:
            for d2 in dimensions:
                # Always use the higher-cardinality dimension as the primary.
                if d2.cardinality >= d1.cardinality:
                    d1, d2 = d2, d1

                ds = self.dimension_set(d1, d2, dimensions, extant)
                if ds:
                    dimension_sets[ds['key']] = ds

        return dimension_sets
class ColumnProxy(PartitionProxy):
    """A PartitionProxy that also records which partition the proxied column belongs to."""

    def __init__(self, obj, partition):
        # object.__setattr__ bypasses the proxy's attribute forwarding.
        object.__setattr__(self, "_partition", partition)
        object.__setattr__(self, "_obj", obj)
MAX_LABELS = 75 # Maximum number of uniques records before it's assume that the values aren't valid labels
class PartitionColumn(ColumnProxy):
    """A proxy on the Column that links a Column to a Partition, for direct access to the stats
    and column labels"""

    def __init__(self, obj, partition):
        super(PartitionColumn, self).__init__(obj, partition)
        # Bypass the proxy's __setattr__ so pstats lives on the proxy, not the column.
        object.__setattr__(self, "pstats", partition.stats_dict[obj.name])

    @property
    def children(self):
        """Return the table's other columns that have this column as a parent, excluding labels."""
        # BUGFIX: this used to iterate `self.children` — i.e. this very property —
        # causing infinite recursion. Iterate the wrapped column's children instead.
        # NOTE(review): label exclusion mentioned in the docstring is assumed to be
        # handled by the underlying Column.children — confirm against ambry.orm.
        for child in self._obj.children:
            yield PartitionColumn(child, self._partition)

    @property
    def label(self):
        """Return the first child of the column that is marked as a label."""
        for c in self.table.columns:
            if c.parent == self.name and 'label' in c.valuetype:
                return PartitionColumn(c, self._partition)

    @property
    def value_labels(self):
        """Return a map of column code values mapped to labels, for columns that have a label column.

        If the column is not associated with a label column, it returns an identity map.

        WARNING! This reads the whole partition, so it is really slow.
        """
        from operator import itemgetter

        card = self.pstats.nuniques

        if self.label:
            ig = itemgetter(self.name, self.label.name)
        elif self.pstats.nuniques < MAX_LABELS:
            # No label column: build an identity map, value -> value.
            ig = itemgetter(self.name, self.name)
        else:
            return {}

        label_set = set()

        for row in self._partition:
            label_set.add(ig(row))
            # Stop early once every unique value has been seen.
            if len(label_set) >= card:
                break

        d = dict(label_set)

        assert len(d) == len(label_set)  # Else the label set has multiple values per key

        return d

    @property
    def cardinality(self):
        """Return the number of unique elements."""
        return self.pstats.nuniques

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self.name)
| CivicSpleen/ambry | ambry/orm/partition.py | partition.py | py | 48,749 | python | en | code | 5 | github-code | 36 |
10261032869 | # from multiprocessing import Process, Queue
from queue import Queue
import threading
from crawler.reviewCrawler import ReviewCrawler
from crawler.userCrawler import UserCrawler
import json
from GameListCrawler import getGameList
import time
from utils.redisUtis import RedisUtil
from utils.sqlUtils import dbconnector
from gameCrawler import GameCrawler
import requests
import properties
game_queue = Queue()
user_queue = Queue()
review_queue = Queue()
def game_consumer(game_queue,user_queue,review_queue):
while True:
game_info_str = game_queue.get(block=True)
try:
game_info = json.loads(game_info_str)
game_helper(game_queue = game_queue,user_queue = user_queue,review_queue = review_queue,id = game_info['id'], url = game_info['url'])
except Exception as e:
print("game_consumer_error:",game_info_str)
time.sleep(1)
def game_helper(game_queue,user_queue,review_queue,id, url):
# crawler review
review_queue.put(id)
redisUtil = RedisUtil()
if redisUtil.checkGameExist(id):
print("exist game"+str(id))
return
gameCrawler = GameCrawler()
gameCrawler.infoSave(id,url)
redisUtil.setGameExist(id)
def review_consumer(game_queue,user_queue,review_queue):
while True:
appid = review_queue.get(block=True)
try:
review_helper(game_queue = game_queue,user_queue = user_queue,review_queue = review_queue,appid = appid)
except Exception as e:
print("review_consumer_error:",appid)
time.sleep(1)
def review_helper(game_queue,user_queue,review_queue,appid):
rc = ReviewCrawler(appid)
reviews = rc.requestReview()
rc.saveReview()
for review in reviews:
steamid = review['steamid']
user_queue.put(steamid)
def user_consumer(game_queue,user_queue,review_queue):
while True:
steamid = user_queue.get(block=True)
try:
user_helper(game_queue = game_queue,user_queue = user_queue,review_queue = review_queue, steamid = steamid)
except Exception as e:
print("user_consumer_error:",steamid)
time.sleep(1)
def user_helper(game_queue,user_queue,review_queue,steamid):
uc = UserCrawler(steamid)
friendList = uc.requestFriendList()
uc.saveFriendList()
if friendList != None:
for friend in friendList:
user_queue.put(friend['steamid'])
ownedGameList = uc.requestOwnedGames()
uc.saveOwnedGames()
# put game task
if ownedGameList != None:
for game in ownedGameList:
url = "https://store.steampowered.com/app/" + str(game['appid'])
try:
response = requests.get(url, headers=properties.headers, timeout=10)
except Exception as e:
print("add owned game to gamelist error: no response and",e)
game_queue.put(json.dumps({"id": game['appid'], "url": url}))
def provider(game_queue):
sql = dbconnector()
start_games =[{"id":"10","url":"https://store.steampowered.com/app/10/CounterStrike/"},{"id":"20","url":"https://store.steampowered.com/app/20/Team_Fortress_Classic/"}]
for item in start_games:
game_info_str = json.dumps(item)
game_queue.put(game_info_str)
if __name__ == '__main__':
# redisUtil = RedisUtil()
game_consumer_num = 5
review_consumer_num = 5
user_consumer_num = 5
game_consumer_list = []
review_consumer_list = []
user_consumer_list = []
game_list_provider_threading = threading.Thread(target=getGameList, args=(game_queue,))
game_list_provider_threading.start()
print("start allocating threading")
for i in range(game_consumer_num):
game_consumer_process = threading.Thread(target=game_consumer, args=(game_queue, user_queue, review_queue,))
game_consumer_list.append(game_consumer_process)
game_consumer_process.start()
for i in range(user_consumer_num):
user_consumer_process = threading.Thread(target=user_consumer, args=(game_queue, user_queue, review_queue,))
user_consumer_list.append(user_consumer_process)
user_consumer_process.start()
for i in range(review_consumer_num):
reveiw_consumer_process = threading.Thread(target=review_consumer, args=(game_queue, user_queue, review_queue,))
review_consumer_list.append(reveiw_consumer_process)
reveiw_consumer_process.start()
| Alex1997222/dataming-on-steam | SteamCrawler/main.py | main.py | py | 4,442 | python | en | code | 2 | github-code | 36 |
31187142575 | def reverse(L, a):
n = len(L)
if a < n//2:
L[a], L[-1-a] = L[-1-a], L[a]
reverse(L, a+1)
L = list(input()) # 문자열을 입력받아 리스트로 변환
reverse(L, 0)
print(''.join(str(x) for x in L))
#재귀적으로 리스트 뒤집기를 한다면 양끝단 -> 그다음 -> 그다음 -> ... -> 가운데 순으로
#reverse를 호출하고, 결과값은 역순으로 나온다.
#재귀적 리스트 뒤집기의 바닥 조건은 a < n//2 이다. a가 n//2보다 크거나 같아지면 더 이상 reverse가 호출되지 않는다. | Ha3To/2022_2nd | python_workspace/Reverse_Str_Recursion.py | Reverse_Str_Recursion.py | py | 543 | python | ko | code | 0 | github-code | 36 |
12485539130 | num = int(input())
odd_sum = 0
max_odd = -999999999999999
min_odd = 999999999999999
even_sum = 0
max_even = -999999999999999
min_even = 999999999999999
for i in range(1, num + 1):
in_num = float(input())
if i % 2 != 0:
odd_sum += in_num
if in_num > max_odd:
max_odd = in_num
if in_num < min_odd:
min_odd = in_num
elif i % 2 == 0:
even_sum += in_num
if in_num > max_even:
max_even = in_num
if in_num < min_even:
min_even = in_num
print(f"OddSum={odd_sum:.2f},")
if min_odd == 999999999999999:
print("OddMin=No,")
else:
print(f"OddMin={min_odd:.2f},")
if max_odd == -999999999999999:
print("OddMax=No,")
else:
print(f"OddMax={max_odd:.2f},")
print(f"EvenSum={even_sum:.2f},")
if min_even == 999999999999999:
print("EvenMin=No,")
else:
print(f"EvenMin={min_even:.2f},")
if max_even == -999999999999999:
print("EvenMax=No")
else:
print(f"EvenMax={max_even:.2f}")
| SimeonTsvetanov/Coding-Lessons | SoftUni Lessons/Python Development/Python Basics April 2019/Lessons and Problems/11 - For Loop Exercise/03. Odd Even Position .py | 03. Odd Even Position .py | py | 1,054 | python | en | code | 9 | github-code | 36 |
25947439528 | import os
import sqlite3
from datetime import datetime, timedelta
import telebot
bot = telebot.TeleBot(os.getenv("BOT_TOKEN"))
memes_chat_id = int(os.getenv("MEMES_CHAT_ID"))
flood_thread_id = int(os.getenv("FLOOD_THREAD_ID", 1))
memes_thread_id = int(os.getenv("MEMES_THREAD_ID", 1))
conn = sqlite3.connect("memes.db", check_same_thread=False)
def main():
seven_days_ago = datetime.now() - timedelta(days=7)
query = "SELECT user_id, MAX(username), count(*) FROM memes_posts_v2 WHERE created_at > ? GROUP BY user_id ORDER BY 3 DESC, 3 DESC LIMIT 3"
rows = conn.execute(query, (seven_days_ago,)).fetchall()
msg = ["Количество сброшенных мемов\n"]
stack = ["🥉", "🥈", "🥇"]
for row in rows:
user_id, username, memes_count = row
message = "[{username}](tg://user?id={user_id}) {memes_count} - {medal}".format(
username=username,
user_id=user_id,
memes_count=memes_count,
medal=stack.pop(),
)
msg.append(message)
bot.send_message(
memes_chat_id,
"\n".join(msg),
message_thread_id=flood_thread_id,
parse_mode="Markdown",
)
if __name__ == "__main__":
main()
| dzaytsev91/tachanbot | cron_job_memes_count.py | cron_job_memes_count.py | py | 1,239 | python | en | code | 2 | github-code | 36 |
6084390921 | # is unique: Implement an algorithm to determine
# if a string has all unique characters. What if you
# cannot use additional data structures?
# since we check if characters in a string are not duplicated
# we can use a boolean hash map to check if that character
# already exists
def is_unique(string):
# ASCII -> we have base case: string length > 128 => return false
if len(string) > 128:
return False
# else, we first initialize a hash_map
# -> space complexity: O(N)
# then go through the string
string_hash_map = {}
# go through the string
for c in string:
# check if c is already in hash map
if c in string_hash_map:
return False
else:
string_hash_map[c] = True
return True
# Time Complexity: O(N) - N: length of string
# Space Complexity: O(128) - O(1)
print(is_unique('abn'))
# ------------- Better solution with O(1) space complexity -------------
# ------------ Hint: Bit Manipulation -------------- | phuclinh9802/data_structures_algorithms | chapter 1/1_1.py | 1_1.py | py | 1,011 | python | en | code | 0 | github-code | 36 |
12573577450 | def based(n, b, k):
a = [0] * k
if n < 0:
return 0
if b <= 1:
return 1
x = n
counter = 0
while n >= b:
q = n / b
t = n - q * b
a[counter] = t
n = q
counter += 1
a[counter] = n
final_num = ""
for i in range(counter):
h = a[counter-i]
final_num += str(h)
#print(h)
#print(a[0])
#print(b, x)
#return(counter+1)
return final_num + str(a[0])
"""
def to_decimal(number, base):
return sum([int(character) * base ** index for index,character in enumerate(str(number)[::-1])])
"""
def answer(n,b):
if n == 0 or n == 1:
return 1
if int(max(n)) > (b-1):
return 0
k = len(n)
print("N is: " + n)
print("B is: " + str(b))
print("K: " + str(k))
counter = 0
container = []
print("Started compution")
while counter < 50:
new_n = [int(i) for i in n]
x = list(new_n)
y = list(new_n)
x.sort(reverse=True)
y.sort()
x = ''.join(str(e) for e in x)
y = ''.join(str(e) for e in y)
print(x,y)
x = int(str(x),b)
y = int(str(y),b)
print("X | Y")
print(x,y)
z = x - y
z = int(str(z), 10)
print("Z: " + str(z))
z = based(z,b, k)
if len(str(z)) == k:
n = str(z)
else:
z = list(str(z))
while len(z) < k:
z.insert(0,0)
z = ''.join(str(e) for e in z)
n = str(z)
n = str(z)
print(n)
counter += 1
if n not in container:
container.append(str(n))
else:
last_num = container.index(str(n))
return len(container) - last_num
print("Counter: " + str(counter))
print(container)
#return len(set(container))
print(answer("6050", 3))
| AG-Systems/programming-problems | google-foobar/hey_i_already_did_that.py | hey_i_already_did_that.py | py | 1,896 | python | en | code | 10 | github-code | 36 |
18903357112 | from abc import ABCMeta
from json import dumps
from logging import getLogger
from uchicagoldrtoolsuite import log_aware
from ..materialsuite import MaterialSuite
__author__ = "Brian Balsamo, Tyler Danstrom"
__email__ = "balsamo@uchicago.edu, tdanstrom@uchicago.edu"
__company__ = "The University of Chicago Library"
__copyright__ = "Copyright University of Chicago, 2016"
__publication__ = ""
__version__ = "0.0.1dev"
log = getLogger(__name__)
class AccessionContainer(metaclass=ABCMeta):
"""
A Stage is a structure which holds an aggregates contents
as they are being processed for ingestion into long term storage
"""
@log_aware(log)
def __init__(self, identifier):
"""
Creates a new Stage
__Args__
param1 (str): The identifier that will be assigned to the Stage
"""
log.debug("Entering ABC init")
self._identifier = None
self._materialsuite_list = []
self._accessionrecord = []
self._adminnote = []
self._legalnote = []
self.set_identifier(identifier)
log.debug("Exiting ABC init")
@log_aware(log)
def __repr__(self):
attr_dict = {
'identifier': self.identifier,
'materialsuite_list': [str(x) for x in self.materialsuite_list],
'accessionrecord_list': [str(x) for x in self.accessionrecord_list],
'adminnote_list': [str(x) for x in self.adminnote_list],
'legalnote_list': [str(x) for x in self.legalnote_list]
}
return "<{} {}>".format(str(type(self)),
dumps(attr_dict, sort_keys=True))
@log_aware(log)
def get_identifier(self):
return self._identifier
@log_aware(log)
def set_identifier(self, identifier):
log.debug("{}({}) identifier being set to {}".format(
str(type(self)),
str(self.identifier), identifier)
)
self._identifier = identifier
log.debug(
"{} identifier set to {}".format(str(type(self)), identifier)
)
@log_aware(log)
def get_materialsuite_list(self):
return self._materialsuite_list
@log_aware(log)
def set_materialsuite_list(self, x):
self.del_materialsuite_list()
for y in x:
self.add_materialsuite(y)
@log_aware(log)
def del_materialsuite_list(self):
while self.materialsuite_list:
self.pop_materialsuite()
@log_aware(log)
def add_materialsuite(self, x):
if not isinstance(x, MaterialSuite):
raise ValueError()
self._materialsuite_list.append(x)
@log_aware(log)
def get_materialsuite(self, index):
return self.materialsuite_list[index]
@log_aware(log)
def pop_materialsuite(self, index=None):
if index is None:
self.materialsuite_list.pop()
else:
self.materialsuite_list.pop(index)
@log_aware(log)
def get_accessionrecord_list(self):
return self._accessionrecord
@log_aware(log)
def set_accessionrecord_list(self, acc_rec_list):
self.del_accessionrecord_list()
for x in acc_rec_list:
self.add_accessionrecord(x)
@log_aware(log)
def del_accessionrecord_list(self):
while self.get_accessionrecord_list():
self.pop_accessionrecord()
@log_aware(log)
def add_accessionrecord(self, accrec):
self._accessionrecord.append(accrec)
log.debug("Added accession record to {}({}): ({})".format(
str(type(self)),
self.identifier,
str(accrec))
)
@log_aware(log)
def get_accessionrecord(self, index):
return self.get_accessionrecord_list()[index]
@log_aware(log)
def pop_accessionrecord(self, index=None):
if index is None:
x = self.get_accessionrecord_list.pop()
else:
x = self.get_accessionrecord_list.pop(index)
log.debug("Popped accession record from {}({}): {}".format(
str(type(self)),
self.identifier,
str(x))
)
return x
@log_aware(log)
def get_adminnote_list(self):
return self._adminnote
@log_aware(log)
def set_adminnote_list(self, adminnotelist):
self.del_adminnote_list()
for x in adminnotelist:
self.add_adminnote(x)
@log_aware(log)
def del_adminnote_list(self):
while self.get_adminnote_list():
self.pop_adminnote()
@log_aware(log)
def add_adminnote(self, adminnote):
self.get_adminnote_list().append(adminnote)
log.debug("Added adminnote to {}({}): {}".format(
str(type(self)),
self.identifier,
str(adminnote))
)
@log_aware(log)
def get_adminnote(self, index):
return self.get_adminnote_list()[index]
@log_aware(log)
def pop_adminnote(self, index=None):
if index is None:
x = self.get_adminnote_list().pop()
else:
x = self.get_adminnote_list().pop(index)
log.debug("Popped adminnote from {}({}): {}".format(
str(type(self)),
self.identifier,
str(x))
)
return x
@log_aware(log)
def get_legalnote_list(self):
return self._legalnote
@log_aware(log)
def set_legalnote_list(self, legalnote_list):
self.del_legalnote_list()
for x in legalnote_list:
self.add_legalnote(x)
@log_aware(log)
def del_legalnote_list(self):
while self.get_legalnote_list():
self.pop_legalnote()
@log_aware(log)
def add_legalnote(self, legalnote):
self.get_legalnote_list().append(legalnote)
log.debug("Added legalnote to {}: {}".format(
str(type(self)),
str(legalnote))
)
@log_aware(log)
def get_legalnote(self, index):
return self.get_legalnote_list()[index]
@log_aware(log)
def pop_legalnote(self, index=None):
if index is None:
return self.get_legalnote_list().pop()
else:
return self.get_legalnote_list().pop(index)
identifier = property(get_identifier,
set_identifier)
materialsuite_list = property(get_materialsuite_list,
set_materialsuite_list,
del_materialsuite_list)
accessionrecord_list = property(get_accessionrecord_list,
set_accessionrecord_list,
del_accessionrecord_list)
adminnote_list = property(get_adminnote_list,
set_adminnote_list,
del_adminnote_list)
legalnote_list = property(get_legalnote_list,
set_legalnote_list,
del_legalnote_list)
| uchicago-library/uchicagoldr-toolsuite | uchicagoldrtoolsuite/bit_level/lib/structures/abc/accessioncontainer.py | accessioncontainer.py | py | 7,200 | python | en | code | 0 | github-code | 36 |
8757599845 | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import AccessError
class OFSaleConfiguration(models.TransientModel):
    """OpenFire additions to the sale settings wizard (Odoo 10 / Python 2).

    Each ``set_*`` method persists its field as an ``ir.values`` default;
    ``execute`` is overridden so that res.groups are only recomputed when a
    ``group_*`` setting actually changed (a large speed-up on big databases).
    """
    _inherit = 'sale.config.settings'
    of_deposit_product_categ_id_setting = fields.Many2one(
        'product.category',
        string=u"(OF) Catégorie des acomptes",
        help=u"Catégorie des articles utilisés pour les acomptes"
    )
    stock_warning_setting = fields.Boolean(
        string="(OF) Stock", required=True, default=False,
        help=u"Afficher les messages d'avertissement de stock ?"
    )
    of_position_fiscale = fields.Boolean(string="(OF) Position fiscale")
    of_allow_quote_addition = fields.Boolean(string=u"(OF) Devis complémentaires")
    group_of_afficher_total_ttc = fields.Boolean(
        string=u"(OF) Afficher les sous-totaux TTC par ligne de commande", default=False,
        help=u"Affiche les sous-totaux TTC par ligne de commande. Uniquement dans le formulaire et non dans les "
             u"rapports.", implied_group='of_sale.group_of_afficher_total_ttc', group='base.group_user')
    group_of_order_line_option = fields.Boolean(
        string=u"(OF) Options de ligne de commande", implied_group='of_sale.group_of_order_line_option',
        group='base.group_portal,base.group_user,base.group_public')
    group_of_sale_multiimage = fields.Selection([
        (0, 'One image per product'),
        (1, 'Several images per product')],
        string='(OF) Multi Images', implied_group='of_sale.group_of_sale_multiimage',
        group='base.group_portal,base.group_user,base.group_public')
    of_sale_print_multiimage_level = fields.Selection([
        (0, 'Do not print'),
        (1, 'Print on each line'),
        (2, 'Print on appendix')], string='(OF) Print product images on Sale Order')
    group_of_sale_print_one_image = fields.Boolean(
        'Print on each line', implied_group='of_sale.group_of_sale_print_one_image',
        group='base.group_portal,base.group_user,base.group_public')
    group_of_sale_print_multiimage = fields.Boolean(
        'Print on appendix', implied_group='of_sale.group_of_sale_print_multiimage',
        group='base.group_portal,base.group_user,base.group_public')
    group_of_sale_print_attachment = fields.Selection([
        (0, 'Do not print'),
        (1, 'Print on appendix')], string='(OF) Print product attachments on Sale Order',
        implied_group='of_sale.group_of_sale_print_attachment',
        group='base.group_portal,base.group_user,base.group_public')
    of_invoice_grouped = fields.Selection(selection=[
        (0, 'Groupement par partenaire + devise'),
        (1, 'Groupement par commande'), ], string=u"(OF) Facturation groupée")
    sale_show_tax = fields.Selection(selection_add=[('both', 'Afficher les sous-totaux HT (B2B) et TTC (B2C)')])
    of_propagate_payment_term = fields.Boolean(
        string=u"(OF) Terms of payment",
        help=u"Si décoché, les conditions de règlement ne sont pas propagées aux factures")
    of_sale_order_margin_control = fields.Boolean(
        string=u"(OF) Contrôle de marge", help=u"Activer le contrôle de marge à la validation des commandes")
    group_product_variant_specific_price = fields.Selection(selection=[
        (0, u"Handle pricing by attribute"),
        (1, u"Handle pricing by variant")], string=u"(OF) Product variant pricing",
        implied_group='of_product.group_product_variant_specific_price',
        group='base.group_portal,base.group_user,base.group_public')

    @api.multi
    def set_stock_warning_defaults(self):
        """Persist the stock-warning flag as an ir.values default."""
        return self.env['ir.values'].sudo().set_default(
            'sale.config.settings', 'stock_warning_setting', self.stock_warning_setting)

    @api.multi
    def set_of_deposit_product_categ_id_defaults(self):
        """Persist the deposit product category (id) as an ir.values default."""
        return self.env['ir.values'].sudo().set_default(
            'sale.config.settings', 'of_deposit_product_categ_id_setting', self.of_deposit_product_categ_id_setting.id)

    @api.multi
    def set_of_position_fiscale(self):
        """Toggle the 'fiscal position required' view and persist the flag."""
        view = self.env.ref('of_sale.of_sale_order_form_fiscal_position_required')
        if view:
            view.write({'active': self.of_position_fiscale})
        return self.env['ir.values'].sudo().set_default(
            'sale.config.settings', 'of_position_fiscale',
            self.of_position_fiscale)

    @api.multi
    def set_of_allow_quote_addition_defaults(self):
        """Persist the 'additional quotes' flag as an ir.values default."""
        return self.env['ir.values'].sudo().set_default(
            'sale.config.settings', 'of_allow_quote_addition', self.of_allow_quote_addition)

    @api.multi
    def set_of_invoice_grouped_defaults(self):
        """Persist the grouped-invoicing mode as an ir.values default."""
        return self.env['ir.values'].sudo().set_default(
            'sale.config.settings', 'of_invoice_grouped', self.of_invoice_grouped)

    @api.multi
    def set_of_sale_print_multiimage_level_defaults(self):
        """Persist the image printing level as an ir.values default."""
        return self.env['ir.values'].sudo().set_default(
            'sale.config.settings', 'of_sale_print_multiimage_level', self.of_sale_print_multiimage_level)

    @api.onchange('of_sale_print_multiimage_level')
    def onchange_of_sale_print_multiimage_level(self):
        """Derive the two printing group flags from the selected level."""
        self.group_of_sale_print_one_image = self.of_sale_print_multiimage_level == 1
        self.group_of_sale_print_multiimage = self.of_sale_print_multiimage_level == 2

    @api.onchange('sale_show_tax')
    def _onchange_sale_tax(self):
        # Erase and replace parent function: adds the 'both' mode which
        # enables the subtotal AND total price groups simultaneously.
        if self.sale_show_tax == "subtotal":
            self.update({
                'group_show_price_total': False,
                'group_show_price_subtotal': True,
            })
        elif self.sale_show_tax == "total":
            self.update({
                'group_show_price_total': True,
                'group_show_price_subtotal': False,
            })
        else:
            self.update({
                'group_show_price_total': True,
                'group_show_price_subtotal': True,
            })

    @api.multi
    def set_of_propagate_payment_term(self):
        """Persist the payment-term propagation flag as an ir.values default."""
        return self.env['ir.values'].sudo().set_default(
            'sale.config.settings', 'of_propagate_payment_term', self.of_propagate_payment_term)

    @api.multi
    def set_of_sale_order_margin_control(self):
        """Persist the margin-control flag as an ir.values default."""
        return self.env['ir.values'].sudo().set_default(
            'sale.config.settings', 'of_sale_order_margin_control', self.of_sale_order_margin_control)

    @api.multi
    def execute(self):
        """This function is called when the user validates the settings.

        Overridden to detect which ``group_*`` settings were actually modified
        and restrict the (expensive) group recomputation to those only.
        """
        self.ensure_one()
        if not self.env.user._is_superuser() and not self.env.user.has_group('base.group_system'):
            raise AccessError(_("This setting can only be enabled by the administrator, "
                                "please contact support to enable this option."))
        # Get the default values of the groups and check if the value has been changed
        groups_fields = [field_name for field_name in self.fields_get().keys() if field_name.startswith('group_')]
        salesettings_groups_cache = {
            field_name: default_value
            for field_name, default_value in self.default_get(self.fields_get().keys()).iteritems()
            if field_name.startswith('group_')}
        salesettings_groups_has_changed = [
            field_name
            for field_name in groups_fields
            if getattr(self, field_name) != salesettings_groups_cache[field_name]]
        self = self.with_context(active_test=False)
        classified = self._get_classified_fields()
        # default values fields
        IrValues = self.env['ir.values'].sudo()
        for name, model, field in classified['default']:
            if isinstance(self[name], models.BaseModel):
                if self._fields[name].type == 'many2one':
                    value = self[name].id
                else:
                    value = self[name].ids
            else:
                value = self[name]
            IrValues.set_default(model, field, value)
        # To avoid a very long time of computation (for database with a lot a Users/Groups), we don't want to recompute
        # the groups if they haven't been changed in the settings.
        if salesettings_groups_has_changed:
            # filter groups to recompute only modified ones
            only_changed_values = filter(
                lambda gval: gval and gval[0] in salesettings_groups_has_changed, classified['group'])
            if only_changed_values:
                with self.env.norecompute():
                    for name, groups, implied_group in only_changed_values:
                        if self[name]:
                            groups.write({'implied_ids': [(4, implied_group.id)]})
                        else:
                            groups.write({'implied_ids': [(3, implied_group.id)]})
                            implied_group.write({'users': [(3, user.id) for user in groups.mapped('users')]})
                self.recompute()
        # other fields: execute all methods that start with 'set_'
        for method in dir(self):
            if method.startswith('set_'):
                getattr(self, method)()
        # module fields: install/uninstall the selected modules
        to_install = []
        to_uninstall_modules = self.env['ir.module.module']
        lm = len('module_')
        for name, module in classified['module']:
            if self[name]:
                to_install.append((name[lm:], module))
            else:
                if module and module.state in ('installed', 'to upgrade'):
                    to_uninstall_modules += module
        if to_uninstall_modules:
            to_uninstall_modules.button_immediate_uninstall()
        action = self._install_modules(to_install)
        if action:
            return action
        if to_install or to_uninstall_modules:
            # After the uninstall/install calls, the registry and environments
            # are no longer valid. So we reset the environment.
            self.env.reset()
            self = self.env()[self._name]
        config = self.env['res.config'].next() or {}
        if config.get('type') not in ('ir.actions.act_window_close',):
            return config
        # force client-side reload (update user menu and current view)
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }

    @api.multi
    def action_printings_params(self):
        """Open the printing-parameters wizard in a modal dialog."""
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'of.sale.wizard.set.printing.params',
            'view_mode': 'form',
            'view_type': 'form',
            'target': 'new'
        }
| odof/openfire | of_sale/models/sale_config_settings.py | sale_config_settings.py | py | 10,803 | python | en | code | 3 | github-code | 36 |
24801071982 | import numpy as np
from scipy import spatial
import matplotlib.pyplot as plt
def fft_smoothing(coords, cutoff=0.1):
    """Low-pass filter a 2-D trajectory in the frequency domain.

    *coords* is an (n, 2) float array; its two columns are treated as the
    real and imaginary parts of one complex signal. Fourier components with
    |frequency| > *cutoff* (in cycles/sample, per ``np.fft.fftfreq``) are
    zeroed and the smoothed values are written back into *coords* in place.

    The cutoff used to be hard-coded at 0.1; it is now a parameter with the
    same default, so existing callers are unaffected.

    Returns the (mutated) coords array.
    """
    # TODO: a more principled smoothing procedure may be preferable
    signal = coords[:, 0] + 1j * coords[:, 1]
    # FFT and frequencies
    fft = np.fft.fft(signal)
    freq = np.fft.fftfreq(signal.shape[-1])
    # hard low-pass filter
    fft[np.abs(freq) > cutoff] = 0
    # IFFT back to the spatial domain
    signal_filt = np.fft.ifft(fft)
    coords[:, 0] = signal_filt.real
    coords[:, 1] = signal_filt.imag
    return coords
def pl_cytopath_alignment(adata, basis="umap", smoothing=False, figsize=(15,4), size = 3,
                          show=True, save=False,save_type='png', folder=""):
    """Plot per-step alignment scores, step times and cell-fate probabilities
    for every Cytopath trajectory of every end point cluster.

    Reads trajectory results from ``adata.uns['trajectories']`` /
    ``adata.uns['run_info']`` and the embedding from ``adata.obsm['X_'+basis]``.
    One three-panel figure is produced per trajectory; figures are optionally
    saved under *folder* and/or shown.
    """
    map_state = adata.obsm['X_'+basis]
    av_allign_score_glob=[]
    std_allign_score_glob=[]
    step_time = adata.uns['trajectories']['step_time']
    fate_prob = adata.uns['trajectories']['cell_fate_probability']
    # 'sequence' indexes rows of step_time/fate_prob; it advances once per
    # trajectory across all end point clusters.
    sequence=0
    # TODO: Separate per step average alignment score calculation from plotting
    for end_point_cluster in adata.uns['run_info']["end_point_clusters"]:
        # structured array rows belonging to this end point cluster
        trajectories = adata.uns['trajectories']["cells_along_trajectories_each_step"]\
                       [np.where(adata.uns['trajectories']["cells_along_trajectories_each_step"]["End point"]==end_point_cluster)[0]]
        for i in range(adata.uns['run_info']['trajectory_count'][end_point_cluster]):
            av_trajectories=trajectories[np.where(trajectories["Trajectory"]==i)[0]]
            av_allign_score=np.zeros((len(np.unique(av_trajectories["Step"]))))
            std_allign_score=np.zeros((len(np.unique(av_trajectories["Step"]))))
            # mean/std of the alignment score over all cells at each step
            for l in range(len(np.unique(av_trajectories["Step"]))):
                av_allign_score[l]=np.average((av_trajectories[np.where(av_trajectories["Step"]==l)[0]]["Allignment Score"]))
                std_allign_score[l]=np.std((av_trajectories[np.where(av_trajectories["Step"]==l)[0]]["Allignment Score"]))

            # Plotting
            path = folder+"_end_point_"+end_point_cluster+"_cytopath_"+str(i)+\
                   "occurance"+str(adata.uns['run_info']["trajectories_sample_counts"][end_point_cluster][i])+"."+save_type

            fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=figsize)
            # Panel 1: mean alignment score per step with a +/- std band
            ax1.plot(range(len(np.unique(av_trajectories["Step"]))), av_allign_score, color='black')
            ax1.fill_between(range(len(np.unique(av_trajectories["Step"]))),
                             av_allign_score+std_allign_score, av_allign_score-std_allign_score, facecolor='grey', alpha=0.6)
            ax1.set_ylabel('Mean/std. of alignment scores per step')
            ax1.set_xlabel('Steps')

            # Panel 2: step time of aligned cells on the embedding
            sc_step = ax2.scatter(map_state[:,0], map_state[:,1], alpha=0.6, s=size, color="whitesmoke")
            sc_step = ax2.scatter(map_state[:,0], map_state[:,1], alpha=0.9, s=size,
                                  vmin=0, vmax=np.nanmax(step_time), c=step_time[sequence,:], cmap='YlGnBu')
            fig.colorbar(sc_step, ax=ax2, label='Step time')
            ax2.set_ylabel(basis.upper()+' 2')
            ax2.set_xlabel(basis.upper()+' 1')
            ax2.set_title('End point: {}-{} Support: {}/{}'.format(end_point_cluster, i,
                          adata.uns['run_info']['trajectories_sample_counts'][end_point_cluster][i],
                          int(adata.uns['samples']['cell_sequences'].shape[0]/\
                          adata.uns['run_info']['end_point_clusters'].shape[0])))

            # Panel 3: cell fate probability on the embedding
            sc_score = ax3.scatter(map_state[:,0], map_state[:,1], alpha=0.6, s=size, color="whitesmoke")
            sc_score = ax3.scatter(map_state[:,0], map_state[:,1], alpha=0.9, s=size,
                                   vmin=0, vmax=1, c=fate_prob[sequence,:], cmap='Reds')
            fig.colorbar(sc_score, ax=ax3, label='Cell fate probability')
            ax3.set_ylabel(basis.upper()+' 2')
            ax3.set_xlabel(basis.upper()+' 1')

            # Overlay the trajectory itself. If it was not computed in this
            # basis but in PCA, map each PCA coordinate to its nearest cell
            # and use that cell's embedding coordinates instead.
            if basis in adata.uns['run_info']['projection_basis']:
                coords = np.array(adata.uns['trajectories']['trajectories_coordinates'][end_point_cluster]['trajectory_'+str(i)+'_coordinates'])
            elif ('pca' in adata.uns['run_info']['projection_basis']) and (basis != 'pca'):
                coords_ = np.array(adata.uns['trajectories']['trajectories_coordinates'][end_point_cluster]['trajectory_'+str(i)+'_coordinates'])
                cell_sequences=[]
                for j in range(len(coords_)):
                    cell_sequences.append(spatial.KDTree(adata.obsm['X_pca']).query(coords_[j])[1])
                coords = map_state[cell_sequences]
            if smoothing == True:
                coords = fft_smoothing(coords)
            ax2.plot(coords[:, 0], coords[:, 1], color='black')
            ax3.plot(coords[:, 0], coords[:, 1], color='black')
            plt.tight_layout()
            if save:
                fig.savefig(path, bbox_inches='tight', dpi=300)
            if show:
                plt.show()
            # End plotting
            sequence+=1

            av_allign_score_glob.append(av_allign_score)
            std_allign_score_glob.append(std_allign_score)
| aron0093/cytopath | cytopath/plotting_functions/plot_alignment.py | plot_alignment.py | py | 5,486 | python | en | code | 10 | github-code | 36 |
22365841878 | from django import forms
from .models import UserProfile
class UserProfileForm(forms.ModelForm):
    """ModelForm for UserProfile that shows placeholder text instead of labels."""
    class Meta:
        model = UserProfile
        exclude = ['user']

    def __init__(self, *args, **kwargs):
        """
        Add placeholders and classes, remove auto-generated
        labels and set autofocus on first field
        """
        super().__init__(*args, **kwargs)
        placeholders = {
            'user_phone_number': 'Phone Number',
            'user_zip': 'ZIP',
            'user_city': 'City',
            'user_address_line_1': 'Address line 1',
            'user_address_line_2': 'Address line 2',
            'user_state': 'State',
        }
        self.fields['user_phone_number'].widget.attrs['autofocus'] = True
        for name, field in self.fields.items():
            # user_country keeps its widget as-is (it has no placeholder entry)
            if name != 'user_country':
                field.widget.attrs['placeholder'] = placeholders[name]
            field.label = False
| folarin-ogungbemi/Gosip-Bookstore | profiles/forms.py | forms.py | py | 1,120 | python | en | code | 1 | github-code | 36 |
23713128222 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import yaml
from yaml.loader import SafeLoader
import subprocess
import netifaces
import argparse
import os
import time
import fcntl
'''yaml
if_list:
- ipaddr: 10.90.3.37
prefix: 24
mac: 52:54:84:11:00:00
gateway: 10.90.3.1
- ipaddr: 192.168.100.254
prefix: 24
mac: 52:54:84:00:08:38
eip_list:
- eip: 10.90.2.252
vm-ip: 192.168.100.192
- eip: 10.90.2.253
vm-ip: 192.168.100.193
port_forward_list:
# master1.kcp5-arm.iefcu.cn
- eip: 10.90.2.254
protocal: udp
port: 80
end_port: 82
vm-port: 80
vm-ip: 192.168.100.190
'''
# https://blog.csdn.net/sunny_day_day/article/details/119893768
def load_interface():
    """Return a mapping of MAC address -> interface name for local interfaces.

    Interfaces that expose no link-layer (AF_LINK) address are skipped; the
    original code raised KeyError on such interfaces.
    """
    macMap = {}
    for interface in netifaces.interfaces():
        link_addrs = netifaces.ifaddresses(interface).get(netifaces.AF_LINK)
        if not link_addrs:
            continue  # e.g. some tunnel/virtual devices have no MAC
        macMap[link_addrs[0]['addr']] = interface
    # print(macMap)
    return macMap
# Test cases to cover:
# missing config file; empty file; YAML syntax errors; missing configuration keys
# Read the configuration file
def load_config():
    """Load and return the parsed /etc/kylin-vr/kylin-vr.yaml configuration.

    Returns whatever yaml produces (a dict for a normal file, None for an
    empty one). The original trailing ``return nil`` was unreachable dead
    code and not valid Python (NameError if ever executed); it was removed.
    """
    # Open the file and load the file
    with open('/etc/kylin-vr/kylin-vr.yaml') as f:
        data = yaml.load(f, Loader=SafeLoader)
        # print(data)
        return data
# XXX: optimization - update the configuration files incrementally instead of rewriting them?
def one_interface_conf(ifname, ifconf, eip_list):
    """Render an ifcfg-style config for *ifname* into /var/run/kylin-vr/.

    *ifconf* supplies 'ipaddr'/'prefix' (and optionally 'gateway'); every
    address in *eip_list* is added as an extra /32 alias (IPADDR1..N).
    The file content written is identical to the original implementation;
    the handle is now managed with ``with`` so it cannot leak on error.
    """
    filename = '/var/run/kylin-vr/ifcfg-' + ifname
    with open(filename, 'w') as fp:
        fp.write('NAME=%s\nDEVICE="%s"\n' % (ifname, ifname))
        fp.write('''BOOTPROTO="none"
ONBOOT="yes"
TYPE="Ethernet"
IPV6INIT="no"
''')
        fp.write('''
IPADDR=%s
PREFIX=%s
''' % (ifconf['ipaddr'], ifconf['prefix']))
        if 'gateway' in ifconf:
            fp.write('GATEWAY=%s\n' % ifconf['gateway'])
        for i, eip in enumerate(eip_list):
            fp.write('''IPADDR%d=%s\nPREFIX%d=32\n''' % (i + 1, eip, i + 1))
def get_eip_list(data):
    """Collect the set of all external IPs referenced by EIP and port-forward rules."""
    eips = {item['eip'] for item in data['eip_list']}
    eips.update(rule['eip'] for rule in data['port_forward_list'])
    return eips
# XXX: handle invalid or missing parameters!
def gen_network_conf(data):
    """Render ifcfg files for every configured interface and install them.

    Mutates *data*: records the resolved interface name on each if_list entry
    and stores the WAN interface (the entry that has a 'gateway') as
    data['ifname']. Interfaces whose MAC is not found on this host are
    skipped. Finally replaces the live ifcfg-eth* files with the new ones.
    """
    macMap = load_interface()
    eip_list = []
    for i, if_conf in enumerate(data['if_list']):
        mac = if_conf['mac']
        if mac not in macMap:
            # debug log
            continue
        interface = macMap[mac]
        data['if_list'][i]['ifname'] = interface
        if 'gateway' in if_conf:  # the gateway interface is the physical WAN uplink
            data['ifname'] = interface
            # all EIPs are configured as /32 aliases on the WAN interface
            eip_list = get_eip_list(data)
        # print(mac)
        # print(interface)
        one_interface_conf(interface, if_conf, eip_list)
    # Finally, replace the live ifcfg-eth* configs with the freshly generated ones.
    subprocess.call("rm -f /etc/sysconfig/network-scripts/ifcfg-eth*", shell=True)
    subprocess.call("mv /var/run/kylin-vr/ifcfg-eth* /etc/sysconfig/network-scripts", shell=True)
# Generate EIP (1:1 NAT) rules
def gen_eip_iptable_conf(f, data):
    """Write a 1:1 SNAT/DNAT rule pair for every EIP entry onto stream *f*.

    Does nothing when no WAN interface ('ifname') was resolved.
    """
    # 1. the WAN interface could be derived from the default route:
    #    ip route | head -1 | grep default | awk '{print $5}'
    # 2. or resolved from the MAC address (what gen_network_conf does)
    if 'ifname' not in data:
        return
    wan = data['ifname']
    for item in data['eip_list']:
        public, private = item['eip'], item['vm-ip']
        f.write("-A POSTROUTING -s %s/32 -o %s -j SNAT --to-source %s\n" % (private, wan, public))
        f.write("-A PREROUTING -i %s -d %s/32 -j DNAT --to-destination %s\n" % (wan, public, private))
# Generate SNAT (masquerade) rules
def gen_snat_iptable_conf(f, data):
    """Enable masquerading on the default-gateway (WAN) interface, if resolved."""
    if 'ifname' not in data:
        return
    # outbound traffic from the internal network is source-NATed on the WAN side
    f.write('-A POSTROUTING -o %s -j MASQUERADE\n' % data['ifname'])
# Generate the port-forwarding iptables rules
def gen_port_forward_iptable_conf(f, data):
    """Write a DNAT/SNAT rule pair for every port-forward entry onto stream *f*."""
    for rule in data['port_forward_list']:
        eip = rule['eip']
        vm_ip = rule['vm-ip']
        proto = rule['protocal']  # (sic) key name comes from the config schema
        port = rule['port']
        vm_port = rule['vm-port']
        if 'end_port' in rule:
            # port-range mapping: forward the whole contiguous range unchanged
            end = rule['end_port']
            f.write("-A PREROUTING -p %s -d %s --dport %d:%d -j DNAT --to %s:%d-%d\n" % (proto, eip, port, end, vm_ip, port, end))
            f.write("-A POSTROUTING -p %s -s %s --sport %d:%d -j SNAT --to %s:%d-%d\n" % (proto, vm_ip, port, end, eip, port, end))
        else:
            # single-port mapping (external port may differ from the VM port)
            f.write("-A PREROUTING -p %s -d %s --dport %d -j DNAT --to %s:%d\n" % (proto, eip, port, vm_ip, vm_port))
            f.write("-A POSTROUTING -p %s -s %s --sport %d -j SNAT --to %s:%d\n" % (proto, vm_ip, vm_port, eip, port))
# iptables rule file generation for EIP, SNAT and port forwarding
def gen_iptable_conf(data):
    """Write the complete iptables-restore ruleset to /var/run/kylin-vr/iptable.txt.

    Emits ACCEPT policies for the filter/mangle tables, then the nat table
    with EIP, port-forward and SNAT rules. The file content is unchanged from
    the original; the handle is now managed with ``with`` so it is closed
    even if a rule generator raises.
    """
    with open("/var/run/kylin-vr/iptable.txt", 'w') as f:
        f.write('''
*filter
:INPUT ACCEPT
:FORWARD ACCEPT
:OUTPUT ACCEPT
COMMIT
*mangle
:PREROUTING ACCEPT
:INPUT ACCEPT
:FORWARD ACCEPT
:OUTPUT ACCEPT
:POSTROUTING ACCEPT
COMMIT
*nat
:PREROUTING ACCEPT
:INPUT ACCEPT
:OUTPUT ACCEPT
:POSTROUTING ACCEPT
''')
        f.write('\nCOMMIT\n')
# Re-apply the iptables configuration
def reload_iptable():
    """Atomically apply the generated ruleset via iptables-restore."""
    rc = subprocess.call(["iptables-restore", "/var/run/kylin-vr/iptable.txt"])
    print('iptable reload return %d' % rc)
# Re-apply the network configuration
def reload_network(data):
    """Reload NetworkManager connections and bring up each configured interface."""
    return_code = subprocess.call("nmcli c reload", shell=True)
    print('nmcli c reload return %d' % return_code)
    for if_conf in data['if_list']:
        if 'ifname' not in if_conf:
            continue  # interface MAC was not found on this host
        cmd = 'nmcli c up %s' % if_conf['ifname']
        return_code = subprocess.call(cmd, shell=True)
        print('up connection `%s` return %d' % (cmd, return_code))
def check_flag():
    """Return True once the runtime state directory was created by config_init()."""
    flag_dir = '/var/run/kylin-vr'
    return os.path.exists(flag_dir)
def gen_flag():
    """Create the runtime state directory (raises FileExistsError if present)."""
    os.makedirs('/var/run/kylin-vr')
def config_init():
    """First-boot initialisation: create the state dir, render network and
    iptables configs, and apply the iptables rules immediately.

    The ifcfg files are only written here (they take effect via the system's
    network service); connections are not brought up by this function.
    """
    gen_flag()
    data = load_config()
    if not data:
        print('load config failed!')
        return
    gen_network_conf(data)
    gen_iptable_conf(data)
    reload_iptable()
    pass
# Configuration reload after the system is already up
def config_reload(device):
    """Re-render network + iptables config and re-apply both on a live system.

    Refuses to run until config_init() has created the state directory.
    *device* is accepted for CLI symmetry but is currently unused —
    presumably reserved for per-interface reloads; confirm before relying on it.
    """
    if not check_flag():
        print('kylin-vr service is not started, can not reload config!')
        return
    data = load_config()
    if not data:
        print('load config failed!')
        return
    gen_network_conf(data)
    gen_iptable_conf(data)
    reload_network(data)
    reload_iptable()
    pass
def is_running(file):
    """Try to take an exclusive, non-blocking lock on *file*.

    Returns the open file object holding the lock, or None when another
    process already holds it. The caller must keep the returned handle
    referenced for as long as the lock is needed.

    Fixes vs. original: the bare ``except:`` is narrowed to OSError (what
    fcntl.lockf raises on contention), and the file handle is closed on
    failure instead of being leaked on every retry.
    """
    fd = open(file, "w")
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        fd.close()  # don't leak the handle when the lock is already taken
        return None
    return fd
def get_lock():
    """Block until the global single-instance lock is acquired.

    Polls is_running() once per second. The returned file object must stay
    referenced by the caller, otherwise the lock is released when it is
    garbage-collected.
    """
    lockfile = "/var/run/kylin-vr-running"
    while True:
        fd = is_running(lockfile)
        if fd:
            return fd
        time.sleep(1)
def main():
    """CLI entry point: parse the sub-command and dispatch to init/reload.

    The 'subnet' choice is accepted but its handler is commented out below.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--command', help='sub command, Note: the allocate command needs to be used with -d parameters', \
                        choices=['init', 'reload', 'subnet'], \
                        default='init')
    parser.add_argument('-d', '--device', help='the subnet command needs to specify interface name. Example: -c subnet -d eth2')
    args = parser.parse_args()
    # Take the file lock to guarantee only one instance runs at a time.
    # The returned handle must stay referenced for the process lifetime.
    a = get_lock()
    cmd = args.command if args.command else 'init'
    if 'reload' == cmd:
        config_reload(args.device)
    # elif 'subnet' == cmd:
    #     config_subnet(args.device)
    else: # init
        config_init()

if __name__ == '__main__':
    main()
| adamxiao/adamxiao.github.io | openstack/asserts/kylin-vr.py | kylin-vr.py | py | 7,510 | python | en | code | 0 | github-code | 36 |
15136620120 | import time
import warnings
import mmcv
import torch
from mmcv.runner import RUNNERS, IterBasedRunner, IterLoader, get_host_info
@RUNNERS.register_module()
class MultiTaskIterBasedRunner(IterBasedRunner):
    """Iteration-based runner for multi-task training.

    Unlike the stock IterBasedRunner, ``train`` receives a *list* of
    dataloaders and feeds one batch from each of them to
    ``model.train_step`` every iteration.
    """
    def train(self, data_loader, **kwargs):
        """Run one training iteration; *data_loader* is a list of IterLoaders."""
        self.model.train()
        self.mode = 'train'
        # epoch bookkeeping and hooks follow the first loader
        self.data_loader = data_loader[0]
        self._epoch = data_loader[0].epoch
        # one batch per task, in loader order
        data_batch = []
        for dl in data_loader:
            data_batch.append(next(dl))
        self.call_hook('before_train_iter')
        outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
        if not isinstance(outputs, dict):
            raise TypeError('model.train_step() must return a dict')
        if 'log_vars' in outputs:
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_train_iter')
        self._inner_iter += 1
        self._iter += 1

    def run(self, data_loaders, workflow, max_iters=None, **kwargs):
        """Start running.

        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, iters) to specify the
                running order and iterations. E.g, [('train', 10000),
                ('val', 1000)] means running 10000 iterations for training and
                1000 iterations for validation, iteratively.

        Note: unlike the parent class, len(data_loaders) may differ from
        len(workflow) — the 'train' phase consumes *all* loaders at once.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        # assert len(data_loaders) == len(workflow)
        if max_iters is not None:
            warnings.warn(
                'setting max_iters in run is deprecated, '
                'please set max_iters in runner_config', DeprecationWarning)
            self._max_iters = max_iters
        assert self._max_iters is not None, (
            'max_iters must be specified during instantiation')

        work_dir = self.work_dir if self.work_dir is not None else 'NONE'
        self.logger.info('Start running, host: %s, work_dir: %s',
                         get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s',
                         self.get_hook_info())
        self.logger.info('workflow: %s, max: %d iters', workflow,
                         self._max_iters)
        self.call_hook('before_run')

        iter_loaders = [IterLoader(x) for x in data_loaders]

        self.call_hook('before_epoch')

        while self.iter < self._max_iters:
            for i, flow in enumerate(workflow):
                self._inner_iter = 0
                mode, iters = flow
                if not isinstance(mode, str) or not hasattr(self, mode):
                    raise ValueError(
                        'runner has no method named "{}" to run a workflow'.
                        format(mode))
                iter_runner = getattr(self, mode)
                for _ in range(iters):
                    if mode == 'train' and self.iter >= self._max_iters:
                        break
                    # the whole list of loaders is passed through to train()
                    iter_runner(iter_loaders, **kwargs)

        time.sleep(1)  # wait for some hooks like loggers to finish
        self.call_hook('after_epoch')
        self.call_hook('after_run')
| CVIU-CSU/PSSNet | mmseg/core/runners/multi_task_iterbased_runner.py | multi_task_iterbased_runner.py | py | 3,324 | python | en | code | 1 | github-code | 36 |
# Kattis "Stopwatch": each input line is a timestamp at which the watch
# button was pressed; odd-indexed presses start the watch, even ones stop it.
test_cases = int(input())
all_times = []
displayed_time = 0
for test in range(test_cases):
    current_time = int(input())
    all_times.append(current_time)
# odd number of presses means the watch was started but never stopped
if test_cases % 2 != 0:
    print("still running")
# even number means we have to add up all the run intervals:
# take the difference between every stop press and the start press before it
# manually tracking index...maybe theres a better way? (enumerate would do)
else:
    index = 0
    for time in all_times:
        if index % 2 != 0:
            displayed_time += time - all_times[index-1]
        # print(time)
        index += 1
    print(displayed_time)
| EthanCloin/kattis_solutions | Stopwatch/stopwatch.py | stopwatch.py | py | 648 | python | en | code | 0 | github-code | 36 |
26590188131 | import imp
from ecs import World, Entity
from coolClasses import *
def entityAtPos(world : World, x, y, *Components) -> list[Entity]:
    """Return every entity sitting exactly at (x, y) that also has all *Components.

    ('Posistion' spelling comes from coolClasses.)
    """
    target = Posistion(x, y)
    return [ent for ent in world.getView(Posistion, *Components)
            if ent.getComponent(Posistion) == target]
6061528518 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 16:12:53 2020
@author: Monik
"""
import os, tifffile
import numpy as np
import matplotlib.pyplot as plt
import SOFI2_0_fromMatlab as sofi2
#%% helper functions
def where_max(a):
    """Return the index tuple of the (first) maximum element of array *a*.

    The stray debug ``print(a.shape)`` from the original was removed — the
    helper is otherwise pure.
    """
    return np.unravel_index(np.argmax(a, axis=None), a.shape)
#%% read data and show mean of each 500-frame block
data_dir='SOFI2-demo-data/'
T=20
data_timelapse=[np.array(tifffile.imread(os.path.join(data_dir, 'Block'+str(k)+'.tif')), dtype=np.float32) for k in range(1, T+1)]
# per-block average image (frames axis collapsed)
data_mean_series=np.array([np.mean(data_timelapse[k], axis=0) for k in range(T)])
plt.imshow(data_mean_series[-1])
plt.colorbar()
#%% calculate the 6th-order moment (M6) SOFI image for every block
m6_series=np.array([sofi2.M6(data_timelapse[k], verbose=True, comment=str(k)) for k in range(T)])
plt.imshow(m6_series[-1])
#%% here I need a better deconvolution!
m6_f=sofi2.filter_timelapse(sofi2.kill_outliers(m6_series))
m6_dcnv=np.array([sofi2.deconvolution(m6_f[k], verbose=True, comment=str(k)) for k in range(T)], dtype=np.float32)
m6_dcnv_f=sofi2.filter_timelapse(m6_dcnv)
#plt.imshow(m6_dcnv_f[-1])
#plt.colorbar()
plt.imshow(m6_dcnv_f[-1])
#%% local dynamic range compression (window size 25) against the mean image
m6_ldrc_series=np.array([sofi2.ldrc(m6_dcnv_f[k], data_mean_series[k], 25) for k in range(T)])
plt.imshow(m6_ldrc_series[-1])
plt.colorbar()
#%% alternative: ldrc without deconvolution
m6_ldrc_nodeconv=np.array([sofi2.ldrc(m6_f[k], data_mean_series[k], 25) for k in range(T)])
plt.imshow(m6_ldrc_series[-1])
plt.colorbar()
#%% save all result stacks as 16-bit TIFFs, rescaled to the full uint16 range
tifffile.imsave('demo_means'+'.tif', np.uint16(65500*data_mean_series/data_mean_series.max()))
tifffile.imsave('demo_M6_Deconv_ldrc'+'.tif', np.uint16(65500*m6_ldrc_series/m6_ldrc_series.max()))
tifffile.imsave('demo_M6_noDeconv_ldrc'+'.tif', np.uint16(65500*m6_ldrc_nodeconv/m6_ldrc_nodeconv.max()))
| pawlowska/SOFI2-Python-Warsaw | SOFI2_demo.py | SOFI2_demo.py | py | 1,760 | python | en | code | 0 | github-code | 36 |
37977450402 | import unittest
import sys
sys.path.insert(1, '..')
import easy_gui
class GUI(easy_gui.EasyGUI):
    """Minimal EasyGUI window exercising the 'date' (datepicker) widget."""
    def __init__(self):
        self.geometry('300x300')
        self.date = self.add_widget('date')
        self.add_widget(type='button', text='Print Date', command_func=self.print_date)

    def print_date(self, *args):
        """Button callback: print the currently selected date."""
        print(self.date.get())
class TestEasyGUI(unittest.TestCase):
    """Smoke test: constructing the GUI (and its datepicker) must not raise."""
    def test_gui_creation(self):
        gui = GUI()

if __name__ == '__main__':
    unittest.main()  # pass buffer=True to capture stdout during tests
| zachbateman/easy_gui | tests/test_datepicker.py | test_datepicker.py | py | 519 | python | en | code | 1 | github-code | 36 |
37463181641 | import Dataset as datos
import matplotlib.pyplot as plt
import numpy as np
import os
df_ventas = datos.get_df_ventas()
resample_meses = datos.get_resample_meses()
facturacion_por_juego = datos.get_facturacion_por_juego()
cantidad_ventas_por_juego = datos.get_cantidad_ventas_por_juego()
#----------------------------------------------------------------------------------------------------------------- save the data to Excel
datos.guardar_en_excel(datos.get_facturacion_por_juego())
datos.guardar_en_excel(datos.get_cantidad_ventas_por_juego())
#----------------------------------------------------------------------------------------------------------------- print the data to the Python console
print(facturacion_por_juego)
print(cantidad_ventas_por_juego)
#----------------------------------------------------------------------------------------------------------------- re-index revenue by game description
a = datos.get_facturacion_por_juego().reset_index()
a.set_index("descripcion", inplace=True)
a.name = "Comparación de la facturación de cada juego "
#---------------------------------------------------------------------------------------------------- bar chart comparing net revenue per game
plt.figure(figsize=[11,6]).suptitle("Comparación facturación (neta) de cada juego:")
plt.subplots_adjust(bottom=0.34, right=0.99, left=0.1, top=0.95)
plt.ylabel(a.columns[1] + " en $")
f1 = plt.bar(a.index, a["facturacion neta"], tick_label=a.index)
plt.grid(which="major", axis="y", color="black", alpha=0.15)
# es-AR number formatting: swap ',' and '.' via a temporary 'x' placeholder
plt.axhline(y=a["facturacion neta"].mean(),ls="--", label= "Promedio: $" +
            "{:,}".format(round(a["facturacion neta"].mean(),2)).replace(',','x').replace('.',',').replace('x','.')+
            " (no muy útil porque se comparan \n todos los juegos, que son muy distintos)")
plt.xticks( rotation=90)
plt.yticks(np.arange(0,a["facturacion neta"].max()*1.1,datos.escala_grafico(a["facturacion neta"].max())))
plt.ticklabel_format(axis="y",style="plain", useLocale=True,)
plt.legend(loc="upper right")
axes = plt.gca()
axes.set_ylim([0,a["facturacion neta"].max()*1.1])
plt.savefig("Gráficos generales/"+a.name+".jpg")
plt.show()
plt.close()
#--------------------------------------------------------------------------------------- one chart per game: units sold per month, saved to disk
contador=1
for juego in df_ventas.articulo.unique():
    juego = datos.get_tabla_juegos(juego)
    plt.figure().suptitle(juego)
    plt.xlabel(resample_meses.index.name)
    plt.ylabel("Número de juegos vendidos")
    f1 = plt.bar(resample_meses.index, resample_meses[juego], width=30, tick_label=resample_meses.index.strftime('%m/%y'))
    plt.grid(which="major", axis="y", color="black", alpha=0.15)
    # es-AR number formatting: swap ',' and '.' via a temporary 'x' placeholder
    plt.axhline(y=resample_meses[juego].mean(), ls="--",label="Promedio: $" +
                "{:,}".format(round(resample_meses[juego].mean(), 2)).replace(',', 'x').replace('.', ',').replace('x', '.'))
    plt.xticks(rotation=45)
    plt.yticks(np.arange(0, resample_meses[juego].max() * 1.1, datos.escala_grafico(resample_meses[juego].max())))
    plt.ticklabel_format(axis="y", style="plain", useLocale=True, )
    plt.legend(loc="upper right")
    axes = plt.gca()
    axes.set_ylim([0, resample_meses[juego].max() * 1.1])
    # annotate each bar with its value, anchored at the baseline
    for i in f1:
        x = i.get_x()
        y = i.get_height()
        ancho = i.get_width()
        plt.text(x + ancho / 2, 0, y, fontsize=10, color="black", ha="center")
    print(contador," Se guardó " + juego+".jpg" )
    contador+=1
    plt.savefig("Gráfico de cada juego/"+juego+".jpg")
    plt.close()
del contador
#-------------------------------------------------------------------------------------------------------------------------------- GUI image viewer
from tkinter import *
from PIL import ImageTk, Image
root = Tk()
root.title('CIENCIAS PARA TODOS - Estadísticas')
image_list = []
# keep a reference to every PhotoImage so Tk does not garbage-collect them
for foto in os.listdir("Gráfico de cada juego/"):
    aux = ImageTk.PhotoImage(Image.open("Gráfico de cada juego/"+foto))
    image_list.append(aux)
my_label = Label(image=image_list[0])
my_label.grid(row=0, column=0, columnspan=3)
def forward(image_number):
    """Show image *image_number* (1-based) and rebuild the navigation buttons."""
    global my_label
    global button_forward
    global button_back

    my_label.grid_forget()
    my_label = Label(image=image_list[image_number - 1])
    button_forward = Button(root, text=">>", command=lambda: forward(image_number + 1))
    button_back = Button(root, text="<<", command=lambda: back(image_number - 1))

    if image_number == len(image_list):
        # last image: ">>" gets disabled
        button_forward = Button(root, text=">>", state=DISABLED)

    my_label.grid(row=0, column=0, columnspan=3)
    button_back.grid(row=1, column=0)
    button_forward.grid(row=1, column=2)
def back(image_number):
    """Display image ``image_number`` (1-based) going backwards.

    Fix: mirrors forward() — the original built a command-bound "<<" Button
    and then overwrote it with a disabled one on the first image, leaking an
    orphan Tk widget per click at the boundary.
    """
    global my_label
    global button_forward
    global button_back
    my_label.grid_forget()
    my_label = Label(image=image_list[image_number - 1])
    button_forward = Button(root, text=">>", command=lambda: forward(image_number + 1))
    if image_number == 1:
        # First image reached: backward navigation is disabled.
        button_back = Button(root, text="<<", state=DISABLED)
    else:
        button_back = Button(root, text="<<", command=lambda: back(image_number - 1))
    my_label.grid(row=0, column=0, columnspan=3)
    button_back.grid(row=1, column=0)
    button_forward.grid(row=1, column=2)
# Initial button row: "back" starts disabled because image 1 is shown.
# NOTE(review): command=back passes no argument -- calling it would raise
# TypeError, but the button is created DISABLED so it can never fire.
button_back = Button(root, text="<<", command=back, state=DISABLED)
button_exit = Button(root, text="Exit Program", command=root.quit)
# The first forward click jumps to image 2.
button_forward = Button(root, text=">>", command=lambda: forward(2))
button_back.grid(row=1, column=0)
button_exit.grid(row=1, column=1)
button_forward.grid(row=1, column=2)
root.mainloop() | matinoseda/CPT-datos-ventas | Estadísticas Juegos.py | Estadísticas Juegos.py | py | 5,685 | python | es | code | 0 | github-code | 36 |
25796455279 | import itertools
import numpy as np
import collections
import tensorflow as tf
from PIL import Image
from keras.models import Model, load_model
from keras import backend as K
from integrations.diagnosis_nn.diagnosisNN import DiagnosisNN
from neural_network.models import NeuralNetwork
from neural_network.nn_manager.GeneratorNNQueryManager import GeneratorNNQueryManager
class DiagnosisQuery(GeneratorNNQueryManager):
    """Query manager that runs diagnosis predictions with a lazily loaded model.

    The Keras model is looked up in the ``NeuralNetwork`` table (latest row
    whose ``description`` matches ``db_description``) and loaded on first use.
    """

    # Grayscale 100x100 single-channel input expected by the network.
    input_shape = (100, 100, 1)
    db_description = 'diagnosis'

    def __init__(self):
        # Model and TF session are created lazily in create_model().
        self.model = None
        self.sess = None
        super().__init__()

    def transform_image(self, image):
        """Give a 2-D grayscale image an explicit trailing channel axis."""
        if len(image.shape) == 2:
            image = image.reshape((image.shape[0], image.shape[1], 1))
        return image

    def create_model(self) -> Model:
        """Load (once) and return the newest persisted model; None on failure."""
        if self.model is None:
            try:
                nn = NeuralNetwork.objects.all().filter(description=self.db_description)
                if nn.count() > 0:
                    nn = nn.latest('created')
                    # Keep a dedicated session so predictions can re-enter it.
                    self.sess = tf.Session()
                    K.set_session(self.sess)
                    self.model = load_model(nn.model.path)
                    return self.model
            except IOError as e:
                # Best effort: a missing/corrupt model file leaves model None.
                print(e)

    def model_predict(self, image_gen, batch=3):
        """Run the parent's prediction loop inside this instance's TF session.

        Fix: the original duplicated ``image_gen`` with ``itertools.tee`` but
        never consumed the copy, so ``tee`` buffered every yielded item — an
        unbounded memory leak. The generator is now passed through directly.
        """
        if self.model is None:
            self._init_model()  # provided by GeneratorNNQueryManager -- TODO confirm
        with self.sess.as_default():
            result = super().model_predict(image_gen, batch=batch)
        return result
| AkaG/inz_retina | integrations/diagnosis_nn/DiagnosisQuery.py | DiagnosisQuery.py | py | 1,527 | python | en | code | 0 | github-code | 36 |
75097728425 | import h5py
import numpy as np
import os
import matplotlib.pyplot as plt
from imblearn.over_sampling import SMOTE
import random
# A simple example of what SMOTE data generation might look like...
# Grab the data
path = os.path.join(os.getcwd(), 'batch_train_223.h5')
file = h5py.File(path, 'r')
keys = file.keys()
samples = [file[key] for key in keys]
# Lists to hold the images and the classes
images = []
classes = []
# Populate the images and classes with examples from the hdf5 file;
# each CBED stack is flattened into one feature column per sample.
for sample in samples[:20]:
    images.append(sample['cbed_stack'][()].reshape(-1, 1))
    classes.append(sample.attrs['space_group'].decode('UTF-8'))
# Display the original data
fig, axes = plt.subplots(2, 3, figsize=(12, 10))
for ax, cbed in zip(axes.flatten()[:3], samples[10]['cbed_stack']):
    ax.imshow(cbed**0.25)
for ax, cbed in zip(axes.flatten()[3:], samples[11]['cbed_stack']):
    ax.imshow(cbed**0.25)
title = "Space Group: {} - Original".format(samples[10].attrs['space_group'].decode('UTF-8'))
fig.suptitle(title, size=40)
plt.savefig('original.png')
# Change the dimension of images to a size that SMOTE() likes and call SMOTE()
images = np.squeeze(np.array(images))
# NOTE(review): ``ratio`` was renamed ``sampling_strategy`` in newer imblearn
# releases -- confirm against the pinned version before upgrading.
sm = SMOTE(random_state=42, k_neighbors=6, ratio={'123': 10, '2': 15})
images_res, classes_res = sm.fit_resample(images, classes)
# Reshape each resampled feature row back into a (3, 512, 512) CBED stack.
# Fix: the original loop variable shadowed the list it iterated
# (``for image_res_list in image_res_list``), which only worked by accident.
images_final = []
for flat_sample in images_res.tolist():
    images_final.append(np.reshape(flat_sample, (3, 512, 512)))
# Generate random numbers to display the generated images
listNum = random.sample(range(20, 25), 4)
# Display the synthetic images.
# Fix: the original indexed images_final[listNum[0]] for all four rows,
# showing the same sample four times instead of the four sampled indices.
fig, axes = plt.subplots(4, 3, figsize=(12, 10))
for ax, cbed in zip(axes.flatten()[:3], images_final[listNum[0]]):
    ax.imshow(cbed**0.25)
for ax, cbed in zip(axes.flatten()[3:], images_final[listNum[1]]):
    ax.imshow(cbed**0.25)
for ax, cbed in zip(axes.flatten()[6:], images_final[listNum[2]]):
    ax.imshow(cbed**0.25)
for ax, cbed in zip(axes.flatten()[9:], images_final[listNum[3]]):
    ax.imshow(cbed**0.25)
# NOTE(review): the title still reports only the first sample's class; the
# four rows may now belong to different classes after the index fix.
title = "Space Group: {} - Generated".format(classes_res[listNum[0]])
fig.suptitle(title, size=40)
plt.savefig('generated.png')
| emilyjcosta5/datachallenge2 | train/testSMOTE.py | testSMOTE.py | py | 2,466 | python | en | code | 1 | github-code | 36 |
34684043114 | #!/usr/bin/env python3
from collections import deque
def search(lines, pattern, history=5):
    """Yield each line containing *pattern* with the lines seen before it.

    Generator version of ``grep -B``: for every matching line it yields
    ``(line, window)`` where *window* is a deque holding up to *history*
    of the immediately preceding lines.
    """
    window = deque(maxlen=history)
    for line in lines:
        if pattern in line:
            yield line, window
        # Record after the test so the window never contains the match itself.
        window.append(line)
if __name__ == '__main__':
    # Demo: print each 'python' line from somefile.txt preceded by up to
    # 5 lines of context, separated by a dashed rule (grep -B style).
    with open(r'somefile.txt') as f:
        for line, prelines in search(f, 'python', 5):
            for pline in prelines:
                print(pline)
            print(line)
            print('-' * 20)
| kelify/WorkProgram | CookBook-python3/c01/01.py | 01.py | py | 468 | python | en | code | 0 | github-code | 36 |
14061601965 | import binascii
import json
import logging
import cv2
import numpy as np
import requests
VIDEO_UPLOAD_URL = 'http://video-fs.like.video/upload_video.php'
IMAGE_UPLOAD_URL = 'http://img-fs.like.video/FileuploadDownload/upload_img.php'
logger = logging.getLogger(__name__)
def upload_video(video_bytes):
    """Upload raw video bytes to the CDN.

    Returns the uploaded URL tagged with the payload's CRC32 and a type
    marker, or None on HTTP error / any exception (which is logged).
    """
    try:
        resp = requests.post(VIDEO_UPLOAD_URL, files={'file': video_bytes})
        if resp.status_code != 200:
            return None
        base_url = json.loads(resp.text)['url']
        # Append the CRC32 so consumers can verify payload integrity.
        crc = binascii.crc32(video_bytes)
        return '{}?crc={}&type=5'.format(base_url, crc)
    except Exception as err:
        logger.error('upload_video failed, error info {}'.format(err))
        return None
def upload_image(image_bytes, req_name='default', ext='.jpg'):
    """Upload image bytes to the CDN; ``req_name='bigo_live'`` uses a
    dedicated snapshot host.

    Returns the CDN URL, or None on HTTP error / any exception (logged).
    """
    payload = {
        'file': ('image{}'.format(ext), image_bytes)
    }
    try:
        if req_name == 'bigo_live':
            resp = requests.post('http://snapshot.calldev.bigo.sg/upload_file.php', files=payload)
        else:
            resp = requests.post(IMAGE_UPLOAD_URL, files=payload)
        if resp.status_code != 200:
            return None
        return json.loads(resp.text)['url']
    except Exception as err:
        logger.error('upload_image failed, error info {}'.format(err))
        return None
def download_video(video_url):
    """Fetch a video URL; return its raw bytes, or None on any failure.

    Fix: the original re-checked ``resp.content is None`` after already
    guarding ``not resp.content`` — that branch was unreachable and has
    been removed.
    """
    try:
        resp = requests.get(video_url)
    except Exception as err:
        logger.error('download_video failed, video_url {}, error info {}'.format(video_url, err))
        return None
    # Treat any non-200 status or an empty body as a failed download.
    if resp.status_code != 200 or not resp.content:
        logger.error('download_video failed, video_url {}'.format(video_url))
        return None
    return resp.content
def download_image(image_url, decode=False, to_rgb=False):
    """Fetch an image URL; return decoded pixels (``decode=True``) or raw bytes.

    The payload is always run through ``cv2.imdecode`` so corrupt images are
    rejected even when only the raw bytes are requested. Failures are logged
    and reported as None.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0'
    }
    try:
        resp = requests.get(image_url, headers=headers)
    except Exception as err:
        logger.error('download_image failed, image_url {}, error info {}'.format(image_url, err))
        return None
    if resp.status_code != 200 or not resp.content:
        logger.error('download_image failed, image_url {}'.format(image_url))
        return None
    raw = resp.content
    decoded = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)
    if decoded is None:
        logger.error('download_image failed, empty image, image_url {}'.format(image_url))
        return None
    if not decode:
        return raw
    # OpenCV decodes to BGR; convert to RGB only when explicitly requested.
    return cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB) if to_rgb else decoded
if __name__ == '__main__':
    # Smoke test: round-trip an existing CDN image and video through upload.
    image_bytes = requests.get('http://img.like.video/asia_live/4h6/1Jgvll.jpg').content
    image_url = upload_image(image_bytes)
    print(image_url)
    video_bytes = requests.get(
        'http://video.like.video/asia_live/7h4/M0B/C9/D7/bvsbAF37MUGEev_7AAAAAGsyOC8464.mp4').content
    video_url = upload_video(video_bytes)
    print(video_url)
    # decode=True returns a numpy array; print its (H, W, C) shape.
    img = download_image('http://img.like.video/asia_live/4h6/1Jgvll.jpg', decode=True)
    print(img.shape)
| ThreeBucks/model-deploy | src/utils/cdn_utils.py | cdn_utils.py | py | 3,379 | python | en | code | 0 | github-code | 36 |
37943750143 | import signal
import sys
import math
import time
class _Getch:
    """Gets a single character from standard input. Does not echo to the
    screen."""
    def __init__(self):
        # Probe the Windows backend first; an ImportError (no msvcrt)
        # means we are on a Unix-like system.
        try:
            self.impl = _GetchWindows()
        except ImportError:
            self.impl = _GetchUnix()
    def __call__(self):
        # Delegate the actual read to the selected platform backend.
        return self.impl()
class _GetchUnix:
    """Unix backend: reads one raw character via tty/termios."""
    def __init__(self):
        # Imported only to fail fast at construction time when the
        # termios-based modules are unavailable.
        import tty, sys
    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode delivers each keypress immediately, without echo.
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read fails.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
class _GetchWindows:
    """Windows backend: reads one raw character via msvcrt."""
    def __init__(self):
        # Raises ImportError off-Windows, which _Getch uses for dispatch.
        import msvcrt
    def __call__(self):
        import msvcrt
        return msvcrt.getch()
getch = _Getch()
import Adafruit_CharLCD as LCD
#setup appropriate GPIO ports to appropriate inputs on display
lcd_rs = 25
lcd_en = 24
lcd_d4 = 23
lcd_d5 = 17
lcd_d6 = 21
lcd_d7 = 22
lcd_backlight = 4
# 16x2 character display.
lcd_columns = 16
lcd_rows = 2
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows, lcd_backlight)
# Echo every keypress to the LCD, wrapping to line 2 after 16 characters
# and clearing the screen once both lines are full.
char_count = 0
line_count = 1
choice = ''
while True:
    choice = getch()
    # NOTE(review): the counter is incremented before the character is
    # printed, so the wrap fires on the keypress after a line fills -- confirm
    # this is the intended off-by-one behavior on the hardware.
    char_count += 1
    if char_count == 16:
        if line_count == 2:
            lcd.clear()
            char_count = 0
            line_count = 1
        else:
            lcd.message("\n")
            char_count = 0
            line_count = 2
    # Ctrl-C arrives as a raw \x03 byte because the terminal is in raw mode.
    if choice == '\x03':
        sys.exit()
    lcd.message(choice)
| spectechular/RaspberryPi_16x2_write_message | lcd_test.py | lcd_test.py | py | 1,636 | python | en | code | 0 | github-code | 36 |
6811793818 | from random import random
import numpy as np
import time
from math import *
import os
import sys
sys.setrecursionlimit(10**6)
clusters = []
visible_cells = []
class Cluster:
    """Group 'visible' cells (value 0.5) of the global ``map`` into connected
    components and keep those that qualify as holes.

    Relies on module-level globals: ``map`` (2-D grid with 0.0 = unexplored,
    0.5 = visible, 1.0 = covered), ``visible_cells`` and ``clusters``.
    """

    # 8-connected neighbourhood, in the exact order the original code
    # checked neighbours (so covered/unexp list ordering is preserved).
    _NEIGHBOURS = [(-1, 0), (1, 0), (0, -1), (0, 1),
                   (-1, -1), (1, 1), (1, -1), (-1, 1)]

    def __init__(self, m, n):
        # Get the dimensions of the grid
        self.rows = m
        self.cols = n
        self.visited_map = np.zeros((m, n), dtype=bool)
        global clusters
        clusters = []

    def traverse(self, r, c):
        """Recursive 4-connected flood fill collecting visible (0.5) cells.

        NOTE(review): recursion depth can approach rows*cols, which is why
        the module raises sys.setrecursionlimit at import time.
        """
        # Out of bounds or already visited: stop.
        if r < 0 or r >= self.rows or c < 0 or c >= self.cols or self.visited_map[r][c]:
            return
        # Only visible cells belong to a component.
        if map[r][c] != 0.5:
            return
        self.visited_map[r][c] = True
        # Components store (col, row) pairs -- x/y order, not r/c.
        self.component.append((c, r))
        self.traverse(r + 1, c)  # down
        self.traverse(r - 1, c)  # up
        self.traverse(r, c + 1)  # right
        self.traverse(r, c - 1)  # left

    def make_clusters(self):
        """Flood-fill every visible cell into components; keep hole-like ones."""
        for (x, y) in visible_cells:
            # NOTE(review): entries are appended as (row, col) by
            # update_visible but consumed as (x, y) here -- verify convention.
            (r, c) = (y, x)
            if self.visited_map[r][c]:
                continue
            self.component = []
            self.traverse(r, c)
            if self.is_Hole(self.component):
                clusters.append(np.array(self.component))

    def is_Hole(self, component):
        """Return True when the component is an interior region whose
        8-neighbourhood is mostly covered (1.0) rather than unexplored (0.0).

        Any cell touching the map boundary disqualifies the whole component.
        Refactor: the original repeated the same neighbour check eight times;
        the loop below visits the same offsets in the same order, so the
        resulting classification is identical.
        """
        rows = len(map)
        cols = len(map[0])
        # Ensure each neighbouring cell is classified at most once.
        visited_map = np.zeros((rows, cols), dtype=bool)
        covered = []  # neighbours already covered (1.0)
        unexp = []    # neighbours still unexplored (0.0)
        for (r, c) in component:
            if not (0 < r < rows - 1 and 0 < c < cols - 1):
                # Boundary cell: not a hole.
                return False
            for dr, dc in self._NEIGHBOURS:
                nr, nc = r + dr, c + dc
                if visited_map[nr][nc]:
                    continue
                if map[nr][nc] == 1.0:
                    visited_map[nr][nc] = True
                    covered.append((nr, nc))
                elif map[nr][nc] == 0.0:
                    visited_map[nr][nc] = True
                    unexp.append((nr, nc))
        # A hole is mostly fenced in by covered cells.
        return len(unexp) < len(covered)
def update_visible(row, col, D, l=1):
    """Mark every unexplored cell within an ``l``-radius square of (row, col)
    as visible (0.5) in the global ``map`` and record it in ``visible_cells``.

    The square is clipped to the D x D grid bounds.
    """
    row_lo = int(max(0, row - l))
    row_hi = int(min(D, row + l + 1))
    col_lo = int(max(0, col - l))
    col_hi = int(min(D, col + l + 1))
    for r in range(row_lo, row_hi):
        for c in range(col_lo, col_hi):
            if map[r][c] == 0.0:
                map[r][c] = 0.5
                # NOTE(review): appended as (row, col) but consumed as (x, y)
                # by Cluster.make_clusters -- verify the intended convention.
                visible_cells.append((r, c))
def main(D,R,test):
global map
map = np.full ((D,D),0.0)
files = []
Prev_row = []
Prev_col = []
for r in range(R):
path = os.path.join(str(D)+'x'+str(D)+'_'+str(R)+'bots','TEST'+str(test),'WPts','robot_'+str(r))
files.append(open(path,'r'))
NewLine = files[r].readline()
row,col = int (NewLine.split(' ')[0]), int (NewLine.split(' ')[1])
Prev_row.append(row)
Prev_col.append(col)
update_visible(row,col,D)
while True:
line_check = False
for r in range(R):
(row,col) = Prev_row[r],Prev_col[r]
map[row][col] = 1.0
for r in range(R):
NewLine = files[r].readline()
if len(NewLine)>0:
line_check = True
row,col = int (NewLine.split(' ')[0]), int (NewLine.split(' ')[1])
update_visible(row,col,D)
Prev_row[r] = row
Prev_col[r] = col
else:
line_check = False
break
if(line_check==False):
break
# Command-line entry point: parse the run parameters for the replay.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-r', dest='num_robots', type=int, help='Number of robots')
parser.add_argument('-d', dest='dimension', type=int, help='Size of workspace')
parser.add_argument('-t', default=1, dest='test', type=int, help='test no')
args = parser.parse_args()
# argparse already converted these with type=int; the extra int() is a no-op.
R = int(args.num_robots)
D = int(args.dimension)
test = int(args.test)
# NOTE(review): main(D, R, test) is never invoked in the visible code --
# confirm the entry call was not lost.
| Luckykantnayak/uav-project-2 | performance_check.py | performance_check.py | py | 8,208 | python | en | code | 0 | github-code | 36 |
20940437621 | from collections import OrderedDict
import torch
def anchor_offset_to_midpoint_offset(anchor_offset: torch.Tensor, anchors: torch.Tensor):
    """Decode (dx, dy, dw, dh, da, db) regression offsets into midpoint-offset
    boxes (x, y, w, h, delta_alpha, delta_beta).

    anchor_offset: (B, 6*A, H, W); anchors: (B, 4*A, H, W).
    Returns a float tensor of shape (B, 6*A, H, W).
    """
    b, n, h, w = anchors.shape
    num_anchors = n // 4
    # Work with an explicit per-anchor axis: (B, A, C, H, W).
    offs = anchor_offset.reshape((b, num_anchors, 6, h, w))
    anch = anchors.reshape((b, num_anchors, 4, h, w))
    ax, ay = anch[:, :, 0], anch[:, :, 1]
    aw, ah = anch[:, :, 2], anch[:, :, 3]
    new_w = aw * torch.exp(offs[:, :, 2])
    new_h = ah * torch.exp(offs[:, :, 3])
    cx = offs[:, :, 0] * aw + ax
    cy = offs[:, :, 1] * ah + ay
    # The alpha/beta offsets are scaled by the *decoded* box size.
    d_alpha = offs[:, :, 4] * new_w
    d_beta = offs[:, :, 5] * new_h
    stacked = torch.stack((cx, cy, new_w, new_h, d_alpha, d_beta), dim=2)
    return torch.cat([stacked[:, i] for i in range(num_anchors)], dim=1).float()
def midpoint_offset_to_anchor_offset(midpoint_offset: torch.tensor, anchors: torch.tensor):
    """Encode midpoint-offset boxes back into (dx, dy, dw, dh, da, db)
    offsets relative to the anchors.

    Inverse of ``anchor_offset_to_midpoint_offset``; both arguments are
    (B, C*A, H, W) with C = 6 and 4 respectively. Returns float (B, 6*A, H, W).
    """
    b, n, h, w = anchors.shape
    num_anchors = n // 4
    mo = midpoint_offset.reshape((b, num_anchors, 6, h, w))
    anch = anchors.reshape((b, num_anchors, 4, h, w))
    # Alpha/beta are expressed as fractions of the box size.
    d_a = mo[:, :, 4] / mo[:, :, 2]
    d_b = mo[:, :, 5] / mo[:, :, 3]
    d_w = torch.log(mo[:, :, 2] / anch[:, :, 2])
    d_h = torch.log(mo[:, :, 3] / anch[:, :, 3])
    d_x = (mo[:, :, 0] - anch[:, :, 0]) / anch[:, :, 2]
    d_y = (mo[:, :, 1] - anch[:, :, 1]) / anch[:, :, 3]
    enc = torch.stack((d_x, d_y, d_w, d_h, d_a, d_b), dim=2)
    return torch.cat([enc[:, i] for i in range(num_anchors)], dim=1).float()
def midpoint_offset_to_anchor_offset_gt(midpoint_offset_gt: torch.tensor, tp_anchors: torch.tensor):
    """Encode ground-truth midpoint-offset boxes (N, 6) against their matched
    anchors (N, 4) into (N, 6) regression targets (dx, dy, dw, dh, da, db).

    Fix: removed the unused ``num_anchors`` local from the original.
    """
    # Alpha/beta targets are fractions of the ground-truth box size.
    d_a = midpoint_offset_gt[:, 4] / midpoint_offset_gt[:, 2]
    d_b = midpoint_offset_gt[:, 5] / midpoint_offset_gt[:, 3]
    d_w = torch.log(midpoint_offset_gt[:, 2] / tp_anchors[:, 2])
    d_h = torch.log(midpoint_offset_gt[:, 3] / tp_anchors[:, 3])
    d_x = (midpoint_offset_gt[:, 0] - tp_anchors[:, 0]) / tp_anchors[:, 2]
    d_y = (midpoint_offset_gt[:, 1] - tp_anchors[:, 1]) / tp_anchors[:, 3]
    return torch.stack((d_x, d_y, d_w, d_h, d_a, d_b), dim=1)
def midpoint_offset_to_vertices(midpoint_offset: torch.Tensor):
    """Convert midpoint-offset boxes (B, 6*A, H, W) into four oriented-box
    vertices per anchor, returned as float (B, 4*A, 2, H, W).
    """
    b, n, h, w = midpoint_offset.shape
    num_anchors = n // 6
    mo = midpoint_offset.reshape((b, num_anchors, 6, h, w))
    cx, cy = mo[:, :, 0], mo[:, :, 1]
    bw, bh = mo[:, :, 2], mo[:, :, 3]
    da, db = mo[:, :, 4], mo[:, :, 5]
    # One vertex on each edge of the enclosing axis-aligned box, displaced
    # by the alpha/beta offsets.
    v1 = torch.stack([cx + da, cy - bh / 2], dim=2)
    v2 = torch.stack([cx + bw / 2, cy + db], dim=2)
    v3 = torch.stack([cx - da, cy + bh / 2], dim=2)
    v4 = torch.stack([cx - bw / 2, cy - db], dim=2)
    verts = torch.stack((v1, v2, v3, v4), dim=2)
    return torch.cat([verts[:, i] for i in range(num_anchors)], dim=1).float()
def vertices_to_midpoint_offset(vertices: torch.Tensor):
    """Convert per-anchor vertex tensors (B, 4*A, 2, H, W) back to
    midpoint-offset form (B, 6*A, H, W).
    """
    b, n, _, h, w = vertices.shape
    num_anchors = n // 4
    verts = vertices.reshape((b, num_anchors, 4, 2, h, w))
    xs = verts[:, :, :, 0]
    ys = verts[:, :, :, 1]
    x_lo = torch.min(xs, dim=2)[0]
    x_hi = torch.max(xs, dim=2)[0]
    y_lo = torch.min(ys, dim=2)[0]
    y_hi = torch.max(ys, dim=2)[0]
    bw = x_hi - x_lo
    bh = y_hi - y_lo
    cx = x_lo + bw / 2
    cy = y_lo + bh / 2
    # The first vertex carries the horizontal offset, the second the vertical.
    d_a = verts[:, :, 0, 0] - cx
    d_b = verts[:, :, 1, 1] - cy
    mo = torch.stack((cx, cy, bw, bh, d_a, d_b), dim=2)
    return torch.cat([mo[:, i] for i in range(num_anchors)], dim=1)
def vertices_to_midpoint_offset_gt(vertices: torch.Tensor):
    """Convert ground-truth quadrilaterals (N, 4, 2), assumed to be in
    clockwise vertex order, into midpoint-offset boxes (N, 6).
    """
    count, _, _ = vertices.shape
    xs = vertices[:, :, 0]
    ys = vertices[:, :, 1]
    x_lo = torch.min(xs, dim=1)[0]
    x_hi = torch.max(xs, dim=1)[0]
    y_lo = torch.min(ys, dim=1)[0]
    y_hi = torch.max(ys, dim=1)[0]
    # argmin returns the *first* minimum, so ties resolve as in the original.
    top_idx = torch.argmin(ys, dim=1)
    rows = torch.arange(count)
    next_idx = (top_idx + 1) % 4  # clockwise successor of the top-left vertex
    bw = x_hi - x_lo
    bh = y_hi - y_lo
    cx = x_lo + bw / 2
    cy = y_lo + bh / 2
    d_a = vertices[rows, top_idx][:, 0] - cx
    d_b = vertices[rows, next_idx][:, 1] - cy
    return torch.stack((cx, cy, bw, bh, d_a, d_b), dim=1)
| Simon128/pytorch-ml-models | models/oriented_rcnn/encodings.py | encodings.py | py | 5,317 | python | en | code | 0 | github-code | 36 |
37099800243 | import pandas as pd
import pickle
from data import DATA_FILENAME, to_days_since_1998, datetime
def parse_date(string_value: str) -> datetime.date:
    """Parse a DD/MM/YYYY string into a ``datetime.date``.

    Surrounding whitespace is ignored. Returns None when the string is not a
    valid date in that format. Fix: the original annotated the return type as
    ``int``, but the function returns a date (or None).
    """
    try:
        return datetime.datetime.strptime(string_value.strip(), '%d/%m/%Y').date()
    except ValueError:
        return None
# Only the target column is needed to recover the normalization constant.
COLUMNS = ['ibovespa']
df = pd.read_csv(DATA_FILENAME, usecols=COLUMNS)
# Per-column maxima used to de-normalize the model's [0, 1] predictions.
max_values = {
    col: df[col].max()
    for col in COLUMNS
}
# Release the dataframe once the maxima are captured.
df = None
# Registry of supported prediction modes; only 'date' exists today.
MODELS = {
    'date': {
        'transform': parse_date,
        'normalize': to_days_since_1998,
        'file': 'svr_model.bin',
        'input_label': 'Entre com a data (DD/MM/YYYY): ',
        'error_label': 'A data informada não é valida! Por favor tente novamente...'
    },
}
print('''Esse programa não garante o seus resultados e não se responsabiliza pelo mesmos.
O modelo utilizado é fruto de um projeto de pesquisa com fins acadêmicos. Todo o projeto está disponível em: https://github.com/fernando7jr/py-ibov-regression
Funcionamento:
* Informe a data no formato DD/MM/YYYY.
* O programa calcula com base no modelo de aprendizado de máquina qual a pontuação possível de acordo com os paramêtros informados.
Pressione ^Z (CTRL+Z) ou ^C (CTRL+C) para sair a qualquer momento.
''')
model_config = MODELS['date']
# load the model
# NOTE(review): pickle.load executes arbitrary code from the file -- only
# load model files from a trusted source.
f = open(model_config['file'], 'rb')
model = pickle.load(f)
f.close()
# Interactive loop: read a date, normalize it, predict, de-normalize, print.
while True:
    value = input(model_config['input_label'])
    value = model_config['transform'](value)
    if value is None:
        print(model_config['error_label'])
        continue
    value_norm = model_config['normalize'](value)
    X = [[value_norm]]
    y = model.predict(X)
    # Model outputs are scaled to the historical maximum of the column.
    ibov = max_values['ibovespa'] * y[0]
    # NOTE(review): replace(".", ".") is a no-op -- was this meant to swap the
    # decimal separator to a comma (pt-BR formatting)? Confirm intent.
    print(f'De acordo com o modelo, o valor esperado paro IBOV é de {str(ibov).replace(".", ".")} pontos\n')
| fernando7jr/py-ibov-regression | ibov.py | ibov.py | py | 1,795 | python | pt | code | 0 | github-code | 36 |
34274019963 | import time
import multiprocessing as mp
def show_current_time():
    """Print the current wall-clock time once per second, forever."""
    while True:
        print("Текущее время:", time.strftime("%H:%M:%S"))
        time.sleep(1)
def show_message():
    """Print a kaomoji every three seconds, forever."""
    while True:
        print("(* ^ ω ^)")
        time.sleep(3)
if __name__ == "__main__":
    # Run the clock and the greeter in two separate OS processes.
    p1 = mp.Process(target=show_current_time)
    p2 = mp.Process(target=show_message)
    # start() launches each process (its target function).
    p1.start()
    p2.start()
    # join() blocks until the process finishes; both targets loop forever,
    # so this waits indefinitely (interrupt with Ctrl-C).
    p1.join()
    p2.join()
11892380680 | # Uses python3
import sys
import random
def partition3(a, l, r):
    """Three-way (Dutch national flag) partition of a[l..r] around pivot a[l].

    Elements smaller than the pivot end up on the left, larger ones on the
    right, and every element equal to the pivot lands in the middle run.
    Returns (first, last) indices of the equal-to-pivot region.
    """
    pivot = a[l]
    lt = l   # last index of the '< pivot' region
    gt = r   # everything after gt is '> pivot'
    i = lt
    while i <= gt:
        if a[i] < pivot:
            lt += 1
            a[i], a[lt] = a[lt], a[i]
        elif a[i] > pivot:
            a[i], a[gt] = a[gt], a[i]
            gt -= 1
            i -= 1  # re-examine the element just swapped in from the right
        i += 1
    # Move the pivot between the '<' region and the equal run.
    a[l], a[lt] = a[lt], a[l]
    return lt, gt
def partition2(a, l, r):
    """Lomuto-style partition of a[l..r] around pivot a[l].

    Returns the pivot's final index; elements <= pivot end up to its left.
    """
    pivot = a[l]
    boundary = l  # last index of the '<= pivot' region
    for i in range(l + 1, r + 1):
        if a[i] <= pivot:
            boundary += 1
            a[i], a[boundary] = a[boundary], a[i]
    # Put the pivot at the boundary between the two regions.
    a[l], a[boundary] = a[boundary], a[l]
    return boundary
def randomized_quick_sort(a, l, r):
    """In-place randomized quicksort of a[l..r] using 3-way partitioning.

    Fix: removed the leftover debug ``print`` calls that polluted stdout on
    every recursion level.
    """
    if l >= r:
        return
    # Move a random pivot to the front to defeat adversarial inputs.
    k = random.randint(l, r)
    a[l], a[k] = a[k], a[l]
    m, n = partition3(a, l, r)
    # Recurse only on the strictly-smaller and strictly-larger regions;
    # the equal run a[m..n] is already in its final place.
    randomized_quick_sort(a, l, m - 1)
    randomized_quick_sort(a, n + 1, r)
if __name__ == '__main__':
    #input = sys.stdin.read()
    # First token is the element count n, followed by the n elements.
    n, *a = list(map(int, input().split()))
    randomized_quick_sort(a, 0, n - 1)
    for x in a:
        print(x, end=' ')
| bandiatindra/DataStructures-and-Algorithms | Week 4/Improving Quick Sort.py | Improving Quick Sort.py | py | 1,430 | python | en | code | 3 | github-code | 36 |
8444325228 | # class s_(object):
import functools
import numbers
import operator
import numpy
import cupy
from cupy._creation import from_data
from cupy._manipulation import join
class AxisConcatenator(object):
    """Translates slice objects to concatenation along an axis.
    For detailed documentation on usage, see :func:`cupy.r_`.
    This implementation is partially borrowed from NumPy's one.
    """
    def _output_obj(self, obj, ndim, ndmin, trans1d):
        # Re-arrange axes so that an input promoted from ``ndim`` to
        # ``ndmin`` dimensions has its original axes placed at ``trans1d``.
        k2 = ndmin - ndim
        if trans1d < 0:
            trans1d += k2 + 1
        defaxes = list(range(ndmin))
        k1 = trans1d
        axes = defaxes[:k1] + defaxes[k2:] + \
            defaxes[k1:k2]
        return obj.transpose(axes)
    def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
        # axis: concatenation axis; ndmin: minimum dimensionality each
        # operand is promoted to; trans1d: where promoted axes are placed.
        self.axis = axis
        self.trans1d = trans1d
        self.matrix = matrix
        self.ndmin = ndmin
    def __getitem__(self, key):
        """Concatenate the operands in ``key`` along ``self.axis``.

        String directives and slices (NumPy's '2,3,4' / imaginary-step
        syntax) are not implemented; scalars are promoted to arrays and
        cast to the common result dtype of all operands.
        """
        trans1d = self.trans1d
        ndmin = self.ndmin
        objs = []
        arrays = []
        scalars = []
        if isinstance(key, str):
            raise NotImplementedError
        if not isinstance(key, tuple):
            key = (key,)
        for i, k in enumerate(key):
            if isinstance(k, slice):
                raise NotImplementedError
            elif isinstance(k, str):
                if i != 0:
                    raise ValueError(
                        'special directives must be the first entry.')
                raise NotImplementedError
            elif type(k) in numpy.ScalarType:
                # Remember scalar positions so they can be cast below.
                newobj = from_data.array(k, ndmin=ndmin)
                scalars.append(i)
            else:
                newobj = from_data.array(k, copy=False, ndmin=ndmin)
                if ndmin > 1:
                    ndim = from_data.array(k, copy=False).ndim
                    if trans1d != -1 and ndim < ndmin:
                        newobj = self._output_obj(newobj, ndim, ndmin, trans1d)
                arrays.append(newobj)
            objs.append(newobj)
        # Scalars participate in dtype resolution by raw value, not as arrays.
        final_dtype = numpy.result_type(*arrays, *[key[k] for k in scalars])
        if final_dtype is not None:
            for k in scalars:
                objs[k] = objs[k].astype(final_dtype)
        return join.concatenate(tuple(objs), axis=self.axis)
    def __len__(self):
        return 0
class CClass(AxisConcatenator):
    """Concatenator bound to the last axis (exposed as ``cupy.c_``);
    operands are promoted to at least 2-D with trans1d=0."""
    def __init__(self):
        super(CClass, self).__init__(-1, ndmin=2, trans1d=0)
c_ = CClass()
"""Translates slice objects to concatenation along the second axis.
This is a CuPy object that corresponds to :obj:`cupy.r_`, which is
useful because of its common occurrence. In particular, arrays will be
stacked along their last axis after being upgraded to at least 2-D with
1's post-pended to the shape (column vectors made out of 1-D arrays).
For detailed documentation, see :obj:`r_`.
This implementation is partially borrowed from NumPy's one.
Returns:
cupy.ndarray: Joined array.
.. seealso:: :obj:`numpy.c_`
Examples
--------
>>> a = cupy.array([[1, 2, 3]], dtype=np.int32)
>>> b = cupy.array([[4, 5, 6]], dtype=np.int32)
>>> cupy.c_[a, 0, 0, b]
array([[1, 2, 3, 0, 0, 4, 5, 6]], dtype=int32)
"""
class RClass(AxisConcatenator):
    """Concatenator with default settings (first axis), exposed as ``cupy.r_``."""
    def __init__(self):
        super(RClass, self).__init__()
r_ = RClass()
"""Translates slice objects to concatenation along the first axis.
This is a simple way to build up arrays quickly.
If the index expression contains comma separated arrays, then stack
them along their first axis.
This object can build up from normal CuPy arrays.
Therefore, the other objects (e.g. writing strings like '2,3,4',
or using imaginary numbers like [1,2,3j],
or using string integers like '-1') are not implemented yet
compared with NumPy.
This implementation is partially borrowed from NumPy's one.
Returns:
cupy.ndarray: Joined array.
.. seealso:: :obj:`numpy.r_`
Examples
--------
>>> a = cupy.array([1, 2, 3], dtype=np.int32)
>>> b = cupy.array([4, 5, 6], dtype=np.int32)
>>> cupy.r_[a, 0, 0, b]
array([1, 2, 3, 0, 0, 4, 5, 6], dtype=int32)
"""
def indices(dimensions, dtype=int):
    """Returns an array representing the indices of a grid.

    Computes an array where the subarrays contain index values 0,1,...
    varying only along the corresponding axis.

    Args:
        dimensions: The shape of the grid.
        dtype: Data type specifier. It is int by default.

    Returns:
        ndarray:
            The array of grid indices,
            ``grid.shape = (len(dimensions),) + tuple(dimensions)``.

    Examples
    --------
    >>> grid = cupy.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)

    .. seealso:: :func:`numpy.indices`
    """
    dims = tuple(dimensions)
    ndim = len(dims)
    out = cupy.empty((ndim,) + dims, dtype=dtype)
    for axis, extent in enumerate(dims):
        # A 1-D ramp reshaped so it varies only along its own axis and
        # broadcasts across the others.
        ramp_shape = (1,) * axis + (extent,) + (1,) * (ndim - axis - 1)
        out[axis] = cupy.arange(extent, dtype=dtype).reshape(ramp_shape)
    return out
def ix_(*args):
    """Construct an open mesh from multiple sequences.

    This function takes N 1-D sequences and returns N outputs with N
    dimensions each, such that the shape is 1 in all but one dimension,
    so that together they index the cross product:
    ``a[cupy.ix_([1,3],[2,5])]`` selects rows 1,3 and columns 2,5.

    Args:
        *args: 1-D sequences (integer indices or boolean masks).

    Returns:
        tuple of ndarrays: N arrays with N dimensions each, forming an
        open mesh.

    .. warning::
        This function may synchronize the device.

    .. seealso:: :func:`numpy.ix_`
    """
    # TODO(niboshi): Avoid nonzero which may synchronize the device.
    ndim = len(args)
    out = []
    for axis, seq in enumerate(args):
        arr = from_data.asarray(seq)
        if arr.ndim != 1:
            raise ValueError('Cross index must be 1 dimensional')
        if arr.size == 0:
            # Explicitly type empty arrays to avoid a float default dtype.
            arr = arr.astype(numpy.intp)
        if cupy.issubdtype(arr.dtype, cupy.bool_):
            # Boolean masks become the indices of their True entries.
            arr, = arr.nonzero()  # may synchronize
        shape = (1,) * axis + (arr.size,) + (1,) * (ndim - axis - 1)
        out.append(arr.reshape(shape))
    return tuple(out)
def ravel_multi_index(multi_index, dims, mode='wrap', order='C'):
    """
    Converts a tuple of index arrays into an array of flat indices, applying
    boundary modes to the multi-index.
    Args:
        multi_index (tuple of cupy.ndarray) : A tuple of integer arrays, one
            array for each dimension.
        dims (tuple of ints): The shape of array into which the indices from
            ``multi_index`` apply.
        mode ('raise', 'wrap' or 'clip'), optional: Specifies how out-of-bounds
            indices are handled.  Can specify either one mode or a tuple of
            modes, one mode per index:
            - *'raise'* -- raise an error
            - *'wrap'* -- wrap around (default)
            - *'clip'* -- clip to the range
            In 'clip' mode, a negative index which would normally wrap will
            clip to 0 instead.
        order ('C' or 'F'), optional: Determines whether the multi-index should
            be viewed as indexing in row-major (C-style) or column-major
            (Fortran-style) order.
    Returns:
        raveled_indices (cupy.ndarray): An array of indices into the flattened
            version of an array of dimensions ``dims``.
    .. warning::
        This function may synchronize the device when ``mode == 'raise'``.
    Notes
    -----
    Note that the default `mode` (``'wrap'``) is different than in NumPy. This
    is done to avoid potential device synchronization.
    Examples
    --------
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (7,6))
    array([22, 41, 37])
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (7,6),
    ...                        order='F')
    array([31, 41, 13])
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (4,6),
    ...                        mode='clip')
    array([22, 23, 19])
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,4,1]]), (4,4),
    ...                        mode=('clip', 'wrap'))
    array([12, 13, 13])
    >>> cupy.ravel_multi_index(cupy.asarray((3,1,4,1)), (6,7,8,9))
    array(1621)
    .. seealso:: :func:`numpy.ravel_multi_index`, :func:`unravel_index`
    """
    # One index array is required per output dimension.
    ndim = len(dims)
    if len(multi_index) != ndim:
        raise ValueError(
            "parameter multi_index must be a sequence of "
            "length {}".format(ndim))
    # Every dimension extent must be a plain Python integer (not an array).
    for d in dims:
        if not isinstance(d, numbers.Integral):
            raise TypeError(
                "{} object cannot be interpreted as an integer".format(
                    type(d)))
    # A single mode string applies to every axis; expand it to a per-axis
    # tuple so the main loop can zip over it uniformly.
    if isinstance(mode, str):
        mode = (mode, ) * ndim
    # The flat result is computed in int64; reject shapes whose total size
    # could not be represented in that dtype.
    if functools.reduce(operator.mul, dims) > cupy.iinfo(cupy.int64).max:
        raise ValueError("invalid dims: array size defined by dims is larger "
                         "than the maximum possible size")
    # Precompute the flattening stride of each axis: the number of flat
    # elements one step along that axis spans, for the requested layout.
    s = 1
    ravel_strides = [1] * ndim
    order = 'C' if order is None else order.upper()
    if order == 'C':
        # Row-major: the last axis is contiguous, so accumulate extents
        # from the right.
        for i in range(ndim - 2, -1, -1):
            s = s * dims[i + 1]
            ravel_strides[i] = s
    elif order == 'F':
        # Column-major: the first axis is contiguous, so accumulate extents
        # from the left.
        for i in range(1, ndim):
            s = s * dims[i - 1]
            ravel_strides[i] = s
    else:
        raise ValueError('order not understood')
    # Broadcast all per-axis index arrays to a common shape, which is also
    # the shape of the result.
    multi_index = cupy.broadcast_arrays(*multi_index)
    raveled_indices = cupy.zeros(multi_index[0].shape, dtype=cupy.int64)
    # Accumulate stride * index per axis, applying that axis's bounds mode.
    for d, stride, idx, _mode in zip(dims, ravel_strides, multi_index, mode):
        if not isinstance(idx, cupy.ndarray):
            raise TypeError("elements of multi_index must be cupy arrays")
        if not cupy.can_cast(idx, cupy.int64, 'same_kind'):
            raise TypeError(
                'multi_index entries could not be cast from dtype(\'{}\') to '
                'dtype(\'{}\') according to the rule \'same_kind\''.format(
                    idx.dtype, cupy.int64().dtype))
        idx = idx.astype(cupy.int64, copy=False)

        if _mode == "raise":
            # 'raise' must inspect device data, hence the possible sync.
            if cupy.any(cupy.logical_or(idx >= d, idx < 0)):
                raise ValueError("invalid entry in coordinates array")
        elif _mode == "clip":
            idx = cupy.clip(idx, 0, d - 1)
        elif _mode == 'wrap':
            idx = idx % d
        else:
            raise ValueError('Unrecognized mode: {}'.format(_mode))
        raveled_indices += stride * idx
    return raveled_indices
def unravel_index(indices, dims, order='C'):
    """Converts array of flat indices into a tuple of coordinate arrays.

    Args:
        indices (cupy.ndarray): An integer array whose elements are indices
            into the flattened version of an array of dimensions :obj:`dims`.
        dims (tuple of ints): The shape of the array to use for unraveling
            indices.
        order ('C' or 'F'): Determines whether the indices should be viewed as
            indexing in row-major (C-style) or column-major (Fortran-style)
            order.

    Returns:
        tuple of ndarrays:
            Each array in the tuple has the same shape as the indices array.

    Examples
    --------
    >>> cupy.unravel_index(cupy.array([22, 41, 37]), (7, 6))
    (array([3, 6, 6]), array([4, 5, 1]))
    >>> cupy.unravel_index(cupy.array([31, 41, 13]), (7, 6), order='F')
    (array([3, 6, 6]), array([4, 5, 1]))

    .. warning::
        This function may synchronize the device.

    .. seealso:: :func:`numpy.unravel_index`, :func:`ravel_multi_index`
    """
    order = 'C' if order is None else order.upper()
    # Coordinates are peeled off starting from the contiguous (fastest-
    # varying) axis, so row-major order walks the dims back-to-front.
    if order == 'C':
        axis_sizes = tuple(reversed(dims))
    elif order == 'F':
        axis_sizes = tuple(dims)
    else:
        raise ValueError('order not understood')

    if not cupy.can_cast(indices, cupy.int64, 'same_kind'):
        raise TypeError(
            'Iterator operand 0 dtype could not be cast '
            'from dtype(\'{}\') to dtype(\'{}\') '
            'according to the rule \'same_kind\''.format(
                indices.dtype, cupy.int64().dtype))
    if (indices < 0).any():  # synchronize!
        raise ValueError('invalid entry in index array')

    # Repeated divmod by each axis size extracts one coordinate per axis.
    # Rebinding (rather than in-place //=) leaves the caller's array intact.
    coords = []
    remaining = indices
    for size in axis_sizes:
        coords.append(remaining % size)
        remaining = remaining // size

    # Anything left over means a flat index exceeded prod(dims).
    if (remaining > 0).any():  # synchronize!
        raise ValueError('invalid entry in index array')

    if order == 'C':
        # Undo the back-to-front peeling so coordinates match dims order.
        coords = coords[::-1]
    return tuple(coords)
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    Assume `mask_func` is a function that, for a square array a of
    size ``(n, n)`` with a possible offset argument `k`, when called
    as ``mask_func(a, k)`` returns a new array with zeros in certain
    locations (functions like :func:`~cupy.triu` or :func:`~cupy.tril` do
    precisely this). Then this function returns the indices where the non-zero
    values would be located.

    Args:
        n (int): The returned indices will be valid to access arrays
            of shape (n, n).
        mask_func (callable): A function whose call signature is
            similar to that of :func:`~cupy.triu`, :func:`~tril`. That is,
            ``mask_func(x, k)`` returns a boolean array, shaped like
            `x`. `k` is an optional argument to the function.
        k (scalar): An optional argument which is passed through to
            `mask_func`. Functions like :func:`~cupy.triu`, :func:`~cupy.tril`
            take a second argument that is interpreted as an offset.

    Returns:
        tuple of arrays: The `n` arrays of indices corresponding to
        the locations where ``mask_func(np.ones((n, n)), k)`` is
        True.

    .. warning::
        This function may synchronize the device.

    .. seealso:: :func:`numpy.mask_indices`
    """
    # Apply the mask function to an all-ones square; the surviving non-zero
    # positions are exactly the indices the caller wants.
    ones_square = cupy.ones((n, n), dtype=cupy.int8)
    masked = mask_func(ones_square, k)
    return masked.nonzero()
# TODO(okuta): Implement diag_indices
# TODO(okuta): Implement diag_indices_from
def tril_indices(n, k=0, m=None):
    """Returns the indices of the lower triangular matrix.

    Here, the first group of elements contains row coordinates
    of all indices and the second group of elements
    contains column coordinates.

    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned
        indices will be valid.
    k : int, optional
        Diagonal above which to zero elements. `k = 0`
        (the default) is the main diagonal, `k < 0` is
        below it and `k > 0` is above.
    m : int, optional
        The column dimension of the arrays for which the
        returned arrays will be valid. By default, `m = n`.

    Returns
    -------
    y : tuple of ndarrays
        The indices for the triangle. The returned tuple
        contains two arrays, each with the indices along
        one dimension of the array.

    See Also
    --------
    numpy.tril_indices
    """
    # Boolean mask selecting the lower triangle (inclusive of diagonal k).
    lower_mask = cupy.tri(n, m, k=k, dtype=bool)
    # cupy.indices yields the row-index and column-index grids; filtering
    # each one by the mask gives the coordinate arrays.
    grids = cupy.indices(lower_mask.shape, dtype=int)
    return tuple(
        cupy.broadcast_to(grid, lower_mask.shape)[lower_mask]
        for grid in grids
    )
def tril_indices_from(arr, k=0):
    """Returns the indices for the lower-triangle of arr.

    Parameters
    ----------
    arr : cupy.ndarray
        A 2-D array; the returned indices are valid for arrays
        of the same shape (non-square shapes are supported).
    k : int, optional
        Diagonal offset.

    See Also
    --------
    numpy.tril_indices_from
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    return tril_indices(n_rows, k=k, m=n_cols)
def triu_indices(n, k=0, m=None):
    """Returns the indices of the upper triangular matrix.

    Here, the first group of elements contains row coordinates
    of all indices and the second group of elements
    contains column coordinates.

    Parameters
    ----------
    n : int
        The size of the arrays for which the returned indices will
        be valid.
    k : int, optional
        Refers to the diagonal offset. By default, `k = 0` i.e.
        the main diagonal. The positive value of `k`
        denotes the diagonals above the main diagonal, while the negative
        value includes the diagonals below the main diagonal.
    m : int, optional
        The column dimension of the arrays for which the
        returned arrays will be valid. By default, `m = n`.

    Returns
    -------
    y : tuple of ndarrays
        The indices for the triangle. The returned tuple
        contains two arrays, each with the indices along
        one dimension of the array.

    See Also
    --------
    numpy.triu_indices
    """
    # The upper triangle (from diagonal k up) is the complement of the
    # lower triangle below diagonal k, hence tri(..., k - 1) negated.
    upper_mask = ~cupy.tri(n, m, k=k - 1, dtype=bool)
    grids = cupy.indices(upper_mask.shape, dtype=int)
    return tuple(
        cupy.broadcast_to(grid, upper_mask.shape)[upper_mask]
        for grid in grids
    )
def triu_indices_from(arr, k=0):
    """Returns indices for the upper-triangle of arr.

    Parameters
    ----------
    arr : cupy.ndarray
        A 2-D array; the returned indices are valid for arrays
        of the same shape (non-square shapes are supported).
    k : int, optional
        Diagonal offset (see :func:`triu_indices` for details).

    Returns
    -------
    triu_indices_from : tuple of ndarrays
        Indices for the upper-triangle of `arr`.

    See Also
    --------
    numpy.triu_indices_from
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    return triu_indices(n_rows, k=k, m=n_cols)
| cupy/cupy | cupy/_indexing/generate.py | generate.py | py | 18,125 | python | en | code | 7,341 | github-code | 36 |
6108135387 | from django.db import models
from django.utils.translation import gettext_lazy as _
from solo.models import SingletonModel
class Configuration(SingletonModel):
    """Singleton Django model holding the Welkin API connection settings."""

    # Welkin organization name.
    tenant = models.CharField(max_length=255, help_text="Welkin organization name.")
    # The environment inside a Welkin organization.
    instance = models.CharField(
        max_length=255, help_text="The environment inside a Welkin organization."
    )
    # Welkin API client name (shown as "API client" in the admin).
    api_client = models.CharField(
        max_length=255, help_text="Welkin API client name.", verbose_name="API client"
    )
    # Welkin API client secret key.
    secret_key = models.CharField(
        max_length=255, help_text="Welkin API client secret key."
    )

    class Meta:
        verbose_name = _("configuration")

    def __str__(self):
        return "Welkin configuration"

    @classmethod
    def get_test_payload(cls):
        """Build a sample webhook payload using the stored tenant/instance.

        Placeholder strings stand in for the event-specific fields.
        """
        # NOTE(review): django-solo's documented accessor is cls.get_solo(),
        # which would create the row if missing; objects.get() raises
        # DoesNotExist instead — confirm which behavior is intended.
        config = cls.objects.get()
        payload = {
            "sourceId": "SOURCE_ID",
            "eventSubtype": "EVENT_SUBTYPE",
            "tenantName": config.tenant,
            "instanceName": config.instance,
            "patientId": "PATIENT_ID",
            "eventEntity": "EVENT_ENTITY",
            "sourceName": "SOURCE_NAME",
            "url": "URL",
        }
        return payload
| Lightmatter/django-welkin | django_welkin/models/configuration.py | configuration.py | py | 1,152 | python | en | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.