blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
040a421fd08c22b88661d55b4267176f9cdbbae8 | 161d43b73438c4423949f4d4898e44f015691a0e | /GaussianProcess/__init__.py | 41d152d0ef74cf3acec2895b134b155ca3abafde | [] | no_license | apetri/CFHTLens_analysis | a44f754114a6a6129088f0771cc558baed987462 | b19343b43b54870f7950bcd9ea76bbe829448c44 | refs/heads/master | 2020-05-21T22:06:24.551906 | 2017-12-14T16:17:08 | 2017-12-14T16:17:08 | 16,521,933 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | from .interpolation import *
__version__="0.1" | [
"apetri@phys.columbia.edu"
] | apetri@phys.columbia.edu |
25e219ef8afbaaf847e7238c1b86d7c0bd4e0452 | 83427c470b8e2ee4636a1e9aaa940cdefecc0c2b | /Baxter/Archivos_Hugo/baxter_gato/pantalla_gato.py | a4878c40de9df76719aae984989d08139aff8c0d | [] | no_license | Angelooocw/Documents | c67554f0fce582feb7ec6b3809cb0ea19c7c07a9 | 346ebf7cff48d517ee2f0b0838f66e22529a3f59 | refs/heads/master | 2020-03-31T10:56:50.905510 | 2019-04-17T02:15:21 | 2019-04-17T02:15:21 | 152,156,490 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,940 | py | #!/usr/bin/env
import rospy
import cv2
import cv_bridge
from sensor_msgs.msg import Image
import os
import numpy as np
class pantalla_gato():
    """Tic-tac-toe ("gato") board controller for Baxter's head display.

    Loads the board, marker and result-banner images from disk, composites
    moves into ``self.p_principal`` and publishes the frame to the
    ``/robot/xdisplay`` ROS topic through a latched publisher.
    """

    def __init__(self):
        home = os.getenv('HOME')  # base path taken from $HOME
        # Folder holding the game assets; edit if installed elsewhere
        home = home + '/ros_ws/gato'
        # Images: empty board, the two markers, and the result banners
        self.p_principal = cv2.imread(home+'/images/Gato.jpg')
        self.circulo = cv2.imread(home+'/images/Circulo.jpg')
        self.cruz = cv2.imread(home+'/images/Cruz.jpg')
        self.ganador_x = cv2.imread(home+'/images/GanadorX.jpg')
        self.ganador_o = cv2.imread(home+'/images/GanadorO.jpg')
        self.empate = cv2.imread(home+'/images/Empate.jpg')
        # Pixel dimensions: len(img) -> rows (height), len(img[0]) -> cols (width)
        # NOTE(review): assumes Circulo.jpg and Cruz.jpg have the same size,
        # since the circle's dimensions are reused when pasting the cross
        self.cir_altura_y = len(self.circulo)
        self.cir_ancho_x = len(self.circulo[0])
        self.gan_altura_y = len(self.ganador_x)
        self.gan_ancho_x = len(self.ganador_x[0])
        # Top-left (y, x) corner of each of the nine board cells, row-major
        x1,x2,x3 = 440,600,760
        y1,y2,y3 = 100,250,400
        self.pts = [[y1,x1],[y1,x2],[y1,x3],[y2,x1],[y2,x2],[y2,x3],[y3,x1],[y3,x2],[y3,x3]]
        # Latched publisher so the last published frame stays on the display
        self.pub = rospy.Publisher('/robot/xdisplay', Image, latch = True , queue_size = 10)
        self.publicador_a_pantalla()

    # Pixels are indexed as [y_downwards, x_rightwards]; in other words, the
    # image is an array where each entry is one horizontal row of the photo.
    def publicador_a_pantalla(self):
        """Convert the current board image to a ROS Image message and publish it."""
        msg = cv_bridge.CvBridge().cv2_to_imgmsg(self.p_principal)
        self.pub.publish(msg)
        rospy.sleep(1)

    def hacer_jugada(self, jugador, indice_jugada):
        '''
        Paste one move onto the board image and refresh the display.

        indice_jugada: number from 0 to 8 selecting the board cell
        jugador: 1 or 2 selecting whether the move is a cross or a circle
        (any value other than 1 draws a circle)
        '''
        if jugador == 1:
            self.p_principal[self.pts[indice_jugada][0]:self.pts[indice_jugada][0]+self.cir_altura_y,self.pts[indice_jugada][1]:self.pts[indice_jugada][1]+self.cir_ancho_x] = self.cruz
        else:
            self.p_principal[self.pts[indice_jugada][0]:self.pts[indice_jugada][0]+self.cir_altura_y,self.pts[indice_jugada][1]:self.pts[indice_jugada][1]+self.cir_ancho_x] = self.circulo
        self.publicador_a_pantalla()

    def resultado(self, resultado):
        '''
        Overlay the final result banner and refresh the display.

        resultado: 0, 1 or 2 -- 1 means player 1 wins, 2 means player 2
        wins, 0 (or anything else) means a draw
        '''
        # Fixed banner position (top-left corner) on the board image
        x4 = 40
        y4 = 400
        if resultado == 1:
            self.p_principal[y4:y4+self.gan_altura_y,x4:x4+self.gan_ancho_x] = self.ganador_x
        elif resultado == 2:
            self.p_principal[y4:y4+self.gan_altura_y,x4:x4+self.gan_ancho_x] = self.ganador_o
        else:
            self.p_principal[y4:y4+self.gan_altura_y,x4:x4+self.gan_ancho_x] = self.empate
        self.publicador_a_pantalla()
def main():
    """Start a throwaway ROS node and draw a single opening move on the board."""
    rospy.init_node('ffdsdsfads', anonymous=True)
    board = pantalla_gato()
    board.hacer_jugada(0, 0)
if __name__ == '__main__':
main() | [
"angelomontano42@gmail.com"
] | angelomontano42@gmail.com |
69cfac43adaf11db9463ba6660f355dd9fe7d777 | f19e38ef948796e93981fb10a5b0098f65e4e247 | /bin/django-admin | 9a165941cc3d7a5bcc070729306e3baabbfdc844 | [] | no_license | FranSollima/gwent-project | b2b1a9af1b803ae5311e79057e2acf6388c8cf27 | 89f4540e3b036ca3aec49b69ede81c23c48106b4 | refs/heads/master | 2021-01-22T09:26:49.914790 | 2015-07-02T17:10:08 | 2015-07-02T17:10:08 | 37,854,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | #!/home/francisco/gwent-project/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip a trailing "-script.pyw" / ".exe" suffix added by Windows
    # entry-point wrappers so argv[0] shows the bare command name in help text
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"francisco@debianFS.marketimetrics.com"
] | francisco@debianFS.marketimetrics.com | |
9d96bb713aa28e97eeda888b809169677dfa9351 | 053ee1c7814a735f6ed54e066e1a2d62d9532c10 | /pjf/tag.py | 12825debc533746dc147b18e69a1924ea5671b02 | [] | no_license | walkeryg/hdotfiles | 617ef4850bf1371b9a9c5fe525efa3fdb5d26bf0 | c6fb3a06c8777b0b34d2acb729f4592f71a89091 | refs/heads/master | 2022-07-23T07:07:41.230401 | 2022-07-08T09:49:22 | 2022-07-08T09:49:22 | 88,474,863 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import os
import optparse
import platform
import re
import sys
import subprocess
class Tag(object):
    """Base class for generating/updating source-code tag files.

    Subclasses are expected to override :meth:`update`.
    """

    def __init__(self):
        # File patterns to skip when (re)generating tags
        self.__ignore = [
            '.*o',
        ]

    def Usage(self):
        # Stub usage message; subclasses should print real usage information.
        print("NotImplemented")
        sys.exit(1)

    def update(self):
        # Must be implemented by subclasses.
        raise NotImplementedError

    def _kill(self, proc_pid):  # fixed: first parameter was misspelled `sef`
        """Kill the process with PID `proc_pid` together with all its children.

        NOTE(review): relies on `psutil`, which is never imported in this
        module -- add `import psutil` at the top of the file before calling
        this method, otherwise it raises NameError.
        """
        process = psutil.Process(proc_pid)
        for proc in process.children():
            proc.kill()
        process.kill()
| [
"ygcom@aliun.com"
] | ygcom@aliun.com |
8acd536cba2570095245ee769e4dd66173229afd | 0df65d4630774e5e3a6513decfb7f992e1741458 | /HeavyChHiggsToTauNu_REMOVEME/test/plotScripts-AN-11-470/plotEwkValidationEmbeddedMcNormalTauTrigger.py | 05e31411c14d1d6a0b832f245e095f7dd8ed6bf7 | [] | no_license | heitorPB/HiggsAnalysis | 1742705f9bcde4e60e358f21a81a385dceeb8906 | 5e417978406b18a52e014125b8e833d4efcbe8b9 | refs/heads/master | 2020-04-12T01:42:41.431378 | 2017-03-07T15:21:34 | 2017-03-07T15:21:34 | 52,434,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,229 | py | #!/usr/bin/env python
######################################################################
#
# This plot script is for comparing the embedded MC and normal MC
# withinsignal analysis and tau trigger. The corresponding python job
# configurations are
# * signalAnalysis_cfg.py with "doPat=1 tauEmbeddingInput=1"
# * signalAnalysis_cfg.py
# for embedding tauID, normal tauID, embedded signal analysis, and
# normal signal analysis, respecitvely
#
# The development scripts are
# * plotTauEmbeddingMcSignalAnalysisMcMany
#
# Authors: Matti Kortelainen
#
######################################################################
import os
import array
import math
import ROOT
ROOT.gROOT.SetBatch(True)
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.dataset as dataset
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.histograms as histograms
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.plots as plots
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.counter as counter
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.tdrstyle as tdrstyle
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.styles as styles
from HiggsAnalysis.HeavyChHiggsToTauNu.tools.cutstring import * # And, Not, Or
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.crosssection as xsect
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.tauEmbedding as tauEmbedding
analysisEmb = "signalAnalysisCaloMet60TEff"
analysisSig = "signalAnalysisGenuineTau" # require that the selected tau is genuine, valid comparison after njets
#taujet = "#tau jet"
#taujetH = "#tau-jet"
taujet = "#tau_{h}"
taujetH = "#tau_{h}"
def main():
    """Load embedded and normal-MC datasets, merge/normalize them, draw the
    comparison plots for each EWK process and print a LaTeX counter table.

    NOTE: this script is Python 2 (`xrange`, print statement).
    """
    # Embedded analysis directories: current dir plus the remaining trial
    # directories listed in tauEmbedding.dirEmbs (relative to parent dir)
    dirEmbs = ["."] + [os.path.join("..", d) for d in tauEmbedding.dirEmbs[1:]]
    dirSig = "../"+tauEmbedding.dirSig
#    dirEmbs = dirEmbs[:2]  # uncomment to limit trials for quick testing

    datasetsEmb = tauEmbedding.DatasetsMany(dirEmbs, analysisEmb+"Counters", normalizeMCByLuminosity=True)
    datasetsSig = dataset.getDatasetsFromMulticrabCfg(cfgfile=dirSig+"/multicrab.cfg", counters=analysisSig+"Counters")
    datasetsSig.updateNAllEventsToPUWeighted()

    # Standard data/MC merging and ordering; luminosity taken from data
    datasetsEmb.forEach(plots.mergeRenameReorderForDataMC)
    datasetsEmb.setLumiFromData()
    plots.mergeRenameReorderForDataMC(datasetsSig)

    # Merge the individual EWK processes into one "EWKMC" dataset while
    # keeping the sources so they can still be plotted individually
    def mergeEWK(datasets):
        datasets.merge("EWKMC", ["WJets", "TTJets", "DYJetsToLL", "SingleTop", "Diboson"], keepSources=True)
    mergeEWK(datasetsSig)
    datasetsEmb.forEach(mergeEWK)
    plots._legendLabels["EWKMC"] = "EWK"

    # Apply TDR style
    style = tdrstyle.TDRStyle()
    ROOT.gStyle.SetHatchesLineWidth(2)
    histograms.cmsTextMode = histograms.CMSMode.SIMULATION
    histograms.cmsText[histograms.CMSMode.SIMULATION] = "Simulation"
    #histograms.createLegend.setDefaults(y1=0.93, y2=0.75, x1=0.52, x2=0.93)
    histograms.createLegend.setDefaults(y1=0.93, y2=0.77, x1=0.45, x2=0.7, textSize=0.04)

    tauEmbedding.normalize = True
    tauEmbedding.era = "Run2011A"

    # Accumulate the per-dataset counter columns into one table
    table = counter.CounterTable()
    def dop(name):
        doPlots(datasetsEmb, datasetsSig, name)
        tbl = doCounters(datasetsEmb, datasetsSig, name)
        for icol in xrange(tbl.getNcolumns()):
            table.appendColumn(tbl.getColumn(icol))

    dop("TTJets")
    dop("WJets")
    dop("DYJetsToLL")
    dop("SingleTop")
    dop("Diboson")

    cellFormat = counter.TableFormatLaTeX(counter.CellFormatTeX(valueFormat='%.4f', withPrecision=2))
    print table.format(cellFormat)
# Shared plot drawer used by doPlots below (no stacking, log scale,
# MC uncertainty band, ratio pad and luminosity label enabled)
drawPlotCommon = tauEmbedding.PlotDrawerTauEmbeddingEmbeddedNormal(ylabel="Events / %.0f GeV/c", stackMCHistograms=False, log=True, addMCUncertainty=True, ratio=True, addLuminosityText=True)
def createStyles():
    """Return the default plot styles thickened to line width 5, with the
    first style additionally drawn dashed (line style 2)."""
    thick = []
    for base in styles.getStyles():
        thick.append(styles.StyleCompound(styles=[base, styles.StyleLine(lineWidth=5)]))
    thick[0] = styles.StyleCompound(styles=[thick[0], styles.StyleLine(lineStyle=2)])
    return thick
def doPlots(datasetsEmb, datasetsSig, datasetName):
    """Draw the embedded-vs-normal MC comparison plots for `datasetName`.

    Produces the tau/jet/MET control plots plus the delta-phi and transverse
    mass distributions; every figure is written through `drawPlotCommon` with
    a "mcembsig_<dataset>_" filename prefix.

    Fixes: `createDrawPlot` previously forwarded an undefined name `plot`
    (NameError if called), and `drawControlPlot` assigned `opts_ = opts2`
    instead of `opts2_ = opts2`, silently ignoring any `opts2` argument.
    """
    lumi = datasetsEmb.getLuminosity()  # NOTE(review): unused here; kept in case getLuminosity caches state

    createPlot = tauEmbedding.PlotCreatorMany(analysisEmb, analysisSig, datasetsEmb, datasetsSig, datasetName, createStyles())
    def drawPlot(plot, name, *args, **kwargs):
        # Prefix every output name with the dataset being compared
        drawPlotCommon(plot, "mcembsig_"+datasetName+"_"+name, *args, **kwargs)
    def createDrawPlot(name, *args, **kwargs):
        p = createPlot(name)
        drawPlot(p, *args, **kwargs)  # fixed: was drawPlot(plot, ...) with undefined `plot`

    opts2def = {"ymin": 0, "ymax": 2}
    def drawControlPlot(path, xlabel, rebin=None, opts2=None, **kwargs):
        # Ratio-pad options: caller's opts2 overrides the default range
        opts2_ = opts2def
        if opts2 is not None:
            opts2_ = opts2  # fixed: was `opts_ = opts2`, which had no effect
        cargs = {}
        if rebin is not None:
            cargs["rebin"] = rebin
        drawPlot(createPlot("ControlPlots/"+path, **cargs), path, xlabel, opts2=opts2_, **kwargs)

    def update(d1, d2):
        # Non-mutating dict merge (d2 wins on conflicts)
        tmp = {}
        tmp.update(d1)
        tmp.update(d2)
        return tmp

    # Control plots; per-dataset axis/legend tweaks via .get(datasetName, default)
    optsdef = {}

    opts = optsdef
    moveLegend = {
        "DYJetsToLL": {"dx": -0.002},
        "WJets": {"dx": 0.02, "dh": -0.02},
    }.get(datasetName, {})
    drawControlPlot("SelectedTau_pT_AfterStandardSelections", taujetH+" ^{}p_{T} (GeV/c)", opts=update(opts, {"xmax": 250}), rebin=2, cutBox={"cutValue": 40, "greaterThan": 40}, moveLegend=moveLegend)

    opts = {
        "TTJets": {"ymax": 8.4},
        "WJets": {"ymax": 21},
        "SingleTop": {"ymax": 1.9},
        "Diboson": {"ymax": 0.7},
    }.get(datasetName, {"ymaxfactor": 1.4})
    moveLegend = {
        "TTJets": {"dy":-0.6, "dx":-0.12},
    }.get(datasetName, {"dx": -0.26})
    drawControlPlot("SelectedTau_eta_AfterStandardSelections", taujetH+" #eta", opts=update(opts, {"xmin": -2.2, "xmax": 2.2}), ylabel="Events / %.1f", rebin=4, log=False, moveLegend=moveLegend)

    moveLegend = {
        "DYJetsToLL": {"dx": -0.02},
        "Diboson": {"dx": -0.02},
    }.get(datasetName, {})
    opts = {
        #"Diboson": {"ymaxfactor": 1.4},
    }.get(datasetName, {})
    drawControlPlot("SelectedTau_LeadingTrackPt_AfterStandardSelections", taujetH+" ldg. charged particle ^{}p_{T} (GeV/c)", opts=update(opts, {"xmax": 300}), rebin=2, cutBox={"cutValue": 20, "greaterThan": True}, moveLegend=moveLegend)

    opts = {"ymin": 1e-1, "ymaxfactor": 2}
    if datasetName == "Diboson":
        opts["ymin"] = 1e-2
    moveLegend = {"dx": -0.17}
    drawControlPlot("SelectedTau_Rtau_AfterStandardSelections", "R_{#tau} = p^{ldg. charged particle}/^{}p^{%s}"%taujet, opts=update(opts, {"xmin": 0.65, "xmax": 1.05}), rebin=5, ylabel="Events / %.2f", moveLegend=moveLegend, cutBox={"cutValue":0.7, "greaterThan":True})

    opts = optsdef
    opts = {
        "TTJets": {"ymaxfactor": 2.2},
    }.get(datasetName, opts)
    moveLegend = {
        "TTJets": {"dx": 0.03},
        "DYJetsToLL": {"dx": -0.02},
    }.get(datasetName, {})
    drawControlPlot("Njets_AfterStandardSelections", "Number of jets", ylabel="Events", opts=opts, moveLegend=moveLegend)

    # After Njets
    opts = {
        "TTJets": {"ymaxfactor": 2.2},
        "WJets": {"ymaxfactor": 6},
        "Diboson": {"ymaxfactor": 3.5},
    }.get(datasetName, {})
    moveLegend = {
        "TTJets": {"dx": 0.03},
        "WJets": {"dx": 0.02, "dh": -0.03},
        "DYJetsToLL": {"dx": -0.02},
        "Diboson": {"dx": -0.01},
    }.get(datasetName, {})
    drawControlPlot("MET", "Uncorrected PF ^{}E_{T}^{miss} (GeV)", rebin=5, opts=update(opts, {"xmax": 400}), cutLine=50, moveLegend=moveLegend)

    # after MET
    opts = {
        "SingleTop": {"ymaxfactor": 5}
    }.get(datasetName, {})
    moveLegend = {
        "TTJets": {"dx": -0.12, "dy": -0.5},
        "DYJetsToLL": {"dx": -0.02},
    }.get(datasetName, {})
    drawControlPlot("NBjets", "Number of selected b jets", opts=update(opts, {"xmax": 6}), ylabel="Events", moveLegend=moveLegend, cutLine=1)

    # Tree cut definitions
    treeDraw = dataset.TreeDraw("dummy", weight=tauEmbedding.signalNtuple.weightBTagging)
    tdDeltaPhi = treeDraw.clone(varexp="%s >>tmp(18, 0, 180)" % tauEmbedding.signalNtuple.deltaPhiExpression)
    tdMt = treeDraw.clone(varexp="%s >>tmp(15,0,300)" % tauEmbedding.signalNtuple.mtExpression)

    # DeltaPhi
    xlabel = "#Delta#phi(^{}%s, ^{}E_{T}^{miss}) (^{o})" % taujet

    def customDeltaPhi(h):
        # Pull the y-axis title slightly closer to the axis
        yaxis = h.getFrame().GetYaxis()
        yaxis.SetTitleOffset(0.8*yaxis.GetTitleOffset())
    opts2=opts2def
    opts = {
        "WJets": {"ymax": 20},
        "DYJetsToLL": {"ymax": 5.4},
        "SingleTop": {"ymax": 2},
        "Diboson": {"ymax": 0.6},
    }.get(datasetName, {"ymaxfactor": 1.2})
    opts2 = {
        "WJets": {"ymin": 0, "ymax": 3}
    }.get(datasetName, opts2def)
    moveLegend = {
        "DYJetsToLL": {"dx": -0.21},
        "Diboson": {"dx": -0.205},
    }.get(datasetName, {"dx": -0.2})
    drawPlot(createPlot(tdDeltaPhi.clone(selection=And(tauEmbedding.signalNtuple.metCut, tauEmbedding.signalNtuple.bTaggingCut))), "deltaPhi_3AfterBTagging", xlabel, log=False, opts=opts, opts2=opts2, ylabel="Events /^{} %.0f^{o}", function=customDeltaPhi, moveLegend=moveLegend, cutLine=[130, 160])

    # Transverse mass
    selection = And(*[tauEmbedding.signalNtuple.metCut, tauEmbedding.signalNtuple.bTaggingCut, tauEmbedding.signalNtuple.deltaPhi160Cut])
    opts = {
        "EWKMC": {"ymax": 40},
        "TTJets": {"ymax": 12},
        #"WJets": {"ymax": 35},
        "WJets": {"ymax": 25},
        "SingleTop": {"ymax": 2.2},
        "DYJetsToLL": {"ymax": 6.5},
        #"Diboson": {"ymax": 0.9},
        "Diboson": {"ymax": 0.8},
        "W3Jets": {"ymax": 5}
    }.get(datasetName, {})
    opts2 = {
        "TTJets": {"ymin": 0, "ymax": 1.2},
        "Diboson": {"ymin": 0, "ymax": 3.2},
    }.get(datasetName, opts2)
    p = createPlot(tdMt.clone(selection=selection))
    p.appendPlotObject(histograms.PlotText(0.55, 0.7, "#Delta#phi(^{}%s, ^{}E_{T}^{miss}) < 160^{o}"%taujet, size=24))
    moveLegend = {"DYJetsToLL": {"dx": -0.02}}.get(datasetName, {})
    drawPlot(p, "transverseMass_4AfterDeltaPhi160", "m_{T}(^{}%s, ^{}E_{T}^{miss}) (GeV/^{}c^{2})" % taujet, opts=opts, opts2=opts2, ylabel="Events / %.0f GeV/^{}c^{2}", log=False, moveLegend=moveLegend)
def doCounters(datasetsEmb, datasetsSig, datasetName):
    """Build a two-column cut-flow table for `datasetName`.

    Returns a counter.CounterTable with columns "<datasetName> emb"
    (embedded MC) and "<datasetName> norm" (normal MC, scaled to the
    embedded data luminosity), restricted to the selection rows listed in
    keepOnlyRows below.
    """
    lumi = datasetsEmb.getLuminosity()

    # Counters
    eventCounterEmb = tauEmbedding.EventCounterMany(datasetsEmb)
    eventCounterSig = counter.EventCounter(datasetsSig)

    def isNotThis(name):
        return name != datasetName

    # Keep only the column of the dataset under study in both counters
    eventCounterEmb.removeColumns(filter(isNotThis, datasetsEmb.getAllDatasetNames()))
    eventCounterSig.removeColumns(filter(isNotThis, datasetsSig.getAllDatasetNames()))
    eventCounterSig.normalizeMCToLuminosity(lumi)

    tableEmb = eventCounterEmb.getMainCounterTable()
    tableSig = eventCounterSig.getMainCounterTable()

    # Combine the embedded and normal columns side by side
    table = counter.CounterTable()
    col = tableEmb.getColumn(name=datasetName)
    col.setName(datasetName+" emb")
    table.appendColumn(col)
    col = tableSig.getColumn(name=datasetName)
    col.setName(datasetName+" norm")
    table.appendColumn(col)

    # Restrict to the selections of interest; the "njets" row corresponds to
    # events passing the full tau ID, hence the rename below
    table.keepOnlyRows([
        "njets",
        "MET",
        "btagging",
        "btagging scale factor",
        "deltaPhiTauMET<160",
        "deltaPhiTauMET<130",
        ])
    table.renameRows({"njets": "tau ID"})

    return table
if __name__ == "__main__":
main()
| [
"matti.kortelainen@helsinki.fi"
] | matti.kortelainen@helsinki.fi |
ff8df54b7c715153b82188cee10a486b3527e8b6 | b1db6cd08631e47fcdb0d1ec6277de5176e904f8 | /codice/run_devices.py | 911cdf8d81f69bc9c3fcb8fe3e23e5ee3d9249fb | [
"MIT"
] | permissive | pietropezzi/Elaborato-PdR | 1efcf98cb6e8c7086f38c6c0cbe5044c1d2e15c5 | 040bd05ca985931d776a28e7412eee6cb3808930 | refs/heads/main | 2023-05-07T12:39:23.468116 | 2021-05-26T07:28:06 | 2021-05-26T07:28:06 | 365,720,039 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | import device
import time
from threading import Thread
import sys
# Intervallo di tempo tra l'attivazione di ogni device (in secondi).
timeInt = 0.5
# Numero di letture da effettuare prima di comunicarle al gateway.
readAmount = 3
# Intervallo di tempo tra ogni lettura (in secondi).
readBreak = 2
# Numero di device che verranno attivati.
# Assicurarsi che deviceAmount corrisponda al deviceAmount di gateway!
# Per ogni device deve essere generato un thread.
deviceAmount = 4
# checkvalues controlla che i valori di readAmount e deviceAmount
# rispettino le condizioni necessarie per i buffer del gateway e del server.
def checkvalues():
    """Validate readAmount/deviceAmount against the buffer limits.

    Presumably 34 bytes are sent per reading; a single device's burst must
    fit in 1024 bytes (gateway buffer) and all devices together in 4096
    bytes (server buffer) -- TODO confirm against gateway/server code.
    Exits with status 1 when the configuration is invalid.
    """
    bytes_per_device = readAmount * 34
    if bytes_per_device > 1024 or bytes_per_device * deviceAmount > 4096:
        print("I valori inseriti non sono validi")
        sys.exit(1)
# rundevices genera un thread per ogni device.
def rundevices():
    """Spawn one daemon thread per device and wait for ENTER to quit.

    Generalized from four copy-pasted thread blocks: one thread is started
    for each of the `deviceAmount` devices, so the count no longer has to be
    kept in sync by hand. Device i (1-based) gets IP "192.168.0.<i>" and ID
    "D0<i>" (zero-padded), exactly as the original hard-coded devices did.
    Consecutive devices are started `timeInt` seconds apart; no sleep after
    the last one. Threads are daemons, so they die when the process exits.
    """
    for i in range(1, deviceAmount + 1):
        if i > 1:
            # pause between consecutive device activations
            time.sleep(timeInt)
        td = Thread(target=device.create,
                    args=("192.168.0.%d" % i, "D%02d" % i, readAmount, readBreak),
                    daemon=True)
        td.start()
    print("Premere ENTER per chiudere run_devices e terminare tutti i thread")
    input()
    sys.exit(0)
if __name__ == "__main__":
checkvalues()
rundevices()
| [
"pietro.pezzi3@studio.unibo.it"
] | pietro.pezzi3@studio.unibo.it |
9427dd2eb8619763631b53850f3d848d5866e9e7 | eacff46eda2c6b509449979a16002b96d4645d8e | /Collections-a-installer/community-general-2.4.0/plugins/modules/monitoring/airbrake_deployment.py | 3e7938bfba10ac8e1d2080f7ed8ae71ed9589628 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | d-amien-b/simple-getwordpress | 5e6d4d15d5f87124ab591e46b63fec552998fdc3 | da90d515a0aa837b633d50db4d91d22b031c04a2 | refs/heads/master | 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 | MIT | 2021-03-31T16:16:45 | 2021-03-26T07:30:00 | HTML | UTF-8 | Python | false | false | 6,696 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: airbrake_deployment
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Patrick Humpal (@phumpal)"
short_description: Notify airbrake about app deployments
description:
- Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
- Parameter I(token) has been deprecated for community.general 0.2.0. Please remove entry.
options:
project_id:
description:
- Airbrake PROJECT_ID
required: false
type: str
version_added: '0.2.0'
project_key:
description:
- Airbrake PROJECT_KEY.
required: false
type: str
version_added: '0.2.0'
environment:
description:
- The airbrake environment name, typically 'production', 'staging', etc.
required: true
type: str
user:
description:
- The username of the person doing the deployment
required: false
type: str
repo:
description:
- URL of the project repository
required: false
type: str
revision:
description:
- A hash, number, tag, or other identifier showing what revision from version control was deployed
required: false
type: str
version:
description:
- A string identifying what version was deployed
required: false
type: str
version_added: '1.0.0'
url:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
default: "https://api.airbrake.io/api/v4/projects/"
type: str
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
type: bool
token:
description:
- This parameter (API token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
required: false
type: str
requirements: []
'''
EXAMPLES = '''
- name: Notify airbrake about an app deployment
community.general.airbrake_deployment:
project_id: '12345'
project_key: 'AAAAAA'
environment: staging
user: ansible
revision: '4.2'
- name: Notify airbrake about an app deployment, using git hash as revision
community.general.airbrake_deployment:
project_id: '12345'
project_key: 'AAAAAA'
environment: staging
user: ansible
revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
version: '0.2.0'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
# ===========================================
# Module execution.
#
def main():
    """Ansible module entry point: report an app deployment to Airbrake.

    Supports both the legacy v2 API (deprecated `token` parameter) and the
    v4 API (`project_id` + `project_key`). Exits via module.exit_json /
    module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=False, no_log=True, type='str'),
            project_id=dict(required=False, no_log=True, type='str'),
            project_key=dict(required=False, no_log=True, type='str'),
            environment=dict(required=True, type='str'),
            user=dict(required=False, type='str'),
            repo=dict(required=False, type='str'),
            revision=dict(required=False, type='str'),
            version=dict(required=False, type='str'),
            url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
            validate_certs=dict(default=True, type='bool'),
        ),
        supports_check_mode=True,
        required_together=[('project_id', 'project_key')],
        mutually_exclusive=[('project_id', 'token')],
        # FIX: without this, omitting both `token` and `project_id` fell
        # through to the final status check with `info` undefined, crashing
        # with a NameError instead of a clean validation error.
        required_one_of=[('project_id', 'token')],
    )

    # Build list of params
    params = {}

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True)

    if module.params["token"]:
        # v2 API documented at https://airbrake.io/docs/legacy-xml-api/#tracking-deploys
        if module.params["environment"]:
            params["deploy[rails_env]"] = module.params["environment"]

        if module.params["user"]:
            params["deploy[local_username]"] = module.params["user"]

        if module.params["repo"]:
            params["deploy[scm_repository]"] = module.params["repo"]

        if module.params["revision"]:
            params["deploy[scm_revision]"] = module.params["revision"]

        # version not supported in v2 API; omit

        module.deprecate("Parameter 'token' is deprecated since community.general 0.2.0. Please remove "
                         "it and use 'project_id' and 'project_key' instead",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.14

        params["api_key"] = module.params["token"]

        # Allow sending to Airbrake compliant v2 APIs
        if module.params["url"] == 'https://api.airbrake.io/api/v4/projects/':
            url = 'https://api.airbrake.io/deploys.txt'
        else:
            url = module.params["url"]

        # Send the data to airbrake
        data = urlencode(params)
        response, info = fetch_url(module, url, data=data)

    if module.params["project_id"] and module.params["project_key"]:
        # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
        if module.params["environment"]:
            params["environment"] = module.params["environment"]

        if module.params["user"]:
            params["username"] = module.params["user"]

        if module.params["repo"]:
            params["repository"] = module.params["repo"]

        if module.params["revision"]:
            params["revision"] = module.params["revision"]

        if module.params["version"]:
            params["version"] = module.params["version"]

        # Build deploy url
        url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
        json_body = module.jsonify(params)

        # Build header
        headers = {'Content-Type': 'application/json'}

        # Notify Airbrake of deploy
        response, info = fetch_url(module, url, data=json_body,
                                   headers=headers, method='POST')

    if info['status'] == 200 or info['status'] == 201:
        module.exit_json(changed=True)
    else:
        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
if __name__ == '__main__':
main()
| [
"test@burdo.fr"
] | test@burdo.fr |
e23d8667131d425957a2d499281cc4a48a30ab3c | 3ea7233cca492f36130e6e2da253409e90c97526 | /netneurotools/freesurfer.py | 0b73400b3235e911137fbdea37eb01f861ed03fb | [
"BSD-3-Clause"
] | permissive | giuliabaracc/netneurotools | b26aa43ec9a34bb2ce3da43a734c955edd375327 | 8532cc136261b6b70e40a63070a968a9b2519c3a | refs/heads/master | 2020-12-20T10:14:57.141250 | 2020-01-24T16:21:54 | 2020-01-24T16:21:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,795 | py | # -*- coding: utf-8 -*-
"""
Functions for working with FreeSurfer data and parcellations
"""
import os
import os.path as op
import warnings
from nibabel.freesurfer import read_annot, read_geometry
import numpy as np
from scipy.ndimage.measurements import _stats, labeled_comprehension
from scipy.spatial.distance import cdist
from .datasets import fetch_fsaverage
from .stats import gen_spinsamples
from .utils import check_fs_subjid, run
def apply_prob_atlas(subject_id, gcs, hemi, *, orig='white', annot=None,
                     ctab=None, subjects_dir=None, use_cache=True,
                     quiet=False):
    """
    Generates an annotation file for `subject_id` from classifier in `gcs`

    Wraps FreeSurfer's "mris_ca_label" binary in a subprocess call, so
    FreeSurfer must be installed and accessible on the local system path.

    Parameters
    ----------
    subject_id : str
        FreeSurfer subject ID
    gcs : str
        Filepath to .gcs file containing classifier array
    hemi : {'lh', 'rh'}
        Hemisphere corresponding to `gcs` file
    orig : str, optional
        Original surface to which to apply classifier. Default: 'white'
    annot : str, optional
        Path of the annotation file to generate. If None, the name is
        derived from `hemi` and `gcs`; a relative path is taken as relative
        to `subjects_dir`/`subject_id`. Default: None
    ctab : str, optional
        Path to colortable corresponding to `gcs`. Default: None
    subjects_dir : str, optional
        Path to FreeSurfer subject directory. If not set, will inherit from
        the environmental variable $SUBJECTS_DIR. Default: None
    use_cache : bool, optional
        Whether to re-use an already-existing `annot` file instead of
        generating a new one. Default: True
    quiet : bool, optional
        Whether to restrict status messages. Default: False

    Returns
    -------
    annot : str
        Path to generated annotation file
    """
    # fail fast on bad inputs
    if hemi not in ('rh', 'lh'):
        raise ValueError('Provided hemisphere designation `hemi` must be one '
                         'of \'rh\' or \'lh\'. Provided: {}'.format(hemi))
    if not op.isfile(gcs):
        raise ValueError('Cannot find specified `gcs` file {}.'.format(gcs))

    subject_id, subjects_dir = check_fs_subjid(subject_id, subjects_dir)

    # collect the optional command-line flags for mris_ca_label
    flags = []
    if ctab is not None and op.isfile(ctab):
        flags.append('-t {}'.format(ctab))
    if orig is not None:
        flags.append('-orig {}'.format(orig))
    if subjects_dir is not None:
        flags.append('-sdir {}'.format(subjects_dir))
    else:
        subjects_dir = os.environ['SUBJECTS_DIR']
    opts = ''.join('{} '.format(f) for f in flags)

    # derive the output annotation path
    if annot is None:
        annot = op.join(subjects_dir, subject_id, 'label',
                        '{}.{}.annot'.format(hemi, gcs[:-4]))
    elif not annot.startswith(op.abspath(os.sep)):
        # relative paths are rooted at subjects_dir/subject_id
        annot = op.join(subjects_dir, subject_id, annot)

    # only invoke FreeSurfer when the output is missing or caching is off
    if not op.isfile(annot) or not use_cache:
        cmd = ('mris_ca_label {opts}{subject_id} {hemi} {hemi}.sphere.reg '
               '{gcs} {annot}'.format(opts=opts, subject_id=subject_id,
                                      hemi=hemi, gcs=gcs, annot=annot))
        run(cmd, quiet=quiet)

    return annot
def _decode_list(vals):
""" List decoder
"""
return [l.decode() if hasattr(l, 'decode') else l for l in vals]
def find_parcel_centroids(*, lhannot, rhannot, version='fsaverage',
                          surf='sphere', drop=None):
    """
    Returns vertex coords corresponding to centroids of parcels in annotations

    Note that using any other `surf` besides the default of 'sphere' may result
    in centroids that are not directly within the parcels themselves due to
    sulcal folding patterns.

    Parameters
    ----------
    {lh,rh}annot : str
        Path to .annot file containing labels of parcels on the {left,right}
        hemisphere. These must be specified as keyword arguments to avoid
        accidental order switching.
    version : str, optional
        Specifies which version of `fsaverage` provided annotation files
        correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
        'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
    surf : str, optional
        Specifies which surface projection of fsaverage to use for finding
        parcel centroids. Default: 'sphere'
    drop : list, optional
        Specifies regions in {lh,rh}annot for which the parcel centroid should
        not be calculated. If not specified, centroids for 'unknown' and
        'corpuscallosum' are not calculated. Default: None

    Returns
    -------
    centroids : (N, 3) numpy.ndarray
        xyz coordinates of vertices closest to the centroid of each parcel
        defined in `lhannot` and `rhannot`
    hemiid : (N,) numpy.ndarray
        Array denoting hemisphere designation of coordinates in `centroids`,
        where `hemiid=0` denotes the left and `hemiid=1` the right hemisphere
    """
    if drop is None:
        drop = [
            'unknown', 'corpuscallosum',  # default FreeSurfer
            'Background+FreeSurfer_Defined_Medial_Wall'  # common alternative
        ]
    drop = _decode_list(drop)

    surfaces = fetch_fsaverage(version)[surf]

    centroids, hemiid = [], []
    # n = 0 -> left hemisphere, n = 1 -> right hemisphere
    # NOTE(review): the loop variable `surf` shadows the `surf` parameter
    # after the first iteration; harmless since the parameter is not used
    # again, but worth renaming.
    for n, (annot, surf) in enumerate(zip([lhannot, rhannot], surfaces)):
        vertices, faces = read_geometry(surf)
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)

        for lab in np.unique(labels):
            if names[lab] in drop:
                continue
            # arithmetic mean of the parcel's vertex coordinates...
            coords = np.atleast_2d(vertices[labels == lab].mean(axis=0))
            # ...snapped to the single closest surface vertex, so the
            # returned centroid is guaranteed to lie on the surface itself
            roi = vertices[np.argmin(cdist(vertices, coords), axis=0)[0]]
            centroids.append(roi)
            hemiid.append(n)

    return np.row_stack(centroids), np.asarray(hemiid)
def parcels_to_vertices(data, *, lhannot, rhannot, drop=None):
    """
    Projects parcellated `data` to vertices defined in annotation files
    Assigns np.nan to all ROIs in `drop`
    Parameters
    ----------
    data : (N,) numpy.ndarray
        Parcellated data to be projected to vertices. Parcels should be ordered
        by [left, right] hemisphere; ordering within hemisphere should
        correspond to the provided annotation files.
    {lh,rh}annot : str
        Path to .annot file containing labels of parcels on the {left,right}
        hemisphere. These must be specified as keyword arguments to avoid
        accidental order switching.
    drop : list, optional
        Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
        will be inserted in place of these regions in the returned data. If
        not specified, 'unknown' and 'corpuscallosum' are assumed to not be
        present. Default: None
    Returns
    -------
    projected : numpy.ndarray
        Vertex-level data
    """
    if drop is None:
        drop = [
            'unknown', 'corpuscallosum', # default FreeSurfer
            'Background+FreeSurfer_Defined_Medial_Wall' # common alternative
        ]
    drop = _decode_list(drop)
    # np.vstack makes a 1D parcel vector 2D so the column logic below handles
    # single- and multi-column input uniformly
    data = np.vstack(data)
    # check this so we're not unduly surprised by anything...
    n_vert = expected = 0
    for a in [lhannot, rhannot]:
        vn, _, names = read_annot(a)
        n_vert += len(vn)
        names = _decode_list(names)
        # parcels the caller should have supplied = named parcels minus drops
        expected += len(names) - len(set(drop) & set(names))
    if expected != len(data):
        raise ValueError('Number of parcels in provided annotation files '
                         'differs from size of parcellated data array.\n'
                         ' EXPECTED: {} parcels\n'
                         ' RECEIVED: {} parcels'
                         .format(expected, len(data)))
    projected = np.zeros((n_vert, data.shape[-1]), dtype=data.dtype)
    start = end = n_vert = 0
    for annot in [lhannot, rhannot]:
        # read files and update end index for `data`
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)
        todrop = set(names) & set(drop)
        end += len(names) - len(todrop) # unknown and corpuscallosum
        # get indices of unknown and corpuscallosum and insert NaN values
        inds = sorted([names.index(f) for f in todrop])
        # shift each index left by the number of earlier insertions so that
        # np.insert lands the NaN rows at the dropped parcels' original slots
        inds = [f - n for n, f in enumerate(inds)]
        currdata = np.insert(data[start:end], inds, np.nan, axis=0)
        # project to vertices and store: `labels` maps each vertex to its
        # parcel row, so fancy-indexing broadcasts parcel values to vertices
        projected[n_vert:n_vert + len(labels), :] = currdata[labels]
        start = end
        n_vert += len(labels)
    return np.squeeze(projected)
def vertices_to_parcels(data, *, lhannot, rhannot, drop=None):
    """
    Reduces vertex-level `data` to parcels defined in annotation files
    Takes average of vertices within each parcel, excluding np.nan values
    (i.e., np.nanmean). Assigns np.nan to parcels for which all vertices are
    np.nan.
    Parameters
    ----------
    data : (N,) numpy.ndarray
        Vertex-level data to be reduced to parcels
    {lh,rh}annot : str
        Path to .annot file containing labels to parcels on the {left,right}
        hemisphere
    drop : list, optional
        Specifies regions in {lh,rh}annot that should be removed from the
        parcellated version of `data`. If not specified, 'unknown' and
        'corpuscallosum' will be removed. Default: None
    Returns
    -------
    reduced : numpy.ndarray
        Parcellated `data`, without regions specified in `drop`
    """
    if drop is None:
        drop = [
            'unknown', 'corpuscallosum', # default FreeSurfer
            'Background+FreeSurfer_Defined_Medial_Wall' # common alternative
        ]
    drop = _decode_list(drop)
    # np.vstack makes 1D input 2D so per-column iteration below is uniform
    data = np.vstack(data)
    n_parc = expected = 0
    for a in [lhannot, rhannot]:
        vn, _, names = read_annot(a)
        expected += len(vn)
        names = _decode_list(names)
        n_parc += len(names) - len(set(drop) & set(names))
    if expected != len(data):
        raise ValueError('Number of vertices in provided annotation files '
                         'differs from size of vertex-level data array.\n'
                         ' EXPECTED: {} vertices\n'
                         ' RECEIVED: {} vertices'
                         .format(expected, len(data)))
    reduced = np.zeros((n_parc, data.shape[-1]), dtype=data.dtype)
    start = end = n_parc = 0
    for annot in [lhannot, rhannot]:
        # read files and update end index for `data`
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)
        indices = np.unique(labels)
        end += len(labels)
        for idx in range(data.shape[-1]):
            # get average of vertex-level data within parcels
            # set all NaN values to 0 before calling `_stats` because we are
            # returning sums, so the 0 values won't impact the sums (if we left
            # the NaNs then all parcels with even one NaN entry would be NaN)
            currdata = np.squeeze(data[start:end, idx])
            isna = np.isnan(currdata)
            counts, sums = _stats(np.nan_to_num(currdata), labels, indices)
            # however, we do need to account for the NaN values in the counts
            # so that our means are similar to what we'd get from e.g.,
            # np.nanmean here, our "sums" are the counts of NaN values in our
            # parcels
            _, nacounts = _stats(isna, labels, indices)
            counts = (np.asanyarray(counts, dtype=float)
                      - np.asanyarray(nacounts, dtype=float))
            # all-NaN parcels have count 0, so 0/0 yields NaN (suppressed here)
            with np.errstate(divide='ignore', invalid='ignore'):
                currdata = sums / counts
            # get indices of unknown and corpuscallosum and delete from parcels
            inds = sorted([names.index(f) for f in set(drop) & set(names)])
            currdata = np.delete(currdata, inds)
            # store parcellated data
            reduced[n_parc:n_parc + len(names) - len(inds), idx] = currdata
        start = end
        # NOTE(review): `inds` here is the value left over from the last
        # `idx` iteration; it is the same for every column of this annot, so
        # the offset is correct, but the dependency is easy to miss.
        n_parc += len(names) - len(inds)
    return np.squeeze(reduced)
def _get_fsaverage_coords(version='fsaverage', surface='sphere'):
    """
    Return vertex coordinates and hemisphere ids for an fsaverage surface.
    Parameters
    ----------
    version : str, optional
        One of {'fsaverage', 'fsaverage3', 'fsaverage4', 'fsaverage5',
        'fsaverage6'}. Default: 'fsaverage'
    surface : str, optional
        Surface projection whose vertex coordinates are returned.
        Default: 'sphere'
    Returns
    -------
    coords : (N, 3) numpy.ndarray
        xyz coordinates of vertices, left hemisphere first
    hemiid : (N,) numpy.ndarray
        0 for vertices of the left hemisphere, 1 for the right
    """
    # fetch_fsaverage yields exactly one surface file per hemisphere
    lh_file, rh_file = fetch_fsaverage(version)[surface]
    coord_blocks, hemi_blocks = [], []
    for hemi_idx, surf_file in enumerate((lh_file, rh_file)):
        verts = read_geometry(surf_file)[0]
        coord_blocks.append(verts)
        hemi_blocks.append(np.ones(len(verts)) * hemi_idx)
    return np.row_stack(coord_blocks), np.hstack(hemi_blocks)
def spin_data(data, *, lhannot, rhannot, version='fsaverage', n_rotate=1000,
              spins=None, drop=None, seed=None, verbose=False,
              return_cost=False):
    """
    Projects parcellated `data` to surface, rotates, and re-parcellates
    Projection to the surface uses `{lh,rh}annot` files. Rotation uses vertex
    coordinates from the specified fsaverage `version` and relies on
    :func:`netneurotools.stats.gen_spinsamples`. Re-parcellated data will not
    be exactly identical to original values due to re-averaging process.
    Parcels subsumed by regions in `drop` will be listed as NaN.
    Parameters
    ----------
    data : (N,) numpy.ndarray
        Parcellated data to be rotated. Parcels should be ordered by [left,
        right] hemisphere; ordering within hemisphere should correspond to the
        provided `{lh,rh}annot` annotation files.
    {lh,rh}annot : str
        Path to .annot file containing labels to parcels on the {left,right}
        hemisphere
    version : str, optional
        Specifies which version of `fsaverage` provided annotation files
        correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
        'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
    n_rotate : int, optional
        Number of rotations to generate. Default: 1000
    spins : array_like, optional
        Pre-computed spins to use instead of generating them on the fly. If not
        provided will use other provided parameters to create them. Default:
        None
    drop : list, optional
        Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
        will be inserted in place of the these regions in the returned data. If
        not specified, 'unknown' and 'corpuscallosum' are assumed to not be
        present. Default: None
    seed : {int, np.random.RandomState instance, None}, optional
        Seed for random number generation. Default: None
    verbose : bool, optional
        Whether to print occasional status messages. Default: False
    return_cost : bool, optional
        Whether to return cost array (specified as Euclidean distance) for each
        coordinate for each rotation. Currently this option is not supported if
        pre-computed `spins` are provided. Default: False
    Returns
    -------
    rotated : (N, `n_rotate`) numpy.ndarray
        Rotated `data`
    cost : (N, `n_rotate`,) numpy.ndarray
        Cost (specified as Euclidean distance) of re-assigning each coordinate
        for every rotation in `spinsamples`. Only provided if `return_cost` is
        True.
    """
    if drop is None:
        drop = [
            'unknown', 'corpuscallosum', # default FreeSurfer
            'Background+FreeSurfer_Defined_Medial_Wall' # common alternative
        ]
    # get coordinates and hemisphere designation for spin generation
    # (project parcel values down to the vertex level so rotation can operate
    # on vertices rather than parcels)
    vertices = parcels_to_vertices(data, lhannot=lhannot, rhannot=rhannot,
                                   drop=drop)
    if spins is None:
        coords, hemiid = _get_fsaverage_coords(version, 'sphere')
        if len(vertices) != len(coords):
            raise ValueError('Provided annotation files have a different '
                             'number of vertices than the specified fsaverage '
                             'surface.\n ANNOTATION: {} vertices\n '
                             'FSAVERAGE: {} vertices'
                             .format(len(vertices), len(coords)))
        spins, cost = gen_spinsamples(coords, hemiid, n_rotate=n_rotate,
                                      seed=seed, verbose=verbose)
    else:
        spins = np.asarray(spins, dtype='int32')
        if len(spins) != len(vertices):
            raise ValueError('Provided `spins` array has a different number '
                             'of vertices than the provided annotation files.'
                             '\n ANNOTATION: {} vertices\n SPINS: '
                             '{} vertices\n'
                             .format(len(vertices), len(spins)))
        if spins.shape[-1] != n_rotate:
            warnings.warn('Shape of provided `spins` array does not match '
                          'number of rotations requested with `n_rotate`. '
                          'Ignoring specified `n_rotate` parameter and using '
                          'all provided `spins`.')
            n_rotate = spins.shape[-1]
        if return_cost:
            raise ValueError('Cannot `return_cost` when `spins` are provided.')
    spun = np.zeros(data.shape + (n_rotate,))
    for n in range(n_rotate):
        if verbose:
            msg = f'Reducing vertices to parcels: {n:>5}/{n_rotate}'
            print(msg, end='\b' * len(msg), flush=True)
        # re-parcellate each rotated vertex map back to parcel space
        spun[..., n] = vertices_to_parcels(vertices[spins[:, n]],
                                           lhannot=lhannot, rhannot=rhannot,
                                           drop=drop)
    if verbose:
        # NOTE(review): `msg` is only bound inside the loop above; with
        # verbose=True and n_rotate == 0 this would raise NameError — confirm
        # n_rotate >= 1 is guaranteed upstream.
        print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)
    if return_cost:
        return spun, cost
    return spun
def spin_parcels(*, lhannot, rhannot, version='fsaverage', n_rotate=1000,
                 drop=None, seed=None, return_cost=False, **kwargs):
    """
    Rotates parcels in `{lh,rh}annot` and re-assigns based on maximum overlap
    Vertex labels are rotated with :func:`netneurotools.stats.gen_spinsamples`
    and a new label is assigned to each *parcel* based on the region maximally
    overlapping with its boundaries.
    Parameters
    ----------
    {lh,rh}annot : str
        Path to .annot file containing labels to parcels on the {left,right}
        hemisphere
    version : str, optional
        Specifies which version of `fsaverage` provided annotation files
        correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
        'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
    n_rotate : int, optional
        Number of rotations to generate. Default: 1000
    drop : list, optional
        Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
        will be inserted in place of the these regions in the returned data. If
        not specified, 'unknown' and 'corpuscallosum' are assumed to not be
        present. Default: None
    seed : {int, np.random.RandomState instance, None}, optional
        Seed for random number generation. Default: None
    return_cost : bool, optional
        Whether to return cost array (specified as Euclidean distance) for each
        coordinate for each rotation Default: False
    kwargs : key-value, optional
        Key-value pairs passed to :func:`netneurotools.stats.gen_spinsamples`
    Returns
    -------
    spinsamples : (N, `n_rotate`) numpy.ndarray
        Resampling matrix to use in permuting data parcellated with labels from
        {lh,rh}annot, where `N` is the number of parcels. Indices of -1
        indicate that the parcel was completely encompassed by regions in
        `drop` and should be ignored.
    cost : (N, `n_rotate`,) numpy.ndarray
        Cost (specified as Euclidean distance) of re-assigning each coordinate
        for every rotation in `spinsamples`. Only provided if `return_cost` is
        True.
    """
    def overlap(vals):
        """ Returns most common positive value in `vals`; -1 if there is none
        """
        vals = np.asarray(vals)
        vals, counts = np.unique(vals[vals > 0], return_counts=True)
        try:
            return vals[counts.argmax()]
        except ValueError:
            # argmax on an empty array raises ValueError: no positive labels
            return -1
    if drop is None:
        drop = [
            'unknown', 'corpuscallosum', # default FreeSurfer
            'Background+FreeSurfer_Defined_Medial_Wall' # common alternative
        ]
    drop = _decode_list(drop)
    # get vertex-level labels (set drop labels to - values)
    vertices, end = [], 0
    for n, annot in enumerate([lhannot, rhannot]):
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)
        todrop = set(names) & set(drop)
        # adjust indices for earlier removals so insertion points line up
        inds = [names.index(f) - n for n, f in enumerate(todrop)]
        # running parcel numbering across hemispheres, skipping dropped ones
        labs = np.arange(len(names) - len(inds)) + (end - (len(inds) * n))
        insert = np.arange(-1, -(len(inds) + 1), -1)
        vertices.append(np.insert(labs, inds, insert)[labels])
        end += len(names)
    vertices = np.hstack(vertices)
    labels = np.unique(vertices)
    mask = labels > -1
    # get coordinates and hemisphere designation for spin generation
    coords, hemiid = _get_fsaverage_coords(version, 'sphere')
    if len(vertices) != len(coords):
        raise ValueError('Provided annotation files have a different number '
                         'of vertices than the specified fsaverage surface.\n'
                         ' ANNOTATION: {} vertices\n'
                         ' FSAVERAGE: {} vertices'
                         .format(len(vertices), len(coords)))
    # spin and assign regions based on max overlap
    # (forward `seed` explicitly — previously it was accepted but silently
    # ignored, making seeded calls non-reproducible; `spin_data` already
    # forwards it the same way)
    spins, cost = gen_spinsamples(coords, hemiid, n_rotate=n_rotate,
                                  seed=seed, **kwargs)
    regions = np.zeros((len(labels[mask]), n_rotate), dtype='int32')
    for n in range(n_rotate):
        regions[:, n] = labeled_comprehension(vertices[spins[:, n]], vertices,
                                              labels, overlap, int, -1)[mask]
    if return_cost:
        return regions, cost
    return regions
| [
"rossmarkello@gmail.com"
] | rossmarkello@gmail.com |
6b4427adecbd6d4a38872c33dcbca2e3d68aeb29 | e8ae11e5017507da59e2e92d423b6a1994490de4 | /env/lib/python2.7/site-packages/azure/batch/models/pool_delete_options.py | d959c796b779edb07a5117788f554dc19bb6cab6 | [] | no_license | teopeurt/ansible-ubuntu-server | 613d00cea28bc6531acf4a39aeeb9cd0baa2a391 | b5b6127d2ee9723c5088443efe2ffb8ae30cfea7 | refs/heads/master | 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 | Makefile | UTF-8 | Python | false | false | 3,192 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolDeleteOptions(Model):
    """Additional parameters for the Pool_Delete operation.
    Each attribute mirrors the constructor argument of the same name.
    :param timeout: The maximum time that the server can spend processing the
     request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id identifier in the response.
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. If not specified, this
     header will be automatically populated with the current system clock
     time.
    :type ocp_date: datetime
    :param if_match: An ETag is specified. Specify this header to perform the
     operation only if the resource's ETag is an exact match as specified.
    :type if_match: str
    :param if_none_match: An ETag is specified. Specify this header to
     perform the operation only if the resource's ETag does not match the
     specified ETag.
    :type if_none_match: str
    :param if_modified_since: Specify this header to perform the operation
     only if the resource has been modified since the specified date/time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Specify this header to perform the operation
     only if the resource has not been modified since the specified date/time.
    :type if_unmodified_since: datetime
    """
    def __init__(self, timeout=30, client_request_id=None, return_client_request_id=None, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
        # Store every option verbatim on the instance; None means the
        # corresponding header was not supplied.
        for attr_name, attr_value in (
                ('timeout', timeout),
                ('client_request_id', client_request_id),
                ('return_client_request_id', return_client_request_id),
                ('ocp_date', ocp_date),
                ('if_match', if_match),
                ('if_none_match', if_none_match),
                ('if_modified_since', if_modified_since),
                ('if_unmodified_since', if_unmodified_since)):
            setattr(self, attr_name, attr_value)
| [
"me@teopeurt.com"
] | me@teopeurt.com |
f1c6be714acc05c933ed4a627b677064ee9c889f | d26ddd67bff0e06be424f732d6b873f05a86fab3 | /applications/migrations/0004_auto_20200520_1009.py | a98a6d048f23f635941cadfab9f35567de3f4a28 | [] | no_license | TimaAvdrakh/univer | 28ace7afaf5f1d2651d57e4f30902b0da4b3fef4 | 41e3b7028c78b838177b918550ffaa5b29308a6d | refs/heads/master | 2022-12-08T02:09:53.491399 | 2020-08-05T04:40:19 | 2020-08-05T04:40:19 | 290,717,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # Generated by Django 2.2.4 on 2020-05-20 10:09
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: set SubApplication's unique constraint to ('id',)."""
    # Must be applied after the previous migration of the `applications` app.
    dependencies = [
        ('applications', '0003_auto_20200430_0934'),
    ]
    operations = [
        # Replace any previous unique_together on `subapplication` with the
        # single-field set {('id',)}.
        migrations.AlterUniqueTogether(
            name='subapplication',
            unique_together={('id',)},
        ),
    ]
| [
"Beattle-b@yandex.ru"
] | Beattle-b@yandex.ru |
f2aadf9fbcc0ba25fb9b0f082ab2a60c6ebf40cf | 99fc13928b8efd37f19541446a3ff25171a515ea | /flask_api_crud/code/models/user.py | 3e00185de9255d83e76f4df693d764b77ef5069a | [] | no_license | clwest/crud_api | 1257e8966afdca0fc7c00db0f0ea5ebc33b45bda | 1692f303fe2a8a70bc340485b3fe32ab0bd02006 | refs/heads/master | 2023-06-16T10:58:29.084279 | 2021-07-15T17:36:30 | 2021-07-15T17:36:30 | 338,493,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | from db import db
class UserModel(db.Model):
    """SQLAlchemy model representing an application user."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80))
    password = db.Column(db.String(80))
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def json(self):
        """Return a JSON-serializable view of this user (password omitted)."""
        return {'id': self.id, 'username': self.username}
    def save_to_db(self):
        """Insert (or update) this row and commit the session."""
        db.session.add(self)
        db.session.commit()
    def delete_from_db(self):
        """Delete this row and commit the session."""
        db.session.delete(self)
        db.session.commit()
    @classmethod
    def find_by_username(cls, username):
        """Return the first user matching `username`, or None."""
        return cls.query.filter_by(username=username).first()
    @classmethod
    def find_by_id(cls, _id):
        """Return the user with primary key `_id`, or None."""
        return cls.query.filter_by(id=_id).first()
"westremarketing@gmail.com"
] | westremarketing@gmail.com |
cca3497ee130bfce32a0cdf927de5d4546b8238a | 667840e8de9972039dda4a86241ab3c902c108e3 | /affine_ctcf_matchings.py | 54e78c5b1618ea6d7618d2f7055362672def3138 | [] | no_license | CSE282Project/mainprojectcode | e986749a533257cdbf5b24275412feaa2134b614 | 0216c7dc4657036e2ddbcbe4ca47b8973fb405f8 | refs/heads/master | 2020-05-17T05:54:16.846427 | 2015-03-11T06:26:20 | 2015-03-11T06:26:20 | 30,937,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,890 | py | from sys import argv
from itertools import product
class Matching:
    '''
    A collection of vertex-disjoint edges pairing motif positions (positive
    integers) with reverse-complement positions (negative integers)
    '''
    def __init__(self, edges=None):
        self.vertices = set()
        self.edges = []
        if edges is not None:
            self.add_edges(edges)
    def get_edges(self):
        return self.edges
    def add_edge(self, edge):
        u, v = edge
        # motif endpoint must be positive, reverse-complement negative
        assert u > 0
        assert v < 0
        # edges must remain vertex-disjoint
        assert u not in self.vertices
        assert v not in self.vertices
        self.vertices.add(u)
        self.vertices.add(v)
        self.edges.append(edge)
    def add_edges(self, edges):
        for e in edges:
            self.add_edge(e)
    def add_matching(self, matching):
        '''
        merge all edges of another matching into this one
        '''
        if matching is not None:
            self.add_edges(matching.get_edges())
    def get_weight(self):
        '''
        score a matching as the average genomic distance between matched
        endpoints; an empty matching scores (None, 0) so combining scores of
        multiple matchings stays simple
        '''
        n = len(self.edges)
        if n == 0:
            return None, 0
        total = sum((abs(u - abs(v)) for u, v in self.edges), 0.0)
        return total / n, n
    def __add__(self, other):
        merged = Matching()
        merged.add_matching(self)
        merged.add_matching(other)
        return merged
    def __iter__(self):
        return iter(self.edges)
    def __repr__(self):
        return str(self.edges)
    def __contains__(self, item):
        u, v = item
        # cheap vertex-membership check first (set lookup), then verify the
        # exact pair actually forms an edge
        if u not in self.vertices or v not in self.vertices:
            return False
        return any(u == a and v == b for a, b in self.edges)
# Watson-Crick base pairing table used for complementing DNA strings.
complements = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
def reverse_complement(dna):
    '''
    return the reverse complement of `dna`, incrementing the global work
    counter once per base examined
    '''
    global count
    out = []
    for base in reversed(dna):
        count += 1
        out.append(complements[base])
    return ''.join(out)
def get_vertices(genome, motifs, k):
    # Scan every k-mer of `genome`, recording 1-based positions of motif
    # occurrences (positive) and reverse-complement occurrences (negative).
    global count
    # NOTE(review): under Python 2 `map` returns a list, which the repeated
    # `in` tests below require; under Python 3 this would be a one-shot
    # iterator and silently break — confirm target interpreter.
    reverse_complements = map(reverse_complement, motifs)
    motif_nodes = []
    revc_nodes = []
    for i in range(len(genome) - k + 1):
        '''
        use 1-based indexing, so that positive/negative values delineate motifs from
        reverse complements
        '''
        count += 1
        pos = i + 1
        kmer = genome[i:i + k]
        # linear membership tests against the (small) motif lists
        if kmer in motifs:
            motif_nodes.append(pos)
        elif kmer in reverse_complements:
            revc_nodes.append(-pos)
    '''
    we know the graph is bipartite and semi-complete
    as such there is no need to create an adjacency list and matrix, we can infer the
    edge relation by simply bipartitioning the vertex set
    '''
    return motif_nodes, revc_nodes
def matching_helper(motif_nodes, revc_nodes, k, start, end, sub_matchings):
    '''
    helper function to find the minimum-weight maximal matching on a certain
    subsection of the genome
    Args:
        motif_nodes: the locations of CTCF binding motifs
        revc_nodes: the locations of reverse complements to CTCF binding motifs
        k: length of the motifs
        start, end: the endpoints of the region on which we should find our matching
        ensures that whatever matching the callee returns does not cross with the caller's
        matching
        sub_matchings: dictionary of minimally-weighted matchings on previously computed
        intervals, to avoid repeated computation
    Returns:
        The non-crossing maximal matching that minimizes the average distance between
        nodes in the matching among the space of all non-crossing maximal matchings on
        the interval. Returns an empty matching if there are no edges within this
        interval
    '''
    global recursiveCount, count
    # memoization: return the cached result for this interval if present
    try:
        return sub_matchings[(start, end)]
    except KeyError:
        pass
    count += len(motif_nodes) + len(revc_nodes)
    inbounds = lambda x : start <= x and x <= end
    # NOTE(review): Python 2 `filter` returns lists, which the indexing below
    # (motifs[0], motifs[-1]) relies on — confirm target interpreter.
    motifs = filter(inbounds, motif_nodes)
    reverses = filter(lambda x : inbounds(-x), revc_nodes)
    count += 1
    if len(motifs) > 0 and len(reverses) > 0:
        # tighten the interval to the outermost nodes actually inside it
        start2 = min([motifs[0], -reverses[0]])
        end2 = max([motifs[-1], -reverses[-1]])
        assert start <= start2
        assert end >= end2
        if (start < start2 or end > end2) and start2 < end2:
            recursiveCount += 1
            return matching_helper(motifs, reverses, k, start2, end2, sub_matchings)
    '''
    instead of initializing best_matching to None we initialize it to an empty matching
    if there are no edges within the interval, then an empty matching will be returned
    which is what we want
    '''
    # best_matching = Matching()
    best_matchings = [Matching()]
    best_weight = float("inf")
    best_n = 0
    for motif in motifs:
        assert motif > 0
        for revc in reverses:
            count += 1
            assert revc < 0
            dist = abs(motif - abs(revc))
            # endpoints closer than k would overlap; skip such edges
            if dist < k:
                continue
            '''
            adding edge (motif, revc) to the matching, then finding the best
            non-crossing matching that includes this edge
            by (greedily) adding whatever non-crossing edges possible we assure that our
            matching is maximally non-crossing. We take the best-weighted matching
            among maximal non-crossing matchings. We use memoization to avoid repated
            computation.
            '''
            first, last = sorted([motif, abs(revc)])
            # get best matching to the left of the edge, but within the interval
            lefts = matching_helper(motif_nodes, revc_nodes, k, start, first - k, sub_matchings)
            # get best matching to the right of the edge,
            rights = matching_helper(motif_nodes, revc_nodes, k, last + k, end, sub_matchings)
            # get best matching within the loop formed by this edge
            mids = matching_helper(motif_nodes, revc_nodes, k, first + k, last - k, sub_matchings)
            # for left in lefts:
            # for right in rights:
            # for mid in mids:
            for left, right, mid in product(lefts, rights, mids):
                count += 1
                matching_i = left + right + mid
                matching_i.add_edge((motif, revc))
                weight, n = matching_i.get_weight()
                # if this matching is optimal on this interval, then combine all the optimal
                # sub-matchings and add this edge
                if weight <= best_weight:
                    # strictly better weight, or same weight with more edges,
                    # replaces the incumbent set; ties on both are collected
                    if weight < best_weight or n > best_n:
                        best_weight = weight
                        best_n = n
                        best_matchings = [matching_i]
                    elif n == best_n:
                        best_matchings.append(matching_i)
    sub_matchings[(start, end)] = best_matchings
    recursiveCount += 1
    print "finished", start, end
    return best_matchings
def maximal_matching(genome, motifs, k):
    # Top-level entry point: build the bipartite vertex sets, then delegate
    # to the recursive interval solver over the whole genome.
    # generate the graph
    motif_nodes, revc_nodes = get_vertices(genome, motifs, k)
    print "graph made"
    # NOTE(review): the description below sketches a planned affine-gap DAG
    # formulation; the current implementation simply calls matching_helper on
    # the full interval — confirm whether the DAG stage was ever implemented.
    '''
    call the helper function, 1st forms all three node optimal(?) matchings.
    Then, the three way matchings are generalized to an edge set.
    The edge set is generalized to a Directed Acyclic Graph.
    The DAG is simplified to a bilevel Affine gap graph of loops and loop skips:
    s -> 12 23 -> 45 -> $
    \ / \ / \ / \ /
    M1 -> M2 -> M3 -> M4
    (all '\' are directed downward, all '/' are directed upward)
    Finally, we find the longest path through the DAG using the score of each loop as the edgeweight
    '''
    return matching_helper(motif_nodes, revc_nodes, k, 1, len(genome) - k, {})
# Module-level instrumentation: recursion depth and raw work counters.
recursiveCount = 0
count = 0
def initCounter():
    '''
    reset the module-level instrumentation counters to zero
    '''
    global recursiveCount, count
    recursiveCount = count = 0
def parse_file(fname):
    '''
    parse a problem file whose first line is the genome, second line is the
    motif length k, and remaining lines are the motifs
    Returns:
        (genome, k, motifs) where genome is a string, k an int, and motifs a
        list of strings
    '''
    # use a context manager so the handle is closed even on error (the
    # previous version leaked the open file); also avoid shadowing the
    # builtin `input`
    with open(fname) as f:
        lines = f.read().splitlines()
    genome = lines[0]
    k = int(lines[1])
    motifs = lines[2:]
    return genome, k, motifs
if __name__ == '__main__':
    # CLI usage: python affine_ctcf_matchings.py <input_file>
    fname = argv[1]
    genome, k, motifs = parse_file(fname)
    matchings = maximal_matching(genome, motifs, k)
    for matching in matchings:
        # print each optimal matching and its (average distance, edge count)
        print matching
        print matching.get_weight()
| [
"arjunc12@gmail.com"
] | arjunc12@gmail.com |
bdce2a77a3125a7db599035c4836c071af0a44a6 | 5cf88e202a8d6bcb1c9b68173476bd9fd8510ebc | /main.py | 4c55c7e57b6f7336bc568e5218a1436b1976fc33 | [
"MIT"
] | permissive | i2nes/app-engine-blog | b2cd45821befaef9c78d0718c223626d0bc3107b | 94cdc25674c946ad643f7f140cbedf095773de3f | refs/heads/master | 2021-07-25T13:37:06.697703 | 2017-11-03T23:37:35 | 2017-11-03T23:37:35 | 106,749,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | from app import create_app
from config import config, blog_config
app = create_app(config, blog_config)
| [
"joao.antunes@sky.uk"
] | joao.antunes@sky.uk |
202131d751e30e0b6464079f63e290c45a89c07a | 6cdff1cccb229bd98c7b7fce0ad3df32e4f04557 | /tests/conftest.py | 4eb8621c176f7ad405450bd91027044cc1498eb9 | [] | no_license | MITLibraries/workflow | fb8cbdf809702318c8d7c64307da90c0acda28cc | 63a17c3021e2bc0e0b13d22246ce3f13295349ca | refs/heads/main | 2023-03-04T10:38:12.270942 | 2021-07-08T18:06:16 | 2021-07-08T18:06:16 | 211,862,997 | 2 | 1 | null | 2023-02-08T01:14:43 | 2019-09-30T13:12:20 | Python | UTF-8 | Python | false | false | 3,108 | py | from collections import namedtuple
import json
from unittest import mock
import boto3
from moto import mock_ecs, mock_ec2
from moto.ec2.utils import generate_instance_identity_document
import pytest
from manager.cluster import Cluster
@pytest.fixture(autouse=True)
def aws_credentials(monkeypatch):
    """Inject dummy AWS credentials into the environment for every test."""
    fake_env = {
        'AWS_ACCESS_KEY_ID': 'foo',
        'AWS_SECRET_ACCESS_KEY': 'correct horse battery staple',
        'AWS_SESSION_TOKEN': 'baz',
        'AWS_DEFAULT_REGION': 'us-east-1',
    }
    for key, value in fake_env.items():
        monkeypatch.setenv(key, value)
@pytest.fixture
def cluster():
    """Create the mock Airflow cluster.
    moto doesn't support the Fargate launch type, so we have to pretend
    like we're going to launch our containers in EC2. There's a little
    hand waving to make this work. moto comes with some predefined images
    that seem to work fine.
    Also see the ``patch_cluster_config`` fixture below.
    """
    # Named tuple bundling the cluster name with its three service names.
    C = namedtuple('C', ['name', 'scheduler', 'worker', 'web'])
    cluster = C('airflow-test', 'airflow-test-scheduler',
                'airflow-test-worker', 'airflow-test-web')
    with mock_ecs(), mock_ec2():
        ec2_client = boto3.client('ec2')
        ec2 = boto3.resource('ec2')
        ecs = boto3.client('ecs')
        # any of moto's canned AMIs will do for a fake container instance
        image = ec2_client.describe_images()['Images'][0]
        instance = ec2.create_instances(ImageId=image['ImageId'], MinCount=1,
                                        MaxCount=1)[0]
        # ECS requires an instance identity document to register the instance
        doc = json.dumps(generate_instance_identity_document(instance))
        ecs.create_cluster(clusterName=cluster.name)
        ecs.register_container_instance(cluster=cluster.name,
                                        instanceIdentityDocument=doc)
        # register a task definition and one-task service per component
        # (cluster[1:] skips the cluster name field)
        for service in cluster[1:]:
            ecs.register_task_definition(family=service,
                                         containerDefinitions=[])
            ecs.create_service(cluster=cluster.name,
                               serviceName=service,
                               desiredCount=1,
                               taskDefinition=f'{service}:1')
        # workers are scaled to 3 to exercise multi-task behavior
        ecs.update_service(cluster=cluster.name,
                           service=cluster.worker,
                           desiredCount=3)
        # yield inside the mock context so the mocks stay active in the test
        yield cluster
@pytest.fixture(autouse=True)
def patch_cluster_config():
    """Patch the private config method on Cluster.
    moto does not add the networkConfiguration to the service description.
    Rather than just patching the whole thing, this effectively provides a
    runtime decorator on the ``Cluster.__get_config`` method to augment the
    response.
    """
    def wraps(f):
        def wrapped(*args, **kwargs):
            network_config = {
                'awsvpcConfiguration': {
                    'subnets': ['awesome-subnet', 'dumb-subnet']
                }
            }
            res = f(*args, **kwargs)
            # augment each returned service config with the network section
            # moto omits (was a list comprehension used purely for its side
            # effects; a plain loop states the intent)
            for r in res:
                r.update(networkConfiguration=network_config)
            return res
        return wrapped
    # access the name-mangled private method and swap in the wrapped version
    func = wraps(Cluster._Cluster__get_config)
    with mock.patch.object(Cluster, '_Cluster__get_config', func):
        yield
| [
"mgraves@mit.edu"
] | mgraves@mit.edu |
e16189f36956843b3dfa3909dccea36da75ad30e | 5de4aed3d9a9230404150d4c3c553ea05ac4e088 | /afm/logger.py | c872f1d55b593e4a85f55bd2fb43d16e0e878e5a | [] | no_license | UfSoft/afm | db4df3189095aa916b3a3f770d5366bb3e0a9b74 | 2e85c65389a10f7bed032956b0c603bbb2af2dac | refs/heads/master | 2021-01-19T13:25:08.121356 | 2009-10-29T15:24:49 | 2009-10-29T15:24:49 | 26,618,925 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | # -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8 et
# ==============================================================================
# Copyright © 2009 UfSoft.org - Pedro Algarvio <ufs@ufsoft.org>
#
# License: BSD - Please view the LICENSE file for additional information.
# ==============================================================================
import logging
from twisted.internet import defer
# Capture whatever logger class is currently registered so ``Logging`` can
# extend it without hard-coding ``logging.Logger``.
LoggingLoggerClass = logging.getLoggerClass()
class Logging(LoggingLoggerClass):
    """Logger subclass whose level methods are Twisted ``inlineCallbacks``.
    Each method yields the synchronous base-class call, so every call
    returns a Deferred rather than a plain value.
    """
    def __init__(self, logger_name='afm', level=logging.DEBUG):
        # delegate to the captured base class; default channel name is 'afm'
        LoggingLoggerClass.__init__(self, logger_name, level)
    @defer.inlineCallbacks
    def debug(self, msg, *args, **kwargs):
        yield LoggingLoggerClass.debug(self, msg, *args, **kwargs)
    @defer.inlineCallbacks
    def info(self, msg, *args, **kwargs):
        yield LoggingLoggerClass.info(self, msg, *args, **kwargs)
    @defer.inlineCallbacks
    def warning(self, msg, *args, **kwargs):
        yield LoggingLoggerClass.warning(self, msg, *args, **kwargs)
    # keep the stdlib-style alias
    warn = warning
    @defer.inlineCallbacks
    def error(self, msg, *args, **kwargs):
        yield LoggingLoggerClass.error(self, msg, *args, **kwargs)
    @defer.inlineCallbacks
    def critical(self, msg, *args, **kwargs):
        yield LoggingLoggerClass.critical(self, msg, *args, **kwargs)
    @defer.inlineCallbacks
    def exception(self, msg, *args, **kwargs):
        yield LoggingLoggerClass.exception(self, msg, *args, **kwargs)
| [
"ufs@ufsoft.org"
] | ufs@ufsoft.org |
3e0de5cb556e7c8e5d1c6598623bb17beae6bb7c | 369061ac4e2fcbac7ee4e4cd7e06e4712ed5884f | /travell/whole/apps.py | d90f2a1ad2a4f8a8182dbc5c5381ee525c7eb7ae | [] | no_license | saurabh32-cell/Real-world | ebe1dc46649f61ff9e442dd7370385ab478279f4 | cb121dda0aabe31a684c546ae3a45ad0b2d5d9a6 | refs/heads/master | 2023-07-22T04:34:43.320172 | 2021-08-31T10:41:07 | 2021-08-31T10:41:07 | 400,728,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class WholeConfig(AppConfig):
    """Django application configuration for the 'whole' app."""
    # Dotted module path Django uses to locate and register this application.
    name = 'whole'
| [
"saurabh32-cell"
] | saurabh32-cell |
870ddd8535f2ebfe0c5101edf3aa733366abd305 | edb198afa6cde22041c4caab39c6c4bd7bc89d30 | /src/lib/server/presenterserver/common/presenter_socket_server.py | 6bc375f1e1393440e1b24e40d836bea29e2bbd1b | [
"Apache-2.0"
] | permissive | Dedederek/HiFly_Drone | 24119d382b356d0dc997c7273728bc2a8bfa87a0 | 1e523d776474f4e17b18f53e6f1ea9916f28050d | refs/heads/main | 2023-06-18T03:37:41.618258 | 2021-07-07T20:48:06 | 2021-07-07T20:48:06 | 383,921,036 | 0 | 0 | Apache-2.0 | 2021-07-07T20:47:31 | 2021-07-07T20:47:30 | null | UTF-8 | Python | false | false | 18,360 | py | # =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""presenter socket server module"""
import threading
import select
import struct
import logging
import socket
from google.protobuf.message import DecodeError
import common.presenter_message_pb2 as pb2
from common.channel_manager import ChannelManager
from common.channel_handler import ChannelHandler
#read nothing from socket.recv()
SOCK_RECV_NULL = b''
# epool will return if no event coming in 1 s
EPOLL_TIMEOUT = 1
# it specifies the number of unaccepted connections that
# the system will allow before refusing new connections.
SOCKET_WAIT_QUEUE = 2
# message head length, include 4 bytes message total length
# and 1 byte message name length
MSG_HEAD_LENGTH = 5
class PresenterSocketServer():
    """A socket server communicating with presenter agents.

    Connections are accepted and multiplexed with ``select.epoll`` on a
    dedicated listener thread started from the constructor.

    Wire format of every message (lengths in network byte order):

        4 bytes  total message length (includes this 5-byte head)
        1 byte   message name length
        N bytes  message name (utf-8)
        M bytes  message body (serialized protobuf)

    NOTE(review): subclasses are expected to provide ``_process_msg`` and a
    ``channel_manager`` attribute -- neither is defined in this base class;
    confirm against the concrete server implementations.
    """

    def __init__(self, server_address):
        """
        Args:
            server_address: server listen address,
                include an ipv4 address and a port.
        """
        # Thread exit switch; once True the listener thread exits at its
        # next poll timeout.
        self.thread_exit_switch = False
        # Message head length: 4 bytes total length + 1 byte name length.
        self.msg_head_len = 5
        self._create_socket_server(server_address)

    def _create_socket_server(self, server_address):
        """Create the listening socket and start the epoll listener thread.

        Args:
            server_address: (ip, port) tuple the server binds to.
        """
        self._sock_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts without waiting for TIME_WAIT to expire.
        self._sock_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._sock_server.bind(server_address)
        self._sock_server.listen(SOCKET_WAIT_QUEUE)
        # The listening socket must be non-blocking so epoll drives accept().
        self._sock_server.setblocking(False)

        # Get server host name and port for the startup banner.
        host, port = self._sock_server.getsockname()[:2]

        # Start presenter socket server thread.
        threading.Thread(target=self._server_listen_thread).start()

        # Display directly on the screen.
        print('Presenter socket server listen on %s:%s\n' % (host, port))

    def set_exit_switch(self):
        """Set switch True to stop presenter socket server thread."""
        self.thread_exit_switch = True

    def _read_socket(self, conn, read_len):
        """Read exactly ``read_len`` bytes from ``conn``.

        Args:
            conn: a socket connection.
            read_len: number of bytes to read.

        Returns:
            (ret, buf): ret is True and buf holds the bytes on success;
            (False, None) on socket error or if the peer closed.
        """
        has_read_len = 0
        total_buf = b''
        while has_read_len != read_len:
            try:
                read_buf = conn.recv(read_len - has_read_len)
            except socket.error:
                logging.error("socket %u exception:socket.error", conn.fileno())
                return False, None
            # recv() returning b'' means the peer closed the connection.
            if read_buf == b'':
                return False, None
            total_buf += read_buf
            has_read_len = len(total_buf)
        return True, total_buf

    def _read_msg_head(self, sock_fileno, conns):
        """Read and unpack the fixed-size message head.

        Args:
            sock_fileno: a socket fileno.
            conns: dict mapping fileno -> socket connection.

        Returns:
            (msg_total_len, msg_name_len), or (None, None) on read failure.
        """
        ret, msg_head = self._read_socket(conns[sock_fileno], self.msg_head_len)
        if not ret:
            logging.error("socket %u receive msg head null", sock_fileno)
            return None, None
        # In struct format strings, 'I' is unsigned int, 'B' is unsigned char.
        msg_head_data = struct.Struct('IB')
        (msg_total_len, msg_name_len) = msg_head_data.unpack(msg_head)
        # The total length travels in network byte order.
        msg_total_len = socket.ntohl(msg_total_len)
        return msg_total_len, msg_name_len

    def _read_msg_name(self, sock_fd, conns, msg_name_len):
        """Read the utf-8 message name.

        Args:
            sock_fd: a socket fileno.
            conns: dict mapping fileno -> socket connection.
            msg_name_len: message name length in bytes.

        Returns:
            (ret, msg_name): ret is True and msg_name is the decoded string
            on success; (False, None) on read or decode failure.
        """
        ret, msg_name = self._read_socket(conns[sock_fd], msg_name_len)
        if not ret:
            logging.error("socket %u receive msg name null", sock_fd)
            return False, None
        try:
            msg_name = msg_name.decode("utf-8")
        except UnicodeDecodeError:
            logging.error("msg name decode to utf-8 error")
            return False, None
        return True, msg_name

    def _read_msg_body(self, sock_fd, conns, msg_body_len, msgs):
        """Read the message body and stash it into ``msgs``.

        Args:
            sock_fd: a socket fileno.
            conns: dict mapping fileno -> socket connection.
            msg_body_len: body length in bytes.
            msgs: dict the raw body is stored into, keyed by fileno.

        Returns:
            True on success, False on read failure.
        """
        ret, msg_body = self._read_socket(conns[sock_fd], msg_body_len)
        if not ret:
            logging.error("socket %u receive msg body null", sock_fd)
            return False
        msgs[sock_fd] = msg_body
        return True

    def _read_sock_and_process_msg(self, sock_fileno, conns, msgs):
        """Read one complete framed message and dispatch it.

        Args:
            sock_fileno: a socket fileno, return value of socket.fileno().
            conns: all socket connections registered in epoll.
            msgs: msg read from a socket, keyed by fileno.

        Returns:
            True on success, False if the connection should be dropped.
        """
        # Step1: read msg head
        msg_total_len, msg_name_len = self._read_msg_head(sock_fileno, conns)
        if msg_total_len is None:
            logging.error("msg_total_len is None.")
            return False

        # Step2: read msg name
        ret, msg_name = self._read_msg_name(sock_fileno, conns, msg_name_len)
        if not ret:
            return ret

        # Step3: read msg body; a negative length means a malformed head.
        msg_body_len = msg_total_len - self.msg_head_len - msg_name_len
        if msg_body_len < 0:
            logging.error("msg_total_len:%u, msg_name_len:%u, msg_body_len:%u",
                          msg_total_len, msg_name_len, msg_body_len)
            return False
        ret = self._read_msg_body(sock_fileno, conns, msg_body_len, msgs)
        if not ret:
            return ret

        # Step4: process msg (implemented by the concrete subclass).
        ret = self._process_msg(conns[sock_fileno], msg_name, msgs[sock_fileno])
        return ret

    def _process_epollin(self, sock_fileno, epoll, conns, msgs):
        """Handle an EPOLLIN event: read and process one message.

        Any failure (protocol or socket error) tears the connection down.

        Args:
            sock_fileno: a socket fileno, return value of socket.fileno().
            epoll: a set of select.epoll.
            conns: all socket connections registered in epoll.
            msgs: msg read from a socket.
        """
        msgs[sock_fileno] = b''
        try:
            ret = self._read_sock_and_process_msg(sock_fileno, conns, msgs)
            if not ret:
                self._clean_connect(sock_fileno, epoll, conns, msgs)
        except socket.error:
            logging.error("receive socket error.")
            self._clean_connect(sock_fileno, epoll, conns, msgs)

    def _accept_new_socket(self, epoll, conns):
        """Accept a pending connection and register it with epoll.

        Args:
            epoll: a set of select.epoll.
            conns: all socket connections registered in epoll.
        """
        try:
            new_conn, address = self._sock_server.accept()
            # Per-connection sockets are blocking: message reads expect to
            # block until the full frame arrives.
            new_conn.setblocking(True)
            epoll.register(new_conn.fileno(), select.EPOLLIN | select.EPOLLHUP)
            conns[new_conn.fileno()] = new_conn
            logging.info("create new connection:client-ip:%s, client-port:%s, fd:%s",
                         address[0], address[1], new_conn.fileno())
        except socket.error:
            logging.error("socket.error exception when sock.accept()")

    def _server_listen_thread(self):
        """Socket server thread: epoll loop over all registered sockets."""
        epoll = select.epoll()
        epoll.register(self._sock_server.fileno(), select.EPOLLIN | select.EPOLLHUP)
        try:
            conns = {}
            msgs = {}
            while True:
                # thread must exit immediately when the switch is set
                if self.thread_exit_switch:
                    break
                events = epoll.poll(EPOLL_TIMEOUT)
                # timeout, but no event come, continue waiting
                if not events:
                    continue
                for sock_fileno, event in events:
                    # new connection request from presenter agent
                    if self._sock_server.fileno() == sock_fileno:
                        self._accept_new_socket(epoll, conns)
                    # remote connection closed:
                    # presenter agent exited without closing the socket.
                    elif event & select.EPOLLHUP:
                        logging.info("receive event EPOLLHUP")
                        self._clean_connect(sock_fileno, epoll, conns, msgs)
                    # new data coming in a socket connection
                    elif event & select.EPOLLIN:
                        self._process_epollin(sock_fileno, epoll, conns, msgs)
                    # receive event not recognized: drop the connection
                    else:
                        logging.error("not recognize event %f", event)
                        self._clean_connect(sock_fileno, epoll, conns, msgs)
        finally:
            logging.info("conns:%s", conns)
            logging.info("presenter server listen thread exit.")
            epoll.unregister(self._sock_server.fileno())
            epoll.close()
            self._sock_server.close()

    def _process_heartbeat(self, conn):
        """Refresh the heartbeat of the channel bound to ``conn``.

        Args:
            conn: a socket connection.

        Returns:
            True always (a heartbeat for an unknown channel is ignored).
        """
        sock_fileno = conn.fileno()
        handler = self.channel_manager.get_channel_handler_by_fd(sock_fileno)
        if handler is not None:
            handler.set_heartbeat()
        return True

    def _process_open_channel(self, conn, msg_data):
        """Deserialize protobuf and process an open_channel request.

        Args:
            conn: a socket connection.
            msg_data: serialized pb2.OpenChannelRequest, which carries:
                channel_name (string) and content_type
                (kChannelContentTypeImage = 0 / kChannelContentTypeVideo = 1).

        Returns:
            True if the channel was opened, False otherwise (an error
            response has been sent either way).
        """
        request = pb2.OpenChannelRequest()
        response = pb2.OpenChannelResponse()
        try:
            request.ParseFromString(msg_data)
        except DecodeError:
            logging.error("ParseFromString exception: Error parsing message")
            channel_name = "unknown channel"
            return self._response_open_channel(conn, channel_name, response,
                                               pb2.kOpenChannelErrorOther)

        channel_name = request.channel_name
        # Check whether the channel exists; if not, create it on the fly.
        if not self.channel_manager.is_channel_exist(channel_name):
            logging.error("channel name %s is not exist.", channel_name)
            ret = self.channel_manager.register_one_channel(channel_name)
            if ret != ChannelManager.err_code_ok:
                logging.error("Create the channel %s failed!, and ret is %d", channel_name, ret)
                # BUG FIX: the original fell through after replying here,
                # continuing to open a channel that failed registration and
                # potentially sending a second response. Return immediately.
                return self._response_open_channel(conn, channel_name, response,
                                                  pb2.kOpenChannelErrorOther)

        # Reject the request if the channel is already opened by someone.
        if self.channel_manager.is_channel_busy(channel_name):
            logging.error("channel path %s is busy.", channel_name)
            err_code = pb2.kOpenChannelErrorChannelAlreadyOpened
            return self._response_open_channel(conn, channel_name, response,
                                               err_code)

        # If channel type is image, clean a leftover image if one exists.
        self.channel_manager.clean_channel_image(channel_name)

        if request.content_type == pb2.kChannelContentTypeImage:
            media_type = "image"
        elif request.content_type == pb2.kChannelContentTypeVideo:
            media_type = "video"
        else:
            logging.error("media type %s is not recognized.",
                          request.content_type)
            return self._response_open_channel(conn, channel_name, response,
                                               pb2.kOpenChannelErrorOther)

        handler = ChannelHandler(channel_name, media_type)
        self.channel_manager.create_channel_resource(
            channel_name, conn.fileno(), media_type, handler)
        return self._response_open_channel(conn, channel_name, response,
                                           pb2.kOpenChannelErrorNone)

    def _response_open_channel(self, conn, channel_name, response, err_code):
        """Fill in and send the OpenChannelResponse protobuf.

        Args:
            conn: a socket connection.
            channel_name: name of a channel.
            response: a pb2.OpenChannelResponse to fill and send, carrying
                error_code (OpenChannelErrorCode enum) and error_message.
            err_code: one of kOpenChannelErrorNone / NoSuchChannel /
                ChannelAlreadyOpened / Other.

        Returns:
            True iff err_code == kOpenChannelErrorNone.
        """
        response.error_code = err_code
        ret_code = False
        if err_code == pb2.kOpenChannelErrorNoSuchChannel:
            response.error_message = "channel {} not exist." \
                .format(channel_name)
        elif err_code == pb2.kOpenChannelErrorChannelAlreadyOpened:
            response.error_message = "channel {} is busy.".format(channel_name)
        elif err_code == pb2.kOpenChannelErrorNone:
            response.error_message = "open channel succeed"
            ret_code = True
        else:
            response.error_message = "Unknown err open channel {}." \
                .format(channel_name)
        self.send_message(conn, response, pb2._OPENCHANNELRESPONSE.full_name)
        return ret_code

    def send_message(self, conn, protobuf, msg_name):
        """API for sending one framed message.

        Args:
            conn: a socket connection.
            protobuf: message body defined in protobuf (must support
                SerializeToString()).
            msg_name: msg name placed in the frame head.

        Returns: NA
        """
        message_data = protobuf.SerializeToString()
        message_len = len(message_data)
        msg_name_size = len(msg_name)
        msg_total_size = self.msg_head_len + msg_name_size + message_len
        # In struct format strings, 'I' is unsigned int, 'B' is unsigned char.
        head_struct = struct.Struct('IB')
        msg_head = (socket.htonl(msg_total_size), msg_name_size)
        packed_msg_head = head_struct.pack(*msg_head)
        msg_data = packed_msg_head + \
            bytes(msg_name, encoding="utf-8") + message_data
        conn.sendall(msg_data)
| [
"hardyshongyijin@gmail.com"
] | hardyshongyijin@gmail.com |
ee115ea926c68bbd2775e5a2cedf7635265e9c69 | 30f7dd10d4facde13b918c7c3e5c9fee50abfd69 | /audio_dev.py | 2df08a8bfb9c9e5e90a1da969c4c347d455c9439 | [] | no_license | RochSchanen/audio | 5e6cbd84b5e88e5c2176013e89b06728aa2d690f | af9aa8ed6ae76bfb3397b7b96e88d0a4eda38632 | refs/heads/main | 2023-02-11T20:01:10.918983 | 2021-01-08T17:20:09 | 2021-01-08T17:20:09 | 323,748,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,390 | py | #! /usr/bin/python3
# file: audio.py
# created: 20201220
# author: roch schanen
# comment: > chmod u+x audio_dev.py
# comment: > ./audio_dev.py
# comment: > aplay sound.wav
# __DEBUG__ = True
# from numpy import array
# from numpy import append
# from numpy import empty
# from numpy import iinfo
# from numpy import finfo
# from numpy import uint8
# from numpy import int16
# from numpy import int32
# from numpy import int64
# from numpy import float32
# from numpy import float64
# from numpy import frombuffer
# to do: automate some convertion
# : keep data in float format
# : at the moment we need getData/setData()
# : to update format changes.
from audio import wave
if __name__ == "__main__":

    # Development driver: pick one numbered experiment below.
    __DEVSTEP__ = 5

    # ------------------
    if __DEVSTEP__ == 5:
        # build two waves and export data in .wav (WAVE) file
        from numpy import linspace
        from numpy import sin
        from numpy import pi
        from numpy import array
        from numpy import append

        R = array([], float)
        L = array([], float)
        r = 44100  # rate (sample/s)
        d = 1.0    # duration of each part (s)
        # compute time interval
        t = linspace(0.0, d, int(d*r))
        # compute wave parts; each part is
        # [freq_R, amp_R, phase_R, freq_L, amp_L, phase_L]
        for part in [
            [440.0, 1.0, 0.0*pi, 440.0, 1.0, 0.0*pi],
            [440.0, 1.0, 0.0*pi, 440.0, 1.0, 0.0*pi],
            ]:
            fR, aR, pR, fL, aL, pL = part
            R = append(R, aR*sin(2.0*pi*fR*t+pR))  # right channel
            L = append(L, aL*sin(2.0*pi*fL*t+pL))  # left channel
        mywave = wave()
        # setData takes channels in [left, right] order (see __DEVSTEP__ 1).
        mywave.setData([L,R])
        mywave.setSampleRate(r)
        # mywave.displayMeta()
        # mywave.displayData()
        mywave.exportFile('./sound.wav')
        # > aplay sound.wav
        # should show the correct parameters
        # should sound like a pure 'A' note

    # ------------------
    if __DEVSTEP__ == 4:
        # load float 32 bits wav file
        # convert
        # save integer 16 bits wav file
        mywave = wave()
        mywave.importFile('./soundcopy.wav')
        mywave.displayMeta()
        X, Y = mywave.getData()
        # 'af' is the audio format tag (1 = integer PCM), 'sw' the sample
        # width in bits -- presumably matching the WAVE header fields; the
        # data must be re-set after a format change to apply the conversion.
        mywave.set('af', 1)
        mywave.set('sw', 16)
        mywave.setData([X, Y])
        mywave.displayMeta()
        mywave.exportFile('./soundcopy2.wav')
        # > aplay soundcopy.wav
        # > aplay soundcopy2.wav
        # should show the correct parameters
        # should sound exactly the same

    # ------------------
    if __DEVSTEP__ == 3:
        # load integer 16 bits wav file
        # convert
        # save float 32 bits wav file
        mywave = wave()
        mywave.importFile('./sound.wav')
        # mywave.displayMeta()
        X, Y = mywave.getData()
        # print(min(X), max(X))
        # print(min(Y), max(Y))
        # 'af' = 3 is IEEE float PCM, 32-bit samples.
        mywave.set('af', 3)
        mywave.set('sw', 32)
        mywave.setData([X, Y])
        mywave.exportFile('./soundcopy.wav')
        # > aplay soundcopy.wav
        # > aplay sound.wav
        # should show the correct parameters
        # should sound exactly the same

    # ------------------
    if __DEVSTEP__ == 2:
        # import, export test (round-trip a file unchanged)
        print()
        mywave = wave()
        print()
        mywave.importFile('./sound.wav')
        print()
        mywave.displayMeta()
        print()
        mywave.exportFile('./soundcopy.wav')
        # > aplay soundcopy.wav
        # > aplay sound.wav
        # should show the correct parameters
        # should sound exactly the same

    # ------------------
    if __DEVSTEP__ == 1:
        # build two waves and export data in .wav (WAVE) file
        from numpy import linspace
        from numpy import sin
        from numpy import pi

        f1 = 440.0  # left frequency
        f2 = 440.0  # right frequency
        r = 44100   # rate (sample/s)
        d = 1.0     # duration
        # compute sound waves
        t = linspace(0.0, d, int(d*r))
        x = sin(2.0*pi*f1*t)  # compute left channel
        y = sin(2.0*pi*f2*t)  # compute right channel
        mywave = wave()
        mywave.setData([x,y])
        mywave.setSampleRate(r)
        # mywave.displayMeta()
        # mywave.displayData()
        mywave.exportFile('./sound.wav')
        # > aplay sound.wav
        # should show the correct parameters
        # should sound like a pure 'A' note
| [
"schanen@lancaster.ac.uk"
] | schanen@lancaster.ac.uk |
05c686e57349070729fed3e90271c155029b76cb | 25ad906181ae94f3423a5330b06112faaf1d0059 | /python网络数据采集/my_爬虫_进阶_之路/scrapy框架/my_spiders/curl转python/test.py | 1063f2c551f1e27ed86e7b64a5c2a17899e79e4f | [] | no_license | liuhe3647/python | 5ee0aff3f2bbff864fdb86db0371d0a07745dc26 | 4368cab542f4d2b1ecc845ff996e8898a9aaaca6 | refs/heads/master | 2022-04-18T15:56:45.263684 | 2020-04-18T03:43:18 | 2020-04-18T03:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,293 | py | # coding:utf-8
'''
@author = super_fazai
@File : test.py
@connect : superonesfazai@gmail.com
'''
from __future__ import unicode_literals
from ftfy import fix_text
from random import randint
from urllib.parse import (
urlparse,
parse_qsl,
urlencode,)
from fzutils.ip_pools import (
fz_ip_pool,
ip_proxy_pool,
sesame_ip_pool,
tri_ip_pool,
get_random_proxy_ip_from_ip_pool,)
from fzutils.spider.fz_aiohttp import AioHttp
from fzutils.spider.fz_driver import (
BaseDriver,
CHROME,
FIREFOX,)
from fzutils.spider.fz_phantomjs import CHROME_DRIVER_PATH
from fzutils.url_utils import unquote_plus
from fzutils.img_utils import save_img_through_url
from fzutils.spider.fz_driver import PHONE
from fzutils.common_utils import _print
from fzutils.data.excel_utils import read_info_from_excel_file
from fzutils.data.list_utils import list_remove_repeat_dict_plus
from fzutils.internet_utils import (
str_cookies_2_dict,
_get_url_contain_params,
tuple_or_list_params_2_dict_params,
driver_cookies_list_2_str,)
from fzutils.qrcode_utils import decode_qrcode
from fzutils.spider.fz_requests import (
PROXY_TYPE_HTTP,
PROXY_TYPE_HTTPS,)
from fzutils.spider.selector import *
from fzutils.spider.async_always import *
from fzutils.spider.selenium_always import *
# Local path to the Mozilla geckodriver binary used for Selenium Firefox
# sessions (machine-specific; adjust per host).
FIREFOX_DRIVER_PATH = '/Users/afa/myFiles/tools/geckodriver'
# headers = {
# 'Accept-Encoding': 'br, gzip, deflate',
# 'Connection': 'keep-alive',
# 'Accept': '*/*',
# 'Host': 'alisitecdn.m.taobao.com',
# 'User-Agent': 'iPhone7,1(iOS/11.0) AliApp(TB/8.4.0) Weex/0.20.0 1242x2208',
# 'Accept-Language': 'zh-cn',
# }
#
# params = (
# ('pathInfo', 'shop/impression'),
# ('userId', '3012445016'),
# ('shopId', '380157209'),
# ('pageId', '0'),
# )
# url = 'https://alisitecdn.m.taobao.com/pagedata/shop/impression'
# body = Requests.get_url_body(
# url=url,
# headers=headers,
# params=params,
# cookies=None,
# ip_pool_type=tri_ip_pool)
# # print(body)
# data = json_2_dict(
# json_str=body,
# default_res={}).get('module', {})
# # pprint(data)
# # 服务电话的js
# # print(data.get('module', {}).get('moduleSpecs', {}).get('shop_base_info', {}).get('moduleCode', ''))
#
# def wash_ori_data(ori_data:dict):
# """
# 清洗原始data
# :return:
# """
# try:
# ori_data.pop('moduleSpecs')
# ori_data.pop('moduleList')
# except:
# pass
#
# return ori_data
#
# data = wash_ori_data(ori_data=data)
# pprint(data)
# wireshark
# iOS (ip.addr == 192.168.3.2 or ip.src == 192.168.3.2) and ssl
# meizu (ip.addr == 192.168.3.4 or ip.src == 192.168.3.4) and (ssl or http)
# charles
# https://campaigncdn.m.taobao.com/moduledata/downgrade.htm?dataId=taobao
# https://alisitecdn.m.taobao.com/pagedata/shop/index?pathInfo=shop/index&userId=201249601&shopId=58640118&pageId=1860970
# https://alisitecdn.m.taobao.com/pagedata/shop/impression?pathInfo=shop/impression&userId=201249601&shopId=58640118&pageId=0
# wireshark
# $ sudo /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --ssl-key-log-file=/Users/afa/sslkeylog.log
# android (ip.addr == 192.168.3.4 or ip.src == 192.168.3.4) and ssl
# company_info
# headers = {
# 'Connection': 'keep-alive',
# 'Cache-Control': 'max-age=0',
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': get_random_pc_ua(),
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
# # 'Referer': 'http://z.go2.cn/product/oaamaeq.html',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# }
# url = 'http://diteni.go2.cn/'
# body = Requests.get_url_body(
# url=url,
# headers=headers,
# ip_pool_type=tri_ip_pool,)
# print(body)
#
# company_name_selector = {
# 'method': 'css',
# 'selector': 'a.merchant-title ::text'
# }
# company_name = parse_field(
# parser=company_name_selector,
# target_obj=body,
# )
# print(company_name)
# 源自百家号
# 百度某作者的文章
# 必传
# cookies = {
# 'BAIDUID': '1666ADBB95B083DBB2DA29E9BEFCB50B:FG=1',
# 'BIDUPSID': '1666ADBB95B083DBB2DA29E9BEFCB50B',
# # 'PSTM': '1553750958',
# # 'locale': 'zh',
# }
#
# headers = {
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# 'User-Agent': get_random_phone_ua(),
# 'Accept': '*/*',
# # 'Referer': 'https://author.baidu.com/home?type=profile&action=profile&mthfr=box_share&context=%7B%22from%22%3A%22ugc_share%22%2C%22app_id%22%3A%221617808623102717%22%7D&from=singlemessage&isappinstalled=0',
# 'Connection': 'keep-alive',
# }
#
# params = (
# ('type', 'article'),
# ('tab', '2'),
# ('uk', 'sCWQteHJevYiu1bvIiKrEw'), # 非定值, 看分享出来文章的uk
# # ('ctime', '15564325069740'),
# ('num', '14'),
# # ('_', '1556502637335'),
# ('callback', 'jsonp2'),
# )
# url = 'https://author.baidu.com/list'
# body = Requests.get_url_body(
# url=url,
# headers=headers,
# params=params,
# cookies=cookies,
# ip_pool_type=tri_ip_pool,)
# # print(body)
#
# data = json_2_dict(
# json_str=re.compile('\((.*)\)').findall(body)[0],
# )
# pprint(data)
# 视频信息接口
# params = (
# ('callback', 'tvp_request_getinfo_callback_654434'),
# ('platform', '11001'),
# ('charge', '0'),
# ('otype', 'json'),
# ('ehost', 'http://post.mp.qq.com'),
# ('sphls', '0'),
# ('sb', '1'),
# ('nocache', '0'),
# # ('_rnd', '1557917186'),
# # ('guid', 'daf25a829d645f1196b61df6417e87bf'),
# ('appVer', 'V2.0Build9502'),
# ('vids', 'm0866r0q1xn'),
# ('defaultfmt', 'auto'),
# # ('_qv_rmt', 'AI5PT6eoA15978I5x='),
# # ('_qv_rmt2', 'Kt7fT8OE157116tsw='),
# ('sdtfrom', 'v3010'),
# ('_', '1557917186891'),
# )
# body = Requests.get_url_body(
# url='http://h5vv.video.qq.com/getinfo',
# headers=headers,
# params=params,
# ip_pool_type=tri_ip_pool,
# num_retries=5,)
# print(body)
# data = json_2_dict(
# json_str=re.compile('\((.*)\)').findall(body)[0],
# default_res={})
# pprint(data)
# ** 咪咕视频根据视频id进行视频信息获取
# import requests
#
# headers = {
# 'Proxy-Connection': 'keep-alive',
# 'terminalId': 'h5',
# # 'X-UP-CLIENT-CHANNEL-ID': '0131_10010001005',
# 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
# 'Accept': 'application/json',
# # 'clientId': '36854075131aeac30ca17f1b54649196',
# 'userId': '',
# 'userToken': '',
# 'appId': 'miguvideo',
# 'SDKCEId': '',
# 'Origin': 'http://m.miguvideo.com',
# 'Referer': 'http://m.miguvideo.com/mgs/msite/prd/detail.html?cid=652525090',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# }
#
# params = (
# ('contId', '652525090'),
# ('rateType', '3'),
# # ('clientId', '36854075131aeac30ca17f1b54649196'),
# # ('channelId', '0131_10010001005'),
# )
#
# response = requests.get('http://webapi.miguvideo.com/gateway/playurl/v2/play/playurlh5', headers=headers, params=params, verify=False)
# print(response.text)
# 百度app的小视频发现接口, 其中的全屏视频文章可直接被抓取
# headers = {
# 'Host': 'mbd.baidu.com',
# 'Connection': 'keep-alive',
# 'Content-Length': '4557',
# 'X-BD-QUIC': '1',
# 'Content-Type': 'application/x-www-form-urlencoded',
# 'X-BDBoxApp-NetEngine': '3',
# 'User-Agent': get_random_phone_ua(), # 'Mozilla/5.0 (iPad; CPU OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 SP-engine/2.18.0'
# # 'X-Bd-Traceid': '644a9f61e6cc425e8df842d2cb926de9',
# 'Accept': '*/*',
# # 'X-TurboNet-Info': '2.13.2679.177',
# 'Accept-Encoding': 'gzip, deflate',
# }
#
# params = (
# ('action', 'feed'),
# ('cmd', '210'),
# # ('maid', '_a2S8_aq28_qa28qiPSOtj8Pvag3h2aajiXT8jukvNlza-uNzuB3uli6-u_KO-ifY0HJ8lukSugkuXa90ivhI_PSv8oIi2ihgCSaa_asS8_M82uazxqSC'),
# ('refresh', '1'),
# ('imgtype', 'webp'),
# ('cfrom', '1099a'),
# ('from', '1099a'),
# ('network', '1_0'),
# ('osbranch', 'i0'),
# ('osname', 'baiduboxapp'),
# ('service', 'bdbox'),
# # ('sid', '1027585_4-2600_6645-1027088_2-1027514_1-1027521_1-1027598_3-3081_8171-5238_7311-2696_6930-1027056_2-3057_8089-5618_8591-1027583_1-1027195_1-1027384_2-1027255_3-1027604_1-5456_8016-1026924_1-5306_7565-1027258_2-3270_8882-2946_7781-1027230_2-5524_8269-1027659_1-2929_7702-1027285_1-1027328_5-1027599_1-1472_3438-5579_8458-3037_8036-1027425_3-1027641_1-1027564_2-3000026_2-1027249_1-1027654_1-1027525_2-5529_8280-1027151_2-5566_8411-1027577_2-5562_8387-1027102_1-5571_8441-1027346_1-1021859_1-5409_7877-3039_8040-5586_8486-5546_8581-1027597_2-1027562_1-1027251_1-5525_8271-1021774_1-2512_6387-2859_7452-1027460_2-1027128_2-1027379_1-1027652_2-2939_7745-1027218_1-1027225_1-1026985_1'),
# ('sst', '0'),
# ('st', '0'),
# ('ua', '1668_2224_iphone_11.22.0.17_0'),
# ('uid', 'E4317D7927A4F423B2A894710C308D015F8D69D51OMTBGHBERB'),
# # ('ut', 'iPad7,3_13.3.1'),
# # ('zid', '9iAc0yzbau51GKO563M1gzHzaPoPDD_d8nXwjCKxdBLITCmV4uqwJmkYrkuarE6BQqUXF7INisVWgScgYhwZ0qQ'),
# )
#
# data = {
# # 'data': '{\n "upload_ids" : [\n {\n "clk" : 0,\n "id" : "sv_5653763656459563687",\n "show" : 0,\n "clk_ts" : 0,\n "show_ts" : 0\n },\n {\n "clk" : 0,\n "id" : "sv_3599925748637729943",\n "show" : 0,\n "clk_ts" : 0,\n "show_ts" : 0\n },\n {\n "clk" : 0,\n "id" : "sv_5250727945753531281",\n "show" : 0,\n "clk_ts" : 0,\n "show_ts" : 0\n },\n {\n "clk" : 0,\n "id" : "sv_4823468498756614746",\n "show" : 1,\n "clk_ts" : 0,\n "show_ts" : 1587165880\n },\n {\n "clk" : 0,\n "id" : "sv_4439062174156612467",\n "show" : 1,\n "clk_ts" : 0,\n "show_ts" : 1587165886\n },\n {\n "clk" : 0,\n "id" : "sv_5248424962721750237",\n "show" : 1,\n "clk_ts" : 0,\n "show_ts" : 1587165886\n },\n {\n "clk" : 0,\n "id" : "sv_4130330140644084020",\n "show" : 1,\n "clk_ts" : 0,\n "show_ts" : 1587165880\n },\n {\n "clk" : 0,\n "id" %3...'
# 'data': dumps({
# "upload_ids" : [
# {
# "clk" : 0,
# "id" : "sv_5653763656459563687",
# "show" : 0,
# "clk_ts" : 0,
# "show_ts" : 0
# },
# {
# "clk" : 0,
# "id" : "sv_3599925748637729943",
# "show" : 0,
# "clk_ts" : 0,
# "show_ts" : 0
# },
# {
# "clk" : 0,
# "id" : "sv_5250727945753531281",
# "show" : 0,
# "clk_ts" : 0,
# "show_ts" : 0
# },
# {
# "clk" : 0,
# "id" : "sv_4823468498756614746",
# "show" : 1,
# "clk_ts" : 0,
# "show_ts" : datetime_to_timestamp(get_shanghai_time()), # 1587165880
# },
# {
# "clk" : 0,
# "id" : "sv_4439062174156612467",
# "show" : 1,
# "clk_ts" : 0,
# "show_ts" : datetime_to_timestamp(get_shanghai_time())
# },
# {
# "clk" : 0,
# "id" : "sv_5248424962721750237",
# "show" : 1,
# "clk_ts" : 0,
# "show_ts" : datetime_to_timestamp(get_shanghai_time())
# },
# {
# "clk" : 0,
# "id" : "sv_4130330140644084020",
# "show" : 1,
# "clk_ts" : 0,
# "show_ts" : datetime_to_timestamp(get_shanghai_time())
# },
# ]})
# }
# body = Requests.get_url_body(
# method='post',
# url='https://mbd.baidu.com/searchbox',
# headers=headers,
# params=params,
# # cookies=cookies,
# data=data,
# ip_pool_type=tri_ip_pool,
# proxy_type=PROXY_TYPE_HTTPS,
# num_retries=6,)
# data = json_2_dict(
# json_str=body).get('data', {}).get('210', {}).get('itemlist', {}).get('items', [])
# # pprint(data)
#
# for item in data:
# try:
# _mode = item.get('data', {}).get('mode', '')
# assert _mode != ''
# title = item.get('data', {}).get('title', '')
# assert title != ''
# article_url = item.get('data', {}).get('videoInfo', {}).get('pageUrl', '')
# print('mode: {}, title: {}, article_url: {}'.format(_mode, title, article_url))
# except Exception:
# continue
# 根据百度app的金华本地接口列表数据(包含视频)
# 测试发现其中返回的数据中图文文章的prefetch_html字段打开的页面图片都是异常的(图片只能在百度app里面调起), pass
# headers = {
# 'Host': 'mbd.baidu.com',
# 'Connection': 'keep-alive',
# # 'Content-Length': '601',
# 'X-BDBoxApp-NetEngine': '3',
# 'Accept': 'application/json, text/plain, */*',
# 'Content-Type': 'application/x-www-form-urlencoded',
# # 'X-Bd-Traceid': '16fe51d50af744aa9f405a6674a0ece3',
# # 'X-TurboNet-Info': '2.13.2679.177',
# 'User-Agent': get_random_phone_ua(), # 'BaiduBoxApp/11.22.0 iPad; CPU OS 13_3_1 like Mac OS X'
# 'Accept-Encoding': 'gzip, deflate',
# }
#
# params = (
# ('action', 'feed'),
# ('cmd', '206'),
# ('refresh', '0'),
# ('cfrom', '1099a'),
# ('from', '1099a'),
# ('network', '1_0'),
# ('osbranch', 'i0'),
# ('osname', 'baiduboxapp'),
# # ('puid', '_avrijOq2iAqAqqqB'),
# ('service', 'bdbox'),
# # ('sid', '5279_7493-5343_7673-1027255_3-1027249_1-3108_8246-1027599_1-5420_7915-5159_7064-5318_7602-5505_8213-2387_6070-5546_8581-3200_8608-5409_7877-1027056_2-3057_8089-1768_6301-2849_7423-1027525_2-3085_8180-3188_8547-5276_7485-5177_7115-5566_8411-5482_8122-1027088_2-5247_7339-2411_6133-5553_8355-5351_7695-3022_7980-5358_7713-2583_6589-1027151_2-2964_7829-5270_7472-2422_6166-3092_8204-5344_7676-5525_8271-5557_8366-1027564_2-5508_8414-5297_7538-1027652_2-5426_7932-5291_7522-5309_7573-5188_7161-2558_7271-1027384_2-2966_7835-5164_7078-5295_7533-5618_8591-1869_4509-5568_8429-1027604_1-1027379_1-1027654_1-5288_7517-3072_8145-3234_8756-5306_7565-2119_5266-1549_3643-2702_6941-5397_7837-5292_7525-5605_8537-5189_7164-3195_8561-2929_7702-1027562_1-5623_8610-5456_8016-3281_8984-5571_8441-2762_7136-5437_7972-5399_7843-1027251_1-1027195_1-5382_7800-3021_7978-3037_8036-5305_7560-1027102_1-1026985_1-1027583_1-5434_7961-5524_8269-2939_7745-5529_8280-2132_5301-5287_7515-1021859_1-1027577_2-2962_7825-1027346_1-2512_6387-1027128_2-5511_8234-5562_8387-1026924_1-1892_4570-5302_7555-1027460_2-5253_7382-5540_8312-5191_7167-2859_7452-5258_7413-5380_7796-3000026_2-1021774_1-5501_8201-2696_6930-5337_8416-5356_7706-1027230_2-5208_7208-3270_8882-3068_8126-2701_6939-1027218_1-5495_8181-5244_7333-3095_8211-3081_8171-2429_6181-2720_7764-1027225_1-3094_8208-5354_7701-3066_8262-2407_6127-1756_4144-1027425_3-5290_7521-5289_7518-3008_7953-1472_3438-3051_8075-571_1173-5488_8587-5260_7422-5196_7178-5326_7620-5514_8240-5539_8310-5586_8486-1027514_1-965_2041-1027258_2-5274_7482-5465_8048-2991_7919-5474_8088-5238_7311-2949_7792-5304_7558-1027521_1-3269_8880-5341_7661-5396_7836-2734_7019-5277_7487-1027659_1-5229_7291-2862_7464-3039_8040-1027328_5-1027641_1-1027597_2-2946_7781-2520_6890-1027285_1-5476_8091-3150_8396-5579_8458-3038_8037-3246_8805-5621_8606-2163_5390-1027585_4-2600_6645-5551_8343-5507_8218-5552_8352-1027598_3-5387_7815-2466_6272'),
# ('sst', '0'),
# ('st', '0'),
# ('ua', '1668_2224_iphone_11.22.0.17_0'),
# ('uid', 'E4317D7927A4F423B2A894710C308D015F8D69D51OMTBGHBERB'),
# ('ut', 'iPad7,3_13.3.1'),
# # ('zid', '9iAc0yzbau51GKO563M1gzHzaPoPDD_d8nXwjCKxdBLL_jVT_hAYpPuHPN7r33duZtuXxOapOpFhVJsy0VCBMVg'),
# )
#
# data = {
# # 'data': '{"direction":"auto","refresh_type":0,"bundleVersion":"2.80.57","source":"bdbox_feed_attentiontab","upload_ids":[],"info":{"location":"120.072277,28.962932,---"},"data":{"tab_id":"109999333","tab_name":"","is_sub":0,"last_update_time":0,"session_id":"1587166932496","click_id":"f7c2394b4a3a374e9565268449e1f8b7","refresh_index":1,"refresh_count":1,"refresh_state":4,"pre_render":0,"context":{}}}'
# 'data': dumps({
# 'bundleVersion': '2.80.57',
# 'data': {
# # 'click_id': 'f7c2394b4a3a374e9565268449e1f8b7',
# 'context': {},
# 'is_sub': 0,
# 'last_update_time': 0,
# 'pre_render': 0,
# 'refresh_count': 1,
# 'refresh_index': 1,
# 'refresh_state': 4,
# 'session_id': get_now_13_bit_timestamp(),
# 'tab_id': '109999333',
# 'tab_name': ''
# },
# 'direction': 'auto',
# 'info': {'location': '120.072277,28.962932,---'},
# 'refresh_type': 0,
# 'source': 'bdbox_feed_attentiontab',
# 'upload_ids': []
# })
# }
#
# body = Requests.get_url_body(
# method='post',
# url='https://mbd.baidu.com/searchbox',
# headers=headers,
# params=params,
# data=data,
# ip_pool_type=tri_ip_pool,
# proxy_type=PROXY_TYPE_HTTPS,
# num_retries=6,
# )
# assert body != ''
#
# data = json_2_dict(
# json_str=body,
# logger=None,).get('data', {}).get('206', {}).get('itemlist', {}).get('items', [])
# # pprint(data)
#
# for item in data:
# try:
# title = item.get('data', {}).get('title', '')
# assert title != ''
# _mode = item.get('data', {}).get('mode', '')
# assert _mode != ''
# if _mode == 'video':
# article_url = item.get('data', {}).get('videoInfo', {}).get('pageUrl', '')
# else:
# # 跳过图文文章, 因为其中图片只能在百度app里面调起
# # article_url = item.get('data', {}).get('prefetch_html', '')
# continue
# assert article_url != ''
#
# print('mode: {}, title: {}, article_url: {}'.format(_mode, title, article_url))
# except Exception as e:
# continue | [
"superonesfazai@gmail.com"
] | superonesfazai@gmail.com |
bb6c3b64422418aee867dca1dbec924a6ffc67c5 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4455603.3.spec | 4b1c58937e8cd790c55d073e3e63567e8ee9b14f | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,310 | spec | {
"id": "mgm4455603.3",
"metadata": {
"mgm4455603.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 109524,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2247,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 307,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 448,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1143,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 99301,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 442,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 20019,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 119071,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 87329,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 37078,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 24398,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 246589,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 7961,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 4699,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 13553,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 197,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 18035,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 2435468,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 100,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 13,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 69,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 27,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 868,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 1230,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 413,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 198,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 26099,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 4741,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.species.stats"
}
},
"id": "mgm4455603.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4455603.3"
}
},
"raw": {
"mgm4455603.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4455603.3"
}
}
} | [
"jared.wilkening@gmail.com"
] | jared.wilkening@gmail.com |
7bf9961f9abe963c51fc315c0be7e3c57d39a529 | 1b8a99a4ff80da51dc81dd8354bf9bf1cbd25a8b | /2023/longest_nice_substring.py | a832858e6eafd4d321f1afb296fd2304b2ca0cb5 | [] | no_license | eronekogin/leetcode | ea639eebe0cd70af9eb4cba59bc68f636d7b3e0c | edb870f83f0c4568cce0cacec04ee70cf6b545bf | refs/heads/master | 2023-08-16T10:35:57.164176 | 2023-08-14T11:25:33 | 2023-08-14T11:25:33 | 163,679,450 | 0 | 0 | null | 2021-09-09T12:04:44 | 2018-12-31T15:33:06 | Python | UTF-8 | Python | false | false | 449 | py | """
https://leetcode.com/problems/longest-nice-substring/
"""
class Solution:
    def longestNiceSubstring(self, s: str) -> str:
        """Return the longest substring of *s* in which every letter also
        appears in the opposite case (divide and conquer on "bad" chars)."""
        if not s:
            return ''
        present = set(s)
        # Index of the first character whose opposite case is missing;
        # None means the whole string is already nice.
        split_at = next(
            (idx for idx, ch in enumerate(s) if ch.swapcase() not in present),
            None,
        )
        if split_at is None:
            return s
        head = self.longestNiceSubstring(s[:split_at])
        tail = self.longestNiceSubstring(s[split_at + 1:])
        # max() keeps the first maximum, so ties favour the left half.
        return max(head, tail, key=len)
| [
"mengyu.jiang@gmail.com"
] | mengyu.jiang@gmail.com |
eff964f095873cb135849614ed7d198f9273a967 | 49124113dd02d123f1d8e42a72f78b8b3ecfada8 | /crawler/bin/cc.py | 495bfb5e324d0876dd9ac4a149709ba2687fca87 | [] | no_license | CheungZeeCn/dmClassProj_weiboCircle | 073d7e41289ff3e1c2d233a997e5a2a9c6ceef84 | 507996dcfb50f47f2cf7c36d2b203a4e06a914a1 | refs/heads/master | 2021-01-19T00:12:54.055594 | 2014-01-16T07:17:34 | 2014-01-16T07:17:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,412 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# by zhangzhi @2013-11-30 16:52:56
# Copyright 2013 NONE rights reserved.
import weibo_login
import conf
import util
import dataUtil
import urllib2
import surf
import logging
import os
import pickle
import signal
import time
g_userResF = 'userRes.pickle'
g_out = set()
g_userRes = {}
g_c = 0
g_storeStep = 30
def autoStore():
    """Dump the accumulated crawl results once every g_storeStep calls."""
    global g_c, g_storeStep
    # This file is Python 2 (see the 'print' statement in handler()), so '/'
    # is integer division here: the quotient only changes -- and dump() only
    # fires -- on every g_storeStep-th invocation.
    last = g_c / g_storeStep
    g_c += 1
    if g_c / g_storeStep > last:
        logging.debug("auto dump len(g_userRes.keys()):%s" % (len(g_userRes.keys())))
        dump()
def sig_exit():
    """Persist the crawl state to disk and terminate the process.

    Called from the signal handler so an interrupted crawl does not lose
    the results accumulated in g_userRes.
    """
    # Fix: 'sys' was referenced here but never imported anywhere in this
    # module, so sys.exit() raised NameError instead of exiting cleanly.
    import sys
    logging.info("[end time]:[%s]" % str(time.time()))
    logging.info("store g_userRes into %s" % (g_userResF))
    dump()
    logging.info("exit")
    sys.exit()
def handler(signum, frame):
    """Signal handler: persist state and exit on signals 2/3/9.

    NOTE(review): SIGKILL (9) can never be delivered to a handler, so that
    branch is unreachable in practice.
    """
    print "got an signal",signum,frame
    if signum == 3:
        sig_exit()
    if signum == 2:
        sig_exit()
    if signum == 9:
        sig_exit()
    return None
# Persist the crawl state before dying on Ctrl-C (SIGINT), SIGTERM or
# signal 3 (SIGQUIT) -- see handler()/sig_exit() above.
signal.signal(signal.SIGINT,handler)
signal.signal(signal.SIGTERM,handler)
signal.signal(3,handler)
def dump():
    """Pickle g_userRes into g_userResF, skipping the write when empty."""
    global g_userRes, g_userResF
    if g_userRes != {}:
        util.dump2Pickle(g_userRes, g_userResF)
def load():
    """Return the previously pickled result dict from g_userResF, or {}."""
    global g_userRes, g_userResF
    ret = util.loadPickle(g_userResF)
    if ret == None:
        return {}
    return ret
def addInOutput(user, biIds):
    """Record the relation between *user* and every id in *biIds* in g_out.

    Each edge is stored in both directions; the original comment says this
    works around Sina Weibo's lock on one-way queries.
    """
    g_out.update((user, other) for other in biIds)
    g_out.update((other, user) for other in biIds)
def getSeedUser():
    """Return the seed user ids (third column of each seed-user row)."""
    return [row[2] for row in dataUtil.getSeedUserList()]
def dumpRes():
    """Write every collected bi-follow edge to output_bi.txt, one per line."""
    for u in g_userRes:
        # g_userRes[u] is the (flwrs, flwees, bis) triple stored by the main
        # loop; index 2 holds tuples whose first element is the user id.
        biIds = [ bi[0] for bi in g_userRes[u][2] ]
        addInOutput(u, biIds)
    with open('output_bi.txt', 'w') as f:
        for each in g_out:
            f.write(("%s\t%s\n" % (each[0], each[1])).encode('utf-8'))
if __name__ == '__main__':
    # Breadth-first crawl: seeds -> their bi-followers (level 1) -> the
    # bi-followers of those (level 2).  Results are cached in g_userRes and
    # periodically pickled by autoStore().
    seeds = getSeedUser()
    g_userRes = load()
    logging.info("load %d user res info" % (len(g_userRes.keys())))
    for seed in seeds:
        flwrs, flwees, bis = surf.getUserFollower(seed)
        g_userRes[seed] = (flwrs, flwees, bis)
        level1 = bis
        biIds = [each[0] for each in bis]
        addInOutput(seed, biIds)
        level2 = []
        for l1 in level1:
            if l1[0] in g_userRes:
                # Already crawled: reuse the cached bi-follower list.
                logging.debug("[%s] in g_userRes, ignore" % l1[0])
                bis1 = g_userRes[l1[0]][2]
                level2 += bis1
                continue
            flwrs1, flwees1, bis1 = surf.getUserFollower(l1[0])
            autoStore()
            level2 += bis1
            g_userRes[l1[0]] = (flwrs1, flwees1, bis1)
            biIds1 = [each[0] for each in bis1]
            addInOutput(l1[0], biIds1)
            logging.debug("%d userResGot" % len(g_userRes))
        level2 = list(set(level2))
        level3 = []  # collected for symmetry with level2 but never used
        for l2 in level2:
            if l2[0] in g_userRes:
                logging.debug("[%s] in g_userRes, ignore" % l2[0])
                continue
            flwrs2, flwees2, bis2 = surf.getUserFollower(l2[0])
            autoStore()
            level3 += bis2
            g_userRes[l2[0]] = (flwrs2, flwees2, bis2)
            biIds2 = [each[0] for each in bis2]
            # Fix: the original built 'bisIds' here but then passed the stale
            # 'biIds' list left over from the level-1 loop, so the level-2
            # edges written to g_out were wrong.
            addInOutput(l2[0], biIds2)
            logging.debug("%d userResGot" % len(g_userRes))
    dump()
    dumpRes()
| [
"cheungzeecn@gmail.com"
] | cheungzeecn@gmail.com |
3cbea014ca4c3557f1dc31bf8b7e06b3fc86b32f | 37b27a03d2730550b4f0bcb33db596ab2a59ec55 | /cm_demo.py | 3892443d8c32b0c9fccc0be9fa2df58fee162c36 | [] | no_license | nrclark/kymeta-sample | 615feb3e75c5a59930a10e30e81d5df339335c5e | ff573e84ddb0f9ddffcfa4d20c29777b7f2176ce | refs/heads/master | 2020-12-03T18:27:55.076580 | 2020-01-02T22:11:05 | 2020-01-02T22:11:05 | 231,430,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #!/usr/bin/env python3
""" Example code that exercises customer_management.py """
import customer_management as cm
def main():
    """Read cm_demo.json, import it into a Ledger and print the report."""
    ledger = cm.Ledger()
    # Use a context manager so the file handle is closed deterministically
    # (the original open(...).read() leaked the handle).
    with open('cm_demo.json') as json_file:
        json_data = json_file.read()
    ledger.import_json(json_data)
    print(ledger.generate_report(), end='')


if __name__ == "__main__":
    main()
| [
"nicholas.clark@gmail.com"
] | nicholas.clark@gmail.com |
7fc33fe13ad8e58b553e9fcac5d4ba7cdf292b8f | f9ab83d8a2f1e0e96be29e2942d200915a1e3dee | /4.23/bilibili/.ipynb_checkpoints/gc-checkpoint.py | 583e0ab5fdc72a55f91774ff05c7b76e326f941a | [] | no_license | heathcliff233/ai_programming | 5c145bba6ced1738eb02cc4aaeb69f75708848a5 | d7a6d899b81335ac74c97337dcbc72c89a71f26b | refs/heads/master | 2020-05-17T19:15:30.611276 | 2019-07-11T13:26:20 | 2019-07-11T13:26:20 | 183,906,331 | 1 | 0 | null | 2020-02-22T05:53:51 | 2019-04-28T12:37:34 | Jupyter Notebook | UTF-8 | Python | false | false | 2,501 | py | # -*- coding: utf-8 -*-
"""
Basic idea of the program:
1. Grab a few webpages from sina and extract the roll news subjects
2. Segment words using Python the package jieba
2.1 Filter out the stop words
2.2 Only keep nouns
3. Load all the words(with some appearing multiple times) into the package wordcloud
4. That's it!
@author: Dazhuang
"""
import jieba.posseg as pseg
import matplotlib.pyplot as plt
from os import path
import re
import requests
from scipy.misc import imread
from wordcloud import WordCloud
def fetch_sina_news():
    """Download the bilibili comment (danmaku) XML and append the raw
    comment texts to the subjects file consumed by extract_words()."""
    # Each comment element looks like: <d p="15.56200,1,25,...">text</d>
    # Raw string replaces the old "<\/d>" escape (same regex, no warning).
    PATTERN = re.compile(r'<d p=".*?">(.*?)</d>')
    BASE_URL = 'http://comment.bilibili.com/83089367.xml'
    # NOTE(review): the path deliberately ends with a space -- it must match
    # the (space-suffixed) file name opened in extract_words().
    with open('/Users/apple/Documents/Jupytor_Notebook/ai_programming/4.23/sina_news_wordcloud/subjects.txt ', 'w', encoding='gb18030') as f:
        r = requests.get(BASE_URL)
        # Fix: 'unixode-esacpe' is not a codec and always raised LookupError;
        # the intended codec is 'unicode-escape' (decodes \uXXXX sequences).
        data = r.text.encode('utf-8').decode('unicode-escape')
        p = re.findall(PATTERN, data)
        for s in p:
            f.write(s)
def extract_words():
    """Segment the saved comment texts with jieba, keep non-stop-word nouns
    and render them as a word cloud saved to wordcloud.jpg."""
    with open('/Users/apple/Documents/Jupytor_Notebook/ai_programming/4.23/sina_news_wordcloud/subjects.txt ','r', encoding='utf-8') as f:
        news_subjects = f.readlines()
    # NOTE(review): this handle is never closed; harmless under CPython's
    # refcounting but worth wrapping in 'with'.
    stop_words = set(line.strip() for line in open('stopwords.txt', encoding='utf-8'))
    newslist = []
    for subject in news_subjects:
        if subject.isspace():
            continue
        # segment words line by line
        p = re.compile("n[a-z0-9]{0,2}") # n, nr, ns, ... are the flags of nouns
        word_list = pseg.cut(subject)
        for word, flag in word_list:
            if word not in stop_words and p.search(flag) != None:
                newslist.append(word)
    # print(newslist)
    # print(word_list)
    # Word -> frequency map fed to the word-cloud generator.
    content = {}
    for item in newslist:
        content[item] = content.get(item, 0) + 1
    d = path.dirname(__file__)
    # os.path.join with an absolute second argument simply yields that
    # absolute path (note the trailing space in the file name).
    # NOTE(review): scipy.misc.imread was removed in scipy >= 1.2 --
    # confirm the pinned scipy version still provides it.
    mask_image = imread(path.join(d, "/Users/apple/Documents/Jupytor_Notebook/ai_programming/4.23/sina_news_wordcloud/mickey.png "))
    wordcloud = WordCloud(font_path='simhei.ttf', background_color="black", mask=mask_image, max_words=5,).generate_from_frequencies(content)
    # Display the generated image:
    plt.imshow(wordcloud)
    plt.axis("off")
    wordcloud.to_file('wordcloud.jpg')
    plt.show()
if __name__ == "__main__":
    fetch_sina_news()
    extract_words()
| [
"brighthong233@gmail.com"
] | brighthong233@gmail.com |
4a10a13c8f0a37ceb9c6fe88a0f94457cc76f7bf | 58a373fda9c8b2ec3cb40c84ccbc36332f7d0561 | /tests.py | a5fe75a9799fb8848fe9eb0b8c55f0a0af129d22 | [
"MIT"
] | permissive | aniruddhachoudhury/mbtr | 672d9a4a792c6071f7067a5f2157d3be26d5a947 | ce413c60fa91ed8e5c22453dfee2217edfb8e56c | refs/heads/master | 2022-12-31T03:16:13.076155 | 2020-10-22T16:28:30 | 2020-10-22T16:28:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | import runpy
import os
import mbtr
# Show the package banner before running the suite.
mbtr.splash()
print('Commencing tests...')
# chdir to this file's directory so the relative 'tests/' path resolves.
k = os.path.dirname(os.path.realpath(__file__))
os.chdir(k)
print('standalone tests...')
# Execute the standalone test script as if it were run directly.
runpy.run_path('tests/standalone_tests.py', run_name='__main__')
| [
"vasco.medici@gmail.com"
] | vasco.medici@gmail.com |
abbb45d65c617f8debfb4c0c3238b51eb2986029 | 2fd3d7a7c676a89a961cea12167020e5c742761a | /FibonaChicken.6.py | 665ed364a38b0d99a445364fac82b5f70c211a09 | [
"CC0-1.0"
] | permissive | Python-Repository-Hub/fibona-chicken-number | 773cbde7f269280498c3602dfacd470373c9e718 | 1158ea27c06487edcf15e28515b04c15118f4e60 | refs/heads/main | 2023-04-17T01:01:42.287165 | 2021-05-06T21:18:23 | 2021-05-06T21:18:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | '''
Applying Zeckendorf's Decomposition for a Number Which is NOT a Fibonacci Number.
'''
from math import sqrt, log, floor, isqrt

# Golden ratio and its conjugate, used by Binet's closed-form formula.
phi = (1 + sqrt(5)) / 2
phi_ = 1 - phi


def Binet(i):
    """Return the i-th Fibonacci number via Binet's closed-form formula."""
    return round((phi ** i - phi_ ** i) / sqrt(5))


def inverse_fibonacci(N):
    """Return the approximate Fibonacci index i with F(i) closest to N."""
    return round(log(sqrt(5) * N) / log(phi))


def is_perfect(n):
    """Return True if n is a perfect square.

    Fix: math.isqrt uses exact integer arithmetic, avoiding the float
    precision errors of floor(sqrt(n)) for large n.
    """
    rootn = isqrt(n)
    return rootn * rootn == n


def is_fibonacci(N):
    """Return True if N is a Fibonacci number.

    Classic test: N is Fibonacci iff 5*N^2 + 4 or 5*N^2 - 4 is a
    perfect square.
    """
    squared5 = 5 * N * N  # compute once instead of twice
    return is_perfect(squared5 + 4) or is_perfect(squared5 - 4)


def FibonaChicken(N):
    """Return the FibonaChicken number for N people.

    For a Fibonacci N this is the previous Fibonacci number; otherwise N
    is split greedily (Zeckendorf's decomposition) and the parts are
    mapped recursively.
    """
    if N <= 2:
        return 1
    i = inverse_fibonacci(N)
    if is_fibonacci(N):
        return Binet(i - 1)
    else:
        # Advance i to the first Fibonacci number strictly greater than N,
        # so Binet(i - 1) is the largest Fibonacci term not exceeding N.
        while N > Binet(i):
            i += 1
        return Binet(i - 2) + FibonaChicken(N - Binet(i - 1))


# Demo: print the FibonaChicken number for 15..64 people.
for N in range(15, 65):
    print(N, FibonaChicken(N))
| [
"joonion@gmail.com"
] | joonion@gmail.com |
d6f86c9d6779bc60ff65165127c286c7a59b7925 | 94014307ef501c4494a7b3eed5a3c6146d4ade63 | /20200907/ExpreCodeDemo.py | 8c380e44222d20c0222abddda4e63be8a116ade4 | [] | no_license | lly1997/pythonTest | 3413734445ecce02676ecc3d16d4d40d41cdebc0 | a8b20cdabe2b9f55b393cfa557f51787b413206b | refs/heads/master | 2022-12-07T21:49:01.578863 | 2020-09-07T08:52:25 | 2020-09-07T08:52:25 | 292,137,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # l1=[i for i in range(20) if i%3==0 and i!=0]
# Earlier comprehension experiments kept for reference:
# print(l1)
# print(sum((i for i in range(2000000000) if i%3==0 and i!=0)))
print("9++++++++++++++++++++++++++++++++++++++++++")
# Count how often each whitespace-separated word occurs in the file.
# Fix: use a context manager so the handle is closed (the original never
# closed it), and drop the dead 'strcode' literal that was immediately
# overwritten by the read below.
with open("../../lianxi.txt", "r+") as fileobj:
    strcode = fileobj.read()
strdict = {}
for word in strcode.split():
    # dict.get with a default replaces the explicit None check of the
    # original while producing identical counts.
    strdict[word] = strdict.get(word, 0) + 1
for key in strdict:
    print(key, strdict[key])
print(strdict)
"lianliyong@lianliyong.com"
] | lianliyong@lianliyong.com |
961c1d870a4bc91d98be1c1c362104544c60d352 | 0e7847ecd12cb86749841535cd3bcfd7ccfd1705 | /pyTCExamHtmlWindowQuestionGui.py | 103b01bd03080c35c65b7991ce0dd134c3edaac2 | [] | no_license | insomnia-soft/pyTCExam | 17f57ddccc7dee56a4789ffd342dce6fd49674c1 | 6c9eb8654c3d283a14462a218293cb258e987819 | refs/heads/master | 2021-01-22T11:20:43.056325 | 2015-08-30T23:00:54 | 2015-08-30T23:00:54 | 35,804,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,977 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import wx
import wx.html as html
import wx.lib.wxpTag
import pyTCExam
import pyTCExamCommon
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
class HtmlWindowQuestion(html.HtmlWindow):
#----------------------------------------------------------------------
    def __init__(self, parent):
        """Create the read-only HTML pane used to render one exam question."""
        html.HtmlWindow.__init__(self, parent=parent, id=wx.NewId(), style=html.HW_NO_SELECTION)
        self.__test = None  # current test/session object, set by setQuestion()
        self.__handleEvent = False  # widget events ignored until the page is built
        self.answers = {}
        # Name prefixes used to locate the generated answer widgets by
        # question type (MCSA radios, MCMA radios/checkboxes, text, order).
        self.__widgetMcsaName = "mcsa_answer_"
        self.__widgetMcmaName = "mcma_answer_"
        self.__widgetTextName = "text_answer"
        self.__widgetOrderName = "order_answer_"
#----------------------------------------------------------------------
    def setQuestion(self, test):
        """Render the currently selected question of *test* into the window."""
        self.answers = {}
        self.__test = test
        code = self.__getUi()
        self.SetPage(source=code)
        # Restore stored answers into the freshly created widgets, then start
        # reacting to user input (events were ignored during the rebuild).
        self.__setAnswersToWidgets()
        self.toggleHandleEvent(True)
#----------------------------------------------------------------------
    def toggleHandleEvent(self, toggle):
        """Enable/disable handling of widget events (off while rebuilding)."""
        self.__handleEvent = toggle
#----------------------------------------------------------------------
    def __getUi(self):
        """Build and return the HTML (with embedded <wxp> widget tags) for
        the currently selected question.

        Widget names encode the answer id (e.g. "mcsa_answer_<id>") so the
        event handlers can map a widget back to its answer row.  Question
        types: 1 = MCSA, 2 = MCMA, 3 = free text, 4 = ordering.
        """
        question_id = self.__test._selectedQuestionId
        test_noanswer_enabled = self.__test._testInfo["test_noanswer_enabled"]
        question_type = self.__test._testData[question_id]["question_type"]
        counter = 1
        code = ''
        code += '<div>' + pyTCExamCommon.decodeBBCode(self.__test._testData[question_id]["question_description"]) + '</div>'
        code += '<hr>'
        if question_type == 1:
            # MCSA - single-answer question
            code += '<table>'
            for a in self.__test._testData[question_id]["answers"]:
                code += '<tr>'
                code += '<td>' + str(counter) + '.</td>'
                code += '<td>'
                code += '<wxp module="wx" class="RadioButton">'
                code += '<param name="name" value="' + self.__widgetMcsaName + str(a["logansw_answer_id"]) + '">'
                code += '</wxp>'
                code += '</td>'
                code += '<td>' + pyTCExamCommon.decodeBBCode(a["answer_description"]) + '</td>'
                code += '</tr>'
                counter += 1
            if test_noanswer_enabled:
                # Extra "no answer" radio, identified by the pseudo id 0.
                code += '<tr>'
                code += '<td>' + str(counter) + '.</td>'
                code += '<td>'
                code += '<wxp module="wx" class="RadioButton">'
                code += '<param name="name" value="' + self.__widgetMcsaName + '0">'
                code += '</wxp>'
                code += '<td>Bez odgovora</td>'
                code += '</td>'
                code += '</tr>'
            # NOTE(review): this looks like it should be '</table>' -- the
            # table opened above is never closed.
            code += '<table>'
            self.Bind(wx.EVT_RADIOBUTTON, self.onRadioBox)
        elif question_type == 2:
            # MCMA - multiple-answer question
            if self.__test._testInfo["test_mcma_radio"]:
                # radio button: one radio group per answer; suffixes _0/_1/_2
                # mean "no answer" / "false" / "true".
                code += '<table>'
                code += '<tr>'
                code += '<td></td>'
                if test_noanswer_enabled:
                    code += u'<td width="80" align="center"><font color="#A0A0A0"><b>Bez odgovora</b></font></td>'
                code += u'<td width="80" align="center"><font color="#FF0000"><b>Netočno</b></font></td>'
                code += u'<td width="80" align="center"><font color="#008000"><b>Točno</b></font></td>'
                code += '</tr>'
                for a in self.__test._testData[question_id]["answers"]:
                    code += '<tr>'
                    code += '<td>' + str(counter) + '.</td>'
                    if test_noanswer_enabled:
                        code += '<td align="center">'
                        code += '<wxp module="wx" class="RadioButton">'
                        code += '<param name="name" value="' + self.__widgetMcmaName + str(a["logansw_answer_id"]) + '_0">'
                        code += '<param name="style" value="wx.RB_GROUP">'
                        code += '</wxp>'
                        code += '</td>'
                    code += '<td align="center">'
                    code += '<wxp module="wx" class="RadioButton">'
                    code += '<param name="name" value="' + self.__widgetMcmaName + str(a["logansw_answer_id"]) + '_1">'
                    if not test_noanswer_enabled:
                        # First button of the row starts the radio group when
                        # there is no "no answer" column.
                        code += '<param name="style" value="wx.RB_GROUP">'
                    code += '</wxp>'
                    code += '</td>'
                    code += '<td align="center">'
                    code += '<wxp module="wx" class="RadioButton">'
                    code += '<param name="name" value="' + self.__widgetMcmaName + str(a["logansw_answer_id"]) + '_2">'
                    code += '</wxp>'
                    code += '</td>'
                    code += '<td>' + pyTCExamCommon.decodeBBCode(a["answer_description"]) + '</td>'
                    code += '</tr>'
                    counter += 1
                code += '</table>'
                self.Bind(wx.EVT_RADIOBUTTON, self.onRadioBox)
            else:
                # checkbox
                code += '<table>'
                for a in self.__test._testData[question_id]["answers"]:
                    code += '<tr>'
                    code += '<td>' + str(counter) + '.</td>'
                    code += '<td>'
                    code += '<wxp module="wx" class="CheckBox">'
                    code += '<param name="name" value="' + self.__widgetMcmaName + str(a["logansw_answer_id"]) + '">'
                    code += '</wxp>'
                    code += '</td>'
                    code += '<td>' + pyTCExamCommon.decodeBBCode(a["answer_description"]) + '</td>'
                    code += '</tr>'
                    counter += 1
                code += '</table>'
                self.Bind(wx.EVT_CHECKBOX, self.onCheckBox)
        elif question_type == 3:
            # TEXT - free text question
            code += '<wxp module="wx" class="TextCtrl" height=200 width=100%>'
            code += '<param name="style" value="wx.TE_MULTILINE">'
            code += '<param name="name" value="' + self.__widgetTextName + '">'
            code += '</wxp><br /><br />'
            self.Bind(wx.EVT_TEXT, self.onTextInput)
        elif question_type == 4:
            # ORDER - ordering questions
            # Combo choices are the positions 1..n (plus a blank entry when
            # "no answer" is allowed).
            choices = []
            if test_noanswer_enabled:
                choices.append(' ')
            choices.extend(["{0}".format(x) for x in range(1, len(self.__test._testData[question_id]["answers"]) + 1)])
            if len(self.__test._testData[question_id]["answers"]):
                code += '<table>'
                for a in self.__test._testData[question_id]["answers"]:
                    code += '<tr>'
                    code += '<td>' + str(counter) + '.</td>'
                    code += '<td>'
                    code += '<wxp module="wx" class="StaticText">'
                    code += "<param name='label' value='" + a["answer_description"] + "'>"
                    code += '</wxp>'
                    code += '</td><td>'
                    code += '<wxp module="wx" class="ComboBox">'
                    code += '<param name="style" value="wx.CB_READONLY">'
                    code += '<param name="choices" value="' + str(choices) + '">'
                    code += '<param name="name" value="' + self.__widgetOrderName + str(a["logansw_answer_id"]) + '">'
                    code += '</wxp>'
                    code += '</td></tr>'
                    counter += 1
                # NOTE(review): this looks like it should be '</table>' --
                # the table opened above is never closed.
                code += '<table>'
                self.Bind(wx.EVT_COMBOBOX, self.onComboBox)
        return code
#----------------------------------------------------------------------
    def __setAnswersToWidgets(self):
        """Push the stored answers of the current question back into the
        widgets created by __getUi()."""
        question_id = self.__test._selectedQuestionId
        question_type = self.__test._testData[question_id]["question_type"]
        test_noanswer_enabled = self.__test._testInfo["test_noanswer_enabled"]
        # A non-null change time means the question was answered at least once.
        answered = False
        if self.__test._testData[question_id]["testlog_change_time"] != None:
            answered = True
        if question_type == 1:
            # MCSA - single-answer question
            for a in self.__test._testData[question_id]["answers"]:
                # selected answer
                if a["logansw_selected"] == 1:
                    name = self.__widgetMcsaName + str(a["logansw_answer_id"])
                    self.__setWidgetValue(name, True)
            if test_noanswer_enabled and answered == False:
                # Default to the "no answer" radio (pseudo id 0).
                self.__setWidgetValue(self.__widgetMcsaName + "0", True)
        elif question_type == 2:
            # MCMA - multiple-answer question
            for a in self.__test._testData[question_id]["answers"]:
                status = a["logansw_selected"]
                if self.__test._testInfo["test_mcma_radio"]:
                    # status -1/0/1 maps to widget suffix _0/_1/_2.
                    name = self.__widgetMcmaName + str(a["logansw_answer_id"]) + "_" + str(status + 1)
                    self.__setWidgetValue(name, True)
                else:
                    name = self.__widgetMcmaName + str(a["logansw_answer_id"])
                    if status == 1:
                        self.__setWidgetValue(name, True)
        elif question_type == 3:
            # TEXT - free text question
            a = self.__test._testData[question_id]
            if a["testlog_answer_text"] != None:
                self.__setWidgetValue(self.__widgetTextName, a["testlog_answer_text"])
        elif question_type == 4:
            # ORDER - ordering questions
            #if self.__test._testData[question_id]["testlog_change_time"] != None:
            for a in self.__test._testData[question_id]["answers"]:
                name = self.__widgetOrderName + str(a["logansw_answer_id"])
                self.__setWidgetValue(name, str(a["logansw_position"]))
#----------------------------------------------------------------------
    def __setWidgetValue(self, name, value):
        """Set *value* on the widget named *name*; no-op if it doesn't exist."""
        widget = wx.FindWindowByName(name)
        if widget != None:
            widget.SetValue(value)
#----------------------------------------------------------------------
    def __getWidgetValue(self, name):
        """Return the value of the widget named *name*, or None if missing."""
        widget = wx.FindWindowByName(name)
        if widget != None:
            return widget.GetValue()
        return None
#----------------------------------------------------------------------
    def onCheckBox(self, event):
        """MCMA checkbox handler: persist the checked state of every answer."""
        if self.__handleEvent == True:
            question_id = self.__test._selectedQuestionId
            question_type = self.__test._testData[question_id]["question_type"]
            answers = {}
            time = pyTCExamCommon.getCurrentTime()
            # Widget names are "mcma_answer_<id>"; the id is parsed here but
            # not actually used below since every checkbox is re-read anyway.
            tmp = event.GetEventObject().GetName().split("_")
            selectedAnswerIndex = int(tmp[2])
            for a in self.__test._testData[question_id]["answers"]:
                name = self.__widgetMcmaName + str(a["logansw_answer_id"])
                value = self.__getWidgetValue(name)
                answers[a["logansw_answer_id"]] = 1 if value == True else 0
            self.__test.setAnswerData(field="logansw_selected", answer_dict=answers)
            self.__test.setQuestionData(field="testlog_change_time", value=time)
#----------------------------------------------------------------------
    def onRadioBox(self, event):
        """Radio-button handler for MCSA and MCMA (radio style) questions.

        Persists the selection state of every answer of the current question
        and updates the question's change timestamp (None = unanswered).
        """
        if self.__handleEvent == True:
            question_id = self.__test._selectedQuestionId
            question_type = self.__test._testData[question_id]["question_type"]
            test_noanswer_enabled = self.__test._testInfo["test_noanswer_enabled"]
            answers = {}
            if question_type == 1:
                # MCSA
                # Widget name "mcsa_answer_<id>"; id 0 is the "no answer" radio.
                tmp = event.GetEventObject().GetName().split("_")
                selectedAnswerIndex = int(tmp[2])
                time = None
                if selectedAnswerIndex > 0:
                    time = pyTCExamCommon.getCurrentTime()
                    for a in self.__test._testData[question_id]["answers"]:
                        answers[a["logansw_answer_id"]] = 0
                    answers[selectedAnswerIndex] = 1
                else:
                    # "No answer": mark every answer as -1 (undecided).
                    for a in self.__test._testData[question_id]["answers"]:
                        answers[a["logansw_answer_id"]] = -1
                self.__test.resetUpdateQuery()
                self.__test.setAnswerData(field="logansw_selected", answer_dict=answers)
                self.__test.setQuestionData(field="testlog_change_time", value=time)
            elif question_type == 2:
                # MCMA
                # Widget name "mcma_answer_<id>_<state>"; state 0/1/2 maps to
                # logansw_selected -1/0/1.
                tmp = event.GetEventObject().GetName().split("_")
                value = int(tmp[3]) - 1
                selectedAnswerIndex = int(tmp[2])
                time = None
                #time = pyTCExamCommon.getCurrentTime()
                for a in self.__test._testData[question_id]["answers"]:
                    # Scan the three radios of this answer's group to find the
                    # one currently selected.
                    for i in range(3):
                        name = self.__widgetMcmaName + str(a["logansw_answer_id"]) + "_" + str(i)
                        if self.__getWidgetValue(name) == True:
                            value = i - 1
                            break
                    answers[a["logansw_answer_id"]] = value
                    if value > -1:
                        time = pyTCExamCommon.getCurrentTime()
                self.__test.resetUpdateQuery()
                self.__test.setAnswerData(field="logansw_selected", answer_dict=answers)
                self.__test.setQuestionData(field="testlog_change_time", value=time)
#----------------------------------------------------------------------
def onTextInput(self, event):
    """Persist free-text answer changes for the current question.

    Saves the text and, when it is non-empty, the time of the change
    (a change time of None marks an emptied answer).
    """
    if self.__handleEvent == True:
        if event.GetEventObject().GetName() == self.__widgetTextName:
            time = None
            text = self.__getWidgetValue(self.__widgetTextName)
            if len(text):
                time = pyTCExamCommon.getCurrentTime()
            self.__test.resetUpdateQuery()
            self.__test.setQuestionData(field="testlog_answer_text", value=text)
            self.__test.setQuestionData(field="testlog_change_time", value=time)
#----------------------------------------------------------------------
def onComboBox(self, event):
    """Record ordering answers for the current ORDER question (type 4).

    Each answer has a position combo box; an empty/blank choice marks
    that answer as unanswered.  Stores (selected, position) tuples per
    answer id plus the time of the last meaningful change.
    """
    if self.__handleEvent == True:
        question_id = self.__test._selectedQuestionId
        question_type = self.__test._testData[question_id]["question_type"]
        answers = {}
        if question_type == 4:
            time = None
            for a in self.__test._testData[question_id]["answers"]:
                field = []
                name = self.__widgetOrderName + str(a["logansw_answer_id"])
                value = self.__getWidgetValue(name)
                if value == "" or value == " ":
                    value = -1  # blank selection: treated as unanswered
                else:
                    value = int(value)
                field = ("logansw_selected", "logansw_position")
                if value == -1:
                    answers[a["logansw_answer_id"]] = (-1, 0)
                else:
                    answers[a["logansw_answer_id"]] = (1, value)
                if value > -1:
                    time = pyTCExamCommon.getCurrentTime()
            # NOTE(review): `field` is only bound inside the loop; with an
            # empty answers list the setAnswerData call below would raise
            # NameError.
            self.__test.resetUpdateQuery()
            self.__test.setAnswerData(field=field, answer_dict=answers)
            self.__test.setQuestionData(field="testlog_change_time", value=time)
#----------------------------------------------------------------------
def unbind(self):
    """Detach the radio-button and text-change event handlers from this window."""
    self.Unbind(wx.EVT_RADIOBUTTON)
    self.Unbind(wx.EVT_TEXT)
| [
"insomnia.soft.git@gmail.com"
] | insomnia.soft.git@gmail.com |
7d8d774734f7c7972ad8b0af987928e389fc3728 | df4228e2734bea9d5ff1908705683672f6561044 | /viceversion/asgi.py | ad9e43060a627bfe4d83b276a9270390af209eec | [] | no_license | Stazzz544/viceversa-project | 67431f8830a4ada04e2814acb2116e168581dbc2 | 5b49cfbb0873c1bb18f1b6416fb0e9895aa713c0 | refs/heads/master | 2022-12-28T10:22:20.195692 | 2020-09-28T16:43:58 | 2020-09-28T16:43:58 | 299,278,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for viceversion project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'viceversion.settings')
application = get_asgi_application()
| [
"stazzzdzr@gmail.com"
] | stazzzdzr@gmail.com |
dec8c19c8e2aefadf43e0d18ca81cf5d267b3c00 | 5c915d43fc4fc883a47eb6bf584dfa4a91ac93c5 | /tests/physics/helicityformalism/test_canonical.py | 68b747424dd641ce7a0b0008ab4a55555b8aa0cd | [] | no_license | spflueger/tensorwaves | e2fab0f8d493f00f90149ef7ff3062ac6e4e156e | 323576944cdc189b6c560301395afa6c08628265 | refs/heads/master | 2021-08-17T23:51:04.105559 | 2020-08-07T14:10:50 | 2020-08-07T14:16:19 | 244,342,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,026 | py | import math
import pytest # type: ignore
from tensorwaves.physics.helicity_formalism.amplitude import (
_clebsch_gordan_coefficient,
_determine_canonical_prefactor,
_get_orbital_angular_momentum,
)
@pytest.mark.parametrize(
"test_recipe, expected_value",
[
({"J": 1.0, "M": 1.0, "j1": 0.5, "m1": 0.5, "j2": 0.5, "m2": 0.5}, 1,),
(
{"J": 1.0, "M": 0.0, "j1": 0.5, "m1": 0.5, "j2": 0.5, "m2": -0.5},
math.sqrt(1 / 2),
),
(
{"J": 1.0, "M": 0.0, "j1": 0.5, "m1": -0.5, "j2": 0.5, "m2": 0.5},
math.sqrt(1 / 2),
),
(
{"J": 0.0, "M": 0.0, "j1": 0.5, "m1": -0.5, "j2": 0.5, "m2": 0.5},
-math.sqrt(1 / 2),
),
(
{"J": 0.0, "M": 0.0, "j1": 0.5, "m1": 0.5, "j2": 0.5, "m2": -0.5},
math.sqrt(1 / 2),
),
({"J": 3.0, "M": 3.0, "j1": 2.0, "m1": 2.0, "j2": 1.0, "m2": 1.0}, 1,),
(
{"J": 3.0, "M": 2.0, "j1": 2.0, "m1": 2.0, "j2": 1.0, "m2": 0.0},
math.sqrt(1 / 3),
),
(
{"J": 1.0, "M": 1.0, "j1": 2.0, "m1": 0.0, "j2": 1.0, "m2": 1.0},
math.sqrt(1 / 10),
),
],
)
def test_clebsch_gordan_coefficient(test_recipe, expected_value):
cgc = _clebsch_gordan_coefficient(test_recipe)
assert cgc == pytest.approx(expected_value, rel=1e-6)
@pytest.mark.parametrize(
"test_recipe, expected_value",
[
(
{
"LS": {
"ClebschGordan": {
"J": 1.5,
"M": 0.5,
"j1": 1.0,
"m1": 0.0,
"j2": 0.5,
"m2": 0.5,
}
}
},
1.0,
),
],
)
def test_orbital_angular_momentum(test_recipe, expected_value):
orbit_l = _get_orbital_angular_momentum(test_recipe)
assert orbit_l == pytest.approx(expected_value, rel=1e-6)
@pytest.mark.parametrize(
"test_recipe, expected_value",
[
(
{
"LS": {
"ClebschGordan": {
"J": 1.0,
"M": 1.0,
"j1": 2.0,
"m1": 0.0,
"j2": 1.0,
"m2": 1.0,
}
},
"s2s3": {
"ClebschGordan": {
"J": 1.0,
"M": 1.0,
"j1": 0.0,
"m1": 0.0,
"j2": 1.0,
"m2": 1.0,
}
},
},
math.sqrt(1 / 10) * 1,
),
(
{
"LS": {
"ClebschGordan": {
"J": 1.0,
"M": 1.0,
"j1": 2.0,
"m1": 0.0,
"j2": 1.0,
"m2": 1.0,
}
},
"s2s3": {
"ClebschGordan": {
"J": 1.0,
"M": 1.0,
"j1": 1.0,
"m1": 0.0,
"j2": 1.0,
"m2": 1.0,
}
},
},
math.sqrt(1 / 10) * -math.sqrt(1 / 2),
),
(
{
"LS": {
"ClebschGordan": {
"J": 1.0,
"M": 1.0,
"j1": 2.0,
"m1": 0.0,
"j2": 1.0,
"m2": 1.0,
}
},
},
KeyError(),
),
(
{
"LS": {
"ClebschGordan": {
"J": 1.0,
"M": 1.0,
"j1": 2.0,
"m1": 0.0,
"j2": 1.0,
"m2": 1.0,
}
},
"s2s3": {
"ClebschGordan": {
"J": 1.0,
"m": 1.0,
"j1": 1.0,
"m1": 0.0,
"j2": 1.0,
"m2": 1.0,
}
},
},
KeyError(),
),
],
)
def test_determine_canonical_prefactor(test_recipe, expected_value):
if isinstance(expected_value, BaseException):
with pytest.raises(type(expected_value)):
prefactor = _determine_canonical_prefactor(test_recipe)
else:
prefactor = _determine_canonical_prefactor(test_recipe)
assert prefactor == pytest.approx(expected_value, rel=1e-6)
| [
"noreply@github.com"
] | spflueger.noreply@github.com |
ef79a4d63a49d14bf5e3079c3c27e76024d83212 | ae6177cf2ebe87c3749f03e0ffaade2dac8b8688 | /AulasPython/Parte2/Semana2/maiusculas.py | 79905531de3a2f72b960d9e9e95d48bc6330da18 | [] | no_license | jmarq76/Learning_Programming | 8a7c598a733c1ba9983103e4aa284bed80ffabbe | bf15d351e239529645fb74a355e296d085683921 | refs/heads/master | 2022-11-17T23:03:32.236684 | 2020-07-07T12:05:56 | 2020-07-07T12:05:56 | 277,804,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | def maiusculas(frase):
''' Recebe uma frase e devolve string com as letras maiúsculas que existem
nesta frase, na ordem que elas aparecem'''
maiusculas = ""
for i in frase:
if ord(i) > 64 and ord(i) < 91:
maiusculas = maiusculas + i
return maiusculas
| [
"58978254+jmarq76@users.noreply.github.com"
] | 58978254+jmarq76@users.noreply.github.com |
6fbdb7bdc4a9bfbef4af7b925d463d3295fb2859 | 1f05f590241ef931fde1db93680007732bf4b48f | /resizer.py | 319a3a3f02642c38083f17ceb22d97726ee6a159 | [] | no_license | kevinlela/EasyImageCustomizer | 7e6da2d4d9092f56d1c7ed546377d956033b3f32 | ecbbaf9159a61de15abda36b594e6a64d8b49201 | refs/heads/master | 2020-03-22T01:24:49.737352 | 2018-07-01T05:55:29 | 2018-07-01T05:55:29 | 139,303,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | import sys
sys.path.append("/Users/xiaqingpan/Library/Python/2.7/bin")
from skimage import data
from skimage.transform import resize
from skimage import io
def resizer(img, width, height):
    """Return *img* resampled to shape (width, height).

    Bug fix: skimage.transform.resize returns a NEW array and does not
    modify its argument, so the original discarded the resized result
    and returned the input image unchanged.
    NOTE(review): resize() interprets the shape as (rows, cols); confirm
    that passing width first is the intended orientation.
    """
    return resize(img, (width, height))
image = data.camera()
io.imsave("origin.png", image)
image = resizer(image, 100, 100)
io.imsave("result.png", image) | [
"xiaqingp@gmail.com"
] | xiaqingp@gmail.com |
287ab5a78ec8d237777d9426dded532406192c74 | 53bd50bd01232eda47434ab5f99072dee7409829 | /Python/python_stack/myEnvironments/django_app/apps/surveys/urls.py | 93af08423e2d1c06741d1f9f73f856a926881095 | [] | no_license | WarrenHellman/University | 1f3e4dd0065b040a0f9b38317be96d739fa2ff68 | 3978b68910de06836f5cfd5c7ede687b444b70c9 | refs/heads/master | 2020-03-27T11:26:43.556660 | 2019-01-11T00:50:19 | 2019-01-11T00:50:19 | 146,487,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^surveys$', views.index),
url(r'^surveys/new$', views.new)
] | [
"w.e.hellman@gmail.com"
] | w.e.hellman@gmail.com |
d09e307bed163dcd5d241a3e519b081042bc41e2 | 09a2e7152ddbe99f98133b5906f80652bb2cbfe5 | /server/src/test_client.py | 93d9dde3d45c7637fb31727e6ac0dc8739eb2914 | [] | no_license | yaliu0703/PythonCode | 1fada1b1c170e33339d60dcaa267659c821b3857 | eb7bff39b2a84b9268e37a3a8d6fc57114ae34f5 | refs/heads/master | 2020-08-30T07:47:36.697910 | 2019-11-01T05:26:31 | 2019-11-01T05:26:31 | 218,309,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,979 | py | # -*- coding: UTF-8 -*-
'''
Created on 2016年6月30日
@author: lfy
'''
from socket import *
from string import *
from sys import *
from threading import *
from select import *
from time import *
import socket, string, select, sys, threading, time, os, struct
#main function
if __name__ == "__main__":
#host, port = socket.gethostname(), 5000
host, port = '10.8.177.221', 5000
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(10)
# connect to remote host
try :
sock.connect((host, port))
except :
print 'Unable to connect'
sys.exit()
print 'Connected to remote host. '
while 1:
#user entered a message
#msg = sys.stdin.readline()
msg = raw_input()
print"i say:"+msg
sock.send(msg)
print sock.recv(2048)
if msg[0:17] == "group_msg:up_file":
filepath = "D:\girl.bmp"
if os.path.isfile(filepath):
fileinfo_size = struct.calcsize('128sl') # 定义打包规则
# 定义文件头信息,包含文件名和文件大小
fhead = struct.pack('128sl', os.path.basename(filepath), os.stat(filepath).st_size)
sock.send("group_msg:up_file_head." + fhead) # 加消息头
print 'client filepath: ', filepath
fo = open(filepath, 'rb')
while True:
filedata = fo.read(1024)
if not filedata:
break
sock.send("group_msg:up_file_data." + filedata) # 加消息头
fo.close()
# sock.close()
print 'send over...'
elif msg[0:19] =="group_msg:down_file":#格式group_msg:down_file20161
sock.settimeout(600)
fileinfo_size = struct.calcsize('128sl')
buf = sock.recv(fileinfo_size + 25) # 25 是消息头长度
buf = buf[25:]
filename, filesize = struct.unpack('128sl', buf) # filesize 文件大小
filename_f = filename.strip('\00') # 去除打包的空格符
filenewname = os.path.join('C:\\', (filename_f)) # 存在 E 根目录
print 'file new name is %s, filesize is %s' % (filenewname, filesize)
recvd_size = 0 # 定义接收了的文件大小
file = open(filenewname, 'wb')
print 'stat receiving...'
while not recvd_size == filesize: # filesize 文件实际大小
if filesize - recvd_size > 1024:
rdata = sock.recv(1024 + 25)
recvd_size = recvd_size + len(rdata) - 25 # 15是消息头长度
else:
rdata = sock.recv(filesize - recvd_size + 25)
recvd_size = filesize
rdata = rdata[25:]
file.write(rdata)
file.close()
print 'receive done'
| [
"lujza.qq"
] | lujza.qq |
cce44d4896db289d833b4ab7a0d74363793fcbff | 0694071162d7f3c60292a3bf42995b7dc7fa74f0 | /cmdb/agent/models.py | d630a0c7ac60990d89ba71cb516c04fd36203850 | [
"MIT"
] | permissive | InformationX/easyCMDB | 4b0215c3f64b4fddc4a5a858427377cbede4e946 | 919050481fe4eab485ecaffcd4d6329020b9b41e | refs/heads/master | 2020-09-21T10:43:26.355124 | 2018-03-28T06:28:42 | 2018-03-28T06:28:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py | from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
import datetime
# Create your models here.
class ASDictMixin:
    """Mixin that exports an instance's attributes as a plain dict."""

    def as_dict(self):
        """Return ``__dict__`` with int/float/str values kept as-is and
        every other value converted with str()."""
        return {
            key: value if isinstance(value, (int, float, str)) else str(value)
            for key, value in self.__dict__.items()
        }
class Client(ASDictMixin,models.Model):
    """A registered agent host plus its latest heartbeat and metadata."""
    uuid = models.CharField(max_length=61, unique=True,default='')  # unique agent identifier
    hostname = models.CharField(max_length=128, default='')
    ip = models.GenericIPAddressField(default='0.0.0.0')
    mac = models.CharField(max_length=32,default='')
    paltform = models.CharField(max_length=128,default='')  # NOTE(review): misspelling of "platform"; renaming requires a DB migration
    arch = models.CharField(max_length=16, default='')
    cpu = models.IntegerField(default=0)
    mem = models.BigIntegerField(default=0)
    pid = models.IntegerField(default=0)
    time = models.FloatField(default=0)
    user = models.CharField(max_length=64, default='')
    application = models.CharField(max_length=64, default='')
    addr = models.CharField(max_length=256, default='')
    remark = models.TextField(default='')
    heartbeat_time = models.DateTimeField(auto_now_add=True)  # refreshed by heartbeat()
    register_time = models.DateTimeField(auto_now_add=True)
    modify_time = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'clients'

    def __repr__(self):
        return f'Host<ID: {self.uuid}, Name: {self.hostname}>'

    __str__ = __repr__

    # Judge host is online or not?
    @property
    def is_online(self):
        """True when a heartbeat arrived within the last 5 minutes."""
        return timezone.now() - self.heartbeat_time < datetime.timedelta(minutes=5)

    # Register a Host
    @classmethod
    def register(cls, uuid, **kwargs):
        """Get-or-create the host identified by *uuid*, copy known fields
        from kwargs, save, and return (created, instance).

        NOTE(review): get-then-create is not atomic; concurrent first
        registrations of the same uuid can race on the unique column.
        """
        instance = None
        created = False
        try:
            instance = cls.objects.get(uuid=uuid)
        except ObjectDoesNotExist as e:
            instance = cls()
            setattr(instance, 'uuid', uuid)
            created = True
        for key, value in kwargs.items():
            if hasattr(instance, key):  # unknown fields are ignored silently
                setattr(instance, key, value)
        instance.save()
        return created, instance

    # Set a Heartbeat time
    @classmethod
    def heartbeat(cls, uuid):
        """Refresh heartbeat_time for *uuid*; return False when unknown."""
        try:
            instance = cls.objects.get(uuid=uuid)
            instance.heartbeat_time = timezone.now()
            instance.save()
            return True
        except ObjectDoesNotExist as e:
            return False
class Resource(ASDictMixin, models.Model):
    """A point-in-time CPU/memory usage sample reported by an agent."""
    uuid = models.CharField(max_length=64, default='')  # owning Client uuid (no FK constraint)
    time = models.DateTimeField(auto_now_add=True)
    cpu = models.FloatField(default=0)
    mem = models.FloatField(default=0)

    @classmethod
    def create(cls, uuid, **kwargs):
        """Persist one sample for *uuid*, copying known fields from kwargs."""
        instance = cls()
        setattr(instance, 'uuid', uuid)
        for key, value in kwargs.items():
            if hasattr(instance, key):  # unknown fields are ignored silently
                setattr(instance, key, value)
        instance.save()
        return instance

    class Meta:
        db_table = 'resources'
| [
"1046710938@qq.com"
] | 1046710938@qq.com |
1ebcd378f007964e3b29cc790ed88d7a8bd87369 | d3f5eb057e9efbbd71dceb8d2af380b9d9d7ab6a | /csopen/core/tests/test_api_supplier.py | 4dd90be2a3f1d61d4880e7147b4698065bd67e45 | [] | no_license | henemer/csopen | 6734a403dd65a0842bf74cdddb6b9fc2097b10d0 | dea56bc320bc0284006518dbf0e3bb44ab399dfd | refs/heads/master | 2021-01-10T10:39:46.341798 | 2016-04-29T17:50:22 | 2016-04-29T17:50:22 | 51,755,274 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | from csopen.core.models import Supplier
from django.test import TestCase
from django_extensions.db.fields import json
from rest_framework.test import APIClient
class SupplierModelTest(TestCase):
def setUp(self):
self.client = APIClient()
self.obj = Supplier(
code=1,
company='Emerson Henning ME',
trade='Henning Informática',
cnpj='00000000000000',
observations='Alguma observação.'
)
def test_post_supplier(self):
j = json.dumps({'code':1, 'company':'Empresa S.A.'})
response = self.client.post('/api/fornecedores/',
content_type='application/json',
data = j)
self.assertEqual(response.status_code, 201)
def test_retrieve_supplier(self):
self.obj.save()
response = self.client.get('/api/fornecedores/1' )
self.assertContains(response, 'Emerson Henning ME')
def test_update_supplier(self):
self.obj.save()
j = json.dumps({'id':1, 'code':22, 'company':'Empresa XYZ'})
response = self.client.put('/api/fornecedores/1',
content_type='application/json',
data = j)
self.assertContains(response, 'Empresa XYZ')
def test_delete_supplier(self):
self.obj.save()
response = self.client.delete('/api/fornecedores/1')
self.assertContains(response, 'Ok')
def test_get_supplier(self):
self.obj.save()
response = self.client.get('/api/fornecedores/')
self.assertContains(response, 'Emerson Henning ME')
def test_supplier_code_exists(self):
self.obj.save();
client = APIClient()
response = client.get('/api/fornecedores/codeexists/0/1', format='json')
self.assertEqual(response.data, True)
def test_supplier_next_code(self):
self.obj.save()
client = APIClient()
response = client.get('/api/fornecedores/getmaxcode/', format='json')
self.assertEqual(2, response.data) | [
"emerson@henning.com.br"
] | emerson@henning.com.br |
1607a3e3331e20d9281ee04b374c3d4ea110cb01 | c2849586a8f376cf96fcbdc1c7e5bce6522398ca | /ch21/ex21-15.pybench2.py | a110d546ad7c35d5e88ae11bbd6ee12cc27e3857 | [] | no_license | freebz/Learning-Python | 0559d7691517b4acb0228d1cc76de3e93915fb27 | 7f577edb6249f4bbcac4f590908b385192dbf308 | refs/heads/master | 2020-09-23T01:48:24.009383 | 2019-12-02T12:26:40 | 2019-12-02T12:26:40 | 225,371,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | # pybench2.py
...
def runner(stmts, pythons=None, tracemd=False):
for (number, repeat, setup, stmt) in stmts:
if not pythons:
...
best = min(timeit.repeat(
setup=setup, stmt=stmt, number=number, repeat=repeat))
else:
setup = setup.replace('\t', ' ' * 4)
setup = ' '.join('-s "%s"' % line for line in setup.split('\n'))
...
for (ispy3, python) in pythons:
...
cmd = '%s -m timeit -n %s -r %s %s %s' %
(python, number, repeat, setup, args)
# pybench2_cases.py
import pybench2, sys
...
stmts = [ # (num,rep,setup,stmt)
(0, 0, "", "[x ** 2 for x in range(1000)]"),
(0, 0, "", "res=[]\nfor x in range(1000): res.append(x ** 2)")
(0, 0, "def f(x):\n\treturn x",
"[f(x) for x in 'spam' * 2500]"),
(0, 0, "def f(x):\n\treturn x",
"res=[]\nfor x in 'spam' * 2500:\n\tres.append(f(x))"),
(0, 0, "L = [1, 2, 3, 4, 5]", "for i in range(len(L)): L[i] += 1"),
(0, 0, "L = [1, 2, 3, 4, 5]", "i=0\nwhile i < len(L):\n\tL[i] += 1\n\ti += 1")]
...
pybench2.runner(stmts, pythons, tracemd)
| [
"freebz@hananet.net"
] | freebz@hananet.net |
8709aa8d2425d2e8c07103a8eba6bda48f9330fb | 431445d1674fbd4f04b68e930343862e1a075260 | /AdslProxy/adslproxy/db.py | 71aa312853677fdcd642590c39c4a78782d99a14 | [] | no_license | bowtcn/spider_collection | 409e49ac4e86695827d874038d7a21823c6a0c17 | 192ce5e3ea423c504393f6fef483f08336f184fa | refs/heads/master | 2020-07-07T12:38:08.594701 | 2018-07-19T03:32:25 | 2018-07-19T03:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | import redis
import random
# Redis数据库IP
REDIS_HOST = '8.8.8.8'
# Redis数据库密码, 如无则填None
REDIS_PASSWORD = '123456'
# Redis数据库端口
REDIS_PORT = 6379
# 代理池键名
PROXY_KEY = 'adsl'
class RedisClient(object):
    """Thin wrapper around a Redis hash mapping host names to proxy addresses."""

    def __init__(self, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, proxy_key=PROXY_KEY):
        """
        Initialize the Redis connection.
        :param host: Redis address
        :param port: Redis port
        :param password: Redis password
        :param proxy_key: name of the Redis hash holding the proxies
        """
        self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True)
        self.proxy_key = proxy_key

    def set(self, name, proxy):
        """
        Store a proxy.
        :param name: host name
        :param proxy: proxy address
        :return: result of the HSET command
        """
        return self.db.hset(self.proxy_key, name, proxy)

    def get(self, name):
        """
        Fetch the proxy of a host.
        :param name: host name
        :return: proxy address, or None when absent
        """
        return self.db.hget(self.proxy_key, name)

    def count(self):
        """
        Total number of stored proxies.
        :return: proxy count
        """
        return self.db.hlen(self.proxy_key)

    def remove(self, name):
        """
        Delete a host's proxy.
        :param name: host name
        :return: number of removed hash fields
        """
        return self.db.hdel(self.proxy_key, name)

    def names(self):
        """
        List all host names.
        :return: list of host names
        """
        return self.db.hkeys(self.proxy_key)

    def proxies(self):
        """
        List all proxies.
        :return: list of proxy addresses
        """
        return self.db.hvals(self.proxy_key)

    def random(self):
        """
        Pick a random proxy.
        NOTE(review): raises IndexError when the pool is empty.
        :return: a proxy address
        """
        proxies = self.proxies()
        return random.choice(proxies)

    def all(self):
        """
        Full name -> proxy mapping.
        :return: dict of all entries
        """
        return self.db.hgetall(self.proxy_key)
| [
"153329152@qq.com"
] | 153329152@qq.com |
9b4aeca7aaf8054a65b69e335967fec315c3caba | 4591a91b39bf975b0779482ce4a908a9dc2cc17d | /trainServer/src/ActivateFunc.py | dde99a301d061a067da4ecdff31cddcb455ea887 | [] | no_license | hongquanzhou/bear_fault_diagnose | bcaf2f386c86dab28fc5fcdbca2d60fec6ae1a04 | 5f65c5f33bc253a03ebd3a2ef99cb127208831b7 | refs/heads/master | 2023-04-14T04:40:02.223660 | 2021-04-26T07:16:46 | 2021-04-26T07:16:46 | 286,705,838 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from enum import Enum
class ActivateFunc(Enum):
    """Activation functions selectable for model layers.

    Member values are the string identifiers understood by Keras
    (``keras.activations.get``).
    """
    elu = 'elu'
    exponential = 'exponential'
    hard_sigmoid = 'hard_sigmoid'
    # Bug fix: Keras has no activation named 'liner'; the identifier is
    # 'linear'.  The member name is kept so existing references to
    # ActivateFunc.liner keep working.
    liner = 'linear'
    relu = 'relu'
    selu = 'selu'
    sigmoid = 'sigmoid'
    softmax = 'softmax'
    softplus = 'softplus'
    softsign = 'softsign'
    tanh = 'tanh'
| [
"18121350@bjtu.edu.cn"
] | 18121350@bjtu.edu.cn |
1eda0b8f2f98f764ee5779f297abf8a6c8d28ac8 | 2da754bca0a676402e8030dd9c195e062b3ace7e | /forensics/mac_to_location/mac_to_location.py | 480c9740e69260ce683b6b9a56e95793a3b14db5 | [] | no_license | supraz/hoax | b9735ced783db049ab0aae61596c0c97b95fb8a3 | 7f4353e44ef585cd85aeb6533b78efa75dbee6c4 | refs/heads/master | 2021-01-18T19:19:21.393663 | 2013-12-22T21:49:45 | 2013-12-22T21:49:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | import mechanize
import urllib
import urlparse
import re
# didn't forget to remove these :-)
USERNAME = 'devsupraz0'
PASSWORD = 'devsupraz123'
SAMPLE_MAC = '0A:2C:EF:3D:25:1B'
def wigle_print(username, password, netid):
browser = mechanize.Browser()
browser.open('http://wigle.net')
request_data = urllib.urlencode({'credential_0' : username,
'credential_1' : password})
browser.open('http://wigle.net/gps/gps/main/login', request_data)
params = {}
params['netid'] = netid
request_params = urllib.urlencode(params)
resp_url = 'http://wigle.net/gps/gps/main/confirmquery/'
resp = browser.open(resp_url, request_params).read()
map_lat = 'N/A'
map_lon = 'N/A'
r_lat = re.findall(r'maplat=.*\&', resp)
r_lon = re.findall(r'maplon=.*\&', resp)
if r_lat:
map_lat = r_lat[0].split('&')[0].split('=')[1]
if r_lon:
map_lon = r_lon[0].split()
print '[-] LAT: ' + map_lat + ', LON: ' + map_lon
def main():
wigle_print(USERNAME, PASSWORD, SAMPLE_MAC)
if __name__ == '__main__':
main() | [
"supraz69@gmail.com"
] | supraz69@gmail.com |
552fabaac23280042b2ab4a761df1d8b4eca7d0f | 0fad0cf1a7b8e6d41fb86d8dc8f0e747fff808af | /models/cifar/resnet.py | b81c51598afd97b0a9034aed50c07728184a5886 | [
"MIT"
] | permissive | suyanzhou626/DHM | bcb33f8409da98c1f99a095425e77226f9148cd5 | c67639df309397e01e20f071e7f50f9de5c1c839 | refs/heads/master | 2022-11-18T06:55:17.749736 | 2020-07-16T02:54:08 | 2020-07-16T02:54:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,104 | py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch
import torch.nn as nn
import math
__all__ = ['ResNet', 'resnet32', 'resnet110', 'resnet1202']
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution (padding 1, no bias) mapping
    in_planes -> out_planes channels."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convs with BN/ReLU and a residual shortcut.

    When `downsample` is given, the shortcut is parameter-free: the
    input is pooled by `downsample` and zero-padded along the channel
    dimension to match the main branch (ResNet "option A" style).
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            c_in = x.shape[1]
            b, c_out, h, w = out.shape
            residual = self.downsample(x)
            # Bug fix: the padding tensor was allocated with .cuda(),
            # which crashed on CPU-only machines; new_zeros matches the
            # device and dtype of `out` instead.
            zero_padding = out.new_zeros((b, c_out - c_in, h, w))
            residual = torch.cat([residual, zero_padding], dim=1)

        out += residual
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (channel expansion 4).

    When `downsample` is given, the shortcut is parameter-free: the
    input is pooled by `downsample` and zero-padded along the channel
    dimension to match the main branch (ResNet "option A" style).
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            c_in = x.shape[1]
            b, c_out, h, w = out.shape
            residual = self.downsample(x)
            # Bug fix: the padding tensor was allocated with .cuda(),
            # which crashed on CPU-only machines; new_zeros matches the
            # device and dtype of `out` instead.
            zero_padding = out.new_zeros((b, c_out - c_in, h, w))
            residual = torch.cat([residual, zero_padding], dim=1)

        out += residual
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet with two auxiliary classifier branches.

    forward() returns three logit tensors: x0 from the main (deepest)
    path, x1 from a branch forked after layer2, and x2 from a branch
    forked after layer1 (a multi-head setup with auxiliary classifiers).

    Args:
        depth: total depth; must satisfy depth = 6n + 2 (BasicBlock only).
        branch_layers: [[n2a, n2b], [n1]] -- block counts for the two
            auxiliary branches (head2's layer2/layer3 and head1's stage).
        num_classes: output size of every classifier head.
    """

    def __init__(self, depth, branch_layers, num_classes=10):
        self.inplanes = 16
        super(ResNet, self).__init__()
        block = BasicBlock
        assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
        n = (depth - 2) // 6  # residual blocks per trunk stage
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        # Main trunk: 16 -> 32 -> 64 channels, spatial stride 1/2/2.
        self.layer1 = self._make_layer(block, 16, n)
        inplanes_head2 = self.inplanes  # channel width at the head2 fork
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        inplanes_head1 = self.inplanes  # channel width at the head1 fork
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # Auxiliary head 2: forks after layer1, mirrors layer2/layer3.
        self.inplanes = inplanes_head2
        self.layer2_head2 = self._make_layer(block, 32, branch_layers[0][0], stride=2)
        self.layer3_head2 = self._make_layer(block, 64, branch_layers[0][1], stride=2)
        self.avgpool_head2 = nn.AvgPool2d(8, stride=1)  # NOTE(review): unused -- forward() uses self.avgpool
        self.fc_head2 = nn.Linear(64 * block.expansion, num_classes)
        # Auxiliary head 1: forks after layer2, one wider stage.
        self.inplanes = inplanes_head1
        self.layer3_head1 = self._make_layer(block, 128, branch_layers[1][0], stride=2)
        self.avgpool_head1 = nn.AvgPool2d(8, stride=1)  # NOTE(review): unused -- forward() uses self.avgpool
        self.fc_head1 = nn.Linear(128 * block.expansion, num_classes)
        # He-style initialization for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # NOTE(review): this rebinds `n` (blocks-per-stage above).
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first may downsample via a
        parameter-free AvgPool shortcut (channels get zero-padded inside
        the block itself)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.AvgPool2d(kernel_size=2, stride=2)
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = node2 = self.layer1(x)  # fork point for head2
        x = node1 = self.layer2(x)  # fork point for head1
        x = self.layer3(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x0 = self.fc(x)  # main classifier
        x = self.layer2_head2(node2)
        x = self.layer3_head2(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x2 = self.fc_head2(x)  # shallow-fork auxiliary classifier
        x = self.layer3_head1(node1)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x1 = self.fc_head1(x)  # deep-fork auxiliary classifier
        return x0, x1, x2
def resnet32(**kwargs):
    """
    Constructs a ResNet-32 model.
    """
    # branch_layers [[5, 3], [5]]: block counts for the auxiliary heads
    # (head2's layer2/layer3 and head1's single stage) -- see ResNet.
    return ResNet(32, [[5, 3], [5]], **kwargs)
def resnet110(**kwargs):
    """
    Constructs a ResNet-110 model.
    """
    # branch_layers [[9, 9], [18]]: block counts for the auxiliary heads
    # (head2's layer2/layer3 and head1's single stage) -- see ResNet.
    return ResNet(110, [[9, 9], [18]], **kwargs)
def resnet1202(**kwargs):
    """
    Constructs a ResNet-1202 model.
    """
    # branch_layers [[100, 100], [200]]: block counts for the auxiliary
    # heads (head2's layer2/layer3 and head1's single stage) -- see ResNet.
    return ResNet(1202, [[100, 100], [200]], **kwargs)
| [
"liduo95@gmail.com"
] | liduo95@gmail.com |
573ee99b7b9e199d81be42bbe4e61d9478eca3da | a6676ff9a9c656c163692d0958a2e3beda2912d2 | /python dersleri/stringsQuiz.py | 63fba2f937867974f3aec5c9cfce897ddf15ef46 | [
"Apache-2.0"
] | permissive | HaNuNa42/pythonDersleri | ba937fddb026c8a70255e41af376fae88e57fafa | f21d4cdc5a99398d4ebdefda9224cb6e86cf755b | refs/heads/master | 2022-12-05T06:55:52.754261 | 2020-07-28T05:41:07 | 2020-07-28T05:41:07 | 263,439,052 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | website = "http://www.google.com"
course = "Python Kursu"
# 1- 'course' karakter dizisinde kaç karakter bulunmaktadır ?
result = len(course)
length = len(website)
# 2- 'website' içinden www karakterlerini alın.
result = website[7:10]
# 3- 'website' içinden com karakterlerini alın.
result = website[22:25]
result = website[length-3:length]
# 4- 'course' içinden ilk 15 ve son 15 karakterlerini alın.
result = course[0:15]
result = course[:15]
result = course[-15:]
# 5- 'course' ifadesindeki karakterleri tersten yazdırın.
result = course[::-1]
name, surname, age, job = 'Bora','Yılmaz', 32, 'mühendis'
# 6- Yukarıda verilen değişkenler ile ekrana aşağıdaki ifadeyi yazdırın.
# 'Benim adım Bora Yılmaz, Yaşım 32 ve mesleğim mühendis.'
result = "Benim adım "+ name+ " " + surname+ ", Yaşım "+ str(age) + " ve mesleğim "+ job
result = "Benim adım {0} {1}, Yaşım {2} ve mesleğim {3}.".format(name,surname,age,job)
result = f'Benim adım {name} {surname}, Yaşım {age} ve "mesleğim" {job}.'
# 7- 'Hello world' ifadesindeki w harfini 'W' ile değiştirin.
s = 'Hello world'
s = s[0:6] + 'W'+ s[-4:]
print(s)
# 8- 'abc' ifadesini yan yana 3 defa yazdırın.
result = 'abc ' * 3
print(result) | [
"noreply@github.com"
] | HaNuNa42.noreply@github.com |
b1846eb652d0d9d08bcf70f40a2fa64db21b295c | 6c26277b61ec3c41a9e8c238d3e058abce4a36e8 | /scrapy/FirstScrapy.py | bbc593270ef5384622cbd1d655c702b5c9fbdf5f | [
"Apache-2.0"
] | permissive | liusong-cn/python | 1eda7b15f01e6958ceaa8eb7e9435a9da01ec6c4 | 4143660f1a28e8a19a620e66cfc552f8d29c2946 | refs/heads/master | 2023-04-14T10:24:09.679958 | 2023-04-05T15:49:13 | 2023-04-05T15:49:13 | 221,226,548 | 1 | 2 | Apache-2.0 | 2021-09-25T04:09:42 | 2019-11-12T13:38:32 | Python | UTF-8 | Python | false | false | 204 | py | import requests as r
# First attempt at scraping web-page data with Python, using an HTTP GET request.
url = 'http://news.sina.com.cn/s/2019-06-05/doc-ihvhiews6901394.shtml'
html = r.get(url)
# Force UTF-8 so the Chinese text decodes correctly regardless of response headers.
html.encoding='utf-8'
print(html.text)
"liusong@glaway.com"
] | liusong@glaway.com |
000fe5fe6d7a41642db55280e7a0463e118c759e | 80301f1cffc5afce13256e2ecab6323c5df00194 | /en.fc/py/E0001.py | fe101d453fb481e13f8ba0eeffa341ab4c59e54f | [] | no_license | ZhenjianYang/SoraVoiceScripts | c1ddf7c1bbcb933243754f9669bd6b75777c87b9 | 94a948090aba0f63b10b2c69dc845dc99c822fc4 | refs/heads/master | 2023-04-18T04:54:44.306652 | 2023-04-06T11:15:17 | 2023-04-06T11:15:17 | 103,167,541 | 43 | 11 | null | 2021-03-06T08:52:54 | 2017-09-11T17:36:55 | Python | UTF-8 | Python | false | false | 28,509 | py | from ED6ScenarioHelper import *
def main():
SetCodePage("ms932")
CreateScenaFile(
FileName = 'E0001 ._SN',
MapName = 'event',
Location = 'E0001.x',
MapIndex = 1,
MapDefaultBGM = "ed60010",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'General Morgan', # 9
'Royal Army Soldier A', # 10
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH02080 ._CH', # 00
'ED6_DT07/CH01300 ._CH', # 01
)
AddCharChipPat(
'ED6_DT07/CH02080P._CP', # 00
'ED6_DT07/CH01300P._CP', # 01
)
DeclNpc(
X = -7752,
Z = -2000,
Y = 4527,
Direction = 270,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -7116,
Z = -2000,
Y = -197,
Direction = 270,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
ScpFunction(
"Function_0_FA", # 00, 0
"Function_1_146", # 01, 1
"Function_2_147", # 02, 2
"Function_3_15D", # 03, 3
"Function_4_FF8", # 04, 4
"Function_5_163D", # 05, 5
)
def Function_0_FA(): pass

label("Function_0_FA")

# Startup dispatcher (init function, index 0): scenario flags decide which
# scripted event plays when the map is entered.  NOTE(review): flag/opcode
# semantics below are inferred from the decompiler's conventions -- confirm
# against the ED6 scripting docs.
# Flags 0x64.3, 0x64.4 and 0x64.5 set while 0x65.0 is still clear
# -> presumably first visit: OP_A2 sets flag 0x328 (= 0x65 bit 0) and
#    event 3 (Function_3) runs.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x64, 5)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x64, 4)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x64, 3)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x65, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_115")
OP_A2(0x328)
Event(0, 3)

label("loc_115")

# Flag 0x7F.2 set -> OP_A3 clears flag 0x3FA (= 0x7F bit 2) and event 5
# (Function_5) runs.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x7F, 2)), scpexpr(EXPR_END)), "loc_123")
OP_A3(0x3FA)
Event(0, 5)

label("loc_123")

# Dispatch on runtime value index 0; only case 100 is handled, everything
# else falls through to the plain return at loc_145.
Switch(
    (scpexpr(EXPR_PUSH_VALUE_INDEX, 0x0), scpexpr(EXPR_END)),
    (100, "loc_12F"),
    (SWITCH_DEFAULT, "loc_145"),
)

label("loc_12F")

# Flag 0x65.0 set and 0x65.1 clear -> set flag 0x329 (= 0x65 bit 1) and
# run event 4 (Function_4).
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x65, 0)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x65, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_142")
OP_A2(0x329)
Event(0, 4)

label("loc_142")

Jump("loc_145")

label("loc_145")

Return()

# Function_0_FA end
def Function_1_146(): pass

label("Function_1_146")

# Entry-point hook (index 1, see DeclEntryPoint): intentionally a no-op.
Return()

# Function_1_146 end
def Function_2_147(): pass

label("Function_2_147")

# NPC idle routine (both NPCs declare InitScenaIndex = 2).  The guard
# expression is the constant 1, so this loops forever: OP_99 presumably
# plays chip animation frames 0..7 over 0x5DC (1500) ms on the calling
# character (0xFE) -- confirm OP_99 semantics against the opcode docs.
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_15C")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_147")

label("loc_15C")

Return()

# Function_2_147 end
def Function_3_15D(): pass
label("Function_3_15D")
ClearMapFlags(0x1)
EventBegin(0x0)
OP_6D(1000, 5000, -3500, 0)
SetChrPos(0x101, 1000, 5000, -3590, 225)
SetChrPos(0x102, -360, 5000, -3840, 135)
SetChrPos(0x103, 730, 5000, -4940, 315)
ChrTalk( #0
0x101,
(
"#002FWe checked it over, but it looks\x01",
"like there's nobody inside...\x02",
)
)
CloseMessageWindow()
ChrTalk( #1
0x102,
(
"#012FThere's a high possibility the\x01",
"passengers were transferred\x01",
"to the sky bandits' airship.\x02\x03",
"And then to wherever their\x01",
"hideout is...\x02",
)
)
CloseMessageWindow()
ChrTalk( #2
0x101,
(
"#002FAgreed.\x02\x03",
"But this sucks... Right when\x01",
"I thought we had some clues,\x01",
"we're back to zero.\x02",
)
)
CloseMessageWindow()
ChrTalk( #3
0x103,
(
"#020FCome on, cheer up already.\x02\x03",
"It's not like every clue has\x01",
"completely vanished.\x02\x03",
"Why do you think the sky bandits\x01",
"hid the airliner in a place like this?\x02",
)
)
CloseMessageWindow()
ChrTalk( #4
0x101,
"#002FHuh...?\x02",
)
CloseMessageWindow()
ChrTalk( #5
0x103,
(
"#020FAs far as I can tell, the orbal energy in the ship\x01",
"has completely stopped.\x02\x03",
"Which means that the orbal engine was stripped\x01",
"from the aircraft.\x02\x03",
"Furthermore, the sky bandits made multiple trips\x01",
"to carry off a large amount of cargo.\x02\x03",
"Considering the time and risk involved, don't you\x01",
"think it would have been more effective just to\x01",
"take the entire airliner to their hideout?\x02",
)
)
CloseMessageWindow()
ChrTalk( #6
0x101,
(
"#000FYeah, that does seem a little\x01",
"odd that they didn't...\x02\x03",
"So, why'd they hide the airliner\x01",
"here then?\x02\x03",
"Umm, all I can think of is that\x01",
"they did it in order to...\x02",
)
)
CloseMessageWindow()
FadeToDark(300, 0, 100)
RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
0,
10,
10,
0,
(
"[Sort the cargo.]\x01", # 0
"[Move the hostages aboard their own aircraft.]\x01", # 1
"[Steal the orbal engine.]\x01", # 2
"[Keep clear of the Royal Army's search party.]\x01", # 3
"[Ditch the Linde, because their hideout is somewhere weird.]\x01", # 4
)
)
MenuEnd(0x0)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
OP_56(0x0)
FadeToBright(300, 0)
Switch(
(scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_END)),
(0, "loc_6DC"),
(1, "loc_79C"),
(2, "loc_852"),
(3, "loc_90E"),
(4, "loc_A06"),
(SWITCH_DEFAULT, "loc_A2E"),
)
label("loc_6DC")
ChrTalk( #7
0x103,
(
"#026FIt's true this may have been a\x01",
"good place to sort the cargo\x01",
"because of the space...\x02\x03",
"However, it doesn't account for\x01",
"the fact that they didn't take\x01",
"the airliner to their hideout.\x02",
)
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_79C")
ChrTalk( #8
0x103,
(
"#026FIt's true they would have needed to\x01",
"land in order to move the hostages...\x02\x03",
"However, it doesn't account for\x01",
"the fact that they didn't take\x01",
"the airliner to their hideout.\x02",
)
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_852")
ChrTalk( #9
0x103,
(
"#026FIt's true they would have needed\x01",
"to land in order to remove the\x01",
"orbal engine...\x02\x03",
"However, it doesn't account for\x01",
"the fact that they didn't take\x01",
"the airliner to their hideout.\x02",
)
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_90E")
ChrTalk( #10
0x103,
(
"#026FIt's true the airliner is rather\x01",
"large and easily seen...\x02\x03",
"And in that sense, it would seem highly\x01",
"likely that they would leave it in a\x01",
"different place than their hideout.\x02\x03",
"However, that alone couldn't be\x01",
"considered a decisive reason.\x02",
)
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_A06")
ChrTalk( #11
0x103,
"#020FYes, that's exactly right.\x02",
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_A2E")
ChrTalk( #12
0x103,
(
"#020FFrom my guess, I would imagine\x01",
"that their hideout is in a slightly\x01",
"peculiar place.\x02\x03",
"Maybe 10 to 15 arge in size...\x02\x03",
"In short, a peculiar place on which\x01",
"only a small aircraft like the sky\x01",
"bandits' airship could land.\x02",
)
)
CloseMessageWindow()
ChrTalk( #13
0x101,
"#000FInteresting...\x02",
)
CloseMessageWindow()
ChrTalk( #14
0x102,
(
"#012FHow about terrain covered with\x01",
"extreme differences in height,\x01",
"like mountains and ravines...?\x02\x03",
"That seems like a likely place\x01",
"for the sky bandits' hideout.\x02",
)
)
CloseMessageWindow()
ChrTalk( #15
0x103,
(
"#020FYes, that's what I've been\x01",
"thinking, too.\x02\x03",
"However, if that's the case...then we\x01",
"may be unable to do anything else.\x02\x03",
"There's the possibility that their\x01",
"hideout may be in a place we can't\x01",
"reach by foot.\x02",
)
)
CloseMessageWindow()
ChrTalk( #16
0x101,
"#002FTh-Then what CAN we do?\x02",
)
CloseMessageWindow()
ChrTalk( #17
0x103,
(
"#022FWell...\x02\x03",
"I hate to say it, but we may have\x01",
"to share our conclusions with the\x01",
"army and ask for their cooperation.\x02\x03",
"Because they're the ones with\x01",
"the patrol ships.\x02",
)
)
CloseMessageWindow()
ChrTalk( #18
0x101,
(
"#004FWhat...? Now you're trying to tell\x01",
"us we should go crawling back to\x01",
"the army and ask them for help?!\x02",
)
)
CloseMessageWindow()
ChrTalk( #19
0x102,
(
"#012FEither way, we still have to report\x01",
"to them about the airliner.\x02\x03",
"Personally speaking, I still think\x01",
"we should cooperate with the army,\x01",
"whatever their attitude may be.\x02\x03",
"Especially if that means bringing\x01",
"the hostages back safe and sound.\x02",
)
)
CloseMessageWindow()
ChrTalk( #20
0x101,
(
"#002FI guess you're right...\x02\x03",
"This isn't the time or place to be\x01",
"letting my personal feelings get\x01",
"the best of me.\x02",
)
)
CloseMessageWindow()
ChrTalk( #21
0x103,
(
"#020FFor the time being, let's get back\x01",
"to the guild and report our findings\x01",
"to Lugran.\x02\x03",
"We should be able to contact\x01",
"the Haken Gate if we use the\x01",
"orbal telephone.\x02",
)
)
CloseMessageWindow()
EventEnd(0x0)
Return()
# Function_3_15D end
def Function_4_FF8(): pass
label("Function_4_FF8")
EventBegin(0x0)
ClearChrFlags(0x9, 0x80)
ClearChrFlags(0x8, 0x80)
ChrTalk( #22
0x101,
(
"#004FHuh?!\x02\x03",
"Wh-What the heck?!\x02",
)
)
CloseMessageWindow()
ChrTalk( #23
0x102,
(
"#017FGreat... Now this was something\x01",
"I did not expect.\x02",
)
)
CloseMessageWindow()
ChrTalk( #24
0x103,
(
"#025FI wonder if we should be glad,\x01",
"since they've saved us the trouble of\x01",
"having to contact them...\x02",
)
)
CloseMessageWindow()
ChrTalk( #25
0x9,
(
"We have found a suspicious\x01",
"armed group!\x02",
)
)
CloseMessageWindow()
ChrTalk( #26
0x9,
"Put your hands in the air! All of you!\x02",
)
CloseMessageWindow()
ChrTalk( #27
0x9,
(
"What is this world coming to? A woman\x01",
"and two kids are the sky bandits...?\x01",
"Though the girl DOES look shifty...\x02",
)
)
CloseMessageWindow()
ChrTalk( #28
0x101,
(
"#009FH-Hey! I do not! And who are\x01",
"you calling sky bandits?!\x02\x03",
"Can't you see this shiny emblem\x01",
"on my chest?!\x02",
)
)
CloseMessageWindow()
NpcTalk( #29
0x8,
"Man's Voice",
"Hmph! The bracer emblem, huh...?\x02",
)
CloseMessageWindow()
NpcTalk( #30
0x8,
"Man's Voice",
(
"I hope you don't think for a moment\x01",
"something like that proves your\x01",
"innocence.\x02",
)
)
CloseMessageWindow()
ChrTalk( #31
0x101,
"#004FG-General Morgan?!\x02",
)
CloseMessageWindow()
ChrTalk( #32
0x102,
"#014FWhy are you here...?\x02",
)
CloseMessageWindow()
ChrTalk( #33
0x8,
(
"#160FAfter looking over the reports of my men, I\x01",
"found this place to have been insufficiently\x01",
"investigated, so I came to see for myself...\x02\x03",
"Who would have thought the lot of you\x01",
"were conspiring with the sky bandits?\x02",
)
)
CloseMessageWindow()
ChrTalk( #34
0x103,
(
"#022FMight I get you to stop with the\x01",
"accusations, General?\x02\x03",
"We happened to find this place\x01",
"one step ahead of your men.\x02",
)
)
CloseMessageWindow()
ChrTalk( #35
0x8,
(
"#160FIf that's the truth, then why don't\x01",
"you tell me where the sky bandits\x01",
"are?\x02\x03",
"Are the hostages inside that\x01",
"airliner?\x02",
)
)
CloseMessageWindow()
ChrTalk( #36
0x102,
(
"#012FWe almost had the sky bandits,\x01",
"but they managed to escape...\x02\x03",
"And there are no hostages to be\x01",
"found here.\x02",
)
)
CloseMessageWindow()
ChrTalk( #37
0x8,
(
"#160FHmph! It looks like the truth\x01",
"has come out...\x02\x03",
"Most likely, you notified the sky\x01",
"bandits to let them know we were\x01",
"coming!\x02",
)
)
CloseMessageWindow()
ChrTalk( #38
0x101,
(
"#005FW-Wait a minute here!\x01",
"How about you cut with the crap!\x02",
)
)
CloseMessageWindow()
ChrTalk( #39
0x8,
(
"#162FMy thoughts exactly!\x02\x03",
"All right, men!\x01",
"Take them into custody!\x02",
)
)
CloseMessageWindow()
OP_A2(0x3FB)
NewScene("ED6_DT01/T1410 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_4_FF8 end
def Function_5_163D(): pass
label("Function_5_163D")
OP_77(0x0, 0x0, 0x0, 0x0, 0x0)
ClearMapFlags(0x1)
EventBegin(0x0)
OP_6D(-2670, 5000, -10370, 0)
OP_6C(315000, 0)
OP_6B(2400, 0)
SetChrFlags(0x101, 0x80)
SetChrFlags(0x102, 0x80)
SetChrPos(0x104, 0, 5000, -10200, 180)
SetChrPos(0x103, 0, 5000, -1650, 180)
FadeToBright(2000, 0)
OP_0D()
SetMessageWindowPos(72, 320, 56, 3)
SetChrName("Man's Voice")
AnonymousTalk( #40
(
"\x07\x00And that's about the gist of\x01",
"the sky bandit incident that\x01",
"occurred in northern Liberl...\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #41
"\x07\x00...\x02",
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #42
(
"\x07\x00And to think that the bankrupt\x01",
"Capua family drifted all the way\x01",
"down here.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #43
(
"\x07\x00You might be contacted by Liberl\x01",
"regarding the incident, so deal\x01",
"with it as you see fit.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #44
"\x07\x00...\x02",
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #45
(
"\x07\x00Yeah, it turns out I wasn't able to\x01",
"meet him in the end. It seems like\x01",
"something else must have come up.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #46
(
"\x07\x00Also, the connection with the sky bandit\x01",
"incident is still unknown, but it's clear\x01",
"that another power is at work here.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #47
"\x07\x00...\x02",
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #48
(
"\x07\x00No, it's not like that at all. I've also\x01",
"become acquainted with an interesting\x01",
"bunch.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #49
(
"\x07\x00The food's great, and there are babes\x01",
"everywhere. This is unquestionably\x01",
"my kind of country.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #50
(
"\x07\x00Maybe I'll just take up permanent\x01",
"residence here while I'm at it.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #51
"\x07\x00...\x02",
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #52
(
"\x07\x00All right, all right.\x01",
"There's no need to throw a fit.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #53
(
"\x07\x00Anyway, see what else you can find out.\x01",
"Just don't get caught looking into things\x01",
"by the chancellor.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #54
"\x07\x00I'll contact you again...my dear friend.\x02",
)
CloseMessageWindow()
OP_56(0x0)
OP_77(0xFF, 0xFF, 0xFF, 0x7D000, 0x0)
Sleep(1000)
ChrTalk( #55
0x104,
(
"#030FHa ha. I love messing with that guy.\x02\x03",
"He's just so stuffy and uptight\x01",
"that I can't help myself...\x02",
)
)
CloseMessageWindow()
NpcTalk( #56
0x103,
"Woman's Voice",
"A portable phone, huh...?\x02",
)
CloseMessageWindow()
NpcTalk( #57
0x103,
"Woman's Voice",
(
"Well, aren't you carrying around\x01",
"quite the nifty gadget?\x02",
)
)
CloseMessageWindow()
OP_62(0x104, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
TurnDirection(0x104, 0x103, 500)
def lambda_1BE2():
OP_6B(3000, 1000)
ExitThread()
QueueWorkItem(0x101, 1, lambda_1BE2)
OP_6D(-190, 5000, -6110, 1000)
Sleep(1000)
ChrTalk( #58
0x104,
"#030FSch-Schera...\x02",
)
CloseMessageWindow()
def lambda_1C20():
OP_6D(100, 5000, -9000, 2500)
ExitThread()
QueueWorkItem(0x101, 1, lambda_1C20)
OP_8E(0x103, 0x0, 0x1388, 0xFFFFE142, 0x7D0, 0x0)
ChrTalk( #59
0x103,
(
"#020FAnd the fact that you're carrying around\x01",
"an orbment that even the Zeiss Central\x01",
"Factory couldn't create, well...\x02\x03",
"How about you tell me who you really\x01",
"are?\x02",
)
)
CloseMessageWindow()
ChrTalk( #60
0x104,
(
"#030FCome on, Schera. Don't treat me like I'm\x01",
"some kind of stranger.\x02\x03",
"I'm Olivier Lenheim, the wandering bard\x01",
"and gifted musician you've come to adore.\x02\x03",
"But if you'd like to get to know me better,\x01",
"I'm sure we could arrange something...\x01",
"A little pillow talk, perhaps...?\x02",
)
)
CloseMessageWindow()
ChrTalk( #61
0x103,
(
"#020FHow about we skip the foreplay and go\x01",
"straight to the climax. Your cheap\x01",
"antics don't fool me, Olivier.\x02\x03",
"Or should I call you\x01",
"'Mr. Erebonian Operative'?\x02",
)
)
CloseMessageWindow()
ChrTalk( #62
0x104,
(
"#030F...\x02\x03",
"Heh. It looks like the title 'Silver Streak'\x01",
"isn't just for show.\x02\x03",
"So I guess you were pretending\x01",
"that you didn't notice in front\x01",
"of Estelle and Joshua, huh?\x02",
)
)
CloseMessageWindow()
ChrTalk( #63
0x103,
(
"#020FI don't want to worry those two\x01",
"any more than they already are.\x02\x03",
"So back to the subject at hand,\x01",
"why don't you start talking?\x02\x03",
"Who are you, and what are you\x01",
"doing in Liberl?\x02",
)
)
CloseMessageWindow()
ChrTalk( #64
0x104,
(
"#030FBefore that...I'm going to have to\x01",
"correct you on two points.\x02\x03",
"First off, these 'cheap antics', as\x01",
"you call them, are totally natural.\x02\x03",
"I'm not playacting or anything.\x01",
"That's just who I am.\x02",
)
)
CloseMessageWindow()
ChrTalk( #65
0x103,
(
"#020FOh, I'm sure.\x02\x03",
"So do you mean to tell me that you\x01",
"drank that wine without paying just\x01",
"because you felt like it?\x02\x03",
"And after that, being taken to the Haken\x01",
"Gate so you could gather information was\x01",
"all a part of the plan?\x02\x03",
"And you even set yourself up to run into\x01",
"us? I don't think so...\x02",
)
)
CloseMessageWindow()
ChrTalk( #66
0x104,
(
"#030FHeh... I'll leave that part\x01",
"up to your imagination.\x02\x03",
"The other thing I must correct you on\x01",
"is that this device is not an orbment.\x02\x03",
"It is an artifact which was unearthed\x01",
"in the Empire.\x02\x03",
"It can piggyback off any orbal communications\x01",
"system and its transmissions can be encrypted,\x01",
"so there's no worry about them being intercepted.\x02\x03",
"It comes in handy for a busy man such as myself.\x02",
)
)
CloseMessageWindow()
ChrTalk( #67
0x103,
(
"#020FAn artifact...like one of the sacred relics\x01",
"the Septian Church has stewardship over?\x02\x03",
"Now I'm all the more curious to\x01",
"know what you're after.\x02",
)
)
CloseMessageWindow()
ChrTalk( #68
0x104,
(
"#030FOh no, no, no, Schera.\x02\x03",
"You should never try to pry into\x01",
"the secrets of a mysterious beauty\x01",
"all at once.\x02",
)
)
CloseMessageWindow()
ChrTalk( #69
0x103,
(
"#020F...\x02\x03",
"How would you like to get to know a\x01",
"real woman? I'd be more than willing\x01",
"to show you with my whip.\x02",
)
)
CloseMessageWindow()
ChrTalk( #70
0x104,
(
"#030F...\x01",
"Schera...I don't see any humor\x01",
"in those eyes...\x02\x03",
"Well, jokes aside...\x02",
)
)
CloseMessageWindow()
ChrTalk( #71
0x103,
(
"#020FYou really should have just been\x01",
"straightforward from the beginning.\x02",
)
)
CloseMessageWindow()
ChrTalk( #72
0x104,
(
"#030FAs you have already figured out,\x01",
"my position is like that of an\x01",
"operative in the Empire.\x02\x03",
"But I have no intention of sabotaging\x01",
"anything or stealing classified\x01",
"information.\x02\x03",
"I merely came here to meet a certain\x01",
"someone.\x02",
)
)
CloseMessageWindow()
ChrTalk( #73
0x103,
"#020FA certain someone...?\x02",
)
CloseMessageWindow()
ChrTalk( #74
0x104,
(
"#030FYes, someone you know all too well.\x02\x03",
"The one lauded as the supreme swordsman\x01",
"and master strategist by the Royal Army.\x02\x03",
"The bracer with the special title belonging\x01",
"to but four people throughout the whole of\x01",
"the entire continent.\x02\x03",
"The Divine Blade--Cassius Bright\x01",
"is the one I seek.\x02",
)
)
CloseMessageWindow()
OP_A2(0x3FC)
NewScene("ED6_DT01/T1101 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_5_163D end
SaveToFile()
Try(main)
| [
"zj.yang@qq.com"
] | zj.yang@qq.com |
e3a20c33463c6737ce0a9c7ef85e374de481845f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /wsCshmu5zkN5BfeAC_11.py | 16ee4c11004690dd94154fad7dd29ce965bcbb66 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | """
Create a function that takes a number `n` and checks if each digit is
divisible by the digit on its left. Return a boolean array depending on the
condition checks.
### Examples
divisible_by_left(73312) ➞ [False, False, True, False, True]
# no element left to 7 = False
# 3/7 = False
# 3/3 = True
# 1/3 = False
# 2/1 = True
divisible_by_left(1) ➞ [False]
divisible_by_left(635) ➞ [False, False, False]
### Notes
The array should always start with `False` as there is no digit to the left of
the first digit.
"""
def divisible_by_left(n):
nums = list(map(int, str(n)))
return [False] + [
False if not i else (j / i).is_integer()
for i, j in zip(nums, nums[1:])
]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7586f2806ece479ea1e2d474b53558d8c88144b2 | fdc3d2daf484e8b500368987930d85b833d43fd6 | /sandbox/python/spectrogram3.py | 07bb5de54e8d25c13ce1d5af9224dc0a0bb27ecc | [] | no_license | luiarthur/signal_processing | 9d61b368603b965ab526b9226a1114022e08463b | f6f00ce57b94bfa020ac494fcb4e83549d05c902 | refs/heads/master | 2021-01-01T19:47:45.076460 | 2017-08-12T15:50:18 | 2017-08-12T15:50:18 | 98,684,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | import os
import numpy as np
from scipy.io import wavfile
from scipy import signal
import matplotlib.pyplot as plt
from notes import pitch, piano_freq, freq_dict, bin_spec
HOME = os.path.expanduser('~')
### Read a wavfile
(fs, x) = wavfile.read(HOME+"/wav/embraceableYou.wav")
if x.ndim > 1: x = x[:,1]
w_size = 4096
f, t, Zxx = signal.spectrogram(x, fs, nperseg=w_size, window=signal.get_window('blackman', Nx=w_size))
### Plot Spectrogram
### Spectrogram (traditional)
#Z = np.log(Zxx) - np.log(Zxx.max())
#plt.pcolormesh(t, f, Z, vmin=Z.min(), vmax=0, cmap=plt.cm.gist_heat)
### Spectrogram (peak frequency)
Z = np.exp( np.log(Zxx) - np.log(Zxx.max()) )
plt.pcolormesh(t, f, Z, vmin=.00001, vmax=.0005, cmap=plt.cm.gist_heat)
plt.title('STFT Magnitude')
plt.ylabel('Frequency [Hz]')
plt.ylim([0, 4200])
plt.xlabel('Time [sec]')
#plt.yticks(f, pitch(f+1E-6))
plt.show()
### Plot Spectrogram built-in
#Pxx, freqs, bins, im = plt.specgram(x, NFFT=w_size, Fs=fs, noverlap=100, cmap=plt.cm.gist_heat)
#plt.ylim([0, 4200])
#plt.show()
### Plot Spectrogram built-in (2)
#np.mean( np.exp(np.log(Pxx) - np.log(Pxx.max())) < .001 )
#plt.pcolormesh(bins, freqs, np.exp(np.log(Pxx) - np.log(Pxx.max())), cmap=plt.cm.gist_heat, vmin=.00001, vmax=.0001)
#plt.title('STFT Magnitude')
#plt.ylabel('Frequency [Hz]')
#plt.ylim([0, 4200])
#plt.xlabel('Time [sec]')
#plt.yticks(f, pitch(f))
#plt.show()
### Movie
# Animated per-frame spectrum viewer: one line artist updated per STFT frame.
from matplotlib.animation import FuncAnimation

# Detection threshold drawn as a horizontal reference line.
#thresh = .0005
thresh = .5

fig, ax = plt.subplots()
# Empty line to be filled by update(); animated for blitting.
ln, = plt.plot([], [], animated=True)
# On-plot clock, updated every frame.
title = ax.text(.8, .95, '', transform = ax.transAxes, va='center')

# Label the log-frequency axis with pitch names for each STFT bin.
#plt.xticks(np.log(piano_freq), pitch(piano_freq), rotation=90)
plt.xticks(np.log(f), pitch(f), rotation=90)
plt.axhline(y=thresh, color='grey')
def init():
    """Set the fixed axis limits once before the animation starts."""
    # Spectra are normalised per frame, so a fixed vertical window around
    # the threshold line is enough.
    ax.set_ylim(0, thresh * 2)
    # Log-frequency span of the piano range: A0 (27.5 Hz) .. C8 (4186 Hz).
    ax.set_xlim(np.log(27.5), np.log(4186))
    return [ln, title]
def update(i):
    """Render frame *i*: the spectrum at time t[i], scaled to its own peak."""
    frame = Zxx[:, i]
    # Per-frame normalisation, computed in log space exactly as the
    # original plot above does it.
    spectrum = np.exp(np.log(frame) - np.log(frame.max()))
    ln.set_data(np.log(f), spectrum)
    title.set_text("time: " + str(np.round(t[i], 2)) + "s")
    return [title, ln]
# Play back at the real frame rate: mean spacing between STFT frames,
# converted from seconds to milliseconds.
delay = (t[1:] - t[:-1]).mean() * 1000

ani = FuncAnimation(fig, update, frames=range(t.size),
                    init_func=init, blit=True, repeat=False, interval=delay)
plt.show()
| [
"luiarthur@gmail.com"
] | luiarthur@gmail.com |
5410fb3ad8321f769a3d458d24d4a3f211d4f57a | e3bfccdcb93891232e414747dd9f23d9a3137755 | /docs/conf.py | 725965d0deb5bd2caa6c94ca7e8192c7e8ffaa29 | [
"MIT"
] | permissive | jclachance/BOFdat | 59d09930d3abdb61a48ec9901b8c8708abf2c279 | cf2de074c2d789ad47b890083cbbffadab40f177 | refs/heads/master | 2023-07-22T22:28:19.880579 | 2019-07-23T14:25:16 | 2019-07-23T14:25:16 | 91,520,195 | 25 | 6 | NOASSERTION | 2023-07-06T21:16:15 | 2017-05-17T01:26:21 | Jupyter Notebook | UTF-8 | Python | false | false | 6,307 | py | # -*- coding: utf-8 -*-
#
# biomass documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 19 19:59:51 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0,os.path.abspath('.'))
sys.path.insert(0,os.path.abspath('../'))
# MOCK STUFF
from recommonmark.parser import CommonMarkParser
source_parsers = {'.md': CommonMarkParser}
source_suffix = ['.rst', '.md']
class Mock(object):
    """Stand-in that fakes heavy native dependencies during doc builds.

    Any attribute access, call, or construction yields another Mock, so
    `import`-time code in the documented package runs without the real
    libraries installed.  The module-introspection dunders are special-
    cased to look like real filesystem paths.
    """

    __all__ = []

    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments.
        return

    def __call__(self, *args, **kwargs):
        # Calling a mock just yields a fresh mock.
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        return '/dev/null' if name in ('__file__', '__path__') else Mock()
# Heavyweight / native dependencies that cannot be installed on the docs
# builder; each import name is satisfied by its own Mock instance.
MOCK_MODULES = ['numpy','numpy.linalg','python-scipy',
                'scipy', 'scipy.sparse', 'scipy.io', 'scipy.stats', 'scipy.spatial.distance',
                'sklearn','sklearn.metrics','sklearn.cluster',
                'glpk', 'gurobipy', 'gurobipy.GRB', 'cplex', 'cplex.exceptions','pp',
                'tabulate', 'libsbml','argparse', 'pandas',
                'matplotlib','matplotlib.pyplot','pylab','seaborn','seaborn.color_palette','biopython',
                'cobra','cobra.flux_analysis','cobra.util','cobra.util.solver','cobra.io', 'cobra.io.json', 'cobra.io.dict']

sys.modules.update((module_name, Mock()) for module_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# Document Python Code
autoapi_type = 'python'
autoapi_dirs = ['..']
autoapi_ignore = ['.tox', '.pytest_cache', 'scripts', 'benchmarks']
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'nbsphinx'
]
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# 'autoapi.extension',
# Add any paths that contain templates here, relative to this directory.
templates_path = ['ntemplates']

# NOTE: source_suffix is already configured near the top of this file as
# ['.rst', '.md'] alongside source_parsers (recommonmark).  The duplicate
# `source_suffix = '.rst'` assignment that used to live here silently
# overrode that list and disabled Markdown sources, so it was removed.

# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BOFdat'
copyright = u'2018, Jean-Christophe Lachance'
author = u'Jean-Christophe Lachance'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
# exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
#todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme

html_theme = "sphinx_rtd_theme"
# Fixed: the Sphinx option is html_theme_path; the previous name
# `html_path` is not a recognised setting, so the theme search path
# was silently ignored.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['nstatic']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'BOFdatdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'biomass.tex', u'biomass Documentation',
u'Jean-Christophe Lachance', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'biomass', u'biomass Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'biomass', u'biomass Documentation',
author, 'biomass', 'One line description of project.',
'Miscellaneous'),
]
| [
"jelachance@eng.ucsd.edu"
] | jelachance@eng.ucsd.edu |
4b9c499c4cf735c4bbb7e381d11f44e4a1d22ac8 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/byte/Schema+Instance/NISTXML-SV-IV-atomic-byte-maxExclusive-5-3.py | f378841d6d0ec8179638494024c1501e673c1b5e | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 259 | py | from output.models.nist_data.atomic.byte.schema_instance.nistschema_sv_iv_atomic_byte_max_exclusive_5_xsd.nistschema_sv_iv_atomic_byte_max_exclusive_5 import NistschemaSvIvAtomicByteMaxExclusive5
obj = NistschemaSvIvAtomicByteMaxExclusive5(
value=-73
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
7603c91ae6ea657f35d7fc00dd178dec90f24380 | ab13c7352dd5174032ff9a5806d1e4fa3c38b8b2 | /test_circuit.py | 6216cd1114ed5db5a7fa04b1a1c721abdff82ea5 | [] | no_license | will8s/pj-theorie-graphe | ab8f9f3ec00d1b49e3f8cce53aba653a756d5a80 | 7be8de13678bc42eb968c2818ed224776820b8da | refs/heads/master | 2022-04-25T18:00:10.900120 | 2020-04-25T20:49:27 | 2020-04-25T20:49:27 | 255,648,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | def test_circuit(self):
#on utilise la matrice de d'adjacence pour savoir si il y a un circuit
#definition circuit :
#un circuit n'a pas de point d'entré : pas de noeuds qui sont seulement des etats initiaux
#un circuit n'a pas de point d'sorti : pas de noeuds qui sont seulement des états finaux
http://www.momirandum.com/graphes/algorithmes-generaux/CircuitRosalindMarimond.html
https://www.myefrei.fr/moodle/pluginfile.php/139023/mod_resource/content/2/TG-Exercices%202019-20%20solutions.pdf
#la page 11 t'explique comment detecter un circuit par suppression de point d'entrée/initial
"""
determiner les entrées du graphe et les sorties
determiner les element precedent et suivant d'un element(stocké)
if on regarde si chaque element est un etat inital et un etat terminal d'un arc si oui :
un circuit
else
on supprime de self.list_graphe_adja les elements qui sont seulement initiaux (ayant la valeur 1 et 0)
on supprime de self.list_graphe_adja les elements qui sont seulement terminaux (ayant la valeur 1 et 0)
"""
pour résumer si l'élément (de valeur 1) dans la matrice d'adjacence n'a pas d'élément avant lui tu le supprimes
et il n'a pas d'élément apres lui tu le supprime
donc si il y reste aucun element c'est pas un circuit sinon si il reste des elements qui forme une boucle c'est un circuit | [
"williamlemoal@gmail.com"
] | williamlemoal@gmail.com |
77a69cba5f3bc6967941cc5a0afc8c514aed3461 | e7000ae9b0704a4edeaed1f24ce87442e616059a | /django/manage.py | 7a9b7b9764f87f4c2677f4e85a6f2fbc6cf4b6dd | [] | no_license | adamlj/bcg | 811c4c1c706440ae1ded35a6415874bc8e90d84d | 3b0811d08d9aaf077d1df39f7adfb82f29bab9d1 | refs/heads/master | 2022-12-12T17:49:08.167722 | 2020-05-11T13:39:02 | 2020-05-11T13:39:02 | 253,270,016 | 0 | 0 | null | 2022-12-08T04:00:23 | 2020-04-05T15:41:40 | JavaScript | UTF-8 | Python | false | false | 599 | py | #!/usr/bin/env python
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    # Point Django at the project's settings module unless the caller
    # already set one.  (The original also read an unused ENV variable
    # and used an f-string with no placeholder; both removed.)
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"adam.lj@gmail.com"
] | adam.lj@gmail.com |
f2c1fa65055ab7e99ef7cf1412927d159b6f24df | 2e42938e48b54fc2fbdc72c0c251f63d43710f68 | /autodecrypt/pongo.py | b9c2327f161b9e5874243094ae4cb26f20c53c2d | [
"MIT"
] | permissive | matteyeux/autodecrypt | b8b3e16d99d2c59b61c8ee6f5b9cbdae09cfb6e9 | 15cb32927c1be8a8ce583e2c0a742561b43f274b | refs/heads/main | 2023-06-07T12:48:14.479707 | 2023-03-01T20:34:14 | 2023-03-01T20:34:14 | 171,001,021 | 161 | 39 | MIT | 2023-05-31T16:56:32 | 2019-02-16T12:23:37 | Python | UTF-8 | Python | false | false | 633 | py | import usb.core
def pongo_send_command(command: str):
    """Send command to Pongo device."""
    # Locate the Pongo USB device (Apple vendor id 0x05AC, product 0x4141).
    device = usb.core.find(idVendor=0x05AC, idProduct=0x4141)
    if device is None:
        return None
    device.set_configuration()
    # Two control transfers: the first resets command state, the second
    # pushes the newline-terminated command text.
    device.ctrl_transfer(0x21, 4, 0, 0, 0)
    device.ctrl_transfer(0x21, 3, 0, 0, command + "\n")
def pongo_get_key() -> str:
    """Grab key from Pongo device."""
    device = usb.core.find(idVendor=0x05AC, idProduct=0x4141)
    if device is None:
        return None
    device.set_configuration()
    # Read up to 512 bytes of console output from the device.
    raw = device.ctrl_transfer(0xA1, 1, 0, 0, 512).tobytes()
    # The key is the second-to-last whitespace-separated token of the output.
    return raw.decode('utf-8').split()[-2]
| [
"mathieu.hautebas@gmail.com"
] | mathieu.hautebas@gmail.com |
212803a1285712ef30c6bf9d40459200d7aecd3a | e520cf1a063fe98f5c8815f4bcc8f7bdce7343d2 | /continue.py | 72e295eef08beb24afb394f3d1af87b5cc35f620 | [] | no_license | meinagong/python | 4282ad57cd07070e6f42233af898a879fcce18b3 | 6a7a01cd9d5da6f5762c0878d39972354547fec1 | refs/heads/main | 2022-08-20T00:30:46.001070 | 2022-07-29T11:18:40 | 2022-07-29T11:18:40 | 397,453,951 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | numer = 0
denom = 0
# Read numerator/denominator pairs and print their quotient; a denominator
# of 0 is skipped to avoid ZeroDivisionError, and -1 ends the loop.
# NOTE(review): entering -1 still prints one final quotient (numer / -1)
# before the loop exits -- confirm this sentinel behavior is intended.
while denom != -1:
    print("Enter a numerator: ")
    numer = float(input())
    print("Enter a denominator: ")
    denom = float(input())
    if denom == 0:
        continue
    print(numer / denom)
| [
"meinagong@gmail.com"
] | meinagong@gmail.com |
ac7dbcc29cd94b930311b689f769330072f16e55 | ef87282b947d99bae7e15e91990c21f4078781c9 | /miles_to_km.py | 988f29cfa1b05ebcdd36ea07737596958bb23026 | [] | no_license | Srinivasaraghavansj/Python-Programs | d61cf559e72f5f3c84fefd71905223f532ac5878 | c97ca67d5e555aa86f92bdb4453dddafd8de84fb | refs/heads/main | 2023-05-29T13:26:19.472261 | 2021-06-20T10:16:06 | 2021-06-20T10:16:06 | 378,598,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | def convert_distance(miles):
    # 1 mile = 1.6 km; round to one decimal place for display.
    km = round(miles * 1.6,1)
    result = f"{miles} miles equals {km} km"
    return result
# Demo calls; expected output is shown in the trailing comments.
print(convert_distance(12)) # Should be: 12 miles equals 19.2 km
print(convert_distance(5.5)) # Should be: 5.5 miles equals 8.8 km
print(convert_distance(11)) # Should be: 11 miles equals 17.6 km
"noreply@github.com"
] | Srinivasaraghavansj.noreply@github.com |
6d74c1aed3f04b1afa8580c60a0f4ef52f33cf6a | d6c890cafcabb30215317fda876c969d541ddd5e | /classify.py | 0be93abeadaccfb7827ec829867ad7be088714b3 | [] | no_license | kfcpaladin/makers-games-boral | 838f97d6c278fe59035f00c4935889edca404925 | 6c34329595ec15f5705b576b266d982a93961a37 | refs/heads/master | 2022-12-08T07:55:36.313523 | 2019-07-10T11:12:40 | 2019-07-10T11:12:40 | 196,190,414 | 0 | 0 | null | 2022-11-21T22:04:35 | 2019-07-10T11:11:50 | Python | UTF-8 | Python | false | false | 1,712 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import sys
import os
import base64
# Disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
def getModelResponse(imagePath):
    """Classify the image at imagePath with a retrained TensorFlow graph.

    Loads labels from logs/output_labels.txt and the frozen graph from
    logs/output_graph.pb, then returns a {label: confidence} dict with
    confidences rounded to 6 decimal places.
    """
    print ("getting model response")
    # Read the image_data
    image_data = tf.gfile.FastGFile(imagePath, 'rb').read()
    print ("image_data {}".format(image_data))
    # Loads label file, strips off carriage return
    label_lines = [line.rstrip() for line in tf.gfile.GFile("logs/output_labels.txt")]
    print ("label_lines {}".format(label_lines))
    # Unpersists graph from file
    with tf.gfile.FastGFile("logs/output_graph.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    with tf.Session() as sess:
        # Feed the image_data as input to the graph and get first prediction
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        predictions = sess.run(softmax_tensor, feed_dict={'Placeholder:0': image_data})
        # Sort to show labels of first prediction in order of confidence
        top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
        print("4")
        response = {}
        print("5")
        for node_id in top_k:
            human_string = label_lines[node_id]
            score = predictions[0][node_id]
            score = round(score.item(),6) # convert to float and round
            #print('%s (score = %.5f)' % (human_string, score))
            response[human_string] = score
        return (response) # {'a': 'confidence'} in dict
# Smoke-run the classifier on a sample image when the module is executed.
getModelResponse("tmp/a.JPEG")
"doommaster3214@gmail.com"
] | doommaster3214@gmail.com |
499219ab593d7651569b9fc86e1d2809c14ab393 | f95cd23d01ebc53c872052a299ffdbe26736c449 | /4. String Algorithms/Programming-Assignment-1/trie_matching/trie_matching.py | fd32086f09af9c49a0e5a32c32e6d85c42bc3caf | [
"MIT"
] | permissive | manparvesh/coursera-ds-algorithms | 4126492a1ea46694c6a5cfab843addac68c21749 | 99e08921c0c0271e66a9aea42e7d38b7df494007 | refs/heads/master | 2021-06-18T15:22:35.467761 | 2020-10-05T02:54:33 | 2020-10-05T02:54:33 | 101,484,382 | 63 | 50 | MIT | 2023-06-16T20:11:58 | 2017-08-26T12:28:34 | C++ | UTF-8 | Python | false | false | 1,212 | py | # python3
import sys
def build_trie(patterns):
    """Build a trie over patterns as {node_id: {char: child_id}}; node 0 is the root."""
    trie = {0: {}}
    next_id = 0
    for word in patterns:
        node = trie[0]
        for ch in word:
            child = node.get(ch)
            if child is not None:
                # Edge already exists: just descend.
                node = trie[child]
            else:
                # Allocate a fresh node and hang it off the current one.
                next_id += 1
                trie[next_id] = {}
                node[ch] = next_id
                node = trie[next_id]
    return trie
def prefix_trie_matching(text, trie):
    """Return True if some pattern stored in trie is a prefix of text."""
    padded = text + "$"  # sentinel so indexing never runs past the text
    node = trie[0]
    pos = 0
    while True:
        if not node:
            # Reached a leaf: an entire pattern has been spelled out.
            return True
        ch = padded[pos]
        if ch in node:
            node = trie[node[ch]]
            pos += 1
        else:
            return False
def solve(text, n, patterns):
    """Return every starting index in text where one of patterns occurs.

    n (the pattern count) is kept for interface compatibility but is not
    needed, since patterns carries the full list.
    """
    trie = build_trie(patterns)
    return [pos for pos in range(len(text))
            if prefix_trie_matching(text[pos:], trie)]
# Read the text, the number of patterns, then the patterns themselves from
# stdin, and print all match positions separated by spaces.
text = sys.stdin.readline().strip()
n = int (sys.stdin.readline().strip())
patterns = []
for i in range (n):
    patterns += [sys.stdin.readline().strip()]
ans = solve(text, n, patterns)
sys.stdout.write(' '.join(map(str, ans)) + '\n')
"manparveshsinghrandhawa@gmail.com"
] | manparveshsinghrandhawa@gmail.com |
47a112ee16196e739b06cf29dc87bb9fe6694f87 | 18508cea9458b2879017b44e6f18520cd8cf4f6c | /UCMDBPython/src/eview_resources.py | ec0e87b018aafd0657874aaf533d7204eb5a8157 | [] | no_license | kvt11/dd-git | 7d4935962e06d835ad0023c4abb185876a5a9e77 | 49aafa7081b861c5f6d0e1753b425e78948116d0 | refs/heads/master | 2022-11-23T19:03:19.763423 | 2016-04-04T14:54:18 | 2016-04-04T14:54:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,215 | py | #
# Host Resource Discovery by Eview
#
# Created on Sep 20, 2010
#
# @author: kchhina
#
# CP8 - Initial version
# CP9 - Added Dasd storage discovery for CP9 March 31,2011 podom
# CP9 - Added FORMAT=SHORT and MAX=* to commands for defects to force the output of the commands podom
# CP10 - Changed script to support LONG format returned from Network commands on IPV6 enabled systems QCCR1H38586 podom
# CP10 - Fixed QCCR1H38525 - Duplicate Software CIs in Bulk
# CP10 - Fixed QCCR1H6397 - Empty Volume Group causes failure on Volume Group Discovery
# CP10 - Add Job Discovery
# CP10 - CUP 1 to fix urgent issue with the Netlinks module being deprecated.
# CP12 - Discover CPU types of ZIIP and ZAAP processors and set the CPU type attribute
# CP15 - Change Jobs Discovery to not discover time sharing users. TSU type was incorrectly added as a Job.
# Cp15 - Change interface discovery to add LPAR name to Linkname. This will prevent duplicate interfaces if MAC not available. QCIM1H94721
import string, re, logger, modeling
import eview_lib
from appilog.common.system.types import ObjectStateHolder
from appilog.common.system.types.vectors import ObjectStateHolderVector
from eview_lib import isNotNull, isnumeric, isNull
from com.hp.ucmdb.discovery.library.common import CollectorsParameters
from string import upper, lower
from modeling import _CMDB_CLASS_MODEL
import eview_netutils
''' Variables '''
# NOTE(review): module-level 'global' statements are no-ops in Python; these
# names are presumably assigned by the discovery framework at runtime -- confirm.
global Framework
# Job-parameter names supplied by the UCMDB adapter.
PARAM_HOST_ID = 'hostId'
PARAM_LPAR_NAME = 'LparName'
global knownPortsConfigFile
# Port-type discriminators (TCP vs UDP) used when modeling services.
TCP_PORT_TYPE_ENUM = 1
UDP_PORT_TYPE_ENUM = 2
# z/OS operator (MVS) commands issued through the EView agent; '%s' is filled
# in with the TCP/IP stack (started task) name where present.
_CMD_D_SYMBOLS = 'D SYMBOLS'
_CMD_D_M_CPU = 'D M=CPU'
_CMD_D_TCPIP = 'D TCPIP'
_CMD_TCPIP_NETSTAT_HOME = 'D TCPIP,%s,NETSTAT,HOME,FORMAT=LONG'
_CMD_D_SSI = 'D SSI'
_CMD_D_NET_MAJNODES = 'D NET,MAJNODES,MAX=*'
_CMD_D_ASM = 'D ASM'
_CMD_D_PROD_STATE = 'D PROD,STATE'
_CMD_D_PROD_REGISTERED = 'D PROD,REGISTERED'
_CMD_D_XCF_GRP = 'D XCF,GRP'
_CMD_D_XCF_GRP_ALL = 'D XCF,GRP,%s,ALL'
_CMD_D_TCPIP_NETSTAT_CONN = 'D TCPIP,%s,NETSTAT,CONN,FORMAT=LONG,MAX=*'
_CMD_D_TCPIP_NETSTAT_ROUTE = 'D TCPIP,%s,NETSTAT,ROUTE,FORMAT=LONG'
_CMD_D_TCPIP_NETSTAT_DEV = 'D TCPIP,%s,NETSTAT,DEV,FORMAT=LONG'
_CMD_D_TCPIP_NETSTAT_ARP = 'D TCPIP,%s,NETSTAT,ARP,FORMAT=LONG,MAX=*'
_CMD_I_DASD = '*'
# Classes
class DevLink:
    """Holds one TCP/IP device/link pair parsed from 'NETSTAT,DEV' output."""
    # Class-level defaults so attribute reads are safe on bare instances.
    devName = ''
    devType = ''
    linkName = ''
    linkType = ''
    linkStatus = ''
    linkMac = ''
    def __init__(self, devName, devType, linkName, linkType, linkStatus, linkMac):
        # Store every constructor argument under the attribute of the same name.
        for attrName, attrValue in (('devName', devName), ('devType', devType),
                                    ('linkName', linkName), ('linkType', linkType),
                                    ('linkStatus', linkStatus), ('linkMac', linkMac)):
            setattr(self, attrName, attrValue)
''' Methods '''
def appendToList(originalList, newList):
    """Append every non-empty value of newList to originalList and return it.

    When originalList is null a fresh list is created first, so callers may
    pass None.
    """
    resultList = originalList
    if isNull(resultList):
        resultList = []
    for value in newList:
        if isNotNull(value):
            resultList.append(value)
    return resultList
def appendToDictionary(originalDict, newDict):
    """Merge entries of newDict into originalDict (created when null) and return it.

    Only non-empty values are copied, and keys already present in
    originalDict are never overwritten.
    """
    mergedDict = originalDict
    if isNull(mergedDict):
        mergedDict = {}
    for (key, value) in newDict.items():
        if isNotNull(value) and not mergedDict.has_key(key):
            mergedDict[key] = value
    return mergedDict
def getCpuStatus(cpuStatusSymbol):
    """Map the status flag character from 'D M=CPU' output to a readable string.

    Unknown flags and empty input yield ''.
    """
    statusByFlag = {
        '+': 'ONLINE',
        '-': 'OFFLINE',
        '.': 'DOES NOT EXIST',
        'W': 'WLM-MANAGED',
        'N': 'NOT AVAILABLE',
    }
    if isNotNull(cpuStatusSymbol):
        # The raw field may be longer than one character; only the first
        # character carries the status flag.
        return statusByFlag.get(cpuStatusSymbol[0], '')
    return ''
def processXcfGroups(xcfGroupsList):
    """Flatten nested lists of 'NAME(count)' tokens into [name, count] pairs."""
    groupPairs = []
    groupPattern = re.compile('(.*)\((\d*)\)')
    for tokenRow in xcfGroupsList:
        for token in tokenRow:
            # split the token into the group name and its member count ---------
            parsed = groupPattern.match(token)
            if parsed:
                groupPairs.append([parsed.group(1), parsed.group(2)])
    return groupPairs
''' EView Command Execution Methods '''
def ev2_getSymlistOutput(ls):
    """Run 'D SYMBOLS' via the EView shell ls and return a {name: value} map
    of the LPAR's system symbols (the '&'/'.' decoration is stripped by the
    line delimiters below)."""
    # process SYMLIST ----------------------------------------------------------
    symbolsMap = {} # {name:value}
    output = ls.evMvsCmd(_CMD_D_SYMBOLS)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        # Each response line looks like: &NAME.  = "VALUE"
        symbolsList = output.getValuesFromLineList('s', output.cmdResponseList, '&', '\.\s+=\s+"', '"')
        for symbols in symbolsList:
            if len(symbols) == 4:
                symbolName = symbols[1]
                symbolValue = symbols[2]
                if isNotNull(symbolName) and isNotNull(symbolValue):
                    symbolsMap[symbolName] = symbolValue
    return symbolsMap
def ev3_getCpulistOutput(ls):
    """Run 'D M=CPU' and return (cpuLists, cpcSi, cpcId, cpcName, lpId, lpName).

    cpuLists rows are [CPU ID, decoded status, serial, raw status flag];
    the CPC/LP fields are scraped from the 'CPC SI/ID/NAME' and 'LP NAME/ID'
    lines of the same command output.
    """
    cpuLists = [] # [CPU ID, CPU STATUS, CPU SERIAL, CPU RAW STATUS]
    cpcSi = ''
    cpcName = ''
    cpcId = ''
    lpName = ''
    lpId = ''
    output = ls.evMvsCmd(_CMD_D_M_CPU)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        # first search for CPUs ------------------------------------------------
        headerColumns = ['ID', 'CPU', 'SERIAL']
        tableBeginPattern = 'PROCESSOR STATUS'
        tableEndPattern = 'CPC ND ='
        firstColumnPaddingChar = ''
        includeRegexPattern = ''
        ignorePatterns = []
        cpuTable = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
        for i in range(1, len(cpuTable)):
            # Spencer: Add the raw entry for the status to the cpuLists array
            cpuLists.append([cpuTable[i][0], getCpuStatus(cpuTable[i][1]), cpuTable[i][2], cpuTable[i][1]])
        # then search for CPC SI -----------------------------------------------
        cpcSiList = output.getValuesFromLineList('s', output.cmdResponseList, 'CPC SI =')
        if isNotNull(cpcSiList) and len(cpcSiList) > 0 and isNotNull(cpcSiList[0][1]):
            cpcSi = cpcSiList[0][1]
        # then search for CPC ID -----------------------------------------------
        cpcIdList = output.getValuesFromLineList('s', output.cmdResponseList, 'CPC ID =')
        if isNotNull(cpcIdList) and len(cpcIdList) > 0 and isNotNull(cpcIdList[0][1]):
            cpcId = cpcIdList[0][1]
        # then search for CPC Name ---------------------------------------------
        cpcNameList = output.getValuesFromLineList('s', output.cmdResponseList, 'CPC NAME =')
        if isNotNull(cpcNameList) and len(cpcNameList) > 0 and isNotNull(cpcNameList[0][1]):
            cpcName = cpcNameList[0][1]
        # finally search for LP NAME and LP ID ---------------------------------
        lpList = output.getValuesFromLineList('s', output.cmdResponseList, 'LP NAME =', 'LP ID =')
        if isNotNull(lpList) and len(lpList) > 0 and isNotNull(lpList[0][1]):
            lpName = lpList[0][1]
        if isNotNull(lpList) and len(lpList) > 0 and isNotNull(lpList[0][2]):
            lpId = lpList[0][2]
    return (cpuLists, cpcSi, cpcId, cpcName, lpId, lpName)
def ev4_getTcpStackNameOutput(ls):
    """Run 'D TCPIP' and return the list of running TCP/IP stack names
    (column 'TCPIP NAME' of the EZAOP50I status report)."""
    # get the running TCP stacks -----------------------------------------
    tcpStackList = []
    output = ls.evMvsCmd(_CMD_D_TCPIP)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        headerColumns = ['COUNT', 'TCPIP NAME', 'VERSION', 'STATUS']
        tableBeginPattern = 'EZAOP50I TCPIP STATUS REPORT'
        tableEndPattern = 'END TCPIP STATUS REPORT'
        firstColumnPaddingChar = ' '
        includeRegexPattern = ''
        ignorePatterns = ['------']
        stacks = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
        for i in range(1, len(stacks)):
            if len(stacks[i]) == 4 and isNotNull(stacks[i][1]):
                tcpStackList.append(stacks[i][1])
    return tcpStackList
def ev5_getHomelistOutput(ls, tcpStack):
    """Run 'D TCPIP,<stack>,NETSTAT,HOME,FORMAT=LONG' and return the HOME
    list as [[address, linkname, flags], ...].

    The LONG format spreads each entry over LINKNAME:/ADDRESS:/FLAGS:
    lines; an entry is emitted once a complete triple has been seen.
    NOTE(review): the state machine assumes LINKNAME precedes ADDRESS and
    that FLAGS (or another line) follows -- if ADDRESS appears first,
    'linkname' would be unbound; confirm against real command output.
    """
    # process HOMELIST ---------------------------------------------------------
    homeLists = [] # [ADDRESS, LINK, FLG]
    homelistentry = []
    complete = 0
    output = ls.evMvsCmd(_CMD_TCPIP_NETSTAT_HOME % tcpStack)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        for i in range(len(output.cmdResponseList)):
            retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'LINKNAME:')
            if len(retVal) > 0 and isNotNull(retVal[1]):
                linkname = retVal[1]
                complete = 1
                continue
            retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'ADDRESS:')
            if len(retVal) > 0 and isNotNull(retVal[1]):
                address = retVal[1]
                if eview_netutils._isValidIp (address):
                    complete = 1
                else:
                    # keep the link but drop the unusable address value
                    address = None
                continue
            retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'FLAGS:')
            if len(retVal) > 0 and isNotNull(retVal[1]):
                flags = retVal[1]
                complete = 1
            else:
                flags = ' '
            if complete:
                homelistentry = [address, linkname, flags]
                homeLists.append (homelistentry)
                complete = 0
    return homeLists
def ev6_getSsilistOutput(ls):
    """Run 'D SSI' and return subsystem rows [name, dynamic, status, commands].

    Names and their DYNAMIC/STATUS/COMMANDS parameters appear on alternating
    lines, so the two extracted lists are zipped by position.
    """
    # process SSILIST ----------------------------------------------------------
    ssiList = [] # [Name, Dynamic, Status, Commands]
    output = ls.evMvsCmd(_CMD_D_SSI)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        # first get the subsystem names from alternate lines -------------------
        ssiListOutput = output.getRegexedValuesFromList(output.cmdResponseList, '^SUBSYS=(.*)$')
        # then get the subsystem parameters from alternate lines ---------------
        ssiParamList = output.getValuesFromLineList('s', output.cmdResponseList, 'DYNAMIC=', 'STATUS=', 'COMMANDS=')
        if len(ssiListOutput) == len(ssiParamList): # TODO change this condition to something more air tight
            for i in range(len(ssiListOutput)):
                if isNotNull(ssiListOutput[i][0]):
                    ssiList.append([ssiListOutput[i][0], ssiParamList[i][1], ssiParamList[i][2], ssiParamList[i][3]])
    return ssiList
def ev7_getMajorNodesOutput(ls):
    """Run 'D NET,MAJNODES,MAX=*' and return VTAM major nodes as
    [name, type, status] rows."""
    # process MAJOR NODES ------------------------------------------------------
    majorNodesLists = [] # [Name, Type, Status]
    output = ls.evMvsCmd(_CMD_D_NET_MAJNODES)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        majNodeList = output.getValuesFromLineList('s', output.cmdResponseList, '\S+\s(\S+)', 'TYPE =', ',')
        for majNodes in majNodeList:
            if len(majNodes) == 5:
                majorNodesLists.append([majNodes[1], majNodes[3], majNodes[4]])
    return majorNodesLists
def ev8_getPagelistOutput(ls):
    """Run 'D ASM' and return page dataset rows
    [type, used%, status, device, DSN name]."""
    # process PAGE LIST --------------------------------------------------------
    pageLists = [] # [Type, Used, Status, Device, DSN_Name]
    output = ls.evMvsCmd(_CMD_D_ASM)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        pageLists = output.getRegexedValuesFromList(output.cmdResponseList, '^(\S+)\s+(\d+)%\s+(\S+)\s+(\S+)\s+(\S+)$')
    return pageLists
def ev9_getListProdOutput(ls):
    """Run 'D PROD,STATE' and 'D PROD,REGISTERED' and return product rows
    [id, name, feature, version, owner, 'STATE'|'REGISTERED'].

    Rows flagged 'D' (disabled) in the STATE report are skipped.
    """
    # process LISTPROD ---------------------------------------------------------
    prodLists = [] # [ID, name, feature, version, owner, state]
    headerColumns = ['S', 'OWNER', 'NAME', 'FEATURE', 'VERSION', 'ID']
    tableBeginPattern = 'IFA111I'
    tableEndPattern = ''
    firstColumnPaddingChar = ''
    includeRegexPattern = ''
    ignorePatterns = []
    output = ls.evMvsCmd(_CMD_D_PROD_STATE)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        prods = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
        for i in range(1, len(prods)):
            if len(prods[i]) == 6:
                if prods[i][0] != 'D':
                    prodLists.append([prods[i][5], prods[i][2], prods[i][3], prods[i][4], prods[i][1], 'STATE'])
    output = ls.evMvsCmd(_CMD_D_PROD_REGISTERED)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        prods = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
        for i in range(1, len(prods)):
            if len(prods[i]) == 6:
                prodLists.append([prods[i][5], prods[i][2], prods[i][3], prods[i][4], prods[i][1], 'REGISTERED'])
    return prodLists
def ev10_getXcfGroupOutput(ls):
    """Run 'D XCF,GRP' and return [groupName, memberCount] pairs.

    The display packs up to three 'NAME(count)' tokens per line, so four
    regexes cover lines holding 3 (with a prefix), 3, 2, or 1 token.
    """
    groups = []
    output = ls.evMvsCmd(_CMD_D_XCF_GRP)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        # get the groups from the first line -----------------------------------
        xcfGroupsList = output.getRegexedValuesFromList(output.cmdResponseList, ".*\s(\S+\(\d+\))\s+(\S+\(\d+\))\s+(\S+\(\d+\))$")
        groups.extend(processXcfGroups(xcfGroupsList))
        # get the set of three groups ------------------------------------------
        xcfGroupsList = output.getRegexedValuesFromList(output.cmdResponseList, "^(\S+\(\d+\))\s+(\S+\(\d+\))\s+(\S+\(\d+\))$")
        groups.extend(processXcfGroups(xcfGroupsList))
        # get the set of two groups --------------------------------------------
        xcfGroupsList = output.getRegexedValuesFromList(output.cmdResponseList, "^(\S+\(\d+\))\s+(\S+\(\d+\))$")
        groups.extend(processXcfGroups(xcfGroupsList))
        # get the set of single group ------------------------------------------
        xcfGroupsList = output.getRegexedValuesFromList(output.cmdResponseList, "^(\S+\(\d+\))$")
        groups.extend(processXcfGroups(xcfGroupsList))
    return groups
def ev11_getXcfMemberOutput(ls, groupName, xcfGroupsDict):
    """Run 'D XCF,GRP,<group>,ALL' and append the group's member rows
    [member, system, job id, status] to xcfGroupsDict[groupName].

    Mutates xcfGroupsDict in place; returns None.
    """
    output = ls.evMvsCmd(_CMD_D_XCF_GRP_ALL % groupName)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        headerColumns = ['MEMBER NAME:', 'SYSTEM:', 'JOB ID:', 'STATUS:']
        tableBeginPattern = 'INFORMATION FOR GROUP'
        tableEndPattern = 'FOR GROUP'
        firstColumnPaddingChar = ''
        includeRegexPattern = ''
        ignorePatterns = []
        prods = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
        for i in range(1, len(prods)):
            if len(prods[i]) == 4:
                if xcfGroupsDict.has_key(groupName):
                    tempList = xcfGroupsDict[groupName]
                    tempList.append(prods[i])
                    xcfGroupsDict[groupName] = tempList
                else:
                    xcfGroupsDict[groupName] = [prods[i]]
def ev12_getTcpConnOutput(ls, tcpProcName):
    """Run 'D TCPIP,<stack>,NETSTAT,CONN,FORMAT=LONG,MAX=*' and return
    connection rows [user id, conn id, local socket, foreign socket, state].

    NOTE(review): the parser assumes each connection's 'userid conn state'
    line precedes its LOCAL SOCKET/FOREIGN SOCKET lines; a FOREIGN SOCKET
    line arriving first would reference unbound locals -- confirm against
    real command output.
    """
    connections = []
    connectionentry = []
    output = ls.evMvsCmd(_CMD_D_TCPIP_NETSTAT_CONN % tcpProcName)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        #connectionentry = ['USER ID', 'CONN', 'LOCAL SOCKET', 'FOREIGN SOCKET', 'STATE']
        for line in output.cmdResponseList:
            # skip the EZD0101 message prefix and the column-header line
            if (re.search('EZD0101', line) or
                re.search('USER ID', line)):
                continue
            m = re.search('LOCAL SOCKET:\s+(\S+)', line)
            if (m):
                localsocket = m.group(1)
                continue
            m = re.search('FOREIGN SOCKET:\s+(\S+)', line)
            if (m):
                foreignsocket = m.group(1)
                # FOREIGN SOCKET completes the entry started two lines back
                connectionentry = [userid, conn,localsocket,foreignsocket,state]
                connections.append (connectionentry)
                continue
            m = re.search('(\S+)\s+(\S+)\s+(\S+)', line)
            if (m):
                userid = m.group(1)
                conn = m.group(2)
                state = m.group(3)
    return connections
def ev13_getTcpRouteOutput(ls, tcpProcName):
    """Run 'D TCPIP,<stack>,NETSTAT,ROUTE,FORMAT=LONG' and return the routing
    table rows [destination, gateway, flags, refcnt, interface]."""
    routes = []
    output = ls.evMvsCmd(_CMD_D_TCPIP_NETSTAT_ROUTE % tcpProcName)
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        headerColumns = ['DESTINATION', 'GATEWAY', 'FLAGS', 'REFCNT', 'INTERFACE']
        tableBeginPattern = 'EZD0101I NETSTAT'
        tableEndPattern = 'RECORDS DISPLAYED'
        firstColumnPaddingChar = ''
        includeRegexPattern = ''
        ignorePatterns = ['IPV4']
        routes = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
        #logger.debug ('Routes == ',routes)
    return routes
def ev14_getTcpDevLinkOutput(ls, tcpProcName):
    """Run 'D TCPIP,<stack>,NETSTAT,DEV,FORMAT=LONG' and return
    {linkName: DevLink} for every device/link pair found.

    The LNKNAME line is expected two lines below its DEVNAME line; the
    DevLink MAC field is left empty here (filled from ARP data later).
    """
    linkDevLinkDict = {} # {LINKNAME:DevLink Instance}
    output = ls.evMvsCmd(_CMD_D_TCPIP_NETSTAT_DEV % tcpProcName)
    if isNotNull(output) and output.isSuccess() and len(output.cmdResponseList) > 0:
        for i in range(len(output.cmdResponseList)):
            # get device names -------------------------------------------------
            retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'DEVNAME:', 'DEVTYPE:')
            if len(retVal) == 3:
                # get link names -----------------------------------------------
                j = i + 2
                retVal1 = output.getValuesFromLine('s', output.cmdResponseList[j], 'LNKNAME:', 'LNKTYPE:', 'LNKSTATUS:')
                if len(retVal1) == 4:
                    if isNotNull(retVal1[1]):
                        linkDevLinkDict[retVal1[1]] = DevLink(retVal[1], retVal[2], retVal1[1], retVal1[2], retVal1[3], '')
    return linkDevLinkDict
def ev15_getArpCacheOutput(ls, tcpProcName):
    """Run 'D TCPIP,<stack>,NETSTAT,ARP,FORMAT=LONG,MAX=*' and return
    {ip: [interface/link name, MAC address]} from the ARP cache."""
    ipMacDict = {} # {IP:[MAC, LINKNAME]}
    output = ls.evMvsCmd(_CMD_D_TCPIP_NETSTAT_ARP % tcpProcName)
    if isNotNull(output) and output.isSuccess() and len(output.cmdResponseList) > 0:
        for i in range(len(output.cmdResponseList)):
            retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'CACHE FOR ADDRESS')
            if len(retVal) > 0 and isNotNull(retVal[1]):
                j = i + 1 #MAC is on the next line
                retVal1 = output.getValuesFromLine('s', output.cmdResponseList[j], 'INTERFACE:', 'ETHERNET:')
                if len(retVal1) > 0 and isNotNull(retVal1[1]) and isNotNull(retVal1[2]):
                    ipMacDict[retVal[1]] = [retVal1[1], retVal1[2]]
    return ipMacDict
''' OSHV Creation Methods '''
def osh_createSysplexOsh(lparOsh, symbolsMap):
    """Create a mainframe_sysplex OSH from the SYSPLEX system symbol and link
    the LPAR to it; returns (vector, sysplexOsh) where sysplexOsh is None if
    no SYSPLEX symbol was found."""
    str_name = 'name'
    if _CMDB_CLASS_MODEL.version() < 9:
        # pre-9 class model uses the legacy attribute name
        str_name = 'data_name'
    _vector = ObjectStateHolderVector()
    sysplexOsh = None
    if symbolsMap.has_key('SYSPLEX'):
        sysplexOsh = ObjectStateHolder('mainframe_sysplex')
        sysplexOsh.setAttribute(str_name, symbolsMap['SYSPLEX'])
        _vector.add(sysplexOsh)
        str_membership = 'membership'
        if _CMDB_CLASS_MODEL.version() < 9:
            str_membership = 'member'
        membershipOsh = modeling.createLinkOSH(str_membership, sysplexOsh, lparOsh)
        _vector.add(lparOsh)
        _vector.add(membershipOsh)
    else:
        logger.warn("No sysplex found")
    return (_vector, sysplexOsh)
def osh_createMainframeCpcOsh(lparOsh, cpcSi, cpcId, cpcName, cpuLists):
    """Create the Mainframe CPC OSH (keyed on the serial from the 5-part
    'CPC SI' string) and link the LPAR to it; returns the result vector,
    which stays empty when no serial could be derived.

    NOTE(review): cpuSerial is assigned but never used -- confirm whether
    it was meant to serve as a fallback serial.
    """
    str_name = 'name'
    str_node_family = 'node_family'
    str_discovered_model = 'discovered_model'
    str_serial_number = 'serial_number'
    if _CMDB_CLASS_MODEL.version() < 9:
        # pre-9 class model uses the legacy attribute names
        str_name = 'data_name'
        str_node_family = 'host_servertype'
        str_discovered_model = 'host_model'
        str_serial_number = 'host_serialnumber'
    isComplete = 1
    createMainframe = 0
    _vector = ObjectStateHolderVector()
    cpcOsh = ObjectStateHolder('mainframe') # Mainframe CPC
    cpcOsh.setAttribute(str_name, cpcName) # CPC Name
    cpcOsh.setBoolAttribute('host_iscomplete', isComplete)
    cpcOsh.setAttribute('system_information', cpcSi) # CPC SI
    if isNotNull(cpcSi):
        # CPC SI is dot-separated: type.model.x.x.serial
        cpcSiList = string.split(cpcSi, '.')
        if len(cpcSiList) == 5:
            cpcOsh.setAttribute(str_node_family, cpcSiList[0]) # CPC Type
            cpcOsh.setAttribute(str_discovered_model, cpcSiList[1]) # CPC Model
            if len(cpuLists) > 0:
                if isNotNull(cpuLists[0][2]):
                    cpuSerial = cpuLists[0][2]
                cpcSerial = cpcSiList[4]
                if isNotNull(cpcSerial):
                    createMainframe = 1
                    cpcOsh.setAttribute(str_serial_number, cpcSerial) # CPC Serial
                    # set host_key as serial number ----------------------------
                    cpcOsh.setAttribute('host_key', cpcSerial)
    if createMainframe:
        str_membership = 'membership'
        if _CMDB_CLASS_MODEL.version() < 9:
            str_membership = 'member'
        membershipOsh = modeling.createLinkOSH(str_membership, cpcOsh, lparOsh)
        _vector.add(cpcOsh)
        _vector.add(lparOsh)
        _vector.add(membershipOsh)
    return _vector
def osh_createCpuOsh(lparOsh, cpuLists):
    """Create one cpu OSH per ONLINE entry of cpuLists (rows are
    [id, decoded status, serial, raw status]) under the given LPAR.

    NOTE(review): this uses 'version() > 9' while the rest of the file
    branches on 'version() < 9', so version exactly 9 takes the legacy
    'cpu_cid' path here -- confirm whether '>= 9' was intended.
    """
    _vector = ObjectStateHolderVector()
    for cpu in cpuLists:
        if isNotNull(cpu[0]) and isNotNull(cpu[1]) and cpu[1] == 'ONLINE':
            cpuOsh = ObjectStateHolder('cpu')
            if _CMDB_CLASS_MODEL.version() > 9:
                cpuOsh.setAttribute('cpu_id', cpu[0])
                cpuOsh.setAttribute('serial_number', cpu[2])
            else:
                cpuOsh.setAttribute('cpu_cid', cpu[0])
            #Spencer: Add cpu type
            # Second character of the raw status flags specialty engines:
            # 'I' -> zIIP, 'A' -> zAAP; anything else is left blank.
            cpu_type = ''
            if (len(cpu[3]) >= 2):
                if (cpu[3][1] == 'I'):
                    cpu_type = 'Ziip'
                elif (cpu[3][1] == 'A'):
                    cpu_type = 'Zaap'
            cpuOsh.setAttribute('cpu_type', cpu_type)
            cpuOsh.setContainer(lparOsh)
            _vector.add(cpuOsh)
    return _vector
def osh_createIpOsh(lparOsh, homeLists):
    """Create IP OSHs for every valid address in the NETSTAT HOME rows and
    'contained'-link them to the LPAR; returns (vector, {ip: ipOsh}).

    The loopback address and the literal 'ADDRESS' header row are skipped.
    """
    _vector = ObjectStateHolderVector()
    ipOshDict = {}
    ipstoexclude = ['127.0.0.1']
    if len(homeLists) > 0:
        for home in homeLists:
            if isNotNull(home[0]) and upper(home[0]) != 'ADDRESS' and home[0] not in ipstoexclude and eview_netutils._isValidIp(home[0]):
                ipOsh = eview_netutils._buildIp(home[0])
                containedOsh = modeling.createLinkOSH('contained', lparOsh, ipOsh)
                _vector.add(lparOsh)
                _vector.add(ipOsh)
                _vector.add(containedOsh)
                # add IP OSH to dictionary for later use -----------------------
                ipOshDict[home[0]] = ipOsh
    return (_vector, ipOshDict)
def osh_createSubsystemsOsh(lparOsh, ssiList):
    """Create mainframe_subsystem OSHs from 'D SSI' rows
    ([name, dynamic, status, commands]) under the given LPAR."""
    str_name = 'name'
    str_discovered_product_name = 'discovered_product_name'
    if _CMDB_CLASS_MODEL.version() < 9:
        str_name = 'data_name'
        str_discovered_product_name = 'data_name' # duplicated on purpose
    _vector = ObjectStateHolderVector()
    if isNotNull(ssiList):
        for ssi in ssiList:
            if isNotNull(ssi[0]):
                ssOsh = ObjectStateHolder('mainframe_subsystem')
                ssOsh.setAttribute(str_name, ssi[0])
                ssOsh.setAttribute(str_discovered_product_name, ssi[0])
                # Is Dynamic ---------------------------------------------------
                if isNotNull(ssi[1]) and upper(ssi[1]) == 'YES':
                    ssOsh.setBoolAttribute('is_dynamic', 1)
                elif isNotNull(ssi[1]) and upper(ssi[1]) == 'NO':
                    ssOsh.setBoolAttribute('is_dynamic', 0)
                # Is Active ----------------------------------------------------
                if isNotNull(ssi[2]) and upper(ssi[2]) == 'ACTIVE':
                    ssOsh.setBoolAttribute('is_active', 1)
                elif isNotNull(ssi[2]) and upper(ssi[2]) == 'INACTIVE':
                    ssOsh.setBoolAttribute('is_active', 0)
                # Accepts commands ---------------------------------------------
                if isNotNull(ssi[3]):
                    ssOsh.setAttribute('accepts_commands', ssi[3])
                ssOsh.setContainer(lparOsh)
                _vector.add(ssOsh)
    return _vector
def osh_createMajorNodesOsh(lparOsh, majorNodesLists):
    """Create mainframe_major_node OSHs from VTAM major node rows
    ([name, type, status]) under the given LPAR; status is not modeled."""
    str_name = 'name'
    if _CMDB_CLASS_MODEL.version() < 9:
        str_name = 'data_name'
    _vector = ObjectStateHolderVector()
    if len(majorNodesLists) > 0:
        for majNode in majorNodesLists:
            if isNotNull(majNode[0]):
                majOsh = ObjectStateHolder('mainframe_major_node')
                majOsh.setAttribute(str_name, majNode[0])
                if isNotNull(majNode[1]):
                    majOsh.setAttribute('type', majNode[1])
                majOsh.setContainer(lparOsh)
                _vector.add(majOsh)
    return _vector
def osh_createPageOsh(lparOsh, pageLists):
    """Create mainframe_page_dataset OSHs from 'D ASM' rows
    ([type, used%, status, device, DSN name]) under the given LPAR."""
    str_name = 'name'
    if _CMDB_CLASS_MODEL.version() < 9:
        str_name = 'data_name'
    _vector = ObjectStateHolderVector()
    if len(pageLists) > 0:
        for page in pageLists: # [Type, Used, Status, Device, DSN_Name]
            if isNotNull(page[4]):
                pageOsh = ObjectStateHolder('mainframe_page_dataset')
                pageOsh.setAttribute(str_name, page[4]) # DSN Name
                if isNotNull(page[0]):
                    pageOsh.setAttribute('type', page[0]) # Type
                if isNotNull(page[1]) and isnumeric(page[1]):
                    pageOsh.setIntegerAttribute('used', int(page[1])) # Used
                if isNotNull(page[2]):
                    pageOsh.setAttribute('status', page[2])
                if isNotNull(page[3]):
                    pageOsh.setAttribute('device', page[3])
                pageOsh.setContainer(lparOsh)
                _vector.add(pageOsh)
    return _vector
def osh_createSoftwareOsh(lparOsh, prodLists):
    """Create installed-software OSHs from 'D PROD' rows
    ([id, name, feature, version, owner, source]) under the given LPAR.

    Name and feature are concatenated when they differ; placeholder
    versions ('**.**.**' / '* .* .*') are not modeled.
    """
    str_cit = 'installed_software'
    str_name = 'name'
    str_description = 'description'
    str_version = 'version'
    str_software_productid = 'software_productid'
    str_discovered_vendor = 'discovered_vendor'
    if _CMDB_CLASS_MODEL.version() < 9:
        # pre-9 class model uses the legacy CIT and attribute names
        str_cit = 'software'
        str_name = 'data_name'
        str_description = 'data_description'
        str_version = 'software_version'
        str_software_productid = 'software_productid'
        str_discovered_vendor = 'software_vendor'
    _vector = ObjectStateHolderVector()
    if len(prodLists) > 0:
        for prod in prodLists: # [ID, name, feature, version, owner, registered]
            swOsh = None
            if isNotNull(prod[1]) and isNotNull(prod[2]):
                swOsh = ObjectStateHolder(str_cit)
                softwareName = ''
                softwareDesc = ''
                if upper(prod[1]) == upper(prod[2]):
                    swOsh.setAttribute(str_name, prod[1]) # Name
                    swOsh.setAttribute(str_description, prod[1]) # Name
                else:
                    swOsh.setAttribute(str_name, '%s %s' % (prod[1], prod[2])) # Name Feature
                    swOsh.setAttribute(str_description, '%s %s' % (prod[1], prod[2])) # Name Feature
            elif isNotNull(prod[2]):
                swOsh = ObjectStateHolder(str_cit)
                swOsh.setAttribute(str_name, prod[2]) # Feature
            if isNotNull(swOsh):
                if isNotNull(prod[3]) and prod[3] != '**.**.**' and prod[3] != '* .* .*':
                    swOsh.setAttribute(str_version, prod[3]) # Version
                if isNotNull(prod[0]):
                    swOsh.setAttribute(str_software_productid, prod[0]) # Version
                if isNotNull(prod[4]):
                    swOsh.setAttribute(str_discovered_vendor, prod[4]) # Owner
                swOsh.setContainer(lparOsh)
                _vector.add(swOsh)
    return _vector
def getIpFromHomeList(homeLists, linkName = ''):
    """Return the HOME-list IP for linkName, or (when linkName is empty) the
    primary address (flag 'P'); falls back to another valid address, or ''.

    NOTE(review): the fallback variable is reassigned on every valid row, so
    despite its name it ends up holding the LAST available IP, not the
    first -- confirm which is intended.
    """
    if isNotNull(homeLists) and len(homeLists) > 0:
        firstAvailableIp = ''
        for home in homeLists:
            # skip the header row and rows lacking an address or link name
            if isNotNull(home[0]) and upper(home[0]) != 'ADDRESS' and isNotNull(home[1]):
                firstAvailableIp = home[0]
                if isNotNull(linkName) and upper(home[1]) == upper(linkName):
                    return home[0]
                elif isNull(linkName) and isNotNull(home[2]) and upper(home[2]) == 'P':
                    return home[0]
        return firstAvailableIp
    return ''
def getLinkFromHomeList(homeLists, ip):
    """Return the link name paired with ip in the NETSTAT HOME rows, or ''."""
    if isNotNull(homeLists) and len(homeLists) > 0 and isNotNull(ip):
        for homeEntry in homeLists:
            # skip the header row and rows without an address or link name -----
            if isNotNull(homeEntry[0]) and upper(homeEntry[0]) != 'ADDRESS' and isNotNull(homeEntry[1]) and homeEntry[0] == ip:
                return homeEntry[1]
    return ''
def osh_createDeviceAndLinkOsh(lparOsh, ipOshDict, lparName, linkDevLinkDict, ipMacDict, homeLists):
    """Build interface OSHs (one per TCP link), connect them to their IPs and,
    on UCMDB 9+, create a hardware_board OSH per device.

    lparOsh         -- container host OSH for all created objects
    ipOshDict       -- {ip address: ip OSH}, used to link interfaces to IPs
    linkDevLinkDict -- {link name: device/link info object with .linkType/.devName}
    ipMacDict       -- {ip address: [link name, mac address]} from the ARP cache
    homeLists       -- NETSTAT HOME rows, used to find each link's parent IP
    Returns an ObjectStateHolderVector with the created objects and links.
    """
    # attribute names differ between UCMDB 8.x and 9.x class models
    str_name = 'interface_name'
    str_mac_address = 'mac_address'
    if _CMDB_CLASS_MODEL.version() < 9:
        str_name = 'data_name'
        str_mac_address = 'interface_macaddr'
    _vector = ObjectStateHolderVector()
    for (linkName, j) in linkDevLinkDict.items():
        # create interfaces ----------------------------------------------------
        ifOsh = ObjectStateHolder('interface')
        ifOsh.setAttribute(str_name, linkName)
        ifOsh.setAttribute('data_note', j.linkType)  ## ER: change attribute to link type
        # default the mac address attribute to linkName-Lparname and update later if MAC found #CP15
        ifOsh.setAttribute(str_mac_address, '#%s-%s' % (linkName, lparName))  # if MAC not found for set #linkName-Lparname as key #CP15
        ifOsh.setContainer(lparOsh)
        # link interfaces to IPs -----------------------------------------------
        ipOsh = None
        parentIp = getIpFromHomeList(homeLists, linkName)
        if isNotNull(parentIp) and ipOshDict.has_key(parentIp):
            ipOsh = ipOshDict[parentIp]
        # if the ARP cache holds a real MAC for this link, overwrite the default key
        if isNotNull(ipMacDict) and ipMacDict.has_key(parentIp):
            arpInfo = ipMacDict[parentIp]
            if isNotNull(arpInfo) and len(arpInfo) == 2:
                if isNotNull(arpInfo[0]) and upper(linkName) == upper(arpInfo[0]):
                    ifOsh.setAttribute(str_mac_address, arpInfo[1])
        _vector.add(ifOsh)
        if isNotNull(ipOsh):
            parentLinkOsh = modeling.createLinkOSH('containment', ifOsh, ipOsh)
            _vector.add(ipOsh)
            _vector.add(parentLinkOsh)
        # create devices (only for UCMDB 9.x) ----------------------------------
        if _CMDB_CLASS_MODEL.version() >= 9:
            devOsh = ObjectStateHolder('hardware_board')
            devOsh.setAttribute('serial_number', j.devName)  # serial number not available, use device name
            devOsh.setAttribute('name', j.devName)
            ##devOsh.setAttribute('data_note', j.devType)
            devOsh.setContainer(lparOsh)
            _vector.add(devOsh)
    return _vector
def _getIpPortFromSocket(socket, primaryIp):
    """Split a z/OS NETSTAT socket string 'ip..port' into an (ip, port) tuple.

    A wildcard address of 0.0.0.0 is replaced by *primaryIp*; an address that
    fails validation is returned as None. The port is returned as a string.
    """
    ip = ''
    port = ''
    if isNotNull(socket):
        pieces = socket.split("..")
        if len(pieces) == 2:
            if isNotNull(pieces[0]):
                ip = pieces[0]
            if isNotNull(pieces[1]):
                port = pieces[1]
    if ip == '0.0.0.0':  # wildcard bind: substitute the stack's primary IP
        ip = primaryIp
    if not eview_netutils._isValidIp(ip):
        ip = None
    return (ip, port)
def osh_createTcpConnectionsOsh(lparOsh, ipOshDict, connsList, knownPortsConfigFile, homeLists):
    """Model TCP/UDP connections from NETSTAT CONN rows.

    For each connection row, creates the destination (server) IP/host/service
    address objects and, for ESTBLSH connections with a valid foreign address,
    the source (client) IP/host plus client-server and dependency links.

    connsList rows: [user id, ?, local socket, foreign socket, state]
    knownPortsConfigFile -- maps port numbers to well-known service names.
    Returns an ObjectStateHolderVector.
    """
    str_containment = 'containment'
    if _CMDB_CLASS_MODEL.version() < 9:
        str_containment = 'contained'
    _vector = ObjectStateHolderVector()
    ignoreLocalConnections = 0  ## ER: parameterize
    # primary IP substitutes for 0.0.0.0 wildcard binds
    primaryIp = getIpFromHomeList(homeLists)
    for conn in connsList:
        if upper(conn[0]) != 'USER ID':  # skip the header row
            id = conn[0]  # NOTE: currently unused (shadows the builtin)
            localSocket = conn[2]
            foreignSocket = conn[3]
            state = conn[4]
            srcAddr = ''
            # split up the socket text into IP and port ------------------------
            (dstAddr, dstPort) = _getIpPortFromSocket(localSocket, primaryIp)
            if upper(state) == 'ESTBLSH':
                (srcAddr, srcPort) = _getIpPortFromSocket(foreignSocket, primaryIp)
                if ignoreLocalConnections and (srcAddr == dstAddr):
                    continue
            if isNotNull(dstAddr) and eview_netutils._isValidIp(dstAddr):
                # create destination (server) IP and Host --------------------------
                dstIpOsh = eview_netutils._buildIp(dstAddr)
                dstHostOsh = None
                if isNotNull(lparOsh):
                    dstHostOsh = lparOsh
                else:
                    dstHostOsh = modeling.createHostOSH(dstAddr)
                dstContainedLinkOsh = modeling.createLinkOSH(str_containment, dstHostOsh, dstIpOsh)
                _vector.add(dstIpOsh)
                _vector.add(dstHostOsh)
                _vector.add(dstContainedLinkOsh)
                # create destination service address object ------------------------
                portTypeEnum = TCP_PORT_TYPE_ENUM
                portName = knownPortsConfigFile.getTcpPortName(int(dstPort))
                if upper(state) == 'UDP':
                    portTypeEnum = UDP_PORT_TYPE_ENUM
                    portName = knownPortsConfigFile.getUdpPortName(int(dstPort))
                if isNull(portName):
                    portName = dstPort  # fall back to the numeric port as the name
                serverPortOsh = modeling.createServiceAddressOsh(dstHostOsh, dstAddr, int(dstPort), portTypeEnum, portName)
                _vector.add(serverPortOsh)
                if isNotNull(srcAddr) and eview_netutils._isValidIp(srcAddr):
                    # create source (client) IP and Host ---------------------------
                    srcIpOsh = eview_netutils._buildIp(srcAddr)
                    srcHostOsh = modeling.createHostOSH(srcAddr)
                    srcContainedLinkOsh = modeling.createLinkOSH(str_containment, srcHostOsh, srcIpOsh)
                    _vector.add(srcIpOsh)
                    _vector.add(srcHostOsh)
                    _vector.add(srcContainedLinkOsh)
                    # create client-server links -----------------------------------
                    _vector.add(_createClientServerLinkOsh(dstPort, serverPortOsh, portName, lower(state), srcIpOsh))
                    # create client server dependency links ------------------------
                    _vector.add(_createClientServerDependencyLinkOsh(dstHostOsh, dstPort, srcHostOsh, portName))
    return _vector
def _createClientServerDependencyLinkOsh(serverHostOSH, serverPort, clientHostOsh, portName):
    """Create a dependency link from the client host to the server host."""
    # link CIT name differs between UCMDB 8.x and 9.x
    if _CMDB_CLASS_MODEL.version() < 9:
        linkCit = 'dependency'
    else:
        linkCit = 'node_dependency'
    linkOsh = modeling.createLinkOSH(linkCit, clientHostOsh, serverHostOSH)
    linkOsh.setAttribute('dependency_name', serverPort)
    linkOsh.setAttribute('dependency_source', portName)
    return linkOsh
def _createClientServerLinkOsh(serverPort, serverPortOsh, portName, portType, clientIpOsh):
    """Create a client-server link from a client IP to a server service address."""
    # CIT and attribute names differ between UCMDB 8.x and 9.x
    if _CMDB_CLASS_MODEL.version() < 9:
        linkCit = 'clientserver'
        nameAttr = 'data_name'
    else:
        linkCit = 'client_server'
        nameAttr = 'name'
    csLinkOsh = modeling.createLinkOSH(linkCit, clientIpOsh, serverPortOsh)
    csLinkOsh.setStringAttribute('clientserver_protocol', portType)
    csLinkOsh.setStringAttribute(nameAttr, portName)
    csLinkOsh.setLongAttribute('clientserver_destport', int(serverPort))
    return csLinkOsh
def osh_createXcfOsh(lparOsh, xcfGroupsDict, sysplexOsh, lparName):
    """Model XCF groups and their members under the sysplex.

    xcfGroupsDict -- {group name: [[member name, member system, job id, status], ...]}
    Creates a mainframe_xcf_group per group (contained in the sysplex), makes
    the LPAR a member of each group, creates a mainframe_xcf_member per member,
    and links the LPAR to members whose system name matches *lparName*.
    Returns an ObjectStateHolderVector (empty when no sysplex was discovered).
    """
    # attribute/link names differ between UCMDB 8.x and 9.x class models
    str_name = 'name'
    str_membership = 'membership'
    str_containment = 'containment'
    if _CMDB_CLASS_MODEL.version() < 9:
        str_name = 'data_name'
        str_membership = 'member'
        str_containment = 'contained'
    _vector = ObjectStateHolderVector()
    if isNotNull(sysplexOsh):
        if isNotNull(xcfGroupsDict):
            for (groupName, membersList) in xcfGroupsDict.items():
                # Create XCF Groups
                xcfGroupOsh = ObjectStateHolder('mainframe_xcf_group')
                xcfGroupOsh.setAttribute(str_name, groupName)
                xcfGroupOsh.setContainer(sysplexOsh)
                _vector.add(xcfGroupOsh)
                # Make the LPAR member of XCF
                if isNotNull(xcfGroupOsh):
                    memberLinkOsh = modeling.createLinkOSH(str_membership, xcfGroupOsh, lparOsh)
                    _vector.add(memberLinkOsh)
                # Create XCF member for every group
                if isNotNull(xcfGroupOsh) and isNotNull(membersList) and len(membersList) > 0:
                    for member in membersList:
                        if isNotNull(member[0]):
                            memberOsh = ObjectStateHolder('mainframe_xcf_member')
                            memberOsh.setAttribute(str_name, member[0])
                            memberOsh.setAttribute('job_id', member[2])
                            memberOsh.setAttribute('xcf_member_status', member[3])
                            memberOsh.setContainer(xcfGroupOsh)
                            _vector.add(memberOsh)
                            # If LPAR sysid matches member system name, create contained link
                            if isNotNull(lparName) and isNotNull(memberOsh) and string.upper(lparName) == string.upper(member[1]):
                                containedLinkOsh = modeling.createLinkOSH(str_containment, lparOsh, memberOsh)
                                _vector.add(containedLinkOsh)
    else:
        logger.debug('Not creating any XCF Groups since no sysplex discovered')
    return _vector
# Process LPAR Network Resources
def processNetworkResources(ls, lparOsh, ipOshDict, lparName, sysplexOsh, knownPortsConfigFile, Framework):
    """Discover the LPAR's network resources: XCF groups/members, TCP devices
    and interfaces and, optionally (discover_TCP_UDP parameter), TCP/UDP
    connections. Returns an ObjectStateHolderVector.
    """
    _vector = ObjectStateHolderVector()
    #===========================================================================
    # Run commands and create OSHs
    # XCF (Groups, Members), TCPIP NETSTAT (CONN, HOME, ROUTE)
    #===========================================================================
    # XCF Groups and Members Commands ------------------------------------------
    xcfGroupsDict = {} # {groupName:[[memberName, memberSystem, jobId, status]]
    xcfGroups = ev10_getXcfGroupOutput(ls)
    for group in xcfGroups:
        if isNotNull(group[0]):
            ev11_getXcfMemberOutput(ls, group[0], xcfGroupsDict)
    _vector.addAll(osh_createXcfOsh(lparOsh, xcfGroupsDict, sysplexOsh, lparName))
    # TCPIP Stacks Command -----------------------------------------------------
    createTcpUdp = Framework.getParameter('discover_TCP_UDP')
    if isNotNull(createTcpUdp) and string.lower(createTcpUdp) == 'true':
        createTcpUdp = 1
    else:
        createTcpUdp = 0
    tcpStacksList = ev4_getTcpStackNameOutput(ls)
    connsList = []
    routeList = []
    linkDevLinkDict = {}
    ipMacDict = {}
    homeLists = []
    # merge the per-stack outputs into single collections
    for tcpStack in tcpStacksList:
        linkDevLinkDict = appendToDictionary(linkDevLinkDict, ev14_getTcpDevLinkOutput(ls, tcpStack)) # for TCP devices and interfaces (links)
        ipMacDict = appendToDictionary(ipMacDict, ev15_getArpCacheOutput(ls, tcpStack)) # for TCP interfaces (links)
        homeLists = appendToList(homeLists, ev5_getHomelistOutput(ls, tcpStack)) # for IP addresses and links
        if createTcpUdp:
            connsList = appendToList(connsList, ev12_getTcpConnOutput(ls, tcpStack)) # for TCP connections
            routeList = appendToList(routeList, ev13_getTcpRouteOutput(ls, tcpStack)) # for TCP connections
    _vector.addAll(osh_createDeviceAndLinkOsh(lparOsh, ipOshDict, lparName, linkDevLinkDict, ipMacDict, homeLists))
    if createTcpUdp:
        _vector.addAll(osh_createTcpConnectionsOsh(lparOsh, ipOshDict, connsList, knownPortsConfigFile, homeLists))
    return _vector
####################################
## Create Jobs Objects ##
####################################
def createJobsOSH(joblist, lparOSH):
    """Build mainframe_job OSHs from address-space rows.

    joblist rows: [name, step name, proc step, job id, user, _, _, _,
                   current storage, program name, ...].
    Rows whose job id is neither a started task (STC*) nor a batch job (JOB*)
    are skipped. Returns an ObjectStateHolderVector.
    """
    myVec = ObjectStateHolderVector()
    for job in joblist:
        jobid = job[3]
        # Classify first so unknown address-space types are skipped without
        # building (and then discarding) an OSH, as the original code did.
        if re.findall("STC.*", jobid):
            jobType = 'Started Task'
        elif re.findall("JOB.*", jobid):  #CP15
            jobType = 'Job'  #CP15
        else:
            continue  #CP15
        jobOSH = ObjectStateHolder('mainframe_job')
        jobOSH.setAttribute('name', job[0])
        jobOSH.setAttribute('step_name', job[1])
        jobOSH.setAttribute('proc_step', job[2])
        jobOSH.setAttribute('job_id', job[3])
        jobOSH.setAttribute('process_user', job[4])
        jobOSH.setIntegerAttribute('current_storage', int(job[8]))
        jobOSH.setAttribute('program_name', job[9])
        jobOSH.setAttribute('type', jobType)
        jobOSH.setContainer(lparOSH)
        myVec.add(jobOSH)
    return myVec
####################################
## Create DASD Volume object ##
####################################
def createDASDVolOSH(vollist, lparOSH):
    """Build a dasd3390 volume OSH from a parsed volume row.

    vollist: [name, num tracks, tracks/cyl, free extents, free tracks,
              largest extent, percent used].
    """
    volumeOsh = ObjectStateHolder('dasd3390')
    volumeOsh.setAttribute('name', vollist[0])
    # integer attributes, in the same order as the source row
    intAttrs = [
        ('num_tracks', 1),
        ('tracks_per_cyl', 2),
        ('volume_free_extents', 3),
        ('volume_free_tracks', 4),
        ('largest_extent', 5),
        ('percent_used', 6),
    ]
    for (attrName, idx) in intAttrs:
        volumeOsh.setIntegerAttribute(attrName, int(vollist[idx]))
    volumeOsh.setContainer(lparOSH)
    return volumeOsh
####################################
## Create DASD Storage Group ##
####################################
def createDASDSG(grouplist, lparOSH):
    """Build a volumegroup OSH for a DASD storage group (name in grouplist[0])."""
    groupOsh = ObjectStateHolder('volumegroup')
    groupOsh.setAttribute('name', grouplist[0])
    groupOsh.setContainer(lparOSH)
    return groupOsh
#############################################################
## Get the Indivual DASD Volumes and the Groups ##
#############################################################
def getvolumes(ls, lparOSH):
    """Discover the individual DASD volumes of the LPAR.

    Returns (vector, volDICT): the vector holds one dasd3390 OSH per volume;
    volDICT maps volume name -> parsed '|'-separated row, so storage-group
    discovery can re-create the same OSHs for linking.
    """
    vector = ObjectStateHolderVector()
    vollinelist = []
    volDICT = {}
    #
    # First get the individual DASD volumes for the Lpar
    #
    output = ls.evSysInfoCmd(_CMD_I_DASD,'01')
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        lines = output.cmdResponseList
        for line in lines:
            vollinelist = line.split('|')
            volDICT[vollinelist[0]] = vollinelist
            vector.add(createDASDVolOSH(vollinelist,lparOSH))
    return vector, volDICT
#############################################################
## Get the Storage Volumes in the Storage Groups ##
#############################################################
def getStorageVolumes(ls, lparOSH, vgOSH, sgname, volDICT):
    """Link the volumes of storage group *sgname* into its volumegroup OSH.

    volDICT maps volume name -> parsed volume row (from getvolumes()); only
    volumes already discovered there are linked. Returns an
    ObjectStateHolderVector of containment links.
    """
    vector = ObjectStateHolderVector()
    #
    # First get the volumes for the storage group
    #
    output = ls.evSysInfoCmd(sgname, '12', 'evsgv')
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        for line in output.cmdResponseList:
            volumelist = line.split()
            # guard against blank response lines (IndexError in the original)
            if len(volumelist) == 0:
                continue
            # dict lookup instead of `in volDICT.keys()`, which built a fresh
            # key list and scanned it linearly for every response line
            if volDICT.has_key(volumelist[0]):
                volOSH = createDASDVolOSH(volDICT[volumelist[0]], lparOSH)
                vector.add(modeling.createLinkOSH('containment', vgOSH, volOSH))
    return vector
#############################################################
## Get the Storage Groups ##
#############################################################
def getStorageGroups(ls, lparOSH):
    """Discover DASD volumes and SMS storage groups for the LPAR.

    Creates all individual volume OSHs first, then one volumegroup OSH per
    storage group (VIO is skipped — it is not a real group) and links each
    group to its member volumes. Returns an ObjectStateHolderVector.
    """
    vector = ObjectStateHolderVector()
    (volvector, volDICT) = getvolumes(ls, lparOSH)
    vector.addAll(volvector)
    #
    # Get the Storage Groups
    #
    output = ls.evSysInfoCmd('', '12', 'evsgl')
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        for line in output.cmdResponseList:
            grouplist = line.split()
            # guard against blank response lines (the original raised
            # IndexError on grouplist[0] for an empty line)
            if len(grouplist) == 0:
                continue
            # Skip the VIO group as it is not a real group
            if grouplist[0] == 'VIO':
                continue
            # Verify we have a valid group, must be at least 10 entries to be valid
            if len(grouplist) >= 10:
                vgOSH = createDASDSG(grouplist, lparOSH)
                vector.add(vgOSH)
                vector.addAll(getStorageVolumes(ls, lparOSH, vgOSH, grouplist[0], volDICT))
    return vector
#############################################################
# Discover the Dasd Storage connected to the Mainframe
#############################################################
def processDasd(ls, lparOsh, Framework):
    """Discover DASD storage when the discover_DASD job parameter is 'true';
    otherwise return an empty vector."""
    dasdParam = Framework.getParameter('discover_DASD')
    if isNotNull(dasdParam) and string.lower(dasdParam) == 'true':
        return getStorageGroups(ls, lparOsh)
    return ObjectStateHolderVector()
#############################################################
## Get the each Address Space (Jobs and Started Tasks) ##
#############################################################
def getjobs(ls, jobregex, lparOSH):
    """Discover jobs and started tasks matching *jobregex* (default '*').

    Runs the address-space listing command, splits each '|'-separated row and
    delegates OSH creation to createJobsOSH(). Logs a warning when nothing
    matched. Returns an ObjectStateHolderVector.
    """
    vector = ObjectStateHolderVector()
    if jobregex is None:  # idiomatic identity check (was `== None`)
        jobregex = '*'
    #
    # First get the jobs and started tasks
    #
    output = ls.evSysInfoCmd(jobregex, '40')
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        joblist = []
        for line in output.cmdResponseList:
            joblist.append(line.split('|'))
        vector.addAll(createJobsOSH(joblist, lparOSH))
    else:
        # fixed typo in the warning message ("where" -> "were")
        logger.reportWarning('Jobs were not found on target system. Please, verify the regex expression parameter and rerun discovery.')
    return vector
#############################################################
## Process the Host Resources ##
#############################################################
def processHostResources(ls, lparOsh, Framework):
    """Discover the LPAR's host resources: sysplex, CPC, CPUs, IPs and —
    depending on job parameters — subsystems, VTAM major nodes, page
    datasets, installed software and jobs.

    Returns (vector, ipOshDict, sysplexOsh); ipOshDict and sysplexOsh are
    reused later by processNetworkResources().
    """
    _vector = ObjectStateHolderVector()
    #===========================================================================
    # Run commands and create OSHs
    # SYMLIST, CPULIST, HOMELIST, SSILIST, MAJNODES, PAGELIST, LISTPROD
    #===========================================================================
    # Symbols ------------------------------------------------------------------
    symbolsMap = ev2_getSymlistOutput(ls) # {symbolName:symbolValue}
    # Create Sysplex OSH -------------------------------------------------------
    (sysplexTopology, sysplexOsh) = osh_createSysplexOsh(lparOsh, symbolsMap)
    _vector.addAll(sysplexTopology)
    # CPU List Command ---------------------------------------------------------
    (cpuLists, cpcSi, cpcId, cpcName, lpId, lpName) = ev3_getCpulistOutput(ls)
    ''' Create Mainframe CPC OSH '''
    _vector.addAll(osh_createMainframeCpcOsh(lparOsh, cpcSi, cpcId, cpcName, cpuLists))
    ''' Create CPU OSH '''
    createCpu = Framework.getParameter('discover_CPUs')
    if isNotNull(createCpu) and string.lower(createCpu) == 'true':
        _vector.addAll(osh_createCpuOsh(lparOsh, cpuLists))
    ''' TCPIP Stacks Command '''
    tcpStacksList = ev4_getTcpStackNameOutput(ls)
    # For every TCP stack run the TCPIP NETSTAT HOME ---------------------------
    homeLists = []
    for tcpStack in tcpStacksList:
        homeLists = homeLists + ev5_getHomelistOutput(ls, tcpStack) # [ADDRESS, LINK, FLG]
    # Create IP OSH ------------------------------------------------------------
    (ipOshv, ipOshDict) = osh_createIpOsh(lparOsh, homeLists)
    _vector.addAll(ipOshv)
    # the remaining discoveries are each gated by a job parameter --------------
    createSubsystem = Framework.getParameter('discover_Subsystems')
    if isNotNull(createSubsystem) and string.lower(createSubsystem) == 'true':
        ''' SSI Command '''
        ssiList = ev6_getSsilistOutput(ls) # {Subsystem Name:[Dynamic, Status, Commands]}
        ''' Create Subsystem OSH '''
        _vector.addAll(osh_createSubsystemsOsh(lparOsh, ssiList))
    createNodes = Framework.getParameter('discover_MajorNodes')
    if isNotNull(createNodes) and string.lower(createNodes) == 'true':
        ''' Major Nodes Command '''
        majorNodesLists = ev7_getMajorNodesOutput(ls) # [Name, Type, Status]
        ''' Create Mainframe Major Nodes OSH '''
        _vector.addAll(osh_createMajorNodesOsh(lparOsh, majorNodesLists))
    createPageDatasets = Framework.getParameter('discover_PageDatasets')
    if isNotNull(createPageDatasets) and string.lower(createPageDatasets) == 'true':
        ''' Page Lists Command '''
        pageLists = ev8_getPagelistOutput(ls) # [Type, Used, Status, Device, DSN_Name]
        ''' Create Mainframe Page Dataset OSH '''
        _vector.addAll(osh_createPageOsh(lparOsh, pageLists))
    createSoftware = Framework.getParameter('discover_Software')
    if isNotNull(createSoftware) and string.lower(createSoftware) == 'true':
        ''' Prod Lists Command '''
        prodLists = ev9_getListProdOutput(ls) # [ID, name, feature, version, owner, state]
        ''' Create Mainframe Software OSH '''
        _vector.addAll(osh_createSoftwareOsh(lparOsh, prodLists))
    createJobs = Framework.getParameter('discover_Jobs')
    if isNotNull(createJobs) and string.lower(createJobs) == 'true':
        jobregex = Framework.getParameter('job_Regex')
        if isNotNull(jobregex):
            _vector.addAll(getjobs(ls,jobregex,lparOsh))
        else:
            logger.reportWarning('Regex Parameter invalid. Please, verify the Regex expression parameter and rerun discovery.')
    return _vector, ipOshDict, sysplexOsh
#######
# MAIN
#######
def DiscoveryMain(Framework):
    """Job entry point: discover host, network and DASD resources of a z/OS
    LPAR over an EView shell and return the resulting topology vector."""
    OSHVResult = ObjectStateHolderVector()
    # port-number -> service-name mapping used for TCP/UDP connection modeling
    knownPortsConfigFile = Framework.getConfigFile(CollectorsParameters.KEY_COLLECTORS_SERVERDATA_PORTNUMBERTOPORTNAME)
    # create LPAR node
    lparName = Framework.getDestinationAttribute(PARAM_LPAR_NAME)
    hostId = Framework.getDestinationAttribute(PARAM_HOST_ID)
    lparOsh = None
    if eview_lib.isNotNull(hostId):
        lparOsh = modeling.createOshByCmdbIdString('host_node', hostId)
    ls = eview_lib.EvShell(Framework)
    # host resources first: network discovery reuses its IP dict and sysplex OSH
    (hostResourcesOshv, ipOshDict, sysplexOsh) = processHostResources(ls, lparOsh, Framework)
    OSHVResult.addAll(hostResourcesOshv)
    (networkResourcesOshv) = processNetworkResources(ls, lparOsh, ipOshDict, lparName, sysplexOsh, knownPortsConfigFile, Framework)
    OSHVResult.addAll(networkResourcesOshv)
    OSHVResult.addAll(processDasd(ls,lparOsh,Framework))
    ls.closeClient()
    return OSHVResult
"bluesteelkc@gmail.com"
] | bluesteelkc@gmail.com |
90a26d93ea05d64db95e9ed53c7fe2fcd4b30d8a | 56591823019e0ac1d857f97a1b8c85e9d85a8385 | /Scopuli/Interfaces/WEB/Jinja/Filters.py | d2702eaf522afa636d5c239edcaee4604161951d | [
"Apache-2.0"
] | permissive | MaxOnNet/scopuli-core-web | 3c19e312ec5688034295ac86a7a56fe2b2cf7915 | 66a2c31b36d7fc05be36ba5d5b141644459b4aba | refs/heads/master | 2020-03-23T19:49:56.383093 | 2018-08-30T13:44:31 | 2018-08-30T13:44:31 | 142,004,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright [2018] Tatarnikov Viktor [viktor@tatarnikov.org]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" """
import phonenumbers
def _decode_text(value):
"""
Decode a text-like value for display.
Unicode values are returned unchanged. Byte strings will be decoded
with a text-safe replacement for unrecognized characters.
"""
if isinstance(value, bytes):
return value.decode('ascii', 'replace')
else:
return value
def filter_markdown(value):
    """Render *value* as Markdown and mark the resulting HTML as safe."""
    from markdown import markdown
    from flask import Markup

    rendered = markdown(value)
    return Markup(rendered)
def filter_printable(value):
    """Return a display-safe repr() of *value*; never raises."""
    try:
        raw = repr(value)
    except Exception as exc:
        # repr() itself failed: fall back to the object identity repr
        return '<repr(%s) raised %s: %s>' % (
            object.__repr__(value), type(exc).__name__, exc)
    return _decode_text(raw)
def filter_shuffle(seq):
    """Return the items of *seq* as a list in random order.

    Non-iterable input is returned unchanged, matching the original
    best-effort behavior — but via a narrow TypeError handler instead of the
    original bare ``except:``, which silently swallowed every exception.
    """
    import random
    try:
        shuffled = list(seq)
    except TypeError:
        return seq
    random.shuffle(shuffled)
    return shuffled
def filter_phonenumber(value, country='RU', format=phonenumbers.PhoneNumberFormat.INTERNATIONAL):
    """Format *value* as a phone number for *country*; return it unchanged
    when it cannot be parsed."""
    try:
        parsed_number = phonenumbers.parse(value, country)
    except phonenumbers.NumberParseException:
        return value
    return phonenumbers.format_number(parsed_number, format)
def filter_money(value):
    """Format a numeric amount as roubles with thousands separators,
    e.g. 1234.5 -> '1,234.50 р.'."""
    formatted = "{money:0,.2f} р.".format(money=value)
    return formatted
"viktor@tatarnikov.org"
] | viktor@tatarnikov.org |
d0ae1b63e5f6d3f36e516465a1b9561c19929bee | 4bd364186e70e31e62fb039bda4c3ffd8ec1c46b | /NLP/TF/main.py | 8a6d4654f22bcad6f4ccf0376864965bfac30fe0 | [] | no_license | fanqi0312/MachineLearning | 50e1baa1a9e6e4b2d2eebe51413243ff1c2c9834 | 214b1bf2e3084b48db2a1f627d2e117aa0101a57 | refs/heads/master | 2021-01-20T04:53:59.185799 | 2018-03-06T15:07:01 | 2018-03-06T15:07:01 | 101,392,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,369 | py | # -*- coding:utf-8 -*-
import collections
import math
import os
import pickle as pkl
# from pymongo import MongoClient
import jieba
import numpy as np
import tensorflow as tf
class word2vec():
    """Skip-gram word2vec model trained with NCE loss (TensorFlow 1.x graph API).

    Builds its own tf.Graph and tf.Session. Construct either from a vocabulary
    (``vocab_list``) or by restoring a previously saved model (``model_path``).
    """

    def __init__(self,
                 vocab_list=None,      # vocabulary (list of words); required unless model_path is given
                 embedding_size=200,   # dimensionality of the word vectors
                 win_len=3,            # one-sided context window length
                 num_sampled=1000,     # number of negative samples for NCE
                 learning_rate=1.0,    # SGD learning rate
                 logdir='/tmp/simple_word2vec',  # tensorboard summary directory
                 model_path=None       # if set, restore a saved model instead of initializing
                 ):
        self.batch_size = None  # samples per batch; determined per training call
        if model_path is not None:
            self.load_model(model_path)
        else:
            # model parameters
            assert type(vocab_list) == list
            self.vocab_list = vocab_list
            self.vocab_size = len(vocab_list)
            self.embedding_size = embedding_size
            self.win_len = win_len
            self.num_sampled = num_sampled
            self.learning_rate = learning_rate
            self.logdir = logdir

            # word => id mapping
            self.word2id = {}
            for i in range(self.vocab_size):
                self.word2id[self.vocab_list[i]] = i

            # training counters
            self.train_words_num = 0  # number of (input, label) pairs trained
            self.train_sents_num = 0  # number of sentences trained
            self.train_times_num = 0  # number of training calls (one call may hold several sentences)

            # rolling loss statistics over the last 10 batches
            self.train_loss_records = collections.deque(maxlen=10)
            self.train_loss_k10 = 0

        self.build_graph()
        self.init_op()
        if model_path is not None:
            tf_model_path = os.path.join(model_path, 'tf_vars')
            self.saver.restore(self.sess, tf_model_path)

    def init_op(self):
        """Create the session, initialize variables and open the summary writer."""
        self.sess = tf.Session(graph=self.graph)
        self.sess.run(self.init)
        self.summary_writer = tf.summary.FileWriter(self.logdir, self.sess.graph)

    def build_graph(self):
        """Define the skip-gram NCE training graph plus a cosine-similarity probe."""
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.train_inputs = tf.placeholder(tf.int32, shape=[self.batch_size])
            # one context-word label per input word id
            self.train_labels = tf.placeholder(tf.int32, shape=[self.batch_size, 1])
            self.embedding_dict = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0)
            )
            self.nce_weight = tf.Variable(tf.truncated_normal(
                [self.vocab_size, self.embedding_size],
                stddev=1.0 / math.sqrt(self.embedding_size)))
            # one output class per vocabulary word
            self.nce_biases = tf.Variable(tf.zeros([self.vocab_size]))

            # embed the input word ids
            embed = tf.nn.embedding_lookup(self.embedding_dict, self.train_inputs)

            # NCE loss
            self.loss = tf.reduce_mean(
                tf.nn.nce_loss(
                    weights=self.nce_weight,
                    biases=self.nce_biases,
                    labels=self.train_labels,
                    inputs=embed,
                    num_sampled=self.num_sampled,  # number of negative samples
                    num_classes=self.vocab_size
                )
            )

            # tensorboard: record the loss
            tf.summary.scalar('loss', self.loss)

            # BUG FIX: the optimizer previously hard-coded learning_rate=0.1
            # and silently ignored the constructor's learning_rate parameter.
            self.train_op = tf.train.GradientDescentOptimizer(
                learning_rate=self.learning_rate).minimize(self.loss)

            # similarity probe for a batch of test word ids
            self.test_word_id = tf.placeholder(tf.int32, shape=[None])
            vec_l2_model = tf.sqrt(  # L2 norm of every embedding row
                tf.reduce_sum(tf.square(self.embedding_dict), 1, keep_dims=True)
            )
            avg_l2_model = tf.reduce_mean(vec_l2_model)
            tf.summary.scalar('avg_vec_model', avg_l2_model)

            # normalized embeddings: cosine similarity becomes a single matmul
            self.normed_embedding = self.embedding_dict / vec_l2_model
            test_embed = tf.nn.embedding_lookup(self.normed_embedding, self.test_word_id)
            self.similarity = tf.matmul(test_embed, self.normed_embedding, transpose_b=True)

            # variable initializer, merged summaries and saver
            self.init = tf.global_variables_initializer()
            self.merged_summary_op = tf.summary.merge_all()
            self.saver = tf.train.Saver()

    def train_by_sentence(self, input_sentence=None):
        """Train on a batch of sentences.

        input_sentence: [sub_sent1, sub_sent2, ...] where each sub_sent is a
        list of tokens, e.g. ['这次', '大选', '让'].
        """
        # BUG FIX: the default used to be a (shared) mutable list literal.
        if input_sentence is None:
            input_sentence = []
        batch_inputs = []
        batch_labels = []
        for sent in input_sentence:
            sent_len = len(sent)
            for i in range(sent_len):
                # hoisted: the center-word id is invariant over the window loop
                input_id = self.word2id.get(sent[i])
                if input_id is None:  # out-of-vocabulary center word
                    continue
                start = max(0, i - self.win_len)
                end = min(sent_len, i + self.win_len + 1)
                for index in range(start, end):
                    if index == i:
                        continue
                    label_id = self.word2id.get(sent[index])
                    # BUG FIX: the original tested `if not (input_id and label_id)`,
                    # which also discarded every pair containing word id 0 —
                    # i.e. the most frequent word in the vocabulary.
                    if label_id is None:
                        continue
                    batch_inputs.append(input_id)
                    batch_labels.append(label_id)
        if len(batch_inputs) == 0:
            return
        batch_inputs = np.array(batch_inputs, dtype=np.int32)
        batch_labels = np.array(batch_labels, dtype=np.int32)
        batch_labels = np.reshape(batch_labels, [len(batch_labels), 1])

        feed_dict = {
            self.train_inputs: batch_inputs,
            self.train_labels: batch_labels
        }
        _, loss_val, summary_str = self.sess.run(
            [self.train_op, self.loss, self.merged_summary_op], feed_dict=feed_dict)

        # rolling mean loss over the last 10 batches
        self.train_loss_records.append(loss_val)
        self.train_loss_k10 = np.mean(self.train_loss_records)
        if self.train_sents_num % 1000 == 0:
            self.summary_writer.add_summary(summary_str, self.train_sents_num)
            print("{a} sentences dealed, loss: {b}"
                  .format(a=self.train_sents_num, b=self.train_loss_k10))

        # update counters
        self.train_words_num += len(batch_inputs)
        self.train_sents_num += len(input_sentence)
        self.train_times_num += 1

    def cal_similarity(self, test_word_id_list, top_k=10):
        """Return (test_words, near_words, sim_mean, sim_var) for the given ids.

        near_words[i] holds the top_k nearest vocabulary words (by cosine
        similarity) for test word i, excluding the word itself.
        """
        sim_matrix = self.sess.run(self.similarity,
                                   feed_dict={self.test_word_id: test_word_id_list})
        sim_mean = np.mean(sim_matrix)
        sim_var = np.mean(np.square(sim_matrix - sim_mean))
        test_words = []
        near_words = []
        for i in range(len(test_word_id_list)):
            test_words.append(self.vocab_list[test_word_id_list[i]])
            # index 0 of the sorted row is the word itself, so skip it
            nearst_id = (-sim_matrix[i, :]).argsort()[1:top_k + 1]
            nearst_word = [self.vocab_list[x] for x in nearst_id]
            near_words.append(nearst_word)
        return test_words, near_words, sim_mean, sim_var

    def save_model(self, save_path):
        """Persist python-side parameters (pickle) and TF variables under the
        directory *save_path* (created if missing)."""
        if os.path.isfile(save_path):
            raise RuntimeError('the save path should be a dir')
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        # record the model's python-side parameters
        model = {}
        var_names = ['vocab_size',          # int   model parameters
                     'vocab_list',          # list
                     'learning_rate',       # int
                     'word2id',             # dict
                     'embedding_size',      # int
                     'logdir',              # str
                     'win_len',             # int
                     'num_sampled',         # int
                     'train_words_num',     # int   train info
                     'train_sents_num',     # int
                     'train_times_num',     # int
                     'train_loss_records',  # deque train loss
                     'train_loss_k10',      # float
                     ]
        for var in var_names:
            # getattr instead of the original eval('self.' + var)
            model[var] = getattr(self, var)
        param_path = os.path.join(save_path, 'params.pkl')
        if os.path.exists(param_path):
            os.remove(param_path)
        with open(param_path, 'wb') as f:
            pkl.dump(model, f)

        # record the tensorflow variables
        tf_path = os.path.join(save_path, 'tf_vars')
        if os.path.exists(tf_path):
            os.remove(tf_path)
        self.saver.save(self.sess, tf_path)

    def load_model(self, model_path):
        """Restore the python-side parameters written by save_model().

        The TF variables themselves are restored afterwards in __init__, once
        the graph has been rebuilt.
        """
        if not os.path.exists(model_path):
            raise RuntimeError('file not exists')
        param_path = os.path.join(model_path, 'params.pkl')
        with open(param_path, 'rb') as f:
            model = pkl.load(f)
            self.vocab_list = model['vocab_list']
            self.vocab_size = model['vocab_size']
            self.logdir = model['logdir']
            self.word2id = model['word2id']
            self.embedding_size = model['embedding_size']
            self.learning_rate = model['learning_rate']
            self.win_len = model['win_len']
            self.num_sampled = model['num_sampled']
            self.train_words_num = model['train_words_num']
            self.train_sents_num = model['train_sents_num']
            self.train_times_num = model['train_times_num']
            self.train_loss_records = model['train_loss_records']
            self.train_loss_k10 = model['train_loss_k10']
if __name__ == '__main__':
    # Step 1: load the stop-word list (one word per line).
    stop_words = []
    with open('stop_words.txt', encoding='utf-8') as f:
        line = f.readline()
        while line:
            # strip the trailing newline
            stop_words.append(line[:-1])
            line = f.readline()
    stop_words = set(stop_words)
    print('停用词读取完毕,共{n}个单词'.format(n=len(stop_words)))

    # Step 2: read the corpus, clean, tokenize, and build the vocabulary.
    raw_word_list = []  # every kept token, in reading order
    sentence_list = []  # kept tokens grouped per source line
    with open('2800.txt', encoding='gbk') as f:  # a novel used as the training corpus
        line = f.readline()
        while line:
            while '\n' in line:  # drop newlines
                line = line.replace('\n', '')
            while ' ' in line:  # drop spaces
                line = line.replace(' ', '')
            if len(line) > 0:  # only process non-empty lines
                # 2.1 tokenize with jieba
                raw_words = list(jieba.cut(line, cut_all=False))
                dealed_words = []
                for word in raw_words:
                    # 2.2 drop stop words and web-scraper artifacts
                    if word not in stop_words and word not in ['qingkan520', 'www', 'com', 'http']:
                        raw_word_list.append(word)
                        dealed_words.append(word)
                sentence_list.append(dealed_words)
            line = f.readline()

    word_count = collections.Counter(raw_word_list)  # term frequencies (deduplicates)
    print('文本中总共有{n1}个单词,不重复单词数{n2},选取前30000个单词进入词典'
          .format(n1=len(raw_word_list), n2=len(word_count)))
    word_count = word_count.most_common(30000)  # keep only the 30000 most frequent words
    word_list = [x[0] for x in word_count]  # keep the words, drop the counts

    # Step 3: build the model and train it.
    w2v = word2vec(vocab_list=word_list,    # vocabulary
                   embedding_size=200,
                   win_len=2,
                   learning_rate=1,         # learning rate
                   num_sampled=100,         # number of negative samples
                   logdir='/tmp/280')       # tensorboard summary directory

    num_steps = 10000
    # cycle through the sentences round-robin for num_steps updates
    for i in range(num_steps):
        sent = sentence_list[i % len(sentence_list)]
        w2v.train_by_sentence([sent])
    w2v.save_model('model')

    # reload and probe the nearest neighbours of two test words
    w2v.load_model('model')
    test_word = ['天地', '级别']
    test_id = [word_list.index(x) for x in test_word]
    test_words, near_words, sim_mean, sim_var = w2v.cal_similarity(test_id)
    print(test_words, near_words, sim_mean, sim_var)
| [
"fanqi0312@sina.com"
] | fanqi0312@sina.com |
0e501abc2dbd7d6492e3b00b62b69df06e629252 | 73aea19ba4c4f8923e366c1d65443950834a39e2 | /app/recipe/tests/test_recipe_api.py | db6143d1222db8be2e1a84a263cf2d1ca5ed60c9 | [
"MIT"
] | permissive | t-monaco/recipe-app-api | 72997a960f30445e9614e607a47df4490820eadf | 09899f3ab56b05637612232e85febd4069efdfd0 | refs/heads/main | 2023-01-03T22:37:38.156774 | 2020-11-02T13:09:39 | 2020-11-02T13:09:39 | 306,006,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,811 | py | import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPE_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
    """Build the image-upload URL for the recipe with the given id."""
    url_name = 'recipe:recipe-upload-image'
    return reverse(url_name, args=[recipe_id])
def detail_url(recipe_id):
    """Build the detail URL for the recipe with the given id."""
    url_name = 'recipe:recipe-detail'
    return reverse(url_name, args=[recipe_id])
def sample_tag(user, name='Sample Tag'):
    """Create a Tag owned by *user* for use in tests."""
    tag = Tag.objects.create(user=user, name=name)
    return tag
def sample_ingredient(user, name='Sample Ingredient'):
    """Create an Ingredient owned by *user* for use in tests."""
    ingredient = Ingredient.objects.create(user=user, name=name)
    return ingredient
def sample_recipe(user, **params):
    """Create a Recipe owned by *user*; keyword params override the defaults."""
    fields = dict(title='Sample Recipe', time_minutes=10, price=5.00)
    fields.update(params)
    return Recipe.objects.create(user=user, **fields)
class PublicRecipeTest(TestCase):
    """Tests of the recipe API that run without authentication."""

    def setUp(self):
        # anonymous client: no force_authenticate call
        self.client = APIClient()

    def test_auth_required(self):
        """An unauthenticated request must be rejected with 401."""
        response = self.client.get(RECIPE_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeTest(TestCase):
    """Test the recipe API endpoints as an authenticated user."""
    def setUp(self):
        self.client = APIClient()
        # NOTE(review): manager is accessed as `.object` (not the Django
        # default `.objects`) — presumably the custom user model in
        # core.models names it that way; confirm against core.models.
        self.user = get_user_model().object.create_user(
            'recipe@test.com',
            'testpass123'
        )
        self.client.force_authenticate(self.user)
    def test_retrieve_recipes(self):
        """Test retrieving a list of recipes"""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPE_URL)
        recipe = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipe, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_recipes_limited_to_user(self):
        """Test retrieving recipe for user"""
        # A second user's recipe must not leak into the first user's list.
        user2 = get_user_model().object.create_user(
            'recipe2@test.com',
            'testpass123'
        )
        sample_recipe(user=user2)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPE_URL)
        recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)
    def test_view_recipe_detail(self):
        """Test viewing a recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        url = detail_url(recipe.id)
        res = self.client.get(url)
        serializer = RecipeDetailSerializer(recipe)
        self.assertEqual(res.data, serializer.data)
    def test_create_basic_recipe(self):
        """Test creating a recipe"""
        payload = {
            'title': 'Chocolate Cheesecake',
            'time_minutes': 5,
            'price': 7.50
        }
        res = self.client.post(RECIPE_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # Every posted field must round-trip onto the stored model.
        recipe = Recipe.objects.get(id=res.data['id'])
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))
    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Dessert')
        payload = {
            'title': 'Avocado line cheesecake',
            'tags': [tag1.id, tag2.id],
            'time_minutes': 60,
            'price': 21.1
        }
        res = self.client.post(RECIPE_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)
    def test_create_recipe_with_ingredients(self):
        """Test creating a recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user, name='Meat')
        ingredient2 = sample_ingredient(user=self.user, name='Salt')
        payload = {
            'title': 'Cheese Hamburger',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 15,
            'price': 9.09
        }
        res = self.client.post(RECIPE_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)
    def test_partial_update_recipe(self):
        """Test updating a recipe with PATCH"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')
        payload = {
            'title': 'Chicken Tikka',
            'tags': [new_tag.id]
        }
        url = detail_url(recipe.id)
        self.client.patch(url, payload)
        # PATCH replaces only the submitted fields; tag set is replaced.
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 1)
        self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
        """Test updating a recipe with PUT"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spaghetti carbonara',
            'time_minutes': 44,
            'price': 21
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)
        # PUT replaces the whole object: omitted tags are cleared.
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 0)
class RecipeImageUploadTest(TestCase):
    """Test image upload and list-filtering on the recipe API.

    NOTE(review): the two filter tests at the bottom do not touch images —
    they would read more naturally in PrivateRecipeTest; left here to keep
    the suite unchanged.
    """
    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().object.create_user(
            'test@test.com',
            'testpass123'
        )
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)
    def tearDown(self):
        # Remove the uploaded file from storage after each test.
        self.recipe.image.delete()
    def test_upload_image_to_recipe(self):
        """Test uploading an image to recipe"""
        url = image_upload_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            # Build a tiny valid JPEG on disk and rewind before posting.
            img = Image.new('RGB', (10, 10))
            img.save(ntf, format='JPEG')
            ntf.seek(0)
            res = self.client.post(url, {'image': ntf}, format='multipart')
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))
    def test_upload_image_bad_request(self):
        """Test uploading an invalid image to recipe"""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image': 'notimage'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_filter_recipes_by_tags(self):
        """Test returning recipes with specific tags"""
        recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Fish and chips')
        res = self.client.get(
            RECIPE_URL,
            {'tags': '{},{}'.format(tag1.id, tag2.id)}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        # Only the two tagged recipes come back; the untagged one is excluded.
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
    def test_filter_recipe_by_ingredients(self):
        """Test filtering recipe with ingredients"""
        recipe1 = sample_recipe(user=self.user, title='Spicy chicken wings')
        recipe2 = sample_recipe(user=self.user, title='Lemon Pie')
        ingredient1 = sample_ingredient(user=self.user, name='Chicken')
        ingredient2 = sample_ingredient(user=self.user, name='Lemon')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title='Steak & Mushrooms')
        res = self.client.get(
            RECIPE_URL,
            {'ingredients': f'{ingredient1.id},{ingredient2.id}'}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
| [
"tomas.monaco@wundermanthompson.com"
] | tomas.monaco@wundermanthompson.com |
4a7e085434aaacffba28d622b9a8734b6ac3dbee | f03020b8c6f8629af3d9a8cb842e507218e3fc1f | /config_check.py | e4154979f54f358bad505230a1715b38794c8821 | [] | no_license | garandria/project-diverse-linux-sampling | 066f29d2ec0aba86aa1a06155ca6c2bba51d6363 | 5057a0a58d757fb92287987a751a50b956915c63 | refs/heads/master | 2021-09-06T20:43:07.383330 | 2021-02-21T00:39:41 | 2021-02-21T00:39:41 | 214,476,090 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,765 | py | """CHECK CONFIGURATION"""
from pysat.formula import CNF
from pysat.solvers import *
import utils
import argparse
def _render_core(variables, core):
    """Render a pysat unsatisfiable core as a readable conjunction string."""
    return " ^ ".join('~' + variables[abs(x)] if x < 0 else variables[x]
                      for x in core)


def _check_step(solver, assumptions, n_new, variables, verbose):
    """Solve under *assumptions* after a step appended *n_new* literals.

    Returns None when the formula stays satisfiable.  Otherwise returns the
    (False, offending-assumptions, unsat-core) triple that config_check
    propagates verbatim to its caller: the offending part is the single last
    literal for BOOL symbols (n_new == 1) and a list of the last two literals
    for TRISTATE symbols (n_new == 2).
    """
    if solver.solve(assumptions=assumptions):
        return None
    if verbose:
        print("---")
        print("/!\ UNSATISFIABLE")
        print("Unsatisfiable core:", _render_core(variables, solver.get_core()))
    offending = assumptions[-1] if n_new == 1 else assumptions[-n_new:]
    return False, offending, solver.get_core()


def config_check(config, dimacs, alloptions_file, verbose=False):
    """Checks configuration's integrity.

    Incrementally asserts every non-helper symbol of the .config as an
    assumption over the DIMACS formula and stops at the first assumption
    that makes the formula unsatisfiable.

    :param config: .config file
    :type config: str
    :param dimacs: DIMACS file
    :type dimacs: str
    :param alloptions_file: alloptions csv file
    :type alloptions_file: str
    :param verbose: print progress and the unsat core, if any
    :type verbose: bool
    :return: SAT, LAST CLAUSE, UNSATISFIABILITY CORE
    :rtype: tuple
    """
    dconfig = utils.read_config(config)
    dimacs = utils.DimacsFla(dimacs)
    fla = dimacs.get_formula()
    variables = dimacs.get_variables()
    alloptions = utils.Alloptions(alloptions_file)
    assumptions = []
    with Solver(bootstrap_with=fla.clauses) as l:
        if verbose:
            print("* At the beginning, fla is", l.solve(assumptions=assumptions))
        # Pass 1: symbols that appear in the .config.
        for k, v in variables.items():
            if v.endswith("_MODULE") or "CHOICE_" in v:
                continue  # helper variables, not real Kconfig symbols
            if v not in dconfig:
                continue
            kconfig_type = alloptions.get_kconfig_type(v)
            if kconfig_type == "TRISTATE":
                kmodule = dimacs.get_kmodule(v)
                vprint = ""
                if dconfig[v] == 'y':
                    # Built-in: symbol true, its _MODULE companion false.
                    vprint = "[T] {}:{} ^ {}:{}_MODULE"\
                        .format(k, v, -kmodule, v)
                    assumptions.extend([k, -kmodule])
                elif dconfig[v] == 'm':
                    # Module: symbol false, its _MODULE companion true.
                    vprint = "[T] {}:{} ^ {}:{}_MODULE"\
                        .format(-k, v, kmodule, v)
                    assumptions.extend([-k, kmodule])
                if verbose:
                    print(vprint)
                failure = _check_step(l, assumptions, 2, variables, verbose)
                if failure is not None:
                    return failure
            if kconfig_type == "BOOL":
                if verbose:
                    print("[B] {}:{}".format(k, v))
                # NOTE(review): the symbol is asserted true whenever it is
                # present in dconfig, regardless of its value — presumably
                # read_config only keeps enabled options; confirm in utils.
                assumptions.append(k)
                failure = _check_step(l, assumptions, 1, variables, verbose)
                if failure is not None:
                    return failure
        # Pass 2: known options absent from the .config, assumed disabled.
        for k, v in variables.items():
            if v.endswith("_MODULE") or "CHOICE_" in v:
                continue  # helper variables, not real Kconfig symbols
            if (v in alloptions.get_options()) and (v not in dconfig):
                kconfig_type = alloptions.get_kconfig_type(v)
                if kconfig_type == "TRISTATE":
                    kmodule = dimacs.get_kmodule(v)
                    if verbose:
                        print("[T] {}:{} ^ {}:{}_MODULE"
                              .format(-k, v, -kmodule, v))
                    assumptions.extend([-k, -kmodule])
                    failure = _check_step(l, assumptions, 2, variables, verbose)
                    if failure is not None:
                        return failure
                elif kconfig_type == "BOOL":
                    if verbose:
                        print("[B] {}:{}".format(-k, v))
                    assumptions.append(-k)
                    failure = _check_step(l, assumptions, 1, variables, verbose)
                    if failure is not None:
                        return failure
    if verbose:
        print("---")
        print("[DONE] SAT")
    return True, None, None
def main():
    """Command-line entry point: parse the three required paths and run the check."""
    parser = argparse.ArgumentParser()
    for flag, description in (("--config", ".config file"),
                              ("--dimacs", "dimacs file"),
                              ("--alloptions", "alloptions-csv file")):
        parser.add_argument(flag, type=str, help=description, required=True)
    # parser.add_argument("--verbose", "-v", action="store_true")
    args = parser.parse_args()
    config_check(args.config, args.dimacs, args.alloptions, True)
main()
| [
"georges-aaron.randrianaina@irisa.fr"
] | georges-aaron.randrianaina@irisa.fr |
8903d69cc21adc5ab2090880649a80026486b7cd | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba0977.pngMap.py | 1cd8f233a130576370bea74bec2c2353600f8e4f | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba0977.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111100001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000011110100000000000010111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000011000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110111000000000000000000000000000000000000000011111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000011011111111011111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000011111111101111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000001111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000011111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000000111111111111111111',
'11111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000001111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110111111110000000000000000000000000000000111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110001111100000000000000000000000001000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000001011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100010000000000000000000000000000000000000000000111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000100011111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000000010011111111111111111111111111',
'11111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111111111111111111010000000000000000000000000000000000000000000000000000000000111111111111111111111111',
'11111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000111111111111111111111111',
'11111111111111111111111111111111111111111100000000000000000000000000000000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111',
'11111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000001000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000001000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000110000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000110000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111100010001011001111011111101101111111100000000000000000000000000000000000000001111111111111111',
'11111111111111111111111111111111111100000000111011110011111100001111111110000000000000000000000000000000010001000111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000101111111111',
'11111111111111111111111111111111111111111111111111111111111111111111001111000000000000000000000000000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111000000000100000000000000000000000000000000000000000000000111111111',
'11111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000011111111',
'11111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000011111111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000001111111111111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000010111111111111',
'11111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000010010100111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000101001111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000101111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111110001000000000000000000000000000000000111111111111111111111111',
]
| [
"bili33@87ouo.top"
] | bili33@87ouo.top |
704ca9353bebed65f05b644757f66dc04ed23b41 | be2a67ca92a5f9b71c93495f57100615bed3a5aa | /utilities/read_task.py | ad0beb90d74109bad963184b43803fdac2fa002a | [
"MIT"
] | permissive | devBezel/among_us_tasker | 42f73119dbcdddb879ec83740817ce13bfb19fbd | dd13c13a7d2d776143522ccfa27696d69524707b | refs/heads/master | 2023-01-21T00:01:39.534805 | 2020-11-25T22:50:05 | 2020-11-25T22:50:05 | 312,372,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | import pyautogui
from tasks.cables import Cables
from tasks.swipe_card import SwipeCard
from tasks.unlock_reactor import UnlockReactor
from tasks.download_data import DownloadData
from utilities.config import get_config
class ReadTask:
    """Detect which Among Us task is on screen (by center-pixel colour) and run it."""
    def __init__(self):
        # Screen size, used to locate the center pixel.
        self.resolution = pyautogui.size()
        # Flag set by the task automations' run() methods.
        self.task_performance = False
    def resolve_read_task(self):
        """Sample the screen centre and dispatch to the matching task automation."""
        colour = self.check_colour()
        if (self.task_performance is False):
            print("wykonuje sie")
            # Each task is identified by a reference colour stored in the
            # config file; eval() turns the stored string into a tuple.
            # NOTE(review): eval on config contents is unsafe if the config
            # is not fully trusted — consider ast.literal_eval.
            if (colour == eval(get_config("task", "cables"))):
                cables = Cables()
                self.task_performance = cables.run()
                cables.log()
            elif (colour == eval(get_config("task", "reactor"))):
                reactor = UnlockReactor()
                self.task_performance = reactor.run()
                reactor.log()
            elif (colour == eval(get_config("task", "swipe_card"))):
                swipe_card = SwipeCard()
                self.task_performance = swipe_card.run()
                swipe_card.log()
            elif (colour == eval(get_config("task", "download_data"))):
                download_data = DownloadData()
                self.task_performance = download_data.run()
                download_data.log()
            # NOTE(review): this unconditionally resets the flag right after
            # the dispatch, which makes the `is False` guard above always
            # pass on the next call — confirm whether that is intended.
            self.task_performance = False
    def check_colour(self):
        """Return (and print) the RGB colour of the pixel at screen centre."""
        pixel_color = pyautogui.pixel(int(self.resolution[0] / 2),
                                      int(self.resolution[1] / 2))
        print(pixel_color)
        return pixel_color
"dawids14@onet.pl"
] | dawids14@onet.pl |
4cb5f42ba92aed7a5cd3e5366421894ad0406739 | 0615318ec0d205d0e1ea06aaf2d9a1d8d54af254 | /help_timer_functions.py | 9d5a298bc6fe5967973b21c7302fc96e9df060d2 | [] | no_license | AnjaTRPES/Covid7dI_Berlin_app | b22049bfdf84a7911f0b83fa671ed2bfc74a9a69 | 088ea3b881a9bba596c5c1e36e111dabca555c9d | refs/heads/main | 2023-08-13T23:05:26.180796 | 2021-09-30T08:47:37 | 2021-09-30T08:47:37 | 405,866,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 12 17:59:04 2021
@author: Anja
"""
import time
import unittest
from datetime import timedelta
import pandas as pd
def unixTimeMillis(dt):
    """Convert a datetime to a unix timestamp (whole seconds, local time)."""
    seconds = time.mktime(dt.timetuple())
    return int(seconds)
def unixToDatetime(unix):
    """Convert a unix timestamp (seconds) to a pandas datetime, rounded to whole days."""
    stamp = pd.to_datetime(unix, unit='s', origin='unix')
    return stamp.round('1d')
def getMarks(start, end, N=20):
    """Build slider marks between two datetimes.

    start: Time in datetime format
    end: time in datetime format
    N: how many marks
    Returns a dict keyed by unix timestamp with a date label (YYYY-MM-DD)
    and a light-blue style for each mark.
    """
    lo = unixTimeMillis(start)
    hi = unixTimeMillis(end)
    step = int((hi - lo) / N)
    marks = {}
    for stamp in range(lo, hi, step):
        marks[stamp] = {'label': str(unixToDatetime(stamp))[:10],
                        'style': {'color': 'lightblue'}}
    return marks
class TestDateConversion(unittest.TestCase):
    """Unit tests for the unix-timestamp helpers above."""
    def test_unixConversion(self):
        # A midnight datetime must survive a unix round-trip exactly.
        g = pd.to_datetime('2021-06-23', format="%Y-%m-%d")
        self.assertEqual(g, unixToDatetime(unixTimeMillis(g)))
    def test_unixConversionHours(self):
        # A small intra-day offset rounds back to the same day.
        g = pd.to_datetime('2021-06-23', format="%Y-%m-%d")
        h = g+timedelta(hours=2)
        self.assertEqual(g, unixToDatetime(unixTimeMillis(h)))
    def test_getMarks_len(self):
        # Requesting 20 marks over one year yields exactly 20 entries.
        end = pd.to_datetime('2021-06-23', format="%Y-%m-%d")
        start = pd.to_datetime('2020-06-23', format="%Y-%m-%d")
        self.assertEqual(20, len(getMarks(start, end, 20)))
if __name__ == '__main__':
    # Run the tests when executed directly; exit=False keeps the interpreter
    # alive (useful when run from an interactive session).
    unittest.main(argv=[''], verbosity=1, exit=False)
| [
"roeder.anja.sci@gmail.com"
] | roeder.anja.sci@gmail.com |
abc60c06903b32cbddd7c108cebddc46d8e4f123 | 0dbbefd93135c30ee4c8b0e69c1e13f2eaea48c7 | /_site/affarsideer/md.py | a1357ec2afba8ed343e8567c89ed34b9c2679393 | [
"MIT"
] | permissive | jensbackbom/jensbackbomse.github.io | 51a0d604d62363d34a4b4d182f026b58ea01ad44 | ed4417bbced3a14326b9ea97c3535a1a1eca7996 | refs/heads/master | 2023-02-05T01:11:41.737440 | 2023-01-30T22:13:19 | 2023-01-30T22:13:19 | 124,751,595 | 0 | 0 | NOASSERTION | 2022-09-17T07:09:36 | 2018-03-11T11:55:22 | HTML | UTF-8 | Python | false | false | 15,574 | py | # -*- coding: utf-8 -*-
import re
import random
import unicodedata
def transliterate(string):
    """Approximate *string* in ASCII.
    The text is decomposed (NFKD) so accented letters split into a base
    letter plus combining marks; anything still outside ASCII is dropped.
    Examples::
        >>> transliterate('älämölö')
        'alamolo'
        >>> transliterate('Ærøskøbing')
        'rskbing'
    """
    decomposed = unicodedata.normalize('NFKD', string)
    ascii_bytes = decomposed.encode('ascii', 'ignore')
    return ascii_bytes.decode('ascii')
def parameterize(string, separator='-'):
    """Turn *string* into a slug usable in a 'pretty' URL.
    Non-ASCII characters are transliterated first; every run of characters
    outside [a-z0-9-_] then becomes *separator*, repeated separators are
    collapsed, leading/trailing separators are trimmed, and the result is
    lower-cased.
    Example::
        >>> parameterize(u"Donald E. Knuth")
        'donald-e-knuth'
    """
    slug = transliterate(string)
    slug = re.sub(r"(?i)[^a-z0-9\-_]+", separator, slug)
    if not separator:
        return slug.lower()
    sep = re.escape(separator)
    # Collapse runs of the separator, then strip it from both ends.
    slug = re.sub(r'%s{2,}' % sep, separator, slug)
    slug = re.sub(r"(?i)^{0}|{0}$".format(sep), '', slug)
    return slug.lower()
services=[
{
"gender": u"Ett",
"name":u"CRM-system",
"intro":u"Företagets kundregister är ett av de viktigaste IT-systemen, och den totala marknaden för CRM-system är gigantisk. Det bör finnas stora möjligheter att skräddarsy ett CRM-system för specifika branscher, och ändå kunna bygga ett relativt stort bolag!",
"business_model":u"Årsavgift per användare.",
"price_per_unit": 4000,
"competitors":u"Salesforce, Hubspot, Pipedrive m fl.",
"competitor_market_size":"",
"exceptions": ["VD:ar", "CTO:er", "marknadschefer"],
"role": False
},
{
"gender": u"Ett",
"name":u"community",
"intro":u"Betalda medlemscommunities har börjat växa till sig som affärsmodell de senaste åren, och det finns förmodligen en marknad för att göra detta mot nischade målgrupper.",
"business_model":u"Årssavgift per användare.",
"price_per_unit": 10000,
"competitors":u"VD-nätverk såsom Close, EGN etc.",
"competitor_market_size":"",
"exceptions": [],
"role": True
},
{
"gender": u"Ett",
"name":u"personalundersökningsverktyg",
"intro":u"Det blir allt vanligare att företag gör regelbundna personalundersökningar, dels en större årlig undersökning där man frågar allmäna saker kring anställningen, men även regelbundna mikroundersökningar för att kolla hur personalen mår. Kanske finns det en marknad för nischade undersökningar, där deltagande företag kan få skräddarsydd, branschanpassad statistik.",
"business_model":u"Årssavgift per användare.",
"price_per_unit": 1500,
"competitors":u"bolag såsom WinningTemp, &Frankly m fl.",
"competitor_market_size":"",
"exceptions": ["VD:ar", "CTO:er", "marknadschefer"],
"role": False
},
{
"gender": u"Ett",
"name":u"modernt utbildningsföretag",
"preposition":"riktat mot",
"intro":u"Alla yrkeskategorier behöver fortbildning, men många e-learningtjänster är hopplöst generella och statiska. Om man gör det bransch- eller rollspecifikt med kohort-baserade liveutbildningar över Zoom så borde det gå att öka relevansen avsevärt!",
"business_model":u"Årssavgift per användare.",
"price_per_unit": 15000,
"competitors":u"bolag i stil med LinkedIn learning, Udemy m fl.",
"competitor_market_size":"",
"exceptions": [],
"role": True
},
{
"gender": u"En",
"name":u"e-handelsplattform",
"intro":u"Digitaliseringen av samhället pågår för fullt, men fortfarande utgör e-handel bara c:a 15% av fysisk handel. Lösningar såsom Shopify gör det möjligt för små företag att börja med e-handel, men det skulle eventuellt kunna finnas utrymme för branschspecifika varianter, ungefär som Etsy för hantverk.",
"business_model":u"Årssavgift per användare (eller möjligen en transaktionsavgift).",
"price_per_unit": 5000,
"competitors":u"Shopify, Amazon, Etsy m fl.",
"competitor_market_size":"",
"exceptions": ["VD:ar", "CTO:er", "marknadschefer"],
"role": True
},
{
"gender": u"Ett",
"name":u"prissättningsverktyg",
"intro":u"För företag som har ett större antal produkter och säljer via flera kanaler är det värt att optimera sin prissättning.",
"business_model":u"Årssavgift per användare",
"price_per_unit": 10000,
"competitors":u"I huvudsak Excel.",
"competitor_market_size":"",
"exceptions": ["VD:ar", "CTO:er", "marknadschefer"],
"role": True
},
{
"gender": u"En",
"name":u"upphandlingsplattform",
"intro":u"Om man är en stor organisation så kan man ofta förhandla sig till bättre priser tack vare stora inköpsvolymer, men även många små företag skulle kunna gå ihop och pressa priser tillsammans. Detta borde vara en lämplig tjänst att bygga branschspecifikt.",
"business_model":u"Årssavgift per användare",
"price_per_unit": 5000,
"competitors":u"Aktörer såsom t.ex. Pressa.se.",
"competitor_market_size":"",
"exceptions": ["VD:ar", "CTO:er", "marknadschefer"],
"role": True
},
{
"gender": u"En",
"name":u"franchisekedja",
"preposition": "av",
"intro":u"Det har skapats många kedjor inom t.ex. restaurang- och cafénäringen. Detta borde även gå att göra för flera tjänsteföretag!",
"business_model":u"Medlemsavgift per användare",
"price_per_unit": 20000,
"competitors":u"Beror på bransch.",
"competitor_market_size":"",
"exceptions": ["hotell", "VD:ar", "CTO:er", "marknadschefer"],
"role": True
}
]
target_groups=[
{
"name":u"tandläkare",
"number":u"Det finns c:a 8 000 tandläkare i Sverige, men förmodligen kan man även ta betalt av andra yrkesroller inom tandvården. Totalt arbetar *25 000* personer i tandvården.",
"swe_customers":25000,
"number_source":u"http://www.tlv.se/"
},
{
"name":u"hotell",
"number":u"Det finns c:a 2 000 hotell i Sverige, och låt oss anta att det finns två säljare per hotell, dvs *4000* hotellsäljare.",
"swe_customers":4000,
"number_source":u"http://www.hotels.com/"
},
{
"name":u"krögare",
"number":u"Det finns c:a 20 000 restauranger i Sverige, och låt oss anta att det är en person per restaurang som blir användare.",
"swe_customers":20000,
"number_source":u"http://www.scb.se/"
},
{
"name":u"advokater",
"number":u"Det finns c:a 6 000 advokater i Sverige, och låt oss anta att det på varje advokat går två juniora jurister (som också kan bli användare).",
"swe_customers":18000,
"number_source":u"http://www.advokatsamfundet.se/"
},
{
"name":u"fastighetsmäklare",
"number":u"Det finns c:a 7 000 fastighetsmäklare i Sverige, och låt oss anta att det på varje mäklare går en halv mäklarassistent (som också kan bli användare).",
"swe_customers":10500,
"number_source":u"http://www.maklarsamfundet.se/"
},
{
"name":u"IT-konsulter",
"number":u"Det finns c:a 44 000 företag som säljer programvara och IT-tjänster i Sverige, och låt oss anta att varje företag i snitt har 4 potentiella användare.",
"swe_customers":176000,
"number_source":u"https://www.itot.se/om-oss/statistik/statistik-foretag//"
},
{
"name":u"elektriker",
"number":u"Det finns c:a 95 000 elinstallatörer och elektriker i Sverige.",
"swe_customers":95000,
"number_source":u"http://www.tidningenelektrikern.se/"
},
{
"name":u"VD:ar",
"number":u"Det finns 138 000 personer med titeln VD på LinkedIn i Sverige, men förmodligen är endast c:a 20 procent av dessa mottagliga för tjänsten.",
"swe_customers":28000,
"number_source":u"https://www.linkedin.com/",
"role": True,
},
{
"name":u"CTO:er",
"number":u"Det finns 8 500 personer med titeln CTO på LinkedIn i Sverige, men detta är troligen en underskattning av den verkliga siffran så vi räknar med minst 10 000 st.",
"swe_customers":10000,
"number_source":u"https://www.linkedin.com/",
"role": True,
},
{
"name":u"marknadschefer",
"number":u"Det finns c:a 13 000 personer med titeln CMO eller marknadschef på LinkedIn i Sverige.",
"swe_customers":13000,
"number_source":u"https://www.linkedin.com/",
"role": True,
},
{
"name":u"arkitekter",
"number":u"Det finns c:a 13 000 arkitekter i Sverige.",
"swe_customers":13000,
"number_source":u"https://www.arkitekten.se/",
},
{
"name":u"tillverkande industri",
"number":u"Det finns c:a 10 000 industriföretag med fler än 4 anställda i Sverige.",
"swe_customers":10000,
"number_source":u"https://www.tillvaxtverket.se/",
},
{
"name":u"åkerier",
"number":u"Det finns c:a 10 000 åkeriföretag i Sverige.",
"swe_customers":10000,
"number_source":u"https://www.akerier.se/",
},
{
"name":u"redovisningskonsulter",
"number":u"Det finns c:a 3 000 auktoriserade redovisningskonsulter i Sverige, så det totala antalet (inkl icke-auktoriserade) är förmodligen åtminstone 10 000 st.",
"swe_customers":10000,
"number_source":u"https://www.revisionsvarlden.se/",
}
]
"""
<li>hairdressers</li>
<li>construction firms</li>
<li>web designers</li>
<li>game developers</li>
<li>camping venues</li>
<li>sports associations</li>
<li>oil companies</li>
<li>horse breeders</li>
<li>hospitals</li>
<li>tourism agencies</li>
<li>manufacturers</li>
<li>grocery stores</li>
<li>ad agencies</li>
<li>IT consultants</li>
<li>fast food outlets</li>
<li>retail businesses</li>
<li>small businesses</li>
<li>Instagrammers</li>
<li>Twitter users</li>
<li>Substack users</li>
<li>Gumroad creators</li>
<li>E-mail marketers</li>
<li>developers</li>
<li>recruiters</li>
<li>COOs</li>
<li>CMOs</li>
<li>set their prices</li>
<li>manage suppliers</li>
<li>sell excess capacity</li>
<li>benchmark their costs</li>
<li>benchmark their prices</li>
<li>lower their costs</li>
<li>find co-founders</li>
<li>invest their profits</li>
<li>manage pensions</li>
<li>find more customers</li>
<li>localize their service</li>
<li>expand to new markets</li>
<li>find right suppliers</li>
<li>identify unhappy customers</li>
<li>survey their customers</li>
<li>reach more customers</li>
<li>pick marketing channels</li>
<li>get Twitter followers</li>
<li>build a community</li>
<li>become more creative</li>
<li>hide their side hustle</li>
<li>reach 1000 true fans</li>
<li>monetize their audience</li>
<li>grow their e-mail list</li>
"""
# Jekyll markdown template for a single generated business-idea page.
# All {placeholders} are filled in by the generation loop below; the template
# text itself is user-facing Swedish and must not be altered.
basecontent=u"""---
layout: page
title: "Affärsidé: {headline}"
---
{intro}
**Antal möjliga kunder i Sverige:** {customers_in_sweden}(källa: [{number_source}]({number_source}))
**Marknadspotential i Sverige:** {market_size_sweden} (vi räknar med en årsintäkt per användare på {fee} kr)
**Konkurrenter:** {competitors} {competitor_fill}
#### Några andra affärsidéer riktade mot samma målgrupp:
{random_ideas}
#### Några andra möjliga målgrupper för samma idé:
{random_target_groups}
#### Andra inlägg jag skrivit på detta tema:
- {link_one}
- {link_two}
- {link_three}
"""
# Template for the landing page that links out to example idea pages.
landing_page_content=u"""---
layout: page
title: {total_number} gratis affärsidéer för dig som vill starta eget
---
Jag har gjort en sammanställning av {total_number} gratis affärsidéer med tillhörande marknadsuppskattningar. Se nedan för några exempel som du sedan kan klicka dig vidare från. Håll till godo!
#### Exempel:
{list_of_ideas}
#### Andra inlägg jag skrivit på detta tema:
- {link_one}
- {link_two}
- {link_three}
"""
# Generate one markdown page per (service, target group) pair, skipping the
# target groups listed in each service's "exceptions".
for service in services:
    for target_group in [tg for tg in target_groups if not tg["name"] in service["exceptions"]]:
        # Headline = gender word + service name + preposition + target-group name.
        headline=u"{gender} {service} {prep} {target_group}".format(
            gender=service["gender"],
            service=service["name"],
            prep=service["preposition"] if "preposition" in service else u"för",
            target_group=target_group["name"]
        )
        filename=parameterize(headline)
        # NOTE(review): this format string has no {filename} placeholder, so
        # every page is written to the same "(unknown).md" file — this looks
        # mangled/broken; confirm the intended output path.
        file = open("(unknown).md".format(filename=filename), "w")
        content = basecontent.format(
            headline=headline,
            # One of three canned competitor filler sentences (Swedish).
            competitor_fill=random.choice([
                u"Har ännu inte undersökt om det finns direkta konkurrenter inom nischen.",
                u"Det kan säkert finnas mer nischade aktörer som man behöver se upp med.",
                u"Hemmagjorda lösningar kan säkert förekomma också."
            ]),
            competitors=service["competitors"],
            fee=service["price_per_unit"],
            number_source=target_group["number_source"],
            intro=service["intro"],
            # Cross-links to other services for the same target group.
            # NOTE(review): sample-then-filter can yield fewer than 3 links.
            random_ideas=", ".join([u"[{a} {b}](/affarsideer/{c}/)".format(a=i["gender"],b=i["name"], c=parameterize(u"{g} {s} {prep} {target_group}".format(
                g=i["gender"],
                s=i["name"],
                prep=i["preposition"] if "preposition" in i else u"för",
                target_group=target_group["name"]
            ))) for i in random.sample(services, 3) if not i["name"]==service["name"] and not target_group["name"] in i["exceptions"]]),
            # Cross-links to other target groups for the same service.
            random_target_groups=", ".join([u"[{a}](/affarsideer/{c}/)".format(a=i["name"], c=parameterize(u"{g} {s} {prep} {target_group}".format(
                g=service["gender"],
                s=service["name"],
                prep=service["preposition"] if "preposition" in service else u"för",
                target_group=i["name"]
            ))) for i in random.sample(target_groups, 5) if not i["name"]==target_group["name"] and not i["name"] in service["exceptions"]]),
            customers_in_sweden=target_group["number"],
            # Yearly market size in MSEK: unit price * customer count / 1e6.
            market_size_sweden=u"{size} Mkr per år".format(size=service["price_per_unit"]*target_group["swe_customers"]/1000000),
            link_one=u"[Hur man hittar affärsidéer]({% post_url 2020-11-08-hur-man-hittar-affarsideer %})",
            link_two=u"[10 sätt att blåsa en minoritetsägare]({% post_url 2020-10-13-10-satt-att-blasa-aktieagare %}) (när du väl startar företaget)",
            link_three=u"[Checklista för B2B SaaS-bolag]({% post_url 2020-12-02-checklista-for-b2b-saas-bolag %})",
        )
        file.write(content.encode("utf8"))
        file.close()
# Build the landing page with one random example combination per service.
# NOTE(review): the .format(filename=...) call below is a no-op (no
# placeholder in the string), and `services_subset = services` is an alias,
# so the shuffle below reorders the original `services` list in place.
file = open("gratis-ideer.md".format(filename=filename), "w")
services_subset=services
# One non-role target group per service (raises ValueError if there are
# fewer such groups than services).
target_group_subset=random.sample([tg for tg in target_groups if not "role" in tg], len(services))
random.shuffle(services_subset)
random.shuffle(target_group_subset)
# Reduce both lists to display strings: "<gender> <name> <prep>" and "<name>".
services_subset=[u"{p} {t} {prep}".format(p=s["gender"], t=s["name"], prep=s["preposition"] if "preposition" in s else u"för") for s in services_subset]
target_group_subset=[u"{t}".format(t=s["name"]) for s in target_group_subset]
random_combinations=list(zip(services_subset, target_group_subset))
content=landing_page_content.format(
    total_number=len(services)*len(target_groups),
    # Markdown bullet list joined with a literal newline, one entry per pair.
    list_of_ideas="""
""".join([u"- [{s} {t}](/affarsideer/{c}/)".format(s=i[0], t=i[1], c=parameterize(u"{s} {t}".format(
        s=i[0], t=i[1]
    ))) for i in random_combinations]),
    link_one=u"[Hur man hittar affärsidéer]({% post_url 2020-11-08-hur-man-hittar-affarsideer %})",
    link_two=u"[10 sätt att blåsa en minoritetsägare]({% post_url 2020-10-13-10-satt-att-blasa-aktieagare %}) (när du väl startar företaget)",
    link_three=u"[Checklista för B2B SaaS-bolag]({% post_url 2020-12-02-checklista-for-b2b-saas-bolag %})",
)
file.write(content.encode("utf8"))
file.close()
| [
"jens.backbom@gmail.com"
] | jens.backbom@gmail.com |
dbdec97274ee20719addb5d041d37d0fb81f63ce | 8ac8bc849bce42a1d1a37ed8b153acf130cfc5a7 | /insta_dj/post/models.py | 7131ac83c1ddb92c6ed0fc5acbcfa34afb2a03bf | [] | no_license | CitooZz/instaDJ | 19e48a5db54c2ec89e8ee217d4a665d4f16837a8 | 4c4a143c1fa4f6d2e13c9133d7bfcacef1371c65 | refs/heads/master | 2021-01-17T16:02:42.742678 | 2017-06-01T07:23:10 | 2017-06-01T07:23:10 | 83,179,244 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from .utils import image_upload_handler
class Post(models.Model):
    """A user-created image post with a caption."""

    # Reverse accessor: user.posts.
    creator = models.ForeignKey(User, related_name='posts')
    caption = models.CharField(max_length=50)
    image = models.ImageField(upload_to=image_upload_handler)
    created_at = models.DateTimeField(default=timezone.now)

    def __unicode__(self):
        return self.caption

    # Bug fix: both methods below were decorated with @staticmethod while
    # still taking `self`, so the natural call post.get_number_of_likes()
    # raised TypeError. They are plain instance methods now; the legacy
    # unbound form Post.get_number_of_likes(post) keeps working.
    def get_number_of_likes(self):
        # NOTE(review): despite the name, this returns the queryset of LIKE
        # reactions (call .count() on it for the number) — confirm callers.
        return self.reaction.filter(type=PostReaction.LIKE)

    def get_number_of_dislikes(self):
        # Queryset of DISLIKE reactions for this post.
        return self.reaction.filter(type=PostReaction.DISLIKE)
class Comment(models.Model):
    """A text comment left by a user on a post."""

    # The post being commented on; reverse accessor: post.comments.
    post = models.ForeignKey(Post, related_name='comments')
    # Author of the comment.
    user = models.ForeignKey(User)
    comment = models.CharField(max_length=150)
    created_at = models.DateTimeField(default=timezone.now)

    def __unicode__(self):
        return self.comment
class PostReaction(models.Model):
    """A like/dislike reaction from a user on a post."""

    LIKE = 'Like'
    DISLIKE = 'Dislike'
    REACTION_TYPES = [LIKE, DISLIKE]

    # Reverse accessor: post.reaction (used by Post.get_number_of_*).
    post = models.ForeignKey(Post, related_name='reaction')
    user = models.ForeignKey(User)
    # Field name shadows the builtin `type` in this class body only; choices
    # pair each value with itself, e.g. ('Like', 'Like').
    type = models.CharField(max_length=10, choices=zip(
        REACTION_TYPES, REACTION_TYPES))

    def __unicode__(self):
        return "{}: {} {}".format(self.type, self.user.username, self.post.caption)
| [
"havizvaisal@gmail.com"
] | havizvaisal@gmail.com |
ee1291fd0e95c7b23cc8c9d9423999e621f6112c | e1eaed6dde62fc54eb317d28dbd18e0740e3e8f3 | /official/vision/beta/evaluation/segmentation_metrics.py | ae1131dd227009686ac52ccbdfb66c8051ba2da9 | [
"Apache-2.0"
] | permissive | nlpming/models | cf5008d2e66d2b66b6d61423e214f2f9f9fbe472 | 3cbf0748529d787dd09fa3ed031e557f0ddfa268 | refs/heads/master | 2021-12-03T03:29:16.042489 | 2021-11-23T14:09:10 | 2021-11-23T14:09:10 | 206,007,973 | 0 | 0 | Apache-2.0 | 2019-09-03T06:47:46 | 2019-09-03T06:47:46 | null | UTF-8 | Python | false | false | 9,914 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for segmentation."""
import tensorflow as tf
from official.vision.beta.evaluation import iou
class MeanIoU(tf.keras.metrics.MeanIoU):
  """Mean IoU metric for semantic segmentation.

  This class utilizes tf.keras.metrics.MeanIoU to perform batched mean iou when
  both input images and groundtruth masks are resized to the same size
  (rescale_predictions=False). It also computes mean iou on groundtruth original
  sizes, in which case, each prediction is rescaled back to the original image
  size.
  """

  def __init__(
      self, num_classes, rescale_predictions=False, name=None, dtype=None):
    """Constructs Segmentation evaluator class.

    Args:
      num_classes: `int`, number of classes.
      rescale_predictions: `bool`, whether to scale back prediction to original
        image sizes. If True, y_true['image_info'] is used to rescale
        predictions.
      name: `str`, name of the metric instance..
      dtype: data type of the metric result.
    """
    self._rescale_predictions = rescale_predictions
    super().__init__(num_classes=num_classes, name=name, dtype=dtype)

  def update_state(self, y_true, y_pred):
    """Updates metric state.

    Args:
      y_true: `dict`, dictionary with the following name, and key values.
        - masks: [batch, width, height, 1], groundtruth masks.
        - valid_masks: [batch, width, height, 1], valid elements in the mask.
        - image_info: [batch, 4, 2], a tensor that holds information about
          original and preprocessed images. Each entry is in the format of
          [[original_height, original_width], [input_height, input_width],
          [y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
          desired_width] is the actual scaled image size, and [y_scale, x_scale]
          is the scaling factor, which is the ratio of scaled dimension /
          original dimension.
      y_pred: Tensor [batch, width_p, height_p, num_classes], predicated masks.
    """
    predictions = y_pred
    masks = y_true['masks']
    valid_masks = y_true['valid_masks']
    images_info = y_true['image_info']
    # Inputs may arrive as per-replica tuples/lists; merge into one batch.
    if isinstance(predictions, tuple) or isinstance(predictions, list):
      predictions = tf.concat(predictions, axis=0)
      masks = tf.concat(masks, axis=0)
      valid_masks = tf.concat(valid_masks, axis=0)
      images_info = tf.concat(images_info, axis=0)

    # Ignore mask elements is set to zero for argmax op.
    masks = tf.where(valid_masks, masks, tf.zeros_like(masks))

    if self._rescale_predictions:
      # This part can only run on cpu/gpu due to dynamic image resizing.
      for i in range(tf.shape(predictions)[0]):
        mask = masks[i]
        valid_mask = valid_masks[i]
        predicted_mask = predictions[i]
        image_info = images_info[i]
        # input_size / scale recovers the scaled (pre-padding) image size.
        rescale_size = tf.cast(
            tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
        image_shape = tf.cast(image_info[0, :], tf.int32)
        offsets = tf.cast(image_info[3, :], tf.int32)

        predicted_mask = tf.image.resize(
            predicted_mask,
            rescale_size,
            method=tf.image.ResizeMethod.BILINEAR)
        # Crop away padding so the prediction covers the original extent.
        predicted_mask = tf.image.crop_to_bounding_box(predicted_mask,
                                                       offsets[0], offsets[1],
                                                       image_shape[0],
                                                       image_shape[1])
        mask = tf.image.crop_to_bounding_box(mask, 0, 0, image_shape[0],
                                             image_shape[1])
        valid_mask = tf.image.crop_to_bounding_box(valid_mask, 0, 0,
                                                   image_shape[0],
                                                   image_shape[1])
        predicted_mask = tf.argmax(predicted_mask, axis=2)
        flatten_predictions = tf.reshape(predicted_mask, shape=[1, -1])
        flatten_masks = tf.reshape(mask, shape=[1, -1])
        flatten_valid_masks = tf.reshape(valid_mask, shape=[1, -1])
        # Valid mask acts as a per-pixel sample weight (invalid pixels = 0).
        super(MeanIoU, self).update_state(
            flatten_masks, flatten_predictions,
            tf.cast(flatten_valid_masks, tf.float32))
    else:
      # Fast path: resize predictions to the (common) mask size and score
      # the whole batch at once.
      predictions = tf.image.resize(
          predictions,
          tf.shape(masks)[1:3],
          method=tf.image.ResizeMethod.BILINEAR)
      predictions = tf.argmax(predictions, axis=3)
      flatten_predictions = tf.reshape(predictions, shape=[-1])
      flatten_masks = tf.reshape(masks, shape=[-1])
      flatten_valid_masks = tf.reshape(valid_masks, shape=[-1])
      super().update_state(flatten_masks, flatten_predictions,
                           tf.cast(flatten_valid_masks, tf.float32))
class PerClassIoU(iou.PerClassIoU):
  """Per Class IoU metric for semantic segmentation.

  This class utilizes iou.PerClassIoU to perform batched per class
  iou when both input images and groundtruth masks are resized to the same size
  (rescale_predictions=False). It also computes per class iou on groundtruth
  original sizes, in which case, each prediction is rescaled back to the
  original image size.
  """

  def __init__(
      self, num_classes, rescale_predictions=False, name=None, dtype=None):
    """Constructs Segmentation evaluator class.

    Args:
      num_classes: `int`, number of classes.
      rescale_predictions: `bool`, whether to scale back prediction to original
        image sizes. If True, y_true['image_info'] is used to rescale
        predictions.
      name: `str`, name of the metric instance..
      dtype: data type of the metric result.
    """
    self._rescale_predictions = rescale_predictions
    super().__init__(num_classes=num_classes, name=name, dtype=dtype)

  def update_state(self, y_true, y_pred):
    """Updates metric state.

    Args:
      y_true: `dict`, dictionary with the following name, and key values.
        - masks: [batch, width, height, 1], groundtruth masks.
        - valid_masks: [batch, width, height, 1], valid elements in the mask.
        - image_info: [batch, 4, 2], a tensor that holds information about
          original and preprocessed images. Each entry is in the format of
          [[original_height, original_width], [input_height, input_width],
          [y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
          desired_width] is the actual scaled image size, and [y_scale, x_scale]
          is the scaling factor, which is the ratio of scaled dimension /
          original dimension.
      y_pred: Tensor [batch, width_p, height_p, num_classes], predicated masks.
    """
    predictions = y_pred
    masks = y_true['masks']
    valid_masks = y_true['valid_masks']
    images_info = y_true['image_info']
    # Inputs may arrive as per-replica tuples/lists; merge into one batch.
    if isinstance(predictions, tuple) or isinstance(predictions, list):
      predictions = tf.concat(predictions, axis=0)
      masks = tf.concat(masks, axis=0)
      valid_masks = tf.concat(valid_masks, axis=0)
      images_info = tf.concat(images_info, axis=0)

    # Ignore mask elements is set to zero for argmax op.
    masks = tf.where(valid_masks, masks, tf.zeros_like(masks))

    if self._rescale_predictions:
      # This part can only run on cpu/gpu due to dynamic image resizing.
      for i in range(tf.shape(predictions)[0]):
        mask = masks[i]
        valid_mask = valid_masks[i]
        predicted_mask = predictions[i]
        image_info = images_info[i]
        # input_size / scale recovers the scaled (pre-padding) image size.
        rescale_size = tf.cast(
            tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
        image_shape = tf.cast(image_info[0, :], tf.int32)
        offsets = tf.cast(image_info[3, :], tf.int32)

        predicted_mask = tf.image.resize(
            predicted_mask,
            rescale_size,
            method=tf.image.ResizeMethod.BILINEAR)
        # Crop away padding so the prediction covers the original extent.
        predicted_mask = tf.image.crop_to_bounding_box(predicted_mask,
                                                       offsets[0], offsets[1],
                                                       image_shape[0],
                                                       image_shape[1])
        mask = tf.image.crop_to_bounding_box(mask, 0, 0, image_shape[0],
                                             image_shape[1])
        valid_mask = tf.image.crop_to_bounding_box(valid_mask, 0, 0,
                                                   image_shape[0],
                                                   image_shape[1])
        predicted_mask = tf.argmax(predicted_mask, axis=2)
        flatten_predictions = tf.reshape(predicted_mask, shape=[1, -1])
        flatten_masks = tf.reshape(mask, shape=[1, -1])
        flatten_valid_masks = tf.reshape(valid_mask, shape=[1, -1])
        # Valid mask acts as a per-pixel sample weight (invalid pixels = 0).
        super().update_state(flatten_masks, flatten_predictions,
                             tf.cast(flatten_valid_masks, tf.float32))
    else:
      # Fast path: resize predictions to the (common) mask size and score
      # the whole batch at once.
      predictions = tf.image.resize(
          predictions,
          tf.shape(masks)[1:3],
          method=tf.image.ResizeMethod.BILINEAR)
      predictions = tf.argmax(predictions, axis=3)
      flatten_predictions = tf.reshape(predictions, shape=[-1])
      flatten_masks = tf.reshape(masks, shape=[-1])
      flatten_valid_masks = tf.reshape(valid_masks, shape=[-1])
      super().update_state(flatten_masks, flatten_predictions,
                           tf.cast(flatten_valid_masks, tf.float32))
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
a3d679949562466f4ce55d64546316cf11b470e1 | 1b5404b8099de74d4e39e0a41b1d04c61defa8d4 | /Лабиринт/dump/labyrinth_find_solution.py | 6284287ae0344286006f098090bcd1a1b2c5c773 | [] | no_license | ipeterov/random-stuff | 5d07bdcfdcb145d06ed89095f2ad34b70ff0f0bd | dbb38d42331f636919fd149b23783e02ee2c9afb | refs/heads/master | 2023-05-14T00:41:51.122251 | 2023-05-04T12:10:26 | 2023-05-04T12:10:26 | 206,028,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | import pickle
def refactored_labyrinth(labyrinth):
    """Convert a {'d': down-wall, 'r': right-wall} grid into per-cell wall flags.

    Returns a grid of [top, right, bottom, left] flags per cell (1 = wall);
    the outer border is always walled. Bug fixes: the original (Russian)
    comment documented the indices as 0-top, 1-right, 2-LEFT, 3-BOTTOM, but
    the code sets index 2 from the cell's own down-wall (bottom) and index 3
    from the left neighbour's right-wall (left) — the documentation now
    matches the code. The local accumulator is also renamed so it no longer
    shadows the function's own name.
    """
    # walls[y][x] == [top, right, bottom, left]
    walls = []
    for y in range(len(labyrinth)):
        walls.append([])
        for x in range(len(labyrinth[0])):
            walls[y].append([0, 0, 0, 0])
    for y in range(len(labyrinth)):
        for x in range(len(labyrinth[0])):
            # Top wall: the cell above has a down-wall (labyrinth[-1] wraps,
            # but `y == 0` forces the border wall anyway).
            if labyrinth[y-1][x]['d'] == 1 or y == 0:
                walls[y][x][0] = 1
            # Right wall: own 'r' flag, or the right border.
            if labyrinth[y][x]['r'] == 1 or x == len(labyrinth[0]) - 1:
                walls[y][x][1] = 1
            # Bottom wall: own 'd' flag, or the bottom border.
            if labyrinth[y][x]['d'] == 1 or y == len(labyrinth) - 1:
                walls[y][x][2] = 1
            # Left wall: left neighbour's 'r' flag, or the left border.
            if labyrinth[y][x-1]['r'] == 1 or x == 0:
                walls[y][x][3] = 1
    return walls
def find_path(labyrinth, start_coords = [0,0]):
    """Depth-first search from start_coords to the hard-coded goal [99, 99].

    Returns the list of visited coordinates ending at the goal, or None when
    no path was recorded.

    NOTE(review): `start_coords=[0,0]` is a mutable default argument;
    `goal_reached` is never used; the bare `exit` below is a no-op expression
    (it does not stop the recursion); and the maze is assumed to be 100x100.
    """
    def move(current_coords, forbidden_move):
        # Reached the goal: copy the current path into the global answer.
        if current_coords == goal_coords:
            #~ print('aaaaaaaa')
            for element in path:
                gpath.append(element)
            exit
        path.append(current_coords)
        dead_end = False
        print(current_coords)
        y = current_coords[0]
        x = current_coords[1]
        while not dead_end:
            # Try all four directions; labyrinth[y][x] is [top, right,
            # bottom, left] wall flags. `forbidden_move` is the direction
            # we just came from, to avoid an immediate U-turn.
            for i in range(4):
                if labyrinth[y][x][i] != 1 and i != forbidden_move:
                    if i == 0:
                        move([y-1,x], 2)
                    elif i == 1:
                        move([y,x+1], 3)
                    elif i == 2:
                        move([y+1,x], 0)
                    elif i == 3:
                        move([y,x-1], 1)
                    # Sentinel: a move was attempted this sweep.
                    i = 5
            if i != 5:
                dead_end = True
        # Dead end: close each neighbour's wall that faces this cell so it is
        # never re-entered (try/except guards the grid borders), then backtrack.
        try:
            labyrinth[y + 1][x][0] = 1
        except:
            pass
        try:
            labyrinth[y][x - 1][1] = 1
        except:
            pass
        try:
            labyrinth[y - 1][x][2] = 1
        except:
            pass
        try:
            labyrinth[y][x + 1][3] = 1
        except:
            pass
        path.pop()

    #~ print(labyrinth)
    # Convert the {'d','r'} maze into per-cell wall flags first.
    labyrinth = refactored_labyrinth(labyrinth)
    #~ print(labyrinth)
    goal_coords = [99, 99]
    gpath = []
    path = []
    goal_reached = False
    move(start_coords, -1)
    if len(gpath) == 0:
        print('fuckfuckfuck')
        return None
    gpath.append(goal_coords)
    return gpath
# Driver: load the pickled maze, solve it, and pickle the resulting path.
# NOTE(review): the file objects handed to pickle are never closed —
# consider `with open(...)` blocks.
name = 'labyrinth_backtrack'
labyrinth = pickle.load(open(name, 'rb'))
path = find_path(labyrinth)
pickle.dump(path, open('labyrinth_solution','wb'))
| [
"ipeterov1@gmail.com"
] | ipeterov1@gmail.com |
cecde9f7d974f33696ac3cd600678c76957d3c9f | f4e70a716d323a6510c77deb5f38e273bdf7a2c4 | /zgit/zgit | 80d363742cc96dcd5670784c382bc18a3a7d8e47 | [] | no_license | zenelk/ScriptUtils | 6238cfd57447849c4abb68274c4bc440e001de3d | 07e0786d33da15c03a08f2ea96dec816a3984b26 | refs/heads/master | 2020-03-15T07:59:14.999531 | 2018-05-03T19:38:39 | 2018-05-03T19:38:39 | 132,041,423 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | #!/usr/bin/python
import sys
import zgit_lib as zgit
def main(argv):
    """Program entry point: build the zgit command-line argument parser."""
    parser = zgit.commands.ArgumentsParser()  # noqa: F841 - built, not yet wired up
if __name__ == "__main__":
exit(main(sys.argv)) | [
"hunter.lang@workday.com"
] | hunter.lang@workday.com | |
c62c1f9ce22afc1304a0eb6f7d5b43391f53f60a | 117e5d603bd4e3649624a962b20219ac68bcd172 | /payroll_module/tables.py | 3c95e950a216f0af0571ea8c34fc10ec03ee17ae | [] | no_license | aqlanhadi/cisco-nexus | c8bc0dd6f794e4141829a7975cb8bab5501c9186 | 1411fd3f5ca2eedc21490dd08786374a124a3302 | refs/heads/master | 2022-05-02T09:17:13.687235 | 2020-06-29T06:40:00 | 2020-06-29T06:40:00 | 209,244,232 | 0 | 1 | null | 2022-04-22T22:22:57 | 2019-09-18T07:20:33 | HTML | UTF-8 | Python | false | false | 232 | py | from django.contrib.auth.models import User
from table import Table
from table.columns import Column
class SalaryList(Table):
id = Column(field='id')
username = Column(field='username')
class Meta:
model = User | [
"aqlanhadi@gmail.com"
] | aqlanhadi@gmail.com |
e037ca045c3b88861ebd25a11acf649ebfc2733a | ada71fc4e068f7d800f8ed5af3882b1792129d40 | /algorithm_pracs/programmers_python/q06_info_query.py | 98e665ea2fedc15035d39047d8a9a2827977080a | [] | no_license | heo-mk/algorithm_pracs_js_python | fe2a304a9eac8b47b6409a20dbe99e9ef7e8e810 | 2a293f801834f89e056b301348037ef051f73477 | refs/heads/main | 2023-05-31T11:38:40.239811 | 2021-07-04T08:22:09 | 2021-07-04T08:22:09 | 374,882,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | from itertools import combinations
def make_all_cases(temp):
    """Return the 16 wildcard query patterns matching the 4 attributes in `temp`.

    For every subset of the four attribute positions, kept positions keep
    their value from `temp` and dropped positions become the wildcard "-".

    Bug fix: the accumulator was initialised as `case = []` (a typo), so the
    `cases.append(...)` call raised NameError; it is now `cases = []`.
    """
    cases = []
    for size in range(5):
        for kept in combinations([0, 1, 2, 3], size):
            pattern = ""
            for idx in range(4):
                # Keep the attribute when its position is in the subset.
                pattern += temp[idx] if idx in kept else "-"
            cases.append(pattern)
    return cases
def get_lower_bound(target, array):
    """Return the index of the first element of sorted `array` >= `target`.

    Equals len(array) when every element is smaller. The hand-rolled binary
    search is replaced by the standard library's bisect_left, which
    implements exactly this lower-bound search.
    """
    from bisect import bisect_left  # local import keeps module-level deps unchanged
    return bisect_left(array, target)
def solution(info, query):
    """Count, per query, the applicants matching it with score >= threshold.

    Builds every wildcard pattern for each applicant, maps pattern -> sorted
    list of scores, then answers each query with a lower-bound binary search.

    Bug fix: the duplicate-pattern branch used `all_cases.from_users[case]`
    (attribute access typo) and raised AttributeError as soon as two
    applicants shared a pattern; it is now a proper dictionary subscript,
    expressed via setdefault.
    """
    answer = []
    all_cases_from_users = {}
    for user_info in info:
        user_info_array = user_info.split()
        all_cases_from_user = make_all_cases(user_info_array)
        score = int(user_info_array[4])
        for case in all_cases_from_user:
            # Insert-or-append in one step.
            all_cases_from_users.setdefault(case, []).append(score)
    # Sort each score list once so queries can binary-search it.
    for key in all_cases_from_users.keys():
        all_cases_from_users[key].sort()
    for query_info in query:
        query_info_array = query_info.split()
        # Tokens 1, 3, 5 are the literal "and" separators; skip them.
        case = query_info_array[0] + query_info_array[2] + query_info_array[4] + query_info_array[6]
        if case in all_cases_from_users.keys():
            target_users = all_cases_from_users[case]
            # Matches = total minus those below the score threshold (token 7).
            answer.append(len(target_users) - get_lower_bound(int(query_info_array[7]), target_users))
        else:
            answer.append(0)
    return answer
| [
"79818840+heo-mk@users.noreply.github.com"
] | 79818840+heo-mk@users.noreply.github.com |
dcdf70e2c8d6c5252286ad9a31c1bfea728f69af | bbfd187e7cc5f858db5cf349ddfd5969d21f4df2 | /constants.py | 33b2036eb4507a89a60fe45afcf03086c0779cb6 | [
"MIT"
] | permissive | ketrint/ElectromagneticShowersReconstruction | e7718f9c345ddb6b30d57bf31fe9baacd51fa074 | b3b67acc43401f49c55109dfe864aa62965817d3 | refs/heads/master | 2023-01-22T17:10:03.522817 | 2020-12-09T19:20:25 | 2020-12-09T19:20:25 | 160,969,419 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | import numpy as np
Z = np.array([
0., 1293., 2586., 3879., 5172., 6465., 7758.,
9051., 10344., 11637., 12930., 14223., 15516., 16809.,
18102., 19395., 20688., 21981., 23274., 24567., 25860.,
27153., 28446., 29739., 31032., 32325., 33618., 34911.,
36204., 37497., 38790., 40083., 41376., 42669., 43962.,
45255., 46548., 47841., 49134., 50427., 51720., 53013.,
54306., 55599., 56892., 58185., 59478., 60771., 62064.,
63357., 64650., 65943., 67236., 68529., 69822., 71115.,
72408., 73701.
])
Z_centered = Z - 73701. / 2.
| [
"belavin@phystech.edu"
] | belavin@phystech.edu |
236ad3f39efbc804fa9714d96c5dab64c9961ea7 | a3ae9a875c03d63465aa8136084a4a08175ff984 | /osbuild/command.py | a19231c0fa0904f210cfb02f9bd267f40f4c2350 | [
"Apache-2.0"
] | permissive | dnarvaez/osbuild | e8700d42602dc191dc6f83d825d580a9447e6f01 | 08031487481ba23597f19cb3e106628e5c9d440d | refs/heads/master | 2021-07-11T12:40:19.638300 | 2014-05-05T23:08:47 | 2014-05-05T23:08:47 | 10,110,945 | 0 | 2 | Apache-2.0 | 2021-03-29T16:39:13 | 2013-05-16T21:29:19 | Python | UTF-8 | Python | false | false | 1,345 | py | # Copyright 2013 Daniel Narvaez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import subprocess
import time
import plog
from osbuild import config
def run(args, retry=0, watch_log=None):
    """Execute `args` as a logged subprocess, retrying on failure.

    Makes at most `retry + 1` attempts, sleeping one minute between them,
    and raises subprocess.CalledProcessError if the final attempt still
    exits non-zero. `watch_log` is forwarded to LoggedProcess.wait().
    """
    logging.info("Running command %s" % " ".join(args))

    for attempt in range(1, retry + 2):
        process = plog.LoggedProcess(args)
        process.execute()
        result = process.wait(watch_log=watch_log)
        if result == 0:
            break
        if attempt <= retry:
            print("Retrying (attempt %d) in 1 minute" % attempt)
            time.sleep(60)
        else:
            raise subprocess.CalledProcessError(result, args)
def run_with_runner(cmd):
    """Run `cmd` through the configured runner binary.

    The command is handed to the runner via the environment variable named
    by config.runner_variable, then the runner itself is executed via run().
    """
    os.environ[config.runner_variable] = cmd
    return run(config.runner_bin)
| [
"dwnarvaez@gmail.com"
] | dwnarvaez@gmail.com |
73db1141a062dab86543dba3571ab8faea784fdc | 4a5562bf8a7967c9c5d76265d89c366165bff9f8 | /template_python/setup.py | a4bf90beaf8b2625aaf3603393c5c536b60eec9a | [] | no_license | lokendert/me132_students | 640f935bd6e5c5d65329a161731afad4068a72e0 | 8e1075c4b61bef5c8f4d322cb168e2f942e1fad6 | refs/heads/master | 2020-03-31T03:40:46.680398 | 2011-02-04T20:13:29 | 2011-02-04T20:13:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | from setuptools import setup, find_packages
# Package metadata for the minimal ME132 Player client; installing this
# package registers the console script declared below.
setup(
    name='me132_template',
    author="The ME132 TAs",
    author_email="me132-tas@caltech.edu",
    url='www.its.caltech.edu/~me132',
    description="A minimal Player client",
    version="0.1",
    # Package sources live under src/ (see the package_dir mapping).
    package_dir={'':'src'},
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            # List executables in the format '<name> = <module>:<function>'
            'my_python_client = me132_template.basic_client:main'
        ]
    }
)
| [
"andrea@cds.caltech.edu"
] | andrea@cds.caltech.edu |
98d409f33d7fb8f90dc1c6996bbb0d7dc681e014 | b359cfb124e4249c2783640d1f5b25ecad7a1687 | /api/resources_portal/migrations/0008_auto_20201210_1930.py | 82b6c945d5ddc892595838713c86b2ef470467f6 | [
"BSD-3-Clause"
] | permissive | AlexsLemonade/resources-portal | 5e1abc67b6be0abeabcb06cc75dc4bdb9839800c | a35feea4eeb70b4b659e108835f203ff428676f4 | refs/heads/dev | 2023-08-16T21:57:44.485303 | 2023-07-07T20:46:35 | 2023-07-07T20:46:35 | 221,481,704 | 0 | 1 | BSD-3-Clause | 2023-08-24T20:23:39 | 2019-11-13T14:45:54 | Python | UTF-8 | Python | false | false | 554 | py | # Generated by Django 2.2.13 on 2020-12-10 19:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: sets max_length=100 on User.first_name and
    # User.last_name. Do not hand-edit applied migrations.

    dependencies = [
        ("resources_portal", "0007_auto_20201203_2010"),
    ]

    operations = [
        migrations.AlterField(
            model_name="user",
            name="first_name",
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name="user",
            name="last_name",
            field=models.CharField(max_length=100),
        ),
    ]
| [
"davidsmejia@gmail.com"
] | davidsmejia@gmail.com |
527a6d466c641647a0d5b4f0d252b604be53e76a | b5fdba0c75156e3933ff45f4973c88609573d363 | /reader.py | 8a177825768e0a929f50db85ed31db99a875fbf2 | [] | no_license | jotoy/sr | 6c403bf6e15212f2e5bc04beea705dab9699bc50 | 5cb596dd2c2bda7a072281c2d0651c9b1d17ed5e | refs/heads/master | 2020-03-17T10:25:00.517687 | 2018-05-15T12:18:35 | 2018-05-15T12:18:35 | 133,510,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,142 | py | import tensorflow as tf
import utils
class Reader():
    """Shuffled-batch image reader over a TFRecord file (TF1 queue API)."""

    def __init__(self, tfrecords_file, image_size=100,
                 min_queue_examples=1000, batch_size=1, num_threads=8, name=''):
        """
        Args:
          tfrecords_file: string, tfrecords file path
          min_queue_examples: integer, minimum number of samples to retain in the queue that provides of batches of examples
          batch_size: integer, number of images per batch
          num_threads: integer, number of preprocess threads
        """
        self.tfrecords_file = tfrecords_file
        self.image_size = image_size
        self.min_queue_examples = min_queue_examples
        self.batch_size = batch_size
        self.num_threads = num_threads
        self.reader = tf.TFRecordReader()
        self.name = name

    def feed(self):
        """
        Returns:
          images: 4D tensor [batch_size, image_width, image_height, image_depth]
        """
        with tf.name_scope(self.name):
            filename_queue = tf.train.string_input_producer([self.tfrecords_file])
            # NOTE(review): this local TFRecordReader is unused — records are
            # read through self.reader below.
            reader = tf.TFRecordReader()
            _, serialized_example = self.reader.read(filename_queue)
            features = tf.parse_single_example(
                serialized_example,
                features={
                    'image/file_name': tf.FixedLenFeature([], tf.string),
                    'image/encoded_image': tf.FixedLenFeature([], tf.string),
                })
            image_buffer = features['image/encoded_image']
            image = tf.image.decode_jpeg(image_buffer, channels=3)
            image = self._preprocess(image)
            images = tf.train.shuffle_batch(  # output tensor[ batch_size, x, y, z ]
                [image], batch_size=self.batch_size, num_threads=self.num_threads,
                capacity=self.min_queue_examples + 3*self.batch_size,
                min_after_dequeue=self.min_queue_examples
            )

            tf.summary.image('_input', images)
        return images

    def _preprocess(self, image):
        # Resize to a fixed square, convert to float, and pin the static shape.
        image = tf.image.resize_images(image, size=(self.image_size, self.image_size))
        image = utils.convert2float(image)
        image.set_shape([self.image_size, self.image_size, 3])
        return image
def test_reader():
    """Smoke test: stream batches from two TFRecord files and print them."""
    TRAIN_FILE_1 = 'data/tfrecords/apple.tfrecords'
    TRAIN_FILE_2 = 'data/tfrecords/orange.tfrecords'

    with tf.Graph().as_default():
        reader1 = Reader(TRAIN_FILE_1, batch_size=2)
        reader2 = Reader(TRAIN_FILE_2, batch_size=2)
        images_op1 = reader1.feed()
        images_op2 = reader2.feed()

        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)

        # Queue runner threads feed the input pipelines until stopped.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            step = 0
            while not coord.should_stop():
                batch_images1, batch_images2 = sess.run([images_op1, images_op2])
                # NOTE(review): despite the label, this prints the tensor
                # values, not their shapes.
                print("image shape: {}".format(batch_images1))
                print("image shape: {}".format(batch_images2))
                print("="*10)
                step += 1
        except KeyboardInterrupt:
            print('Interrupted')
            coord.request_stop()
        except Exception as e:
            coord.request_stop(e)
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()
            coord.join(threads)

if __name__ == '__main__':
    test_reader()
| [
"noreply@github.com"
] | jotoy.noreply@github.com |
2d81ec8412a37d6156e16a3c3376145e05b1831a | de0341aef4a487d1be271fc2bc3b3b60258ef6b0 | /programmers/Level 3/하노이의 탑/solve.py | c759336840a972a1e8bca98c26b09905c4ac4800 | [] | no_license | aver1001/github-practice | 485d8695cd4b9aa374c6b069832b3c0999fc4b6c | 62ab6de80e8246b627b880a7aff5d668b0fea889 | refs/heads/main | 2023-08-24T09:49:35.498578 | 2021-10-13T23:57:18 | 2021-10-13T23:57:18 | 379,813,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | result = []
def hanoi(n,start,end,mid):
    """Append the moves for shifting n discs from peg `start` to peg `end`.

    Moves are accumulated as [from, to] pairs in the module-level `result`
    list; `mid` is the spare peg. Returns the shared `result` list.
    """
    global result
    if n == 1:
        result.append([start,end])
    else:
        hanoi(n-1, start, mid, end)
        result.append([start,end])
        # NOTE(review): debug print fires only on the n > 1 branch.
        print([start,end])
        hanoi(n-1, mid, end, start)
    return result
def solution(n):
    """Return the list of [from, to] moves solving Tower of Hanoi for n discs.

    Bug fix: the function computed the move list but returned None; it now
    returns the shared accumulator. The accumulator is also cleared first so
    repeated calls no longer compound previous moves.
    """
    global result
    del result[:]  # reset the module-level move list between calls
    hanoi(n, 1, 3, 2)
    print(result)
    return result

solution(6)
| [
"69618305+aver1001@users.noreply.github.com"
] | 69618305+aver1001@users.noreply.github.com |
a858c760c091ec14a6001637fc484297a82730af | 1fd5963ac7bcdcedfd6ddaa10bf36a37927d704f | /back-end/app/models.py | 9db6ffd5831e4b90e54c61de29f33b0c84c652e3 | [
"MIT"
] | permissive | whmnoe4j/flask-vuejs-madblog | 75ef248336b71ae648c8d84e6e9957225feebcec | 585d95648a5cc48d26bf94ed0e21f7326a212fa8 | refs/heads/master | 2020-04-05T08:14:44.692921 | 2018-11-06T07:58:48 | 2018-11-06T07:58:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,932 | py | from datetime import datetime, timedelta
from hashlib import md5
import jwt
from werkzeug.security import generate_password_hash, check_password_hash
from flask import url_for, current_app
from app import db
class PaginatedAPIMixin(object):
    """Mixin adding a paginated JSON-API serialization to query-backed models."""
    @staticmethod
    def to_collection_dict(query, page, per_page, endpoint, **kwargs):
        """Serialize one page of *query* into the API collection envelope.

        query    -- a query object supporting .paginate(page, per_page, error_out)
        page     -- 1-based page number to serialize
        per_page -- items per page
        endpoint -- url_for endpoint name used to build navigation links
        kwargs   -- extra arguments forwarded to url_for (e.g. route ids)

        Returns a dict with 'items' (each via item.to_dict()), '_meta'
        pagination counters and '_links' self/next/prev URLs (None when a
        neighbouring page does not exist).
        """
        resources = query.paginate(page, per_page, False)
        data = {
            'items': [item.to_dict() for item in resources.items],
            '_meta': {
                'page': page,
                'per_page': per_page,
                'total_pages': resources.pages,
                'total_items': resources.total
            },
            '_links': {
                'self': url_for(endpoint, page=page, per_page=per_page,
                                **kwargs),
                'next': url_for(endpoint, page=page + 1, per_page=per_page,
                                **kwargs) if resources.has_next else None,
                'prev': url_for(endpoint, page=page - 1, per_page=per_page,
                                **kwargs) if resources.has_prev else None
            }
        }
        return data
class User(PaginatedAPIMixin, db.Model):
    """Application user: credentials, profile fields and JWT helpers."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))  # only the hash is stored, never the raw password
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    def __repr__(self):
        """Debug representation, e.g. <User john>."""
        return '<User {}>'.format(self.username)
    def set_password(self, password):
        """Hash *password* and store only the hash."""
        self.password_hash = generate_password_hash(password)
    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def avatar(self, size):
        """Return the Gravatar URL for this user's email at *size* pixels."""
        digest = md5(self.email.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(digest, size)
    def to_dict(self, include_email=False):
        """Serialize the user for the JSON API; include email only on request."""
        data = {
            'id': self.id,
            'username': self.username,
            'name': self.name,
            'location': self.location,
            'about_me': self.about_me,
            # isoformat() on these naive UTC datetimes carries no offset,
            # so a 'Z' suffix is appended to mark them as UTC for clients.
            'member_since': self.member_since.isoformat() + 'Z',
            'last_seen': self.last_seen.isoformat() + 'Z',
            '_links': {
                'self': url_for('api.get_user', id=self.id),
                'avatar': self.avatar(128)
            }
        }
        if include_email:
            data['email'] = self.email
        return data
    def from_dict(self, data, new_user=False):
        """Update writable fields from *data*; set the password only for new users."""
        for field in ['username', 'email', 'name', 'location', 'about_me']:
            if field in data:
                setattr(self, field, data[field])
        if new_user and 'password' in data:
            self.set_password(data['password'])
    def ping(self):
        """Refresh last_seen; committing the session is the caller's job."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)
    def get_jwt(self, expires_in=3600):
        """Return a signed HS256 JWT for this user, valid *expires_in* seconds.

        NOTE(review): .decode('utf-8') assumes PyJWT 1.x where encode()
        returns bytes; PyJWT >= 2 returns str and this raises
        AttributeError — confirm the pinned PyJWT version.
        """
        now = datetime.utcnow()
        payload = {
            'user_id': self.id,
            'name': self.name if self.name else self.username,
            'exp': now + timedelta(seconds=expires_in),
            'iat': now
        }
        return jwt.encode(
            payload,
            current_app.config['SECRET_KEY'],
            algorithm='HS256').decode('utf-8')
    @staticmethod
    def verify_jwt(token):
        """Decode *token* and return the matching User, or None when expired.

        NOTE(review): only ExpiredSignatureError is handled; malformed or
        tampered tokens raise other jwt exceptions to the caller — confirm
        that is intended.
        """
        try:
            payload = jwt.decode(
                token,
                current_app.config['SECRET_KEY'],
                algorithms=['HS256'])
        except jwt.exceptions.ExpiredSignatureError as e:
            return None
        return User.query.get(payload.get('user_id'))
| [
"wangy8961@163.com"
] | wangy8961@163.com |
bf3f86be25ab7a8ffe01b3fea5ec5bc1ae6b5c2b | a4a63eedacd544872fbfa33fc58d7cf1558829b7 | /backend/event/api/v1/urls.py | 046246934dbd8d54f00c2d7d0a6bb4781e87498b | [] | no_license | crowdbotics-apps/revil-18107 | 3d9bd52855e33debaa60f4f5c801629fb1aa60da | 2671f3410b43cd8ed2ccc51780a80366fb594684 | refs/heads/master | 2022-10-17T09:34:39.097853 | 2020-06-15T00:05:02 | 2020-06-15T00:05:02 | 272,301,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
VendorViewSet,
LocationViewSet,
FavoritesViewSet,
VendorDetailViewSet,
CategoryViewSet,
FaqViewSet,
PresenterViewSet,
ScheduleViewSet,
MyScheduleViewSet,
SponsorViewSet,
)
# DRF router generating the standard list/detail routes for each
# event-domain viewset of this API version.
router = DefaultRouter()
router.register("faq", FaqViewSet)
router.register("vendordetail", VendorDetailViewSet)
router.register("location", LocationViewSet)
router.register("presenter", PresenterViewSet)
router.register("myschedule", MyScheduleViewSet)
router.register("schedule", ScheduleViewSet)
router.register("category", CategoryViewSet)
router.register("favorites", FavoritesViewSet)
router.register("vendor", VendorViewSet)
router.register("sponsor", SponsorViewSet)
# All router-generated URLs are mounted at this module's root.
urlpatterns = [
    path("", include(router.urls)),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
2c897e1776cf7ff4cedc748abaf296faad2846c5 | 10a703dc7ca459b3c440bc0fcf1eccc222801cc1 | /3. teema/test_Auto.py | cdb011603f1b2240c54f4f51555761c229827852 | [] | no_license | alvarkoiv/oop_alused | bc435f08067bd32f02ccc848796d2c21e6894f28 | 6c215b4888cfe9c178db812799d570cc4189d7cf | refs/heads/master | 2023-03-03T02:46:42.249438 | 2021-02-16T09:24:57 | 2021-02-16T09:24:57 | 337,351,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | from Auto import Auto
# Smoke-test script for the Auto class: build two cars and exercise the
# odometer helpers.  Output is printed, not asserted.
alvari_uus_auto = Auto("Audi", "A6", 2017)
minu_uus_auto = Auto("Tesla", "Mudel 3", 2020)
print(alvari_uus_auto.kirjeldus())
alvari_uus_auto.odomeeter()
# NOTE(review): the attribute is spelled 'odomeeteri_nait' here but
# 'odomeetri_nait' near the end of this file — one of the two likely does
# not match the Auto class; verify against Auto's definition.
alvari_uus_auto.odomeeteri_nait = 3
alvari_uus_auto.uuenda_odomeeter(-2)  # presumably exercises the negative-update path — confirm
alvari_uus_auto.suurenda_odomeeter(30)
alvari_uus_auto.odomeeter()
print(minu_uus_auto.kirjeldus())
minu_uus_auto.odomeeter()
minu_uus_auto.odomeetri_nait = 5 | [
"alvar.koiv@khk.ee"
] | alvar.koiv@khk.ee |
540399eeb786c98974f1aef8c8edd58b283fbeac | 4c329e94e1e47cc8c5abae946b8eb8e58fb8041e | /problem_1.py | b9be39a1715f25d3e9544a1f33bc183a68239b80 | [] | no_license | AkiraKane/Udacity-Data-Structure-Algorithms-Project2 | 2cb979b57cd75062806aa0426924713bcec236c2 | 27c591c5bd10b10f49e75a7e13e11f4799e4e743 | refs/heads/master | 2022-06-08T05:31:16.395528 | 2020-05-03T07:54:03 | 2020-05-03T07:54:03 | 260,853,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | from collections import OrderedDict
class LRU_Cache(object):
    """Least-recently-used cache built on an OrderedDict.

    The OrderedDict keeps keys in recency order: the front holds the least
    recently used key, the back the most recently used one.
    """
    def __init__(self, capacity):
        # Maximum number of entries the cache may hold.
        self.capacity = capacity
        # key -> value; insertion/move order tracks recency of use.
        self.cache = OrderedDict()
    def get(self, key):
        # Retrieve item from provided key. Return -1 if nonexistent.
        if key not in self.cache:
            return -1
        # A successful lookup counts as a "use": move the key to the MRU end.
        self.cache.move_to_end(key)
        return self.cache[key]
    def set(self, key, value):
        # Insert/update the value for key, evicting the LRU entry if needed.
        if self.capacity == 0:
            print("Error: Capacity cannot be 0")
            return
        # Only evict when inserting a NEW key at capacity.  The original
        # code evicted even when updating an existing key, which wrongly
        # discarded an unrelated entry and shrank the cache.
        if key not in self.cache and len(self.cache) == self.capacity:
            self.cache.popitem(last=False)
        # Add/update the key and mark it most recently used.
        self.cache[key] = value
        self.cache.move_to_end(key)
our_cache = LRU_Cache(5)
# ---- Test 1: behaviour at capacity -------------------------------------
# set values and get values within the capacity
our_cache.set(1, 1);
our_cache.set(2, 2);
our_cache.set(3, 3);
our_cache.set(4, 4);
print(our_cache.get(1))  # returns 1
print(our_cache.get(2))  # returns 2
print(our_cache.get(9))  # returns -1 because 9 is not present in the cache
our_cache.set(5, 5)  # cache now at capacity; 5 becomes most recently used
our_cache.set(6, 6)  # exceeds the capacity: evicts the LRU key (3), 6 becomes MRU
print(our_cache.get(3))  # returns -1 because the cache reached capacity and 3 was the least recently used entry
print(our_cache.get(6))  # returns 6 since it was just used
# ---- Test 2: updating keys ---------------------------------------------
# update the existing key
our_cache.set(4,10)
print(our_cache.get(4))  # returns 10
# re-insert a key that was evicted earlier
our_cache.set(3,20)
print(our_cache.get(3))  # returns 20
# ---- Test 3: empty cache and zero capacity -----------------------------
# initialize a cache without setting any keys/values
our_cache_no_keys = LRU_Cache(3)
print(our_cache_no_keys.get(1))  # returns -1 since no key found in the cache
our_cache_no_capacity = LRU_Cache(0)
our_cache_no_capacity.set(1, 1)  # prints the capacity error message
print(our_cache_no_capacity.get(1))  # returns -1
| [
"AkiraKaneshiro@Lis-MacBook-Air.local"
] | AkiraKaneshiro@Lis-MacBook-Air.local |
db31e57b47ab17b38c5bf4a29af523b5560ac3f4 | 07700c2737581fdce38de2e0176819b78524d505 | /pysenec/cli.py | 73ba4d5573c4216168a550cb8a5c0e2157bc0465 | [
"Apache-2.0"
] | permissive | SilverXanga/pysenec | 298abdb60ec8af3c372fd287e2452d99679b6000 | 4522e3465baa3c7f41e98145da879747c7d6d5f6 | refs/heads/master | 2022-07-01T19:31:09.135041 | 2020-05-10T20:04:35 | 2020-05-10T20:08:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | import argparse
import asyncio
from pprint import pprint
import aiohttp
import pysenec
async def run(host, verbose=False):
    """Fetch the current state from a local Senec appliance and print it.

    host    -- hostname or IP of the Senec device on the local network
    verbose -- when True, also pretty-print the raw status payload
    """
    async with aiohttp.ClientSession() as session:
        senec = pysenec.Senec(host, session)
        # Pull a fresh snapshot before reading any of the properties below.
        await senec.update()
        print(f"System state: {senec.system_state}")
        # Power values are divided by 1000 for display in kW.
        print(f"House energy use: {senec.house_power / 1000 :.3f} kW")
        print(f"Solar Panel generate: {senec.solar_generated_power / 1000 :.3f} kW")
        print(
            f"Battery: {senec.battery_charge_percent :.1f} % charge: {senec.battery_charge_power / 1000 :.3f} kW, discharge {senec.battery_discharge_power / 1000 :.3f} kW"
        )
        print(
            f"Grid: exported {senec.grid_exported_power / 1000 :.3f} kW, imported {senec.grid_imported_power / 1000 :.3f} kW"
        )
        if verbose:
            pprint(senec.raw_status)
def main():
    """Command-line entry point: parse arguments and run one sensor query."""
    arg_parser = argparse.ArgumentParser(description="Senec Home Battery Sensor")
    arg_parser.add_argument("--host", help="Local Senec host (or IP)")
    arg_parser.add_argument("--all", help="Prints extended info", action="store_true")
    options = arg_parser.parse_args()
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(run(options.host, verbose=options.all))
if __name__ == "__main__":
main()
| [
"m.chwalisz@gmail.com"
] | m.chwalisz@gmail.com |
5896418942efd005a46d1c7d2e74df68364411c9 | 9ede3bec6dc9cd58f91ba3ee2b3f4b7eb3b6c889 | /lintreview/repo.py | aa745a9b199595da98ab54ef33439fa29c5edb40 | [
"MIT"
] | permissive | LyleH/lint_review_1 | d0816e68ee74c507357471b1183348b2c8d59af2 | a36945446745a9e8d8c1f1955e084add6563647b | refs/heads/master | 2021-01-19T11:43:42.780988 | 2016-09-22T05:28:23 | 2016-09-22T05:28:23 | 68,887,536 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py | import lintreview.github as github
import lintreview.git as git
import logging
log = logging.getLogger(__name__)
class GithubRepository(object):
    """Abstracting wrapper for the
    various interactions we have with github.
    This will make swapping in other hosting systems
    a tiny bit easier in the future.
    """
    def __init__(self, config, user, repo_name):
        # config: application settings mapping; user/repo_name identify
        # the GitHub repository this wrapper operates on.
        self.config = config
        self.user = user
        self.repo_name = repo_name
    def repository(self):
        """Get the underlying repository model.

        Fetched anew on every call; the latest result is stashed on
        self.repo as a side effect.
        """
        self.repo = github.get_repository(
            self.config,
            self.user,
            self.repo_name)
        return self.repo
    def pull_request(self, number):
        """Get a pull request by number, wrapped in GithubPullRequest.
        """
        pull = self.repository().pull_request(number)
        return GithubPullRequest(pull)
    def ensure_label(self, label):
        """Create label if it doesn't exist yet.
        """
        repo = self.repository()
        if not repo.label(label):
            repo.create_label(
                name=label,
                color="bfe5bf", # a nice light green
            )
    def create_status(self, sha, state, description):
        """Create a commit status on *sha* with the given state/description.

        The status context comes from APP_NAME, defaulting to 'lintreview'.
        """
        context = self.config.get('APP_NAME', 'lintreview')
        repo = self.repository()
        repo.create_status(
            sha,
            state,
            None,
            description,
            context)
class GithubPullRequest(object):
    """Thin adapter around a pull-request object from the client library.

    Keeps the rest of the code decoupled from the underlying models, which
    would make supporting other hosting services later a little easier.
    """
    def __init__(self, pull_request):
        self.pull = pull_request
    @property
    def number(self):
        return self.pull.number
    @property
    def is_private(self):
        return self.pull.as_dict()['head']['repo']['private']
    @property
    def head(self):
        return self.pull.as_dict()['head']['sha']
    @property
    def clone_url(self):
        return self.pull.as_dict()['head']['repo']['clone_url']
    @property
    def target_branch(self):
        return self.pull.as_dict()['base']['ref']
    def commits(self):
        return self.pull.commits()
    def review_comments(self):
        return self.pull.review_comments()
    def files(self):
        return list(self.pull.files())
    def remove_label(self, label_name):
        """Remove *label_name* from the PR's issue, if it is present."""
        issue = self.pull.issue()
        existing = [label.name for label in issue.labels()]
        if label_name in existing:
            log.debug("Removing issue label '%s'", label_name)
            issue.remove_label(label_name)
    def add_label(self, label_name):
        """Attach *label_name* to the PR's issue."""
        self.pull.issue().add_labels(label_name)
    def create_comment(self, body):
        self.pull.create_comment(body)
    def create_review_comment(self, body, commit_id, path, position):
        self.pull.create_review_comment(body, commit_id, path, position)
| [
"lyle.henkeman01@gmail.com"
] | lyle.henkeman01@gmail.com |
c7aba38ca201ad0b7f4f372f442c65fca270ea6f | 64ba65f065e0d09fa840fdf7a1e6f6d072e73103 | /aws lambda deployment package/fuzzywuzzy/process.py | a0236920e3f24e10647916462c5a323f1244f0d9 | [] | no_license | josephdouce/alexa-find-any-film | b3cf4db8333cc6c5425fc9e0f69ae861ab08c649 | e060d974969139ad585de27473e63ec3d82bf833 | refs/heads/master | 2021-01-13T10:04:18.174514 | 2016-11-13T09:42:29 | 2016-11-13T09:42:29 | 72,143,333 | 2 | 2 | null | 2016-11-10T18:37:33 | 2016-10-27T19:57:15 | Python | UTF-8 | Python | false | false | 11,161 | py | #!/usr/bin/env python
# encoding: utf-8
"""
process.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from . import fuzz
from . import utils
import heapq
def extractWithoutOrder(query, choices, processor=None, scorer=None, score_cutoff=0):
    """Yield the matches for *query* among *choices*, unordered.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable of choices, or a dict-like object whose values
            are matched against the query (in which case the key for each
            match is yielded as well).
        processor: Optional function f(choice) -> value used in matching,
            e.g. ``lambda x: x[0]`` to match the first element of a list.
            Defaults to the identity because the default scorer does its
            own preprocessing.
        scorer: Optional f(query, processed_choice) -> int scoring function.
            Defaults to fuzz.WRatio, which expects both arguments to be
            strings.
        score_cutoff: Matches scoring below this number are dropped
            (default 0).

    Yields:
        (choice, score) 2-tuples for sequence inputs, or
        (choice, score, key) 3-tuples for dict-like inputs.  For example,
        searching for 'bird' in {'bard': 'train', 'dog': 'man'} may yield
        ('train', 22, 'bard') and ('man', 0, 'dog').
    """
    def no_process(x):
        return x

    # PEP 479: raising StopIteration inside a generator becomes a
    # RuntimeError on Python 3.7+, so end iteration with a plain return.
    if choices is None:
        return

    # Catch generators without lengths.
    try:
        if len(choices) == 0:
            return
    except TypeError:
        pass

    # Default scorer: fuzz.WRatio.
    if not scorer:
        scorer = fuzz.WRatio

    # fuzz.WRatio already processes strings, so no extra step is needed.
    # (The original had a second, unreachable fallback to
    # utils.full_process here; it was dead code and has been removed.)
    if not processor:
        processor = no_process

    # Decide the iteration style up front instead of wrapping the whole
    # loop in `except AttributeError`: the original would silently restart
    # in "list mode" if the processor/scorer raised AttributeError midway
    # through a dict, yielding bogus results.
    if hasattr(choices, 'items'):
        # Dictionary-like input: yield the key along with each match.
        for key, choice in choices.items():
            processed = processor(choice)
            score = scorer(query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    else:
        # Plain iterable; just iterate over it.
        for choice in choices:
            processed = processor(choice)
            score = scorer(query, processed)
            if score >= score_cutoff:
                yield (choice, score)
def extract(query, choices, processor=None, scorer=None, limit=5):
    """Return the best matches for *query* among *choices*, best first.

    Delegates matching to extractWithoutOrder() and orders the results by
    descending score.  Sequence inputs produce (choice, score) tuples;
    dict-like inputs produce (choice, score, key) tuples — e.g. searching
    for 'bird' in {'bard': 'train', 'dog': 'man'} may return
    [('train', 22, 'bard'), ('man', 0, 'dog')].

    processor/scorer are forwarded unchanged (see extractWithoutOrder()).
    limit caps the number of results (default 5); pass None to get every
    match, sorted by score.
    """
    matches = extractWithoutOrder(query, choices, processor, scorer)
    if limit is None:
        return sorted(matches, key=lambda match: match[1], reverse=True)
    return heapq.nlargest(limit, matches, key=lambda match: match[1])
def extractBests(query, choices, processor=None, scorer=None, score_cutoff=0, limit=5):
    """Return the best-scoring matches above *score_cutoff*, best first.

    Convenience wrapper over extractWithoutOrder(): same arguments, but
    matches scoring below score_cutoff are dropped and the survivors are
    returned ordered by descending score.  limit caps the number of
    results (default 5); pass None for all of them.
    """
    candidates = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    if limit is None:
        return sorted(candidates, key=lambda match: match[1], reverse=True)
    return heapq.nlargest(limit, candidates, key=lambda match: match[1])
def extractOne(query, choices, processor=None, scorer=None, score_cutoff=0):
    """Return the single best match above *score_cutoff*, or None.

    See extract() for the meaning of the arguments.  The result is one
    (match, score) or (match, score, key) tuple when some choice scores at
    least score_cutoff; otherwise None ("not a good enough match").
    """
    candidates = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    # max(..., default=None) covers the empty case the original handled by
    # catching ValueError.
    return max(candidates, key=lambda match: match[1], default=None)
def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
    """This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
    and remove duplicates. Specifically, it uses the process.extract to identify duplicates that
    score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
    since we assume this item contains the most entity information and returns that. It breaks string
    length ties on an alphabetical sort.

    Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
    returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less
    sensitive.

    Args:
        contains_dupes: A list of strings that we would like to dedupe.
        threshold: the numerical value (0,100) point at which we expect to find duplicates.
            Defaults to 70 out of 100
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.
            By default, fuzz.token_set_ratio() is used and expects both query and
            choice to be strings.

    Returns:
        A deduplicated list. For example:

            In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
            In: fuzzy_dedupe(contains_dupes)
            Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']

    NOTE(review): when duplicates are found the return value is a dict key
    view (see `keys.keys()` below), not a list — callers that need list
    semantics beyond iteration/len should wrap it.
    """
    extractor = []

    # iterate over items in *contains_dupes*
    for item in contains_dupes:
        # return all duplicate matches found
        matches = extract(item, contains_dupes, limit=None, scorer=scorer)
        # strict comparison: an item scoring exactly *threshold* is NOT
        # treated as a duplicate
        filtered = [x for x in matches if x[1] > threshold]
        # every item matches itself, so a single survivor means no duplicates
        if len(filtered) == 1:
            extractor.append(filtered[0][0])
        else:
            # alpha sort first so that the later length sort (which is
            # stable) breaks length ties alphabetically
            filtered = sorted(filtered, key=lambda x: x[0])
            # length sort, longest first
            filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
            # take first item as our 'canonical example'
            extractor.append(filter_sort[0][0])

    # uniquify *extractor* while preserving first-seen order (dicts keep
    # insertion order)
    keys = {}
    for e in extractor:
        keys[e] = 1
    extractor = keys.keys()

    # check that extractor differs from contain_dupes (e.g. duplicates were found)
    # if not, then return the original list
    if len(extractor) == len(contains_dupes):
        return contains_dupes
    else:
        return extractor
| [
"josephdouce@gmail.com"
] | josephdouce@gmail.com |
66439779c4dabc84c959cddc7b42f38bce45fd4d | cd187d99cd5e83ca5b8409085d765e74da643496 | /system/migrations/0011_auto_20200225_1256.py | 78ab716c80fd9f9125ec195240a165692843e38d | [] | no_license | 1701210370/pys | 541c5a910d2b06de2b54ad158e8fcf5309298d01 | b75dc6f00fed4b9c5aa6f8d966347f52a15e5cd6 | refs/heads/master | 2022-04-28T23:44:59.953714 | 2020-04-27T00:38:35 | 2020-04-27T00:38:35 | 258,520,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # Generated by Django 2.1.4 on 2020-02-25 04:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Post.topic and Reply.topic to a
    CharField(max_length=20) with a default of '1'."""
    # Must be applied after the previous migration in the 'system' app.
    dependencies = [
        ('system', '0010_auto_20200224_2105'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='topic',
            field=models.CharField(default='1', max_length=20),
        ),
        migrations.AlterField(
            model_name='reply',
            name='topic',
            field=models.CharField(default='1', max_length=20),
        ),
    ]
| [
"2428211025@qq.com"
] | 2428211025@qq.com |
5dfb76ca2541d9d9e26795b1062fe2c8377f0531 | 270c1473ae711d206933808c2a6413f51044ec0d | /justitie/just/items.py | 78ea0d4f793abdd30481a928581dbd4aeae8103f | [] | no_license | dira/czl-scrape | 664a6869670f4fd06da614d39a1a459f1333528e | cb898844497402cc0995159b5f4f5a1f010cefd6 | refs/heads/master | 2020-05-22T03:03:22.373033 | 2017-03-11T11:18:37 | 2017-03-11T11:18:37 | 84,665,560 | 0 | 0 | null | 2017-03-11T16:54:36 | 2017-03-11T16:54:36 | null | UTF-8 | Python | false | false | 283 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JustItem(scrapy.Item):
    """Scrapy item for this spider; no fields are declared yet.

    Fields are declared like:
        name = scrapy.Field()
    """
    pass
| [
"s.alexandra.stefanescu@gmail.com"
] | s.alexandra.stefanescu@gmail.com |
2ad8fb86568b9db89c98af5b07780a905127da55 | 0675dad295526480242c9da48310a1c958423e72 | /dmrg_j2j2/build_lattice.py | 0d410148abf8f05a76145d0c57c8cbc956ac5397 | [] | no_license | GiggleLiu/numeric_master | 627e054ab7404b1bbf8b8eec65f05346b35640a3 | 47b9eaf1eeaceacf5ff43f2226620e5c37064095 | refs/heads/master | 2021-08-24T11:31:37.107583 | 2017-11-21T06:26:38 | 2017-11-21T06:26:38 | 111,409,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | #!/usr/bin/env python
test_str = '''
<LATTICES>
<GRAPH name = "heisenberg" dimension="1" vertices="6" edges="5">
<VERTEX id="1" type="0"><COORDINATE>0</COORDINATE></VERTEX>
<VERTEX id="2" type="1"><COORDINATE>2</COORDINATE></VERTEX>
<VERTEX id="3" type="1"><COORDINATE>3</COORDINATE></VERTEX>
<VERTEX id="4" type="1"><COORDINATE>4</COORDINATE></VERTEX>
<VERTEX id="5" type="1"><COORDINATE>5</COORDINATE></VERTEX>
<VERTEX id="6" type="0"><COORDINATE>6</COORDINATE></VERTEX>
<EDGE source="1" target="2" id="1" type="0" vector="1"/>
<EDGE source="2" target="3" id="2" type="0" vector="1"/>
<EDGE source="3" target="4" id="3" type="0" vector="1"/>
<EDGE source="4" target="5" id="4" type="0" vector="1"/>
<EDGE source="5" target="6" id="5" type="0" vector="1"/>
</GRAPH>
</LATTICES>
'''
import lxml.etree as ET
def build_j1j2(size, filename):
    """Write a 1D J1-J2 lattice graph with *size* sites as XML to *filename*.

    Emits one VERTEX per site (ids 1..size) and, for every site i, a
    type-0 EDGE to the next site and a type-1 EDGE to the site after that,
    both with periodic (wrap-around) boundaries.

    NOTE(review): the GRAPH header declares edges="size-1" while the loop
    below emits 2*size EDGE elements, and each J1/J2 pair reuses the same
    id — confirm against the consuming lattice reader.
    NOTE(review): lxml's ET.tostring() returns bytes but the file is
    opened in text mode; this only works on Python 2 — verify the target
    runtime.
    """
    lattice = ET.Element('LATTICES')
    graph = ET.SubElement(lattice, 'GRAPH', attrib={'name':'J1J2',
        'dimension':'1', 'vertices':'%d'%size, 'edges':'%d'%(size-1)})
    for i in range(size):
        # Sites are 1-indexed in the XML; coordinates are 0-indexed.
        vi = ET.SubElement(graph, 'VERTEX', attrib={'id':'%d'%(i+1),
            'type':'0'})
        co = ET.SubElement(vi, 'COORDINATE')
        co.text = '%d'%i
    for i in range(1,size+1):
        # type 0: bond from site i to i+1 (with periodic wrap).
        ET.SubElement(graph, 'EDGE', attrib={'source':'%d'%(i),'target':'%d'%((i)%size+1),
            'id':'%d'%i, 'type':'0', 'vector':'1'})
        # type 1: bond from site i to i+2 (with periodic wrap).
        ET.SubElement(graph, 'EDGE', attrib={'source':'%d'%(i),'target':'%d'%((i+1)%size+1),
            'id':'%d'%i, 'type':'1', 'vector':'1'})
    with open(filename, 'w') as f:
        f.write(ET.tostring(lattice, pretty_print=True))
if __name__ == '__main__':
import sys
nsite = int(sys.argv[1])
build_j1j2(nsite, 'lattices/j1j2_%d.xml'%nsite)
| [
"cacate0129@gmail.com"
] | cacate0129@gmail.com |
2818d25958d54d76630a75fd90a98d9c9c089d7c | 02c30e713ceb5274b9aaac3c3907450a9995a423 | /impressao/service.py | 6aaaae074c1a08d8739655f380c083352b50d306 | [] | no_license | kanatius/Impressoes_DSC | 89d78ad1edebb5af615ec5ffc32b96f68f769fa7 | 76c5061c68e30c689d152fb5efe33741bcf595c5 | refs/heads/master | 2023-04-07T06:44:11.565424 | 2021-04-13T16:32:42 | 2021-04-13T16:32:42 | 331,646,139 | 1 | 0 | null | 2021-01-30T17:43:53 | 2021-01-21T14:03:44 | HTML | UTF-8 | Python | false | false | 11,373 | py | import os
from impressao.repository import ImpressaoRepository, TipoImpressaoRepository, TurmaRepository
from usuario.repository import UsuarioRepository
from impressao.forms import ImpressaoForm
from ProjetoDSC.settings import MEDIA_ROOT
from datetime import datetime
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.conf import settings
from django.http import HttpResponse, Http404
from django.db.models import Q
from django.core.mail import send_mail
from ProjetoDSC.settings import EMAIL_HOST_USER
def isCliente(request):
    """Return True when the request carries an authenticated client user.

    Checks ``request.user is not None`` BEFORE touching any attribute: the
    original evaluated ``request.user.is_anonymous`` first, which raised
    AttributeError whenever ``request.user`` was None.
    """
    user = request.user
    if user is not None and not user.is_anonymous:
        if user.cliente:
            return True
    return False
def isFuncionario(request):
    """Return True when the request carries an authenticated staff user
    (regular employee or student employee).

    Checks ``request.user is not None`` BEFORE touching any attribute: the
    original evaluated ``request.user.is_anonymous`` first, which raised
    AttributeError whenever ``request.user`` was None.
    """
    user = request.user
    if user is not None and not user.is_anonymous:
        if user.funcionario or user.funcionario_aluno:
            return True
    return False
class ImpressaoService():
    def __init__(self):
        super().__init__()
        # One repository per aggregate this service touches; they wrap all
        # ORM access.
        self.impressaoRepository = ImpressaoRepository()
        self.tipoRepository = TipoImpressaoRepository()
        self.turmaRepository = TurmaRepository()
        self.usuarioRepository = UsuarioRepository()
    def getImpressoes(self, request, offset=0, limit=0, desc=False):
        """List the print jobs visible to the requesting user.

        Clients get their own jobs (paginated by offset/limit); staff get
        every job not yet printed.  Returns None for anonymous users.
        Side effect: every job returned to staff is stamped as viewed now.
        """
        if not request.user.is_authenticated:
            return None
        if isCliente(request):
            return self.impressaoRepository.list(cliente_id=request.user.id, offset=offset, limit=limit, desc=desc)
        if isFuncionario(request) or request.user.funcionario_aluno:
            impressoes = []
            if request.user.funcionario_aluno:
                # Student employees must not see exams: exclude the
                # "Prova" (exam) and "Teste" (test) job types.
                prova = self.tipoRepository.getByNameEquals("Prova")
                teste = self.tipoRepository.getByNameEquals("Teste")
                impressoes = self.impressaoRepository.list(imprimida=False, desc=desc, no_tipo=[prova, teste])
            else:
                impressoes = self.impressaoRepository.list(imprimida=False, desc=desc)
            for impressao in impressoes:
                impressao.visualizado_em = datetime.now()  # record when staff first saw the selected jobs
                impressao.save()
            return impressoes
def getById(self, request, id):
if not request.user.is_authenticated:
return None
impressao = self.impressaoRepository.getById(id=id)
if impressao is None:
return None
if impressao.cliente.id == request.user.id or request.user.funcionario:
#se a impressao for do usuario ou se o usuario for cliente
if request.user.funcionario_aluno and (impressao.tipo.nome.lower() == "prova" or impressao.tipo.nome.lower == "teste"):
return None #se o usuario for aluno e impressao for prova ou teste, retorna None
#retorna impressao se:
#1 - usuario logado for funcionario
#2 - se o funcionario for aluno e a impressao não for prova ou teste
#3 - se o usuario for cliente e a impressao for dele
return impressao
return None
    def create(self, request):
        """Create a new print job from the submitted form.

        Only clients may create jobs; the authenticated user becomes the
        job's owner.  Returns True on success, False otherwise.
        """
        if isCliente(request):
            form = ImpressaoForm(request.POST, files=request.FILES)
            if form.is_valid():
                print("Formulário válido")  # debug trace ("valid form"); runtime string left unchanged
                impressao = form.save(commit=False)
                # Attach the owner before persisting; the form does not carry it.
                impressao.cliente = request.user
                impressao.save()
                return True
        return False
    def update(self, request, id=None):
        """Let the owning client edit a not-yet-printed job.

        Accepts data from either the API (request.data) or the website
        form (request.POST).  Returns True when saved, None for a missing
        id / anonymous user / unknown job, False when editing is denied.
        """
        if id is None:
            return None
        if not request.user.is_authenticated:
            return None
        impressao = self.impressaoRepository.getById(id=id)
        if impressao is None:
            return None
        request_from = ""
        # NOTE(review): the bare except below also hides unrelated errors
        # while probing for request.data; catching AttributeError would be
        # safer.
        try: # API request (request.data present)
            data = request.data
            request_from = "api"
        except: # plain website form
            data = request.POST
            request_from = "website"
        if request.user.cliente and impressao.imprimida != True: # clients may only edit jobs that were not printed yet
            if "colorida" in data:
                # Checkbox arrives as 'on' (form) or 'true' (API).
                colorida = True if (data["colorida"] == 'on' or data["colorida"] == 'true') else False
                impressao.colorida = colorida
            elif request_from == "website":
                # HTML forms omit unchecked checkboxes entirely, so a
                # missing "colorida" field from the website means the box
                # was unchecked, i.e. False.
                impressao.colorida = False
            if 'comentario' in data:
                impressao.comentario = data["comentario"]
            if 'turma' in data:
                turma = self.turmaRepository.getById(data["turma"])
                impressao.turma = turma
            if request.FILES.get("uri_arquivo"):
                file = request.FILES.get("uri_arquivo")
                default_storage.delete(str(impressao.uri_arquivo)) # delete old file
                path = default_storage.save(file.name, ContentFile(file.read())) # save file in default dir
                impressao.uri_arquivo = path # set file
            if 'qtd_copias' in data:
                impressao.qtd_copias = data["qtd_copias"]
            if 'tipo' in data:
                impressao.tipo = TipoImpressaoRepository().getById(id=int(data["tipo"]))
            # UPDATE FILE
            impressao.save()
            return True
        # Not currently used
        # if request.user.funcionario:
        #     #fields the employee may edit
        #     # if "vizualizao_em" in data:
        #     #     impressao.vizualizao_em = data["vizualizao_em"]
        #     if "imprimida" in data:
        #         impressao.imprimida : data["imprimida"]
        #     if "prazo_entrega" in data:
        #         impressao.prazo_entrega : data["prazo_entrega"]
        #     impressao.save()
        #     return True
        return False
def setImprimida(self, request):
if not request.user.is_authenticated:
return False
if not isFuncionario(request) and not request.user.funcionario_aluno:
return False
impressao = self.impressaoRepository.getById(request.POST.get("id_impressao"))
if impressao is None:
return False
prova = self.tipoRepository.getByNameEquals("Prova")
teste = self.tipoRepository.getByNameEquals("Teste")
if request.user.funcionario_aluno and (impressao.tipo == prova or impressao.tipo == teste):
return False #retorna false se a impressão for prova ou teste e o funcionario for aluno
impressao.imprimida = True
impressao.set_imprimida_em = datetime.now()
impressao.qtd_laudas_imprimidas = int(request.POST.get("qtd_laudas_imprimidas"))
impressao.save()
# send_mail(
# 'Sua Impressão está pronta!',
# 'Olá ' + impressao.cliente.getFullName() + ", sua impressão : cod-" + str(impressao.id) + " " + impressao.uri_arquivo.name + " está pronta!",
# EMAIL_HOST_USER,
# [impressao.cliente.email],
# fail_silently=False,
# )
return True
def delete(self, request, id):
if isCliente(request):
impressao = self.impressaoRepository.getById(id=id)
if impressao is not None:
if impressao.cliente_id == request.user.id and not impressao.imprimida:
#se a impressao for do cliente e ainda não foi imprimida
default_storage.delete(str(impressao.uri_arquivo)) #delete old file
impressao.delete()
return True
return False
#DOWNLOAD FILES
def download(self, request, path):
if not request.user.is_authenticated:
raise Http404 #retorna erro se o usuário não estiver autenticado
impressao = self.impressaoRepository.getByPath(path)
#----- NÃO MUDE SE NÃO SOUBER O QUE ESTÁ FAZENDO -----#
if impressao is None:
raise Http404 #retorna erro se a impressao não existe
if not isCliente(request) and not isFuncionario(request) and not request.user.funcionario_aluno:
raise Http404 #retorna erro se não for funcionário, cliente ou funcionario aluno
if (isCliente(request) and impressao.cliente.id != request.user.id) and not isFuncionario(request):
raise Http404 #retorna erro se o cliente não for dono da impressao e tbm não é funcionario
prova = self.tipoRepository.getByNameEquals("Prova")
teste = self.tipoRepository.getByNameEquals("Teste")
if request.user.funcionario_aluno and (impressao.tipo == prova or impressao.tipo == teste):
raise Http404 #retorna erro se o funcionario aluno tentar baixar uma prova ou teste
#----- NÃO MUDE SE NÃO SOUBER O QUE ESTÁ FAZENDO -----#
file_path = os.path.join(settings.MEDIA_ROOT, path)
if os.path.exists(file_path):
with open(file_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type="application/default")
response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
return response
raise Http404
def getReportData(self, request, cliente_id, data_inicio, data_fim, turma_id=None):
if not isFuncionario(request):
return None
cliente_nome = self.usuarioRepository.getById(cliente_id).getFullName()
impressoes = self.impressaoRepository.list(
imprimida=True,
cliente_id=cliente_id,
order_by="set_imprimida_em",
imprimida_min_date= data_inicio,
imprimida_max_date= data_fim,
turma_id= turma_id
)
turma_nome = ""
if turma_id is not None:
turma_nome = self.turmaRepository.getById(turma_id)
else:
turma_nome = "Todas"
total_laudas = 0
for impressao in impressoes:
total_laudas += impressao.qtd_laudas_imprimidas
return {
"cliente_nome" : cliente_nome,
"impressoes" : impressoes,
"total_laudas" : total_laudas,
"turma_nome" : turma_nome
}
class TipoImpressaoService:
def __init__(self):
super().__init__()
self.tipoImpressaoRepository = TipoImpressaoRepository()
def getAllTipos(self, request):
return self.tipoImpressaoRepository.getAll()
class TurmaService:
def __init__(self):
super().__init__()
self.turmaRepository = TurmaRepository()
def getById(self, request, id):
return self.turmaRepository.getById(id)
def getAllTurmas(self, request):
return self.turmaRepository.getAll()
| [
"natanalmeidadelima@gmail.com"
] | natanalmeidadelima@gmail.com |
4dba83fc8918b25d6c255e3cf16e95d4f5230aee | 5d2837138a1191853f99cecb0c47057aa4eb37ea | /backup-version 1/bin/substrates.py | 7386cc2966313a30672e63eafad5e58c08f05082 | [
"BSD-3-Clause"
] | permissive | yafeiwang89/liver_medium | 23ac58cc5409ad03405439193a0948d79aa4e028 | 06fe994a828e6149819bd4e2e0aa7dc161c6a0ad | refs/heads/master | 2021-07-16T11:19:12.670966 | 2020-06-29T23:07:51 | 2020-06-29T23:07:51 | 181,332,552 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,679 | py | # substrates Tab
import os, math
from ipywidgets import Layout, Label, Text, Checkbox, Button, BoundedIntText, HBox, VBox, Box, \
FloatText, Dropdown, interactive
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import matplotlib.colors as mplc
import scipy.io
import xml.etree.ElementTree as ET # https://docs.python.org/2/library/xml.etree.elementtree.html
import glob
import platform
import zipfile
from debug import debug_view
hublib_flag = True
if platform.system() != 'Windows':
try:
# print("Trying to import hublib.ui")
from hublib.ui import Download
except:
hublib_flag = False
else:
hublib_flag = False
class SubstrateTab(object):
def __init__(self):
self.output_dir = '.'
# self.output_dir = 'tmpdir'
# self.fig = plt.figure(figsize=(7.2,6)) # this strange figsize results in a ~square contour plot
# initial value
self.field_index = 4
# self.field_index = self.mcds_field.value + 4
# define dummy size of mesh (set in the tool's primary module)
self.numx = 0
self.numy = 0
tab_height = '500px'
tab_height = '700px'
constWidth = '180px'
constWidth2 = '150px'
tab_layout = Layout(width='900px', # border='2px solid black',
height=tab_height, ) #overflow_y='scroll')
max_frames = 1
self.mcds_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False)
svg_plot_size = '900px'
svg_plot_size = '600px'
svg_plot_size = '500px'
self.mcds_plot.layout.width = svg_plot_size
self.mcds_plot.layout.height = svg_plot_size
self.max_frames = BoundedIntText(
min=0, max=99999, value=max_frames,
description='Max',
layout=Layout(width='160px'),
)
self.max_frames.observe(self.update_max_frames)
self.field_min_max = {'dummy': [0., 1.]}
# hacky I know, but make a dict that's got (key,value) reversed from the dict in the Dropdown below
self.field_dict = {0:'dummy'}
self.mcds_field = Dropdown(
options={'dummy': 0},
value=0,
# description='Field',
layout=Layout(width=constWidth)
)
# print("substrate __init__: self.mcds_field.value=",self.mcds_field.value)
# self.mcds_field.observe(self.mcds_field_cb)
self.mcds_field.observe(self.mcds_field_changed_cb)
# self.field_cmap = Text(
# value='viridis',
# description='Colormap',
# disabled=True,
# layout=Layout(width=constWidth),
# )
self.field_cmap = Dropdown(
options=['viridis', 'jet', 'YlOrRd'],
value='viridis',
# description='Field',
layout=Layout(width=constWidth)
)
#self.field_cmap.observe(self.plot_substrate)
# self.field_cmap.observe(self.plot_substrate)
self.field_cmap.observe(self.mcds_field_cb)
self.cmap_fixed = Checkbox(
description='Fix',
disabled=False,
# layout=Layout(width=constWidth2),
)
self.save_min_max= Button(
description='Save', #style={'description_width': 'initial'},
button_style='success', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Save min/max for this substrate',
disabled=True,
layout=Layout(width='90px')
)
def save_min_max_cb(b):
# field_name = self.mcds_field.options[]
# field_name = next(key for key, value in self.mcds_field.options.items() if value == self.mcds_field.value)
field_name = self.field_dict[self.mcds_field.value]
# print(field_name)
# self.field_min_max = {'oxygen': [0., 30.], 'glucose': [0., 1.], 'H+ ions': [0., 1.], 'ECM': [0., 1.], 'NP1': [0., 1.], 'NP2': [0., 1.]}
self.field_min_max[field_name][0] = self.cmap_min.value
self.field_min_max[field_name][1] = self.cmap_max.value
# print(self.field_min_max)
self.save_min_max.on_click(save_min_max_cb)
self.cmap_min = FloatText(
description='Min',
value=0,
step = 0.1,
disabled=True,
layout=Layout(width=constWidth2),
)
self.cmap_min.observe(self.mcds_field_cb)
self.cmap_max = FloatText(
description='Max',
value=38,
step = 0.1,
disabled=True,
layout=Layout(width=constWidth2),
)
self.cmap_max.observe(self.mcds_field_cb)
def cmap_fixed_cb(b):
if (self.cmap_fixed.value):
self.cmap_min.disabled = False
self.cmap_max.disabled = False
self.save_min_max.disabled = False
else:
self.cmap_min.disabled = True
self.cmap_max.disabled = True
self.save_min_max.disabled = True
# self.mcds_field_cb()
self.cmap_fixed.observe(cmap_fixed_cb)
field_cmap_row2 = HBox([self.field_cmap, self.cmap_fixed])
# field_cmap_row3 = HBox([self.save_min_max, self.cmap_min, self.cmap_max])
items_auto = [
self.save_min_max, #layout=Layout(flex='3 1 auto', width='auto'),
self.cmap_min,
self.cmap_max,
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='90%')
field_cmap_row3 = Box(children=items_auto, layout=box_layout)
# field_cmap_row3 = Box([self.save_min_max, self.cmap_min, self.cmap_max])
# mcds_tab = widgets.VBox([mcds_dir, mcds_plot, mcds_play], layout=tab_layout)
mcds_params = VBox([self.mcds_field, field_cmap_row2, field_cmap_row3, self.max_frames]) # mcds_dir
# mcds_params = VBox([self.mcds_field, field_cmap_row2, field_cmap_row3, self.max_frames], layout=tab_layout) # mcds_dir
# mcds_params = VBox([self.mcds_field, field_cmap_row2, field_cmap_row3,]) # mcds_dir
# self.tab = HBox([mcds_params, self.mcds_plot], layout=tab_layout)
# self.tab = HBox([mcds_params, self.mcds_plot])
help_label = Label('select slider: drag or left/right arrows')
row1 = Box([help_label, Box( [self.max_frames, self.mcds_field, self.field_cmap], layout=Layout(border='0px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))] )
row2 = Box([self.cmap_fixed, self.cmap_min, self.cmap_max], layout=Layout(border='0px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))
if (hublib_flag):
self.download_button = Download('mcds.zip', style='warning', icon='cloud-download',
tooltip='Download data', cb=self.download_cb)
download_row = HBox([self.download_button.w, Label("Download all substrate data (browser must allow pop-ups).")])
# self.tab = VBox([row1, row2, self.mcds_plot])
self.tab = VBox([row1, row2, self.mcds_plot, download_row])
else:
# self.tab = VBox([row1, row2])
self.tab = VBox([row1, row2, self.mcds_plot])
#---------------------------------------------------
def update_dropdown_fields(self, data_dir):
# print('update_dropdown_fields called --------')
self.output_dir = data_dir
tree = None
try:
fname = os.path.join(self.output_dir, "initial.xml")
tree = ET.parse(fname)
xml_root = tree.getroot()
except:
print("Cannot open ",fname," to read info, e.g., names of substrate fields.")
return
xml_root = tree.getroot()
self.field_min_max = {}
self.field_dict = {}
dropdown_options = {}
uep = xml_root.find('.//variables')
comment_str = ""
field_idx = 0
if (uep):
for elm in uep.findall('variable'):
# print("-----> ",elm.attrib['name'])
self.field_min_max[elm.attrib['name']] = [0., 1.]
self.field_dict[field_idx] = elm.attrib['name']
dropdown_options[elm.attrib['name']] = field_idx
field_idx += 1
# constWidth = '180px'
# print('options=',dropdown_options)
self.mcds_field.value=0
self.mcds_field.options=dropdown_options
# self.mcds_field = Dropdown(
# # options={'oxygen': 0, 'glucose': 1},
# options=dropdown_options,
# value=0,
# # description='Field',
# layout=Layout(width=constWidth)
# )
def update_max_frames_expected(self, value): # called when beginning an interactive Run
self.max_frames.value = value # assumes naming scheme: "snapshot%08d.svg"
self.mcds_plot.children[0].max = self.max_frames.value
# def update(self, rdir):
def update(self, rdir=''):
# with debug_view:
# print("substrates: update rdir=", rdir)
if rdir:
self.output_dir = rdir
all_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml')))
if len(all_files) > 0:
last_file = all_files[-1]
self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: "snapshot%08d.svg"
# with debug_view:
# print("substrates: added %s files" % len(all_files))
# self.output_dir = rdir
# if rdir == '':
# # self.max_frames.value = 0
# tmpdir = os.path.abspath('tmpdir')
# self.output_dir = tmpdir
# all_files = sorted(glob.glob(os.path.join(tmpdir, 'output*.xml')))
# if len(all_files) > 0:
# last_file = all_files[-1]
# self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: "output%08d.xml"
# self.mcds_plot.update()
# return
# all_files = sorted(glob.glob(os.path.join(rdir, 'output*.xml')))
# if len(all_files) > 0:
# last_file = all_files[-1]
# self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: "output%08d.xml"
# self.mcds_plot.update()
def download_cb(self):
file_xml = os.path.join(self.output_dir, '*.xml')
file_mat = os.path.join(self.output_dir, '*.mat')
# print('zip up all ',file_str)
with zipfile.ZipFile('mcds.zip', 'w') as myzip:
for f in glob.glob(file_xml):
myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive
for f in glob.glob(file_mat):
myzip.write(f, os.path.basename(f))
def update_max_frames(self,_b):
self.mcds_plot.children[0].max = self.max_frames.value
def mcds_field_changed_cb(self, b):
# print("mcds_field_changed_cb: self.mcds_field.value=",self.mcds_field.value)
if (self.mcds_field.value == None):
return
self.field_index = self.mcds_field.value + 4
field_name = self.field_dict[self.mcds_field.value]
# print('mcds_field_cb: '+field_name)
self.cmap_min.value = self.field_min_max[field_name][0]
self.cmap_max.value = self.field_min_max[field_name][1]
self.mcds_plot.update()
def mcds_field_cb(self, b):
#self.field_index = self.mcds_field.value
# self.field_index = self.mcds_field.options.index(self.mcds_field.value) + 4
# self.field_index = self.mcds_field.options[self.mcds_field.value]
self.field_index = self.mcds_field.value + 4
# field_name = self.mcds_field.options[self.mcds_field.value]
# self.cmap_min.value = self.field_min_max[field_name][0] # oxygen, etc
# self.cmap_max.value = self.field_min_max[field_name][1] # oxygen, etc
# self.field_index = self.mcds_field.value + 4
# print('field_index=',self.field_index)
self.mcds_plot.update()
def plot_substrate(self, frame):
# global current_idx, axes_max, gFileId, field_index
fname = "output%08d_microenvironment0.mat" % frame
xml_fname = "output%08d.xml" % frame
# fullname = output_dir_str + fname
# fullname = fname
full_fname = os.path.join(self.output_dir, fname)
full_xml_fname = os.path.join(self.output_dir, xml_fname)
# self.output_dir = '.'
# if not os.path.isfile(fullname):
if not os.path.isfile(full_fname):
print("Once output files are generated, click the slider.") # No: output00000000_microenvironment0.mat
return
# tree = ET.parse(xml_fname)
tree = ET.parse(full_xml_fname)
xml_root = tree.getroot()
mins= round(int(float(xml_root.find(".//current_time").text))) # TODO: check units = mins
hrs = int(mins/60)
days = int(hrs/24)
title_str = '%dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))
info_dict = {}
# scipy.io.loadmat(fullname, info_dict)
scipy.io.loadmat(full_fname, info_dict)
M = info_dict['multiscale_microenvironment']
# global_field_index = int(mcds_field.value)
# print('plot_substrate: field_index =',field_index)
f = M[self.field_index, :] # 4=tumor cells field, 5=blood vessel density, 6=growth substrate
# plt.clf()
# my_plot = plt.imshow(f.reshape(400,400), cmap='jet', extent=[0,20, 0,20])
self.fig = plt.figure(figsize=(7.2,6)) # this strange figsize results in a ~square contour plot
# fig.set_tight_layout(True)
# ax = plt.axes([0, 0.05, 0.9, 0.9 ]) #left, bottom, width, height
# ax = plt.axes([0, 0.0, 1, 1 ])
# cmap = plt.cm.viridis # Blues, YlOrBr, ...
# im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])
# ax.grid(False)
# print("substrates.py: ------- numx, numy = ", self.numx, self.numy )
if (self.numx == 0): # need to parse vals from the config.xml
fname = os.path.join(self.output_dir, "config.xml")
tree = ET.parse(fname)
xml_root = tree.getroot()
xmin = float(xml_root.find(".//x_min").text)
xmax = float(xml_root.find(".//x_max").text)
dx = float(xml_root.find(".//dx").text)
ymin = float(xml_root.find(".//y_min").text)
ymax = float(xml_root.find(".//y_max").text)
dy = float(xml_root.find(".//dy").text)
self.numx = math.ceil( (xmax - xmin) / dx)
self.numy = math.ceil( (ymax - ymin) / dy)
xgrid = M[0, :].reshape(self.numy, self.numx)
ygrid = M[1, :].reshape(self.numy, self.numx)
num_contours = 15
levels = MaxNLocator(nbins=num_contours).tick_values(self.cmap_min.value, self.cmap_max.value)
contour_ok = True
if (self.cmap_fixed.value):
try:
my_plot = plt.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy, self.numx), levels=levels, extend='both', cmap=self.field_cmap.value)
except:
contour_ok = False
# print('got error on contourf 1.')
else:
try:
my_plot = plt.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy,self.numx), num_contours, cmap=self.field_cmap.value)
except:
contour_ok = False
# print('got error on contourf 2.')
if (contour_ok):
plt.title(title_str)
plt.colorbar(my_plot)
axes_min = 0
axes_max = 2000
# plt.xlim(axes_min, axes_max)
# plt.ylim(axes_min, axes_max)
# mcds_play = widgets.Play(
# # interval=10,
# value=50,
# min=0,
# max=100,
# step=1,
# description="Press play",
# disabled=False,
# )
# #mcds_slider = widgets.IntSlider()
# widgets.jslink((mcds_play, 'value'), (mcds_slider, 'value'))
# widgets.HBox([mcds_play, mcds_slider])
| [
"yafei.wang89@gmail.com"
] | yafei.wang89@gmail.com |
33b9c91f645426469203bd3758dc36576c685fd0 | e3858c9736935a582304ff7460f211b45f6f8287 | /skins_kravenzero/usr/lib/enigma2/python/Components/Converter/KravenZeroHDServiceEndTime.py | c6d215646152a3142d23b6594bdc2bc09360fc75 | [] | no_license | wrepin/Skins | e0046578a362ed1b9af81c9c4f47904bbf69d318 | e82cc4cc2816d0bb2b6ab9b404ccee475cab84d4 | refs/heads/master | 2020-12-30T18:37:39.167935 | 2015-02-23T18:36:51 | 2015-02-23T18:36:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | #taken from "ServicePosition" Converter
#edited by mogli123 @ et-view-support.com
from Components.Converter.Converter import Converter
from Poll import Poll
from enigma import iPlayableService, iPlayableServicePtr, iServiceInformation, eTimer, eLabel
from Components.Element import cached, ElementError
from time import localtime, strftime, time, gmtime, asctime
from Components.Sources.Clock import Clock
class KravenZeroHDServiceEndTime(Poll, Converter, object):
TYPE_ENDTIME = 0
def __init__(self, type):
Poll.__init__(self)
Converter.__init__(self, type)
if type == "EndTime":
self.type = self.TYPE_ENDTIME
self.poll_enabled = True
def getSeek(self):
s = self.source.service
return s and s.seek()
@cached
def getPosition(self):
seek = self.getSeek()
if seek is None:
return None
pos = seek.getPlayPosition()
if pos[0]:
return 0
return pos[1]
@cached
def getLength(self):
seek = self.getSeek()
if seek is None:
return None
length = seek.getLength()
if length[0]:
return 0
return length[1]
@cached
def getText(self):
seek = self.getSeek()
if seek is None:
return ""
else:
if self.type == self.TYPE_ENDTIME:
e = (self.length / 90000)
s = self.position / 90000
return strftime("%H:%M", localtime(time() + (self.length / 90000 - self.position / 90000)))
range = 10000
position = property(getPosition)
length = property(getLength)
text = property(getText)
def changed(self, what):
cutlist_refresh = what[0] != self.CHANGED_SPECIFIC or what[1] in (iPlayableService.evCuesheetChanged,)
time_refresh = what[0] == self.CHANGED_POLL or what[0] == self.CHANGED_SPECIFIC and what[1] in (iPlayableService.evCuesheetChanged,)
if time_refresh:
self.downstream_elements.changed(what) | [
"neipe78@googlemail.com"
] | neipe78@googlemail.com |
0a594efa5004b79150ace703b60d768090d1e120 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/odps/tunnel/checksum.py | 8e8fc3c8d359101a5792eac47318794d0db3a82b | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 1,918 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from ..crc import Crc32c, Crc32
from .. import utils
class Checksum(object):
TRUE = bytearray([1])
FALSE = bytearray([0])
def __init__(self, method='crc32c'):
self.crc = Crc32c() if method.lower() == 'crc32c' else Crc32()
def _mode(self):
# use for UT to check if use c extension
try:
from ..src.crc32c_c import Crc32c
return 'c' if isinstance(self.crc, Crc32c) else 'py'
except ImportError:
return 'py'
def update_bool(self, val):
assert isinstance(val, bool)
val = self.TRUE if val else self.FALSE
self._update(val)
def update_int(self, val):
val = struct.pack('<i', val)
self._update(val)
def update_long(self, val):
val = struct.pack('<q', val)
self._update(val)
def update_float(self, val):
val = struct.pack('<d', val)
self._update(val)
def _update(self, b):
# update crc without type checking
self.crc.update(bytearray(b))
def update(self, b):
b = utils.to_binary(b)
self._update(b)
def getvalue(self):
return self.crc.getvalue()
def reset(self):
return self.crc.reset()
| [
"noreply@github.com"
] | wangyum.noreply@github.com |
f3c1cd5e293453a360833be5f67645fb1cdbba71 | 16eb11581f14e9252f540d8570aa4a0cb3d0304f | /Ex17_NumTable.py | 40c8d20312ba7383420037896270a3607e93b385 | [] | no_license | Akshay-Chandelkar/PythonTraining2019 | 81b699471041cbb85e2bf447b0269b1836301109 | 11f9d1ec8d1e4d111cb7b364aec7bce336cf432c | refs/heads/master | 2020-04-16T13:35:15.895370 | 2019-04-10T10:33:19 | 2019-04-10T10:33:19 | 165,633,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py |
def NumTable(N1):
if N1 > 0:
print("The table for the given number ::")
for x in range(1,11):
N2 = N1 * x
print(N2)
else:
print("The entered number is 0 or negative.")
if __name__ == '__main__':
N1 = int(input("Enter a number to get its table :: "))
NumTable(N1)
#print("The table for the given number :: {}".format(res)) | [
"noreply@github.com"
] | Akshay-Chandelkar.noreply@github.com |
5a187adf4e613bd3ed6215071f10539c5c306b85 | 2f8dd346f2d5aa7948e7f73172a1b38153a4d49e | /Windows/Gammacat.py | 79c983efb9593e58798b277de12c2002f0617513 | [] | no_license | Amirsil/Gammacat | 8b21a9e22ecd30dabe896dd183b4dd09ee829b3d | 5bb06bd096c65c78315754c64dc0e44ad5d9323b | refs/heads/master | 2020-09-14T14:03:03.027909 | 2020-07-02T17:40:03 | 2020-07-02T17:40:03 | 223,149,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,106 | py | from sys import argv, executable as python
import webbrowser
import subprocess
import os
import daemon
try:
import requests
except ModuleNotFoundError:
subprocess.check_call([python, "-m", "pip", "install", "requests"])
try:
import flask_cors
except ModuleNotFoundError:
subprocess.check_call([python, "-m", "pip", "install", "flask_cors"])
try:
import flask
except ModuleNotFoundError:
subprocess.check_call([python, "-m", "pip", "install", "flask"])
def main():
if len(argv) > 1:
if argv[1] == '--version':
print('''
gammacat (GNU coreutils) 1.0
Copyleft (C) 2019 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Written by Torbjorn Granlund and Richard M. Stallman. ''')
elif argv[1] in ['-d', '--daemon']:
if os.path.isfile('daemon.txt'):
print('\nA daemon is already running in the background')
else:
print('\nStarting daemon...')
daemon.main()
elif argv[1] in ['-s', '--server']:
try:
requests.post("http://localhost:5555", timeout=0.1)
print('\nA server is already active')
except requests.exceptions.ConnectionError:
subprocess.call("server.bat")
elif len(argv) == 2:
if argv[1] in ['-c', '--connect']:
print('\nusage: gammacat [-c, --connect] [HOST]')
elif argv[1] in ['-e', '--search']:
print('\nusage: gammacat [-e, --search] [HOST]')
elif argv[1] in ['-cs', '--close-server']:
try:
r = requests.post("http://localhost:5555/shutdown", timeout=0.1)
print(r.text)
except requests.exceptions.ConnectionError:
print('\nNo server is active on your computer right now')
elif argv[1] in ['-cn', '--close-node']:
try:
r = requests.post("http://localhost:5550/shutdown", timeout=0.1)
print(r.text)
except requests.exceptions.ConnectionError:
print('\nNo node is active on your computer right now ')
else:
print('\nusage: gammacat [OPTION] [HOST]')
elif len(argv) == 3:
if argv[1] in ['-c', '--connect']:
try:
requests.post("http://localhost:5550", timeout=0.1)
print('\nA node is already active')
except requests.exceptions.ConnectionError:
subprocess.call(["node.bat", argv[2]])
elif argv[1] in ['-e', '--search']:
try:
requests.post("http://%s:5555" % argv[2], timeout=0.1)
webbrowser.open_new_tab('http://%s:5555' % argv[2])
except requests.exceptions.ConnectionError:
print('\nNo server is active on this host')
else:
print('\nusage: gammacat [OPTION] [HOST]')
else:
print('\nusage: gammacat [OPTION] [HOST]')
else:
print('''
usage: gammacat [OPTION]
Software sollution for file search in a network of computers all connected to 1 main server.
-s, --server open the gammacat web main server on your host
-c, --connect connect to the server as a storage node
-e, --search connect to the main server from the browser to search files
-d, --daemon start a daemon that appends every file on your computer to a local database
-cs, --close-server close the gammacat web main server that is open on your host
-cn, --close-node close the storage node that is connected to the main server
--version output version information and exits ''')
if __name__ == '__main__':
main()
| [
"amirsil2002@gmail.com"
] | amirsil2002@gmail.com |
32d4b6c7bcd8882e5b9e7ec8f0d4ca47f4d7aeaa | 19855dbff1488f41d5ebb71dae03ecca11ade897 | /folder_mathHW/f-of-x-0/Cartesian coordinate system -- ln.py | 6250ec3f9aa30292e2978c2c727b30e9a12f05fe | [] | no_license | PEN2014/Self-practice | 011aa7580f88d35d0b030abd216f5c4a23702e89 | 73978f69457f5f436058ce5b36378333f1cffc22 | refs/heads/master | 2021-01-19T02:29:58.968071 | 2016-06-05T06:58:31 | 2016-06-05T06:58:31 | 60,446,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | import matplotlib.pyplot as plt
import numpy as np
N = 30
n = np.arange(0.001, N/2, 1.0/N)
s = (n-2)*(n-2)-np.log(n)
plt.plot(n, s)
plt.axis([0, 8, -10, 10])
plt.show() | [
"emosome1@gmail.com"
] | emosome1@gmail.com |
32f85e0acbe9f62581b5d9b916122810e397e5a3 | e4043e69ba065186a153ef0995a8c952a665f3b4 | /system/migrations/0034_auto_20160608_0920.py | 1197124fb925f69030df52a62f5558967ae106f3 | [] | no_license | RayZimnat/hos_cash_pycharm | 98e9dc4ad6ba66dcee9223c61a5289d551a2d7cf | 84c8d860282617b4ee8cac7b5cb97f90d343f760 | refs/heads/master | 2020-03-18T19:41:04.999628 | 2018-06-07T08:34:15 | 2018-06-07T08:34:15 | 135,170,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-08 07:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('system', '0033_agent_agent_email'),
]
operations = [
migrations.CreateModel(
name='Scheme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('scheme_name', models.CharField(max_length=50, unique=True)),
],
),
migrations.AddField(
model_name='policy',
name='scheme',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='system.Scheme'),
),
]
| [
"jowar@zimnat.co.zw"
] | jowar@zimnat.co.zw |
faf3c8dfa5ff66ccb5061a5361f46ea8660794fb | 6eb56f2e3f14f2373be07fe95b1c6fedf1e2d49f | /alien_invasion/settings.py | 88e78bcdae2ac7d01be37c4e59510f618f2782a2 | [
"Apache-2.0"
] | permissive | chaofan-zheng/python_leanring_code | fe22b0370cadebf7456477269aff4a35cef0eb41 | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | refs/heads/main | 2023-02-28T07:56:46.457552 | 2021-02-10T15:08:33 | 2021-02-10T15:08:33 | 323,584,115 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | """存储雷霆战机的所有类"""
class Settings():
def __init__(self):
self.screen_width = 1200
self.screen_height = 800
self.bg_color = (0, 0, 0)
self.speed_factor = 1.5
self.bullet_speed_factor = 5
self.bullet_width = 5
self.bullet_height = 15
self.bullet_color = 255, 255, 102
self.bullets_allowed = 5
self.alien_speed_factor = 1
self.fleet_drop_speed = 10
# fleet_direction =1 表示右移动,-1表示左移
self.fleet_direction = 1
| [
"417355570@qq.com"
] | 417355570@qq.com |
71d9ef0d70866a4e18c09969ee810ff780a8146c | b434d3432decfd039b0a5b57a567478deefa4d32 | /backend/app/payments/__init__.py | 3aa807b7976475b0ab98c405cfaf156801e9924c | [] | no_license | elcolono/marryday | b895bca21337e0f3092f8eda4cac59b23c41b4b7 | 89e3caf5c2b0260d15259c6d72bc85d1f3dac81c | refs/heads/main | 2023-08-27T03:09:25.797531 | 2021-09-18T12:40:44 | 2021-09-18T12:40:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | default_app_config = "payments.apps.PaymentsConfig" | [
"andreas.siedler@gmail.com"
] | andreas.siedler@gmail.com |
1e3cd8bb025e9418fdb64b8da0b9313baf5079b7 | f554142b0973adae190274b6005fb08477e50143 | /cmd/auth/forms.py | 2946c4e544f7a69ad91dc8e43100576ae96847a6 | [] | no_license | cmdpdx/cdouglas.xyz | e08808e4845bd65133ff3dba70eb9f8f05801bd9 | dd84ac75117300f5833e6511948e596c1a863196 | refs/heads/master | 2020-03-18T08:40:20.721514 | 2019-05-10T17:22:17 | 2019-05-10T17:22:17 | 134,522,185 | 0 | 0 | null | 2019-05-07T18:54:22 | 2018-05-23T06:14:00 | Python | UTF-8 | Python | false | false | 421 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
from cmd.models import User
class LoginForm(FlaskForm):
username = StringField('User', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember')
submit = SubmitField('Log in') | [
"colin.m.douglas@gmail.com"
] | colin.m.douglas@gmail.com |
bcd4268e4894fcefee6f7c7f50a401ca6e1e2a87 | f3ca45656ed05c6f287265a273043063cc37478d | /sentiment_test1.py | a958f94c65cea3b5d2f85876170798c09a3fbbd2 | [] | no_license | CasewardBen/CasewardNLP | 3c8f9e23f82205ee94aedb01029f6b0bbe0920aa | e4bbf96b787515e8a75542cb5f1eda3fd1d6696a | refs/heads/master | 2020-06-16T05:03:27.408609 | 2019-07-06T02:01:21 | 2019-07-06T02:01:21 | 195,486,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from snownlp import SnowNLP
# Return the sentiment analysis result
def get_sentiment(text):
    """Run SnowNLP sentiment analysis on *text*.

    Note: this returns the full ``SnowNLP`` wrapper object rather than a
    numeric score; callers read e.g. ``result.sentiments`` for the
    positive-sentiment probability.
    """
    return SnowNLP(text)
"48239811+CasewardBen@users.noreply.github.com"
] | 48239811+CasewardBen@users.noreply.github.com |
83c106f802d458035e4bf0e7d6e1a576cb89d1f3 | ebe43006105b910873bf39458a5041af2f5ddf0d | /code_generator/generator.py | 5f2aaefcaaa9f30cc965b026658eb82d8bcc5888 | [] | no_license | elis351/promprog | 7ea5e3919f91247a81a0a2ad13af2330559155e7 | a1f26100b0f5b5b84ae5b102f7b38b16d1900b9f | refs/heads/main | 2023-07-22T16:43:23.834727 | 2021-09-09T19:07:44 | 2021-09-09T19:07:44 | 404,721,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import cv2
import sys
# Usage: generator.py <mask-image> <output-header>
# Converts an image (argv[1]) into a C++ header (argv[2]) declaring a
# 3-D vector of its raw BGR byte values.
with open(sys.argv[2], 'w') as header:  # write-only: 'w' suffices, not 'w+'
    header.write('''#pragma once
#include<vector>
#include<string>
std::vector<std::vector<std::vector<unsigned char>>> array = {
''')
    mask = cv2.imread(sys.argv[1])
    # cv2.imread silently returns None on a missing/unreadable file; fail
    # with a clear message instead of an AttributeError on .shape below.
    if mask is None:
        sys.exit('could not read image: ' + sys.argv[1])
    height, width, depth = mask.shape
    # Emit one brace-enclosed row per pixel row, one inner list per pixel.
    for i in range(height):
        header.write('\t{\n')
        for j in range(width):
            header.write('\t\t{')
            for color in range(depth):
                header.write(f'{mask[i, j, color]}, ')
            header.write('},\n')
        header.write('\t},\n')
    header.write('};')
| [
"44136701+elis351@users.noreply.github.com"
] | 44136701+elis351@users.noreply.github.com |
5cdb7b87f7a99ecd94da0f8dc4de117098d1ea45 | 487c845443e2a945b56b2ea3d65e9137017a3c69 | /gamedisplay.py | a70bed51bbdcd92b0ffea36f191ab3f6c4c9e37e | [] | no_license | ruiqimao/nox-macros | d188d4bab45014d79dd30165203c77f009b2a107 | 3cf2ff59f57df8e3218cadf77e11c1e8f6fd027f | refs/heads/master | 2020-12-02T03:09:30.652041 | 2019-12-30T07:15:58 | 2019-12-30T07:15:58 | 230,867,491 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | from threading import Thread
from PIL import Image, ImageDraw
import cv2
import numpy as np
import time
import util
class GameDisplay(Thread):
    """Background thread that mirrors captured game frames in an OpenCV window.

    Each loop iteration grabs a frame from the screen-capture backend, feeds
    it to the parser, composites the parser's overlay (plus live mouse
    diagnostics) on top, and displays the result.  Mouse events on the window
    are forwarded to the capture target, so the window doubles as a
    remote-control surface.
    """

    def __init__(self, scap, parser):
        super().__init__()
        self._running = True
        self._scap = scap          # screen-capture / input-injection backend
        self._parser = parser      # frame parser producing an RGBA overlay
        self._mouse_pos = (0, 0)   # last cursor position over the window
        self._mouse_down = False   # left button currently held on the window

    def run(self):
        """Main display loop; exits once stop() clears the running flag."""
        cv2.namedWindow('game')
        cv2.setMouseCallback('game', self._cb_mouse)
        while self._running:
            # Capture a frame and let the parser analyze it.
            frame = self._scap.capture()
            self._parser.feed(frame)
            # Copy the overlay so our annotations don't mutate parser state.
            overlay = self._parser.get_overlay().copy()
            draw = ImageDraw.Draw(overlay)
            # Show the current window-local mouse coordinates (bottom-left).
            mouse_string = '%d, %d' % self._mouse_pos
            draw.rectangle((0, 702, 8 + util.FONT.getsize(mouse_string)[0], 720),
                           fill=(0, 0, 0, 200))
            draw.text((4, 704),
                      mouse_string,
                      font=util.FONT,
                      fill=(255, 255, 255, 255))
            # Circle the capture target's cursor; red while a click is held.
            cap_x, cap_y, cap_down = self._scap.get_mouse()
            outline_color = (255, 0, 0, 255) if cap_down else (255, 255, 255, 255)
            draw.ellipse((cap_x - 7, cap_y - 7, cap_x + 7, cap_y + 7),
                         outline=outline_color,
                         width=3)
            # Blend the annotated overlay onto the frame and display it.
            frame = Image.alpha_composite(frame, overlay)
            opencv_frame = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
            cv2.imshow('game', opencv_frame)
            cv2.waitKey(1)
        # Fix: release the window once the loop ends instead of leaving a
        # dead, unresponsive OpenCV window behind after stop().
        cv2.destroyWindow('game')

    def stop(self):
        """Ask the display loop to exit after its current iteration."""
        self._running = False

    def _cb_mouse(self, evt, x, y, flags, params):
        """OpenCV mouse callback: mirror window input to the capture target."""
        # Track the cursor; while dragging, stream move events so the
        # target follows the cursor continuously.
        self._mouse_pos = (x, y)
        if self._mouse_down:
            self._scap.mouse_move((x, y))
        # Translate window clicks into press/release on the capture target.
        if evt == cv2.EVENT_LBUTTONDOWN:
            self._scap.mouse_move((x, y))
            self._scap.mouse_down()
            self._mouse_down = True
        if evt == cv2.EVENT_LBUTTONUP:
            self._scap.mouse_move((x, y))
            self._scap.mouse_up()
            self._mouse_down = False
"ruiqim@gmail.com"
] | ruiqim@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.