import os
from CoreScripts.CfgParserFactory import CfgParserFactory
from ParametersParsing import GeneralParameters
from LogParser import LogParser, TableType
from Logger import Logger
from CoreFunctions import CreateConfigParser, WriteStringToFile, MakeTableInPercents, PrintTableToFile
class ComparisonResult:
OK = "OK"
NEW = "New"
FAILED = "Failed"
CHANGED = "Changed"
class ExperimentResults:
errors = []
resultFile = "" #file with result table
pfstTables = {} #benchmark: pfst
pfstRefTables = {} #benchmark: pfst
benchmarkResults = {} #result: [benchmarks]
def __init__(self):
self.errors = []
self.resultFile = ""
self.pfstTables = {}
self.pfstRefTables = {}
self.benchmarkResults = {}
def GetPFSTForBenchmark(self, benchmark):
return self.pfstTables[benchmark]
def GetReferencePFSTForBenchmark(self, benchmark):
return self.pfstRefTables[benchmark]
def AddError(self, error):
self.errors.append(error)
def AddBenchmarkResult(self, benchmark, result):
        if benchmark not in self.benchmarkResults:
            self.benchmarkResults[benchmark] = []
        self.benchmarkResults[benchmark].append(result)
def AddPFSTForBenchmark(self, benchmark, table):
self.pfstTables[benchmark] = table
def AddReferencePFSTForBenchmark(self, benchmark, table):
self.pfstRefTables[benchmark] = table
def __str__(self):
resultStr = ""
        for (benchmark, result) in self.benchmarkResults.items():
resultStr += ("%s: %s\n" % (result, benchmark))
for error in self.errors:
resultStr += ("%s\n" % error)
return resultStr + "\n"
def Print(self):
print(self.__str__())
class BaseExperiment:
name = ""
cfg = ""
benchmarks = ""
cmdArgs = [] #list of command line arguments
metrics = []
stages = []
doParsePQAT = False
cfgParser = CreateConfigParser()
generalParameters = GeneralParameters(cfgParser)
def __init__(self, name, cfg, benchmarks, metrics, stages, cmdArgs=None):
if not cmdArgs: cmdArgs = []
self.name = name
self.cfg = os.path.join(self.generalParameters.binDir, "cfg", cfg)
self.benchmarks = self.SetBenchmarksList(benchmarks)
self.cmdArgs = cmdArgs
self.metrics = metrics
self.stages = stages
def CopyingConstructor(self, be):
self.cfg = be.cfg
self.name = be.name
self.stages = be.stages
self.metrics = be.metrics
self.cmdArgs = be.cmdArgs
self.benchmarks = be.benchmarks
self.doParsePQAT = be.doParsePQAT
def SetConfig(self, cfg):
self.cfg = os.path.join(self.generalParameters.binDir, "cfg", cfg)
    def SetBenchmarksList(self, benchmarks):
        self.benchmarks = os.path.join(CfgParserFactory.get_root_dir(), self.cfgParser.get("GeneralParameters", "benchmarkLists"), benchmarks)
        # return the path so the assignment in __init__ receives a value, not None
        return self.benchmarks
def CreateEmptyTable(self, reportTable):
cols = ["Benchmark"]
#write header of a table.
for row in range(len(self.stages)):
for col in range(len(self.metrics)):
cols.append("%s_%s" % (self.metrics[col], self.stages[row]))
cols.append("") #an empty column between metrics on different stages
WriteStringToFile(cols, reportTable)
def ParseLog(self, logName):
parser = LogParser(logName, TableType.PFST, self.cfgParser)
return parser.ParsePFST(self.metrics, self.stages)
def ParsePQATAndPrintTable(self, logName):
metrics = ["HPWL", "TNS", "WNS"]
parser = LogParser(logName, TableType.PQAT, self.cfgParser)
table = parser.ParsePQAT(metrics)
table = MakeTableInPercents(table)
pqatName = r"%s.csv" % (os.path.basename(logName))
PQATFileName = os.path.join(os.path.dirname(logName), pqatName)
PrintTableToFile(PQATFileName, table, metrics)
def AddStringToTable(self, values, benchmark, reportTable):
cols = [benchmark]
for row in range(len(self.stages)):
for col in range(len(self.metrics)):
cols.append(str(values[row][col]))
cols.append("") #an empty column between metrics on different stages
#write metrics to the file
WriteStringToFile(cols, reportTable)
def MakeResultTable(self, logFolder, reportTable):
        if not os.path.exists(logFolder):
logger = Logger()
logger.Log("Error: folder %s does not exist" % logFolder)
return
reportTable = os.path.join(logFolder, reportTable)
self.CreateEmptyTable(reportTable)
for log in os.listdir(logFolder):
if os.path.isfile(os.path.join(logFolder, log)) and (".log" == os.path.splitext(log)[-1]):
benchmark = os.path.splitext(log)[0]
self.ParseLogAndFillTable(os.path.join(logFolder, log), benchmark, reportTable)
def ParseLogAndFillTable(self, logName, benchmark, reportTable):
values = self.ParseLog(logName)
        if not values:
return [ComparisonResult.FAILED, []]
self.AddStringToTable(values, benchmark, reportTable)
        if self.doParsePQAT:
self.ParsePQATAndPrintTable(logName)
return [ComparisonResult.OK, values]
def TestResultTableMaking():
stages = ["LEG", "DP"]
metrics = ["HPWL", "TNS", "WNS"]
experiment = BaseExperiment("HippocrateDP experiment", "HippocrateDP.cfg", "IWLS_GP_Hippocrate.list",
metrics, stages)
experiment.MakeResultTable(r"..\Reports\HippocrateDP", "TestTable2.csv")
def test():
from TestRunner import TestRunner
stages = ["LEG", "DP"]
metrics = ["HPWL", "TNS", "WNS"]
experiment = BaseExperiment("HippocrateDP experiment", "HippocrateDP.cfg", "IWLS_GP_Hippocrate.list",
metrics, stages)
testRunner = TestRunner()
testRunner.Append(experiment)
testRunner.Run()
if __name__ == "__main__":
    test()
|
from django.conf import settings
from django.db.models import Sum
from django.shortcuts import render
from poyosei.models import *
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def operation(request):
continuer = 'init'
mouvements = ""
planteurs = Planteur.objects.all()
    # these lookups do not depend on any loop variable, so run them once
    camp = Campagne.objects.order_by('annee').last()
    annee = camp.annee
    total = Mouvement.objects.all().filter(année_concerne=annee).aggregate(Sum('quantite_reference_individuelle_accorde'))['quantite_reference_individuelle_accorde__sum'] or 0.00
    totalMouv = Mouvement.objects.all().filter(année_concerne=annee)
mouvement = Mouvement.objects.filter(mouvement_valide=False)
    nbrM = mouvement.count()
keyword = request.POST.get("order", "")
annee = Campagne.CampagneEnCours()
newAnnee = annee + 1
if request.method == 'POST':
        taxin = 0
        for t in totalMouv:
            taxin = t.ridReserve
for p in planteurs:
if p.pacage == '000000000':
total = float(p.ridAnneeEnCours) + taxin
riTemp = p.ritAnneeEnCours
Campagne.objects.create(pacage=p.pacage, annee=newAnnee, rid=p.taxeReserve, rit=riTemp, ri_Total=p.riTotale)
else :
ridP = p.ridAnneeP
ritP = p.ritAnneeP
Campagne.objects.create(pacage=p.pacage, annee=newAnnee, rid=ridP, rit=ritP, ri_Total=p.riTotale)
return render(request, 'operation/index.html', { 'continuer':continuer, 'annee':annee, 'mouvements':mouvements, 'mouvement':mouvement, "active_tab": "operation", 'total':total, 'totalMouv':totalMouv, 'nbrM':nbrM})
|
"""def countCC(M):
row= len(M)
col= len(M[0])
visited = [[False]*col]*row
count = 0
for i in range(0,row):
for j in range(0,col):
if (M[i][j]==1 and visited[i][j]==False):
dfs(M, i, j ,visited)
print (i,j)
count =count+1
return count
def isSafe(M, row, col, visited):
Row= len(M)
Col= len(M[0])
return (row < Row) and (row >= 0) and (col <Col) and (col >= 0) and (M[row][col] == 1) and (visited[row][col] == False)
def dfs(M, row, col, visited):
rowNbr =[-1, -1, -1, 0, 0, 1, 1, 1]
colNbr= [-1, 0, 1, -1, 1, -1, 0, 1]
visited[row][col] = True
for i in range(4):
if(isSafe(M, row + rowNbr[i], col + colNbr[i], visited)):
dfs(M, row + rowNbr[i], col + colNbr[i],visited)
a=[ [1, 1, 0, 0, 0],
[0, 1, 0, 0, 1],
[1, 0, 0, 1, 1],
[0, 0, 0, 0, 0],
[1, 0, 1, 0, 1]
]
"""
def recursion(a,i,j):
if i>=0 and i<len(a) and j>=0 and j<len(a[i]):
if a[i][j] ==1:
a[i][j] =2
recursion(a,i,j+1)
recursion(a,i,j-1)
recursion(a,i+1,j)
recursion(a,i-1,j)
recursion(a,i-1,j-1)
recursion(a,i-1,j+1)
recursion(a,i+1,j-1)
recursion(a,i+1,j+1)
def findingIslands(a):
count =0
for i in range(len(a)):
for j in range(len(a[i])):
if a[i][j] ==1:
count = count+1
recursion(a,i,j)
print "Number of Islands:" +str(count)
def main():
array =[[1,0,1],
[1,1,1],
[0,0,1]]
findingIslands(array)
if __name__ == '__main__':
    main()
|
import wolframalpha
import wikipedia
import PySimpleGUI as sg
import pyttsx3 #for text to speech
client = wolframalpha.Client("RT85GL-4VV66LR9G6")
sg.theme('DarkBlack')
layout =[[sg.Text('Enter a command'), sg.InputText()],
[sg.Button('Ok'), sg.Button('Cancel')]]
window = sg.Window('PVA', layout)
engine = pyttsx3.init()
engine.setProperty('voice', r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_DAVID_11.0')
while True:
event, values = window.read()
if event in (None, 'Cancel'):
break
try:
wiki_res = wikipedia.summary(values[0], sentences=1)
wolfram_res = next(client.query(values[0]).results).text
engine.say("Wolfram Result: "+wolfram_res)
engine.say("Wikipedia Result: "+wiki_res)
sg.PopupNonBlocking("Wolfram Result: "+wolfram_res,"Wikipedia Result: "+wiki_res)
except wikipedia.exceptions.DisambiguationError:
wolfram_res = next(client.query(values[0]).results).text
engine.say("Wolfram Result: "+wolfram_res)
sg.PopupNonBlocking("Wolfram Result: "+wolfram_res)
except wikipedia.exceptions.PageError:
wolfram_res = next(client.query(values[0]).results).text
engine.say("Wolfram Result: "+wolfram_res)
sg.PopupNonBlocking("Wolfram Result: "+wolfram_res)
    except Exception:
wiki_res = wikipedia.summary(values[0], sentences=1)
engine.say("Wikipedia Result: "+wiki_res)
sg.PopupNonBlocking("Wikipedia Result: "+wiki_res)
engine.runAndWait()
window.close()
|
import os
from datetime import timedelta
class Config(object):
DEBUG = False
USE_FAKE_SERVICES = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///../data/bark.db'
UDB_URL = 'https://cgi.cse.unsw.edu.au/~csesoc/udb/'
UDB_USER = 'udb'
LDAP_HOST = 'ldap://ad.unsw.edu.au'
EVENT_LEEWAY = timedelta(hours=1)
# TODO: Add link to Android App Download
ANDROID_URL = 'about:blank'
class Production(Config):
PROPAGATE_EXCEPTIONS = True
def __init__(self):
self.SECRET_KEY = os.environ['SECRET_KEY']
self.UDB_PASSWORD = os.environ['UDB_PASSWORD']
class Development(Config):
DEBUG = True
USE_FAKE_SERVICES = True
SECRET_KEY = 'development-only'
def __init__(self):
super(Development, self).__init__()
self.UDB_PASSWORD = os.environ.get('UDB_PASSWORD')
|
import re
import graphbrain.constants as const
from graphbrain import hedge
from graphbrain.hyperedge import UniqueAtom
def _edge2text(edge, parse):
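    # map the edge's atoms back to their spacy tokens, order them by sentence
    # position, then rebuild the surface text by recovering the separator
    # between each consecutive token pair from the original sentence string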
atoms = [UniqueAtom(atom) for atom in edge.all_atoms()]
tokens = [parse['atom2token'][atom] for atom in atoms if atom in parse['atom2token']]
if len(tokens) == 0:
return ''
tokens = sorted(tokens, key=lambda x: x.i)
prev_txt = tokens[0].text
txt_parts = [prev_txt]
sentence = str(parse['spacy_sentence'])
for token in tokens[1:]:
txt = token.text
res = re.search(r'{}(.*?){}'.format(re.escape(prev_txt), re.escape(txt)), sentence)
if res:
sep = res.group(1)
else:
sep = ' '
if any(letter.isalnum() for letter in sep):
sep = ' '
txt_parts.append(sep)
txt_parts.append(token.text)
prev_txt = txt
return ''.join(txt_parts)
def _set_edge_text(edge, hg, parse):
text = _edge2text(edge, parse)
hg.set_attribute(edge, 'text', text)
if edge.not_atom:
for subedge in edge:
_set_edge_text(subedge, hg, parse)
class Parser(object):
"""Defines the common interface for parser objects.
    Parsers transform natural language text into graphbrain hyperedges.
"""
def __init__(self, lemmas=True, corefs=True, debug=False):
self.lemmas = lemmas
self.corefs = corefs
self.debug = debug
# to be created by derived classes
self.lang = None
def debug_msg(self, msg):
if self.debug:
print(msg)
def parse(self, text):
"""Transforms the given text into hyperedges + aditional information.
Returns a dictionary with two fields:
-> parses: a sequence of dictionaries, with one dictionary for each
sentence found in the text.
        -> inferred_edges: a sequence of edges inferred during the parsing
process (e.g. genders, 'X is Y' relationships)
Each sentence parse dictionary contains at least the following fields:
-> main_edge: the hyperedge corresponding to the sentence.
        -> extra_edges: additional edges, e.g. connecting atoms that appear
in the main_edge to their lemmas.
-> text: the string of natural language text corresponding to the
main_edge, i.e.: the sentence itself.
-> edges_text: a dictionary of all edges and subedges to their
corresponding text.
        -> resolved_corefs: the main_edge with coreferences resolved.
"""
# replace newlines with spaces
clean_text = text.replace('\n', ' ').replace('\r', ' ')
# remove repeated spaces
clean_text = ' '.join(clean_text.split())
parse_results = self._parse(clean_text)
if self.corefs:
self._resolve_corefs(parse_results)
else:
for parse in parse_results['parses']:
parse['resolved_corefs'] = parse['main_edge']
return parse_results
def parse_and_add(self, text, hg, sequence=None, infsrcs=False, max_text=1500):
# split large blocks of text to avoid coreference resolution errors
        if self.corefs and 0 < max_text < len(text):
            combined = {'parses': [], 'inferred_edges': []}
            for sentence in self.sentences(text):
                sent_results = self.parse_and_add(sentence, hg=hg, sequence=sequence, infsrcs=infsrcs, max_text=-1)
                combined['parses'] += sent_results['parses']
                combined['inferred_edges'] += sent_results['inferred_edges']
            # return here so the whole text is not parsed a second time
            return combined
parse_results = self.parse(text)
edges = []
for parse in parse_results['parses']:
if parse['main_edge']:
edges.append(parse['main_edge'])
main_edge = parse['resolved_corefs']
if self.corefs:
unresolved_edge = parse['main_edge']
else:
unresolved_edge = None
# add main edge
if main_edge:
if sequence:
hg.add_to_sequence(sequence, main_edge)
else:
hg.add(main_edge)
# attach text to edge and subedges
_set_edge_text(main_edge, hg, parse)
# attach token list and token position structure to edge
self._set_edge_tokens(main_edge, hg, parse)
if self.corefs:
if unresolved_edge != main_edge:
                            # attach text to the unresolved edge as well
                            _set_edge_text(unresolved_edge, hg, parse)
coref_res_edge = hedge((const.coref_res_connector, unresolved_edge, main_edge))
hg.add(coref_res_edge)
# add extra edges
for edge in parse['extra_edges']:
hg.add(edge)
for edge in parse_results['inferred_edges']:
hg.add(edge, count=True)
if infsrcs:
inference_srcs_edge = hedge([const.inference_srcs_connector, edge] + edges)
hg.add(inference_srcs_edge)
return parse_results
def sentences(self, text):
raise NotImplementedError()
def atom_gender(self, atom):
raise NotImplementedError()
def atom_number(self, atom):
raise NotImplementedError()
def atom_person(self, atom):
raise NotImplementedError()
def atom_animacy(self, atom):
raise NotImplementedError()
def _post_process(self, edge):
raise NotImplementedError()
def _parse_token(self, token, atom_type):
raise NotImplementedError()
def _before_parse_sentence(self):
raise NotImplementedError()
def _parse_sentence(self, sent):
raise NotImplementedError()
def _parse(self, text):
raise NotImplementedError()
def _set_edge_tokens(self, edge, hg, parse):
raise NotImplementedError()
def _resolve_corefs(self, parse_results):
# do nothing if not implemented in derived classes
for parse in parse_results['parses']:
parse['resolved_corefs'] = parse['main_edge']
|
# Vehicle speed check, made runnable from the original pseudocode
# (assumes the pseudocode's 9.9 and 110000 were meant as 9.8 and 1100000)
import math

ACC_GRAV = 9.8                    # m/s^2
REFERENCE_WEIGHT_N = 1100000.0    # reference weight in newtons
REFERENCE_SPEED = 80.0            # allowed speed (km/h) at the reference weight

weight = float(input("weight (N): "))
mass = weight / ACC_GRAV
# allowed speed scales linearly with mass relative to the reference vehicle
allowed_speed = REFERENCE_SPEED * ACC_GRAV * mass / REFERENCE_WEIGHT_N

speed_actual = float(input("actual speed (km/h): "))
on_curve = input("on a curve? (y/n): ").strip().lower() == 'y'
if on_curve:
    r = float(input("curve radius r (m): "))
    theta = float(input("bank angle theta (rad): "))
    # banked-curve limit v = sqrt(r*g*tan(theta)), converted from m/s to km/h
    speed_max = math.sqrt(r * ACC_GRAV * math.tan(theta)) * 3.6
else:
    speed_max = allowed_speed

speed_critical = min(allowed_speed, speed_max)
if speed_actual < speed_critical:
    print("OK")
elif speed_actual > speed_critical:
    print("slow down")
else:
    print("critical speed reached")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 4 09:52:19 2018
@author: alunoic
"""
import cv2
import numpy as np
#%%
im1 = cv2.imread("lena.png", cv2.IMREAD_GRAYSCALE)
im2 = cv2.imread("baboon.png", cv2.IMREAD_GRAYSCALE)
x_range, y_range = im1.shape
im_res = np.zeros([x_range, y_range], dtype=np.uint8)
#%% LOOP
for i in range(x_range):
for j in range(y_range):
im_res[i][j] = max(im1[i][j],im2[i][j])
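# note: the pixel-wise loop above is equivalent to the vectorized form
# im_res = np.maximum(im1, im2), which is much faster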
#%%
#cv2.bitwise_and(im1,im2)
cv2.namedWindow('RESULT', cv2.WINDOW_KEEPRATIO)
cv2.imshow('RESULT', im_res)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import sys
import hashlib
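# For each key in the input file, find the smallest counter whose
# MD5(key + counter) hex digest starts with five zeros, then six zeros
# (the two halves of the classic "AdventCoin" puzzle).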
with open(sys.argv[1]) as input_file:
    input_lines = input_file.readlines()
for line in input_lines:
    private_key = line.strip()
counter = 1
magic_number_1st_half = -1
magic_number_2nd_half = -1
adventcoin = private_key + str(counter)
while True:
digest = hashlib.md5(adventcoin.encode('utf-8')).hexdigest()
# 1st half
if (magic_number_1st_half == -1) and (digest[0:5] == "00000"):
magic_number_1st_half = counter
# 2nd half
if (magic_number_2nd_half == -1) and (digest[0:6] == "000000"):
magic_number_2nd_half = counter
# both solutions have been found
if (magic_number_1st_half != -1) and (magic_number_2nd_half != -1):
break
else:
counter += 1
adventcoin = private_key + str(counter)
print(magic_number_1st_half, magic_number_2nd_half)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Azure Sentinel unit tests."""
import re
from collections import namedtuple
import pandas as pd
import pytest
import pytest_check as check
import respx
from msticpy.auth.azure_auth_core import AzureCloudConfig
from msticpy.context.azure import MicrosoftSentinel
from msticpy.data import QueryProvider
# pylint: disable=protected-access
_PORTAL_URLS = [
"https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/d1d8779d-38d7-4f06-91db-9cbc8de0176f/resourceGroups/soc/providers/Microsoft.OperationalInsights/workspaces/cybersecuritysoc/Overview",
"https://ms.portal.azure.com/#blade/Microsoft_Azure_Security_Insights/MainMenuBlade/0/id/%2Fsubscriptions%2F40dcc8bf-0478-4f3b-b275-ed0a94f2c013%2Fresourcegroups%2Fasihuntomsworkspacerg%2Fproviders%2Fmicrosoft.securityinsightsarg%2Fsentinel%2Fasihuntomsworkspacev4",
"https://portal.azure.com/#asset/Microsoft_Azure_Security_Insights/Incident/subscriptions/d1d8779d-38d7-4f06-91db-9cbc8de0176f/resourceGroups/soc-purview/providers/Microsoft.OperationalInsights/workspaces/aipdstim/providers/Microsoft.SecurityInsights/Incidents/a31d57c2-f973-87e3-2081-517e08940301",
"https://ms.portal.azure.com/#asset/Microsoft_Azure_Security_Insights/Incident/subscriptions/d1d8779d-9999-4f06-91db-9cbc8de0176f/resourceGroups/soc-na/providers/Microsoft.OperationalInsights/workspaces/non-existent/Overview",
]
_TENANT_LOOKUP_RESP = {
"token_endpoint": "https://login.microsoftonline.com/72f988bf-86f1-41af-91ab-2d7cd011db47/oauth2/token",
"token_endpoint_auth_methods_supported": [
"client_secret_post",
"private_key_jwt",
"client_secret_basic",
],
"jwks_uri": "https://login.microsoftonline.com/common/discovery/keys",
"response_modes_supported": ["query", "fragment", "form_post"],
"subject_types_supported": ["pairwise"],
"id_token_signing_alg_values_supported": ["RS256"],
"response_types_supported": [
"code",
"id_token",
"code id_token",
"token id_token",
"token",
],
}
_WS_RES_GRAPH_DATA = [
{
"workspaceName": "CyberSecuritySOC",
"workspaceId": "8ecf8077-cf51-4820-aadd-14040956f35d",
"tenantId": "4b2462a4-bbee-495a-a0e1-f23ae524cc9c",
"subscriptionId": "d1d8779d-38d7-4f06-91db-9cbc8de0176f",
"resourceGroup": "soc",
"id": (
"/subscriptions/d1d8779d-38d7-4f06-91db-9cbc8de0176f/resourceGroups/SOC/"
"providers/Microsoft.OperationalInsights/workspaces/CyberSecuritySoc"
),
},
{
"workspaceName": "ASIHuntOMSWorkspaceV4",
"workspaceId": "52b1ab41-869e-4138-9e40-2a4457f09bf0",
"tenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47",
"subscriptionId": "40dcc8bf-0478-4f3b-b275-ed0a94f2c013",
"resourceGroup": "asihuntomsworkspacerg",
"id": (
"/subscriptions/40dcc8bf-0478-4f3b-b275-ed0a94f2c013/resourceGroups/asihuntomsworkspacerg/"
"providers/Microsoft.OperationalInsights/workspaces/ASIHuntOMSWorkspaceV4"
),
},
{
"workspaceName": "AipDstim",
"workspaceId": "0f926592-f41a-48b8-8a57-95422c50db93",
"tenantId": "4b2462a4-bbee-495a-a0e1-f23ae524cc9c",
"subscriptionId": "d1d8779d-38d7-4f06-91db-9cbc8de0176f",
"resourceGroup": "soc-purview",
"id": (
"/subscriptions/d1d8779d-38d7-4f06-91db-9cbc8de0176f/resourceGroups/SOC-Purview/"
"providers/Microsoft.OperationalInsights/workspaces/AipDstim"
),
},
{
"workspaceName": "AipDstim",
"workspaceId": "0f926592-f41a-48b8-8a57-95422c50db93",
"tenantId": "4b2462a4-bbee-495a-a0e1-f23ae524cc9c",
"subscriptionId": "40dcc8bf-0478-4f3b-b275-ed0a94f2c013",
"resourceGroup": "soc-purview2",
"id": (
"/subscriptions/40dcc8bf-0478-4f3b-b275-ed0a94f2c013/resourceGroups/SOC-Purview2/"
"providers/Microsoft.OperationalInsights/workspaces/AipDstim"
),
},
]
def _get_ws_results(**kwargs):
"""Return results DF."""
df = pd.DataFrame(_WS_RES_GRAPH_DATA)
if kwargs.get("workspace_id"):
return df[df.workspaceId.str.casefold() == kwargs["workspace_id"].casefold()]
if kwargs.get("resource_id"):
return df[df.id.str.casefold() == kwargs["resource_id"].casefold()]
if kwargs.get("workspace_name") and kwargs.get("resource_group"):
return df[
(df.workspaceName.str.casefold() == kwargs["workspace_name"].casefold())
& (
df.resourceGroup.str.casefold()
== kwargs.get("resource_group", "").casefold()
)
]
if kwargs.get("workspace_name") and kwargs.get("resource_group"):
return df[
(df.workspaceName.str.casefold() == kwargs["subscription_id"].casefold())
& (
df.subscriptionId.str.casefold()
== kwargs.get("subscription_id", "").casefold()
)
]
if kwargs.get("workspace_name"):
return df[
df.workspaceName.str.casefold() == kwargs["workspace_name"].casefold()
]
return pd.DataFrame(columns=df.columns)
TestExpected = namedtuple("TestExpected", "ws_name, sub_id, res_group, ws_id, ten_id")
_TEST_URL_LOOKUP = [
(
_PORTAL_URLS[0],
TestExpected(
"cybersecuritysoc",
"d1d8779d-38d7-4f06-91db-9cbc8de0176f",
"soc",
"8ecf8077-cf51-4820-aadd-14040956f35d",
"72f988bf-86f1-41af-91ab-2d7cd011db47",
),
"cybersecuritysoc",
),
(
_PORTAL_URLS[1],
TestExpected(
"asihuntomsworkspacev4",
"40dcc8bf-0478-4f3b-b275-ed0a94f2c013",
"asihuntomsworkspacerg",
"52b1ab41-869e-4138-9e40-2a4457f09bf0",
"72f988bf-86f1-41af-91ab-2d7cd011db47",
),
"asihuntomsworkspacev4",
),
(
_PORTAL_URLS[2],
TestExpected(
"aipdstim",
"d1d8779d-38d7-4f06-91db-9cbc8de0176f",
"soc-purview",
"0f926592-f41a-48b8-8a57-95422c50db93",
"4b2462a4-bbee-495a-a0e1-f23ae524cc9c",
),
"aipdstim",
),
(
_PORTAL_URLS[3],
TestExpected(
"non-existent",
"d1d8779d-9999-4f06-91db-9cbc8de0176f",
"soc-na",
"unknown",
"unknown",
),
"non-existent",
),
]
@pytest.mark.parametrize(
"url, expected, wk_space", _TEST_URL_LOOKUP, ids=[i[2] for i in _TEST_URL_LOOKUP]
)
@respx.mock
def test_ws_details_from_url(url, expected, wk_space, monkeypatch):
"""Testing retrieving workspace details from portal url."""
del wk_space
login_endpoint = AzureCloudConfig().endpoints.active_directory
respx.get(re.compile(f"{login_endpoint}.*")).respond(200, json=_TENANT_LOOKUP_RESP)
_patch_qry_prov(monkeypatch)
ws_details = MicrosoftSentinel.get_workspace_details_from_url(url)
ws_details = next(iter(ws_details.values()))
check.equal(ws_details["WorkspaceName"].casefold(), expected.ws_name.casefold())
check.equal(ws_details["SubscriptionId"].casefold(), expected.sub_id.casefold())
check.equal(ws_details["ResourceGroup"].casefold(), expected.res_group.casefold())
check.equal(ws_details["WorkspaceId"].casefold(), expected.ws_id.casefold())
check.equal(ws_details["TenantId"].casefold(), expected.ten_id.casefold())
@pytest.mark.parametrize(
"url, expected, wk_space", _TEST_URL_LOOKUP, ids=[i[2] for i in _TEST_URL_LOOKUP]
)
def test_get_workspace_name(url, expected, wk_space, monkeypatch):
"""Testing retrieving workspace details from portal url."""
del url, wk_space
_patch_qry_prov(monkeypatch)
ws_name = MicrosoftSentinel.get_workspace_name(workspace_id=expected.ws_id)
if ws_name is None:
check.equal(expected.ws_name.casefold(), "non-existent")
else:
check.equal(ws_name.casefold(), expected.ws_name.casefold())
@pytest.mark.parametrize(
"url, expected, wk_space", _TEST_URL_LOOKUP, ids=[i[2] for i in _TEST_URL_LOOKUP]
)
def test_get_workspace_id(url, expected, wk_space, monkeypatch):
"""Testing retrieving workspace details from portal url."""
del url
_patch_qry_prov(monkeypatch)
# with only ws name parameter we might get multiple results
ws_id = MicrosoftSentinel.get_workspace_id(workspace_name=wk_space)
if expected.ws_id == "unknown":
check.is_none(ws_id)
else:
multi_res_id = _get_ws_results(workspace_name=wk_space)
check.greater_equal(len(multi_res_id), 1)
check.equal(ws_id.casefold(), expected.ws_id.casefold())
ws_id = MicrosoftSentinel.get_workspace_id(
workspace_name=wk_space, resource_group=expected.res_group
)
if expected.ws_id == "unknown":
check.is_none(ws_id)
else:
check.equal(ws_id.casefold(), expected.ws_id.casefold())
ws_id = MicrosoftSentinel.get_workspace_id(workspace_name=wk_space)
@pytest.mark.parametrize(
"url, expected, wk_space", _TEST_URL_LOOKUP, ids=[i[2] for i in _TEST_URL_LOOKUP]
)
def test_get_workspace_settings_by_id(url, expected, wk_space, monkeypatch):
"""Testing retrieving workspace details from portal url."""
_patch_qry_prov(monkeypatch)
ws_settings = MicrosoftSentinel.get_workspace_settings(workspace_id=expected.ws_id)
if wk_space == "non-existent":
check.is_false(ws_settings)
else:
ws_details = next(iter(ws_settings.values()))
check.equal(ws_details["WorkspaceName"].casefold(), expected.ws_name.casefold())
check.equal(ws_details["SubscriptionId"].casefold(), expected.sub_id.casefold())
check.equal(
ws_details["ResourceGroup"].casefold(), expected.res_group.casefold()
)
check.equal(ws_details["WorkspaceId"].casefold(), expected.ws_id.casefold())
resource_id = MicrosoftSentinel.get_resource_id_from_url(url)
ws_settings = MicrosoftSentinel.get_workspace_settings(resource_id=resource_id)
if wk_space == "non-existent":
check.is_false(ws_settings)
else:
ws_details = next(iter(ws_settings.values()))
check.equal(ws_details["WorkspaceName"].casefold(), expected.ws_name.casefold())
check.equal(ws_details["SubscriptionId"].casefold(), expected.sub_id.casefold())
check.equal(
ws_details["ResourceGroup"].casefold(), expected.res_group.casefold()
)
check.equal(ws_details["WorkspaceId"].casefold(), expected.ws_id.casefold())
@pytest.mark.parametrize(
"url, expected, wk_space", _TEST_URL_LOOKUP, ids=[i[2] for i in _TEST_URL_LOOKUP]
)
def test_get_workspace_settings_by_name(url, expected, wk_space, monkeypatch):
"""Testing retrieving workspace details from portal url."""
del url
_patch_qry_prov(monkeypatch)
# with only ws name parameter we might get multiple results
ws_settings = MicrosoftSentinel.get_workspace_settings_by_name(
workspace_name=wk_space
)
if expected.ws_id == "unknown":
check.is_false(ws_settings)
else:
ws_details = next(iter(ws_settings.values()))
multi_res_id = _get_ws_results(workspace_name=wk_space)
check.greater_equal(len(multi_res_id), 1)
check.equal(ws_details["WorkspaceId"].casefold(), expected.ws_id.casefold())
ws_settings = MicrosoftSentinel.get_workspace_settings_by_name(
workspace_name=wk_space, subscription_id=expected.sub_id
)
if wk_space == "non-existent":
check.is_false(ws_settings)
else:
ws_details = next(iter(ws_settings.values()))
check.equal(ws_details["WorkspaceName"].casefold(), expected.ws_name.casefold())
check.equal(ws_details["SubscriptionId"].casefold(), expected.sub_id.casefold())
check.equal(
ws_details["ResourceGroup"].casefold(), expected.res_group.casefold()
)
check.equal(ws_details["WorkspaceId"].casefold(), expected.ws_id.casefold())
@pytest.mark.parametrize(
"url, expected, wk_space", _TEST_URL_LOOKUP, ids=[i[2] for i in _TEST_URL_LOOKUP]
)
def test_get_resource_id(url, expected, wk_space):
"""Test get resource IDs."""
res_id = MicrosoftSentinel.get_resource_id_from_url(url)
check.is_not_none(res_id)
if wk_space != "non-existent":
expected_id = next(
iter(
ws["id"]
for ws in _WS_RES_GRAPH_DATA
if ws["workspaceName"].casefold() == wk_space.casefold()
and ws["subscriptionId"].casefold() == expected.sub_id.casefold()
)
)
check.equal(res_id.casefold(), expected_id.casefold())
def test_get_resource_id_bad():
"""Check format/error handling on resource IDs."""
res_id = MicrosoftSentinel.get_resource_id_from_url("not a resource ID")
check.is_none(res_id)
res_id = MicrosoftSentinel.get_resource_id_from_url(
"https://foo/#page/subscriptions/not_res_id"
)
check.is_none(res_id)
    # a URL with a well-formed resource path still parses, even when the IDs are dubious
dubious_res_url = (
"https://foo/#page/subscriptions/999"
"/resourcegroups/1234"
"/providers/Microsoft.OperationalInsights"
"/workspaces/0"
)
res_id = MicrosoftSentinel.get_resource_id_from_url(dubious_res_url)
check.is_not_none(res_id)
@pytest.mark.parametrize(
"url, expected, wk_space", _TEST_URL_LOOKUP, ids=[i[2] for i in _TEST_URL_LOOKUP]
)
@respx.mock
def test_fail_tenantid_lookup(url, expected, wk_space, monkeypatch):
"""Test when tenant ID lookup fails."""
login_endpoint = AzureCloudConfig().endpoints.active_directory
respx.get(re.compile(f"{login_endpoint}.*")).respond(404, json={})
_patch_qry_prov(monkeypatch)
ws_details = MicrosoftSentinel.get_workspace_details_from_url(url)
ws_details = next(iter(ws_details.values()))
check.equal(ws_details["WorkspaceName"].casefold(), expected.ws_name.casefold())
check.equal(ws_details["SubscriptionId"].casefold(), expected.sub_id.casefold())
check.equal(ws_details["ResourceGroup"].casefold(), expected.res_group.casefold())
check.equal(ws_details["WorkspaceId"].casefold(), expected.ws_id.casefold())
if wk_space == "cybersecuritysoc":
check.equal(
ws_details["TenantId"].casefold(), "4b2462a4-bbee-495a-a0e1-f23ae524cc9c"
)
else:
check.equal(ws_details["TenantId"].casefold(), expected.ten_id.casefold())
def test_param_checks():
"""Test checks for missing params."""
with pytest.raises(ValueError):
MicrosoftSentinel.get_workspace_settings()
with pytest.raises(ValueError):
MicrosoftSentinel.get_workspace_name()
def _patch_qry_prov(patcher):
qry_prov = QueryProvider("ResourceGraph")
setattr(MicrosoftSentinel, "_RES_GRAPH_PROV", qry_prov)
qry_prov._query_provider._loaded = True
qry_prov._query_provider._connected = True
patcher.setattr(qry_prov, "connect", lambda: True)
resg_queries = getattr(qry_prov, "Sentinel")
patcher.setattr(
resg_queries, "get_sentinel_workspace_for_resource_id", _get_ws_results
)
patcher.setattr(
resg_queries, "get_sentinel_workspace_for_workspace_id", _get_ws_results
)
patcher.setattr(resg_queries, "list_sentinel_workspaces_for_name", _get_ws_results)
|
# Use arguments with move statements to move farther.
hero.moveRight(3)
hero.moveUp()
hero.moveRight()
hero.moveDown(3)
hero.moveRight(2)
|
import os
import boto3
def handler(event, context):
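    # writes a fixed message to the bucket named by the BUCKET_NAME
    # environment variable (which must be set on the Lambda function)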
s3 = boto3.resource("s3")
content = "You are the best!"
s3.Object(os.environ.get("BUCKET_NAME"), "hourly.txt").put(Body=content)
|
from sklearn.preprocessing import MultiLabelBinarizer

print(y_train1)
binarizer = MultiLabelBinarizer().fit(y_train1)
y_train = binarizer.transform(y_train1)
print(y_train)
print(binarizer.inverse_transform(y_train))
|
# Playstation controller Code
# For controller pairing:
# In the terminal place the following code:
# setup
sudo apt-get install bluetooth libbluetooth3 libusb-dev
sudo systemctl enable bluetooth.service
sudo usermod -G bluetooth -a pi
# pairing
wget http://www.pabr.org/sixlinux/sixpair.c
gcc -o sixpair sixpair.c -lusb
# plug in controller with cord
sudo ./sixpair
# should see the following message:
# Current Bluetooth master: 5c:f3:70:66:5c:e2
# Setting master bd_addr to 5c:f3:70:66:5c:e2
# confirm it is connected with:
ls /dev/input # device should be called something like 'js0'
# Handler
from triangula.input import SixAxis, SixAxisResource
# Button handler, will be bound to the square button later
def handler(button):
    print('Button {} pressed'.format(button))
# Get a joystick, this will fail unless the SixAxis controller is paired and active
# The bind_defaults argument specifies that we should bind actions to the SELECT and START buttons to
# centre the controller and reset the calibration respectively.
with SixAxisResource(bind_defaults=True) as joystick:
# Register a button handler for the square button
joystick.register_button_handler(handler, SixAxis.BUTTON_SQUARE)
    while True:
# Read the x and y axes of the left hand stick, the right hand stick has axes 2 and 3
x = joystick.axes[0].corrected_value()
y = joystick.axes[1].corrected_value()
        print(x, y)
|
from models import Calendar
class Events:
def __init__(self):
self.events = Calendar.objects.all()
|
"""
Common elements in two sorted lists with duplicates (return all duplicates if they exist)
"""
a = [1, 1, 2, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
c = []
for i in a:
if i in b and i not in c:
        c.append(i)
print(c)
def common_member(a, b):
a_set = set(a)
b_set = set(b)
if (a_set & b_set):
        return a_set & b_set
else:
        return []
|
testCaseNums = list()
def getLastNum(num):
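    # multiply num by 1, 2, 3, ... collecting the decimal digits that appear,
    # until all ten digits have been seen; return the last multiple produced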
multiple = 1
num_basket = list()
realnum = num
    while len(num_basket) < 10:
        num = realnum * multiple
num = str(num)
for i in range(len(num)):
num_basket.append(num[i])
num_basket = list(set(num_basket))
num_basket.sort()
multiple = multiple+1
num = int(num)
    return num
def getCaseResult(num):
if num==0:
return "INSOMNIA"
elif num>0:
return getLastNum(num)
testCaseCount = int(input())
for i in range(testCaseCount):
    num = int(input())
    testCaseNums.append(num)
j = 1
for i in testCaseNums:
print("Case #%d: %s"%(j,getCaseResult(i)))
j = j+1
|
def find_cycle(connections):
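    # brute-force search: from every node, walk the edges depth-first and record
    # a route whenever it returns to its start; the longest recorded cycle wins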
def findloop(point, route, startpoint, lastpoint, connections):
for (i,j) in connections:
if point in (i,j):
if point == j:
i,j = j,i
if j != lastpoint:
if j == startpoint:
resultlist.append(route+[j])
elif j not in route:
findloop(j, route+[j], startpoint, i, connections)
elementbook = set()
resultlist = []
for (i,j) in connections:
elementbook.add(i)
elementbook.add(j)
for i in elementbook:
findloop(i, [i], i, i, connections)
if resultlist:
resultlist.sort(key = lambda x: len(x), reverse = True)
return resultlist[0]
else:
return []
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
def checker(function, connections, best_size):
user_result = function(connections)
if not isinstance(user_result, (tuple, list)) or not all(isinstance(n, int) for n in user_result):
print("You should return a list/tuple of integers.")
return False
if not best_size and user_result:
print("Where did you find a cycle here?")
return False
if not best_size and not user_result:
return True
if len(user_result) < best_size + 1:
print("You can find a better loop.")
return False
if user_result[0] != user_result[-1]:
print("A cycle starts and ends in the same node.")
return False
if len(set(user_result)) != len(user_result) - 1:
print("Repeat! Yellow card!")
return False
for n1, n2 in zip(user_result[:-1], user_result[1:]):
if (n1, n2) not in connections and (n2, n1) not in connections:
print("{}-{} is not exist".format(n1, n2))
return False
        return True
assert checker(find_cycle,
((1, 2), (2, 3), (3, 4), (4, 5), (5, 7), (7, 6),
(8, 5), (8, 4), (1, 5), (2, 4), (1, 8)), 6), "Example"
assert checker(find_cycle,
((1, 2), (2, 3), (3, 4), (4, 5), (5, 7), (7, 6), (8, 4), (1, 5), (2, 4)), 5), "Second"
|
## Unconditional jump instructions ##
#
# Opcode | Symbolic representation | Description
# 00001101 | JUMP M(X,0:19) | Fetch the next instruction from the left half of M(X)
# 00001110 | JUMP M(X,20:39) | Fetch the next instruction from the right half of M(X)
class UnconditionalDeviation():
def __init__(self):
super().__init__()
def jumpMxLeft(self, x, memory):
"""00001101 - Apanha a próxima instrução da metade esquerda de M(X)"""
return memory[x][0:20]
def jumpMxRight(self, x, memory):
"""00001110 - Apanha a próxima instrução da metade direita de M(X)"""
return memory[x][20:40]
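# A minimal usage sketch (an assumption, not part of the original file):
# memory is presumed to be a sequence of 40-character binary-string words,
# as the slicing above implies.
if __name__ == '__main__':
    memory = ['0' * 40 for _ in range(8)]
    memory[3] = '00001101' + '0' * 12 + '00001110' + '0' * 12
    ud = UnconditionalDeviation()
    print(ud.jumpMxLeft(3, memory))   # left 20 bits of word 3
    print(ud.jumpMxRight(3, memory))  # right 20 bits of word 3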
|
def ejemplo_for():
    # read a number and report its parity and whether it is a multiple of 3
    i = int(input("Enter a number: "))
    if i % 2 == 0:
        print(i, "is even")
    else:
        print(i, "is odd")
    if i % 3 == 0:
        print(i, "is a multiple of 3")
    else:
        print(i, "is not a multiple of 3")
ejemplo_for()
|
# # import requests
# # from flask-test import users
# # # res = requests.get("https://api.exchangeratesapi.io/latest?base=USD")
# # # data = res.json()
# # # results = data['rates']
# # # currency_value = results['ILS']
# # # x = float(input("Please enter an amount of Shekeles to convert to Dollars: "))
# # # print(x * currency_value)
# #
# # res = requests.post('http://127.0.0.1:5000/data/8', json={"user_name":"ben"})
# # if res.ok:
# # def add_user():
# # if add_user():
# # conn = pymysql.connect(host='remotemysql.com', port=3306, user='12F1HSutPL', passwd='LB8D9pgJuP', db='12F1HSutPL')
# # conn.autocommit(True)
# # cursor = conn.cursor()
# # cursor.execute("INSERT into 12F1HSutPL.users (ID, name, date) VALUES ("+user_id+","+user_name+",'" + str(datetime.datetime.now()) + "')")
# # # return print(json.loads(y)), {"status": "ok", "user_added": "john"}, 200 # status code
# # # else:
# # # return print(json.loads(z)), {"status": "error", "reason": "id already exists"}, 500 # status code
#
# # import requests
# # res = requests.post('http://127.0.0.1:5000/data/1', json={"user_name":"itay"})
# # if res.ok:
# # print(res.json())
# # import pymysql,requests,datetime,time
# #
# # import requests
# #
# # res = requests.get("http://127.0.0.1:5000/data/1")
# # data = res.json()
# # id = data["user_id"]
# # name = data["user_name"]
# # conn = pymysql.connect(host='remotemysql.com', port=3306, user='12F1HSutPL', passwd='LB8D9pgJuP',db='12F1HSutPL')
# # conn.autocommit(True)
# # cursor = conn.cursor()
# # now = datetime.datetime.utcnow()
# # cursor.execute("INSERT into 12F1HSutPL.users (ID, name, date) VALUES (%s,%s,%s)" , (id, name, now.strftime('%Y-%m-%d %H:%M:%S')))
# #
# #
#
# import pymysql
#
# # Establishing a connection to DB
# conn = pymysql.connect(host='remotemysql.com', port=3306, user='12F1HSutPL', passwd='LB8D9pgJuP', db='12F1HSutPL')
#
# # Getting a cursor from Database
# cursor = conn.cursor()
#
# # Getting all data from table “users”
# cursor.execute("SELECT * FROM 12F1HSutPL.users;")
#
# # Iterating table and printing all users
# for row in cursor:
# print(row)
#
# cursor.close()
# conn.close()
|
#-*-coding:utf8;-*-
#qpy:3
#qpy:console
print("This is console module")
a=(' * ')
b=''
for i in range(1,15):
b=a*i
    print(b)
|
"""
This module acts as the task scheduler. Coroutine functions can be spawned, joined or killed
"""
import Queue
import collections
from .task import Task
__author__ = 'stevet'
_ready_queue = Queue.Queue()
_job_registry = {}
_signal_list = {}
_join_list = collections.defaultdict(list)
_await_list = collections.defaultdict(list)
def spawn(coroutine, callback=None):
"""
add a new task for function <coroutine>, with optional <callback> function
returns the id of the new task
"""
new_task = Task(coroutine, callback)
_job_registry[new_task.id] = new_task
_ready_queue.put(new_task)
return new_task.id
def defer_spawn(coroutine, callback=None):
"""
Create a task without starting it -- used to create tasks for joins
"""
new_task = Task(coroutine, callback)
_job_registry[new_task.id] = new_task
return new_task.id
def kill(task_id):
"""
remove task <task_id> from the systems
"""
result = _job_registry.pop(task_id, None)
_signal_list.pop(task_id, None)
if result:
deferred_tasks = _join_list.pop(result.id, tuple())
for deferred_task in deferred_tasks:
_await_list[deferred_task].remove(task_id)
if not _await_list[deferred_task]:
_await_list.pop(deferred_task)
waiting_task = _job_registry.get(deferred_task)
_ready_queue.put(waiting_task)
return result
def join(existing_task, joining_task):
"""
make task with id <joining task> dependent on task with id <existing task>. Returns the ids
of all the tasks on which <joining task> depends
"""
if not existing_task in _job_registry:
raise RuntimeError("No active task: %s" % existing_task)
if not joining_task in _job_registry:
raise RuntimeError("No active task: %s" % joining_task)
_join_list[existing_task].append(joining_task)
_await_list[joining_task].append(existing_task)
return _await_list[joining_task]
def signal(task_id, message):
"""
queues a message to send to the task at id <task_id>. The signal will be passed to the
task on its next time slice
"""
if task_id in _job_registry:
_signal_list[task_id] = message
def tick():
"""
execute one time slice
"""
if _job_registry:
task = _ready_queue.get()
if task and task.id in _job_registry:
message = _signal_list.pop(task.id, None)
if task.tick(message):
_ready_queue.put(task)
else:
kill(task.id)
def list_jobs():
"""
lists all of the jobs in the scheduler
"""
return _job_registry.items()
def list_waiting():
"""
list all the jobs waiting on other jobs
"""
return _join_list.items()
def run():
"""
run through all of the jobs in the scheduler
"""
while _job_registry:
tick()
def reset():
    """
    wipe all of the existing tasks and jobs
    """
    global _ready_queue, _job_registry, _signal_list, _join_list
    _ready_queue = Queue.Queue()
    _job_registry = {}
    _signal_list = {}
    _join_list = collections.defaultdict(list)
__all__ = 'spawn defer_spawn kill join signal tick run reset list_jobs list_waiting'.split()
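# A minimal usage sketch (an assumption, not part of the original module):
# it presumes Task accepts a generator object and that tick() advances it
# one step per time slice.
if __name__ == '__main__':
    def count_to(n):
        for i in range(n):
            print(i)
            yield
    spawn(count_to(3))
    run()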
|
from gmpy2 import *
print('m1=m+p1')
print('m2=m+p2')
n=int(input("n(hex):")[2:],16)
c1=int(input("c1(hex):")[2:],16)
p1=int(input("p1(hex):")[2:],16)
c2=int(input("c2(hex):")[2:],16)
p2=int(input("p2(hex):")[2:],16)
print('m1=a*m2+p1-p2')
a=int(input("a(hex):")[2:],16)
b=p1-p2
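# recover the message from two cube (e=3) ciphertexts of related plaintexts,
# m1 = a*m2 + b, via a Franklin-Reiter style closed-form identity; the final
# "- p2" undoes m2 = m + p2 as printed above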
m=((3*b*((a**3)*c2-b**3)*invert(c1-c2*(a**3)+2*(b**3),n)+b)*invert(a,n)-p2)%n
from binascii import *
print(unhexlify(hex(m)[2:]))
|
# -*- coding: utf8 -*-
# Quicksort
#
# Idea
# Quicksort picks some element of the sequence as the pivot, then moves every
# element larger than the pivot to its right and every smaller element to its
# left. After this first pass, the same procedure is applied recursively to the
# sub-sequence left of the pivot (the smaller elements) and the sub-sequence
# right of it (the larger elements); the recursion sorts the whole sequence.
#
# Steps
# To show the procedure clearly, here is a concrete example: quicksorting the
# sequence [4, 2, 7, 8, 0, 1, 5, 23].
# Let i be an index starting at the left end and j an index starting at the
# right end; without loss of generality take 4 as the pivot (j always moves
# first, leftwards, then i moves rightwards).
#
# (1) First pass
# [4, 2, 7, 8, 0, 1, 5, 23]
# j moves left to the first number smaller than 4 and stops at 1; i moves right
# to the first number larger than 4 and stops at 7; swap them:
# [4, 2, 1, 8, 0, 7, 5, 23]
# j continues left to the next number smaller than 4 and stops at 0; i continues
# right to the next number larger than 4 and stops at 8; swap them:
# [4, 2, 1, 0, 8, 7, 5, 23]
# Now i and j meet, the first pass ends, and 4 is swapped with 0:
# [0, 2, 1, 4, 8, 7, 5, 23]
# Every number smaller than 4 is to its left, every larger number to its right.
#
# (2) Second pass
# Sort the sub-sequences on either side of 4. First the left side, [0, 2, 1]:
# with the first number 0 as pivot, j moves left looking for a number smaller
# than 0 while i moves right looking for a number larger than 0; they meet at 0,
# so nothing is swapped and the sequence stays [0, 2, 1], with everything
# smaller than 0 on its left and everything larger on its right.
# Then the right side, [8, 7, 5, 23]: with the first number 8 as pivot, j moves
# left and stops at 5, the first number smaller than 8; i moves right looking
# for a number larger than 8; swapping where needed (keeping i below j) gives
# [5, 7, 8, 23], with everything smaller than 8 on its left and everything
# larger on its right.
# The whole sequence is now [0, 2, 1, 4, 5, 7, 8, 23].
#
# (3) Third pass
# Repeating the same procedure, the sub-sequence right of 0 sorts to [0, 1, 2]
# and the sub-sequence left of 8 sorts to [5, 7, 8, 23]. The final result is
# [0, 1, 2, 4, 5, 7, 8, 23]
# and the quicksort is complete.
def fake_quick_sort(arr):
"""
    naive quicksort: O(n log n) average time, O(n) extra space, not a stable sort
    relatively inefficient
:param arr:
:return:
"""
if len(arr) < 2:
return arr
pivot = arr[0]
less_arr = fake_quick_sort([i for i in arr[1:] if i <= pivot])
large_arr = fake_quick_sort([i for i in arr[1:] if i > pivot])
return less_arr + [pivot] + large_arr
def partition(arr,low,high):
"""
    find the partition point (pivot is the last element)
:param arr:
:param low:
:param high:
:return:
"""
i = low - 1
pivot = arr[high]
for j in range(low,high):
if arr[j] <= pivot:
i += 1
arr[i],arr[j] = arr[j],arr[i]
arr[i+1],arr[high] = arr[high],arr[i+1]
return i + 1
def quick_sort(arr,low,high):
"""
    quicksort: O(n log n) average time, in-place apart from the recursion stack, not a stable sort
:param arr:
:param low:
:param high:
:return:
"""
if low < high:
p = partition(arr,low,high)
quick_sort(arr, low, p - 1)
quick_sort(arr, p + 1, high)
return arr
def partition2(arr,low,high):
"""
    partitioning routine
:param arr:
:param low:
:param high:
:return:
"""
    # take the leftmost element as the pivot
    pivot = arr[low]
    while low < high:
        # scan from right to left, decrementing high
        while low < high and arr[high] >= pivot:
            high -= 1
        # a value smaller than the pivot is moved to the left side
        arr[low] = arr[high]
        # scan from left to right, incrementing low
        while low < high and arr[low] <= pivot:
            low += 1
        # a value larger than the pivot is moved to the right side
        arr[high] = arr[low]
    # the pointers have met (low == high): drop the pivot into place
    arr[low] = pivot
    return low
def quick_sort2(arr,low,high):
"""
:param arr:
:param low:
:param high:
:return:
"""
if low < high:
p = partition2(arr,low,high)
quick_sort2(arr,low,p-1)
quick_sort2(arr,p+1,high)
return arr
if __name__ == '__main__':
arr = [2, 5, 4, 6, 1, 3,2]
# arr = [1, 4, 2, 6, 5, 3, 2]
# print(fake_quick_sort(arr))
    print(quick_sort2(arr, 0, len(arr) - 1))
|
def get_sent_message(bot):
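    # `bot` is expected to be a mock (e.g. unittest.mock.Mock): mock_calls[0][1]
    # is the positional-argument tuple of the first recorded call, and the
    # message text is assumed to be its second positional argument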
send_message_args = bot.mock_calls[0][1]
text = send_message_args[1]
return text
|
# https://atcoder.jp/contests/abc161/tasks/abc161_d
import sys
def input(): return sys.stdin.readline().rstrip()
sys.setrecursionlimit(10 ** 7)
K = int(input())
nums = []
def make_num(keta, str_num):
if len(str_num) == keta:
nums.append(str_num)
return
if str_num[-1] == '0':
# for i in [0,1,9]:
for i in [0,1]:
make_num(keta, str_num + str(i))
elif str_num[-1] == '9':
# for i in [0,8,9]:
for i in [8,9]:
make_num(keta, str_num + str(i))
else:
for i in range(-1, 2):
make_num(keta, str_num + str(int(str_num[-1])+i))
def enum(keta):
for i in range(1,10):
make_num(keta, str(i))
keta = 0
while len(nums) < K:
keta += 1
enum(keta)
# print(nums[K-20:])
print(nums[K-1])
# TLE
# def chk(num):
# s = str(num)
# for i in range(len(s)-1):
# if abs(int(s[i]) - int(s[i+1])) > 1:
# return False
# return True
# K = int(input())
# cnt = 0
# now = 0
# while cnt < K:
# now += 1
# if chk(now):
# cnt += 1
# print(now)
# S = input()
# N, K = map(int, input().split())
# A = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
|
# Import required library
import pandas as pd
# Import the CSV file into Python
A_data = pd.read_csv("../input/hr-ana/train.csv")
A_data = A_data.dropna()
# Map categorical field columns to integer codes #
# gender
A_data["gender"] = A_data["gender"].map({'m': 1, 'f': 2}).astype('int64')  # male -> 1, female -> 2
# department
department_codes = {
    'Analytics': 1, 'Finance': 2, 'HR': 3, 'Legal': 4, 'Operations': 5,
    'Procurement': 6, 'R&D': 7, 'Sales & Marketing': 8, 'Technology': 9,
}
A_data["department"] = A_data["department"].map(department_codes).astype('int64')
# region: strip the "region_" prefix and keep the number (region_1 -> 1, ..., region_34 -> 34)
A_data["region"] = A_data["region"].str.replace("region_", "", regex=False).astype('int64')
# education
education_codes = {"Bachelor's": 1, "Below Secondary": 2, "Master's & above": 3}
A_data["education"] = A_data["education"].map(education_codes).astype('int64')
# recruitment_channel
recruitment_codes = {'other': 1, 'sourcing': 2, 'referred': 3}
A_data["recruitment_channel"] = A_data["recruitment_channel"].map(recruitment_codes).astype('int64')
# drop unnecessary employee ID for analysis
A_data.drop(['employee_id'], axis = 1, inplace = True)
#set cleaned data to new csv file
A_data.to_csv('c_train.csv',index=False)
|
#!/usr/bin/Python
# Filename: backup_ver1.py
# not sure whether a separate zip utility needs to be installed for this to work
import os
import time
source =[r'D:\IDM下载文件\考研辅导班\电子科大本科软件工程上课PPT']
target_dir = r'D:\MyDrivers\hotfix'
target = os.path.join(target_dir, time.strftime('%Y%m%d%H%M%S') + '.zip')
zip_command = "zip -qr '%s' %s" % (target, ' '.join(source))
# Run the backup
if os.system(zip_command) == 0:
    print('Successful backup to', target)
else:
    print('Backup FAILED')
|
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flask_login import UserMixin
from . import db
class Fenxi(db.Model):
__tablename__ = 'pingjufenxi'
id = db.Column(db.Integer, primary_key=True)
match_id = db.Column(db.Integer)
competition = db.Column(db.String(40))
round = db.Column(db.String(20))
hometeam = db.Column(db.String(40))
guestteam = db.Column(db.String(40))
times = db.Column(db.Integer)
htscore = db.Column(db.Integer)
gtscore = db.Column(db.Integer)
result = db.Column(db.Integer)
def __repr__(self):
return '<match of %r vs %r>' % (self.hometeam, self.guestteam)
|
from sanic import Sanic
from sanic.response import json
from wym.playlist import Playlist
app = Sanic()
playlist = Playlist()
queue = []
@app.route("/add", methods=['POST'])
async def add_url(request):
return json({"hello": "world"})
@app.route("/playlist", methods=['GET'])
async def show_playlist(request):
return json(playlist.as_dict())
@app.route("/downloadqueue", methods=['GET'])
async def show_download_queue(request):
return json({"queue": queue})
@app.route("/control", methods=['POST'])
async def control(request):
return json({})
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)
|
import logging
from multiprocessing import Process
import zmq
from zmq.eventloop import ioloop, zmqstream
from config import c
from base import ZMQProcess
from handler import AgentStreamHandler
from puppet import Puppet
from utils import import_from_string
log = logging.getLogger(__name__)
class Agent(ZMQProcess):
def __init__(self, conn_str, subscribe, puppet_handler):
super(Agent, self).__init__()
self.conn_str = conn_str
self.sub_stream = None
self.puppet_handler = puppet_handler
self.subscribe = subscribe
def __str__(self):
return 'Agent {0}'.format(self.__dict__)
def setup(self):
        # set up the subscriber stream
self.sub_stream = self.stream(zmq.SUB, self.conn_str, bind=False,
subscribe=self.subscribe)
self.sub_stream.on_recv(AgentStreamHandler(
self.sub_stream, self.stop,
self.puppet_handler))
def run(self):
self.setup()
self.loop.start()
def stop(self):
self.loop.stop()
if __name__ == '__main__':
# setup the repo
scm = import_from_string(c.scm.klass)
repo = scm(c.scm.repo_url)
log.info('Initializing scm: {0}'.format(repo.repo_url))
# initialize puppet handler
puppet_handler = Puppet(
module_path=c.puppet.module_path,
manifest_path=c.puppet.manifest_path,
scm=repo,
nice=c.puppet.nice)
log.info('Puppet Handler initialized: {0}'.format(puppet_handler))
    # initialize the Agent
agent = Agent(
conn_str=c.broker.pub_endpoint,
subscribe=c.agent.topic,
puppet_handler=puppet_handler)
log.info('Agent initialized: {0}'.format(agent))
# start the Agent
log.info("Agent Starting")
agent.run()
|
import codecs
import xml.etree.ElementTree as ET
import re
def more(f, n=10):
with open(f) as c:
lines = c.readlines()
while lines:
            print(''.join(lines[:n]))
lines = lines[n:]
if input('more?') != 'y':
break
#with codecs.open('rates', 'rb', 'cp1251') as f:
#content = f.read()
#qqtree = ET.parse(content)
#print(tree)
#for line in content:
# print(line)
#with open('rates', 'r') as f:
# for line in f:
# print(line.decode())
#with codecs.open('rates', 'r', 'cp1251') as f:
# content = f.readline()
# print(content)
#with codecs.open('output', 'w', 'utf-8') as out:
# out.write(content)
#print(content)
#findall = re.search(r'<Name>(.+)</Name>', content.decode())
#print(findall.groups())
#for line in f:
# print(line.decode())
if __name__ == '__main__':
more('rates')
# with open('rates', 'r') as f:
# content = f.readlines() #.decode('cp1251')
# print(content[0:8])
|
def read_input():
# open file for reading
in_file = open('word.in', 'r')
# read m and n
coords = in_file.readline().strip().split()
m = int(coords[0])
n = int(coords[1])
# skip blank line
in_file.readline()
# read the grid of characters
word_grid = []
for _ in range(m):
word_grid.append(list(map(lambda x: x[0], in_file.readline().rstrip().split())))
# skip blank line
in_file.readline()
k = int(in_file.readline().strip())
# read the list of words
word_list = []
for _ in range(k):
word_list_item = in_file.readline().strip()
word_list.append(word_list_item)
# close the input file
in_file.close()
return word_grid, word_list
def main():
# read input from file
word_grid, word_list = read_input()
# call word_search() using the word_grid and word_list parameters
for word in word_list:
word_coordinates = word_search(word_grid, word)
print(str(word_coordinates[0]) + " " + str(word_coordinates[1]))
def first_char (word_grid, word_to_search):
char01_exists = False
results = []
char01 = word_to_search[0]
for row_i, row in enumerate(word_grid):
for col_i, colchar in enumerate(row):
if char01 == colchar:
char01_exists = True
results.append(tuple((row_i, col_i)))
return results, char01_exists
def word_search (word_grid, word_to_search):
results, char01_exists = first_char(word_grid, word_to_search)
if not char01_exists:
return -1, -1
word_to_search = word_to_search[1:]
for result in results:
row_i = result[0]
col_i = result[1]
if len(word_to_search) == 0:
return result
# search vertically downwards
if len(word_grid) - (row_i + 1) >= len(word_to_search):
exists = True
i = row_i + 1
for char in word_to_search:
if char != word_grid[i][col_i]:
exists = False
break
i += 1
if exists:
return result
# search vertically upwards
        if row_i >= len(word_to_search):
exists = True
i = row_i - 1
for char in word_to_search:
if char != word_grid[i][col_i]:
exists = False
break
i -= 1
if exists:
return result
# search horizontally right
if len(word_grid[row_i]) - (col_i + 1) >= len(word_to_search):
exists = True
i = col_i + 1
for char in word_to_search:
if char != word_grid[row_i][i]:
exists = False
break
i += 1
if exists:
return result
# search horizontally left
        if col_i >= len(word_to_search):
exists = True
i = col_i - 1
for char in word_to_search:
if char != word_grid[row_i][i]:
exists = False
break
i -= 1
if exists:
return result
# search diagonally up and to the right
        if (row_i >= len(word_to_search)
                and len(word_grid[row_i]) - (col_i + 1) >= len(word_to_search)):
exists = True
x = col_i + 1
y = row_i - 1
for char in word_to_search:
if char != word_grid[y][x]:
exists = False
break
x += 1
y -= 1
if exists:
return result
# search diagonally down and to the right
if (len(word_grid) - (row_i + 1) >= len(word_to_search)
and len(word_grid[row_i]) - (col_i + 1) >= len(word_to_search)):
exists = True
x = col_i + 1
y = row_i + 1
for char in word_to_search:
if char != word_grid[y][x]:
exists = False
break
x += 1
y += 1
if exists:
return result
# search diagonally up and to the left
        if (row_i >= len(word_to_search)
                and col_i >= len(word_to_search)):
exists = True
x = col_i - 1
y = row_i - 1
for char in word_to_search:
if char != word_grid[y][x]:
exists = False
break
x -= 1
y -= 1
if exists:
return result
# search diagonally down and to the left
        if (len(word_grid) - (row_i + 1) >= len(word_to_search)
                and col_i >= len(word_to_search)):
exists = True
x = col_i - 1
y = row_i + 1
for char in word_to_search:
if char != word_grid[y][x]:
exists = False
break
x -= 1
y += 1
if exists:
return result
return -1, -1
main()
|
# Generated by Django 2.1 on 2019-02-27 10:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='add_time',
field=models.DateTimeField(default=datetime.datetime(2019, 2, 27, 18, 19, 20, 552188), verbose_name='注册时间'),
),
migrations.AlterField(
model_name='userprofile',
name='user_idv',
field=models.CharField(default='b0cf37e00de04af5a195a4f7e9b62d61', max_length=50, unique=True, verbose_name='用户唯一ID'),
),
]
|
class Node:
def __init__(self, val, next = None):
self.val = val
self.next = next
def getval(self):
return self.val
def getnext(self):
return self.next
class LinkedList:
def __init__(self, list):
previous = Node(list[0])
self.head = previous
for v in list[1:]:
previous.next = Node(v)
previous = previous.next
def length(self):
l = 1
current = self.head
while current.next != None:
current = current.next
l += 1
return l
def first(self):
return self.head.getval()
def last(self):
current = self.head
while current.next != None:
current = current.next
return current.getval()
def get(self, index):
current = self.head
for i in range(index):
current = current.next
return current.getval()
def append(self, val):
current = self.head
while current.next != None:
current = current.next
current.next = Node(val)
    def delete(self, index):
        # deleting the head must re-point self.head
        if index == 0:
            self.head = self.head.next
            return
        before = self.head
        for i in range(index - 1):
            before = before.next
        before.next = before.next.next
    def insert(self, val, index):
        # inserting at the head must re-point self.head
        if index == 0:
            self.head = Node(val, self.head)
            return
        before = self.head
        for i in range(index - 1):
            before = before.next
        before.next = Node(val, before.next)
def reverse(self, index):
        pass
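    # `reverse` is left as a stub above; a minimal in-place sketch is given
    # here as comments (assumption: the `index` argument is ignored and the
    # whole list is reversed by re-pointing each node's `next` at its
    # predecessor):
    #
    #     def reverse(self, index):
    #         previous = None
    #         current = self.head
    #         while current is not None:
    #             nxt = current.next
    #             current.next = previous
    #             previous = current
    #             current = nxt
    #         self.head = previous
|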
import scrapy
from handReqPro.items import HandreqproItem
"""
Scrapy: manual request sending to crawl every page of a site
- yield scrapy.Request(url=..., callback=...)
  callback specifies the parse method used to parse the response
- yield scrapy.FormRequest(url=..., callback=..., formdata=...): POST
  formdata: dict of request parameters
"""
class DuanziSpider(scrapy.Spider):
name = 'duanzi'
# allowed_domains = ['www.xxx.com']
start_urls = ['http://duanziwang.com/category/经典段子/1/']
    # generic url template
url = 'http://duanziwang.com/category/经典段子/%d/'
page_num = 2
    # crawl the data behind every page number of the joke site
def parse(self, response):
        # parse the title and content
article_list = response.xpath('/html/body/section/div/div/main/article')
for article in article_list:
title = article.xpath('./div[1]/h1/a/text()').extract_first()
content = article.xpath('./div[2]/p/text()').extract_first()
item = HandreqproItem()
item['title'] = title
item['content'] = content
yield item
if self.page_num < 5:
            new_url = self.url % self.page_num  # full url of the next page
self.page_num += 1
            # manually send a request for the next page's url
yield scrapy.Request(url=new_url,callback=self.parse)
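    # For reference, a minimal sketch of the POST variant described in the
    # module docstring (the url and formdata values here are illustrative,
    # not part of this spider):
    #
    #     def start_requests(self):
    #         yield scrapy.FormRequest(
    #             url='https://example.com/search',
    #             callback=self.parse,
    #             formdata={'kw': 'duanzi'},
    #         )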
|
from ..Backend.webAdapter import *
print("StartupManager global space")
def initializeBackendStartup():
print("\n\n\nStartup initialized")
n = NoosAdapter()
print("Startup complete\n\n\n") |
from ophyd import Component as Cpt
from ophyd import Device, EpicsSignal, EpicsSignalRO
from .interface import BaseInterface
from .pv_positioner import PVPositionerIsClose
EVR_TICK_NS = 8.3
class EvrMotor(PVPositionerIsClose):
"""
PV Positioner for adjusting an EVR channel.
Moves that are less than one tick
are considered immediately complete.
"""
setpoint = Cpt(EpicsSignal, ":TDES", kind="normal")
readback = Cpt(EpicsSignalRO, ":BW_TDES", kind="hinted")
atol = EVR_TICK_NS
rtol = 0
class Trigger(BaseInterface, Device):
"""Class for an individual Trigger."""
eventcode = Cpt(EpicsSignal, ':EC_RBV', write_pv=':TEC', kind="config")
eventrate = Cpt(EpicsSignalRO, ':RATE', kind="normal")
label = Cpt(EpicsSignal, ':TCTL.DESC', kind="omitted")
ns_delay = Cpt(
EpicsSignal,
':BW_TDES',
write_pv=':TDES',
tolerance=EVR_TICK_NS,
kind="hinted",
)
ns_delay_scan = Cpt(EvrMotor, '', kind="omitted")
polarity = Cpt(EpicsSignal, ':TPOL', kind="config")
width = Cpt(EpicsSignal, ':BW_TWIDCALC', write_pv=':TWID', kind="normal")
enable_cmd = Cpt(EpicsSignal, ':TCTL', kind="omitted")
tab_whitelist = ['enable', 'disable']
tab_component_names = True
def enable(self):
"""Enable the trigger."""
self.enable_cmd.put(1)
def disable(self):
"""Disable the trigger."""
self.enable_cmd.put(0)
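# Typical usage sketch (the PV prefix below is illustrative):
#
#     trig = Trigger('LAS:EVR:01:TRIG0', name='las_evr_trig0')
#     trig.ns_delay.set(100.0)   # request a 100 ns delay
#     trig.enable()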
|
#-*- coding: UTF-8 -*-
import tornado.web
import tornado.websocket
import json
import uuid
import tornado.ioloop
import os
g_machines = {}
g_commanders = {}
def JsonResponser(code, result, msg):
response = {}
response['code'] = code
response['result'] = result
response['msg'] = msg
return json.dumps(response)
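# e.g. JsonResponser(200, None, 'Success')
#   -> '{"code": 200, "result": null, "msg": "Success"}'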
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html", machines = g_machines)
class ImageHandler(tornado.web.RequestHandler):
def get(self, machine_id = "test_machine"):
self.render("image.html", machine_id = machine_id)
def post(self, machine_id):
static_path = os.path.join(os.path.dirname(__file__), 'static')
file_path = os.path.join(static_path, machine_id + ".png")
file_metas = self.request.files['image']
for meta in file_metas:
with open(file_path, 'wb') as fp:
fp.write(meta['body'])
self.finish(JsonResponser(200, None, 'Success'))
class CommandHandler(tornado.web.RequestHandler):
def get(self, machine_id):
if machine_id not in g_machines.keys():
self.finish(JsonResponser(404, None, "Machine_id not exist!"))
else:
self.finish(JsonResponser(200, g_machines[machine_id]["command"], ""))
g_machines[machine_id]["command"] = []
class MachineStatusHandler(tornado.web.RequestHandler):
def post(self, machine_id):
status = self.get_argument("status")
if cmp(status, "On") == 0:
g_machines[machine_id] = {}
g_machines[machine_id]['command'] = []
g_machines[machine_id]['status'] = "On"
ControlCenterHandler.announcement("Join", "Bot_" + machine_id)
elif cmp(status, "Off") == 0 and machine_id in g_machines.keys():
ControlCenterHandler.announcement("Left", "Bot_" + machine_id)
g_machines.pop(machine_id)
elif cmp(status, "Off") == 0 and machine_id not in g_machines.keys():
self.finish(JsonResponser(404, None, "Machine_id not exist!"))
else:
self.finish(JsonResponser(400, None, "Not valid status(On/Off)"))
self.finish(JsonResponser(200, None, "finished"))
def get(self, machine_id = None):
self.write(JsonResponser(200, g_machines, ""))
def MessageParser(mode, message):
j_msg = {}
j_msg["Mode"] = mode
j_msg["Msg"] = message
return json.dumps(j_msg)
def MessageParser3(from_, message):
j_msg = {}
j_msg["From"] = from_
j_msg["Msg"] = message
return json.dumps(j_msg)
class ControlCenterHandler(tornado.websocket.WebSocketHandler):
@staticmethod
def announcement(mode, msg):
for i in g_commanders.keys():
g_commanders[i].send_message(mode, msg)
def send_message(self, mode, message):
try:
self.write_message(MessageParser(mode, message))
        except Exception:
            pass
def open(self):
self.me_id = str(uuid.uuid4())[0:8]
self.send_message("System", '欢迎来到rCtrl控制中心<br/>您的ID是%s' % self.me_id)
self.send_message("Online", json.dumps(g_commanders.keys()))
g_commanders[self.me_id] = self
ControlCenterHandler.announcement("Join", self.me_id)
def on_close(self):
g_commanders.pop(self.me_id)
ControlCenterHandler.announcement("Left", self.me_id)
def on_message(self, message):
j_msg = None
try:
j_msg = json.loads(message)
if "Recipient" not in j_msg or "Message" not in j_msg:
return
except Exception as e:
            print(e)
return
if cmp(j_msg["Recipient"], "all") == 0:
ControlCenterHandler.announcement("Speak", MessageParser3(self.me_id, j_msg["Message"]))
return
if j_msg["Recipient"] in g_commanders.keys():
g_commanders[j_msg["Recipient"]].send_message("Message", MessageParser3(self.me_id, j_msg["Message"]))
return
if j_msg["Recipient"] in g_machines.keys():
g_machines[j_msg["Recipient"]]["command"].append(j_msg["Message"])
self.send_message("System", 'Command bot %s successfully' % j_msg["Recipient"])
return
self.send_message("System", "发送失败")
urls = [
(r'/', IndexHandler),
(r'/machine', MachineStatusHandler),
(r'/machine/(?P<machine_id>[a-zA-Z0-9-_]+).stat', MachineStatusHandler),
(r'/image/(?P<machine_id>[a-zA-Z0-9-_]+).png', ImageHandler),
(r'/command/(?P<machine_id>[a-zA-Z0-9-_]+).list', CommandHandler),
(r'/controlcenter', ControlCenterHandler),
]
settings = {
"static_path" : os.path.join(os.path.dirname(__file__), "static"),
"template_path" : os.path.join(os.path.dirname(__file__), "templates"),
"debug" : True,
"gzip" : True,
}
def main(host="0.0.0.0", port=7070):
app = tornado.web.Application(urls, **settings)
app.listen(port, host)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
'''
Created on Dec 17, 2016
@author: Mark
'''
import hashlib, copy
lplen = 0
spath = ""
splen = 1e6
paths = []
passw = "bwnlcvfs"
def opened(digest):
return [False if int(x, 16) <= 10 else True for x in digest[:4]]
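# The four leading hex digits of md5(passcode + path) encode the up/down/
# left/right doors of the current room: a digit of 'b'..'f' (value > 10)
# means that door is open. Rooms are numbered 1..16 over a 4x4 grid,
# row-major, which is why up/down move by 4 (this appears to be the
# Advent of Code 2016 day 17 puzzle).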
def move(state):
nextstates = []
    md = hashlib.md5()
    md.update((state["pass"] + state["path"]).encode())
    doors = opened(md.hexdigest().upper())
i = state["id"]
if doors[0]: #up
st = copy.copy(state)
if i > 4:
st["id"] = i - 4
st["path"]+= "U"
nextstates.append(st)
if doors[1]: #down
st = copy.copy(state)
if i <= 12:
st["id"] = i + 4
st["path"]+= "D"
nextstates.append(st)
if doors[2]: #left
st = copy.copy(state)
if not i in [1,5,9,13]:
st["id"] = i - 1
st["path"]+= "L"
nextstates.append(st)
if doors[3]: #right
st = copy.copy(state)
if i % 4 != 0 :
st["id"] = i + 1
st["path"]+= "R"
nextstates.append(st)
return nextstates
def solve(state):
global paths
if state["id"] == 16:
paths.append(state["path"])
return
for st in move(state):
solve(st)
state = {"id": 1, "path":"", "pass": passw}
solve(state)
for path in paths:
if len(path) < splen:
splen = len(path)
spath = path
if len(path) > lplen:
lplen = len(path)
print "shortest path", spath
print "Longest path length:", lplen
|
# -*- coding: utf-8 -*-
# @Time : 2018/8/7 09:34
# @Author : Xiaoyu Xing
# @File : feature_pu_model.py
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import numpy as np
import copy
from torch.autograd import Variable
import argparse
from utils.data_utils import DataPrepare, set_seed, get_cnt, get_hyper_dist
from utils.feature_pu_model_utils import FeaturedDetectionModelUtils
from sub_model import CharCNN, CaseNet, WordNet, FeatureNet, TimeDistributed
from progressbar import *
from sklearn.metrics import precision_score, recall_score, f1_score
import pickle
import os
class PULSTMCNN(nn.Module):
def __init__(self, dp, charModel, wordModel, caseModel, featureModel, inputSize, hiddenSize, layerNum, dropout):
super(PULSTMCNN, self).__init__()
self.dp = dp
self.charModel = TimeDistributed(charModel, self.dp.char2Idx)
self.wordModel = wordModel
self.caseModel = caseModel
self.featureModel = featureModel
self.lstm = nn.LSTM(inputSize, hiddenSize, num_layers=layerNum,batch_first=True, bidirectional=True)
self.fc = nn.Sequential(
nn.Linear(2 * hiddenSize, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 2),
nn.Softmax(dim=2)
# nn.Linear(200, 1)
)
self.classifier = nn.Sequential(
nn.Linear(10 * hiddenSize, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 2),
nn.Softmax(dim=2)
# nn.Linear(200, 1)
)
def forward(self, token, case, char, feature):
charOut, sortedLen1, reversedIndices1 = self.charModel(char)
wordOut, sortedLen2, reversedIndices2 = self.wordModel(token)
caseOut, sortedLen3, reversedIndices3 = self.caseModel(case)
featureOut, sortedLen4, reversedIndices4 = self.featureModel(feature)
encoding = torch.cat([wordOut.float(), caseOut.float(), charOut.float(), featureOut.float()], dim=2)
sortedLen = sortedLen1
reverseIndices = reversedIndices1
packed_embeds = pack_padded_sequence(encoding, sortedLen, batch_first=True)
maxLen = sortedLen[0]
mask = torch.zeros([len(sortedLen), maxLen, 2])
for i, l in enumerate(sortedLen):
mask[i][:l][:] = 1
lstmOut, (h, _) = self.lstm(packed_embeds)
paddedOut = pad_packed_sequence(lstmOut, batch_first=True)
# print(paddedOut)
rep = paddedOut[0]
fcOut = self.fc(rep)
fcOut = fcOut * mask.cuda()
fcOut = fcOut[reverseIndices]
return fcOut, rep[reverseIndices]
def loss_func(self, yTrue, yPred, type):
y = torch.eye(2)[yTrue].float().cuda()
if len(y.shape) == 1:
y = y[None, :]
# y = torch.from_numpy(yTrue).float().cuda()
if type == 'bnpu' or type == 'bpu':
loss = torch.mean((y * (1 - yPred)).sum(dim=1))
elif type == 'upu':
loss = torch.mean((-y * torch.log(yPred)).sum(dim=1))
# loss = 0.5 * torch.max(1-yPred*(2.0*yTrue-1),0)
return loss
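    # e.g. with yTrue = 1 and yPred = [[0.2, 0.8]]: 'bnpu'/'bpu' give
    # mean(1 - 0.8) = 0.2 (a linear surrogate), while 'upu' gives -log(0.8)
    # (plain cross-entropy on the positive class).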
class Trainer(object):
def __init__(self, models, priors, beta, gamma, learningRate, m, Ns=None):
self.models = models
self.num = len(models)
self.learningRate = learningRate
self.optimizers = [torch.optim.Adam(filter(lambda p: p.requires_grad, self.models[idx].parameters()),
lr=self.learningRate,
weight_decay=1e-8) for idx in range(len(models))]
self.m = m
self.priors = priors
self.bestResult = -1
self.bestPre = -1
self.bestRec = -1
self.beta = beta
self.gamma = gamma
self.positive = np.eye(2)[1]
self.negative = np.eye(2)[0]
if Ns is not None:
self.Ns = Ns
self.Ks = [int((1-self.priors[idx]) * self.Ns[idx]) for idx in range(self.num)]
for idx in range(self.num):
print("N:{}\tK:{}\tK/N:{}".format(self.Ns[idx], self.Ks[idx], self.Ks[idx]/self.Ns[idx]), flush=True)
def train_mini_batch2(self, batches, args):
num = len(batches)
positives = []
unlabeleds = []
reps = []
results = []
for idx, (token, case, char, feature, label, flag) in enumerate(batches):
if idx == 0:
continue
length = [len(i) for i in flag]
maxLen = max(length)
fids = []
lids = []
for s in flag:
f = list(s)
f += [np.array([-1, -1]) for _ in range(maxLen - len(f))]
fids.append(f)
fids = np.array(fids)
positive = (fids==self.positive).sum(axis=-1)==2
unlabeled = (fids==self.negative).sum(axis=-1)==2
positives.append(positive)
unlabeleds.append(unlabeled)
result, rep = self.models[idx](token, case, char, feature)
results.append(result)
reps.append(rep)
self.optimizers[idx].zero_grad()
risks, prisks, nrisks = [], [], []
for i in range(num-1):
pi = positives[i]
ui = unlabeleds[i]
ri = reps[i]
reg = 0.0
tmpp, tmpu = None, None
p2, n1, n2 = None, None, ui
for j in range(num-1):
if i==j: continue
p2 = (ui & positives[j]) if p2 is None else (p2 | (ui & positives[j]))
n1 = (pi & unlabeleds[j]) if n1 is None else (n1 | (pi & unlabeleds[j]))
n2 = ui & unlabeleds[j] if n2 is None else (n2 & (ui & unlabeleds[j]))
if args.lamb != 0:
if args.reg=="diff":
diff = (positives[i] & unlabeleds[j]) | (unlabeleds[i] & positives[j])
if diff.sum()!=0:
diff = torch.from_numpy(diff).bool().cuda()
reg += ((ri[diff] - reps[j][diff].detach())**2).mean()
elif args.reg=='all':
reg += ((ri-reps[j].detach())**2).mean()
p1 = pi
hP1 = results[i].masked_select(torch.from_numpy(p1).bool().unsqueeze(-1).cuda()).contiguous().view(-1, 2) if p1 is not None else []
hP2 = results[i].masked_select(torch.from_numpy(p2).bool().unsqueeze(-1).cuda()).contiguous().view(-1, 2) if p2 is not None else []
hU1 = results[i].masked_select(torch.from_numpy(n1).bool().unsqueeze(-1).cuda()).contiguous().view(-1, 2) if n1 is not None else []
hU2 = results[i].masked_select(torch.from_numpy(n2).bool().unsqueeze(-1).cuda()).contiguous().view(-1, 2) if n2 is not None else []
pRisk, uRisk = 0, 0
if args.p1!=0 and len(hP1) > 0:
pRisk += args.p1 * self.models[i+1].loss_func(1, hP1, args.type)
else:
pRisk += torch.FloatTensor([0]).cuda()
if args.p2!=0 and len(hP2) > 0:
if args.p2>0:
pRisk += args.p2 * self.models[i+1].loss_func(1, hP2, args.type)
else:
uRisk += abs(args.p2) * self.models[i+1].loss_func(0, hP2, args.type)
uRisk += args.n2 * self.models[i+1].loss_func(0, hU2, args.type)
if args.n1!=0 and len(hU1)>0:
uRisk += args.n1 * self.models[i+1].loss_func(0, hU1, args.type)
nRisk = uRisk - self.priors[i+1] * (1-pRisk)
risk = self.m * pRisk + nRisk
if args.type == 'bnpu':
if nRisk < self.beta:
risk = -self.gamma * nRisk
if args.lamb != 0:
risk = risk + args.lamb * reg
(risk).backward()
self.optimizers[i+1].step()
risks.append(risk.item())
prisks.append(pRisk.item())
nrisks.append(nRisk.item())
return 0.0, sum(risks)/len(risks), sum(prisks)/len(prisks), sum(nrisks)/len(nrisks), [], []
def train_mini_batch(self, batches, args):
num = len(batches)
risks = []
prisks = []
nrisks = []
accs = []
as_ = []
bs = []
rep0 = None
fids0 = None
result0 = None
reps = []
risks = []
for idx, (token, case, char, feature, label, flag) in enumerate(batches):
length = [len(i) for i in flag]
maxLen = max(length)
fids = []
lids = []
for s in flag:
f = list(s)
f += [np.array([-1, -1]) for _ in range(maxLen - len(f))]
fids.append(f)
for s in label:
l = list(s)
l += [np.array([-1, -1]) for _ in range(maxLen - len(l))]
lids.append(l)
fids = np.array(fids)
lids = np.array(lids)
postive = (fids == self.positive) * 1
unlabeled = (fids == self.negative) * 1
self.optimizers[idx].zero_grad()
result, rep = self.models[idx](token, case, char, feature)
if idx==0:
result0 = result.detach()
rep0 = rep.detach()
fids0 = copy.deepcopy(fids)
continue
reps.append(rep.detach())
if idx!=0:
posadd = ((fids0==self.positive).sum(axis=-1)==2) & ((fids==self.negative).sum(axis=-1)==2)
postive = torch.from_numpy(postive).bool() | torch.from_numpy(posadd).bool().unsqueeze(-1)
if not isinstance(postive, np.ndarray):
hP = result.masked_select(postive.cuda()).contiguous().view(-1, 2)
else:
hP = result.masked_select(torch.from_numpy(postive).bool().cuda()).contiguous().view(-1, 2)
if not isinstance(unlabeled, np.ndarray):
hU = result.masked_select(unlabeled.cuda()).contiguous().view(-1, 2)
else:
hU = result.masked_select(torch.from_numpy(unlabeled).bool().cuda()).contiguous().view(-1, 2)
if len(hP) > 0:
pRisk = self.models[idx].loss_func(1, hP, args.type)
else:
pRisk = torch.FloatTensor([0]).cuda()
uRisk = self.models[idx].loss_func(0, hU, args.type)
nRisk = uRisk - self.priors[idx] * (1 - pRisk)
risk = self.m * pRisk + nRisk
if args.type == 'bnpu':
if nRisk < self.beta:
risk = -self.gamma * nRisk
risk = risk #+ args.lamb * reg
risks.append(risk)
rep = torch.cat(reps, dim=-1)
        result = self.models[0].classifier(rep)
        # accuracy is not computed in this variant; mirror train_mini_batch2
        return 0.0, risk.item(), pRisk.item(), nRisk.item(), None, None
def test(self, batch, length, ens='other'):
token, case, char, feature = batch
maxLen = max([x for x in length])
mask = np.zeros([len(token), maxLen, 2])
for i, x in enumerate(length):
mask[i][:x][:] = 1
results = []
for idx in range(len(self.models)):
result, _ = self.models[idx](token, case, char, feature)
results.append(result)
if ens=='average':
result = torch.stack(results, dim=0).mean(0)
elif ens=='max':
result, _ = torch.stack(results, dim=0).max(0)
else:
result = results[1]
# print(result)
result = result.masked_select(torch.from_numpy(mask).bool().cuda()).contiguous().view(-1, 2)
pred = torch.argmax(result, dim=1)
temp = result[:, 1]
return pred.cpu().numpy(), temp.detach().cpu().numpy()
def save(self, dir):
if dir is not None:
for idx in range(len(self.models)):
torch.save(self.models[idx].state_dict(), dir+"_{}".format(idx))
def decay_learning_rate(self, epoch, init_lr):
lr = init_lr / (1 + 0.05 * epoch)
print('learning rate: {0}'.format(lr))
for idx in range(len(self.optimizers)):
for param_group in self.optimizers[idx].param_groups:
param_group['lr'] = lr
return self.optimizers
from typing import Callable, List, Set, Tuple, TypeVar, Optional
import warnings
TypedSpan = Tuple[int, Tuple[int, int]]
TypedStringSpan = Tuple[str, Tuple[int, int]]
class InvalidTagSequence(Exception):
def __init__(self, tag_sequence=None):
super().__init__()
self.tag_sequence = tag_sequence
def __str__(self):
return " ".join(self.tag_sequence)
def bio_tags_to_spans(
tag_sequence: List[str], classes_to_ignore: List[str] = None
):
"""
Given a sequence corresponding to BIO tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
Ill-formed spans are also included (i.e those which do not start with a "B-LABEL"),
as otherwise it is possible to get a perfect precision score whilst still predicting
ill-formed spans in addition to the correct spans. This function works properly when
the spans are unlabeled (i.e., your labels are simply "B", "I", and "O").
# Parameters
tag_sequence : `List[str]`, required.
The integer class labels for a sequence.
classes_to_ignore : `List[str]`, optional (default = `None`).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
# Returns
spans : `List[TypedStringSpan]`
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
Note that the label `does not` contain any BIO tag prefixes.
"""
classes_to_ignore = classes_to_ignore or []
spans: Set[Tuple[str, Tuple[int, int]]] = set()
span_start = 0
span_end = 0
active_conll_tag = None
for index, string_tag in enumerate(tag_sequence):
# Actual BIO tag.
bio_tag = string_tag[0]
if bio_tag not in ["B", "I", "O"]:
raise InvalidTagSequence(tag_sequence)
conll_tag = string_tag[2:]
if bio_tag == "O" or conll_tag in classes_to_ignore:
# The span has ended.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = None
# We don't care about tags we are
# told to ignore, so we do nothing.
continue
elif bio_tag == "B":
# We are entering a new span; reset indices
# and active tag to new span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = conll_tag
span_start = index
span_end = index
elif bio_tag == "I" and conll_tag == active_conll_tag:
# We're inside a span.
span_end += 1
else:
# This is the case the bio label is an "I", but either:
# 1) the span hasn't started - i.e. an ill formed span.
# 2) The span is an I tag for a different conll annotation.
# We'll process the previous span if it exists, but also
# include this span. This is important, because otherwise,
# a model may get a perfect F1 score whilst still including
# false positive ill-formed spans.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = conll_tag
span_start = index
span_end = index
# Last token might have been a part of a valid span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
return list(spans)
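# Example: bio_tags_to_spans(["B-PER", "I-PER", "O", "B-LOC"]) returns
# [("PER", (0, 1)), ("LOC", (3, 3))] (inclusive span ends; list order is
# arbitrary because spans are accumulated in a set).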
def compute_prf(sentences, flag=None):
"""
sentences:
[word, label(BIO), pred]
"""
def convert_spans(preds):
spans = set()
start = 0
while start < len(preds):
if preds[start] == 1:
end = start + 1
while end < len(preds) and preds[end] == 1:
end += 1
spans.add((start, end))
start = end
else:
start += 1
return spans
def convert_spans_with_type(preds):
spans = set()
start = 0
while start < len(preds):
if preds[start]!='O':
type_ = preds[start]
end = start + 1
while end < len(preds) and preds[end]==type_:
end += 1
spans.add((start, end, type_))
start = end
else:
start += 1
return spans
tp = 0
pre = 0
rec = 0
all_spans = []
for sentence in sentences:
# words = [token[0] for token in sentence]
labels = [token[1] for token in sentence]
preds = [token[2] for token in sentence]
if flag is not None:
spans = set([(span[1][0], span[1][1] + 1) for span in bio_tags_to_spans(labels) if span[0] == flag])
pred_spans = convert_spans(preds)
else:
spans = set([(span[1][0], span[1][1] + 1, span[0]) for span in bio_tags_to_spans(labels)])
pred_spans = convert_spans_with_type(preds)
tp += len(spans & pred_spans)
pre += len(pred_spans)
rec += len(spans)
all_spans.append(pred_spans)
p = tp / pre if pre != 0 else 0
r = tp / rec if rec != 0 else 0
f = 2 * p * r / (p + r) if (p + r) != 0 else 0
return p, r, f, all_spans
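# Example: convert_spans([1, 1, 0, 1]) -> {(0, 2), (3, 4)} (exclusive ends),
# which is why the gold spans from bio_tags_to_spans, whose ends are
# inclusive, are shifted by "+ 1" above before comparison.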
def run(args, trainSets, validSet, testSet, priors, m, Ns):
num = len(trainSets)
trainSize = len(trainSets[0])
validSize = len(validSet)
testSize = len(testSet)
pulstmcnns = []
for idx in range(num):
set_seed(args.seed + idx*100)
charcnn = CharCNN(dp.char2Idx)
wordnet = WordNet(dp.wordEmbeddings, dp.word2Idx)
casenet = CaseNet(dp.caseEmbeddings, dp.case2Idx)
featurenet = FeatureNet()
pulstmcnn = PULSTMCNN(dp, charcnn, wordnet, casenet, featurenet, 150, 200, 1, args.drop_out)
pulstmcnns.append(pulstmcnn)
set_seed(args.seed)
    if torch.cuda.is_available():
charcnn.cuda()
wordnet.cuda()
casenet.cuda()
featurenet.cuda()
for pulstmcnn in pulstmcnns:
pulstmcnn.cuda()
trainer = Trainer(pulstmcnns, priors, args.beta, args.gamma, args.lr, m, Ns)
time = 0
bar = ProgressBar(maxval=int((len(trainSets[0]) - 1) / args.batch_size))
train_sentences = dp.read_origin_file("data_{}/".format(args.suf) + args.dataset + "/train.txt")
trainSize = int(len(train_sentences) * args.pert)
train_sentences = train_sentences[:trainSize]
train_words = []
train_efs = []
for s in train_sentences:
temp = []
temp2 = []
for word, ef, lf in s:
temp.append(word)
temp2.append(ef)
train_words.append(temp)
train_efs.append(temp2)
valid_sentences = dp.read_origin_file("data_{}/".format(args.suf) + args.dataset + "/valid.txt")
valid_words = []
valid_efs = []
for s in valid_sentences:
temp = []
temp2 = []
for word, ef, lf in s:
temp.append(word)
temp2.append(ef)
valid_words.append(temp)
valid_efs.append(temp2)
test_sentences = dp.read_origin_file("data_{}/".format(args.suf) + args.dataset + "/test.txt")
test_words = []
test_efs = []
for s in test_sentences:
temp = []
temp2 = []
for word, ef, lf in s:
temp.append(word)
temp2.append(ef)
test_words.append(temp)
test_efs.append(temp2)
best_f1_valid = None
if not args.eval:
for e in range(1, 1000):
print("Epoch: {}".format(e),flush=True)
bar.start()
risks = []
prisks = []
nrisks = []
as_ = []
bs = []
# for step, (x_word_batch, x_case_batch, x_char_batch, x_feature_batch, y_batch, flag_batch) in enumerate(
for step, batches in enumerate(
mutils.iterateSet_ens(trainSets, batchSize=args.batch_size, mode="TRAIN")):
bar.update(step)
acc, risk, prisk, nrisk, a, b = trainer.train_mini_batch2(batches, args)
as_ += a
bs += b
risks.append(risk)
prisks.append(prisk)
nrisks.append(nrisk)
meanRisk = np.mean(np.array(risks))
meanRisk2 = np.mean(np.array(prisks))
meanRisk3 = np.mean(np.array(nrisks))
print("risk: {}, prisk: {}, nrisk: {}".format(meanRisk, meanRisk2, meanRisk3), flush=True)
if e % 5 == 0:
trainer.decay_learning_rate(e, args.lr)
if e % args.print_time == 0:
pred_valid = []
corr_valid = []
as_ = []
bs = []
for step, (
x_word_test_batch, x_case_test_batch, x_char_test_batch, x_feature_test_batch,
y_test_batch, _) in enumerate(
mutils.iterateSet(validSet, batchSize=100, mode="TEST", shuffle=False)):
validBatch = [x_word_test_batch, x_case_test_batch, x_char_test_batch, x_feature_test_batch]
correcLabels = []
for x in y_test_batch:
for xi in x:
correcLabels.append(xi)
lengths = [len(x) for x in x_word_test_batch]
predLabels, _ = trainer.test(validBatch, lengths, ens=args.ens)
correcLabels = np.array(correcLabels)
as_ += predLabels.tolist()
bs += correcLabels.tolist()
assert len(predLabels) == len(correcLabels)
start = 0
for i, l in enumerate(lengths):
end = start + l
p = predLabels[start:end]
c = correcLabels[start:end]
pred_valid.append(p)
corr_valid.append(c)
start = end
newSentencesValid = []
for i, s in enumerate(valid_words):
sent = []
assert len(s) == len(valid_efs[i]) == len(pred_valid[i])
for j, item in enumerate(s):
sent.append([item, valid_efs[i][j], pred_valid[i][j]])
newSentencesValid.append(sent)
p_valid, r_valid, f1_valid, _ = compute_prf(newSentencesValid, args.flag)
print("Valid Precision: {}, Recall: {}, F1: {}".format(p_valid, r_valid, f1_valid),flush=True)
print("Valid token, pre: {}, rec: {}, f1: {}".format(precision_score(bs, as_), recall_score(bs, as_), f1_score(bs, as_)), flush=True)
if f1_valid <= trainer.bestResult:
time += 1
else:
trainer.bestResult = f1_valid
trainer.bestPre = p_valid
trainer.bestRec = r_valid
time = 0
trainer.save(
("{}/{}_{}_{}_{}_lr_{}_prior_{:.1f}_beta_{}_gamma_{}_percent_{}").format(args.save_dir, args.type, args.ens, args.dataset,
args.flag,
trainer.learningRate,
trainer.m,
trainer.beta,
trainer.gamma,
args.pert))
pred_test = []
corr_test = []
prob_test = []
as_ = []
bs = []
for step, (
x_word_test_batch, x_case_test_batch, x_char_test_batch, x_feature_test_batch,
y_test_batch, _) in enumerate(
mutils.iterateSet(testSet, batchSize=100, mode="TEST", shuffle=False)):
testBatch = [x_word_test_batch, x_case_test_batch, x_char_test_batch, x_feature_test_batch]
correcLabels = []
for x in y_test_batch:
for xi in x:
correcLabels.append(xi)
lengths = [len(x) for x in x_word_test_batch]
predLabels, probLabels = trainer.test(testBatch, lengths, ens=args.ens)
correcLabels = np.array(correcLabels)
as_ += predLabels.tolist()
bs += correcLabels.tolist()
assert len(predLabels) == len(correcLabels) == len(probLabels)
start = 0
for i, l in enumerate(lengths):
end = start + l
p = predLabels[start:end]
c = correcLabels[start:end]
r = probLabels[start:end]
pred_test.append(p)
corr_test.append(c)
prob_test.append(r)
start = end
newSentencesTest = []
for i, s in enumerate(test_words):
sent = []
assert len(s) == len(test_efs[i]) == len(pred_test[i])
for j, item in enumerate(s):
sent.append([item, test_efs[i][j], pred_test[i][j], prob_test[i][j]])
newSentencesTest.append(sent)
outputFile = args.result_dir+"/" + args.type + "_"+args.ens+"_feature_pu_" + args.dataset + "_" + args.flag + ".txt"
with open(outputFile, "w") as fw:
for i, sent in enumerate(test_words):
preds = pred_test[i]
probs = prob_test[i]
corrs = test_efs[i]
for j, w in enumerate(sent):
pred = preds[j]
corr = corrs[j]
prob = probs[j]
fw.write(("{} {} {} {}\n").format(w, corr, pred, prob))
fw.write("\n")
p_valid, r_valid, f1_valid, _ = compute_prf(newSentencesTest, args.flag)
best_f1_valid = f1_valid
print("Test Result: Precision: {}, Recall: {}, F1: {}".format(p_valid, r_valid, f1_valid),flush=True)
print("Test token, pre: {}, rec: {}, f1: {}".format(precision_score(bs, as_), recall_score(bs, as_), f1_score(bs, as_)))
if time > 10:
print(("BEST RESULT ON VALIDATE DATA:{}").format(trainer.bestResult))
break
for idx, pulstmcnn in enumerate(trainer.models):
pulstmcnn.load_state_dict(
torch.load(
"{}/{}_{}_{}_{}_lr_{}_prior_{:.1f}_beta_{}_gamma_{}_percent_{}_{}".format(args.save_dir, args.type, args.ens, args.dataset, args.flag,
trainer.learningRate,
trainer.m,
trainer.beta,
trainer.gamma, args.pert, idx)))
pred_test = []
corr_test = []
prob_test = []
as_ = []
bs = []
for step, (
x_word_test_batch, x_case_test_batch, x_char_test_batch, x_feature_test_batch,
y_test_batch, _) in enumerate(
mutils.iterateSet(testSet, batchSize=100, mode="TEST", shuffle=False)):
testBatch = [x_word_test_batch, x_case_test_batch, x_char_test_batch, x_feature_test_batch]
correcLabels = []
for x in y_test_batch:
for xi in x:
correcLabels.append(xi)
lengths = [len(x) for x in x_word_test_batch]
predLabels, probLabels = trainer.test(testBatch, lengths, ens=args.ens)
correcLabels = np.array(correcLabels)
as_ += predLabels.tolist()
bs += correcLabels.tolist()
assert len(predLabels) == len(correcLabels) == len(probLabels)
start = 0
for i, l in enumerate(lengths):
end = start + l
p = predLabels[start:end]
c = correcLabels[start:end]
r = probLabels[start:end]
pred_test.append(p)
corr_test.append(c)
prob_test.append(r)
start = end
newSentencesTest = []
for i, s in enumerate(test_words):
sent = []
assert len(s) == len(test_efs[i]) == len(pred_test[i])
for j, item in enumerate(s):
sent.append([item, test_efs[i][j], pred_test[i][j], prob_test[i][j]])
newSentencesTest.append(sent)
p_valid, r_valid, f1_valid, _ = compute_prf(newSentencesTest, args.flag)
print("Test Result: Precision: {}, Recall: {}, F1: {}".format(p_valid, r_valid, f1_valid),flush=True)
print("Test token, pre: {}, rec: {}, f1: {}".format(precision_score(bs, as_), recall_score(bs, as_), f1_score(bs, as_)))
outputFile =args.result_dir + "/" + args.type + "_"+args.ens+"_feature_pu_" + args.dataset + "_" + args.flag + ".txt"
if args.eval or f1_valid > best_f1_valid:
with open(outputFile, "w") as fw:
for i, sent in enumerate(test_words):
preds = pred_test[i]
probs = prob_test[i]
corrs = test_efs[i]
for j, w in enumerate(sent):
pred = preds[j]
corr = corrs[j]
prob = probs[j]
fw.write(("{} {} {} {}\n").format(w, corr, pred, prob))
fw.write("\n")
return p_valid, r_valid, f1_valid
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PU NER")
# data
parser.add_argument('--lr', type=float, default=1e-4,help='learning rate')
parser.add_argument('--beta', type=float, default=0.0,help='beta of pu learning (default 0.0)')
parser.add_argument('--gamma', type=float, default=1.0,help='gamma of pu learning (default 1.0)')
parser.add_argument('--drop_out', type=float, default=0.5, help = 'dropout rate')
parser.add_argument('--m', type=float, default=None, help='class balance rate')
parser.add_argument('--flag', default="PER" , help='entity type (PER/LOC/ORG/MISC)')
parser.add_argument('--dataset', default="conll2003",help='name of the dataset')
parser.add_argument('--batch_size', type=int, default=100,help='batch size for training and testing')
parser.add_argument('--print_time', type=int, default=1,help='epochs for printing result')
parser.add_argument('--pert', type=float, default=1.0,help='percentage of data use for training')
parser.add_argument('--type', type=str, default='bnpu',help='pu learning type (bnpu/bpu/upu)') # bpu upu
parser.add_argument('--seed', type=int, default=1013, help="random seed")
parser.add_argument('--num_head', type=int, default=5)
parser.add_argument('--lamb', type=float, default=1.0)
parser.add_argument("--ens", default="none")
parser.add_argument("--reg", default="diff", help='diff or all')
parser.add_argument("--neg", action="store_true")
parser.add_argument("--result-dir", dest="result_dir", default="result")
parser.add_argument("--save-dir", dest="save_dir", default="saved_model")
parser.add_argument("--p1", dest="p1", type=float, default=1.0)
parser.add_argument("--p2", dest="p2", type=float, default=1.0)
parser.add_argument("--n1", dest="n1", type=float, default=1.0)
parser.add_argument("--n2", dest="n2", type=float, default=1.0)
parser.add_argument("--suf", dest="suf", default=None)
parser.add_argument("--reinit", dest="reinit", type=int, default=0)
parser.add_argument("--eval", dest="eval", action="store_true")
args = parser.parse_args()
print("p1:{}, p2:{}, n1:{}, n2:{}".format(args.p1, args.p2, args.n1, args.n2), flush=True)
set_seed(args.seed)
dp = DataPrepare(args.dataset, suf=args.suf)
mutils = FeaturedDetectionModelUtils(dp)
if args.reinit==0 and os.path.exists(os.path.join("data_{}".format(args.suf), args.dataset, "data_{}.pk".format(args.flag))):
with open(os.path.join("data_{}".format(args.suf), args.dataset, "data_{}.pk".format(args.flag)), "rb") as file:
trainSets, validSet, testSet, priors, Ns = pickle.load(file)
else:
trainSets, validSet, testSet, priors, Ns = mutils.load_dataset_ens(args.flag, args.dataset, args.pert, num=args.num_head, suf=args.suf)
with open(os.path.join("data_{}".format(args.suf), args.dataset, "data_{}.pk".format(args.flag)), "wb") as file:
pickle.dump([trainSets, validSet, testSet, priors, Ns], file)
best_pre, best_rec, best_f1, best_m = -1, -1, -1, 0.3
if args.m is None:
for idx in range(10):
pre, rec, f1 = run(args, trainSets, validSet, testSet, priors, 0.3+0.1*idx, Ns)
if f1 > best_f1:
best_f1 = f1
best_pre = pre
best_rec = rec
best_m = 0.3+0.1*idx
print("pre:{}\nrec:{}\nf1:{}\nm:{}".format(best_pre, best_rec, best_f1, best_m))
else:
pre, rec, f1 = run(args, trainSets, validSet, testSet, priors, args.m, Ns)
print("pre:{}\nrec:{}\nf1:{}\nm:{}".format(pre, rec, f1, args.m)) |
from django.contrib.gis.db import models
ESTADO_CHOICES = (
('EJC1', 'EN EJECUCION CON 1ER. DESEMBOLSO'),
('EJC2', 'EN EJECUCION CON 2DO. DESEMBOLSO'),
('EJC3', 'EN EJECUCION CON 3ER. DESEMBOLSO'),
('IR', 'INTERVENIDO CON RESOLUCION'),
('LIQ', 'PROYECTOS LIQUIDADOS'),
('PI', 'POR INTERVENIR'),
('PLIQ', 'PROCESO DE LIQUIDACION'),
('RTA', 'REVISION TECNICA Y AJUSTE PARA INICION')
)
class Vivienda(models.Model):
vigencia = models.TextField()
acta = models.TextField()
radicacion = models.TextField(primary_key=True)
departamento = models.TextField()
municipio = models.TextField()
localidad = models.TextField()
tipo_solucion = models.TextField()
hogares = models.IntegerField()
valor_total = models.TextField()
subsidio = models.TextField()
desembolso1 = models.TextField()
fecha1 = models.DateField()
desembolso2 = models.TextField()
fecha2 = models.DateField()
desembolso3 = models.TextField()
fecha3 = models.DateField()
estado = models.CharField(max_length=100)
avance = models.DecimalField(max_digits=3, decimal_places=2)
recursos = models.TextField()
poblacion = models.CharField(max_length=11)
geometry = models.PointField(db_column="geom")
objects = models.GeoManager()
class Meta:
db_table = u'vivienda'
ordering = ['radicacion']
def __unicode__(self):
return '%s: %s' % (self.radicacion, self.localidad)
#class Municipio(models.Model):
# id = models.IntegerField(primary_key=True)
# nombre = models.CharField(max_length=255, db_column='nom_mpio', null=True)
# geometry = models.MultiPolygonField(null=True)
#
# objects=models.GeoManager()
#
# class Meta:
# ordering=['nombre']
#
# def __unicode__(self):
# return '%s, %s' %(self.nombre, self.id)
class Localidad(models.Model):
cod_pob = models.TextField()
confiabilidad = models.DecimalField(max_digits=20, decimal_places=2)
localidad = models.TextField()
mpio = models.TextField()
dpto = models.TextField()
class Meta:
verbose_name_plural='localidades'
class ViviendaCode(models.Model):
radicacion = models.TextField(primary_key=True)
localidad = models.TextField()
mpio = models.TextField()
dpto = models.TextField()
class Meta:
db_table = u'vivienda_code'
class Poblacion(models.Model):
id = models.IntegerField(primary_key=True, db_column="ogc_fid")
geometry = models.GeometryField(db_column="wkb_geometry")
coddane = models.TextField()
dpto = models.TextField()
mcpio = models.TextField()
cpob = models.TextField()
clase = models.TextField()
nom_dpto = models.TextField()
nom_mpio = models.TextField()
nom_cpob = models.TextField()
tipo_clase = models.TextField()
objects = models.GeoManager()
class Meta:
db_table = u'poblaciones'
verbose_name_plural='poblaciones'
class Departamento(models.Model):
id = models.IntegerField(primary_key=True, db_column='ogc_fid')
geometry = models.GeometryField(db_column='wkb_geometry')
dpto = models.TextField()
nombre = models.TextField(db_column='nom_dpto')
avg_hogares = models.DecimalField(max_digits=20, decimal_places=2)
sum_hogares = models.TextField()
std_hogares = models.DecimalField(max_digits=20, decimal_places=2)
avg_avance = models.DecimalField(max_digits=20, decimal_places=2)
sum_avance = models.TextField()
std_avance = models.DecimalField(max_digits=20, decimal_places=2)
avg_valor = models.DecimalField(max_digits=20, decimal_places=2)
sum_valor = models.TextField()
std_valor = models.DecimalField(max_digits=20, decimal_places=2)
count = models.DecimalField(max_digits=20, decimal_places=2)
def __unicode__(self):
return '%s' %(self.nombre)
objects = models.GeoManager()
class Meta:
db_table="departamento_vivienda"
|
import pyaes
import os
import math
from datetime import datetime as dt
class AES_GCM_128:
def __init__(self, i_key):
assert len(i_key) == 16, 'This class supports 128-bit key only!'
self.i_key = i_key
# Plain text and aad size
self.plt_size = 0
self.aad_size = 0
# Cypher text and tag
self.C = 0
self.T = 0
def sendeth(self, payload, interface = "eth2"):
cmd = 'python trans.py {}'.format(self.process_payload(payload))
return os.system(cmd)
def update_i_key(self, new_key):
self.i_key = new_key
@property
def aes(self):
return pyaes.AESModeOfOperationECB(self.i_key)
@property
def H(self):
return self.aes.encrypt(bytes.fromhex("00"* 16))
@staticmethod
def xor(a, b):
return bytes([aa^bb for aa, bb in zip(a, b)])
def gctr(self, iv, x, phase='encrypt'):
cb = []
offset = 2 if phase == 'encrypt' else 1
for i in range(len(x)// 16 + 5):
cb.append(iv + b'\x00' * 3 + bytes([i]))
if x == "":
return ''
y = []
n = math.ceil(len(x) / 16)
for j in range(n):
cb_ = cb[j + offset]
ciphed = self.aes.encrypt(cb_)
y.append(self.xor(x[j* 16 : j* 16 + 16], ciphed))
return y
@staticmethod
def process_payload(p):
leng = len(p)
payload = p
print(leng)
if leng % 32 != 0:
payload = p + '0' * ( 32 - leng % 32)
return payload
@staticmethod
def shiftright(a):
leng = len(a)
carry = 0
res = []
for i in range(leng):
swap = a[i] % 2
res.append(a[i] // 2 + carry * (pow(2, 7)))
carry = swap
return bytes(res)
def hashmul(self, x, y):
z = b'\x00' * 16
v = y
ele = b'\xe1' + b'\x00' * 15
for i in x:
num = i
for j in range(8):
div = pow(2, 7 - j)
here = num // div
num = num % div
if here == 1:
z = self.xor(z, v)
if v[-1] % 2 == 0:
v = self.shiftright(v)
else:
v = self.shiftright(v)
v = self.xor(v, ele)
return z
def cal_size(self, aad, pt):
self.aad_size = (len(aad) * 8).to_bytes(8, 'big')
self.plt_size = (len(pt) * 8).to_bytes(8, 'big')
def encrypt(self, iv, aad, pt, prt = True):
# Encrypt the text
self.cal_size(aad, pt)
        j0 = iv + b'\x00' * 3 + b'\x01'
yy = self.gctr(iv, pt)
C= b''
for y in yy:
C = C+ y
# Generate the tag
y1 = b'\x00' * 16
xx = aad + C + self.aad_size + self.plt_size
for i in range(len(xx) // 16):
y1 = self.hashmul(self.xor(xx[i * 16:i * 16 +16], y1), self.H)
S = y1
t = self.gctr(iv, S, 'tag')
T = t[0]
self.C = C
self.T = T
if prt:
print('\nC is\t\t')
for i in range(len(pt) // 16):
print('\t\t', C[i * 16:i * 16 +16].hex())
print('\nTag is\n','\t\t',T.hex())
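# Known-answer sanity check (NIST AES-128 GCM test case 1, assuming this
# implementation is correct): an all-zero 16-byte key, an all-zero 12-byte
# IV, empty AAD and empty plaintext should yield the tag
# 58e2fccefa7e3061367f1d57a4e7455a.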
if __name__ == '__main__':
key = bytes.fromhex('00' * 16)
iv = bytes.fromhex('00' * 12)
gcm_aes = AES_GCM_128(key)
pt = bytes.fromhex('D9313225 F88406E5 A55909C5 AFF5269A 86A7A953 1534F7DA 2E4C303D 8A318A72 1C3C0C95 95680953 2FCF0E24 49A6B525 B16AEDF5 AA0DE657 BA637B39 1AAFD255')
pt = bytes.fromhex('D9313225 F88406E5 A55909C5 AFF5269A' * 4)
pt = bytes.fromhex('62626262 62626262 62626262 62626262' )
pt = bytes.fromhex('21000000 00000000 00000000 00000000' )
pt = bytes.fromhex('21220102 12341234 2298abde 2fee2122' )
pt = bytes.fromhex('D9313225 F88406E5 A55909C5 AFF5269A 00000000 00000000 00000000 00000f0f' * 3)
pt = bytes.fromhex('D9313225 F88406E5 A55909C5 AFF5269A 00000000 00000000 00000000 00000000' * 3)
pt_str = '22220102123412342298abde2fee2122' * 12
pt_str = '222201021234123123adf2934283242342ecdeda1238123ad140efacbcba9123123881ade'
pt_str = '12345231223411abfcdeabd78111111111111111111111111110000000000000000000000000123dec'
pt_str = 'Hey there, it is Yu. Nice to meet you world. The network class is awesome! New York has a great sunny day today.'.encode().hex()
pt_str = 'Do you think we are gonna pass this course? New York has a great sunny day today.'.encode().hex()
pt_hex = bytes.fromhex(gcm_aes.process_payload(pt_str))
pt = pt_hex
#pt = bytes.fromhex('D9313225 F88406E5 A55909C5 AFF5269A' * 4)
#pt = bytes.fromhex('D9313225 F88406E5 A55909C5 AFF5269A' )
#aad = bytes.fromhex('3AD77BB4 0D7A3660 A89ECAF3 2466EF97 F5D3D585 03B9699D E785895A 96FDBAAF 43B1CD7F 598ECE23 881B00E3 ED030688 7B0C785E 27E8AD3F 82232071 04725DD4')
#pt = bytes.fromhex('00' * 16)
print(pt)
aad = bytes.fromhex('00' * 16)
aad = bytes.fromhex('')
print('\nPlaintext is\t\t')
for i in range(len(pt) // 16):
print('\t\t', pt[i * 16:i * 16 +16].hex())
gcm_aes.encrypt(iv, aad, pt)
print(dt.now())
gcm_aes.sendeth(pt_str)
#print( C.hex(), T.hex())
|
#
# Grab a single image
# store it on the desktop (name: "NdBx-00000.jpg") and display it
#
try:
isight = ximport("isight")
except:
isight = ximport("__init__")
reload(isight)
import os
destfolder = os.path.expanduser( "~/Desktop" )
imagepath = isight.grab( destfolder=destfolder )
w, h = imagesize(imagepath)
size(w, h)
image(imagepath,0,0)
|
# Javier Gálvez Obispo
import random
import hashlib
from aritmetica_modular import *
def knapsack_llave_privada(n, cota):
"""Genera una llave privada para la función mochila (knapsack).
Input: n, tamaño de la secuencia.
cota, máxima diferencia entre a_i y a_(i+1).
Output: secuencia, secuencia super-creciente de números positivos.
n, entero positivo tal que n > sum(secuencia).
u, entero positivo tal que gcd(n, u) = 1.
"""
secuencia = []
suma = 0
for _ in range(n):
k = random.randint(suma+1, suma+cota+1)
secuencia.append(k)
suma += k
n = random.randint(suma+1, suma+cota+1)
u = random.randint(2, n-2)
while mcd(u, n)[0] != 1:
u = random.randint(2, n-2)
return secuencia, n, u
def knapsack_llave_publica(llave_privada):
"""Genera una llave pública para la función mochila (knapsack) a partir de una
llave privada dada.
Input: llave privada para la función mochila (knapsack) (secuencia, n, u)
Output: secuencia (a*_1, ..., a*_k) obtenida de hacer a*_i = ua_i mod n
"""
secuencia, n, u = llave_privada
return [(s*u) % n for s in secuencia]
def knapsack_cifrar(mensaje, llave_publica):
"""Cifra un mensaje utilizando la función mochila (knapsack).
Input: mensaje, secuencia de bits con la misma longitud que la llave pública
llave_publica para la función mochila (knapsack) (a*_1, ..., a*_k)
Output: mensaje cifrado con la función f(x1, ..., xk) = sum(x_i * a*_i) i=1..k
"""
return sum(m*k for m, k in zip(mensaje, llave_publica))
def knapsack_descifrar(mensaje_cifrado, llave_privada):
"""Descifra un mensaje cifrado por la función mochila (knapsack) utilizando
el algoritmo voraz para resolver el problema de la mochila.
Input: mensaje cifrado.
llave_privada de la función mochila (knapsack).
Output: mensaje descifrado, secuencia de bits original.
"""
secuencia, n, u = llave_privada
inv_u = inverso(u, n)
val = (mensaje_cifrado*inv_u) % n
usados = set()
posibles = secuencia[:]
while val != 0:
posibles = [x for x in posibles if x <= val and x not in usados]
        # No values <= val remain => no solution
if len(posibles) == 0:
return None
elegido = max(posibles)
val -= elegido
usados.add(elegido)
return [int(s in usados) for s in secuencia]
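# Worked example (tiny illustrative key): the super-increasing sequence
# [1, 2, 4, 9] with n = 20, u = 7 yields the public key [7, 14, 8, 3];
# the message [1, 0, 1, 1] encrypts to 7 + 8 + 3 = 18, and decryption
# computes 18 * 7^{-1} mod 20 = 18 * 3 mod 20 = 14, which the greedy pass
# decomposes as 9 + 4 + 1, recovering [1, 0, 1, 1].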
def siguiente_primo(n):
"""Obtiene el primer número primo p >= n"""
p = n if n % 2 == 1 else n+1
while not es_primo(p):
p += 2
return p
def ejercicio2(numero_identidad, n):
"""Calcula f^(-1)(n) siendo f: Zp -> Zp, x -> alpha^x donde
p es un número primo >= numero_identidad tal que (p-1)/2 también es primo y
alpha es un elemento primitivo de Z*p.
Input: numero_identidad: valor mínimo que puede tomar p.
n: número del que se quiere obtener el inverso.
Output: p, alpha, x tal que alpha^x mod p = n.
"""
    # Find a prime p such that (p-1)/2 is also prime
p = siguiente_primo(numero_identidad)
while not es_primo((p-1) // 2):
p = siguiente_primo(p+2)
    # random alpha until the Jacobi symbol (alpha / p) = -1
alpha = random.randint(2, p-2)
while jacobi(alpha, p) != -1:
alpha = random.randint(2, p-2)
"""
# Otra forma de obtener el generador alpha
divs = [2, (p-1) // 2)]
for i in range(2, p-1):
if all(pow(i, d, p) != 1 for d in divs):
alpha = i
break
"""
x = paso_enano_gigante(alpha, n, p)
return p, alpha, x
def obtener_pq(n, x, y):
"""Calcula p, q tal que p*q = n conociendo x, y tal que x^2 = y^2 mod n
Input: n, producto de dos números primos desconocidos
x, y tal que x^2 = y^2 mod n
Output: p, q tal que p*q = n
"""
p = mcd((x-y)%n, n)[0]
q = n // p
return sorted([p, q])
def merkle_damgard(v_inicial, mensaje, n, a0, a1):
"""Implementación de una función resumen usando la construcción de Merkle-Damgard
y tomando la función h(b, x) = x^2 * a0^b * a1^(1-b) como función de compresión.
Input: v_inicial, vector inicial.
mensaje, secuencia de bits de la que se obtiene el resumen.
n, un número primo lo suficientemente grande.
a0, a1 dos cuadrados arbitrarios módulo n.
"""
    # Compression function
def h(b, x):
val = pow(x, 2, n)
if b == 1:
val = (val*a0) % n
else:
val = (val*a1) % n
return val
# h(b2, h(b1, h(b0, x)))
x = v_inicial
for b in mensaje:
x = h(b, x)
return x
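# Tiny worked example: with n = 23, a0 = 4, a1 = 9 and v_inicial = 1, the
# message [1, 0] hashes as h(0, h(1, 1)) = ((1^2 * 4)^2 * 9) mod 23 = 6.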
def rsa_generar_llaves(numero_identidad, fecha):
"""Genera las llaves pública y privada para un sistema RSA dados los valores
mínimos que pueden tomar p y q
Input: numero_identidad, valor mínimo que puede tomar p
fecha, valor mínimo que puede tomar q
Output: (n, e), llave pública del sistema RSA
d, llave privada del sistema RSA
"""
p = siguiente_primo(numero_identidad)
q = siguiente_primo(fecha)
n = p * q
phi_n = (p-1)*(q-1)
    # e such that gcd(e, phi(n)) = 1
e = random.randint(2, n-2)
while mcd(e, phi_n)[0] != 1:
e = random.randint(2, n-2)
d = inverso(e, phi_n)
return n, e, d
def rsa_cifrar(m, n, e):
"""Cifra un mensaje utilizando la función RSA f(x) = x^e
Input: m, mensaje a cifrar.
(n, e), llave pública RSA
Output: mensaje cifrado m^e mod n.
"""
return pow(m, e, n)
def rsa_descifrar(c, n, d):
"""Descifra un mensaje cifrado con RSA
Input: c, mensaje cifrado.
(n, d) llave privada RSA
Output: mensaje descifrado c^d mod n.
"""
return pow(c, d, n)
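# Sanity check with textbook-sized numbers: p = 3, q = 11 give n = 33 and
# phi(n) = 20; with e = 3, d = 7 (3 * 7 = 21 = 1 mod 20), the message m = 4
# encrypts to 4^3 mod 33 = 31, and 31^7 mod 33 = 4 recovers it.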
def rsa_obtener_pq(n, e, d):
"""Calcula p y q tal que p*q = n conociendo las llaves pública y privada de un
sistema RSA
-- Explicación del método aplicado --
Se quiere encontrar un par x, y tal que x^2 = y^2 mod n para aplicar la misma idea
utilizada en el ejercicio 3 con la que se obtienen los factores de n.
Puesto que n no es primo x^2 - 1 = 0 tiene más de dos soluciones en Zn.
Utilizamos el mismo método utilizado en Miller-Rabin para encontrar un número y
distinto de +-1 que cumpla con y^2 = 1.
Input: (n, e), d llaves pública y privada del sistema RSA
Output: p, q tal que p*q = n
"""
    # Write d*e - 1 as 2^a * b with b odd
b = d*e - 1
a = 0
while b % 2 == 0:
b = b // 2
a += 1
p = None
while p is None:
x = random.randint(1, n-1)
        # If gcd(x, n) != 1, we have found a factor of n
if mcd(x, n)[0] == 1:
y = pow(x, b, n)
            # If y = +-1 mod n, this x fails; try another
if y not in (1, n-1):
while y not in (1, n-1):
z = y
y = pow(y, 2, n)
                # If y = 1 mod n, we have found the factors
                # If y = -1, this x fails; try another
if y == 1:
p = mcd(n, z-1)[0]
else:
p = mcd(x, n)[0]
q = n // p
return sorted([p, q])
def resumen_sha1(mensaje):
"""Obtiene el resumen de un mensaje utilizando SHA1"""
sha1 = hashlib.sha1()
sha1.update(str.encode(mensaje))
return int(sha1.hexdigest(), 16)
def rsa_verificar_firma(mensaje, firma, n, e):
"""Verifica una firma hecha con RSA
Input: (mensaje, firma) mensaje junto a su firma
(n, e) llave pública del sistema RSA
Output: True si la firma es válida, False en caso contrario
"""
resumen = resumen_sha1(mensaje)
return pow(firma, e, n) == (resumen % n)
def list_to_str(l):
return "".join(map(str, l))
if __name__ == "__main__":
    # Exercise 1
print("## Ejercicio 1 ##")
    # llave_privada = [[1, 3, 7, 15, 31, 63, 127, 255], 557, 323]  # example from Notes on Cryptography
llave_privada = knapsack_llave_privada(8, 30)
llave_publica = knapsack_llave_publica(llave_privada)
print("Llave privada:", llave_privada)
print("Llave pública:", llave_publica)
    mensaje = [0, 1, 1, 0, 0, 1, 0, 1]  # 'e' in 8-bit ASCII
mensaje_cifrado = knapsack_cifrar(mensaje, llave_publica)
mensaje_descifrado = knapsack_descifrar(mensaje_cifrado, llave_privada)
print("Mensaje:\t\t", list_to_str(mensaje))
print("Mensaje cifrado:\t",mensaje_cifrado)
print("Mensaje descifrado:\t",list_to_str(mensaje_descifrado))
    # Exercise 2
print("\n## Ejercicio 2 ##")
numero_identidad = 75930561
fecha = 19981223
p, alpha, x = ejercicio2(numero_identidad, fecha)
print("numero de identidad =", numero_identidad, "\nfecha =", fecha)
print("p =", p, "| alpha =", alpha, "| x =", x, "| alpha^x mod p =", pow(alpha, x, p))
    # Exercise 3
print("\n## Ejercicio 3 ##")
n = 48478872564493742276963
x = 12
y = 37659670402359614687722
p, q = obtener_pq(n, x, y)
print("p =", p, "| q =", q)
print("Comprobaciones:")
print("p primo =", es_primo(p), "| q primo =", es_primo(q), "| (p*q == n) =", p*q == n)
print("Raices 144 en Zn usando raicesmod_pq (practica 1) =", raicesmod_pq(144, p, q))
    # Exercise 4
print("\n## Ejercicio 4 ##")
n = 48478872564493742276963
a0 = pow(random.randint(1, n-1), 2, n)
a1 = pow(random.randint(1, n-1), 2, n)
mensaje = [random.randint(0, 1) for _ in range(100)]
print("Mensaje:", list_to_str(mensaje))
print("Resumen:", merkle_damgard(1, mensaje, n, a0, a1))
    # Exercise 5
print("\n## Ejercicio 5 ##")
c = 1234567890
n, e, d = rsa_generar_llaves(numero_identidad, fecha)
m = rsa_descifrar(c, n, d)
print("x^e =\t", c)
print("x =\t", m)
print("x^e =\t", rsa_cifrar(m, n, e))
    # Exercise 6
print("\n## Ejercicio 6 ##")
n = 50000000385000000551
e = 5
d = 10000000074000000101
p, q = rsa_obtener_pq(n, e, d)
print("n =", n)
print("p =", p, "| q =", q)
print("Comprobaciones:")
print("p primo =", es_primo(p), "| q primo =", es_primo(q), "| (p*q == n) =", p*q == n)
    # Exercise 7
print("\n## Ejercicio 7 ##")
    # RSA signature
mensaje = "Prueba de verificación de una firma RSA utilizando SHA1 como función resumen"
print("Mensaje:\t", list_to_str(mensaje))
    # Generate RSA keys
n, e, d = rsa_generar_llaves(numero_identidad, fecha)
    # Compute the message digest
resumen = resumen_sha1(mensaje)
print("Resumen:\t", resumen)
    # Sign the digest / encrypt with the private key
firma = rsa_cifrar(resumen, n, d)
print("Firma:\t\t", firma)
    # Verify the signature
verificacion = rsa_verificar_firma(mensaje, firma, n, e)
print("Verificación:\t", verificacion) |
# parse Twitter streaming API results (background corpus)
# write log probabilities each word, tab-delimited, one per line
import sys, json, re, gzip
from math import log
counter = {}
total = 0.0
def legal(w):
return not (len(w) == 0 or w[0] == '@')
# saves memory
CS_DICT = {}
def CS(s):
return CS_DICT.setdefault(s,s)
p = re.compile(r'[^\@\#\w\d]+')
for i in range(1, len(sys.argv)):
logfile = gzip.open(sys.argv[i], 'rb')
try:
for line in logfile:
obj = json.loads(line)
try:
words = p.split(obj['text'].lower())
for w in words:
if legal(w):
try:
counter[CS(w)] += 1
except KeyError:
counter[CS(w)] = 1
total += 1
except KeyError:
pass
except ValueError:
pass
for (w, ct) in counter.items():
    print('%.6f\t%s' % (log(ct / total, 2), w))
|
# coding: utf-8
# Standard Libraries
import asyncio
import logging
# Dopplerr
from dopplerr.config import DopplerrConfig
log = logging.getLogger(__name__)
class PeriodicTask(object):
job_id: str = None
job_type = 'interval'
job_default_kwargs = {'max_instances': 1}
scheduler = None
seconds: int = None
minutes: int = None
hours: int = None
active = False
_interrupted = False
enable_cfg: str = None
forced = False
force_start_required = False
def __init__(self):
self.init()
def init(self):
pass
async def run(self):
try:
if self.force_start_required:
self.forced = True
elif self.forced:
log.debug("Forced execution already started, skip this periodic schedule")
return
self.active = True
return await self._run()
finally:
self.active = False
async def _run(self):
raise NotImplementedError
@property
def _add_job_kwargs(self):
kw = self.job_default_kwargs.copy()
if self.seconds:
kw['seconds'] = self.seconds
if self.minutes:
kw['minutes'] = self.minutes
if self.hours:
kw['hours'] = self.hours
return kw
@property
def job(self):
if self.scheduler:
return self.scheduler.get_job(self.job_id)
def add_job(self, scheduler):
if self.enable_cfg is not None and not DopplerrConfig().get_cfg_value(self.enable_cfg):
log.info("Do not enable job '%s', it is disabled by configuration '%s'", self.job_id,
self.enable_cfg)
return
self.scheduler = scheduler
scheduler.add_job(
self.run, self.job_type, id=self.job_id, replace_existing=True, **self._add_job_kwargs)
@property
def next_run_time(self):
job = self.job
if job:
return self.job.next_run_time
@property
def next_run_time_iso(self):
t = self.next_run_time
if t:
return t.isoformat()
@property
def interval(self):
# yapf: disable
return (
(self.seconds if self.seconds else 0) +
(self.minutes * 60 if self.minutes else 0) +
(self.hours * 60 * 60 if self.hours else 0) +
0)
# yapf: enable
@property
def started(self):
return self.scheduler
def stop(self):
self.scheduler = None
@property
def stopped(self):
return not self.scheduler
def interrupt(self):
self._interrupted = True
@property
def interrupted(self):
return self._interrupted
async def force_start(self):
log.debug("Force start job: %s", self.job_id)
self.force_start_required = True
asyncio.ensure_future(self.run())
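# A minimal subclass sketch (names and the config key are illustrative):
#
#     class CleanupTask(PeriodicTask):
#         job_id = "cleanup"
#         minutes = 15
#         enable_cfg = "general.cleanup_enabled"
#
#         async def _run(self):
#             log.info("cleanup pass")
#
# Registering it on an APScheduler instance: CleanupTask().add_job(scheduler)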
|
#_author:leo gao
#encoding:utf-8
from Utils.common import ci_url
video_command_url = '%s/#/spzhsystem/spzh' % ci_url
|
with open('a.txt', 'r') as f:
    f_read = f.readlines()
f_display = [line.strip() for line in f_read]
for i in f_display:
    print(i)
|
from django.urls import path
from . import views
urlpatterns = [
path('ROE-valuation-Calculator', views.ROE_valuation.as_view(), name='ROE_valuation'),
]
|
import sys
import logging
if sys.version_info >= (3, 0):
from configparser import ConfigParser
else:
from ConfigParser import ConfigParser
_LOG = logging.getLogger('__main__.' + __name__)
class Config(object):
ip = ''
port = 0
debug_interval = -1
    flush_queue_interval = 0
    snapshot_prefix = ''
@classmethod
def Parse(cls, config_file='../../autosnap.conf'):
autosnap_conf = ConfigParser()
autosnap_conf.read(config_file)
cls.ip = autosnap_conf.get('global', 'ip')
cls.port = autosnap_conf.getint('global', 'port')
cls.debug_interval = autosnap_conf.getint('global', 'debug_interval')
cls.flush_queue_interval = autosnap_conf.getint('global', 'flush_queue_interval')
cls.snapshot_prefix = autosnap_conf.get('global', 'snapshot_prefix')
_LOG.debug('Config: ip = {}, port = {}, debug_interval = {}, flush_queue_interval = {}, snapshot_prefix = {}'
.format(Config.ip, Config.port, Config.debug_interval, \
Config.flush_queue_interval, Config.snapshot_prefix))
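# Example autosnap.conf section consumed by Parse() (values are illustrative):
#   [global]
#   ip = 127.0.0.1
#   port = 8080
#   debug_interval = -1
#   flush_queue_interval = 10
#   snapshot_prefix = autosnap
# After Config.Parse(), the values are available as class attributes, e.g. Config.ip.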
|
from django.shortcuts import render
from bs4 import BeautifulSoup
import requests
from requests.compat import quote_plus
from .models import Search
import datetime
Base_Mentor_Url="https://www.codementor.io/experts?q={}"
Base_Post_Url = "https://www.codementor.io{}"
def home(request):
return render(request, 'base.html')
def new_search(request):
search = request.POST.get('search')
final_postings = []
if not search:
error="Error!"
else:
error=""
final_url = Base_Mentor_Url.format(quote_plus(search))
response = requests.get(final_url)
data = response.text
soup=BeautifulSoup(data,"html.parser")
post_list = soup.find_all('div', {'class': 'resultBlock'})
for i in post_list:
post_listings=i.find_all('div', {'class': 'row-fluid mentor-item-row'})
for post in post_listings:
x=post.find(class_="span9 mentor-details")
post_title=x.find(class_='name').text
post_title=post_title.strip()
post_url=Base_Post_Url.format(x.find(class_='name').get('href'))
y = post.find(class_="span3 text-center mentor-info")
rate = y.find(class_="rate")
# guard against a missing or empty rate so post_price is always defined
if rate and rate.text and rate.text.strip():
post_price = rate.text.strip()
else:
post_price = "N/A"
if y.find(class_="img-circle headImg").get('src'):
post_img_src=y.find(class_="img-circle headImg").get('src')
else :
post_img_src="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSqI-ITDXkGbI5GGDeLUJKKgvv_wEehtglCuJUyuWtqqvDeLCnz3Q&s"
tup=(post_title, post_url, post_price, post_img_src)
final_postings.append(tup)
if final_postings[0][0] == "{{item['display_name']}}":
error="Error!"
break
search_for_frontend={
'search':search,
'final_postings': final_postings[:-1],
'error':error,
}
return render(request,'my_app/new_search.html',search_for_frontend)
|
import simplejson as json
a={"name":"Divid",
"class":"I",
"age":18
}
with open("json_object.json", "w") as f:
json.dump(a,f,indent=6) |
#!/usr/bin/python
# coding=utf-8
###########################################
# Let's Have Some Fun
# File Name: 648.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Tue Feb 19 19:40:48 2019
###########################################
# 648. Replace Words
class TrieNode:
def __init__(self):
self.children = 26 * [None]
self.isEnd = False
class Solution:
def getRoot(self, tnd, word):
root = ''
for c in word:
if tnd.children[ord(c)-ord('a')]:
tnd = tnd.children[ord(c)-ord('a')]
root += c
if tnd.isEnd:  # note: choose the shortest matching root
break
else:
break
if not tnd.isEnd:
return word
else:
return root
def replaceWords(self, dict: 'List[str]', sentence: 'str') -> 'str':
# build Trie Tree
root = TrieNode()
for s in dict:
tmp = root
for c in s:
if not tmp.children[ord(c)-ord('a')]:
tmp.children[ord(c)-ord('a')] = TrieNode()
tmp = tmp.children[ord(c)-ord('a')]
tmp.isEnd = True
# search in the root
ret = []
for word in sentence.split(" "):
ret.append(self.getRoot(root, word))
return ' '.join(ret)
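# Example (the canonical LeetCode 648 case):
#   Solution().replaceWords(["cat", "bat", "rat"],
#                           "the cattle was rattled by the battery")
#   -> "the cat was rat by the bat"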
|
import sys
import os
import platform
import djcelery
djcelery.setup_loader()
from django.contrib.messages import constants as messages
# ===========================
# = Directory Declaractions =
# ===========================
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
CURRENT_DIR = os.path.dirname(__file__)
UTILS_ROOT = os.path.join(CURRENT_DIR, 'utils')
APPS_ROOT = os.path.join(CURRENT_DIR, 'apps')
VENDOR_ROOT = os.path.join(CURRENT_DIR, 'vendor')
if '/utils' not in ' '.join(sys.path):
sys.path.append(UTILS_ROOT)
if '/vendor' not in ' '.join(sys.path):
sys.path.append(VENDOR_ROOT)
if '/apps' not in ' '.join(sys.path):
sys.path.append(APPS_ROOT)
DEBUG = True
PRODUCTION = False
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = [
'criticalcodex.com',
'.criticalcodex.com',  # leading dot matches any subdomain (Django's wildcard form)
'criticalcodex.herokuapp.com',
]
ADMINS = (('Tyler Rilling', 'tyler@underlost.net'),)  # trailing comma keeps this a tuple of tuples
MANAGERS = ADMINS
#DB info injected by Heroku
import dj_database_url
DATABASES = {'default': dj_database_url.config(default='postgres://localhost')}
#Cache
if DEBUG:
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
else:
import urlparse
redis_url = urlparse.urlparse(os.environ.get('REDISCLOUD_URL'))
CACHES = {
'default': {
'BACKEND': 'd20.vendor.johnny.backends.redis.RedisCache',
'LOCATION': '%s:%s' % (redis_url.hostname, redis_url.port),
'OPTIONS': {
'PASSWORD': redis_url.password,
'DB': 0,
'JOHNNY_CACHE': True,
}
}
}
JOHNNY_MIDDLEWARE_KEY_PREFIX='d20'
JOHNNY_MIDDLEWARE_SECONDS = 900
CACHE_MIDDLEWARE_SECONDS = 60 * 5 # 5 minutes
CACHE_MIDDLEWARE_KEY_PREFIX = 'd20'
#Email
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.environ.get('POSTMARK_API_KEY')
POSTMARK_SENDER = 'site@criticalcodex.com'
EMAIL_HOST = os.environ.get('POSTMARK_SMTP_SERVER')
SERVER_EMAIL = 'site@criticalcodex.com'
DEFAULT_FROM_EMAIL = "site@criticalcodex.com"
TIME_ZONE = 'US/Pacific'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
AUTH_USER_MODEL = 'core.Account'
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
STATICFILES_DIRS = ( os.path.join(SITE_ROOT, 'static'),)
WSGI_APPLICATION = 'd20.wsgi.application'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
MIXPANEL_TOKEN = os.environ.get('MIXPANEL_TOKEN')
if DEBUG:
STATIC_ROOT = os.path.join(CURRENT_DIR, 'static')
STATIC_URL = 'http://criticalcodex.com/static/'
MEDIA_URL = 'http://criticalcodex.com/static/media/'
else:
STATIC_ROOT = 'staticfiles'
STATIC_URL = 'http://static.criticalcodex.com/'
MEDIA_URL = 'http://static.criticalcodex.com/media/'
#Site Settings
ALLOW_NEW_REGISTRATIONS = False
COMMENTS_APP = 'd20.apps.threadedcomments'
SITE_NAME = 'CriticalCodex'
SITE_DESC = 'A web-based application designed to enhance your tabletop RPG adventuring and storytelling.'
SITE_URL = 'http://criticalcodex.com/'
#Stripe
if DEBUG:
STRIPE_SECRET = "WTg4xZZsbgX5oHYX8P8Ywk4jDqM3XXMP"
STRIPE_PUBLISHABLE = "pk_YhTPLj7IIvaPzHRqDOCRMHkJfHeWj"
else:
STRIPE_SECRET = os.environ.get('STRIPE_SECRET_KEY')
STRIPE_PUBLISHABLE = os.environ.get('STRIPE_PUBLISHABLE_KEY')
ZEBRA_ENABLE_APP = True
#Amazon S3
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
AWS_STORAGE_BUCKET_NAME = 'static.criticalcodex.com'
AWS_S3_CUSTOM_DOMAIN = 'static.criticalcodex.com'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_S3_SECURE_URLS = False
COMPRESS_URL = "http://static.criticalcodex.com/"
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
REST_FRAMEWORK = {
'PAGINATE_BY': 25, # Default to 25
'PAGINATE_BY_PARAM': 'page_size', # Allow client to override, using `?page_size=xxx`.
'MAX_PAGINATE_BY': 100 # Maximum limit allowed when using `?page_size=xxx`.
}
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (  # a sequence, not a set: processor order matters
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'd20.apps.core.context_processors.template_settings',
)
MIDDLEWARE_CLASSES = (
'd20.apps.core.middleware.SubdomainMiddleware',
'd20.apps.core.middleware.MultipleProxyMiddleware',
'd20.apps.core.middleware.SetRemoteAddrFromForwardedFor',
'd20.vendor.johnny.middleware.LocalStoreClearMiddleware',
'd20.vendor.johnny.middleware.QueryCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'd20.apps.core.middleware.TimingMiddleware',
'd20.apps.core.middleware.LastSeenMiddleware',
)
ROOT_URLCONF = 'd20.urls'
SUBDOMAIN_URLCONFS = {
None: 'd20.urls',
'api': 'd20.apps.api.urls',
}
SESSION_COOKIE_DOMAIN = '.criticalcodex.com'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
#Django
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
'django.contrib.admin',
'django.contrib.humanize',
#Prancing on Heroku
'djcelery',
'gunicorn',
'taggit',
'haystack',
'compressor',
'storages',
'rest_framework',
#Vendor
'zebra',
#Internal
'd20.apps.core',
'd20.apps.profile',
'd20.apps.charactersheet',
'd20.apps.resources',
)
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': os.environ['SEARCHBOX_URL'],
'INDEX_NAME': 'charactersheets',
},
}
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
},
},
"loggers": {
"django": {
"handlers": ["console"],
}
}
}
|
from state import State
import random
from utils import cprint, clear_screen
from utils import (
BLACK,
RED,
GREEN,
YELLOW,
BLUE,
MAGENTA,
CYAN,
WHITE
)
class MiniMaxAgent:
"""
TicTacToe agent that implements Minimax choice criteria
"""
def choice(self, state: State, turn: int):
# Minimax decision
choices = state.choices()
random.shuffle(choices)
best_move, best_value = 0, -2
percent = 0
for i, choice in enumerate(choices):
new_state = state.clone()
new_state.move(choice)
child_value = self.min_value(new_state, turn)
percent += 100 / len(choices)
cprint(f'Thinking... {percent}%\n', CYAN)
if child_value > best_value:
best_value = child_value
best_move = choice
if best_value > 1000:
cprint('mmmm... I\'m about to win :)\n', YELLOW)
input()
return best_move
def connection_value(self, arr: list, val: int):
m = []
for i in [0, 3, 6]:
m.append([arr[i], arr[i + 1], arr[i + 2]])
dr = [1, 0, -1, 0, 1, 1, -1, -1]
dc = [0, 1, 0, -1, 1, -1, -1, 1]
counter = 0
for i in range(len(m)):
for j in range(len(m[0])):
if m[i][j] != val:
continue
for d in range(8):
ni, nj = i + dr[d], j + dc[d]
if ni >= 0 and ni < 3 and nj >= 0 and nj < 3:
counter += (m[i][j] == m[ni][nj])
return counter
def leaf_value(self, state: State, winner: int, turn: int, moves_amount: int):
BIAS = 1000
if winner == turn: # win
return BIAS + 1 / moves_amount
if winner == 3 - turn: # lose
return -1
return self.connection_value(state.matrix, turn) # tie
def min_value(self, state: State, turn: int, depth: int = 1):
winner = state.winner()
if winner != 0:
return self.leaf_value(state, winner, turn, depth)
choices = state.choices()
random.shuffle(choices)
mn = 9999999
for choice in choices:
new_state = state.clone()
new_state.move(choice)
child_value = self.max_value(new_state, turn, depth + 1)
mn = min(mn, child_value)
return mn
def max_value(self, state: State, turn: int, depth: int = 1):
winner = state.winner()
if winner != 0:
return self.leaf_value(state, winner, turn, depth)
choices = state.choices()
random.shuffle(choices)
mx = -9999999
for choice in choices:
new_state = state.clone()
new_state.move(choice)
child_value = self.min_value(new_state, turn, depth + 1)
mx = max(mx, child_value)
return mx
class RandomAgent:
def choice(self, state: State, turn: int):
return random.choice(state.choices())
# Agent = RandomAgent
Agent = MiniMaxAgent
a = MiniMaxAgent()
print(a.connection_value([2, 1, 1, 1, 2, 2, 2, 1, 1], 1))
print(a.connection_value([2, 1, 2, 1, 1, 2, 1, 2, 1], 1)) |
"""
"""
# Classes
class City:
""" Represents a city in an input file.
Properties:
grid (int, int): (number of rows, number of columns)
vehicles (list of Vehicle): list of all available vehicles
rides (list of Ride): list of all rides
ride_num: number of rides
bonus: per-ride bonus for starting ride on time
step_num: number of steps in the simulation
"""
def __init__(self, file):
with open(file) as f:
line = f.readline()
values = line.strip('\n').split(' ')
self.grid = (int(values[0]), int(values[1]))
self.ride_num = int(values[3])
self.bonus = int(values[4])
self.step_num = int(values[5])
self.vehicles = self.get_vehicles(int(values[2]))
self.rides = self.get_rides(file)
def __repr__(self):
return ('grid: ' + str(self.grid) +
'\nnumber of vehicles: ' + str(len(self.vehicles)) +
'\nnumber of rides: ' + str(self.ride_num) +
'\nper-ride bonus: ' + str(self.bonus) +
'\nnumber of steps: ' + str(self.step_num)
)
def get_rides(self, file):
rides = []
with open(file) as f:
next(f)
cur_ride = 0
for line in f:
# strip() removes the trailing newline; the old values[-1][-2] hack broke multi-digit values
values = line.strip().split(' ')
r = Ride(cur_ride,
(int(values[0]), int(values[1])),
(int(values[2]), int(values[3])),
int(values[4]),
int(values[5])
)
rides.append(r)
cur_ride += 1
return rides
def get_vehicles(self, n):
vehicles = []
for i in range(0, n):
vehicles.append(Vehicle(i))
return vehicles
def get_free_vehicles(self, current_step):
free = []
for v in self.vehicles:
if (v.step_busy_until <= current_step):
free.append(v)
return free
def get_waiting_rides(self):
waiting = []
for r in self.rides:
if (not r.is_taken):
waiting.append(r)
return waiting
class Ride:
""" Represents a requested ride in the input file.
Properties:
start_intersection (int, int): (row, column)
finish_intersection (int, int): (row, column)
earliest_start (int): earliest time ride may start
latest_finish (int): latest time the ride must finish by
is_taken (boolean): false if the journey has not yet been taken, true
otherwise
"""
def __init__(self, r_id, start_intersection, finish_intersection,
earliest_start, latest_finish):
self.id = r_id
self.start_intersection = start_intersection
self.finish_intersection = finish_intersection
self.earliest_start = earliest_start
self.latest_finish = latest_finish
self.distance = get_distance_between_points(start_intersection,
finish_intersection)
self.is_taken = False
def __repr__(self):
return ('id: ' + str(self.id) +
'\nstart intersection: ' + str(self.start_intersection) +
'\nfinish intersection: ' + str(self.finish_intersection) +
'\nearliest start: ' + str(self.earliest_start) +
'\nlatest finish: ' + str(self.latest_finish) +
'\ndistance: ' + str(self.distance) +
'\nhas been taken: ' + str(self.is_taken)
)
class Vehicle:
""" Represents a vehicle in the input file.
Properties:
current_position (int, int): current position of the vehicle
ride (Ride): ride object assigned to this vehicle
"""
def __init__(self, v_id):
self.id = v_id
self.current_position = (0, 0)
self.ride = None
self.step_busy_until = 0
def __repr__(self):
return ('[' + str(self.id) + ', ' + str(self.current_position) +
', ' + str(self.ride) + ']'
)
# Functions
def get_distance_between_points(pos1, pos2):
return (
abs(pos1[0] - pos2[0]) +
abs(pos1[1] - pos2[1])
)
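# Manhattan distance, e.g. get_distance_between_points((0, 0), (2, 3)) == 5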
def create_matrix(r, c):
road_matrix = []
for i in range(0, r):
road_matrix.append([0 for i in range(0, c)])
return road_matrix
def print_file_info(file):
city = City(file)
print(city)
print('\n---\n')
for r in city.rides:
print(r, '\n')
|
from django.shortcuts import render, redirect
from django.views.decorators.http import require_http_methods
from django.http import JsonResponse
import json
from ..models.product import Product
from ..models.order import Order, OrderItems
@require_http_methods(["POST"])
def create_order(request):
""" View to create an order """
# get data
try:
data = request.POST
data = list(data.keys())[0]
data = json.loads(data)
# create order
my_order = Order()
my_order.user_id = request.user
my_order.status = 0
my_order.save()
# order items associated
for product in data['list_order']:
order_item = OrderItems()
order_item.order_id = my_order
order_item.product_id = Product.objects.get(id=product[0])
order_item.cant = product[1]
order_item.save()
return JsonResponse({"status": "success", "order_id": my_order.id})
except Exception:
return JsonResponse({"status": "error"})
@require_http_methods(["GET"])
def orders(request):
""" View to get all orders by user """
# if user is not authenticated redirect to login
if request.user.is_authenticated:
orders = Order.objects.filter(user_id=request.user).order_by('id')
context = {
"orders": orders
}
return render(request, 'orders.html', context=context)
# redirect to login
return redirect('/login')
@require_http_methods(["GET"])
def view_order(request, id):
""" View order by id """
# if user is not authenticated redirect to login
if request.user.is_authenticated:
my_order = Order.objects.get(id=id)
context = {
"my_order": my_order
}
return render(request, 'view_order.html', context=context)
# redirect to login
return redirect('/login')
@require_http_methods(["POST"])
def pay_order(request):
""" Pay order by id """
# if user is not authenticated redirect to login
if request.user.is_authenticated:
try:
data = request.POST
data = list(data.keys())[0]
data = json.loads(data)
my_order = Order.objects.get(id=data['id'])
# change status
my_order.status = 1
my_order.save()
return JsonResponse({"status": "success", "order_id": my_order.id})
except Exception:
return JsonResponse({"error": "error"})
# redirect to login
return redirect('/login')
|
import sqlite3
class User:
def __init__(self, _id, name, username):
self.id = _id
self.name = name
self.username = username
@classmethod
def find_by_username(cls, username, password):
connection = sqlite3.connect("data.db")
cursor = connection.cursor()
result = cursor.execute("SELECT * FROM users WHERE username=? AND password=?", (username, password))
row = result.fetchone()
connection.close()  # no commit needed: SELECT does not modify the database
if row:
return cls(*row)
else:
return None
@classmethod
def find_by_user_id(cls, user_id):
connection = sqlite3.connect("data.db")
cursor = connection.cursor()
result = cursor.execute("SELECT * FROM users WHERE id=?", (user_id,))
row = result.fetchone()
connection.close()
if row:
return cls(*row)
else:
return None
|
# Generated by Django 3.2.3 on 2021-07-17 11:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('secureadmin', '0003_usedoffer'),
]
operations = [
migrations.AddField(
model_name='usedoffer',
name='is_ordered',
field=models.BooleanField(default=False),
),
]
|
import sys
sys.path.append("../utils")
import snap
import testutils
if __name__ == '__main__':
if len(sys.argv) < 3:
print """Usage: """ + sys.argv[0] + """ <srcfile> <dstfile>
postsfile: posts.tsv file from StackOverflow dataset
dstfile: destination file for saving the pagerank table"""
sys.exit(1)
srcfile = sys.argv[1]
dstfile = sys.argv[2]
context = snap.TTableContext()
t = testutils.Timer()
r = testutils.Resource()
schema = snap.Schema()
schema.Add(snap.TStrTAttrPr("Id", snap.atInt))
schema.Add(snap.TStrTAttrPr("OwnerUserId", snap.atInt))
schema.Add(snap.TStrTAttrPr("AcceptedAnswerId", snap.atInt))
schema.Add(snap.TStrTAttrPr("CreationDate", snap.atInt))
schema.Add(snap.TStrTAttrPr("Score", snap.atInt))
schema.Add(snap.TStrTAttrPr("Tag", snap.atStr))
table = snap.TTable.LoadSS("1", schema, srcfile, context, "\t", snap.TBool(False))
t.show("load posts text", table)
r.show("__loadpoststext__")
questions = snap.TTable.New("2", table.GetSchema(), context)
table.SelectAtomicStrConst("Tag", "python", snap.EQ, questions)
t.show("selected tag = 'python'", questions)
r.show("__selectedtagpython__")
qa = questions.Join("AcceptedAnswerId", table, "Id")
graph = snap.ToNetwork(snap.PNEANet, qa, "2.OwnerUserId", "1.OwnerUserId", snap.aaFirst)
t.show("join", qa)
r.show("__join__")
t.show("graph", graph)
r.show("__graph__")
PRankH = snap.TIntFltH()
snap.GetPageRank(graph, PRankH, 0.85, 1e-4, 100)
prtable = snap.TTable.New("PR", PRankH, "UserId", "PageRank", context, snap.TBool(True))
t.show("pagerank", prtable)
r.show("__pagerank__")
FOut = snap.TFOut(dstfile)
prtable.Save(FOut)
t.show("save bin", prtable)
r.show("__savebin__")
|
from django.db import models
from django.contrib.auth.models import User
from mindfinder.settings import MEDIA_ROOT
from django.core.files.storage import FileSystemStorage
DEFAULT_AVATAR_IMAGE = 'avatars/no_photo_icon.png'
class UserProfile(models.Model):
user = models.OneToOneField(
User,
on_delete=models.CASCADE)
avatar = models.ImageField(
storage=FileSystemStorage(location=MEDIA_ROOT),
upload_to='avatars',
default=DEFAULT_AVATAR_IMAGE)
name = models.CharField(
'Name',
max_length=255,
default='')
age = models.IntegerField(
'Age',
null=True)
friends = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='+',
null=True)
|
from .models import Category
# Template context processor that returns all categories
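# Usage sketch (path is illustrative): list '<app>.context_processors.categories'
# under context_processors in settings.TEMPLATES so every template sees {{ categories }}.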
def categories(request):
return {
'categories' : Category.objects.all()
} |
import ServerConnection
run = True
while run:
num = ServerConnection.openConnection()  # create a connection with the frontend
if num is None:
run = False
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import random
import networkx as nx
import numpy as nm
import community
import time
import gc
import eigen_graph as eg
import graph_utils as gu
sys.path.append("twoK")
from twok_simple import joint_degree_graph
sys.path.append("twofiveK")
from Estimation import Estimation
from Generation import Generation
from net_formats import read_graphml, read_mat_file
import parallelism
def write_statistics(A, B, label, net, x, l, output=True):
eg.info("Computing new centrality..")
G = nx.from_numpy_matrix(A)
H = nx.from_numpy_matrix(B)
nx.write_weighted_edgelist(H, net + "_" + label + "_" +
str(random.randint(0, 99999999)) + ".edges")
y, m = eg.eigen_centrality(B, maxiter=100000)
eg.info("Printing out results..")
eigen_err = eg.vec_dist(x, y)
clust_err = nm.average(nx.clustering(G).values()) /\
nm.average(nx.clustering(H).values())
lambda_err = abs(l / m)
degree_corr = gu.correlation(sorted(nx.degree(G)), sorted(nx.degree(H)))
if nx.is_connected(H):
conn = 1
else:
conn = 0
out = (str(label) + "," + str(net.split(".")[0].split("/")[-1]) + "," +
str(eigen_err) + "," + str(degree_corr) + "," +
str(clust_err) + "," + str(lambda_err) + "," +
str(-1) + "," + str(-1) + "," +
# str(gu.correlate_dist_dict(gu.node_distance_dist(A),
# gu.node_distance_dist(B))) + "," +
# str(gu.correlate_dist_dict(gu.get_degree_betweeness(A),
# gu.get_degree_betweeness(B))) + "," +
str(gu.correlate_dist_dict(gu.get_kcoreness(A),
gu.get_kcoreness(B))) + "," +
str(gu.correlate_dist_dict(gu.get_common_neigh_dist(A),
gu.get_common_neigh_dist(B))) + "," +
str(-1) + "," +
# str(community.modularity(
# community.best_partition(G), G) /
# community.modularity(
# community.best_partition(H), H)) + "," +
str(gu.correlate_dist_dict(gu.get_avg_neighbour_degree(A),
gu.get_avg_neighbour_degree(B))) + "," +
str(conn))
if output:
print(out, file=sys.stderr)
return out
def csv_test_unparallel():
eg.__eigen_info = True
print("Strategy," + "Graph," +
str("EigErr") + "," + str("DegCorr") + "," +
str("ClustRatio") + "," + str("EigVErr") + "," +
str("NodeDistCorr") + "," + str("DegBetCorr") + "," +
str("KCoreCorr") + "," + str("CommNeighDist") + "," +
str("PartRatio") + "," + str("AvgNeighDegCorr") + "," +
str("Connected"), file=sys.stderr)
for filo in os.listdir("/home/baldo/tmp/graph_generator/PL200/"):
with open("/home/baldo/tmp/graph_generator/PL200/" + filo, "r") as net:
eg.info(filo)
eg.info("Loading graph..")
G = nx.read_weighted_edgelist(net) # , delimiter=":")
# G = read_graphml(net)
A = nx.to_numpy_matrix(G)
n = nm.shape(A)[0]
joint_degrees = nx.algorithms.mixing.degree_mixing_dict(G)
eg.info("Computing centrality..")
x, l = eg.eigen_centrality(A)
for i in range(10):
eg.info("Run: " + str(i))
eg.info("Building JDM graph..")
H = joint_degree_graph(joint_degrees)
B = nx.to_numpy_matrix(H)
write_statistics(A, B, "2k", filo, x, l)
eg.info("Building degree sequence graph..")
H = nx.random_degree_sequence_graph((nx.degree(G).values()))
B = nx.to_numpy_matrix(H)
write_statistics(A, B, "1k", filo, x, l)
# the eigen and spectral variants differ only in a single parameter
for precision in (0.01, 0.001, 0.0001):
eg.info("Building eigen " + str(precision) + " graph..")
B = eg.build_matrix(x, l, precision)
write_statistics(A, B, "eig" + str(precision), filo, x, l)
for m in (0.25, 0.5, 0.75, 0.9, 0.95):
eg.info("Building spectral " + str(m) + " graph..")
B = eg.sample_simm_matrix(A, int(round(n*m)))
write_statistics(A, B, "spectre" + str(m), filo, x, l)
eg.info("Building D2.5 graph..")
test25 = Estimation()
gen25 = Generation()
test25.load_graph("", graph=G)
test25.calcfull_CCK()
test25.calcfull_JDD()
gen25.set_JDD(test25.get_JDD('full'))
gen25.set_KTRI(test25.get_KTRI('full'))
gen25.construct_triangles_2K()
gen25.mcmc_improved_2_5_K(error_threshold=0.05)
H = gen25.G
B = nx.to_numpy_matrix(H)
write_statistics(A, B, "25k", filo, x, l)
def graph_worker(inputlist, queue, print_queue):
for filo in inputlist:
if filo.split(".")[-1] == "graphml":
G = read_graphml(filo)
else:
G = nx.read_weighted_edgelist(filo)
A = nx.to_numpy_matrix(G)
n = nm.shape(A)[0]
joint_degrees = nx.algorithms.mixing.degree_mixing_dict(G)
x, l = eg.eigen_centrality(A)
H = nx.random_degree_sequence_graph((nx.degree(G).values()))
B = nx.to_numpy_matrix(H)
print_queue.put(write_statistics(A, B, "1k", filo,
x, l, output=False))
print_queue.put("\n")
H = joint_degree_graph(joint_degrees)
B = nx.to_numpy_matrix(H)
print_queue.put(write_statistics(A, B, "2k", filo,
x, l, output=False))
print_queue.put("\n")
for precision in (0.01, 0.001, 0.0001):
B = eg.build_matrix(x, l, precision)
print_queue.put(write_statistics(A, B, "eig" + str(precision),
filo, x, l, output=False))
print_queue.put("\n")
for m in (0.25, 0.5, 0.75, 0.9, 0.95):
B = eg.sample_simm_matrix(A, int(round(n*m)))
print_queue.put(write_statistics(A, B, "spectre" + str(m),
filo, x, l, output=False))
print_queue.put("\n")
test25 = Estimation()
gen25 = Generation()
test25.load_graph("", graph=G)
test25.calcfull_CCK()
test25.calcfull_JDD()
gen25.set_JDD(test25.get_JDD('full'))
gen25.set_KTRI(test25.get_KTRI('full'))
gen25.construct_triangles_2K()
gen25.mcmc_improved_2_5_K(error_threshold=0.05)
H = gen25.G
B = nx.to_numpy_matrix(H)
print_queue.put(write_statistics(A, B, "25k", filo, x, l, output=False))
print_queue.put("\n")
def stat_worker(inputlist, outqueue, print_queue):
for el in inputlist:
alg = el[0]
A = el[1]
B = el[2]
if alg == "clust_ratio":
a = nm.average(nx.clustering(A).values())
b = nm.average(nx.clustering(B).values())
v = a/b
if alg == "degree_corr":
v = gu.correlation(sorted(nx.degree(A)),
sorted(nx.degree(B)))
if alg == "dist_dist":
v = gu.correlate_dist_dict(gu.node_distance_dist(A),
gu.node_distance_dist(B))
if alg == "deg_bet_corr":
v = gu.correlate_dist_dict(gu.get_degree_betweeness(A),
gu.get_degree_betweeness(B))
if alg == "kcore_corr":
v = gu.correlate_dist_dict(gu.get_kcoreness(A),
gu.get_kcoreness(B))
if alg == "comm_neigh_corr":
v = gu.correlate_dist_dict(gu.get_common_neigh_dist(A),
gu.get_common_neigh_dist(B))
if alg == "mod_ratio":
v = community.modularity(community.best_partition(A), A) /\
community.modularity(community.best_partition(B), B)
if alg == "avg_neigh_deg_corr":
v = gu.correlate_dist_dict(gu.get_avg_neighbour_degree(A),
gu.get_avg_neighbour_degree(B))
eg.info("Computed " + alg)
outqueue.put({alg: v})
def get_statistics2(G, H, A, B, x, l):
eg.info("Computing statistics...")
if not H:
eg.info("Building networkx graph...")
H = gu.simm_matrix_2_graph(B)
gu.connect_components(H)
eg.info("Computing centrality...")
y, m = eg.eigen_centrality(B, maxiter=100000)
eg.info("Computing lambda ratio...")
lambda_err = abs(l / m)
eg.info("Computing centrality distance...")
eigen_err = eg.vec_dist(x, y)
inputs = [("avg_neigh_deg_corr", A, B), ("mod_ratio", G, H),
("comm_neigh_corr", A, B), ("kcore_corr", A, B),
("deg_bet_corr", A, B), ("dist_dist", A, B),
("degree_corr", G, H), ("clust_ratio", G, H)]
mets = parallelism.launch_workers(inputs, stat_worker,
inputs_per_worker=1, parallelism=4)
res = {}
for el in mets:
res.update(el)
eg.info("Check connectivity...")
if nx.is_connected(H):
conn = 1
else:
conn = 0
eg.info("Done with stats")
return (eigen_err, res['degree_corr'], res['clust_ratio'], lambda_err,
res['dist_dist'], res['deg_bet_corr'],
res['kcore_corr'], res['comm_neigh_corr'],
res['mod_ratio'], res['avg_neigh_deg_corr'],
conn)
def get_statistics1(G, H, duration, fraction=0.5):
sample_size = int(round(fraction*len(G.nodes())))
gsample = random.sample(G.nodes(), sample_size)
hsample = random.sample(H.nodes(), sample_size)
eg.info("Computing statistics...")
eg.info("Computing centrality...")
x = nx.eigenvector_centrality_numpy(G)
y = nx.eigenvector_centrality_numpy(H)
eg.info("Computing centrality distance...")
eigen_err = gu.sorted_correlation(x.values(), y.values())
eg.info("Computing clustering ratio...")
clust1 = nx.clustering(G, gsample)
clust2 = nx.clustering(H, hsample)
clust_err = sum(clust2.values())/sum(clust1.values())
lambda_err = -1
eg.info("Computing degree correlation...")
degdist1 = {}
degdist2 = {}
degree1 = nx.degree(G, nbunch=gsample)
degree2 = nx.degree(H, nbunch=hsample)
for d in degree1.values():
degdist1[d] = degdist1.get(d, 0) + 1
for d in degree2.values():
degdist2[d] = degdist2.get(d, 0) + 1
print(degdist1)
print(degdist2)
degree_corr = gu.correlate_dist_dict(degdist1, degdist2)
print(degree_corr)
eg.info("Check connectivity...")
if nx.is_connected(H):
conn = 1
else:
conn = 0
eg.info("Distance distribution correlation...")
dist1 = {}
dist2 = {}
for i in range(sample_size):
for j in range(i+1, sample_size):
try:
d = nx.algorithms.bidirectional_dijkstra(G, gsample[i],
gsample[j])[0]
dist1[d] = dist1.get(d, 0) + 1
except nx.NetworkXNoPath:  # no path between the sampled pair: skip it
pass
try:
d = nx.algorithms.bidirectional_dijkstra(H, hsample[i],
hsample[j])[0]
dist2[d] = dist2.get(d, 0) + 1
except nx.NetworkXNoPath:
pass
distance_dist_corr = gu.correlate_dist_dict(dist1, dist2)
eg.info("Betweenness correlation...")
b1 = nx.betweenness_centrality(G, sample_size).values()
b2 = nx.betweenness_centrality(H, sample_size).values()
bet_corr = gu.sorted_correlation(b1, b2)
eg.info("K-coreness correlation...")
kcore_corr = -1 # gu.correlate_dist_dict(gu.get_kcoreness(A),
# gu.get_kcoreness(B))
eg.info("Common neighbourhood correlation...")
# comm1 = {}
# comm2 = {}
# for i in range(sample_size):
# for j in range(i+1, sample_size):
# d = len(list(nx.common_neighbors(G, gsample[i], gsample[j])))
# comm1[d] = comm1.get(d, 0) + 1
# d = len(list(nx.common_neighbors(H, hsample[i], hsample[j])))
# comm2[d] = comm2.get(d, 0) + 1
common_neigh_corr = -1 # gu.correlate_dist_dict(comm1, comm2)
eg.info("Modularity ratio...")
Gm = gu.norm_modularity(G)
Hm = gu.norm_modularity(H)
modularity_ratio = Hm[0]/Gm[0]
partition_ratio = Hm[1]/float(Gm[1])
eg.info("Community size correlation...")
gsize = gu.community_size(G)
hsize = gu.community_size(H)
comm_size_corr = gu.sorted_correlation(gsize, hsize)
eg.info("Avg neighbourhood degree correlation...")
avg1 = nx.average_neighbor_degree(G, nodes=gsample)
avg2 = nx.average_neighbor_degree(H, nodes=hsample)
avg_neigh_deg_corr = gu.sorted_correlation(avg1.values(), avg2.values())
eg.info("Done with stats")
return (eigen_err, degree_corr, clust_err, lambda_err,
distance_dist_corr, bet_corr, kcore_corr, common_neigh_corr,
modularity_ratio, partition_ratio, avg_neigh_deg_corr,
comm_size_corr, conn, duration)
def get_statistics(G, H, A, B, x, l, duration):
eg.info("Computing statistics...")
if not H:
eg.info("Building networkx graph...")
H = gu.simm_matrix_2_graph(B)
eg.info("Computing centrality...")
y, m = (-1, -1) # eg.eigen_centrality(B, maxiter=100000)
# nx.write_weighted_edgelist(H, "pgp_spectre0.9_" +
# str(random.randint(0, 99999999)) + ".edges")
eg.info("Computing centrality distance...")
eigen_err = eg.vec_dist(x, y)
eg.info("Computing clustering ratio...")
clust_err = gu.average_clustering(A) / gu.average_clustering(B)
# clust_err = nm.average(nx.clustering(G).values()) /\
# nm.average(nx.clustering(H).values())
eg.info("Computing lambda ratio...")
lambda_err = abs(l / m)
eg.info("Computing degree correlation...")
degree_corr = gu.correlation(gu.get_degrees(A), gu.get_degrees(B))
eg.info("Check connectivity...")
if nx.is_connected(H):
conn = 1
else:
conn = 0
eg.info("Distance distribution correlation...")
distance_dist_corr = -1 # gu.correlate_dist_dict(gu.node_distance_dist(A),
# gu.node_distance_dist(B))
eg.info("Degree betweenness correlation...")
degree_bet_corr = -1 # gu.correlate_dist_dict(gu.get_degree_betweeness(A),
# gu.get_degree_betweeness(B))
eg.info("K-coreness correlation...")
kcore_corr = -1 # gu.correlate_dist_dict(gu.get_kcoreness(A),
# gu.get_kcoreness(B))
eg.info("Common neighbourhood correlation...")
common_neigh_corr = -1 # gu.correlate_dist_dict(gu.get_common_neigh_dist(A)
# ,gu.get_common_neigh_dist(B))
eg.info("Modularity ratio...")
Gm = gu.norm_modularity(G)
Hm = gu.norm_modularity(H)
modularity_ratio = Gm[0]/Hm[0]
partition_ratio = Gm[1]/float(Hm[1])
eg.info("Avg neighbourhood degree correlation...")
avg_neigh_deg_corr = -1 # gu.correlate_dist_dict(
# gu.get_avg_neighbour_degree(A), gu.get_avg_neighbour_degree(B))
eg.info("Done with stats")
return (eigen_err, degree_corr, clust_err, lambda_err,
distance_dist_corr, degree_bet_corr, kcore_corr, common_neigh_corr,
modularity_ratio, partition_ratio, avg_neigh_deg_corr,
conn, duration)
def graph_worker_oneshot(inputlist, queue, print_queue):
for duty in inputlist:
name = duty[0]
G = duty[1]
algo = duty[2]
param = duty[3]
A = nx.to_numpy_matrix(G)
eg.info("Setup completed")
start_time = time.time()
if algo == "1k":
H = nx.random_degree_sequence_graph((nx.degree(G).values()))
# B = nx.to_numpy_matrix(H)
elif algo == "2k":
joint_degrees = nx.algorithms.mixing.degree_mixing_dict(G)
H = joint_degree_graph(joint_degrees)
# B = nx.to_numpy_matrix(H)
elif algo == "eig":
precision = float(param)
# B = eg.build_matrix(x, l, precision)
x, l = eg.eigen_centrality(A)
B = eg.generate_matrix(x, l*x, precision, gu.get_degrees(A))
H = None
algo += str(precision)
elif algo == "modeig":
precision = float(param)
B = eg.synthetic_modularity_matrix(A, precision)
H = None
algo += str(precision)
elif algo == "spectre":
m = float(param)
n = nm.shape(A)[0]
B = eg.sample_simm_matrix2(A, int(round(n*m)))
H = gu.simm_matrix_2_graph(B)
while nx.is_isomorphic(G, H):
B = eg.sample_simm_matrix2(A, int(round(n*m)))
H = gu.simm_matrix_2_graph(B)
algo += str(m)
elif algo == "laplacian":
m = float(param)
n = nm.shape(A)[0]
B = eg.laplacian_clone_matrix(A, int(round(n*m)))
H = gu.simm_matrix_2_graph(B)
while nx.is_isomorphic(G, H):
B = eg.sample_simm_matrix2(A, int(round(n*m)))
H = gu.simm_matrix_2_graph(B)
algo += str(m)
elif algo == "modspec":
m = float(param)
n = nm.shape(A)[0]
B = eg.modspec_clone_matrix(A, int(round(n*m)))
H = None
algo += str(m)
elif algo == "franky":
m = float(param)
n = nm.shape(A)[0]
B = eg.franky_clone_matrix(A, int(round(n*m)))
H = None
algo += str(m)
elif algo == "modularity":
m = float(param)
n = nm.shape(A)[0]
B = eg.modularity_clone_matrix(A, int(round(n*m)))
H = gu.simm_matrix_2_graph(B)
while nx.is_isomorphic(G, H):
B = eg.modularity_clone_matrix(A, int(round(n*m)))
H = gu.simm_matrix_2_graph(B)
algo += str(m)
elif algo == "25k":
test25 = Estimation()
gen25 = Generation()
test25.load_graph("", graph=G)
test25.calcfull_CCK()
test25.calcfull_JDD()
gen25.set_JDD(test25.get_JDD('full'))
gen25.set_KTRI(test25.get_KTRI('full'))
gen25.construct_triangles_2K()
gen25.mcmc_improved_2_5_K(error_threshold=0.05)
H = gen25.G
# B = nx.to_numpy_matrix(H)
eg.info("Graph Generated")
stat = get_statistics1(G, H, time.time()-start_time)
s = algo + "," + name + "," + str(len(G.nodes()))
for el in stat:
s += "," + str(el)
print_queue.put(s)
print_queue.put("\n")
gc.collect()
def init_outfile(outfile):
with open(outfile, 'w') as f:
f.write("Strategy," + "Graph," + "Nodes," +
str("EigErr") + "," + str("DegCorr") + "," +
str("ClustRatio") + "," + str("EigVErr") + "," +
str("NodeDistCorr") + "," + str("DegBetCorr") + "," +
str("KCoreCorr") + "," + str("CommNeighDist") + "," +
str("PartRatio") + "," + str("CommunityRatio") + "," +
str("AvgNeighDegCorr") + "," + "CommSizeCorr" + "," +
str("Connected") + "," + str("Duration") + "\n")
def csv_test():
eg.__eigen_info = False
# folder = "/home/baldo/Lavoro/UCI/brains/"
# files = os.listdir(folder)
# files = [folder + f for f in files]
# files *= 10
# files = ["pgp.edges"]
outfile = "fb_net.data"
init_outfile(outfile)
nets = ["fb_net/Michigan23.mat", "fb_net/MSU24.mat",
"fb_net/UIllinios20.mat"] # ["fb_net/Reed98.mat",
# "fb_net/Caltech36.mat", "fb_net/Simmons81.mat"]
# , "fb_net/Oberlin44.mat", "fb_net/Howard90.mat",
# "fb_net/Rice31.mat", "fb_net/Wake73.mat", "fb_net/UC64.mat",
# "fb_net/UCSC68.mat", "fb_net/Duke14.mat", "fb_net/UCF52.mat",
# "fb_net/Harvard1.mat" ]
shots = []
for net in nets:
G = read_mat_file(net)
name = net.split("/")[1]
name = name.split(".")[0]
shots += [(name, G, "modularity", 0.1)]
shots += [(name, G, "spectre", 0.1)]
shots += [(name, G, "modularity", 0.5)]
shots += [(name, G, "spectre", 0.5)]
shots += [(name, G, "modularity", 0.9)]
shots += [(name, G, "spectre", 0.9)]
# shots += [(name, G, "25k", 0.9)]
print("Shots loaded")
parallelism.minions(shots*10, graph_worker_oneshot, parallelism=4,
outfile=outfile)
if __name__ == "__main__":
csv_test()
|
from rest_framework import serializers
from wallet_core.models import UserPHPWallet, PHPWalletTransaction
__author__ = 'kaushal'
class PHPWalletListSerializer(serializers.ModelSerializer):
"""
Provide list of accounts for payment recipient list requested
All, fields are read only
"""
owner = serializers.ReadOnlyField(source='owner.get_username')
currency = serializers.CharField(default='PHP')
class Meta:
model = UserPHPWallet
fields = ('id', 'owner', 'balance', 'currency')
read_only_fields = ('id', 'owner', 'balance', 'currency')
class WalletTransactionSerializer(serializers.ModelSerializer):
"""
Serialize wallet transactions,
Read only fields
"""
direction = serializers.CharField()
debit_from = serializers.CharField(source='wallet_from.owner.get_username')
credit_to = serializers.CharField(source='wallet_to.owner.get_username')
currency = serializers.CharField(default='PHP')
class Meta:
model = PHPWalletTransaction
fields = ('id', 'amount', 'currency', 'direction',
'debit_from', 'credit_to', 'created')
read_only_fields = ('id', 'wallet_from', 'wallet_to',
'amount', 'created', 'currency')
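# Dotted sources such as 'wallet_from.owner.get_username' traverse relations, so
# each serialized transaction carries the usernames on both sides of the transfer.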
|
def dele(key):
myDict.pop(key)
print(myDict)
myDict = {'java':100,'python':20,'c':300, 20:22, 32:42}
dele('java')
dele('python')
|
score1 = int(input('Enter a number: '))
if score1 > 10:
if score1 % 2 == 0:
print("The number you entered, %d, is an even number greater than 10." % score1)
else:
print("The number you entered, %d, is an odd number greater than 10." % score1)
else:
if score1 % 2 == 0:
print("The number you entered, %d, is an even number not greater than 10." % score1)
else:
print("The number you entered, %d, is an odd number not greater than 10." % score1)
|
# Name: GetTelegramChatMembers.py
# Author: Klachkov (reserfodium) Valery
from telethon import TelegramClient, errors
from telethon.tl.functions.channels import GetParticipantsRequest
from telethon.tl.types import ChannelParticipantsSearch
import getpass
import sys
# Print usage help
def usage():
print(
"""
Usage: *.exe [-h][--help][filename]
-h, --help      Show script options
[filename]      Write all chat members to this file
""")
# Parse command-line arguments
# For more than two arguments, a parsing library such as argparse is a better fit
def parse_args():
# Arguments were passed
if len(sys.argv) > 1:
# Help requested
if sys.argv[1] in ["-h", "--help"]:
usage()
return None
else:
return sys.argv[1]
else:
return ""
def main():
# --- PRIVATE DATA --- #
api_id = 259668
api_hash = "0189067091f343f6e02bf514cf82921f"
session_name = "session"
chat_name = input("Channel name: ")  # name of the chat to scan (input, not raw_input: the script is Python 3)
# --- STRINGS --- #
enter_phone_number = "Enter phone number: "
enter_sms_code = "Enter SMS code: "
enter_password = "Enter password: "
invalid_phone_number = "Invalid phone number. Check the input and try again"
chat_users_string = "Chat members"
# --- GET THE OUTPUT FILE NAME --- #
parse_result = parse_args()
# The -h (--help) flag was used
if parse_result is None:
return 0
else:
filename = parse_result
# --- SESSION INITIALIZATION --- #
client = TelegramClient(session_name, api_id, api_hash)
# Session initialization
# Loop until the user enters valid credentials
while True:
try:
client.start(phone=lambda: input(enter_phone_number),
password=lambda: getpass.getpass(enter_password),
code_callback=lambda: input(enter_sms_code))
break
# Invalid phone number format
except errors.rpc_error_list.PhoneNumberInvalidError:
print(invalid_phone_number, end="\n\n")
# --- FETCH THE CHAT MEMBER LIST --- #
offset = 0
limit = 100
all_users = []
while True:
# Fetch up to `limit` users from the total, starting at `offset`
users = client(GetParticipantsRequest(
chat_name, ChannelParticipantsSearch(''), offset, limit,
hash=0
))
# All users have been fetched
if not users.users:
break
# Append the batch to all_users and advance the offset by the batch length
all_users.extend(users.users)
offset += len(users.users)
# --- COLLECT THE MEMBERS' USERNAMES --- #
# Produces a sorted list of the usernames that are not None
names = sorted([user.username for user in all_users if user.username is not None])
# --- OUTPUT --- #
# All users, one per line
printable_names = '\n'.join(str(p) for p in names)
if filename == "":
# Print the header and the names
print(chat_users_string + " @" + chat_name, end="\n\n")
print(printable_names)
else:
# Write to file (the context manager closes it automatically)
with open(filename, "w") as file:
file.write(printable_names)
# OK!
return 0
# Entry point
if __name__ == "__main__":
main()  # sys.exit(main())
|
import requests
from django.conf.urls import url
from django.contrib import admin
from django.shortcuts import redirect
from nested_inline.admin import NestedStackedInline, NestedModelAdmin
from cmdb.admins import *
from cmdb.models import *
from deploy_manager.models import *
from saltjob.salt_https_api import salt_api_token
from saltjob.salt_token_id import token_id
from saltjob.tasks import scanHostJob
from saltops.settings import SALT_CONN_TYPE, SALT_HTTP_URL, SALT_REST_URL
# Use admin.site.register: bare admin.register() is a decorator factory and
# registers nothing when called like this
admin.site.register(Host, HostAdmin)
admin.site.register(Cabinet, CabinetAdmin)
admin.site.register(IDCLevel, IDCLevelAdmin)
admin.site.register(ISP, ISPAdmin)
admin.site.register(IDC, IDCAdmin)
admin.site.register(Rack, RackAdmin)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
'''
Default configurations of model train and test
'''
#####
LOG_DIR = 'result_exp' # where checkpoints, logs are saved
RUN_NAME = 'hl_test1' # identifier of the experiment
train_sample_num = 256133
gpu_ids = '0,1,2,3'
gpu_num = 4
multi_thread = 6  # number of data-loading threads --> $ lscpu
prefetch_capacity = 5
epoch_num = 20 #50
batch_size = 64
#####
MODEL_CONFIG = {
'embed_config': {
'init_method': 'kaiming_normal',
'use_bn': True,
'bn_scale': True,
'bn_momentum': 0.05,
'bn_epsilon': 1e-6,
'embedding_feature_num': 256,
'weight_decay': 5e-4,
'net_choose': 'alex',  # select backbone type
},
}
TRAIN_CONFIG = {
'train_dir': os.path.join(LOG_DIR, RUN_NAME),
'config_saver_dir': os.path.join(LOG_DIR, RUN_NAME, 'config_json'),
'checkpoint_dir': os.path.join(LOG_DIR, RUN_NAME, 'checkpoints'),
'log_dir': os.path.join(LOG_DIR, RUN_NAME, 'log'),#save for tensorboard
'gpu_select': gpu_ids,  # which GPUs to train on, single: '0' or multi: '0,2,4'
'seed': 123, # fix seed for reproducing experiments
# config of input train and validate data
'train_data_config': {
'img_label_list_path': 'data_txt/train_list.txt',
'preprocessing_name': 'data_argu1',  # data augmentation applied to training data
'num_examples_per_epoch': train_sample_num,  # total number of training samples
'epoch': epoch_num,  # number of training epochs
'batch_size': batch_size,
'prefetch_threads': multi_thread,  # multi-threaded data loading
'prefetch_capacity': prefetch_capacity*gpu_num,  # prefetch m batches
},
'validation_data_config': {
'img_label_list_path': 'data_txt/validate_list.txt',
'preprocessing_name': 'None',  # no data augmentation for validation data
'batch_size': batch_size,
'prefetch_threads': 1,  # single-threaded loading is enough for validation
'prefetch_capacity': prefetch_capacity*gpu_num,  # prefetch m batches
},
# Optimizer for training the model.
'optimizer_config': {'optimizer': 'MOMENTUM', # SGD / MOMENTUM / Adam are supported
'momentum': 0.9,
'use_nesterov': False, },
# Learning rate configs
'lr_config': {'policy': 'exponential',# piecewise_constant / exponential / cosine
'initial_lr': 0.01,
'num_epochs_per_decay': 1,
'lr_decay_factor': 0.8685113737513527,
'staircase': True, },
# Frequency at which loss and global step are logged
'log_every_n_steps': 10,
# Update tensorboard-summary every n steps
'tensorboard_summary_every_n_steps': 100,
# Frequency to save model
'save_model_every_n_step': train_sample_num // (batch_size*gpu_num), # save model every epoch
# How many model checkpoints to keep. No limit if None.
'max_checkpoints_to_keep': 30,# save last 30 epochs
}
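# With the 'exponential' policy above, the learning rate after k decay periods is
# initial_lr * lr_decay_factor**k, e.g. 0.01 * 0.8685113737513527**19 ≈ 6.9e-4
# by the final epoch.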
|
# This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <aleksandar.erkalovic@sourcefabric.org>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^$', 'booki.account.views.view_accounts', name='view_accounts'),
url(r'^signin/$', 'booki.account.views.signin', name='signin'),
url(r'^login/$', 'booki.account.views.signin', name='login'),
url(r'^forgot_password/$', 'booki.account.views.forgotpassword', name='forgotpassword'),
url(r'^forgot_password/enter/$', 'booki.account.views.forgotpasswordenter', name='forgotpasswordenter'),
url(r'^signout/$', 'booki.account.views.signout', name='signout'),
# url(r'^register/$', 'booki.account.views.register', name='register'),
# Username
# Letters, digits and @/./+/-/_ only.
# For now, even space.
url(r'^(?P<username>[\w\d\@\.\+\-\_\s]+)/$', 'booki.account.views.view_profile', name='view_profile'),
url(r'^(?P<username>[\w\d\@\.\+\-\_\s]+)/my_books/$', 'booki.account.views.my_books', name='my_books'),
url(r'^(?P<username>[\w\d\@\.\+\-\_\s]+)/my_groups/$', 'booki.account.views.my_groups', name='my_groups'),
url(r'^(?P<username>[\w\d\@\.\+\-\_\s]+)/my_people/$', 'booki.account.views.my_people', name='my_people'),
url(r'^(?P<username>[\w\d\@\.\+\-\_\s]+)/_create_book/$', 'booki.account.views.create_book', name='create_book'),
url(r'^(?P<username>[\w\d\@\.\+\-\_\s]+)/_create_group/$', 'booki.account.views.create_group', name='create_group'),
url(r'^(?P<username>[\w\d\@\.\+\-\_\s]+)/_import_book/$', 'booki.account.views.import_book', name='import_book'),
url(r'^(?P<username>[\w\d\@\.\+\-\_\s]+)/_save_settings/$', 'booki.account.views.save_settings', name='save_settings')
)
|
from odoo import models, fields, api
from odoo.exceptions import ValidationError
from .belonging import CATEGORIES
from Crypto.PublicKey import RSA
class User(models.Model):
_inherit = "res.users"
rrn = fields.Char('RRN Code')
req_categ = fields.Selection(CATEGORIES, 'Desired category')
req_price = fields.Float('Desired maximum price')
req_sup = fields.Float('Desired minimum superficy')
req_buy = fields.Boolean('Requesting to buy?')
req_fur = fields.Boolean('Requesting furnished places?')
cs_priv_key = fields.Char('Casalta WS private key')
cs_pub_key = fields.Char('Casalta WS public key')
cs_cert = fields.Char('Casalta WS certificate')
@api.model
def create(self, vals):
key = RSA.generate(2048)
vals.update({
'cs_priv_key': key.exportKey('PEM'),
'cs_pub_key': key.publickey().exportKey('OpenSSH')
})
resp = self.env['website'].browse(1).cs_ws_new_account(vals.get('eth_account', False), vals['cs_pub_key'])[0][0]
if 'certificate' in resp:
vals.update({
'cs_cert': resp['certificate']
})
return super(User, self).create(vals)
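# Note: with pycryptodome, RSA exportKey() returns bytes while Char fields store
# text, so a .decode() may be needed depending on the installed Crypto library.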
|
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0]:
return False
m, n = len(matrix), len(matrix[0])
def find(target, left, right):
# // keeps the flat-index arithmetic integral on both Python 2 and 3
if matrix[left // n][left % n] == target or matrix[right // n][right % n] == target:
return True
elif right - left == 1:
return False
mid = (left + right) // 2
if matrix[mid // n][mid % n] > target:
if matrix[left // n][left % n] > target:
return False
else:
return find(target, left, mid)
else:
if matrix[right // n][right % n] < target:
return False
else:
return find(target, mid, right)
return find(target, 0, m * n - 1)
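# Example: Solution().searchMatrix([[1, 3, 5], [7, 9, 11]], 9) -> True
# The matrix is treated as one sorted array of m*n values and binary-searched via
# the flat index (row = i // n, col = i % n).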
|
import tkinter as tk
from typing import List, Tuple
from unittest import mock
from picpick import widgets
from picpick.model import Tag
class FileList(widgets.FileList):
def __init__(self, master):
super().__init__(master)
self._callback = mock.Mock()
self.bind('<<FileListSelect>>', lambda _: self._callback())
def select_event_generated(self) -> bool:
assert self._callback.call_count <= 1
result = self._callback.called
self._callback.reset_mock()
return result
@property
def displayed(self) -> List[str]:
return [self._tree.item(iid)['text'] for iid in self._tree.get_children()]
class TagList(widgets.TagList):
@property
def disabled(self) -> bool:
return all(cb['state'] == tk.DISABLED for cb in self._checkboxes)
def toggle(self, tag: Tag):
index = self._tags.index(tag)
checkbox = self._checkboxes[index]
checkbox.invoke()
@property
def displayed(self) -> List[Tuple[str, bool]]:
return [
(tag.name, variable.get())
for tag, variable in zip(self._tags, self._checked_variables)
]
def test_file_list(image_factory):
file_list = FileList(None)
assert file_list.displayed == []
assert file_list.selected is None
a = image_factory('a.jpg')
b = image_factory('b.jpg')
c = image_factory('c.jpg')
file_list.set_images([a, b, c])
assert file_list.displayed == ['a.jpg', 'b.jpg', 'c.jpg']
assert file_list.selected is None
assert not file_list.select_event_generated()
file_list.select(a)
file_list.update()
assert file_list.selected == a
assert file_list.select_event_generated()
assert not file_list.select_event_generated()
file_list.select(None)
file_list.update()
assert file_list.selected is None
assert file_list.select_event_generated()
file_list.select(b)
file_list.update()
assert file_list.selected == b
assert file_list.select_event_generated()
assert not file_list.select_event_generated()
file_list.set_images([a, c])
file_list.update()
assert file_list.displayed == ['a.jpg', 'c.jpg']
assert file_list.selected is None
assert file_list.select_event_generated()
file_list.select(None)
file_list.update()
assert not file_list.select_event_generated()
def test_tag_list(image_factory):
callback = mock.Mock()
tag_list = TagList(None, callback=callback)
assert tag_list.displayed == []
assert tag_list.disabled
red = Tag(name='red')
blue = Tag(name='blue')
tag_list.set_tags([red, blue])
assert tag_list.displayed == [('red', False), ('blue', False)]
assert tag_list.disabled
a = image_factory('a.jpg')
b = image_factory('b.jpg')
a.tags.add(red)
b.tags.add(blue)
tag_list.set_current_image(a)
assert tag_list.displayed == [('red', True), ('blue', False)]
assert not tag_list.disabled
callback.assert_not_called()
tag_list.set_current_image(b)
assert tag_list.displayed == [('red', False), ('blue', True)]
assert not tag_list.disabled
callback.assert_not_called()
tag_list.toggle(red)
assert tag_list.displayed == [('red', True), ('blue', True)]
callback.assert_called_once_with(red, True)
callback.reset_mock()
tag_list.toggle(red)
assert tag_list.displayed == [('red', False), ('blue', True)]
callback.assert_called_once_with(red, False)
callback.reset_mock()
|
import torch
import torch.nn as nn
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class Vgg(nn.Module):
def __init__(self):
super(Vgg, self).__init__()
def _make_vgg(self, batch_norm=False):
layers = []
in_channels = 3
for v in cfg['D']:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def init_weights(self, model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def load_weights(self, model_path=None):
model_dict = self.state_dict()
print('loading model from {}'.format(model_path))
try:
pretrained_dict = torch.load(model_path)
from collections import OrderedDict
tmp = OrderedDict()
for k, v in pretrained_dict.items():
# print(k, v.shape)
if k in model_dict:
tmp[k] = v
elif 'module' in k:  # strip the DataParallel 'module.' prefix (multi-GPU checkpoint)
t_k = k[k.find('.')+1:]
tmp[t_k] = v
model_dict.update(tmp)
self.load_state_dict(model_dict)
except Exception:
print('loading model failed, {} may not exist'.format(model_path))
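# Usage sketch: build the VGG-16 ('D') feature extractor and initialize it.
#   model = Vgg()
#   features = model._make_vgg(batch_norm=True)
#   model.init_weights(features)
# Note: load_weights() fills self.state_dict(), so it is meant for subclasses that
# register the layers returned by _make_vgg as their own modules.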
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 23 21:24:10 2019
@author: dabing
"""
import numpy as np
import csv
import loadData
from RBM import rbm
# -------------- setup ---------------
# ----- load data -----
allData = loadData.allData
allFeatures = allData[:,0:77]
contentFeatures = allData[:,0:34]
trafficFeatures = allData[:,34:77]
# ----- paths for saving the weight parameters -----
#wsavePath = 'E:/workspace for python/Experiment/multimodalExperiment/CICIDS2017/RBMs/contentRBMPara/34_30_w.csv'
#b1savePath = 'E:/workspace for python/Experiment/multimodalExperiment/CICIDS2017/RBMs/contentRBMPara/34_30_b1.csv'
#b2savePath = 'E:/workspace for python/Experiment/multimodalExperiment/CICIDS2017/RBMs/contentRBMPara/34_30_b2.csv'
#wsavePath = 'E:/workspace for python/Experiment/multimodalExperiment/CICIDS2017/RBMs/trafficRBMPara/43_40_w.csv'
#b1savePath = 'E:/workspace for python/Experiment/multimodalExperiment/CICIDS2017/RBMs/trafficRBMPara/43_40_b1.csv'
#b2savePath = 'E:/workspace for python/Experiment/multimodalExperiment/CICIDS2017/RBMs/trafficRBMPara/43_40_b2.csv'
wsavePath = 'E:/workspace for python/Experiment/multimodalExperiment/CICIDS2017/RBMs/noModalityRBMPara/77_40_w.csv'
b1savePath = 'E:/workspace for python/Experiment/multimodalExperiment/CICIDS2017/RBMs/noModalityRBMPara/77_40_b1.csv'
b2savePath = 'E:/workspace for python/Experiment/multimodalExperiment/CICIDS2017/RBMs/noModalityRBMPara/77_40_b2.csv'
#def writeCsv(savePath,data):
# file = open(savePath, 'w', newline='')
# csvWriter = csv.writer(file)
# count = 0
# for row in data:
# temp_row=np.array(row)
# csvWriter.writerow(temp_row)
# count +=1
# file.close()
no_GBRBM = rbm(77,40,learning_rate=0.001,momentum=0.8,rbm_type='gbrbm',relu_hidden = True)
no_GBRBM.plot=True
w, b1, b2= no_GBRBM.pretrain(allFeatures,batch_size=100,n_epoches=100)
#basic_GBRBM = rbm(90,70,learning_rate=0.001,momentum=0.8,rbm_type='gbrbm',relu_hidden = True)
#basic_GBRBM.plot=True
#w, b1, b2= basic_GBRBM.pretrain(basicFeatures,batch_size=100,n_epoches=100)
#content_GBRBM = rbm(13,10,learning_rate=0.001,momentum=0.8,rbm_type='gbrbm',relu_hidden = True)
#content_GBRBM.plot=True
#w, b1, b2= content_GBRBM.pretrain(contentFeatures,batch_size=100,n_epoches=100)
#traffic_GBRBM = rbm(19,15,learning_rate=0.001,momentum=0.8,rbm_type='gbrbm',relu_hidden = True)
#traffic_GBRBM.plot=True
#w, b1, b2 = traffic_GBRBM.pretrain(trafficFeatures,batch_size=100,n_epoches=100)
np.savetxt(wsavePath, w, delimiter=",")
np.savetxt(b1savePath, b1, delimiter=",")
np.savetxt(b2savePath, b2, delimiter=",")
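#----- reload sketch: the saved CSVs round-trip via np.loadtxt -----
# (assumes the files above were just written; shapes follow the 77->40 RBM)
w_loaded = np.loadtxt(wsavePath, delimiter=",")
b1_loaded = np.loadtxt(b1savePath, delimiter=",")
b2_loaded = np.loadtxt(b2savePath, delimiter=",")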
|
# %load q01_plot_deliveries_by_team/build.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
ipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)
# Solution
def plot_deliveries_by_team():
    plt.figure()
    # count deliveries per batting team and plot them as a bar chart
    counts = ipl_df['batting_team'].value_counts()
    plt.bar(counts.index, counts.values)
    plt.xlabel('batting_team', fontsize=5)
    plt.ylabel('count_of_deli', fontsize=5)
    plt.show()
plot_deliveries_by_team()
|
"""
Instrument widget
"""
# Standard library modules.
# Third party modules.
# Local modules.
from pyhmsa_gui.spec.condition.condition import _ConditionWidget
from pyhmsa_gui.util.parameter import TextAttributeLineEdit
from pyhmsa.spec.condition.instrument import Instrument
# Globals and constants variables.
class InstrumentWidget(_ConditionWidget):
def __init__(self, parent=None):
_ConditionWidget.__init__(self, Instrument, parent)
def _init_ui(self):
# Controls
self._txt_manufacturer = TextAttributeLineEdit(self.CLASS.manufacturer)
self._txt_model = TextAttributeLineEdit(self.CLASS.model)
self._txt_serial_number = TextAttributeLineEdit(self.CLASS.serial_number)
# Layouts
layout = _ConditionWidget._init_ui(self)
layout.addRow("<i>Manufacturer</i>", self._txt_manufacturer)
layout.addRow("<i>Model</i>", self._txt_model)
layout.addRow("Serial number", self._txt_serial_number)
# Signals
self._txt_manufacturer.textEdited.connect(self.edited)
self._txt_model.textEdited.connect(self.edited)
self._txt_serial_number.textEdited.connect(self.edited)
return layout
def _create_parameter(self):
return self.CLASS('manufacturer', 'model')
def parameter(self, parameter=None):
parameter = _ConditionWidget.parameter(self, parameter)
parameter.manufacturer = self._txt_manufacturer.text()
parameter.model = self._txt_model.text()
parameter.serial_number = self._txt_serial_number.text()
return parameter
def setParameter(self, condition):
_ConditionWidget.setParameter(self, condition)
self._txt_manufacturer.setText(condition.manufacturer)
self._txt_model.setText(condition.model)
self._txt_serial_number.setText(condition.serial_number)
def setReadOnly(self, state):
_ConditionWidget.setReadOnly(self, state)
self._txt_manufacturer.setReadOnly(state)
self._txt_model.setReadOnly(state)
self._txt_serial_number.setReadOnly(state)
def isReadOnly(self):
return _ConditionWidget.isReadOnly(self) and \
self._txt_manufacturer.isReadOnly() and \
self._txt_model.isReadOnly() and \
self._txt_serial_number.isReadOnly()
def hasAcceptableInput(self):
return _ConditionWidget.hasAcceptableInput(self) and \
self._txt_manufacturer.hasAcceptableInput() and \
self._txt_model.hasAcceptableInput() and \
self._txt_serial_number.hasAcceptableInput()
|
#!/usr/bin/python
import pygame
class tzone(pygame.sprite.Sprite):
def __init__(self,x, y, width, height, color):
        super().__init__()  # initialize the base Sprite (required)
        self.image = pygame.Surface([width, height])  # create the surface
        self.image.fill(color)  # fill it with the given color
self.rect = self.image.get_rect()
self.rect.y = y
        self.rect.x = x
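# A small usage sketch (hedged: the window size and color are invented):
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    zone = tzone(10, 10, 100, 50, (255, 0, 0))
    pygame.sprite.Group(zone).draw(screen)
    pygame.display.flip() |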
import matplotlib.pyplot as plt
from DataLoader import RetinaDataset
# Rotate and RandomCrop are assumed importable from the same module;
# the exact import location is a guess.
from DataLoader import Rotate, RandomCrop
#Show cropping and rotation with low probability
test = RetinaDataset(file_path="/data/targets", transforms=[Rotate(p=0.5), RandomCrop(p=0.5, height=300, width=300)])
sample = test[0]
print(sample['image'])
plt.imshow(sample['image'], cmap='gray')
plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 2 17:18:14 2017
@author: oliver.cairns
"""
import csv
data = []
with open("input_2.txt", newline="") as inputfile:
for row in csv.reader(inputfile):
data.append(row)
# data = [["2x3x4"]]
# data = [["1x1x10"]]
clean_data = [[int(y) for y in x[0].split("x")] for x in data]
def pres_vol(h, w, l):
    # despite the name, this returns the wrapping-paper *area*: full surface
    # area plus slack (the area of the smallest face, i.e. volume / longest edge)
    main = (2 * l * w) + (2 * w * h) + (2 * h * l)
    slack = h * w * l / max([h, w, l])
    return int(main + slack)
def ribbon_len(h, w, l):
main = 2 * h + 2 * w + 2 * l - 2 * max([h, w, l])
bow = h * w * l
return main + bow
area_total = 0
len_total = 0
for pres in clean_data:
area_total += pres_vol(*pres)
len_total += ribbon_len(*pres)
print("1.a", area_total)
print("1.b", len_total)
|
from class1 import course1
dayo = course1("temi",32)
biodun = course1("lola",45)
print(dayo.name)
|
# import requests
# import json
#
#
# send_url = "http://api.ipstack.com/check?access_key=f8847d936deb1c40496b1d6dd89e51b1"
# geo_req = requests.get(send_url)
# geo_json = json.loads(geo_req.text)
# latitude = geo_json['latitude']
# longitude = geo_json['longitude']
# city1 = geo_json['city']
#
#
# print(city1)
import random
res = random.randint(100000,999999)
print(res) |
from Q1_30.Q9 import isPalindrome
__author__ = 'Varun Nayyar'
__date__ = "24/02/13 12:33 AM"
__copyright__ = "Company Confidential. Copyright (c) Cochlear Ltd 2012."
if __name__ == "__main__":
palSum = 0
for i in xrange(int(1e6)):
if isPalindrome(i) and isPalindrome(bin(i)[2:].lstrip("0")):
print i, (bin(i)[2:].lstrip("0"))
palSum += i
print palSum |
"""Commands to facilitate conversion to PDF."""
from copy import copy
from pathlib import Path
import asyncio
from .utils import _error
def html_to_pdf(html_file, pdf_file):
"""
Convert arbitrary HTML file to PDF using pyppeteer.
Parameters
----------
html_file : str
A path to an HTML file to convert to PDF
pdf_file : str
A path to an output PDF file that will be created
"""
asyncio.get_event_loop().run_until_complete(_html_to_pdf(html_file, pdf_file))
async def _html_to_pdf(html_file, pdf_file):
try:
from pyppeteer import launch
except ImportError:
_error(
"Generating PDF from book HTML requires the pyppeteer package. "
"Install it first.",
ImportError,
)
browser = await launch(args=["--no-sandbox"])
page = await browser.newPage()
# Absolute path is needed
html_file = Path(html_file).resolve()
# Waiting for networkidle0 seems to let mathjax render
await page.goto(f"file:///{html_file}", {"waitUntil": ["networkidle0"]})
# Give it *some* margins to make it look a little prettier
# I just made these up
page_margins = {"left": "0in", "right": "0in", "top": ".5in", "bottom": ".5in"}
await page.pdf({"path": pdf_file, "margin": page_margins})
await browser.close()
def update_latex_document(latex_document: tuple, updates: dict):
"""Apply updates from _config.yml to a latex_document tuple"""
names = (
"startdocname",
"targetname",
"title",
"author",
"theme",
"toctree_only",
)
updated = list(copy(latex_document))
for i, (_, name) in enumerate(zip(latex_document, names)):
if name in updates:
updated[i] = updates[name]
return tuple(updated)
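# Example of update_latex_document (a sketch; the tuple follows the `names`
# order above, as in Sphinx's latex_documents configuration):
# doc = ("index", "book.tex", "My Book", "Author", "manual", False)
# update_latex_document(doc, {"title": "New Title"})
# -> ("index", "book.tex", "New Title", "Author", "manual", False)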
|
import functools
@functools.lru_cache(None)
def decode(num_str):
    if not num_str: return 1
    if num_str[0] == '0': return 0
    if len(num_str) == 1: return 1
    # take one digit, plus two digits when they form a valid code 10..26
    # (the original unconditionally branched on pairs like '27', overcounting)
    if 10 <= int(num_str[:2]) <= 26:
        return decode(num_str[1:]) + decode(num_str[2:])
    return decode(num_str[1:])
@functools.lru_cache(None)
def numDecodings(s: str):
if s[0] == '0': return 0
if len(s) == 1:
return 1 if s[0] != '0' else 0
if len(s) == 2:
if s[-1] != '0':
return 2 if int(s) <= 26 else 1
else:
return 1 if 0 < int(s[0]) <= 2 else 0
if int(s[0]) > 2:
return numDecodings(s[1:])
else:
if 1 <= int(s[:2]) <= 26:
return numDecodings(s[1:]) + numDecodings(s[2:])
else:
return numDecodings(s[1:])
# num_str = '31717126241541717'
num_str = input()
print(decode(num_str))
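# Both implementations agree on small cases, e.g.
# decode('226') == numDecodings('226') == 3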
|
import csrgraph as cg
from nodevectors.embedders import BaseNodeEmbedder
class GGVec(BaseNodeEmbedder):
def __init__(self,
n_components=32,
order=1,
learning_rate=0.1, max_loss=10.,
tol="auto", tol_samples=30,
exponent=0.33,
threads=0,
negative_ratio=0.15,
max_epoch=350,
verbose=False):
"""
GGVec: Fast global first (and higher) order local embeddings.
This algorithm directly minimizes related nodes' distances.
It uses a relaxation pass (negative sample) + contraction pass (loss minimization)
To find stable embeddings based on the minimal dot product of edge weights.
Parameters:
-------------
n_components (int):
Number of individual embedding dimensions.
order : int >= 1
Meta-level of the embeddings. Improves link prediction performance.
Setting this higher than 1 ~quadratically slows down algorithm
Order = 1 directly optimizes the graph.
Order = 2 optimizes graph plus neighbours of neighbours
Order = 3 optimizes up to 3rd order edges
(and so on)
Higher order edges are automatically weighed using GraRep-style graph formation
Eg. the higher-order graph is from stable high-order random walk distribution.
negative_ratio : float in [0, 1]
Negative sampling ratio.
Setting this higher will do more negative sampling.
This is slower, but can lead to higher quality embeddings.
exponent : float
Weighing exponent in loss function.
Having this lower reduces effect of large edge weights.
tol : float in [0, 1] or "auto"
Optimization early stopping criterion.
Stops average loss < tol for tol_samples epochs.
"auto" sets tol as a function of learning_rate
tol_samples : int
Optimization early stopping criterion.
This is the number of epochs to sample for loss stability.
Once loss is stable over this number of epochs we stop early.
        max_epoch : int
            Stopping criterion.
learning_rate : float in [0, 1]
Optimization learning rate.
max_loss : float
Loss value ceiling for numerical stability.
"""
self.n_components = n_components
self.tol = tol
self.order=order
self.max_epoch = max_epoch
self.learning_rate = learning_rate
self.exponent = exponent
self.max_loss = max_loss
self.tol_samples = tol_samples
self.threads = threads
self.negative_ratio = negative_ratio
self.verbose = verbose
    def fit(self, graph):
        """
        NOTE: Currently only support str or int as node name for graph
        Parameters
        ----------
        nxGraph : graph data
            Graph to embed
            Can be any graph type that's supported by CSRGraph library
            (NetworkX, numpy 2d array, scipy CSR matrix, CSR matrix components)
        """
        # identical to fit_transform, discarding the returned vectors
        self.fit_transform(graph)
def fit_transform(self, graph):
"""
NOTE: Currently only support str or int as node name for graph
Parameters
----------
nxGraph : graph data
Graph to embed
Can be any graph type that's supported by CSRGraph library
(NetworkX, numpy 2d array, scipy CSR matrix, CSR matrix components)
"""
G = cg.csrgraph(graph, threads=self.threads)
vectors = G.ggvec(
n_components=self.n_components,
order=self.order,
exponent=self.exponent,
tol=self.tol, max_epoch=self.max_epoch,
learning_rate=self.learning_rate,
tol_samples=self.tol_samples,
max_loss=self.max_loss,
negative_ratio=self.negative_ratio,
verbose=self.verbose)
self.model = dict(zip(G.nodes(), vectors))
return vectors
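# Usage sketch (commented out; assumes networkx is installed -- GGVec accepts
# any CSRGraph-compatible input, per the docstrings above):
# import networkx as nx
# embedder = GGVec(n_components=16)
# vectors = embedder.fit_transform(nx.karate_club_graph())
# vectors[0]  # embedding vector for the first node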
|
# from django.http import HttpResponse
# from django.views.decorators.csrf import csrf_exempt
# from rest_framework.renderers import JSONRenderer
# from rest_framework.parsers import JSONParser
# from rest_framework.decorators import api_view
# from django.http import Http404
# from rest_framework.views import APIView
# from rest_framework.response import Response
# from rest_framework import status
from ..models import Mark
from .serializers import MarkSerializer
from rest_framework import generics
from rest_framework.permissions import AllowAny
class MarkList(generics.ListCreateAPIView):
queryset = Mark.objects.all()
serializer_class = MarkSerializer
permission_classes = (AllowAny,)
class MarkDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Mark.objects.all()
serializer_class = MarkSerializer
permission_classes = (AllowAny,)
# class MarkList(APIView):
# """
# List all marks, or create a new mark.
# """
# def get (self, request, format=None):
# marks = Mark.objects.all()
# serializer = MarkSerializer(marks, many=True)
# return Response(serializer.data)
#     def post(self, request, format=None):
# serializer = MarkSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# class MarkDetail(APIView):
# """
# Retrieve, update or delete a mark instance.
# """
# def get_object(self, pk):
# try:
# return Mark.objects.get(pk=pk)
# except Mark.DoesNotExist:
# raise Http404
# def get(self, request, pk, format=None):
# mark = self.get_object(pk)
# serializer = MarkSerializer(mark)
# return Response(serializer.data)
# def put(self, request, pk, format=None):
# mark = self.get_object(pk)
# serializer = MarkSerializer(mark, data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# def delete(self, request, pk, format=None):
# mark = self.get_object(pk)
# mark.delete()
# return Response(status=status.HTTP_204_NO_CONTENT)
# class JSONResponse(HttpResponse):
# """
# An HttpResponse that renders its content into JSON.
# """
# def __init__(self, data, **kwargs):
# content = JSONRenderer().render(data)
# kwargs['content_type'] = 'application/json'
# super(JSONResponse, self).__init__(content, **kwargs)
# # @csrf_exempt
# @api_view(['GET','POST'])
# def mark_list(request, format=None):
# """
# List all names marks, or create a new Mark.
# """
# if request.method == 'GET':
# marks = Mark.objects.all()
# serializer = MarkSerializer(marks, many=True)
# return Response(serializer.data)
# # return JSONResponse(serializer.data)
# elif request.method == 'POST':
# # data = JSONParser().parse(request)
# # serializer = MarkSerializer(data=data)
# serializer = MarkSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# # return JSONResponse(serializer.data, status=201)
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# # return JSONResponse(serializer.errors, status=400)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# # @csrf_exempt
# @api_view(['GET', 'PUT', 'DELETE'])
# def mark_detail(request, pk, format=None):
# """
# Retrieve, update or delete a name mark
# """
# try:
# mark = Mark.objects.get(pk=pk)
# except Mark.DoesNotExist:
# # return HttpResponse(status=404)
# return Response(status=status.HTTP_404_NOT_FOUND)
# if request.method == 'GET':
# serializer = MarkSerializer(mark)
# # return JSONResponse(serializer.data)
# return Response(serializer.data)
# elif request.method == 'PUT':
# # data = JSONParser().parse(request)
# # serializer = MarkSerializer(mark, data=data)
# serializer = MarkSerializer(mark, data=request.data)
# if serializer.is_valid():
# serializer.save()
# # return JSONResponse(serializer.data)
# return Response(serializer.data)
#         # return JSONResponse(serializer.errors, status=400)
#         return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# elif request.method == 'DELETE':
# mark.delete()
# # return HttpResponse(status=204)
# return Response(status=status.HTTP_204_NO_CONTENT)
# class MarksListView(generics.ListCreateAPIView):
# queryset = Mark.objects.all()
# serializer_class = MarkSerializer
|
#!/usr/bin/env python3
# coding=utf-8
"""Model dec """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
import json
import time
import codecs
import logging
import _pickle as pkl
from tqdm import tqdm
from gen_ner import read_dataset
from build_name_dict import build_name_dict
from candidate_entity_generation import gen_candidate_entity
from subject_id2text import hash_id2abstract
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--name_dict_path", type=str, default="./data/name_dict.pkl")
parser.add_argument("--kb_data_path", type=str, default="./original_data/kb_data")
args = parser.parse_args()
def gen_disambi(infile, outfile):
id2abstract_pkl = "./data/id2abstract.pkl"
if not os.path.exists("./data/disambi/"):
subprocess.getoutput("mkdir ./data/disambi/")
datasets = read_dataset(infile)
if not os.path.exists(id2abstract_pkl):
logging.info("Building id2abstract.pkl...")
start = time.time()
id2abstract = hash_id2abstract("./original_data/kb_data", id2abstract_pkl)
logging.info("Build id2abstract.pkl done!, Total time {} s".format(time.time()-start))
else:
id2abstract = pkl.load(codecs.open("./data/id2abstract.pkl", "rb"))
if not os.path.exists(args.name_dict_path):
logging.info(" The name dictionary does not exist and is being created. ")
build_name_dict(args.kb_data_path, args.name_dict_path)
name_dict = pkl.load(open(args.name_dict_path, "rb"))
outwriter = codecs.open(outfile, "w", "utf-8")
pos_count = 0
neg_count = 0
total_entity = 0
used_lines = 0
max_leng = 0
for data in tqdm(datasets):
        # NOTE: each line is a dict literal; eval is convenient here, but
        # json.loads would be safer for untrusted input
        data = eval(data)
candi_text = data["text"]
for mention in data["mention_data"]:
if mention["kb_id"] == "NIL":
continue
source_entity = mention["mention"]
offset = int(mention["offset"])
candi_offset = (offset, len(source_entity) + offset)
candi_entity = gen_candidate_entity(source_entity, name_dict, mode="exact")
used_lines += 1
total_entity += len(candi_entity)
if not candi_entity:
continue
if len(candi_entity) > 20:
max_leng += 1
# continue
for centity_id in candi_entity:
if centity_id not in id2abstract:
continue
out_line = {"query_entity": source_entity, "query_text": candi_text,
"query_offset": candi_offset}
out_line["candi_entity"], out_line["candi_abstract"] = id2abstract[centity_id]
if centity_id == mention["kb_id"]:
out_line["tag"] = 1
pos_count += 1
else:
out_line["tag"] = 0
neg_count += 1
# out_line["tag"] = 1 if centity_id == mention["kb_id"] else 0
outwriter.write(json.dumps(out_line) + "\n")
logging.info("upper max_length: {}".format(max_leng))
logging.info("Communist sample {}, of which positive {}, negative {} ".format(pos_count + neg_count, pos_count, neg_count))
logging.info("Avg candidate entity length: {}".format(total_entity/used_lines))
def divide_set(infile):
logging.info("Dividing file into train/dev/test...")
train_writer = codecs.open("./data/disambi/train.txt", "w", "utf-8")
dev_writer = codecs.open("./data/disambi/dev.txt", "w", "utf-8")
test_writer = codecs.open("./data/disambi/test.txt", "w", "utf-8")
datasets = read_dataset(infile)
total_line = int(subprocess.getoutput("wc -l {}".format(infile)).split()[0])
# total_line = 300000
logging.info("total_line: {}".format(total_line))
for idx, data in enumerate(datasets):
if idx > total_line:
break
if idx < 0.8 * total_line:
train_writer.write(data)
elif idx < 0.9 * total_line:
dev_writer.write(data)
elif idx < total_line:
test_writer.write(data)
logging.info("Done")
if __name__ == "__main__" :
logging.basicConfig(level = logging.DEBUG,
format = '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')
gen_disambi("./original_data/train_pre.json", "./data/disambi/all.txt", )
divide_set("./data/disambi/all.txt")
|
import uuid
from django.db import models
from django.db.models import Q
from .Media import Media
from .maiofields import FixedCharField
#: Quick way of saying "NULL" for Django models
NULL = {'null': True, 'blank': True}
class Playlist(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
media = models.ManyToManyField(Media)
name = models.CharField(max_length=1024)
tn_path = models.CharField(max_length=1024)
date_added = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
default_order = models.PositiveSmallIntegerField(default=0) # 0 random, 1 descending, 2 ascending
seconds_between = models.FloatField(default=5.0)
caption = models.TextField(**NULL)
class Meta:
ordering = ['-date_modified']
|
import sqlite3
from random import randint
import string
import enum
class Permission(enum.Enum):
NONE = 0
HIGH = 3
MEDIUM = 2
LOW = 1
class Database:
def __init__(self, name="database.db"):
self.name = name
self.db = sqlite3.connect(self.name)
def create_table(self):
sql = f"""
CREATE TABLE codes (
key_number INTEGER PRIMARY KEY,
key_info VARCHAR(200),
permission INT,
active INT,
key_data VARCHAR(40));"""
self.db.cursor().execute(sql)
self.db.commit()
def generate_key_data(self) -> str:
key_data = ""
for x in range(40):
key_data += string.ascii_letters[randint(0, 51)]
return key_data
    def add_key(self, key_info: str, permission: int, active: int, key_data: str):
        sql = "INSERT INTO codes (key_info, permission, active, key_data) VALUES (?, ?, ?, ?)"
        self.db.cursor().execute(sql, (key_info, permission, active, key_data))
        self.db.commit()
    def delete_key(self, key_number: int):
        sql = "DELETE FROM codes WHERE key_number = ?"
        self.db.cursor().execute(sql, (key_number,))
        self.db.commit()
    def revoke_key(self, key_number: int):
        sql = "UPDATE codes SET active = 0 WHERE key_number = ?"
        self.db.cursor().execute(sql, (key_number,))
        self.db.commit()
    def activate_key(self, key_number: int):
        sql = "UPDATE codes SET active = 1 WHERE key_number = ?"
        self.db.cursor().execute(sql, (key_number,))
        self.db.commit()
    def check_key(self, data: str) -> Permission:
        sql = "SELECT * FROM codes WHERE key_data = ?"
        cursor = self.db.cursor()
        cursor.execute(sql, (data,))
        found = cursor.fetchone()
        if found is not None:
            return Permission(found[2])
        else:
            return Permission(0)
def view_all_keys(self):
sql = "SELECT * FROM codes"
cursor = self.db.cursor()
cursor.execute(sql)
found = cursor.fetchall()
return found
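# A minimal usage sketch (assumptions: a fresh database file and an invented
# key description; create_table() must run once before keys are added):
if __name__ == "__main__":
    db = Database()
    db.create_table()
    key = db.generate_key_data()
    db.add_key("admin key", Permission.HIGH.value, 1, key)
    print(db.check_key(key))  # Permission.HIGH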
|
'''
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction (ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.
Example
Given array [3,2,3,1,2], return 1.
'''
class Solution:
def maxProfit(self, prices):
if not prices:
return 0
profit = 0
low = prices[0]
for price in prices:
profit = max(profit, price - low)
low = min(low, price)
return profit
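# Quick check on the example from the prompt:
# Solution().maxProfit([3, 2, 3, 1, 2]) == 1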
'''
Technique: array + single pass (running-best method)
http://www.cnblogs.com/felixfang/p/3644768.html
Stock problem:
Maximizing profit means buying low and selling high.
Scan the price array, subtracting the running historical low from each price to get a candidate profit; if it beats the best profit so far, update the best.
Whenever the price hits a new historical low, update the low.
Since only one transaction is allowed, the total is not accumulated across trades; we simply keep the single most profitable one.
''' |
import pandas as pd
import matplotlib.pylab as pl
import numpy as np
import os
import matplotlib.pyplot as plt
from patsy import dmatrices
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.cross_validation import cross_val_score
def numberofTokens(S):
S=str(S)
words=S.split()
return len(words)
def allCapital(S): #all caps
S=str(S)
words=S.split()
ct=0
for x in words:
if x==x.upper():
ct+=1
return ct
def startCapital(S):
S=str(S)
words=S.split()
ct=0
for x in words:
if x[0]>='A' and x[0]<='Z':
ct+=1
return ct
def numberofinout(S,ans):
S=str(S)
ans=str(ans)
words_S=S.split()
words_ans=ans.split()
ct=0
for w in words_ans:
if w in words_S:
ct+=1
return ct
def getRating(Jdg):
Jdg=str(Jdg)
if Jdg=="Good":
return 1.0
elif Jdg=="Bad":
return -1.0
else:
return 0.5
if __name__ == '__main__':
dtf = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'Train.csv'))
data = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'MindTheGap-1.0-Data.tsv'), header=0, \
delimiter="\t", quoting=3)
print "Data available for a sentence"
print dtf.dtypes
print
print "Data's Structure"
print data.shape
sid=data.SentenceID[0]
'''
for Sid,S,ans,ques,Jdg,Jid in zip(data.SentenceID,data.Sentence,data.Answer,data.Question,data.Judgment,data.JudgeId):
if(sid==Sid):
print ques
'''
i=0
#Token Count Features
token_ct_list=[]
tokdic={}
for Sid,S,ans,ques,Jdg,Jid,Qid in zip(data.SentenceID,data.Sentence,data.Answer,data.Question,data.Judgment,data.JudgeId,data.QuestionID):
if Qid=='None':
continue
if i==0:
tokdic['NUM_TOKENS_IN_ANSWER']=numberofTokens(ans)
tokdic['NUM_TOKENS_IN_SENTENCE']=numberofTokens(S)
tokdic['NUM_RAW_TOKENS_MATCHING_IN_OUT']=numberofinout(S,ans)
tokdic['PERCENT_TOKENS_IN_ANSWER']=float(tokdic['NUM_TOKENS_IN_ANSWER'])/tokdic['NUM_TOKENS_IN_SENTENCE']
tokdic['PERCENT_RAW_TOKENS_MATCHING_IN_OUT']=float(tokdic['NUM_RAW_TOKENS_MATCHING_IN_OUT'])/tokdic['NUM_TOKENS_IN_ANSWER']
tokdic['RATING']=getRating(Jdg)
else:
tokdic['RATING']+=getRating(Jdg)
if i==3:
token_ct_list.append(tokdic)
tokdic={}
i=(i+1)%4
print len(token_ct_list)
dta=pd.DataFrame.from_dict(token_ct_list, orient='columns', dtype=None)
dta.NUM_TOKENS_IN_ANSWER.hist()
plt.title('Histogram of NUM_TOKENS_IN_ANSWER')
plt.xlabel('Token Count')
plt.ylabel('Frequency')
plt.show()
dta['Appropriate'] = (dta.RATING >= 1.5).astype(int)
y, X = dmatrices('Appropriate ~ NUM_TOKENS_IN_ANSWER + NUM_TOKENS_IN_SENTENCE +NUM_RAW_TOKENS_MATCHING_IN_OUT+\
PERCENT_TOKENS_IN_ANSWER+PERCENT_RAW_TOKENS_MATCHING_IN_OUT', dta, return_type="dataframe")
y= np.ravel(y)
model = LogisticRegression(solver='lbfgs',max_iter=500,penalty='l2')
model = model.fit(X, y)
print model.score(X,y)
# examine the coefficients
print pd.DataFrame(zip(X.columns, np.transpose(model.coef_)))
|
"""
Code Challenge
Name:
Operations Function
Filename:
operation.py
Problem Statement:
Write following functions for list operations. Take list as input from the User
Add(), Multiply(), Largest(), Smallest(), Sorting(), Remove_Duplicates(), Print()
Only call Print() function to display the results in the below displayed
format (i.e all the functions must be called inside the print() function and only the Print() is to be called in the main script)
Input:
5,2,6,2,3
Output:
Sum = 18
Multiply = 360
Largest = 6
Smallest = 2
Sorted = [2, 2, 3, 5, 6]
Without Duplicates = [2, 3, 5, 6]
"""
def Add(lst):
total = 0
for i in lst:
total += i
return total
def Multiply(lst):
product = 1
for i in lst:
product *= i
return product
def Largest(lst):
init = lst[0]
for i in lst:
if init < i:
init = i
return init
def Smallest(lst):
init = lst[0]
for i in lst:
if init > i:
init = i
return init
def Sorting(lst):
s = len(lst)
us = True
while us:
us = False
i = 0
while i<s-1:
if lst[i] > lst[i+1]:
t = lst[i]
lst[i] = lst[i+1]
lst[i+1] = t
us = True
i += 1
return lst
def Remove_Duplicates(lst):
n_lst = []
for i in lst:
if i not in n_lst:
n_lst.append(i)
return n_lst
def Print(lst):
    lst = list(lst)
    # labels match the output format given in the problem statement
    print("Sum =", Add(lst))
    print("Multiply =", Multiply(lst))
    print("Largest =", Largest(lst))
    print("Smallest =", Smallest(lst))
    print("Sorted =", Sorting(lst))
    print("Without Duplicates =", Remove_Duplicates(lst))
my_list = input("Enter list: ").split(",")
final_list = []
for i in my_list:
final_list.append(int(i))
Print (final_list)
|
# -*- coding: utf-8 -*-
# image data API
# 20180330
from flask import Flask, Response, jsonify, current_app
import re
from app.model.image import *
from . import api
from flask_uploads import UploadSet, IMAGES, configure_uploads, ALL
from flask import request, Flask, redirect, url_for, render_template
from manage import photos
import time
@api.route("/upload/<foods_sha_id>", methods=['POST', 'GET'])
def upload(foods_sha_id):
result = {"code": 10000, "value": "", "msg": "上传成功"}
print('upload:' + foods_sha_id)
try:
if request.method == 'POST' and 'file' in request.files:
filename = photos.save(request.files['file'], time.strftime("%Y-%m-%d", time.localtime()))
image = Image()
image.path = filename
image.saveDb()
            image.setFoodImg(foods_sha_id)  # save the food/image association
result['value'] = image.sha_id
else:
result['code'] = -10000
result['msg'] = "上传失败"
except Exception as ex:
code, err_message = ex.args
result['code'] = -10000
result['msg'] = err_message
return result
@api.route("/source/<img_sha_id>", methods=['GET'])
def source_img(img_sha_id):
    # fetch the stored image file
image = Image()
path = image.getImg(img_sha_id)
    if path is None:
        return Response(status=404)
img_local_path = "{}/{}".format(current_app.config['UPLOADED_PHOTO_DEST'], path)
print(img_local_path)
img_stream = ''
print(img_local_path)
with open(img_local_path, 'rb') as img_f:
img_stream = img_f.read()
resp = Response(img_stream, mimetype="image/jpeg")
return resp
@api.route("/source/<img_sha_id>", methods=['DELETE'])
def delImage(img_sha_id):
result = {"code": 10000, "value": "", "msg": "删除成功"}
image = Image()
path = image.getImg(img_sha_id)
if path is None:
result["code"] = -10000
result["msg"] = "图片不存在,删除失败"
return result
img_local_path = "{}/{}".format(current_app.config['UPLOADED_PHOTO_DEST'], path)
print(img_local_path)
condition = {'sha_id': img_sha_id}
try:
dbManager.delete('images', condition)
os.remove(img_local_path)
except Exception as ex:
code, err_message = ex.args
result['code'] = -10000
result['msg'] = err_message
return result
@api.route("/banner")
def banner():
    # return the banner image
img_local_path = "{}".format(current_app.config['BANNER_DIR'])
img_stream = ''
print(img_local_path)
with open(img_local_path, 'rb') as img_f:
img_stream = img_f.read()
resp = Response(img_stream, mimetype="image/jpeg")
return resp
@api.route("/image/<imageid>")
def room_image(imageid):
"""
返回房间的图片
:param imageid:
:return:
"""
    if imageid == 'default':  # use the default image
img_local_path = current_app.config['DEFAULT_FOOD_IMG_DIR']
else:
        # NOTE: interpolating imageid into the SQL string is injection-prone;
        # a parameterized query would be safer
        ret = dbManager.exec_sql("select path from image where name='{name}'".format(name=imageid))
img_local_path = "{}/{}".format(current_app.config['ROOM_IMG_DIR'], ret[0]['path'])
img_stream = ''
print(img_local_path)
with open(img_local_path, 'rb') as img_f:
img_stream = img_f.read()
resp = Response(img_stream, mimetype="image/jpeg")
return resp
@api.route("/avatar/<imageid>")
def avatar(imageid):
"""
返回头像
:param imageid:
:return:
"""
if imageid == 'default' or imageid == 'null': # 设置默认图片
img_local_path = current_app.config['DEFAULT_AVATAR_DIR']
else:
img_local_path = "{}/{}".format(current_app.config['AVATAR_DIR'], imageid)
print(img_local_path)
img_stream = ''
with open(img_local_path, 'rb') as img_f:
img_stream = img_f.read()
resp = Response(img_stream, mimetype="image/jpeg")
return resp
|
# Generated by Django 3.2.7 on 2021-09-09 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_contact_emails'),
]
operations = [
migrations.AlterModelOptions(
name='contact',
options={'ordering': ['-firstName']},
),
migrations.RemoveField(
model_name='contact',
name='phones',
),
migrations.AddField(
model_name='contact',
name='phone',
field=models.CharField(default='', max_length=200),
),
migrations.AlterField(
model_name='contact',
name='emails',
field=models.CharField(default='', max_length=200),
),
]
|
from queue import Queue
import threading
from .http_common import *
import simple_http_client
import utils
def pack_headers(headers):
out_list = []
for k, v in headers.items():
if isinstance(v, int):
out_list.append(b'%s: %d\r\n' % (utils.to_bytes(k), v))
else:
out_list.append(b'%s: %s\r\n' % (utils.to_bytes(k), utils.to_bytes(v)))
return b''.join(out_list)
class Http1Worker(HttpWorker):
version = "1.1"
def __init__(self, logger, ip_manager, config, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data):
super(Http1Worker, self).__init__(logger, ip_manager, config, ssl_sock,
close_cb, retry_task_cb, idle_cb, log_debug_data)
self.task = None
self.request_onway = False
self.transfered_size = 0
self.trace_time = []
self.trace_time.append([ssl_sock.create_time, "connect"])
self.record_active("init")
self.task_queue = Queue()
threading.Thread(target=self.work_loop, name="%s_http1_work_loop" % self.logger.name).start()
self.idle_cb()
if self.config.http1_first_ping_wait or \
self.config.http1_ping_interval or \
self.config.http1_idle_time:
threading.Thread(target=self.keep_alive_thread, name="%s_http1_keep_alive" % self.logger.name).start()
def record_active(self, active=""):
self.trace_time.append([time.time(), active])
# self.logger.debug("%s stat:%s", self.ip, active)
def get_trace(self):
out_list = []
last_time = self.trace_time[0][0]
for t, stat in self.trace_time:
time_diff = int((t - last_time) * 1000)
last_time = t
out_list.append(" %d:%s" % (time_diff, stat))
out_list.append(":%d" % ((time.time() - last_time) * 1000))
out_list.append(" processed:%d" % self.processed_tasks)
out_list.append(" transfered:%d" % self.transfered_size)
out_list.append(" sni:%s" % self.ssl_sock.sni)
return ",".join(out_list)
def get_rtt_rate(self):
return self.rtt + 100
def request(self, task):
self.accept_task = False
self.task = task
self.task_queue.put(task)
def keep_alive_thread(self):
while time.time() - self.ssl_sock.create_time < self.config.http1_first_ping_wait:
if not self.keep_running:
self.close("exit ")
return
time.sleep(3)
if self.config.http1_first_ping_wait and self.processed_tasks == 0:
self.task_queue.put("ping")
if self.config.http1_ping_interval:
while self.keep_running:
time_to_ping = max(self.config.http1_ping_interval - (time.time() - self.last_recv_time), 3)
time.sleep(time_to_ping)
if not self.request_onway and \
time.time() - self.last_recv_time > self.config.http1_ping_interval - 3:
self.task_queue.put("ping")
time.sleep(3)
elif self.config.http1_idle_time:
while self.keep_running:
time_to_sleep = max(self.config.http1_idle_time - (time.time() - self.last_recv_time), 0.2)
time.sleep(time_to_sleep)
if not self.request_onway and time.time() - self.last_recv_time > self.config.http1_idle_time:
self.close("idle timeout")
return
def work_loop(self):
while self.keep_running:
try:
task = self.task_queue.get(block=True)
except:
task = None
if not task:
# None task means exit
self.accept_task = False
self.keep_running = False
return
if task == "ping":
if not self.head_request():
self.ip_manager.recheck_ip(self.ssl_sock.ip_str)
self.close("keep alive")
return
self.last_recv_time = time.time()
continue
# self.logger.debug("http1 get task")
time_now = time.time()
if time_now - self.last_recv_time > self.config.http1_idle_time:
self.logger.warn("get task but inactive time:%d", time_now - self.last_recv_time)
self.task = task
self.close("inactive timeout %d" % (time_now - self.last_recv_time))
return
self.request_task(task)
self.request_onway = False
self.last_send_time = time_now
self.last_recv_time = time_now
if self.processed_tasks > self.config.http1_max_process_tasks or self.is_life_end():
self.close("lift end")
return
def request_task(self, task):
timeout = task.timeout
self.request_onway = True
start_time = time.time()
self.record_active("request")
task.set_state("h1_req")
task.headers[b'Host'] = self.get_host(task.host)
task.headers[b"Content-Length"] = len(task.body)
request_data = b'%s %s HTTP/1.1\r\n' % (task.method, task.path)
request_data += pack_headers(task.headers)
request_data += b'\r\n'
try:
self.ssl_sock.send(request_data)
payload_len = len(task.body)
start = 0
while start < payload_len:
send_size = min(payload_len - start, 65535)
sended = self.ssl_sock.send(task.body[start:start+send_size])
start += sended
task.set_state("h1_req_sended")
response = simple_http_client.Response(self.ssl_sock)
response.begin(timeout=timeout)
task.set_state("response_begin")
except Exception as e:
self.logger.exception("%s h1_request:%r inactive_time:%d task.timeout:%d",
self.ip_str, e, time.time() - self.last_recv_time, task.timeout)
self.logger.warn('%s trace:%s', self.ip_str, self.get_trace())
self.retry_task_cb(task)
self.task = None
self.close("down fail")
return
task.set_state("h1_get_head")
time_left = timeout - (time.time() - start_time)
if task.method == b"HEAD" or response.status in [204, 304]:
response.content_length = 0
response.ssl_sock = self.ssl_sock
response.task = task
response.worker = self
task.content_length = response.content_length
task.responsed = True
task.queue.put(response)
try:
read_target = int(response.content_length)
except:
read_target = 0
data_len = 0
while True:
try:
data = response.read(timeout=time_left)
if not data:
break
except Exception as e:
self.logger.warn("read fail, ip:%s, chunk:%d url:%s task.timeout:%d e:%r",
self.ip_str, response.chunked, task.url, task.timeout, e)
self.logger.warn('%s trace:%s', self.ip_str, self.get_trace())
self.close("down fail")
return
task.put_data(data)
length = len(data)
data_len += length
if read_target and data_len >= read_target:
break
if read_target > data_len:
self.logger.warn("read fail, ip:%s, chunk:%d url:%s task.timeout:%d ",
self.ip_str, response.chunked, task.url, task.timeout)
self.ip_manager.recheck_ip(self.ssl_sock.ip_str)
self.close("down fail")
task.finish()
self.ssl_sock.received_size += data_len
time_cost = (time.time() - start_time)
if time_cost != 0:
speed = data_len / time_cost
task.set_state("h1_finish[SP:%d]" % speed)
self.transfered_size += len(request_data) + data_len
self.task = None
self.accept_task = True
self.idle_cb()
self.processed_tasks += 1
self.last_recv_time = time.time()
self.record_active("Res")
def head_request(self):
if not self.ssl_sock.host:
# self.logger.warn("try head but no host set")
return True
        # intended as a keep-alive probe; not in active use yet.
self.request_onway = True
self.record_active("head")
start_time = time.time()
# self.logger.debug("head request %s", self.ip)
request_data = b'GET / HTTP/1.1\r\nHost: %s\r\n\r\n' % utils.to_bytes(self.ssl_sock.host)
try:
data = request_data
ret = self.ssl_sock.send(data)
if ret != len(data):
self.logger.warn("h1 head send len:%r %d %s", ret, len(data), self.ip_str)
self.logger.warn('%s trace:%s', self.ip_str, self.get_trace())
return False
response = simple_http_client.Response(self.ssl_sock)
response.begin(timeout=5)
status = response.status
if status != 200:
self.logger.warn("%s host:%s head fail status:%d", self.ip_str, self.ssl_sock.host, status)
return False
content = response.readall(timeout=5)
self.record_active("head end")
rtt = (time.time() - start_time) * 1000
self.update_rtt(rtt)
return True
except Exception as e:
self.logger.warn("h1 %s HEAD keep alive request fail:%r", self.ssl_sock.ip_str, e)
self.logger.warn('%s trace:%s', self.ip_str, self.get_trace())
self.close("down fail")
finally:
self.request_onway = False
def close(self, reason=""):
        # Notify the loop to exit.
        # This function may also be called from outside (e.g. by the http2
        # layer) when gae_proxy finds the appid or ip is wrong.
self.task_queue.put(None)
if self.task is not None:
if self.task.responsed:
self.task.finish()
else:
self.retry_task_cb(self.task)
self.task = None
super(Http1Worker, self).close(reason) |
import asyncio
from functools import reduce
from pathlib import Path
import pytest
import aioftp
@pytest.mark.asyncio
async def test_patched_sleep(skip_sleep):
await asyncio.sleep(10)
assert skip_sleep.is_close(10)
SIZE = 3 * 100 * 1024 # 300KiB
@pytest.mark.parametrize("times", [10, 20, 30])
@pytest.mark.parametrize("type", ["read", "write"])
@pytest.mark.parametrize("direction", ["download", "upload"])
@pytest.mark.asyncio
async def test_client_side_throttle(pair_factory, skip_sleep, times, type,
direction):
async with pair_factory() as pair:
await pair.make_server_files("foo", size=SIZE)
await pair.make_client_files("foo", size=SIZE)
getattr(pair.client.throttle, type).limit = SIZE / times
await getattr(pair.client, direction)("foo")
if (type, direction) in {("read", "download"), ("write", "upload")}:
assert skip_sleep.is_close(times)
else:
assert skip_sleep.is_close(0)
@pytest.mark.parametrize("times", [10, 20, 30])
@pytest.mark.parametrize("users", [1, 2, 3])
@pytest.mark.parametrize("throttle_direction", ["read", "write"])
@pytest.mark.parametrize("data_direction", ["download", "upload"])
@pytest.mark.parametrize("throttle_level", ["throttle",
"throttle_per_connection"])
@pytest.mark.asyncio
async def test_server_side_throttle(pair_factory, skip_sleep, times, users,
throttle_direction, data_direction,
throttle_level):
async with pair_factory() as pair:
names = []
for i in range(users):
name = f"foo{i}"
names.append(name)
await pair.make_server_files(name, size=SIZE)
throttle = reduce(getattr, [throttle_level, throttle_direction],
pair.server)
throttle.limit = SIZE / times
clients = []
for name in names:
c = aioftp.Client(path_io_factory=aioftp.MemoryPathIO)
async with c.path_io.open(Path(name), "wb") as f:
await f.write(b"-" * SIZE)
await c.connect(pair.server.server_host, pair.server.server_port)
await c.login()
clients.append(c)
coros = [getattr(c, data_direction)(n) for c, n in zip(clients, names)]
await asyncio.gather(*coros)
await asyncio.gather(*[c.quit() for c in clients])
throttled = {("read", "upload"), ("write", "download")}
if (throttle_direction, data_direction) not in throttled:
assert skip_sleep.is_close(0)
else:
t = times
if throttle_level == "throttle": # global
t *= users
assert skip_sleep.is_close(t)
|