index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
12,800 | c09448ff9548db2752d574887efc08514d4c69ce | import cloudpassage
import sys
import os
import yaml
import datetime
sys.path.append(os.path.join(os.path.dirname(__file__), '../../', 'lib'))
from event import Event
config_file_name = "portal.yml"
tests_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
config_file = os.path.join(tests_dir, "configs/", config_file_name)
# The Python-2-only builtin file() was removed in Python 3; open() inside a
# context manager also guarantees the handle is closed.  yaml.safe_load
# avoids executing arbitrary YAML tags from the config file (yaml.load
# without an explicit Loader is unsafe and deprecated since PyYAML 5.1).
with open(config_file, 'r') as config_fp:
    config = yaml.safe_load(config_fp)
key_id = config['key_id']
secret_key = config['secret_key']
# ISO date string for "today", used as the --starting window in the tests.
date_today = datetime.date.today().isoformat()
class TestUnitEvent:
    """Tests for the Event helper in lib/event.py.

    NOTE(review): several of these talk to the live CloudPassage portal
    using credentials loaded from configs/portal.yml, so they are
    integration tests rather than isolated unit tests.
    """
    def create_event_obj(self):
        # Build a fresh Event per test, authenticated via the portal config
        # file and windowed to start at today's date.
        options = {
            '--auth': config_file,
            '--starting': date_today
        }
        event = Event(key_id, secret_key, options)
        return event
    def test_event_get_is_not_empty(self):
        # get(page, since, per_page) must return at least one event.
        event = self.create_event_obj()
        resp = event.get(1, date_today, 1)
        assert resp['events']
    def test_latest_event_is_not_empty(self):
        event = self.create_event_obj()
        resp = event.latest_event(1, date_today, 1)
        assert resp['events']
    def test_historical_limit_date_is_valid(self):
        # The history limit is expected to be exactly 90 days before today.
        event = self.create_event_obj()
        resp = event.historical_limit_date()
        today = datetime.date.today()
        expected = (today - datetime.timedelta(days=90)).isoformat()
        assert expected == resp
    def test_sort_by_is_alphabetical(self):
        # sort_by sorts a list of dicts alphabetically on the given key.
        event = self.create_event_obj()
        data = [{'color': 'red'}, {'color': 'black'}, {'color': 'white'}]
        resp = event.sort_by(data, 'color')
        expected = [{'color': 'black'}, {'color': 'red'}, {'color': 'white'}]
        assert expected == resp
    def test_get_end_date_is_not_nil(self):
        # Given the boundary "2016-08-19", get_end_date appears to return
        # the next older created_at date -- TODO confirm against Event.
        event = self.create_event_obj()
        dates = [
            {"created_at": "2016-08-20"},
            {"created_at": "2016-08-19"},
            {"created_at": "2016-08-18"}
        ]
        resp = event.get_end_date(dates, "2016-08-19")
        expected = "2016-08-18"
        assert expected == resp
    def test_id_exists_check(self):
        # An id taken from a live response must be found by id_exists_check.
        event = self.create_event_obj()
        resp = event.get(1, date_today, 1)['events']
        event_id = resp[0]['id']
        id_exists = event.id_exists_check(resp, event_id)
        assert id_exists
    def test_loop_date(self):
        # loop_date appears to report the (oldest, newest) created_at pair
        # seen before reaching end_date -- presumably; verify against lib.
        event = self.create_event_obj()
        data = [
            {"created_at": "2016-08-23"},
            {"created_at": "2016-08-22"},
            {"created_at": "2016-08-21"},
            {"created_at": "2016-08-20"}
        ]
        end_date = "2016-08-03"
        resp = event.loop_date(data, end_date)
        assert ("2016-08-20", "2016-08-23") == resp
|
12,801 | 436278f16f3fcd08993f4995842dcf546473af50 | import pandas as pd
import numpy as np
import tensorflow as tf
from typing import List
import twins
# TODO - remove dep on the service in this repo; create a package that gets
# imported into both things (parent). But for sake of time just use the
# svc client here
import svc
def _join(x) -> str:
    """Concatenate a row's tag/id columns into one space-separated string.

    Entries that are not strings (e.g. NaN produced by the left joins)
    are skipped.
    """
    parts = [
        x[col]
        for col in ("interest_tag", "course_id", "assessment_tag")
        if isinstance(x[col], str)
    ]
    return " ".join(parts)
def _load() -> List[dict]:
    """Fetch the three source datasets and attach each as a DataFrame.

    Returns a list of dicts, each carrying the dataset name, the column
    later used to build the user "sentence", and the loaded frame.
    """
    datasets = [
        {"name": "user_interests", "apply_col": "interest_tag"},
        {"name": "user_course_views", "apply_col": "course_id"},
        {"name": "user_assessment_scores", "apply_col": "assessment_tag"},
    ]
    for entry in datasets:
        entry["df"] = twins.dataset.load(entry["name"])
    return datasets
def _transform(data: List[dict]) -> List[dict]:
    """Collapse each dataset into one space-joined 'sentence' per user.

    Adds a "transformed" frame (grouped by user_handle) to every entry.
    """
    def _unique_join(values):
        # De-duplicate tags before joining so repeats don't dominate.
        return " ".join(np.unique(values))

    for entry in data:
        entry["transformed"] = twins.utils.create_sentence(
            entry["df"],
            group_col="user_handle",
            apply_col=entry["apply_col"],
            transform=_unique_join,
        )
    return data
def _combine(data: List[dict]) -> pd.DataFrame:
    """Left-join all transformed frames on user_handle and build the
    combined per-user sentence column."""
    merged = data[0]["transformed"].copy()
    for entry in data[1:]:
        merged = pd.merge(merged, entry["transformed"], how="left", on="user_handle")
    merged["sentence"] = merged.apply(_join, axis=1)
    return merged
# --------------------------
def train():
    """Run the load/transform/combine pipeline and fit the v1 model.

    Returns (train_data, user_vecs, model).
    """
    train_data = twins.pipeline.build(_load(), [_transform, _combine])
    print("training model v1")
    # TODO - persist this model then create a "predict" step that loads it in
    # and can predict for any dataset.
    model = twins.models.BagOfPCA(vocab_size=500, rank=64)
    user_vecs = model.fit_transform(train_data.sentence.values)
    print("trained model v1")
    return train_data, user_vecs, model
def train_predict():
    """Train the model, then index each user's vector + sentence into
    Elasticsearch (index "users-v1", one doc per user_handle)."""
    train_data, user_vecs, _ = train()
    print("writing recs to db")
    # TODO - parallelize this
    es_client = svc.cfg["elastic"]["client"]
    for idx in range(len(train_data)):
        row = train_data.iloc[idx]
        user_id = int(row["user_handle"])
        doc = {
            "vector": list(user_vecs[idx]),
            "sentence": str(row["sentence"]),
            "id": user_id,
        }
        es_client.index(index="users-v1", id=user_id, body=doc)
    print("completed writing recs to db")
|
12,802 | 44ecdd1e564fd398959adb2529f307d1183507a2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bili import cols
from bili.bili_video_name import *
if __name__ == "__main__":
    # Refresh the stored video names for every collection except the first
    # and last (cols[1:-1]) -- presumably those are handled elsewhere;
    # TODO confirm why the endpoints are excluded.
    for _col in cols[1: -1]:
        init_ignore_id()
        video_aids = get_video_aid(_col)
        _names = get_video_name(video_aids)
        update_db_video_name(_names)
        print(_col, "DONE!!!")
    # conn.close()
|
12,803 | 38897d9694a15e0a6f5d64c021eea56982120910 | # -*- encoding: utf-8 -*-
from django import template
from reqApp.models import *
from reqApp.choices import *
from reqApp.util import *
from random import randrange
register = template.Library()
@register.filter(name="proyecto")
def proyecto(request):
    """Template filter: resolve the current project from the request."""
    return getProject(request)
@register.filter(name="invertOrd")
def invertOrd(orden):
    """Make an ordering field descending by prefixing '-'.

    NOTE(review): always prepends '-'; it does not toggle an existing one.
    """
    return u'-'+orden
@register.filter(name="prioridad")
def prioridad(elemento):
    """Template filter: display label for the element's priority.

    Returns None when the value has no entry in PRIORIDAD_CHOICES.
    """
    return next(
        (label for value, label in PRIORIDAD_CHOICES
         if value == elemento.prioridad),
        None,
    )
@register.filter(name="tipoRU")
def tipoRU(elemento):
    """Template filter: display label for a user-requirement type."""
    return next(
        (label for value, label in TIPO_RU_CHOICES if value == elemento.tipo),
        None,
    )
@register.filter(name="tipoRS")
def tipoRS(elemento):
    """Template filter: display label for a software-requirement type."""
    return next(
        (label for value, label in TIPO_RS_CHOICES if value == elemento.tipo),
        None,
    )
@register.filter(name="tipoReq")
def tipoReq(req):
    """Template filter: display label for a requirement's type.

    Uses the RU choice table when the requirement is associated to a user
    requirement, otherwise the RS table.
    """
    choices = TIPO_RU_CHOICES if req.asoc_RU() else TIPO_RS_CHOICES
    return next((label for value, label in choices if value == req.tipo), None)
@register.filter(name="estado")
def estado(elemento):
    """Template filter: display label for the element's state."""
    return next(
        (label for value, label in ESTADO_CHOICES if value == elemento.estado),
        None,
    )
@register.filter(name="enlistarVigentes")
def enlistarVigentes(queryList):
    """Restrict a queryset to active items (vigencia=True), ordered by
    identificador."""
    return queryList.filter(vigencia=True).order_by('identificador')
@register.filter(name="enlistarRegistrados")
def enlistarRegistrados(queryList):
    """Order a queryset by identificador without any filtering."""
    return queryList.order_by('identificador')
@register.filter(name="largoLista")
def largoLista(lista):
    """Template-friendly len() for lists."""
    return len(lista)
@register.filter(name="splitBy")
def splitBy(s, token):
    """Split the string on the given separator, returning the parts."""
    return s.split(token)
@register.filter(name="concat")
def concat(s1,s2):
    """Concatenate the string forms of two values."""
    return "".join((str(s1), str(s2)))
@register.filter(name="porcentaje")
def porcentaje(total, parte):
    """Render parte/total as a whole-percent string (e.g. ' 42%')."""
    # Guard against division by zero when the denominator is empty.
    if total == 0:
        return "0%"
    # NOTE(review): on Python 2 this is integer division when both args are
    # ints (floors before formatting); on Python 3 it is true division.
    return ("%3.0f" % (100*parte/total)) + "%"
@register.filter(name="agregarHostALosSrc")
def agregarHostALosSrc(htmlCode,host):
    """Prefix root-relative src attributes in the HTML with the given host.

    Handles both double- and single-quoted src attributes so images and
    other resources resolve outside the original origin.
    """
    with_double_quotes = htmlCode.replace('src="/', 'src="' + host + '/')
    with_both = with_double_quotes.replace("src='/", "src='" + host + "/")
    return with_both
@register.filter(name="textTableHorizHeaders")
def textTableHorizHeaders(rows):
    """Render a matrix as text with per-character column headers.

    `rows` is a list of rows whose cells are dicts with 'elFila' (row
    element) and 'elCol' (column element); each element exposes
    textoIdentificador() and an `estado` attribute used as a CSS class.
    Returns '---' when there is nothing to render.
    """
    if len(rows)>0:
        if len(rows[0])>0:
            firstRow = rows[0]
            pref = '|'
            hr = '|'
            rti = firstRow[0]['elFila'].textoIdentificador()
            # Left padding: one invisible 'o' per character of the row
            # identifier so the headers line up over the data columns.
            for x in range(0, len(rti)):
                pref = '<span style="color:White;">o</span>'+pref
                hr = '-'+hr
            # One output line per character of a column identifier.
            hText = []
            for c in firstRow[0]['elCol'].textoIdentificador():
                hText.append('')
            # Build the headers a character-row at a time, each character
            # wrapped in a span carrying the column element's estado class.
            for e in firstRow:
                for i,c in enumerate(e['elCol'].textoIdentificador()):
                    hText[i] = hText[i] + '<span class="' + e['elCol'].estado + '">' + c + '</span>'
            out = ''
            hrlen = 0
            for r in hText:
                # NOTE(review): hrlen is assigned but never used afterwards.
                hrlen = len(r)
                out = (out + pref + r + '<br/>')
            # Horizontal rule: one dash per column.
            for x in range(0,len(firstRow)):
                hr = hr + '-'
            out = out + hr + '<br/>'
            return out
    return '---'
@register.filter(name="alarms")
def alarms(el):
    """Return a list of alarm messages for the element, or False if none.

    NOTE(review): alarms are currently simulated -- roughly 30% of calls
    return the two hard-coded warnings regardless of the element's state.
    """
    if (randrange(100)+1)<30:
        return [
            "Su prioridad es menor que la prioridad más alta de sus Requisitos de Software asociados",
            "Tiene más de 5 Requisitos de Software asociados (puede ser muy complejo y quizás deba ser dividido)"
        ]
    return False
|
12,804 | 374aa1504c4270171ebf7014449aab9e85e10716 | import ast
from collections import OrderedDict
from PythonVoiceCodingPlugin.library import sorted_by_source_region,get_source_region,make_flat
from PythonVoiceCodingPlugin.library.info import *
from PythonVoiceCodingPlugin.library.traverse import search_upwards,search_upwards_log, find_matching,match_node, find_all_nodes,search_upwards_for_parent
from PythonVoiceCodingPlugin.queries.abstract import CollectionQuery
class CollectModule(CollectionQuery):
    """Collection query that gathers every module name imported in a file."""
    indexable = False
    label = "Modules"
    def handle_single(self,view_information,query_description,extra = {}):
        """Collect the distinct dotted module names imported in the file.

        Returns a list including every dotted prefix (for "a.b.c" also
        "a.b" and "a"), in first-seen order.
        NOTE(review): the early-exit path returns a (None, None) pair while
        the success path returns a single list -- callers must cope with
        both shapes.  The mutable default `extra={}` is shared across calls
        and is safe only as long as it is never mutated here.
        """
        build, origin, selection = self._preliminary(view_information,query_description,extra)
        if not build:
            return None,None
        root,atok,m,r = build
        # Every plain `import` and `from ... import` statement in the file.
        definition_nodes = find_all_nodes(root,(ast.Import,ast.ImportFrom))
        name_nodes = make_flat([get_module_names(x) for x in definition_nodes])
        # Appending while iterating deliberately extends the loop so that
        # every dotted prefix ("a.b.c" -> "a.b" -> "a") is also visited.
        for name in name_nodes:
            smaller = name.split(".")
            if len( smaller)>1:
                name_nodes.append(".".join(smaller[:-1]))
        # OrderedDict keeps first-seen order while removing duplicates.
        names = list(OrderedDict([(x,0) for x in name_nodes]).keys())
        result = None  # NOTE(review): unused local.
        return names
|
12,805 | aac2bf245619832434bc896b0e73e40378a322ac | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup, find_packages
import setuptools.command.install as _install
import subprocess
class install(_install.install):
    """Custom install command that builds the native plasma binaries before
    the Python package itself is installed."""
    def run(self):
        # Compile the C artifacts and copy them into the package tree so
        # setup()'s package_data picks them up.
        subprocess.check_call(["make"])
        subprocess.check_call(["cp", "build/plasma_store",
                               "plasma/plasma_store"])
        subprocess.check_call(["cp", "build/plasma_manager",
                               "plasma/plasma_manager"])
        subprocess.check_call(["cmake", ".."], cwd="./build")
        subprocess.check_call(["make", "install"], cwd="./build")
        # Calling _install.install.run(self) does not fetch required packages
        # and instead performs an old-style install. See command/install.py in
        # setuptools. So, calling do_egg_install() manually here.
        self.do_egg_install()
# Ship the prebuilt native binaries with the package and route `install`
# through the build-first command defined above.
setup(name="Plasma",
      version="0.0.1",
      description="Plasma client for Python",
      packages=find_packages(),
      package_data={"plasma": ["plasma_store",
                               "plasma_manager",
                               "libplasma.so"]},
      cmdclass={"install": install},
      include_package_data=True,
      zip_safe=False)
|
12,806 | 2c4a5307efa9d3887183a67f06cf4fc2a262bbe5 | from urllib.request import urlopen
# Fetch the page and print its raw bytes.  The context manager closes the
# HTTP response when done (the original leaked the open connection).
with urlopen("http://www.pythonscraping.com/pages/page1.html") as html:
    print(html.read())
12,807 | efae03c673cdfd3529f4182946f7b33635e665b1 | __all__ = ["csv_col_save"]
import csv
def csv_col_save(fileName, cols, colHeader):
    """Public entry point: save column-oriented data to a CSV file.

    :param fileName: name of the csv file.
    :param cols: list of columns, all with the same length.
    :param colHeader: column names, in the same order as `cols`.
    """
    __save_to_csv_col(cols, fileName, colHeader)
def __save_to_csv_row(rows, fileName):
    """
    :param rows:
        list of Dictionaries where each item is a dictionary mapping the
        field name to the value.
    :param fileName:
        name of the csv file.
    :return:
        none
    """
    __just_save_it(rows, fileName)
def __save_to_csv_col(cols, fileName, colHeader):
    """
    Transpose column-oriented data into row dicts and write them as CSV.

    :param cols:
        list of cols for the csv, all with the same length.
    :param fileName:
        the name of the csv file.
    :param colHeader:
        A list, containing all the column names, the same order as the list of cols.
    :return:
        none
    """
    # Allocate one dict per output row; the longest column decides how many.
    row_count = max(len(column) for column in cols)
    row_dicts = [{} for _ in range(row_count)]
    for field_name, column in zip(colHeader, cols):
        for row_index, value in enumerate(column):
            row_dicts[row_index][field_name] = value
    __just_save_it(row_dicts, fileName)
def __just_save_it(csvDicList, fileName):
    """Write a list of row-dicts to `fileName` as CSV.

    The header is taken from the first row's keys.  An empty list writes
    nothing (previously this raised IndexError on csvDicList[0]).
    """
    if not csvDicList:
        return
    # newline="" stops the csv module from emitting blank lines between
    # rows on Windows (required by the csv module docs for open()).
    with open(fileName, "w", newline="") as CSVDataFile:
        Writer = csv.DictWriter(CSVDataFile, fieldnames=csvDicList[0].keys())
        Writer.writeheader()
        Writer.writerows(csvDicList)
12,808 | 45e0f562b49fdcbf14b79a3bfacf65841195d7df | import csv
#input number you want to search
cardName = input('Enter part of name to find\n')
#read csv, and split on "," the line
# The file's own trailing question asked where `with` should be placed:
# here, so the handle is closed instead of leaking.
with open('MunchkinTreasureCards.csv', "r", newline='') as card_file:
    csv_file = csv.reader(card_file, delimiter=",")
    #loop through the csv list
    for row in csv_file:
        if len(row) > 1: #checking for empty rows
            #if input is equal to anything in the file, then it is returned.
            #lower() makes the match case-insensitive for the name column.
            # NOTE(review): `cardName.lower() in row` compares against the
            # raw cells case-sensitively -- probably meant row[0].lower();
            # behaviour kept as-is pending confirmation.
            if cardName.lower() in row or cardName.lower() in row[1].lower():
                print(row)
# Remaining known limitation (from the original notes): nothing is printed
# when no card matches.
12,809 | dced9cf7d4feef7ead9fba935c0a888a3d40aca9 | from flow.core.params import VehicleParams,InFlows,SumoCarFollowingParams,SumoParams, EnvParams, InitialConfig, NetParams, SumoLaneChangeParams
from flow.controllers import IDMController, RLController
from controller import SpecificMergeRouter,NearestMergeRouter
# from flow.core.params import
from network import HighwayRampsNetwork, ADDITIONAL_NET_PARAMS
#######################################################
########### Configurations
# actual_num_human_list = [10,20,30,40,50]
# Sweep over (merge_0, merge_1) CAV splits; the CAV total is 20 per run.
actual_num_human = 20
actual_num_cav_list = [(0,20),(5,15),(15,5),(20,0)]
for (NUM_MERGE_0,NUM_MERGE_1) in actual_num_cav_list:
    # for actual_num_human in actual_num_human_list:
    TEST_SETTINGS = True
    RENDER = False
    # RENDER = True
    NEAREST_MERGE = False
    # NEAREST_MERGE = True
    NUM_HUMAN = 20
    # NUM_MERGE_0 = 10
    # NUM_MERGE_1 = 10
    MAX_CAV_SPEED = 14
    MAX_HV_SPEED = 10
    # Nearest-merge mode colors both CAV types red; otherwise the two
    # merge types are distinguishable (red vs green).
    VEH_COLORS = ['red','red'] if NEAREST_MERGE else ['red','green']
    #######################################################
    # Router = NearestMergeRouter if NEAREST_MERGE else SpecificMergeRouter
    # Human vehicles: IDM car following, conservative gap/speed settings.
    vehicles = VehicleParams()
    vehicles.add(veh_id="human",
                 lane_change_params = SumoLaneChangeParams('only_strategic_safe'),
                 car_following_params = SumoCarFollowingParams(speed_mode='right_of_way',min_gap=5, tau=0.5, max_speed=MAX_HV_SPEED),
                 acceleration_controller=(IDMController, {}),
                 )
    # RL-controlled CAVs, one type per target merge.
    vehicles.add(veh_id="merge_0",
                 lane_change_params = SumoLaneChangeParams('only_strategic_safe'),
                 car_following_params = SumoCarFollowingParams(speed_mode='no_collide',min_gap=1, tau=0.5, max_speed=MAX_CAV_SPEED),
                 acceleration_controller=(RLController, {}),
                 color=VEH_COLORS[0])
    vehicles.add(veh_id="merge_1",
                 lane_change_params = SumoLaneChangeParams('only_strategic_safe'),
                 car_following_params = SumoCarFollowingParams(speed_mode='no_collide',min_gap=1, tau=0.5, max_speed=MAX_CAV_SPEED),
                 acceleration_controller=(RLController, {}),
                 color=VEH_COLORS[1])
    initial_config = InitialConfig(spacing='uniform')
    # Inflows all enter on highway_0; each is capped by `number`.
    inflow = InFlows()
    inflow.add(veh_type="human",
               edge="highway_0",
               probability=actual_num_human/100,
               depart_lane='random',
               depart_speed = 'random',
               route = 'highway_0',
               number = actual_num_human)
    inflow.add(veh_type="merge_0",
               edge="highway_0",
               probability = 0.1,
               depart_lane='random',
               depart_speed = 'random',
               route = 'merge_0',
               number = NUM_MERGE_0)
    inflow.add(veh_type="merge_1",
               edge="highway_0",
               probability = 0.1,
               depart_lane='random',
               depart_speed = 'random',
               route = 'merge_1',
               number = NUM_MERGE_1)
    sim_params = SumoParams(sim_step=0.1, restart_instance=True, render=RENDER,seed=None)
    from specific_environment import MergeEnv
    # Intention labels: 0 = human; CAVs share one label in nearest-merge
    # mode, otherwise get a distinct label per target ramp.
    intention_dic = {"human":0,"merge_0":1,"merge_1":1} if NEAREST_MERGE else {"human":0,"merge_0":1,"merge_1":2}
    terminal_edges = ['off_ramp_0','off_ramp_1','highway_2']
    env_params = EnvParams(warmup_steps=50,
                           additional_params={"intention":intention_dic,
                                              "max_cav_speed":MAX_CAV_SPEED,
                                              "max_hv_speed":MAX_HV_SPEED})
    additional_net_params = ADDITIONAL_NET_PARAMS.copy()
    additional_net_params['num_vehicles'] = NUM_HUMAN + NUM_MERGE_0 + NUM_MERGE_1
    additional_net_params['num_cav'] = NUM_MERGE_0 + NUM_MERGE_1
    additional_net_params['num_hv'] = NUM_HUMAN
    additional_net_params['terminal_edges'] = terminal_edges
    net_params = NetParams(inflows=inflow, additional_params=additional_net_params)
    network = HighwayRampsNetwork("highway_ramp",vehicles,net_params,initial_config)
    ############ BUILD RL MODEL ##############
    flow_params = dict(
        exp_tag='test_network',
        env_name=MergeEnv,
        network=network,
        simulator='traci',
        sim=sim_params,
        env=env_params,
        net=net_params,
        veh=vehicles,
        initial=initial_config
    )
    # # number of time steps
    flow_params['env'].horizon = 2000
    ############ EXPERIMENTS ##############
    if TEST_SETTINGS:
        print("this is the run for the baseline model")
        from experiment import Experiment
        exp = Experiment(flow_params)
        # run the sumo simulation
        # exp.run(10,num_cav=(NUM_MERGE_0+NUM_MERGE_1),num_human=actual_num_human)
        exp.run(10,num_cav=(NUM_MERGE_0+NUM_MERGE_1),num_merge_0=NUM_MERGE_0, num_merge_1=NUM_MERGE_1, num_human=actual_num_human) # For varying the popularity
|
12,810 | a227de636abf8cf28525011ecaae489d895353b9 | # -*- coding: utf-8 -*-
"""
Utility functions (:mod:`refmanage.fs_utils`)
=============================================
.. currentmodule:: fs_utils
"""
import os
import glob
import pathlib2 as pathlib
from pybtex.database.input import bibtex
from pybtex.exceptions import PybtexError
from pybtex.scanner import TokenRequired
from reffile import BibFile, NonbibFile
from ref_exceptions import UnparseableBibtexError
def handle_files_args(*paths_args):
    """
    Handle file(s) arguments from command line

    Expand the cli path arguments (user-home prefixes and shell wildcards)
    into an unambiguous list of `pathlib.Path` objects pointing at the
    files to operate on.

    :param str *paths_args: Paths to files.
    :rtype: list
    """
    paths = []
    for raw_arg in paths_args:
        # ~ and ~user prefixes are expanded before globbing.
        matches = glob.glob(os.path.expanduser(raw_arg))
        paths.extend(pathlib.Path(match) for match in matches)
    return paths
def reffile_factory(path):
    """
    Factory method to return child of RefFile

    Try to parse the file at `path` as BibTeX; fall back to a NonbibFile
    wrapper when the contents are not valid BibTeX.

    :param pathlib.Path path: Path to file possibly containing BibTeX data.
    :rtype: BibFile or NonbibFile depending on input.
    """
    try:
        return BibFile(path)
    except UnparseableBibtexError:
        return NonbibFile(path)
def construct_bibfile_data(*paths):
    """
    List of data corresponding to individual bib files

    :param pathlib.Path *paths: Path to file possibly containing BibTeX data.
    :rtype: list
    """
    return list(map(reffile_factory, paths))
def bib_sublist(bibfile_data, val_type):
    """
    Sublist of bibfile_data whose elements are val_type

    Examine each element of `bibfile_data` and keep the ones whose `bib`
    attribute is an instance of `val_type`.

    :param list bibfile_data: List containing `RefFile`s.
    :param type val_type:
    :rtype: list
    """
    sublist = []
    for bibfile in bibfile_data:
        if isinstance(bibfile.bib, val_type):
            sublist.append(bibfile)
    return sublist
def gen_stdout_test_msg(bibfile_data, verbose=False):
    """
    Generate appropriate message for STDOUT

    Join each item's own test message into one newline-separated string;
    `verbose` is forwarded to every item's `test_msg`.

    :param list bibfile_data: List containing `RefFile`s.
    :param bool verbose: Directive to construct verbose/terse STDOUT string.
    :rtype: str
    """
    return "\n".join(bibfile.test_msg(verbose) for bibfile in bibfile_data)
|
12,811 | 1981ad50b63b60e005211e35baf9512ab89e95bc | # from jsonschema import Draft4Validator
from jsonschema import validate, RefResolver
import json
import os
# Load the schema from disk.
with open('schema/config-schema.json') as f:
    schema = json.load(f)
# Bugfix in jsonschema
# https://github.com/Julian/jsonschema/issues/313
# Resolve relative $ref entries against the schema file's own directory.
schema_dir = os.path.dirname(os.path.abspath('schema/config-schema.json'))
resolver = RefResolver(base_uri='file://' + schema_dir + '/', referrer=schema)
# validate(obj, schema, resolver = resolver)
# https://stackoverflow.com/questions/30095032/nameerror-name-true-is-not-defined
# JSON-style literal aliases (see the SO link above).
true = True
false = False
with open('tmp/config.json') as f:
    config = json.load(f)
# Raises jsonschema.ValidationError when the config violates the schema.
validate(instance=config, schema=schema, resolver=resolver)
# Todo: schemas shall point to github not locally
# "windows": {
# "$ref": "config-windows.json#/windows"
# },
# "vm": {
# "$ref": "https://raw.githubusercontent.com/opencontainers/runtime-spec/master/schema/config-vm.json#/vm"
|
12,812 | 5a9b0726d37a76f73f4478a6c81d8416b460b9ce | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from functools import reduce
from datetime import datetime as dt
class CasesCategory(scrapy.Item):
    """Per-category case counts scraped as strings, serialized to int.

    Fixes the misspelled `serializer` keyword on four fields (`serialier`,
    `serialzer`, `seqializer`): scrapy.Field() silently stores unknown
    metadata keys, so the int serializer was never applied to those fields.
    """
    positive = scrapy.Field(serializer=int)
    presumedPositive = scrapy.Field(serializer=int)
    negative = scrapy.Field(serializer=int)
    pending = scrapy.Field(serializer=int)
    pui = scrapy.Field(serializer=int)
    # Free-text category name; no serialization needed.
    name = scrapy.Field()
class TestingStats(scrapy.Item):
    """Daily testing statistics: one CasesCategory per jurisdiction type,
    plus the report date."""
    date = scrapy.Field()
    Local = scrapy.Field(serializer = CasesCategory)
    FederalQuarantine = scrapy.Field(serializer = CasesCategory)
    Repatriated = scrapy.Field(serializer = CasesCategory)
    NonLocal = scrapy.Field(serializer = CasesCategory)
    Combined = scrapy.Field(serializer = CasesCategory)
    def getTotal(self, key = "positive"):
        """Sum `key` across every populated category, skipping "NA"."""
        categories = sorted([i for i in self.keys() if i!= "date"])
        return sum([int(self[i][key]) for i in categories if key in self[i].keys() and self[i][key] != "NA"])
    def getCategoryTotal(self, key):
        """Total case count in one category, excluding outcome fields."""
        case_categories = [i for i in self[key].keys() if i not in ["name", "Hospitalized", "Deaths", "Intensive Care"]]
        return sum([int(self[key][i]) for i in case_categories if self[key][i] != "NA"])
    def toAsciiTable(self):
        """Render the stats as a Markdown-fenced ASCII table string."""
        # Get case categories for which data exists
        categories = sorted([i for i in self.keys() if i!= "date"])
        # Get all non null keys from lib import funs CasesCategory
        case_categories = [list(self[i].keys()) for i in categories]
        case_categories = reduce(lambda x,y: x+y,case_categories)
        case_categories = sorted(list(set([i for i in case_categories if i != "name"])))
        row_format = "|{:^30}" * (len(categories) + 1) + "|\n"
        rows = []
        rows.append([""])
        rows[0].extend([self[i]["name"] for i in categories])
        rows.extend([[i.capitalize()] + [self[j][i] if j in self.keys() and i in self[j].keys() else "NA" for j in categories] for i in case_categories])
        # NOTE(review): `j[0]` is the category name's FIRST CHARACTER, so
        # comparing it against whole words is always True; the filter is a
        # no-op and probably meant to test `j` itself.
        rows.append(["Total"] + [self.getCategoryTotal(j) for j in categories if j[0] not in ["Hospitalized", "Deaths", "Intensive care"]])
        # NOTE(review): "%Y-%m-%d %H:%M %p" mixes 24h %H with %p -- confirm
        # the upstream date string really carries an AM/PM marker.
        row_str = "Last updated: {}".format(dt.strftime(dt.strptime(self["date"], "%Y-%m-%d %H:%M %p"), "%Y-%m-%d"))
        row_str += "\n```\n"
        for i in rows:
            row_str += row_format.format(*i)
        row_str += "```\n"
        row_str += "Total cases: {}".format(sum([self.getCategoryTotal(j) for j in categories]))
        return row_str
|
12,813 | 41ff88dbeff121c8834d8863359e071927ac0d5c | from yolo import YOLO
from PIL import Image
import cv2
yolo = YOLO()
# Jetson onboard camera via a GStreamer pipeline (1280x720 @ 30fps).
cap = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720,format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink")
if cap.isOpened():
    while True:
        ret_val, frame = cap.read()
        # Previously ret_val was ignored: a dropped frame made frame None
        # and cvtColor crashed.  Bail out of the loop instead.
        if not ret_val:
            print("frame grab failed")
            break
        cv2_im = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_im = Image.fromarray(cv2_im)
        boxes = yolo.detect_bounding_boxes(pil_im)
        print(boxes)
else:
    print("camera open failed")
|
12,814 | 07ac997bdb549666376009055e4c8f72fb81f00d | import json
import os
import zipfile
from unittest import mock
import pytest
from cloudify_system_workflows.snapshots.snapshot_create import EMPTY_B64_ZIP
from cloudify_system_workflows.tests.snapshots.utils import (
AuditLogResponse,
load_snapshot_to_dict,
prepare_snapshot_create_with_mocks,
FAKE_EXECUTION_ID,
FAKE_MANAGER_VERSION,
EMPTY_TENANTS_LIST_SE,
ONE_TENANTS_LIST_SE,
TWO_TENANTS_LIST_SE,
TWO_BLUEPRINTS_LIST_SE,
)
def test_dump_metadata():
    """Metadata dump writes the snapshot version and execution id into
    metadata.json in the snapshot temp dir."""
    with prepare_snapshot_create_with_mocks(
        'test-dump-metadata',
        rest_mocks=[(mock.Mock, ('tenants', 'list'), EMPTY_TENANTS_LIST_SE)],
    ) as sc:
        sc._dump_metadata()
        with open(sc._temp_dir / 'metadata.json', 'r') as f:
            metadata = json.load(f)
        assert metadata == {
            'snapshot_version': FAKE_MANAGER_VERSION,
            'execution_id': FAKE_EXECUTION_ID,
        }
def test_dump_management():
    """Management dump exports permissions/user_groups/users/tenants exactly
    once each and must not touch tenant-scoped resources (blueprints)."""
    with prepare_snapshot_create_with_mocks(
        'test-dump-management',
        rest_mocks=[
            (mock.Mock, ('tenants', 'list'), EMPTY_TENANTS_LIST_SE),
            (mock.Mock, ('user_groups', 'dump'), [[]]),
            (mock.Mock, ('tenants', 'dump'), [[]]),
            (mock.Mock, ('users', 'dump'), [[]]),
            (mock.Mock, ('permissions', 'dump'), [[]]),
        ],
    ) as sc:
        sc._dump_management()
        sc._client.blueprints.dump.assert_not_called()
        sc._client.permissions.dump.assert_called_once_with()
        sc._client.user_groups.dump.assert_called_once_with()
        sc._client.users.dump.assert_called_once_with()
        sc._client.tenants.dump.assert_called_once_with()
def test_dump_composer():
    """Composer dump exports blueprints, configuration and favorites into
    the composer/ subdirectory of the snapshot temp dir."""
    with prepare_snapshot_create_with_mocks(
        'test-dump-composer',
        rest_mocks=[(mock.Mock, ('tenants', 'list'), EMPTY_TENANTS_LIST_SE)],
    ) as sc:
        sc._dump_composer()
        c_dir = sc._temp_dir / 'composer'
        sc._composer_client.blueprints.dump.assert_called_once_with(c_dir)
        sc._composer_client.configuration.dump.assert_called_once_with(c_dir)
        sc._composer_client.favorites.dump.assert_called_once_with(c_dir)
def test_dump_stage_no_tenants():
    """With no tenants, stage dump exports the global stage resources but
    never the per-tenant user-app (ua) data."""
    with prepare_snapshot_create_with_mocks(
        'test-dump-stage-no-tenants',
        rest_mocks=[(mock.Mock, ('tenants', 'list'), EMPTY_TENANTS_LIST_SE)],
    ) as sc:
        sc._dump_stage()
        s_dir = sc._temp_dir / 'stage'
        sc._stage_client.blueprint_layouts.dump.assert_called_once_with(s_dir)
        sc._stage_client.configuration.dump.assert_called_once_with(s_dir)
        sc._stage_client.page_groups.dump.assert_called_once_with(s_dir)
        sc._stage_client.pages.dump.assert_called_once_with(s_dir)
        sc._stage_client.templates.dump.assert_called_once_with(s_dir)
        sc._stage_client.widgets.dump.assert_called_once_with(s_dir)
        sc._stage_client.ua.dump.assert_not_called()
def test_dump_stage_two_tenants():
    """With tenants present, stage user-app (ua) data is dumped once per
    tenant into a per-tenant subdirectory."""
    with prepare_snapshot_create_with_mocks(
        # NOTE(review): label copied from the no-tenants test; rename for
        # clarity.
        'test-dump-stage-no-tenants',
        rest_mocks=[(mock.Mock, ('tenants', 'list'), TWO_TENANTS_LIST_SE)],
    ) as sc:
        sc._dump_stage()
        sc._stage_client.ua.dump.assert_has_calls([
            mock.call(sc._temp_dir / 'stage' / 'tenant1', tenant='tenant1'),
            mock.call(sc._temp_dir / 'stage' / 'tenant2', tenant='tenant2'),
        ])
def test_dump_tenants():
    """Dumping one tenant exports every tenant-scoped resource exactly once,
    scopes nodes/agents/node_instances by deployment ids, scopes
    events/operations by execution (group) ids, and forwards
    include_logs=False to events.dump."""
    with prepare_snapshot_create_with_mocks(
        'test-dump-tenants',
        rest_mocks=[
            (mock.Mock, (dump_type, 'dump'), [[]])
            for dump_type in ['sites', 'plugins', 'secrets_providers',
                              'secrets', 'blueprints',
                              'inter_deployment_dependencies',
                              'deployment_groups', 'deployment_updates',
                              'plugins_update', 'deployments_filters',
                              'blueprints_filters', 'execution_schedules',
                              'nodes', 'node_instances', 'agents', 'events',
                              'operations', 'tasks_graphs']
        ] + [
            (mock.Mock, ('tenants', 'list'), TWO_TENANTS_LIST_SE),
            (mock.Mock, ('blueprints', 'list'), TWO_BLUEPRINTS_LIST_SE),
            (mock.Mock, ('deployments', 'dump'),
             [[{'id': 'd1'}, {'id': 'd2'}]]),
            (mock.Mock, ('deployments', 'get'),
             {'workdir_zip': EMPTY_B64_ZIP}),
            (mock.Mock, ('inter_deployment_dependencies', 'dump'), [[]]),
            (mock.Mock, ('executions', 'dump'),
             [[{'id': 'e1'}, {'id': 'e2'}]]),
            (mock.Mock, ('execution_groups', 'dump'),
             [[{'id': 'eg1'}, {'id': 'eg2'}]]),
        ],
        include_logs=False
    ) as sc:
        sc._dump_tenant('tenant1')
        cli = sc._tenant_clients['tenant1']
        for dump_type in ['sites', 'plugins', 'secrets_providers', 'secrets',
                          'blueprints', 'deployments', 'deployment_groups',
                          'inter_deployment_dependencies', 'executions',
                          'execution_groups', 'deployment_updates',
                          'plugins_update', 'deployments_filters',
                          'blueprints_filters', 'execution_schedules']:
            method = getattr(sc._tenant_clients['tenant1'], dump_type).dump
            method.assert_called_once_with()
        for dump_type in ['nodes', 'agents']:
            method = getattr(cli, dump_type).dump
            method.assert_called_once_with(deployment_ids={'d1', 'd2'})
        cli.node_instances.dump.assert_called_once_with(
            deployment_ids={'d1', 'd2'},
            get_broker_conf=sc._agents_handler.get_broker_conf
        )
        cli.events.dump.assert_called_once_with(
            execution_ids={'e1', 'e2'},
            execution_group_ids={'eg1', 'eg2'},
            include_logs=False)
        cli.operations.dump.assert_called_once_with(execution_ids={'e1', 'e2'})
def test_create_success():
    """A full create() run dumps per-tenant data, marks the snapshot
    'created' with no error, and leaves the archive zip on disk."""
    with prepare_snapshot_create_with_mocks(
        'test-create-success',
        rest_mocks=[
            (mock.Mock, (dump_type, 'dump'), [[]])
            for dump_type in ['user_groups', 'tenants', 'users', 'permissions',
                              'sites', 'plugins', 'secrets_providers',
                              'secrets', 'blueprints', 'deployments',
                              'inter_deployment_dependencies',
                              'deployment_groups', 'deployment_updates',
                              'plugins_update', 'deployments_filters',
                              'blueprints_filters', 'execution_schedules',
                              'nodes', 'node_instances', 'agents', 'events',
                              'operations', 'tasks_graphs']
        ] + [
            (mock.Mock, ('tenants', 'list'), TWO_TENANTS_LIST_SE),
            (mock.Mock, ('blueprints', 'list'), TWO_BLUEPRINTS_LIST_SE),
            (mock.Mock, ('executions', 'dump'),
             [[{'id': 'e1'}, {'id': 'e2'}]]),
            (mock.Mock, ('execution_groups', 'dump'),
             [[{'id': 'eg1'}, {'id': 'eg2'}]]),
            (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
        ],
    ) as sc:
        sc.create(timeout=0.2)
        sc._tenant_clients['tenant1'].executions.dump.assert_called_once_with()
        sc._tenant_clients['tenant1'].events.dump.assert_called_once_with(
            execution_ids={'e1', 'e2'},
            execution_group_ids={'eg1', 'eg2'},
            include_logs=True)
        sc._client.snapshots.update_status.assert_called_once_with(
            sc._snapshot_id, status='created', error=None)
        assert os.path.isfile(sc._archive_dest.with_suffix('.zip'))
def test_create_events_dump_failure():
    """If events.dump raises, create() propagates, marks the snapshot
    'failed' with the error message, and leaves no archive zip behind."""
    with prepare_snapshot_create_with_mocks(
        'test-create-events-dump-failure',
        rest_mocks=[
            (mock.Mock, (dump_type, 'dump'), [[]])
            for dump_type in ['user_groups', 'tenants', 'users', 'permissions',
                              'sites', 'plugins', 'secrets_providers',
                              'secrets', 'blueprints', 'deployments',
                              'inter_deployment_dependencies',
                              'executions', 'execution_groups',
                              'deployment_groups', 'deployment_updates',
                              'plugins_update', 'deployments_filters',
                              'blueprints_filters', 'execution_schedules',
                              'nodes', 'node_instances', 'agents',
                              'operations']
        ] + [
            (mock.Mock, ('tenants', 'list'), TWO_TENANTS_LIST_SE),
            (mock.Mock, ('blueprints', 'list'), TWO_BLUEPRINTS_LIST_SE),
            (mock.Mock, ('events', 'dump'), [BaseException('test failure')]),
            (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
        ],
    ) as sc:
        with pytest.raises(BaseException):
            sc.create(timeout=0.2)
        sc._tenant_clients['tenant1'].deployments.dump.assert_called_once()
        sc._client.snapshots.update_status.assert_called_once_with(
            sc._snapshot_id, status='failed', error='test failure')
        assert not os.path.exists(f'{sc._archive_dest}.zip')
def test_create_failure_removes_snapshot_zip():
    """If updating the snapshot status fails after dumping, the partially
    written archive zip must be removed."""
    with prepare_snapshot_create_with_mocks(
        'test-failure-removes-snapshot-zip',
        rest_mocks=[
            (mock.Mock, (dump_type, 'dump'), [[]])
            for dump_type in [
                'user_groups', 'tenants', 'users', 'permissions', 'sites',
                'plugins', 'secrets_providers', 'secrets', 'blueprints',
                'deployments', 'inter_deployment_dependencies', 'executions',
                'execution_groups', 'deployment_groups', 'deployment_updates',
                'plugins_update', 'deployments_filters', 'blueprints_filters',
                'execution_schedules', 'nodes', 'node_instances', 'agents',
                'operations', 'events',
            ]
        ] + [
            (mock.Mock, ('tenants', 'list'), TWO_TENANTS_LIST_SE),
            (mock.Mock, ('blueprints', 'list'), TWO_BLUEPRINTS_LIST_SE),
            (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
        ],
    ) as sc:
        # First status update (-> 'created') blows up; second call succeeds.
        sc._update_snapshot_status = mock.Mock(side_effect=[
            Exception('error setting status to `created`'),
            mock.Mock(),
        ])
        with pytest.raises(BaseException):
            sc.create(timeout=0.2)
        sc._tenant_clients['tenant1'].deployments.dump.assert_called_once()
        assert not os.path.exists(f'{sc._archive_dest}.zip')
def test_create_skip_events():
    """With include_events=False, create() never calls events.dump."""
    with prepare_snapshot_create_with_mocks(
        'test-create-skip-events',
        rest_mocks=[
            (mock.Mock, (dump_type, 'dump'), [[]])
            for dump_type in ['user_groups', 'tenants', 'users', 'permissions',
                              'sites', 'plugins', 'secrets_providers',
                              'secrets', 'blueprints', 'execution_groups',
                              'inter_deployment_dependencies',
                              'deployment_groups', 'deployment_updates',
                              'plugins_update', 'deployments_filters',
                              'blueprints_filters', 'execution_schedules',
                              'nodes', 'node_instances', 'agents',
                              'operations', 'tasks_graphs']
        ] + [
            (mock.Mock, ('tenants', 'list'), TWO_TENANTS_LIST_SE),
            (mock.Mock, ('blueprints', 'list'), TWO_BLUEPRINTS_LIST_SE),
            (mock.Mock, ('deployments', 'dump'),
             [[{'id': 'd1'}, {'id': 'd2'}]]),
            (mock.Mock, ('deployments', 'get'),
             {'workdir_zip': EMPTY_B64_ZIP}),
            (mock.Mock, ('executions', 'dump'),
             [[{'id': 'e1'}, {'id': 'e2'}]]),
            (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
        ],
        include_events=False,
    ) as sc:
        sc.create(timeout=0.2)
        sc._tenant_clients['tenant1'].events.dump.assert_not_called()
def test_create_with_events():
    # Three events across two executions (e1 has two, e2 has one), with
    # strictly increasing millisecond timestamps so the expected
    # `latest_timestamp` is unambiguous.
    timestamp_seconds = '2023-05-09T08:28:46'
    events_dump_se = [[
        {
            '__entity': {
                '_storage_id': 1,
                'timestamp': f'{timestamp_seconds}.001Z',
                'message': 'message #1',
            },
            '__source': 'executions',
            '__source_id': 'e1'
        },
        {
            '__entity': {
                '_storage_id': 2,
                'timestamp': f'{timestamp_seconds}.002Z',
                'message': 'message #2',
            },
            '__source': 'executions',
            '__source_id': 'e1'
        },
        {
            '__entity': {
                '_storage_id': 3,
                'timestamp': f'{timestamp_seconds}.003Z',
                'message': 'message #1',
            },
            '__source': 'executions',
            '__source_id': 'e2'
        },
    ]]
    with prepare_snapshot_create_with_mocks(
        'test-create-with-events',
        rest_mocks=[
            (mock.Mock, (dump_type, 'dump'), [[]])
            for dump_type in ['user_groups', 'tenants', 'users', 'permissions',
                              'sites', 'plugins', 'secrets_providers',
                              'secrets', 'blueprints', 'execution_groups',
                              'inter_deployment_dependencies',
                              'deployment_groups', 'deployment_updates',
                              'plugins_update', 'deployments_filters',
                              'blueprints_filters', 'execution_schedules',
                              'nodes', 'node_instances', 'agents',
                              'operations', 'tasks_graphs']
        ] + [
            (mock.Mock, ('tenants', 'list'), ONE_TENANTS_LIST_SE),
            (mock.Mock, ('blueprints', 'list'), TWO_BLUEPRINTS_LIST_SE),
            (mock.Mock, ('deployments', 'dump'),
             [[{'id': 'd1'}, {'id': 'd2'}]]),
            (mock.Mock, ('deployments', 'get'),
             {'workdir_zip': EMPTY_B64_ZIP}),
            (mock.Mock, ('executions', 'dump'),
             [[{'id': 'e1'}, {'id': 'e2'}]]),
            (mock.Mock, ('events', 'dump'), events_dump_se),
            (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
        ],
    ) as sc:
        sc.create(timeout=0.2)
        # Events must be requested for exactly the dumped executions:
        sc._tenant_clients['tenant1'].events.dump.assert_called_once_with(
            execution_ids={'e1', 'e2'},
            execution_group_ids=set(),
            include_logs=True
        )
        snapshot = load_snapshot_to_dict(sc._archive_dest.with_suffix('.zip'))
        e1_key = ('events', 'executions', 'e1')
        e2_key = ('events', 'executions', 'e2')
        e1_events = snapshot['tenants']['tenant1'][e1_key]
        e2_events = snapshot['tenants']['tenant1'][e2_key]
        # NOTE(review): e1's own newest event is .002Z, yet .003Z is asserted;
        # `latest_timestamp` appears to be the global latest across the whole
        # events dump, not per-execution — confirm against the implementation.
        assert e1_events['latest_timestamp'] == f'{timestamp_seconds}.003Z'
        assert len(e1_events['items']) == 2
        assert e2_events['latest_timestamp'] == f'{timestamp_seconds}.003Z'
        assert len(e2_events['items']) == 1
def test_create_many_blueprints():
    """More blueprints than one 'page' (1002) all land in the snapshot,
    and latest_timestamp reflects the newest created_at."""
    base_ts = '2023-05-09T08:28:47'
    blueprints_page = []
    for n in range(1002):
        blueprints_page.append({
            'id': f'bp{n}',
            'tenant_name': 'tenant1',
            'created_at': f'{base_ts}.{n % 1000:03d}Z',
        })
    empty_dump_types = (
        'user_groups', 'tenants', 'users', 'permissions', 'sites',
        'plugins', 'secrets_providers', 'secrets', 'deployments',
        'inter_deployment_dependencies', 'deployment_groups',
        'deployment_updates', 'executions', 'execution_groups',
        'plugins_update', 'deployments_filters', 'blueprints_filters',
        'execution_schedules', 'nodes', 'node_instances', 'agents',
        'events', 'operations', 'tasks_graphs',
    )
    mocks = [(mock.Mock, (dump_type, 'dump'), [[]])
             for dump_type in empty_dump_types]
    mocks += [
        (mock.Mock, ('tenants', 'list'), ONE_TENANTS_LIST_SE),
        (mock.Mock, ('blueprints', 'dump'), [blueprints_page]),
        (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
    ]
    with prepare_snapshot_create_with_mocks(
        'test-create-many-blueprints',
        rest_mocks=mocks,
    ) as sc:
        sc.create(timeout=0.2)
        sc._tenant_clients['tenant1'].blueprints.dump.assert_called_once_with()
        snapshot = load_snapshot_to_dict(sc._archive_dest.with_suffix('.zip'))
        blueprints = snapshot['tenants']['tenant1'][('blueprints', None, None)]
        assert len(blueprints['items']) == 1002
        assert blueprints['latest_timestamp'] == f'{base_ts}.999Z'
def test_create_with_plugins():
    # 10 plugins with uploaded_at milliseconds 995..999,000..004; because the
    # millisecond part wraps at 1000, the newest expected timestamp is .999Z.
    timestamp_seconds = '2023-05-09T08:28:48'
    many_plugins_dump_se = [[{
        'id': f'plugin{n}',
        'tenant_name': 'tenant1',
        'uploaded_at': f'{timestamp_seconds}.{(n % 1000):03d}Z'
    } for n in range(995, 1005)]]
    with prepare_snapshot_create_with_mocks(
        'test-create-with-plugins',
        rest_mocks=[
            (mock.Mock, (dump_type, 'dump'), [[]])
            for dump_type in ['user_groups', 'tenants', 'users', 'permissions',
                              'sites', 'blueprints', 'secrets_providers',
                              'secrets', 'deployments',
                              'inter_deployment_dependencies',
                              'deployment_groups', 'deployment_updates',
                              'executions', 'execution_groups',
                              'plugins_update', 'deployments_filters',
                              'blueprints_filters', 'execution_schedules',
                              'nodes', 'node_instances', 'agents', 'events',
                              'operations', 'tasks_graphs']
        ] + [
            (mock.Mock, ('tenants', 'list'), ONE_TENANTS_LIST_SE),
            (mock.Mock, ('plugins', 'dump'), many_plugins_dump_se),
            (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
        ],
    ) as sc:
        sc.create(timeout=0.2)
        sc._tenant_clients['tenant1'].plugins.dump.assert_called_once_with()
        snapshot = load_snapshot_to_dict(sc._archive_dest.with_suffix('.zip'))
        plugins = snapshot['tenants']['tenant1'][('plugins', None, None)]
        assert plugins['latest_timestamp'] == f'{timestamp_seconds}.999Z'
        assert len(plugins['items']) == 10
def test_create_with_agents():
    # All agents belong to deployment d1; d2 exists but has no agents, so
    # its snapshot entry must come out empty.
    timestamp_seconds = '2023-05-09T08:28:49'
    many_agents_dump_se = [[{
        '__entity': {
            'id': f'agent{n}',
            'tenant_name': 'tenant1',
            'created_at': f'{timestamp_seconds}.{(n % 1000):03d}Z',
        },
        '__source_id': 'd1',
    } for n in range(995, 1005)]]
    with prepare_snapshot_create_with_mocks(
        'test-create-with-agents',
        rest_mocks=[
            (mock.Mock, (dump_type, 'dump'), [[]])
            for dump_type in ['user_groups', 'tenants', 'users', 'permissions',
                              'sites', 'blueprints', 'secrets_providers',
                              'secrets', 'inter_deployment_dependencies',
                              'deployment_groups', 'deployment_updates',
                              'executions', 'execution_groups',
                              'plugins_update', 'deployments_filters',
                              'blueprints_filters', 'execution_schedules',
                              'nodes', 'node_instances', 'events', 'plugins',
                              'operations', 'tasks_graphs']
        ] + [
            (mock.Mock, ('tenants', 'list'), ONE_TENANTS_LIST_SE),
            (mock.Mock, ('deployments', 'dump'),
             [[{'id': 'd1'}, {'id': 'd2'}]]),
            (mock.Mock, ('deployments', 'get'),
             {'workdir_zip': EMPTY_B64_ZIP}),
            (mock.Mock, ('agents', 'dump'), many_agents_dump_se),
            (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
        ],
    ) as sc:
        sc.create(timeout=0.2)
        # Agents must be requested for exactly the dumped deployments:
        sc._tenant_clients['tenant1'].agents.dump.assert_called_once_with(
            deployment_ids={'d1', 'd2'})
        snapshot = load_snapshot_to_dict(sc._archive_dest.with_suffix('.zip'))
        d1_agents = snapshot['tenants']['tenant1'][('agents', None, 'd1')]
        d2_agents = snapshot['tenants']['tenant1'][('agents', None, 'd2')]
        assert d1_agents['latest_timestamp'] == f'{timestamp_seconds}.999Z'
        assert len(d1_agents['items']) == 10
        assert d2_agents == {'items': {}, 'latest_timestamp': None}
def test_create_deployment_workdir():
    """A non-empty deployment workdir is stored verbatim as a .b64zip
    member inside the snapshot archive."""
    empty_dump_types = (
        'user_groups', 'tenants', 'users', 'permissions', 'sites',
        'blueprints', 'secrets_providers', 'secrets',
        'inter_deployment_dependencies', 'deployment_groups',
        'deployment_updates', 'executions', 'execution_groups', 'agents',
        'plugins_update', 'deployments_filters', 'blueprints_filters',
        'execution_schedules', 'nodes', 'node_instances', 'events',
        'plugins', 'operations', 'tasks_graphs',
    )
    mocks = [(mock.Mock, (dump_type, 'dump'), [[]])
             for dump_type in empty_dump_types]
    mocks += [
        (mock.Mock, ('tenants', 'list'), ONE_TENANTS_LIST_SE),
        (mock.Mock, ('deployments', 'dump'), [[{'id': 'd1'}]]),
        (mock.Mock, ('deployments', 'get'),
         {'workdir_zip': 'non-empty-workdir-content'}),
        (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
    ]
    with prepare_snapshot_create_with_mocks(
        'test-create-deployment-workdir',
        rest_mocks=mocks,
    ) as sc:
        sc.create(timeout=0.2)
        with zipfile.ZipFile(sc._archive_dest.with_suffix('.zip'), 'r') as zf:
            d1_archive = zf.read(
                'tenants/tenant1/deployments/d1.b64zip')
        assert d1_archive == b'non-empty-workdir-content'
def test_create_tasks_graphs():
    """Operations are dumped per execution and forwarded, grouped by
    execution id, to the tasks_graphs dump; the result is stored under the
    execution's key in the snapshot."""
    empty_dump_types = [
        'user_groups', 'tenants', 'users', 'permissions',
        'secrets_providers', 'secrets',
        'sites', 'blueprints', 'deployments',
        'inter_deployment_dependencies',
        'deployment_groups', 'deployment_updates',
        'executions', 'execution_groups', 'agents',
        'plugins_update', 'deployments_filters',
        'blueprints_filters', 'execution_schedules',
        'nodes', 'node_instances', 'events', 'plugins',
        'operations',
        # Fix: removed a stray '' entry here — it created a pointless mock
        # for a dump type named '' (leftover from when 'tasks_graphs' got
        # its own dedicated mock below). 'executions' and 'operations' stay
        # in this list because their dedicated mocks below take precedence.
    ]
    with prepare_snapshot_create_with_mocks(
        'test-create-tasks-graphs',
        rest_mocks=[
            (mock.Mock, (dump_type, 'dump'), [[]])
            for dump_type in empty_dump_types
        ] + [
            (mock.Mock, ('tenants', 'list'), ONE_TENANTS_LIST_SE),
            (mock.Mock, ('executions', 'dump'), [[{'id': 'e1'}]]),
            # op3 belongs to e2, which is absent from the executions dump;
            # it must still be grouped under its own key when forwarded.
            (mock.Mock, ('operations', 'dump'), [[
                {
                    '__entity': {'id': 'op1', 'tasks_graph_id': 'tg1'},
                    '__source_id': 'e1',
                },
                {
                    '__entity': {'id': 'op2', 'tasks_graph_id': 'tg2'},
                    '__source_id': 'e1',
                },
                {
                    '__entity': {'id': 'op3', 'tasks_graph_id': 'tg1'},
                    '__source_id': 'e2',
                },
            ]]),
            (mock.Mock, ('tasks_graphs', 'dump'), [[
                {
                    '__entity': {
                        'created_at': '2022-11-25T15:14:39.194Z',
                        'id': 'tg1',
                        'name': 'update_check_drift',
                        'execution_id': 'e1'
                    },
                    '__source_id': 'e1'
                }
            ]]),
            (mock.AsyncMock, ('auditlog', 'stream'), AuditLogResponse([])),
        ],
    ) as sc:
        sc.create(timeout=0.2)
        cli = sc._tenant_clients['tenant1']
        cli.operations.dump.assert_called_once_with(execution_ids={'e1'})
        cli.tasks_graphs.dump.assert_called_once_with(
            execution_ids={'e1'},
            operations={
                'e1': [
                    {'id': 'op1', 'tasks_graph_id': 'tg1'},
                    {'id': 'op2', 'tasks_graph_id': 'tg2'}
                ],
                'e2': [
                    {'id': 'op3', 'tasks_graph_id': 'tg1'}
                ]
            }
        )
        snapshot = load_snapshot_to_dict(sc._archive_dest.with_suffix('.zip'))
        e1_key = ('tasks_graphs', None, 'e1')
        e1_tasks_graphs = snapshot['tenants']['tenant1'][e1_key]
        assert len(e1_tasks_graphs['items']) == 1
|
12,815 | 8229a54455948ae88bdfdc854b5a062551077410 | # -*- coding: utf-8 -*-
# Copyright 2015 Yelp and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for parsing and interpreting logs.
There is one module for each kind of logs:
history: high-level job history info (found in <log dir>/history/)
step: stderr of `hadoop jar` command (so named because on EMR it appears in
<log dir>/steps/)
task: stderr and syslog of individual tasks (found in <log dir>/userlogs/)
Each of these should have methods like this:
_find_*_logs(fs, log_dir_stream, ...): Find paths of all logs of the given
type.
log_dir_stream is a list of lists of log dirs. We assume that you might
have multiple ways to fetch the same logs (e.g. from S3, or by SSHing to
nodes), so once we find a list of log dirs that works, we stop searching.
This yields dictionaries with the following format:
application_id: (YARN application ID)
attempt_id: (ID of task attempt)
container_id: (YARN container ID)
job_id: (ID of job)
path: path/URI of log
task_id: (ID of task)
_ls_*_logs(ls, log_dir, ...): Implementation of _find_*_logs().
Use mrjob.logs.wrap._find_logs() to invoke these.
_interpret_*_logs(fs, matches, ...):
Once we know where our logs are, search them for counters and/or errors.
In most cases, we want to stop when we find the first error.
counters: group -> counter -> amount
errors: [
hadoop_error: (for errors internal to Hadoop)
error: string representation of Java stack trace
path: URI of log file containing error
start_line: first line of <path> with error (0-indexed)
num_lines: # of lines containing error
task_error: (for errors caused by one task)
error: stderr explaining error (e.g. Python traceback)
command: command that was run to cause this error
path: (see above)
start_line: (see above)
num_lines: (see above)
application_id: (YARN application ID)
attempt_id: (ID of task attempt)
container_id: (YARN container ID)
job_id: (ID of job)
task_id: (ID of task)
]
This assumes there might be several sets of log dirs to look in (e.g.
the log dir on S3, directories on master and slave nodes, etc.).
_parse_*_log(lines):
Pull important information from a log file. This generally follows the same
format as _interpret_<type>_logs(), above.
Log lines are always strings (see mrjob.logs.wrap._cat_log()).
_parse_*_records(lines):
Helper method which parses low-level records out of a given log type.
There is one module for each kind of entity we want to deal with:
counters: manipulating and printing counters
errors: picking the best error, error reporting
ids: handles parsing IDs and sorting IDs by recency
Finally:
log4j: handles log4j record parsing (used by step and task syslog)
wrap: module for listing and catting logs in an error-free
way (since log parsing shouldn't kill a job).
"""
|
12,816 | 6c0bcb5bebb0e7167591a48683f55befad4f8bdf | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Print all solutions encoded in one file clingo output (with one answerset per line)
import sys
filename = sys.argv[1]
gridsize = int(sys.argv[2])
#print("This is the name of the script: ", sys.argv[0])
#print("Number of arguments: ", len(sys.argv))
#print("The arguments are: " , str(sys.argv))
count = 0
fr = open(filename)
for line in fr:
dic = {}
facts = line.split(' ')
for fact in facts:
if fact.startswith("cell("):
args = fact.replace('cell(','').replace('blank','.').replace(')','').replace('"','').replace(' ','.')
num, char = args.split(',')
num = int(num)
dic[num] = char.strip()
n = gridsize+2
if len(dic) == n*n :
count = count + 1
print "\nSolution " + str(count) + ":"
for i in range(n):
for j in range(n):
sys.stdout.write(dic[(i*n)+j])
sys.stdout.write(" ")
print ""
|
12,817 | ce907d89ce436c9ded3d18e38cc8f215a8e4fcca | # -*- coding: utf-8 -*-
from scrapy import Spider, Request
from Amazonspider.items import AmazonspiderItem
import re
class AmazonmacSpider(Spider):
    """Crawl Amazon search results for "macbook" and yield customer reviews.

    Flow: search-result pages (``parse``) -> product detail pages
    (``product_page_parse``) -> paginated customer-review pages
    (``customReview_parse``), which yield ``AmazonspiderItem`` objects.
    """

    name = 'AmazonMacSpider'
    allowed_domains = ['www.amazon.com']
    start_urls = ['https://www.amazon.com/s/ref=sr_pg_1?fst=p90x%3A1&rh=n%3A172282%2Ck%3Amacbook&keywords=macbook&ie=UTF8&qid=1522246332']

    def parse(self, response):
        # Collect product-detail URLs from the result list.
        urls = response.xpath('//a[@class="a-size-small a-link-normal a-text-normal"]/@href').extract()
        for url in urls:
            if 'customerReviews' in url:
                yield Request(url, callback=self.product_page_parse)
        # Follow the "next page" link. Fix: explicit emptiness check instead
        # of a bare `except:` (which also swallowed unrelated errors).
        next_page_url_part = response.xpath('//*[@id="pagnNextLink"]/@href').extract()
        if next_page_url_part:
            next_page_url = 'https://www.amazon.com' + next_page_url_part[-1]
            yield Request(next_page_url, callback=self.parse)
        else:
            print('No More next page!')

    def product_page_parse(self, response):
        # Fix: guard against a missing review link instead of letting an
        # IndexError kill the callback.
        customReview_url_part = response.xpath('//*[@id="reviews-medley-footer"]/div/a/@href').extract()
        if customReview_url_part:
            customReview_url = 'https://www.amazon.com' + customReview_url_part[-1]
            yield Request(customReview_url, callback=self.customReview_parse)

    def customReview_parse(self, response):
        # Yield one item per review on this page: star rating, date,
        # helpful-vote count and review text; then follow pagination.
        product_name = response.xpath('//*[@id="cm_cr-product_info"]/div/div[2]/div/div/div[2]/div[1]/h1/a/text()').extract()
        for info in response.xpath('//*[@id="cm_cr-review_list"]/div'):
            text = info.xpath('.//div[@class="a-row review-data"]//text()').extract()
            if len(text) > 0:
                tmp = info.xpath('.//div[@class="a-row"]//text()').extract()
                try:
                    useful = info.xpath('.//*[@data-hook="helpful-vote-statement"]/text()').extract()[-1]
                    useful_num = re.findall(r'\d+', useful)[-1]
                except IndexError:
                    # Review has no "N people found this helpful" line.
                    useful_num = 0
                # Fix: create a fresh item per review — the original reused a
                # single item instance, so deferred pipelines could observe
                # later mutations of the same object.
                item = AmazonspiderItem()
                item['product_name'] = product_name[-1]
                item['review_star'] = tmp[0]
                item['review_time'] = tmp[4]
                item['useful_num'] = useful_num
                item['product_review'] = text[-1]
                yield item
        # Follow review pagination; same explicit guard as above.
        next_url_part = response.xpath('.//li[@class="a-last"]/a/@href').extract()
        if next_url_part:
            next_url = 'https://www.amazon.com' + next_url_part[-1]
            yield Request(next_url, callback=self.customReview_parse)
12,818 | ebeeafb91d72d8b3c4ecb2120c6bbc860060a189 | nota1 = float(input('digite uma nota: '))
nota2 = float(input('digite outra nota: '))
print('a media das notas desse aluno será: \033[34m{:.1f}\033[m'.format((nota1+nota2)/2))
|
12,819 | fa715c6a2f91a5778127ba41ef2e5eb835be58bb | from __future__ import annotations
import json
import operator as op
import typing
import warnings
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime
from functools import cached_property
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Dict,
Generator,
Iterator,
List,
Literal,
Optional,
Sequence,
Tuple,
Union,
cast,
overload,
)
from cognite.client import utils
from cognite.client.data_classes._base import CogniteResource, CogniteResourceList
from cognite.client.utils._auxiliary import find_duplicates, local_import
from cognite.client.utils._identifier import Identifier
from cognite.client.utils._pandas_helpers import (
concat_dataframes_with_nullable_int_cols,
notebook_display_with_fallback,
)
from cognite.client.utils._text import (
convert_all_keys_to_camel_case,
convert_all_keys_to_snake_case,
to_camel_case,
to_snake_case,
)
# All aggregate names accepted by the datapoints API, expressed as a Literal
# so static type checkers can validate user-supplied aggregate strings.
Aggregate = Literal[
    "average",
    "continuous_variance",
    "count",
    "discrete_variance",
    "interpolation",
    "max",
    "min",
    "step_interpolation",
    "sum",
    "total_variation",
]
# Alphabetically sorted list of all aggregates, derived from the Literal
# above so the two definitions cannot drift apart:
ALL_SORTED_DP_AGGS = sorted(typing.get_args(Aggregate))
# numpy is an optional dependency; array-based code paths check this flag
# before using it.
try:
    import numpy as np

    NUMPY_IS_AVAILABLE = True
except ImportError:  # pragma: no cover
    # (Fixed `pragma no cover` -> `pragma: no cover`; without the colon the
    # coverage exclusion comment has no effect.)
    NUMPY_IS_AVAILABLE = False

if TYPE_CHECKING:
    # Imported only for annotations, avoiding a hard runtime dependency and
    # import cycles:
    import numpy.typing as npt
    import pandas

    from cognite.client import CogniteClient

    # Aliases for the numpy array types used throughout this module:
    NumpyDatetime64NSArray = npt.NDArray[np.datetime64]
    NumpyInt64Array = npt.NDArray[np.int64]
    NumpyFloat64Array = npt.NDArray[np.float64]
    NumpyObjArray = npt.NDArray[np.object_]
@dataclass(frozen=True)
class LatestDatapointQuery:
    """Parameters describing a query for the latest datapoint from a time series.

    Note:
        Pass either ID or external ID.

    Args:
        id (Optional[int]): The internal ID of the time series to query.
        external_id (Optional[str]): The external ID of the time series to query.
        before (Union[None, int, str, datetime]): Get latest datapoint before this time. None means 'now'.
    """

    # Exactly one of `id`/`external_id` must be given (validated below):
    id: Optional[int] = None
    external_id: Optional[str] = None
    before: Union[None, int, str, datetime] = None

    def __post_init__(self) -> None:
        # Raises if neither or both of id/external_id were provided:
        Identifier.of_either(self.id, self.external_id)
class Datapoint(CogniteResource):
    """An object representing a datapoint.

    Args:
        timestamp (Union[int, float]): The data timestamp in milliseconds since the epoch (Jan 1, 1970). Can be negative to define a date before 1970. Minimum timestamp is 1900.01.01 00:00:00 UTC
        value (Union[str, float]): The data value. Can be string or numeric
        average (float): The integral average value in the aggregate period
        max (float): The maximum value in the aggregate period
        min (float): The minimum value in the aggregate period
        count (int): The number of datapoints in the aggregate period
        sum (float): The sum of the datapoints in the aggregate period
        interpolation (float): The interpolated value of the series in the beginning of the aggregate
        step_interpolation (float): The last value before or at the beginning of the aggregate.
        continuous_variance (float): The variance of the interpolated underlying function.
        discrete_variance (float): The variance of the datapoint values.
        total_variation (float): The total variation of the interpolated underlying function.
    """

    # NOTE: several parameter names (max/min/count/sum) shadow builtins; they
    # are kept to mirror the API's aggregate field names.
    def __init__(
        self,
        timestamp: Optional[int] = None,
        value: Optional[Union[str, float]] = None,
        average: Optional[float] = None,
        max: Optional[float] = None,
        min: Optional[float] = None,
        count: Optional[int] = None,
        sum: Optional[float] = None,
        interpolation: Optional[float] = None,
        step_interpolation: Optional[float] = None,
        continuous_variance: Optional[float] = None,
        discrete_variance: Optional[float] = None,
        total_variation: Optional[float] = None,
    ):
        self.timestamp = timestamp
        self.value = value
        self.average = average
        self.max = max
        self.min = min
        self.count = count
        self.sum = sum
        self.interpolation = interpolation
        self.step_interpolation = step_interpolation
        self.continuous_variance = continuous_variance
        self.discrete_variance = discrete_variance
        self.total_variation = total_variation

    def to_pandas(self, camel_case: bool = False) -> pandas.DataFrame:  # type: ignore[override]
        """Convert the datapoint into a pandas DataFrame.

        Args:
            camel_case (bool): Convert column names to camel case (e.g. `stepInterpolation` instead of `step_interpolation`)

        Returns:
            pandas.DataFrame
        """
        pd = cast(Any, local_import("pandas"))

        dumped = self.dump(camel_case=camel_case)
        # The timestamp becomes the single-row index rather than a column:
        timestamp = dumped.pop("timestamp")
        return pd.DataFrame(dumped, index=[pd.Timestamp(timestamp, unit="ms")])
class DatapointsArray(CogniteResource):
    """An object representing datapoints using numpy arrays.

    Holds one `timestamp` array (datetime64[ns]) plus, depending on the
    query, either a raw `value` array or one array per requested aggregate.
    The scalar attributes (`id`, `external_id`, `is_string`, `is_step`,
    `unit`, `granularity`) describe the originating time series.
    """

    def __init__(
        self,
        id: Optional[int] = None,
        external_id: Optional[str] = None,
        is_string: Optional[bool] = None,
        is_step: Optional[bool] = None,
        unit: Optional[str] = None,
        granularity: Optional[str] = None,
        timestamp: Optional[NumpyDatetime64NSArray] = None,
        value: Optional[Union[NumpyFloat64Array, NumpyObjArray]] = None,
        average: Optional[NumpyFloat64Array] = None,
        max: Optional[NumpyFloat64Array] = None,
        min: Optional[NumpyFloat64Array] = None,
        count: Optional[NumpyInt64Array] = None,
        sum: Optional[NumpyFloat64Array] = None,
        interpolation: Optional[NumpyFloat64Array] = None,
        step_interpolation: Optional[NumpyFloat64Array] = None,
        continuous_variance: Optional[NumpyFloat64Array] = None,
        discrete_variance: Optional[NumpyFloat64Array] = None,
        total_variation: Optional[NumpyFloat64Array] = None,
    ):
        self.id = id
        self.external_id = external_id
        self.is_string = is_string
        self.is_step = is_step
        self.unit = unit
        self.granularity = granularity
        # Default to an empty ns-resolution array so __len__/iteration work
        # on an "empty" instance:
        self.timestamp = timestamp if timestamp is not None else np.array([], dtype="datetime64[ns]")
        self.value = value
        self.average = average
        self.max = max
        self.min = min
        self.count = count
        self.sum = sum
        self.interpolation = interpolation
        self.step_interpolation = step_interpolation
        self.continuous_variance = continuous_variance
        self.discrete_variance = discrete_variance
        self.total_variation = total_variation

    @property
    def _ts_info(self) -> Dict[str, Any]:
        # Time series metadata, reused when constructing derived instances
        # (slices, concatenations).
        return {
            "id": self.id,
            "external_id": self.external_id,
            "is_string": self.is_string,
            "is_step": self.is_step,
            "unit": self.unit,
            "granularity": self.granularity,
        }

    @classmethod
    def _load(  # type: ignore [override]
        cls,
        dps_dct: Dict[str, Union[int, str, bool, npt.NDArray]],
    ) -> DatapointsArray:
        # NOTE(review): this mutates dps_dct["timestamp"] in place, so the
        # caller's dict is altered — confirm callers don't reuse it.
        assert isinstance(dps_dct["timestamp"], np.ndarray)  # mypy love
        # Since pandas always uses nanoseconds for datetime, we stick with the same
        # (also future-proofs the SDK; ns is coming!):
        dps_dct["timestamp"] = dps_dct["timestamp"].astype("datetime64[ms]").astype("datetime64[ns]")
        return cls(**convert_all_keys_to_snake_case(dps_dct))

    @classmethod
    def create_from_arrays(cls, *arrays: DatapointsArray) -> DatapointsArray:
        # Concatenate several arrays into one; inputs are ordered by their
        # first timestamp (no element-wise merge of overlapping arrays).
        # NOTE(review): raises IndexError when called with zero arguments;
        # when every input is empty, arrays[0] is returned unchanged —
        # confirm callers guarantee at least one input.
        sort_by_time = sorted((a for a in arrays if len(a.timestamp) > 0), key=lambda a: a.timestamp[0])
        if len(sort_by_time) == 0:
            return arrays[0]
        elif len(sort_by_time) == 1:
            return sort_by_time[0]

        first = sort_by_time[0]
        arrays_by_attribute = defaultdict(list)
        for array in sort_by_time:
            for attr, arr in zip(*array._data_fields()):
                arrays_by_attribute[attr].append(arr)
        # Rebind from defaultdict-of-lists to a plain dict of concatenated arrays:
        arrays_by_attribute = {attr: np.concatenate(arrs) for attr, arrs in arrays_by_attribute.items()}  # type: ignore [assignment]

        # Time series metadata is taken from the chronologically first array:
        return cls(**first._ts_info, **arrays_by_attribute)  # type: ignore [arg-type]

    def __len__(self) -> int:
        return len(self.timestamp)

    def __eq__(self, other: Any) -> bool:
        # Override CogniteResource __eq__ which checks exact type & dump being equal. We do not want
        # this: comparing arrays with (mostly) floats is a very bad idea; also dump is exceedingly expensive.
        return id(self) == id(other)

    def __str__(self) -> str:
        return json.dumps(self.dump(convert_timestamps=True), indent=4)

    @overload
    def __getitem__(self, item: int) -> Datapoint:
        ...

    @overload
    def __getitem__(self, item: slice) -> DatapointsArray:
        ...

    def __getitem__(self, item: Union[int, slice]) -> Union[Datapoint, DatapointsArray]:
        # int -> single Datapoint; slice -> new DatapointsArray (views).
        if isinstance(item, slice):
            return self._slice(item)
        attrs, arrays = self._data_fields()
        # Timestamps are stored as ns; Datapoint uses ms, hence // 1_000_000:
        return Datapoint(
            timestamp=self._dtype_fix(arrays[0][item]) // 1_000_000,
            **{attr: self._dtype_fix(arr[item]) for attr, arr in zip(attrs[1:], arrays[1:])},
        )

    def _slice(self, part: slice) -> DatapointsArray:
        # Slice every populated data array, keeping the time series metadata.
        data: Dict[str, Any] = {attr: arr[part] for attr, arr in zip(*self._data_fields())}
        return DatapointsArray(**self._ts_info, **data)

    def __iter__(self) -> Iterator[Datapoint]:
        # Let's not create a single Datapoint more than we have too:
        attrs, arrays = self._data_fields()
        yield from (
            Datapoint(
                timestamp=self._dtype_fix(row[0]) // 1_000_000, **dict(zip(attrs[1:], map(self._dtype_fix, row[1:])))
            )
            for row in zip(*arrays)
        )

    @cached_property
    def _dtype_fix(self) -> Callable:
        # Converter applied to every scalar extracted from the arrays:
        if self.is_string:
            # Return no-op as array contains just references to vanilla python objects:
            return lambda s: s
        # Using .item() on numpy scalars gives us vanilla python types:
        return op.methodcaller("item")

    def _data_fields(self) -> Tuple[List[str], List[npt.NDArray]]:
        # Returns the names and arrays of all populated data columns;
        # timestamp is always present and always first.
        data_field_tuples = [
            (attr, arr)
            for attr in ("timestamp", "value", *ALL_SORTED_DP_AGGS)  # ts must be first!
            if (arr := getattr(self, attr)) is not None
        ]
        attrs, arrays = map(list, zip(*data_field_tuples))
        return attrs, arrays  # type: ignore [return-value]

    def dump(self, camel_case: bool = False, convert_timestamps: bool = False) -> Dict[str, Any]:
        """Dump the DatapointsArray into a json serializable Python data type.

        Args:
            camel_case (bool): Use camelCase for attribute names. Default: False.
            convert_timestamps (bool): Convert timestamps to ISO 8601 formatted strings. Default: False (returns as integer, milliseconds since epoch)

        Returns:
            Dict[str, Any]: A dictionary representing the instance.
        """
        attrs, arrays = self._data_fields()
        if not convert_timestamps:  # Eh.. so.. we still have to convert...
            # ns -> ms integers:
            arrays[0] = arrays[0].astype("datetime64[ms]").astype(np.int64)
        else:
            # Note: numpy does not have a strftime method to get the exact format we want (hence the datetime detour)
            # and for some weird reason .astype(datetime) directly from dt64 returns native integer... whatwhyy
            arrays[0] = arrays[0].astype("datetime64[ms]").astype(datetime).astype(str)

        if camel_case:
            attrs = list(map(to_camel_case, attrs))

        dumped = {**self._ts_info, "datapoints": [dict(zip(attrs, map(self._dtype_fix, row))) for row in zip(*arrays)]}
        if camel_case:
            dumped = convert_all_keys_to_camel_case(dumped)
        return {k: v for k, v in dumped.items() if v is not None}

    def to_pandas(  # type: ignore [override]
        self,
        column_names: Literal["id", "external_id"] = "external_id",
        include_aggregate_name: bool = True,
        include_granularity_name: bool = False,
    ) -> pandas.DataFrame:
        """Convert the DatapointsArray into a pandas DataFrame.

        Args:
            column_names (str): Which field to use as column header. Defaults to "external_id", can also be "id". For time series with no external ID, ID will be used instead.
            include_aggregate_name (bool): Include aggregate in the column name
            include_granularity_name (bool): Include granularity in the column name (after aggregate if present)

        Returns:
            pandas.DataFrame: The datapoints as a pandas DataFrame.
        """
        pd = cast(Any, local_import("pandas"))
        if column_names == "id":
            if self.id is None:
                raise ValueError("Unable to use `id` as column name(s), not set on object")
            identifier = str(self.id)
        elif column_names == "external_id":
            if self.external_id is not None:
                identifier = self.external_id
            elif self.id is not None:
                # Time series are not required to have an external_id unfortunately...
                identifier = str(self.id)
                warnings.warn(
                    f"Time series does not have an external ID, so its ID ({self.id}) was used instead as "
                    'the column name in the DataFrame. If this is expected, consider passing `column_names="id"` '
                    "to silence this warning.",
                    UserWarning,
                )
            else:
                raise ValueError("Object missing both `id` and `external_id` attributes")
        else:
            raise ValueError("Argument `column_names` must be either 'external_id' or 'id'")

        # Raw datapoints: one value column, named after the identifier:
        if self.value is not None:
            return pd.DataFrame({identifier: self.value}, index=self.timestamp, copy=False)

        # Aggregate datapoints: one column per aggregate, optionally suffixed
        # with the aggregate and/or granularity name:
        (_, *agg_names), (_, *arrays) = self._data_fields()
        columns = [
            str(identifier) + include_aggregate_name * f"|{agg}" + include_granularity_name * f"|{self.granularity}"
            for agg in agg_names
        ]
        # Since columns might contain duplicates, we can't instantiate from dict as only the
        # last key (array/column) would be kept:
        (df := pd.DataFrame(dict(enumerate(arrays)), index=self.timestamp, copy=False)).columns = columns
        return df
class Datapoints(CogniteResource):
"""An object representing a list of datapoints.
Args:
id (int): Id of the timeseries the datapoints belong to
external_id (str): External id of the timeseries the datapoints belong to
is_string (bool): Whether the time series is string valued or not.
is_step (bool): Whether the time series is a step series or not.
unit (str): The physical unit of the time series.
granularity (str): The granularity of the aggregate datapoints (does not apply to raw data)
timestamp (List[int]): The data timestamps in milliseconds since the epoch (Jan 1, 1970). Can be negative to define a date before 1970. Minimum timestamp is 1900.01.01 00:00:00 UTC
value (Union[List[str], List[float]]): The data values. Can be string or numeric
average (List[float]): The integral average values in the aggregate period
max (List[float]): The maximum values in the aggregate period
min (List[float]): The minimum values in the aggregate period
count (List[int]): The number of datapoints in the aggregate periods
sum (List[float]): The sum of the datapoints in the aggregate periods
interpolation (List[float]): The interpolated values of the series in the beginning of the aggregates
step_interpolation (List[float]): The last values before or at the beginning of the aggregates.
continuous_variance (List[float]): The variance of the interpolated underlying function.
discrete_variance (List[float]): The variance of the datapoint values.
total_variation (List[float]): The total variation of the interpolated underlying function.
"""
def __init__(
    self,
    id: Optional[int] = None,
    external_id: Optional[str] = None,
    is_string: Optional[bool] = None,
    is_step: Optional[bool] = None,
    unit: Optional[str] = None,
    granularity: Optional[str] = None,
    timestamp: Optional[Sequence[int]] = None,
    value: Optional[Union[Sequence[str], Sequence[float]]] = None,
    average: Optional[List[float]] = None,
    max: Optional[List[float]] = None,
    min: Optional[List[float]] = None,
    count: Optional[List[int]] = None,
    sum: Optional[List[float]] = None,
    interpolation: Optional[List[float]] = None,
    step_interpolation: Optional[List[float]] = None,
    continuous_variance: Optional[List[float]] = None,
    discrete_variance: Optional[List[float]] = None,
    total_variation: Optional[List[float]] = None,
    error: Optional[List[Union[None, str]]] = None,
):
    self.id = id
    self.external_id = external_id
    self.is_string = is_string
    self.is_step = is_step
    self.unit = unit
    self.granularity = granularity
    self.timestamp = timestamp or []  # Needed in __len__
    self.value = value
    self.average = average
    self.max = max
    self.min = min
    self.count = count
    self.sum = sum
    self.interpolation = interpolation
    self.step_interpolation = step_interpolation
    self.continuous_variance = continuous_variance
    self.discrete_variance = discrete_variance
    self.total_variation = total_variation
    # Only populated for synthetic datapoint queries (see
    # to_pandas(include_errors=...)):
    self.error = error
    # Lazily built cache of per-datapoint objects, filled on first
    # iteration/dump by __get_datapoint_objects():
    self.__datapoint_objects: Optional[List[Datapoint]] = None
def __str__(self) -> str:
    """Return an indented JSON string with timestamps rendered as datetimes."""
    item = self.dump()
    item["datapoints"] = utils._time.convert_time_attributes_to_datetime(item["datapoints"])
    return json.dumps(item, indent=4)
def __len__(self) -> int:
    # Number of datapoints; `timestamp` defaults to [] in __init__.
    return len(self.timestamp)
def __eq__(self, other: Any) -> bool:
    # Equal when exact type, identity fields and every populated data
    # column match.
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable unless a base class supplies __hash__ — confirm intended.
    return (
        type(self) == type(other)
        and self.id == other.id
        and self.external_id == other.external_id
        and list(self._get_non_empty_data_fields()) == list(other._get_non_empty_data_fields())
    )
@overload
def __getitem__(self, item: int) -> Datapoint:
    ...

@overload
def __getitem__(self, item: slice) -> Datapoints:
    ...

def __getitem__(self, item: Union[int, slice]) -> Union[Datapoint, Datapoints]:
    """Index with an int for a single Datapoint, or a slice for a new Datapoints."""
    if isinstance(item, slice):
        return self._slice(item)
    # Build a single Datapoint from the item-th element of every populated column:
    dp_args = {}
    for attr, values in self._get_non_empty_data_fields():
        dp_args[attr] = values[item]
    return Datapoint(**dp_args)
def __iter__(self) -> Generator[Datapoint, None, None]:
    """Iterate over the data as individual Datapoint objects (cached)."""
    yield from self.__get_datapoint_objects()
def dump(self, camel_case: bool = False) -> Dict[str, Any]:
    """Dump the datapoints into a json serializable Python data type.

    Args:
        camel_case (bool): Use camelCase for attribute names. Defaults to False.

    Returns:
        Dict[str, Any]: A dictionary representing the instance.
    """
    dumped = {
        "id": self.id,
        "external_id": self.external_id,
        "is_string": self.is_string,
        "is_step": self.is_step,
        "unit": self.unit,
        "datapoints": [dp.dump(camel_case=camel_case) for dp in self.__get_datapoint_objects()],
    }
    if camel_case:
        dumped = convert_all_keys_to_camel_case(dumped)
    # Drop keys that were never set (None) so the serialized form stays compact.
    return {key: value for key, value in dumped.items() if value is not None}
def to_pandas(  # type: ignore [override]
    self,
    column_names: str = "external_id",
    include_aggregate_name: bool = True,
    include_granularity_name: bool = False,
    include_errors: bool = False,
) -> pandas.DataFrame:
    """Convert the datapoints into a pandas DataFrame.

    Args:
        column_names (str): Which field to use as column header. Defaults to "external_id", can also be "id". For time series with no external ID, ID will be used instead.
        include_aggregate_name (bool): Include aggregate in the column name
        include_granularity_name (bool): Include granularity in the column name (after aggregate if present)
        include_errors (bool): For synthetic datapoint queries, include a column with errors.

    Returns:
        pandas.DataFrame: The dataframe.

    Raises:
        ValueError: If column_names is invalid, or include_errors is requested
            for non-synthetic data (self.error is None).
    """
    pd = cast(Any, local_import("pandas"))
    if column_names in ["external_id", "externalId"]:  # Camel case for backwards compat
        identifier = self.external_id if self.external_id is not None else self.id
    elif column_names == "id":
        identifier = self.id
    else:
        raise ValueError("column_names must be 'external_id' or 'id'")
    if include_errors and self.error is None:
        raise ValueError("Unable to 'include_errors', only available for data from synthetic datapoint queries")
    # Make sure columns (aggregates) always come in alphabetical order (e.g. "average" before "max"):
    field_names, data_lists = [], []
    data_fields = self._get_non_empty_data_fields(get_empty_lists=True, get_error=include_errors)
    if not include_errors:  # We do not touch column ordering for synthetic datapoints
        data_fields = sorted(data_fields)
    for attr, data in data_fields:
        if attr == "timestamp":
            # Timestamps become the index, never a column.
            continue
        id_col_name = str(identifier)
        if attr == "value":
            field_names.append(id_col_name)
            data_lists.append(data)
            continue
        if include_aggregate_name:
            id_col_name += f"|{attr}"
        if include_granularity_name and self.granularity is not None:
            id_col_name += f"|{self.granularity}"
        field_names.append(id_col_name)
        if attr == "error":
            data_lists.append(data)
            continue  # Keep string (object) column non-numeric
        data = pd.to_numeric(data, errors="coerce")  # Avoids object dtype for missing aggs
        if attr == "count":
            data_lists.append(data.astype("int64"))
        else:
            data_lists.append(data.astype("float64"))
    idx = pd.to_datetime(self.timestamp, unit="ms")
    # Integer column labels avoid duplicate-name collisions during construction;
    # the real (possibly duplicated) names are assigned afterwards.
    (df := pd.DataFrame(dict(enumerate(data_lists)), index=idx)).columns = field_names
    return df
@classmethod
def _load(  # type: ignore [override]
    cls,
    dps_object: Dict[str, Any],
    expected_fields: Optional[List[str]] = None,
    cognite_client: Optional[CogniteClient] = None,
) -> Datapoints:
    """Construct a Datapoints instance from a raw API response dict.

    Args:
        dps_object: Response with camelCase keys and a "datapoints" list of dicts.
        expected_fields: Datapoint fields to extract; defaults to ["value"].
            "timestamp" is always extracted in addition.
        cognite_client: Unused; accepted only to match the base-class signature.
    """
    del cognite_client  # just needed for signature
    instance = cls(
        id=dps_object.get("id"),
        external_id=dps_object.get("externalId"),
        is_string=dps_object["isString"],
        is_step=dps_object.get("isStep"),
        unit=dps_object.get("unit"),
    )
    expected_fields = (expected_fields or ["value"]) + ["timestamp"]
    if len(dps_object["datapoints"]) == 0:
        # No data: still set empty lists so the attributes exist downstream.
        for key in expected_fields:
            snake_key = to_snake_case(key)
            setattr(instance, snake_key, [])
    else:
        # Transpose the list-of-dicts into one list per expected field.
        for key in expected_fields:
            data = [dp.get(key) for dp in dps_object["datapoints"]]
            snake_key = to_snake_case(key)
            setattr(instance, snake_key, data)
    return instance
def _extend(self, other_dps: Datapoints) -> None:
    """Append other_dps's data fields to this instance in place.

    Assumes both objects belong to the same time series; metadata is copied
    from other_dps only when this instance has no identifiers yet.
    """
    if self.id is None and self.external_id is None:
        self.id = other_dps.id
        self.external_id = other_dps.external_id
        self.is_string = other_dps.is_string
        self.is_step = other_dps.is_step
        self.unit = other_dps.unit
    for attr, other_value in other_dps._get_non_empty_data_fields(get_empty_lists=True):
        value = getattr(self, attr)
        if not value:
            # Field unset (None) or empty here: adopt the other object's list.
            setattr(self, attr, other_value)
        else:
            value.extend(other_value)
def _get_non_empty_data_fields(
    self, get_empty_lists: bool = False, get_error: bool = True
) -> List[Tuple[str, Any]]:
    """Return (attribute, values) pairs for the populated data fields.

    Args:
        get_empty_lists: Also include fields whose list is empty (but not None).
        get_error: Include the "error" field (present for synthetic queries).
    """
    non_empty_data_fields = []
    # Metadata attributes and private (underscore) fields are never data fields.
    skip_attrs = {"id", "external_id", "is_string", "is_step", "unit", "granularity"}
    for attr, value in self.__dict__.copy().items():
        if attr not in skip_attrs and attr[0] != "_" and (attr != "error" or get_error):
            # "timestamp" is always returned since it defines __len__.
            if value is not None or attr == "timestamp":
                if len(value) > 0 or get_empty_lists or attr == "timestamp":
                    non_empty_data_fields.append((attr, value))
    return non_empty_data_fields
def __get_datapoint_objects(self) -> List[Datapoint]:
    """Build (and cache) one Datapoint object per row of data."""
    if self.__datapoint_objects is not None:
        return self.__datapoint_objects
    fields = self._get_non_empty_data_fields(get_error=False)
    new_dps_objects = []
    for i in range(len(self)):
        dp_args = {}
        for attr, value in fields:
            dp_args[attr] = value[i]
        new_dps_objects.append(Datapoint(**dp_args))
    self.__datapoint_objects = new_dps_objects
    return self.__datapoint_objects
def _slice(self, slice: slice) -> Datapoints:
    """Return a new Datapoints with every data field sliced by `slice`.

    NOTE(review): the parameter shadows the builtin `slice`; harmless here
    since the builtin is not used in the body.
    """
    truncated_datapoints = Datapoints(id=self.id, external_id=self.external_id)
    for attr, value in self._get_non_empty_data_fields():
        setattr(truncated_datapoints, attr, value[slice])
    return truncated_datapoints
def _repr_html_(self) -> str:
    """Rich notebook repr; includes the error column for synthetic queries."""
    is_synthetic_dps = self.error is not None
    return notebook_display_with_fallback(self, include_errors=is_synthetic_dps)
class DatapointsArrayList(CogniteResourceList[DatapointsArray]):
    """List of DatapointsArray with lookup support for duplicated identifiers."""

    _RESOURCE = DatapointsArray

    def __init__(self, resources: Collection[Any], cognite_client: Optional[CogniteClient] = None):
        super().__init__(resources, cognite_client)
        # Fix what happens for duplicated identifiers:
        ids = [dps.id for dps in self if dps.id is not None]
        xids = [dps.external_id for dps in self if dps.external_id is not None]
        dupe_ids, id_dct = find_duplicates(ids), defaultdict(list)
        dupe_xids, xid_dct = find_duplicates(xids), defaultdict(list)
        for dps in self:
            if (id_ := dps.id) is not None and id_ in dupe_ids:
                id_dct[id_].append(dps)
            if (xid := dps.external_id) is not None and xid in dupe_xids:
                xid_dct[xid].append(dps)
        # Duplicated identifiers map to a *list* of arrays instead of a single item.
        self._id_to_item.update(id_dct)
        self._external_id_to_item.update(xid_dct)

    def concat_duplicate_ids(self) -> None:
        """
        Concatenates all arrays with duplicated IDs.

        Arrays with the same ids are stacked in chronological order.

        **Caveat** This method is not guaranteed to preserve the order of the list.
        """
        # Rebuilt list instead of removing duplicated one at a time at the cost of O(n).
        self.data.clear()
        # This implementation takes advantage of the ordering of the duplicated in the __init__ method
        has_external_ids = set()
        for ext_id, items in self._external_id_to_item.items():
            if not isinstance(items, list):
                # Not duplicated: keep the single array as-is.
                self.data.append(items)
                if items.id is not None:
                    has_external_ids.add(items.id)
                continue
            concatenated = DatapointsArray.create_from_arrays(*items)
            self._external_id_to_item[ext_id] = concatenated
            if concatenated.id is not None:
                has_external_ids.add(concatenated.id)
                self._id_to_item[concatenated.id] = concatenated
            self.data.append(concatenated)
        # Handle arrays identified only by (internal) id, not covered above:
        if not (only_ids := set(self._id_to_item) - has_external_ids):
            return
        for id_, items in self._id_to_item.items():
            if id_ not in only_ids:
                continue
            if not isinstance(items, list):
                self.data.append(items)
                continue
            concatenated = DatapointsArray.create_from_arrays(*items)
            self._id_to_item[id_] = concatenated
            self.data.append(concatenated)

    def get(  # type: ignore [override]
        self,
        id: Optional[int] = None,
        external_id: Optional[str] = None,
    ) -> Union[None, DatapointsArray, List[DatapointsArray]]:
        """Get a specific DatapointsArray from this list by id or external_id.

        Note: For duplicated time series, returns a list of DatapointsArray.

        Args:
            id (int): The id of the item(s) to get.
            external_id (str): The external_id of the item(s) to get.

        Returns:
            Union[None, DatapointsArray, List[DatapointsArray]]: The requested item(s)
        """
        # TODO: Question, can we type annotate without specifying the function?
        return super().get(id, external_id)  # type: ignore [return-value]

    def __str__(self) -> str:
        """JSON representation with timestamps rendered as ISO 8601 strings."""
        return json.dumps(self.dump(convert_timestamps=True), indent=4)

    def to_pandas(  # type: ignore [override]
        self,
        column_names: Literal["id", "external_id"] = "external_id",
        include_aggregate_name: bool = True,
        include_granularity_name: bool = False,
    ) -> pandas.DataFrame:
        """Convert the DatapointsArrayList into a pandas DataFrame.

        Args:
            column_names (str): Which field to use as column header. Defaults to "external_id", can also be "id". For time series with no external ID, ID will be used instead.
            include_aggregate_name (bool): Include aggregate in the column name
            include_granularity_name (bool): Include granularity in the column name (after aggregate if present)

        Returns:
            pandas.DataFrame: The datapoints as a pandas DataFrame.
        """
        pd = cast(Any, local_import("pandas"))
        dfs = [dps.to_pandas(column_names, include_aggregate_name, include_granularity_name) for dps in self]
        if not dfs:
            # Empty list: return an empty frame with a (empty) datetime index.
            return pd.DataFrame(index=pd.to_datetime([]))
        return concat_dataframes_with_nullable_int_cols(dfs)

    def dump(self, camel_case: bool = False, convert_timestamps: bool = False) -> List[Dict[str, Any]]:
        """Dump the instance into a json serializable Python data type.

        Args:
            camel_case (bool): Use camelCase for attribute names. Default: False.
            convert_timestamps (bool): Convert timestamps to ISO 8601 formatted strings. Default: False (returns as integer, milliseconds since epoch)

        Returns:
            List[Dict[str, Any]]: A list of dicts representing the instance.
        """
        return [dps.dump(camel_case, convert_timestamps) for dps in self]
class DatapointsList(CogniteResourceList[Datapoints]):
    """List of Datapoints objects with lookup support for duplicated identifiers."""

    _RESOURCE = Datapoints

    def __init__(self, resources: Collection[Any], cognite_client: Optional[CogniteClient] = None):
        super().__init__(resources, cognite_client)
        # Fix what happens for duplicated identifiers:
        ids = [dps.id for dps in self if dps.id is not None]
        xids = [dps.external_id for dps in self if dps.external_id is not None]
        dupe_ids, id_dct = find_duplicates(ids), defaultdict(list)
        dupe_xids, xid_dct = find_duplicates(xids), defaultdict(list)
        for dps in self:
            if (id_ := dps.id) is not None and id_ in dupe_ids:
                id_dct[id_].append(dps)
            if (xid := dps.external_id) is not None and xid in dupe_xids:
                xid_dct[xid].append(dps)
        # Duplicated identifiers map to a *list* of Datapoints instead of one item.
        self._id_to_item.update(id_dct)
        self._external_id_to_item.update(xid_dct)

    def get(  # type: ignore [override]
        self,
        id: Optional[int] = None,
        external_id: Optional[str] = None,
    ) -> Union[None, Datapoints, List[Datapoints]]:
        """Get a specific Datapoints from this list by id or external_id.

        Note: For duplicated time series, returns a list of Datapoints.

        Args:
            id (int): The id of the item(s) to get.
            external_id (str): The external_id of the item(s) to get.

        Returns:
            Union[None, Datapoints, List[Datapoints]]: The requested item(s)
        """
        # TODO: Question, can we type annotate without specifying the function?
        return super().get(id, external_id)  # type: ignore [return-value]

    def __str__(self) -> str:
        """JSON representation with timestamps rendered as datetime strings."""
        item = self.dump()
        for i in item:
            i["datapoints"] = utils._time.convert_time_attributes_to_datetime(i["datapoints"])
        return json.dumps(item, default=lambda x: x.__dict__, indent=4)

    def to_pandas(  # type: ignore [override]
        self,
        column_names: Literal["id", "external_id"] = "external_id",
        include_aggregate_name: bool = True,
        include_granularity_name: bool = False,
    ) -> pandas.DataFrame:
        """Convert the datapoints list into a pandas DataFrame.

        Args:
            column_names (str): Which field to use as column header. Defaults to "external_id", can also be "id". For time series with no external ID, ID will be used instead.
            include_aggregate_name (bool): Include aggregate in the column name
            include_granularity_name (bool): Include granularity in the column name (after aggregate if present)

        Returns:
            pandas.DataFrame: The datapoints list as a pandas DataFrame.
        """
        pd = cast(Any, local_import("pandas"))
        dfs = [dps.to_pandas(column_names, include_aggregate_name, include_granularity_name) for dps in self]
        if not dfs:
            # Empty list: return an empty frame with a (empty) datetime index.
            return pd.DataFrame(index=pd.to_datetime([]))
        return concat_dataframes_with_nullable_int_cols(dfs)
|
12,820 | ad9deb761e8f37a1865aa8170373be7de8efe490 | #!/usr/bin/python3
from http.server import BaseHTTPRequestHandler as BaseHandler,HTTPServer
import threading
import time
import json
from .MInfo import *
class MServer:
    """Interface for a message server dispatching group-chat and DM events."""

    def __init__(self, handle_chat, handle_dm):
        # Base implementation ignores the handlers; subclasses wire them up.
        pass

    def chat(self):
        """Handle one incoming group-chat message (no-op in the base class)."""
        pass

    def dm(self):
        """Handle one incoming direct message (no-op in the base class)."""
        pass
class TestMServer(MServer):
    """Console-driven stand-in for MServer: reads simulated events from stdin."""

    def __init__(self, handle_chat, handle_dm):
        self.handle_chat = handle_chat  # callback(group_id, sender_id, command, text, data)
        self.handle_dm = handle_dm      # callback(sender_id, command, text, data)
        self.active = True
        # Run the prompt loop on a background thread so the caller isn't blocked.
        thread = threading.Thread(target=self.loop)
        thread.start()

    def chat(self):
        """Prompt for a simulated group-chat message and dispatch it."""
        text = input("Text: ")
        # Only messages starting with the bot access keyword (from MInfo) count.
        if text[0:len(ACCESS_KW)] == ACCESS_KW:
            group_id = input("Group_id: ")
            sender_id = input("Sender_id: ")
            command = text.split()[0][len(ACCESS_KW):]
            j = input("Data (json): ")
            if j == "":
                data = {}
            elif j[0:len('vote')] == 'vote':
                # Shorthand: "vote <user_id>" builds a mention attachment.
                votee = j.split()[1]
                data = {'attachments':[{'type':'mentions','user_ids':[votee]}]}
            else:
                data = json.loads(j)
            self.handle_chat(group_id, sender_id, command, text, data)

    def dm(self):
        """Prompt for a simulated direct message and dispatch it."""
        text = input("Text: ")
        if text[0:len(ACCESS_KW)] == ACCESS_KW:
            sender_id = input("Sender_id: ")
            command = text.split()[0][len(ACCESS_KW):]
            j = input("Data (json): ")
            if j == "":
                data = {}
            else:
                data = json.loads(j)
            self.handle_dm(sender_id, command, text, data)

    def loop(self):
        """Main prompt loop; entering "quit" stops after the current iteration."""
        while self.active:
            chat_dm = input("Chat/DM, [c]/d: ")
            if chat_dm in ["", "c", "chat"]:
                self.chat()
            else:
                self.dm()
            # NOTE(review): "quit" falls into the dm() branch above before the
            # loop stops -- confirm this is intended.
            if chat_dm == "quit":
                self.active = False
            time.sleep(.5)
12,821 | 8c4f7b49f612d63f939fa96665a3c8ab18c9b38a | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render, redirect
from .models import User, Address
from django.contrib.auth import authenticate, login, logout
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_text
from .utils import generate_token
from django.core.mail import EmailMultiAlternatives
from django.contrib.auth.decorators import login_required
from django.contrib import messages
# Create your views here.
def loginPage(request):
    """Show the login form; already-authenticated users are sent home."""
    if request.user.is_authenticated:
        return redirect('/')
    return render(request, 'auth/login.html')
def logging(request):
    """Authenticate the posted email/password and log the user in.

    Redirects to the home page on success, back to the login page on failure
    or for non-POST requests.
    """
    if request.method == 'POST':
        mail = request.POST.get('email', '')
        user_password = request.POST.get('password', '')
        user = authenticate(username=mail, password=user_password)
        if user is not None:
            login(request, user)
            messages.success(request, 'Login successful')
            return redirect('/')
        # Fix: the failure case previously used messages.success, so the
        # error was rendered with a success tag/style; use the error level.
        messages.error(request, 'Incorrect credentials')
        return redirect('/auth/login')
    # Fix: non-POST requests previously fell through and returned None,
    # which makes Django raise ValueError; send them to the login form.
    return redirect('/auth/login')
def registration(request):
    """Create a new (inactive) user account and e-mail an activation link.

    Rejects duplicate email/phone, then creates the user and sends an HTML
    activation mail containing a base64-encoded pk and a one-time token.
    """
    if request.method == 'POST':
        email = request.POST.get('email')
        userCheck = User.objects.filter(email=email)
        phone = request.POST.get('phone')
        userPhone = User.objects.filter(phone=phone)
        if userCheck or userPhone:
            messages.error(request, 'User with same phone number or email already registered')
            return redirect('/auth/register/')
        else:
            name = request.POST.get('name')
            dob = request.POST.get('dob')
            password = request.POST.get('password')
            user_obj = User.objects.create_user(first_name=name, phone=phone, password=password,
                                                email=email, user_name=email, dob=dob)
            user_obj.save()
            current_site = get_current_site(request)
            email_sub = "Activate your account"
            message = render_to_string('auth/activate.html',
                                       {
                                           'user': user_obj,
                                           'domain': current_site.domain,
                                           'uid': urlsafe_base64_encode(force_bytes(user_obj.pk)),
                                           'token': generate_token.make_token(user_obj),
                                       })
            # NOTE(review): `email` is rebound from the address string to the
            # message object here; to=[email] still sees the original string
            # because the RHS is evaluated first.
            email = EmailMultiAlternatives(
                subject=email_sub,
                body='',
                from_email=settings.EMAIL_HOST_USER,
                to=[email]
            )
            email.attach_alternative(message, "text/html")
            try:
                email.send()
                messages.success(request, 'Activate your account to login')
                return redirect('/auth/login')
            # NOTE(review): bare except hides the real SMTP error -- consider
            # narrowing to smtplib.SMTPException / OSError and logging it.
            except:
                messages.error(request, 'Unknown error occurred')
                return redirect('/auth/register')
def user_logout(request):
    """Terminate the current session and return to the login page."""
    logout(request)
    messages.success(request, 'Logout')
    return redirect('/auth/login')
def activateView(request, uidb64, token):
    """Activate the account identified by the emailed uid/token pair.

    uidb64 is the base64-encoded user pk; token must validate against
    generate_token. On success the user is marked active.
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    # NOTE(review): bare except -- a malformed uid or missing user is treated
    # identically to any other failure; consider narrowing the exception types.
    except:
        user = None
    if user is not None and generate_token.check_token(user, token):
        user.is_active = True
        user.save()
        messages.success(request, 'Account activated')
        return redirect('/')
    else:
        messages.error(request, 'Unknown error occurred')
        return redirect('/auth/register')
@login_required
def addAddress(request):
    """Save a posted delivery address for the logged-in user, then go to checkout.

    NOTE(review): non-POST requests fall through and return None -- confirm the
    URLconf only routes POSTs here.
    """
    if request.method == 'POST':
        address = request.POST.get('address')
        addType = request.POST.get('type')
        state = request.POST.get('state')
        pinCode = request.POST.get('zip')
        add = Address(
            user=request.user,
            address=address,
            type=addType,
            pin=pinCode,
            state=state
        )
        add.save()
        messages.success(request, 'Address added')
        return redirect('/checkout')
@login_required
def account(request):
    """Render the account page for the logged-in user."""
    return render(request, 'auth/account.html')
|
12,822 | 8853305ab7bbd9294d46f383dec781d742fb905f | import time
import io
import cv2
from threading import Thread, Lock
from thread import start_new_thread
camera = None  # cv2.VideoCapture handle, created by init()
current_photo = None  # most recent frame grabbed by the capture thread
current_photo_lock = Lock()  # guards all reads/writes of current_photo
def init(res, shutter_speed):
    """Open camera 0 at the given resolution and start the capture thread.

    Args:
        res: (width, height) pair.
        shutter_speed: exposure is set to 1e9/shutter_speed -- presumably
            nanoseconds for a 1/shutter_speed-second exposure; TODO confirm
            against the camera driver.

    NOTE(review): cv2.cv.CV_CAP_PROP_* is the legacy OpenCV 1.x constant
    namespace and `from thread import ...` in the imports is Python 2 --
    this module targets old OpenCV/Python 2.
    """
    global camera
    exposure_time = 1e9/shutter_speed
    camera = cv2.VideoCapture(0)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, res[0])
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, res[1])
    camera.set(cv2.cv.CV_CAP_PROP_EXPOSURE, exposure_time)
    start_new_thread(infinite_capture_photos, ())
def infinite_capture_photos():
    """Background thread: refresh current_photo roughly every 30 ms, forever."""
    global current_photo
    global current_photo_lock
    while True:
        current_photo_lock.acquire()
        try:
            # read() returns (success_flag, frame); the flag is ignored here.
            s, current_photo = camera.read()
        finally:
            current_photo_lock.release()
        time.sleep(0.03)
def capture_image(name):
    """Write the most recently captured frame to file `name`.

    No-op (silently) if init() has not been called yet.
    """
    if camera is None:
        return
    #s, img_rgb = camera.read()
    current_photo_lock.acquire()
    try:
        img_rgb = current_photo
    finally:
        current_photo_lock.release()
    cv2.imwrite(name, img_rgb)
def capture_image_by_time(name, time_sec):
    """Wait time_sec seconds (polling in 1 ms sleeps), then capture to `name`."""
    if camera is None:
        return
    t0 = time.time()
    while time.time() - t0 < time_sec:
        time.sleep(0.001)
    capture_image(name)
def capture_image_sequence_time(time_sec):
    """Capture a numbered burst of frames covering roughly time_sec seconds.

    Assumes ~20 captures per second (hard-coded); the real rate depends on how
    fast capture_image runs against the ~30 Hz capture thread -- TODO confirm.
    """
    global camera
    if camera is None:
        return
    frames = int(20*time_sec)
    for i in range(0, frames):
        capture_image('images%03d.jpg' % i)
|
12,823 | f827400ba12990e0a319480cdddf6014cdbe1312 | from enum import Enum
from typing import Any, Dict, Optional, overload
from .conditions import RuleConditionKey
from .user_restriction import DurationUnit, UserRestriction
from .util._codegen import BaseParameters
class RuleType(Enum):
    """Stub: enumeration of rule types (members omitted in this stub)."""
    ...
class RuleAction(BaseParameters):
    """Stub: base class for rule actions, carrying an optional parameters bag."""
    def __repr__(self): ...
    def __str__(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __lt__(self, other): ...
    def __le__(self, other): ...
    def __gt__(self, other): ...
    def __ge__(self, other): ...
    @overload
    def __init__(self) -> None: ...
    @overload
    def __init__(
        self, *,
        parameters: Optional[BaseParameters.Parameters] = ...
    ) -> None: ...
    # Fields not recognized by the schema are collected in _unexpected.
    _unexpected: Optional[Dict[str, Any]]
    parameters: Optional[BaseParameters.Parameters]
class Restriction(RuleAction):
    """Stub: rule action parameterized by scope, duration_days and a private
    comment (legacy form of RestrictionV2, duration expressed in days)."""
    class Parameters(BaseParameters.Parameters):
        def __repr__(self): ...
        def __str__(self): ...
        def __eq__(self, other): ...
        def __ne__(self, other): ...
        def __lt__(self, other): ...
        def __le__(self, other): ...
        def __gt__(self, other): ...
        def __ge__(self, other): ...
        def __init__(
            self, *,
            scope: Optional[UserRestriction.Scope] = ...,
            duration_days: Optional[int] = ...,
            private_comment: Optional[str] = ...
        ) -> None: ...
        _unexpected: Optional[Dict[str, Any]]
        scope: Optional[UserRestriction.Scope]
        duration_days: Optional[int]
        private_comment: Optional[str]

    def __repr__(self): ...
    def __str__(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __lt__(self, other): ...
    def __le__(self, other): ...
    def __gt__(self, other): ...
    def __ge__(self, other): ...
    @overload
    def __init__(
        self, *,
        scope: Optional[UserRestriction.Scope] = ...,
        duration_days: Optional[int] = ...,
        private_comment: Optional[str] = ...
    ) -> None: ...
    @overload
    def __init__(self, *, parameters: Optional[Parameters] = ...) -> None: ...
    _unexpected: Optional[Dict[str, Any]]
    parameters: Optional[Parameters]
class RestrictionV2(RuleAction):
    """Stub: rule action parameterized by scope, duration with an explicit
    DurationUnit, and a private comment."""
    class Parameters(BaseParameters.Parameters):
        def __repr__(self): ...
        def __str__(self): ...
        def __eq__(self, other): ...
        def __ne__(self, other): ...
        def __lt__(self, other): ...
        def __le__(self, other): ...
        def __gt__(self, other): ...
        def __ge__(self, other): ...
        def __init__(
            self, *,
            scope: Optional[UserRestriction.Scope] = ...,
            duration: Optional[int] = ...,
            duration_unit: Optional[DurationUnit] = ...,
            private_comment: Optional[str] = ...
        ) -> None: ...
        _unexpected: Optional[Dict[str, Any]]
        scope: Optional[UserRestriction.Scope]
        duration: Optional[int]
        duration_unit: Optional[DurationUnit]
        private_comment: Optional[str]

    def __repr__(self): ...
    def __str__(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __lt__(self, other): ...
    def __le__(self, other): ...
    def __gt__(self, other): ...
    def __ge__(self, other): ...
    @overload
    def __init__(
        self, *,
        scope: Optional[UserRestriction.Scope] = ...,
        duration: Optional[int] = ...,
        duration_unit: Optional[DurationUnit] = ...,
        private_comment: Optional[str] = ...
    ) -> None: ...
    @overload
    def __init__(self, *, parameters: Optional[Parameters] = ...) -> None: ...
    _unexpected: Optional[Dict[str, Any]]
    parameters: Optional[Parameters]
class SetSkillFromOutputField(RuleAction):
    """Stub: rule action parameterized by a skill_id and the rule-condition
    key (from_field) whose value feeds the skill."""
    class Parameters(BaseParameters.Parameters):
        def __repr__(self): ...
        def __str__(self): ...
        def __eq__(self, other): ...
        def __ne__(self, other): ...
        def __lt__(self, other): ...
        def __le__(self, other): ...
        def __gt__(self, other): ...
        def __ge__(self, other): ...
        def __init__(
            self, *,
            skill_id: Optional[str] = ...,
            from_field: Optional[RuleConditionKey] = ...
        ) -> None: ...
        _unexpected: Optional[Dict[str, Any]]
        skill_id: Optional[str]
        from_field: Optional[RuleConditionKey]

    def __repr__(self): ...
    def __str__(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __lt__(self, other): ...
    def __le__(self, other): ...
    def __gt__(self, other): ...
    def __ge__(self, other): ...
    @overload
    def __init__(
        self, *,
        skill_id: Optional[str] = ...,
        from_field: Optional[RuleConditionKey] = ...
    ) -> None: ...
    @overload
    def __init__(self, *, parameters: Optional[Parameters] = ...) -> None: ...
    _unexpected: Optional[Dict[str, Any]]
    parameters: Optional[Parameters]
class ChangeOverlap(RuleAction):
    """Stub: rule action parameterized by an overlap delta and an open_pool
    flag."""
    class Parameters(BaseParameters.Parameters):
        def __repr__(self): ...
        def __str__(self): ...
        def __eq__(self, other): ...
        def __ne__(self, other): ...
        def __lt__(self, other): ...
        def __le__(self, other): ...
        def __gt__(self, other): ...
        def __ge__(self, other): ...
        def __init__(
            self, *,
            delta: Optional[int] = ...,
            open_pool: Optional[bool] = ...
        ) -> None: ...
        _unexpected: Optional[Dict[str, Any]]
        delta: Optional[int]
        open_pool: Optional[bool]

    def __repr__(self): ...
    def __str__(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __lt__(self, other): ...
    def __le__(self, other): ...
    def __gt__(self, other): ...
    def __ge__(self, other): ...
    @overload
    def __init__(
        self, *,
        delta: Optional[int] = ...,
        open_pool: Optional[bool] = ...
    ) -> None: ...
    @overload
    def __init__(self, *, parameters: Optional[Parameters] = ...) -> None: ...
    _unexpected: Optional[Dict[str, Any]]
    parameters: Optional[Parameters]
class SetSkill(RuleAction):
    """Stub: rule action parameterized by a skill_id and a fixed skill_value."""
    class Parameters(BaseParameters.Parameters):
        def __repr__(self): ...
        def __str__(self): ...
        def __eq__(self, other): ...
        def __ne__(self, other): ...
        def __lt__(self, other): ...
        def __le__(self, other): ...
        def __gt__(self, other): ...
        def __ge__(self, other): ...
        def __init__(
            self, *,
            skill_id: Optional[str] = ...,
            skill_value: Optional[int] = ...
        ) -> None: ...
        _unexpected: Optional[Dict[str, Any]]
        skill_id: Optional[str]
        skill_value: Optional[int]

    def __repr__(self): ...
    def __str__(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __lt__(self, other): ...
    def __le__(self, other): ...
    def __gt__(self, other): ...
    def __ge__(self, other): ...
    @overload
    def __init__(
        self, *,
        skill_id: Optional[str] = ...,
        skill_value: Optional[int] = ...
    ) -> None: ...
    @overload
    def __init__(self, *, parameters: Optional[Parameters] = ...) -> None: ...
    _unexpected: Optional[Dict[str, Any]]
    parameters: Optional[Parameters]
class RejectAllAssignments(RuleAction):
    """Stub: rule action parameterized by a public_comment."""
    class Parameters(BaseParameters.Parameters):
        def __repr__(self): ...
        def __str__(self): ...
        def __eq__(self, other): ...
        def __ne__(self, other): ...
        def __lt__(self, other): ...
        def __le__(self, other): ...
        def __gt__(self, other): ...
        def __ge__(self, other): ...
        def __init__(self, *, public_comment: Optional[str] = ...) -> None: ...
        _unexpected: Optional[Dict[str, Any]]
        public_comment: Optional[str]

    def __repr__(self): ...
    def __str__(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __lt__(self, other): ...
    def __le__(self, other): ...
    def __gt__(self, other): ...
    def __ge__(self, other): ...
    @overload
    def __init__(self, *, public_comment: Optional[str] = ...) -> None: ...
    @overload
    def __init__(self, *, parameters: Optional[Parameters] = ...) -> None: ...
    _unexpected: Optional[Dict[str, Any]]
    parameters: Optional[Parameters]
class ApproveAllAssignments(RuleAction):
    """Stub: rule action with no parameters of its own (base parameters only)."""
    def __repr__(self): ...
    def __str__(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __lt__(self, other): ...
    def __le__(self, other): ...
    def __gt__(self, other): ...
    def __ge__(self, other): ...
    @overload
    def __init__(self) -> None: ...
    @overload
    def __init__(
        self, *,
        parameters: Optional[BaseParameters.Parameters] = ...
    ) -> None: ...
    _unexpected: Optional[Dict[str, Any]]
    parameters: Optional[BaseParameters.Parameters]
|
12,824 | 663da754865dc0a49ca4fa4dfc3869d094a6aaa7 | # 2017 Qualification Round - A. Oversized Pancake Flipper
# https://code.google.com/codejam/contest/3264486/dashboard#s=p0
def solve(pancakes, k):
    """Minimum number of size-k flips to turn every pancake happy-side up.

    Args:
        pancakes: string of '+' (happy) and '-' (blank) pancakes.
        k: width of the flipper; flips exactly k consecutive pancakes.

    Returns:
        The minimum flip count, or the string 'IMPOSSIBLE' when no sequence
        of flips can make all pancakes '+'.
    """
    state = [c == '+' for c in pancakes]
    flips = 0
    # Greedy left-to-right: a '-' at position i can only be fixed by flipping
    # the window starting at i, so that flip is forced.
    for i in range(len(state) - k + 1):
        if not state[i]:
            flips += 1
            state[i:i + k] = [not s for s in state[i:i + k]]
    if all(state):
        return flips
    return 'IMPOSSIBLE'
#------------------------------------------------------------------------------#
# Read Code Jam cases from <file>.in and write "Case #N: result" lines to
# <file>.out, echoing each to stdout.
file = 'sample'
with open(file+'.in') as f_in, open(file+'.out', 'w') as f_out:
    # NOTE(review): rebinding the builtin `input` to the file's readline is
    # deliberate here (common Code Jam idiom), but shadows the builtin.
    input = f_in.readline
    # First line of the input file is the number of test cases.
    for case in range(1, int(input())+1):
        pancakes, k = input().split()
        result = solve(pancakes, int(k))
        result_output = 'Case #%d: %s\n' % (case, result)
        print(result_output)
        f_out.write(result_output)
|
12,825 | e10cb90299dfe03ca46cb14330561400ea6f46b2 | import importlib
import sys
# Fix: ('client') is just a parenthesized string, not a tuple, so
# `from <pkg> import *` would try to export the names 'c', 'l', 'i', ...
# The trailing comma makes it a proper one-element tuple.
__all__ = (
    'client',
)
|
12,826 | 12d88b47e948efb6a4094f07bc58c35c6886f634 | # Generated by Django 2.0.1 on 2018-01-16 00:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration with no schema operations (state-only no-op)."""

    dependencies = [
        ('talks', '0024_auto_20180114_0111'),
    ]

    operations = [
    ]
|
12,827 | a4b2cf93d4d83e32f837f2e7e60eda731aa74af0 | #!/usr/bin/python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Helper script for Tiled to export maps in game format (and generate a respawn
# map).
# NOTE: You'll have to change the paths below.
# NOTE: You can add this script to Tiled's commands: path\bundle.py %mapfile
import sys
import json
import os
import subprocess
import base64
import hashlib
# Path to the Tiled executable; override with the TILED_EXECUTABLE env var.
TILED = os.environ.get("TILED_EXECUTABLE", "d:\\bin\\Tiled\\tiled.exe")
# ImageMagick's convert binary, used to render the respawn-map PNG.
CONVERT_BINARY = "d:\\commands\\bin\\convert.exe"
# JSON keys whose string values are file paths that must be copied into the bundle.
KNOWN_PATH_FIELDS = { "image" }
def export_to_json(tmx_full_name, json_full_name):
    """Run Tiled headlessly to export a .tmx map to JSON with embedded tilesets.

    Raises subprocess.CalledProcessError if Tiled exits non-zero.
    """
    print("[ ] Exporting map: %s" % tmx_full_name)
    subprocess.check_call([
        TILED,
        "--export-map",
        "--embed-tilesets",
        tmx_full_name,
        json_full_name
    ])
    print("[ + ] JSON file: %s" % json_full_name)
def bundle_file(bundle_dir, file_full_name):
    """Copy a file into bundle_dir under a content-addressed name.

    The new name is "<first-10-hex-of-sha256>-<basename>", which both avoids
    collisions between identically-named assets and dedups by content.
    Returns the bundled file name (relative to bundle_dir).
    """
    with open(file_full_name, "rb") as src:
        payload = src.read()
    digest_prefix = hashlib.sha256(payload).hexdigest()[:10]
    bundled_name = "%s-%s" % (digest_prefix, os.path.basename(file_full_name))
    with open(os.path.join(bundle_dir, bundled_name), "wb") as dst:
        dst.write(payload)
    return bundled_name
def bundle_worker(top_level_dir, bundle_dir, d, path):
    """Recursively walk a JSON-like structure, bundling referenced files.

    String values under keys listed in KNOWN_PATH_FIELDS are treated as file
    paths (relative to top_level_dir unless absolute), copied into bundle_dir
    via bundle_file, and rewritten in place to the bundled name. `path` is a
    dotted/indexed breadcrumb used only for log messages.
    """
    if type(d) is str:
        # A plain string containing path separators is probably an unhandled
        # path; ".data"-suffixed entries (embedded layer data) are expected.
        if ('/' in d or '\\' in d) and (not path.endswith(".data")):
            print("[wrr] Potential unhandled path in %s: %s" % (path, d))
        return
    if type(d) in { float, int, bool }:
        return
    if type(d) is list:
        for i, e in enumerate(d):
            bundle_worker(top_level_dir, bundle_dir, e, "%s[%i]" % (path, i))
        return
    if type(d) is dict:
        for k, v in d.items():
            if k in KNOWN_PATH_FIELDS and type(v) is str:
                if os.path.isabs(v):
                    file_full_name = os.path.realpath(v)
                else:
                    file_full_name = os.path.realpath(os.path.join(top_level_dir, v))
                if not os.access(file_full_name, os.R_OK):
                    sys.exit("[err] Failed to bundle file %s: %s" % (path, v))
                # Rewrite the JSON value to point at the bundled copy.
                d[k] = bundle_file(bundle_dir, file_full_name)
                continue
            bundle_worker(top_level_dir, bundle_dir, v, "%s.%s" % (path, k))
        return
    print("[err] Unknown field type %s (%s) - skipping" % (path, type(d)))
def bundle(top_level_dir, bundle_dir, input_full_name, output_full_name):
    """Load the exported JSON map, bundle referenced files, write the final JSON."""
    print("[ ] Bundling files")
    with open(input_full_name) as f:
        data = json.load(f)
    bundle_worker(top_level_dir, bundle_dir, data, "ROOT")
    with open(output_full_name, "w") as f:
        json.dump(data, f, indent=4, sort_keys=True)
    print("[ + ] Final output: %s" % output_full_name)
def generate_respawn_map(input_full_name, raw_full_name, output_full_name):
    """Render a debug PNG where each map tile is colored by its nearest respawn.

    Reads respawn object positions (pixel coordinates) from the exported JSON,
    writes a raw RGBA bitmap with one pixel per 32x32 tile, then upscales it to
    a PNG via ImageMagick's convert (skipped with a message if not installed).
    """
    # Distinct colors, cycled per respawn index.
    COLORS = [
        [0xFF, 0x00, 0x00],
        [0x00, 0x00, 0x00],
        [0x00, 0x80, 0x00],
        [0x00, 0x00, 0x80],
        [0xFF, 0x00, 0xFF],
        [0xC0, 0xC0, 0xC0],
        [0x80, 0x00, 0x80],
        [0x00, 0x80, 0x80],
        [0xFF, 0xFF, 0x00],
        [0x80, 0x00, 0x00],
        [0xFF, 0xFF, 0xFF],
        [0x00, 0xFF, 0x00],
        [0x00, 0x00, 0xFF],
        [0x00, 0xFF, 0xFF],
        [0x80, 0x80, 0x80],
        [0x80, 0x80, 0x00],
    ]
    print("[ ] Generating respawn map")
    with open(input_full_name) as f:
        data = json.load(f)
    respawns = []
    # Respawn points live as objects of type "respawn" on the "metadata" layer.
    for layer in data["layers"]:
        if layer["name"] == "metadata":
            for obj in layer["objects"]:
                if obj["type"] == "respawn":
                    respawns.append([
                        obj["x"], obj["y"]
                    ])
    map_width = data["width"]
    map_height = data["height"]
    print("[ ] Map size: %i x %i" % (map_width, map_height))
    sz = map_width * map_height * 4  # RGBA, one pixel per tile
    print("[ ] Creating %i byte bitmap" % (sz))
    m = bytearray(sz)
    idx = 0
    for j in range(map_height):
        for i in range(map_width):
            # Brute-force nearest respawn by squared distance; tile coords are
            # converted to pixels (32 px tiles) to match the object positions.
            min_idx = -1
            min_dist_sq = 4000000000
            for ridx, (rx, ry) in enumerate(respawns):
                dx = rx - (i * 32)
                dy = ry - (j * 32)
                dist_sq = dx * dx + dy * dy
                if dist_sq < min_dist_sq:
                    min_dist_sq = dist_sq
                    min_idx = ridx
            color = COLORS[min_idx % len(COLORS)]
            m[idx + 0] = color[0]
            m[idx + 1] = color[1]
            m[idx + 2] = color[2]
            m[idx + 3] = 50  # mostly-transparent overlay alpha
            idx += 4
    with open(raw_full_name, "wb") as f:
        f.write(m)
    print("[ ] Converting and resizing")
    sys.stdout.flush()
    try:
        subprocess.check_call([
            CONVERT_BINARY,
            "-size", "%ix%i" % (map_width, map_height),
            "-depth", "8",
            "rgba:%s" % raw_full_name,
            "-scale", "3200%",
            output_full_name
        ])
    except FileNotFoundError:
        print("[***] Need imagick's convert to create the PNG of respawn map")
    print("[ + ] Respawn map: %s" % output_full_name)
def main():
    """CLI entry point: export the TMX map to JSON, bundle its assets,
    and render the respawn map next to the source file."""
    if len(sys.argv) != 2:
        sys.exit("usage: bundle.py <mapfile.tmx>")
    if not os.access(TILED, os.X_OK):
        sys.exit("error: set TILED_EXECUTABLE env variable to tiled.exe path")
    src_tmx = sys.argv[1]
    src_dir = os.path.dirname(src_tmx)
    base, _ = os.path.splitext(os.path.basename(src_tmx))
    # intermediate export lives next to the source map
    tmp_json = os.path.join(src_dir, "%s-tmp.json" % base)
    out_dir = os.path.realpath(os.path.join(src_dir, "bundle-%s" % base))
    final_json = os.path.join(out_dir, "%s.json" % base)
    os.makedirs(out_dir, exist_ok=True)
    export_to_json(src_tmx, tmp_json)
    bundle(src_dir, out_dir, tmp_json, final_json)
    raw_map = os.path.join(src_dir, "%s-respawn-map.raw" % base)
    png_map = os.path.join(src_dir, "%s-respawn-map.png" % base)
    generate_respawn_map(tmp_json, raw_map, png_map)


if __name__ == "__main__":
    main()
|
12,828 | 5306ba5840d1e3965481932185e099939ffdc17c | import numpy as np
import DataProcessingTools as DPT
from .rpllfp import RPLLFP
from .rplhighpass import RPLHighPass
from .rplraw import RPLRaw
from .helperfunctions import computeFFT
import matplotlib.pyplot as plt
import os
from .misc import getChannelInArray
class FreqSpectrum(DPT.DPObject):
    """Windowed-FFT frequency spectrum of one channel's signal.

    Loads the channel's LFP by default (or the high-passed / raw signal
    when the corresponding flag is set), splits it into non-overlapping
    windows of `pointsPerWindow` samples, demeans each window, and
    stores the across-window mean and standard error of the FFT
    magnitudes.  Plotting supports a per-channel view and an array view
    that tiles every channel of an electrode array into one figure.
    """
    # DPObject persistence/configuration
    filename = "freqspectrum.hkl"
    argsList = [('loadHighPass', False), ('loadRaw', False), ('pointsPerWindow', 2000)]
    level = 'channel'
    def __init__(self, *args, **kwargs):
        DPT.DPObject.__init__(self, *args, **kwargs)
    def create(self, *args, **kwargs):
        """Compute freq/magnitude/magstderr for the channel in the cwd."""
        pwd = os.path.normpath(os.getcwd());
        self.freq = []       # one frequency axis per data set (channel)
        self.magnitude = []  # mean FFT magnitude per data set
        self.numSets = 0
        # pick the source signal according to the load flags
        if self.args['loadHighPass']:
            rpdata = RPLHighPass()
        elif self.args['loadRaw']:
            rpdata = RPLRaw()
        else:
            rpdata = RPLLFP()
        dlength = len(rpdata.data)
        ppw = self.args['pointsPerWindow']
        if dlength > 0:
            DPT.DPObject.create(self, *args, **kwargs)
            # pad rpdata to make sure we can reshape into desired length
            rpdata1 = np.pad(rpdata.data,(0,int(np.ceil(dlength/ppw)*ppw)-dlength))
            # reshape so data is in columns
            rpdata2 = np.reshape(rpdata1,(ppw,-1), order='F')
            # compute the mean of each column so we can demean the data
            rp2mean = rpdata2.mean(axis=0)
            # subtract the mean so the DC value will be 0
            rpdata3 = rpdata2 - rp2mean[np.newaxis, :]
            magnitude, freq = computeFFT(rpdata3, rpdata.analogInfo['SampleRate'])
            self.freq = [freq]
            # take the mean of the magnitude across windows
            self.magnitude = [magnitude.mean(axis=1)]
            # take the stderr of the magnitude across windows
            self.magstderr = [magnitude.std(axis=1) / np.sqrt(np.size(rpdata2,1))]
            self.numSets = 1
            # self.title = [DPT.levels.get_shortname("channel", pwd)[-3:]]
            # get array name
            aname = DPT.levels.normpath(os.path.dirname(pwd))
            # store array name so we can do the array plot more easily
            self.array_dict = dict()
            self.array_dict[aname] = 0
            # this is used to compute the right index when switching between plot types
            self.current_plot_type = None
        else:
            # no data for this channel: create an empty object
            DPT.DPObject.create(self, dirs=[], *args, **kwargs)
        return
    def append(self, fs):
        """Merge another FreqSpectrum (the next channel) into this one."""
        DPT.DPObject.append(self, fs)
        self.magnitude += fs.magnitude
        self.magstderr += fs.magstderr
        self.freq += fs.freq
        # self.title += fs.title
        # loop through array dictionary in fs
        for ar in fs.array_dict:
            # each array name maps to the index of its LAST channel
            self.array_dict[ar] = self.numSets
        self.numSets += 1
    def plot(self, i = None, ax = None, getNumEvents = False, getLevels = False, getPlotOpts = False, overlay = False, **kwargs):
        """Plot one channel's spectrum or a whole array of channels.

        Also implements the panel framework's query protocol:
        getPlotOpts returns the option dict, and getNumEvents returns
        (number of plots, translated index), converting the index when
        the user switches between 'Channel' and 'Array' plot types.
        """
        plotOpts = {'PlotType': DPT.objects.ExclusiveOptions(['Channel', 'Array'], 0), 'LabelsOff': False, 'TitleOff': False, 'TicksOff': False, 'XLims': []}
        # update the plotOpts based on kwargs, these two lines are important to
        # receive the input arguments and act accordingly
        for (k, v) in plotOpts.items():
            plotOpts[k] = kwargs.get(k, v)
        if getPlotOpts:
            return plotOpts
        plot_type = plotOpts["PlotType"].selected()
        if self.current_plot_type is None: # initial assignement of self.current_plot_type
            self.current_plot_type = plot_type
        if getNumEvents:
            if self.current_plot_type == plot_type: # no changes to plot_type
                if plot_type == 'Channel':
                    return self.numSets, i
                elif plot_type == 'Array':
                    return len(self.array_dict), i
            elif self.current_plot_type == 'Array' and plot_type == 'Channel': # change from array to channel
                if i == 0:
                    return self.numSets, 0
                else:
                    # get values in array_dict
                    advals = np.array([*self.array_dict.values()])
                    # first channel of array i is one past the last channel of array i-1
                    return self.numSets, advals[i-1]+1
            elif self.current_plot_type == 'Channel' and plot_type == 'Array': # change from channel to array
                # get values in array_dict
                advals = np.array([*self.array_dict.values()])
                # find index that is larger than i
                vi = (advals >= i).nonzero()
                return len(self.array_dict), vi[0][0]
        if ax is None:
            ax = plt.gca()
        if not overlay:
            ax.clear()
        if plot_type == 'Channel':
            if self.current_plot_type == 'Array':
                # coming from an array plot: collapse back to a single axes
                fig = ax.figure # get the parent figure of the ax
                for x in fig.get_axes(): # remove all axes in current figure
                    x.remove()
                ax = fig.add_subplot(1,1,1)
            # plot the mountainsort data according to the current index 'i'
            self.plot_data(i, ax, plotOpts, 1)
            self.current_plot_type = 'Channel'
        elif plot_type == 'Array': # plot in channel level
            fig = ax.figure # get the parent figure of the ax
            for x in fig.get_axes(): # remove all axes in current figure
                x.remove()
            # get values in array_dict
            advals = np.array([*self.array_dict.values()])
            # get first channel, which will be the last index in the previous array plus 1
            if i == 0:
                cstart = 0
                cend = advals[0]
            else:
                cstart = advals[i-1] + 1
                cend = advals[i]
            currch = cstart
            # suppress per-channel labels/titles in the tiled array view
            plotOpts['LabelsOff'] = True
            plotOpts['TitleOff'] = True
            # plotOpts['TicksOff'] = True
            while currch <= cend :
                # get channel name
                currchname = self.dirs[currch]
                # get axis position for channel
                ax,isCorner = getChannelInArray(currchname, fig)
                self.plot_data(currch, ax, plotOpts, isCorner)
                currch += 1
            self.current_plot_type = 'Array'
    def plot_data(self, i, ax, plotOpts, isCorner):
        """Draw data set *i* (mean ± stderr band) on *ax* per plotOpts."""
        y = self.magnitude[i]
        x = self.freq[i]
        e = self.magstderr[i]
        ax.plot(x, y)
        # show the stderr by adding a shaded area around the y values
        ax.fill_between(x, y-e, y+e, alpha=0.5)
        ax.ticklabel_format(axis='both', style='sci', scilimits=(0,3))
        if (not plotOpts['TitleOff']):
            ax.set_title(self.dirs[i])
        if (not plotOpts['LabelsOff']) or isCorner:
            ax.set_xlabel('Freq')
            ax.set_ylabel('Magnitude')
        if plotOpts['TicksOff'] or (not isCorner):
            ax.set_xticklabels([])
            ax.set_yticklabels([])
        if len(plotOpts['XLims']) > 0:
            ax.set_xlim(plotOpts['XLims'])
        else:
            # default x-range depends on which signal was loaded
            if self.args['loadHighPass']:
                ax.set_xlim([500, 7500])
            elif self.args['loadRaw']:
                ax.set_xlim([0, 10000])
            else:
                ax.set_xlim([0, 150])
|
12,829 | 370aafa4477562bdf91947a72f703c833360f1cd | # In your team's parameter file where you have values such as 1.96E-6. The E-6 refers to (10 ** -6)
# So you will need to change the -6 to reflect a difference in your paramters
# ----------------------------------------------------
# ENTER GROUP PARAMETERS HERE
# --- group parameters and interactive input --------------------------------
# NOTE: the original imported raw_input from pip._vendor.distlib.compat, a
# private vendored module that can disappear with any pip release, and mixed
# it with the builtin input().  On Python 3, input() is the correct call
# everywhere, so the import is gone and every prompt uses input() directly.
InputBudget = 1  # input budget (overwritten by the prompt below)
HDPrice = [1, 1, 1]  # corresponding HD prices in the order: HD, SSD then XSSSD
pmin = 1.96 * (10 ** -6)  # presumably minimum page-fault probability — confirm with brief
r = 3.8 * (10 ** -5)
Smin = 1.1 * (10 ** -4)  # presumably minimum fault service time — confirm with brief
d = 0.39
# YOU WON'T HAVE TO CHANGE ANYTHING BELOW THIS LINE
# ----------------------------------------------------
RAMSize = [4, 8, 16]                    # GiB options
RAMPrice = [4 * 15, 8 * 15, 16 * 15]    # £15 per GiB
HDName = ['HD', 'SSD', 'XSSSD']
HDSpeed = [120, 300, 420]
# interactive overrides for every parameter
InputBudget = int(input("Enter budget"))
HDPrice[0] = int(input("Enter HD Price"))
HDPrice[1] = int(input("Enter SSD Price"))
HDPrice[2] = int(input("Enter XSSD Price"))
r = float(input("Type your r"))
pmin = float(input("Type your pmin"))
Smin = float(input("Type your smin"))
d = float(input("Type your d"))
# ---Question 1: list affordable RAM/HD combinations -------------------------
print("---Question 1---\n")
for ramprice in RAMPrice:
    placeholder = 0  # index into HDName/HDSpeed, advanced once per HD option
    for hdprice in HDPrice:
        Budget = InputBudget - hdprice - ramprice
        if Budget >= 0:
            print("RAM size " + str(ramprice / 15) + "GiB and HD is " + str(
                HDName[placeholder]) + " and total cost is: £" + str((hdprice + ramprice)))
        placeholder = placeholder + 1
    print("\n")
# ---Question 2: effective access time of each affordable combination --------
print("---Question 2---\n")
for ramprice in RAMPrice:
    placeholder = 0
    for hdprice in HDPrice:
        Budget = InputBudget - hdprice - ramprice
        if Budget >= 0:
            print("RAM size " + str(ramprice / 15) + "GiB and HD is " + str(
                HDName[placeholder]) + " and total cost is: £" + str((hdprice + ramprice)))
            p = float(pmin + r / (ramprice / 15))         # fault probability for this RAM size
            s = float(Smin + d / (HDSpeed[placeholder]))  # fault service time for this HD
            print("Effective Access Time is: " + str(format(float((200 * (10 ** -9)) + (p * s)), "10.2E") + " seconds"))
            print("")
        # BUG FIX: this increment was nested inside `if Budget >= 0`, so an
        # unaffordable combination left placeholder stale and every later
        # combination was labelled with the wrong HD name and speed.
        # Q1 and Q3 increment unconditionally; Q2 now matches.
        placeholder = placeholder + 1
# ---Question 3: degradation of each combination vs. pure-RAM access ---------
print("---Question 3---\n")
outStrings = []   # human-readable description per affordable combination
outNumbers = []   # matching relative degradation (EAT - BAT) / BAT
for ramprice in RAMPrice:
    placeholder = 0
    for hdprice in HDPrice:
        Budget = InputBudget - hdprice - ramprice
        if Budget >= 0:
            outStrings.append("RAM size " + str(ramprice / 15) + "GiB and HD is " + str(HDName[placeholder]))
            p = pmin + r / (ramprice / 15)
            s = Smin + d / (HDSpeed[placeholder])
            EAT = (200 * (10 ** -9)) + (p * s)  # effective access time
            BAT = 200 * (10 ** -9)              # base (no-fault) access time
            # NOTE(review): this ratio is printed with a "%" suffix below but is
            # never multiplied by 100 — confirm whether a ratio or a percentage
            # is intended before changing the output.
            outNumbers.append(float((EAT - BAT) / BAT))
        placeholder = placeholder + 1
minIndex = outNumbers.index(min(outNumbers))
maxIndex = outNumbers.index(max(outNumbers))
print("Lowest degradation is for: " + outStrings[minIndex] + " with degradation of" + format(outNumbers[minIndex],
                                                                                             "10.2E") + "%")
print("Highest degradation is for: " + outStrings[maxIndex] + " with degradation of" + format(outNumbers[maxIndex],
                                                                                              "10.2E") + "%")
12,830 | e8bea8d2ea58fb54048d633c974c3c64c339803a | from point2D import Point2D
class Point3D(Point2D):
    """A 3-D point extending the project's Point2D with a z coordinate."""
    _z: int  # third coordinate

    def __init__(self, x, y, z):
        super().__init__(x, y)
        self._z = z

    @property
    def z(self) -> int:
        """The z coordinate (preferred accessor)."""
        return self._z

    @z.setter
    def z(self, z):
        self._z = z

    # Kept for backward compatibility with existing callers.
    # NOTE: names of the form __name__ are conventionally reserved for
    # Python's own protocols; these are not real dunder methods, and new
    # code should use the `z` property above instead.
    def __getZ__(self) -> int:
        return self._z

    def __setZ__(self, z):
        self._z = z
|
12,831 | f9102d02ac438551c107216f873ca165c2886482 | import django.forms
from django.shortcuts import render
from django.db import models
from django.forms import HiddenInput
from django.http import HttpResponseRedirect
from django.urls import reverse
from .models import Message, Agent, Quiz, Comment
from .forms import AgentForm, MessageForm, QuizForm, CommentForm
# Create your views here.
def home_page_view(request):
    """Render the landing page with its gallery layout classes."""
    return render(request, 'website/index.html', {
        'bodyClass': "home-grid-container",
        'bodyName': "home-body",
        'headerClass': "header",
        'mainName': "gallery-flex-container",
        'home': True,
    })
def about_page_view(request):
    """Render the static About subpage."""
    return render(request, 'website/about.html', {
        'bodyClass': "subpage-grid-container",
        'bodyName': "about-subpage",
        'headerClass': "",
        'mainName': "about-main",
    })
def contacts_page_view(request):
    """Render the contacts page and handle combined agent + message forms.

    On POST: reuse an existing Agent matching the submitted email (or
    create one), then inject that agent into the message form's data so
    the message is attributed correctly before saving.
    """
    agentForm = AgentForm(request.POST or None)
    messageForm = MessageForm(request.POST or None)
    # the agent FK is filled programmatically below, so hide it from the user
    messageForm.fields['agentID'].widget = HiddenInput()
    tempAgent = Agent()
    dbAgentFound = False
    if agentForm.is_valid():
        # NOTE(review): linear scan over all agents; Agent.objects.filter(
        # email=...).first() would do this in one query — confirm and simplify.
        agents = Agent.objects.all()
        for agent in agents:
            if agent.email == agentForm.data['email']:
                tempAgent = Agent.objects.get(pk=agent.id)
                dbAgentFound = True
                break
        if not dbAgentFound:
            # no agent with this email yet: persist the submitted one
            task = agentForm.save()
            tempAgent = Agent.objects.get(pk=task.id)
        # forms' data is immutable; copy, set the agent FK, and swap it back
        tempData = messageForm.data.copy()
        tempData['agentID'] = tempAgent
        messageForm.data = tempData
        if (messageForm.is_valid()):
            messageForm.save()
            return HttpResponseRedirect(reverse('website:home'))
    # initial GET, or invalid submission: re-render with bound forms
    context = {
        'bodyClass': "subpage-grid-container",
        'bodyName': "contacts-subpage",
        'headerClass': "",
        'mainName': "contacts-centered",
        'agentForm': agentForm,
        'messageForm': messageForm,
        'agentFound': dbAgentFound,
    }
    return render(request, 'website/contacts.html', context)
def projects_page_view(request):
    """Render the static Projects subpage."""
    return render(request, 'website/projects.html', {
        'bodyClass': "subpage-grid-container",
        'bodyName': "projects-subpage",
        'headerClass': "",
        'mainName': "project-container",
    })
def techniques_page_view(request):
    """Render the static Techniques subpage."""
    return render(request, 'website/techniques.html', {
        'bodyClass': "subpage-grid-container",
        'bodyName': "techniques-subpage",
        'headerClass': "",
        'mainName': "techniques-main",
    })
def bruhh_page_view(request):
    """Render the showcase ("bruhh") subpage."""
    return render(request, 'website/bruhh.html', {
        'bodyClass': "subpage-grid-container",
        'bodyName': "showcase-subpage",
        'headerClass': "",
        'mainName': "project-main",
    })
def agents_page_view(request):
    """List all stored contacts; requires an authenticated user."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('users:home'))
    return render(request, 'website/agents.html', {
        'headerText': "Contacts Established",
        'agents': Agent.objects.all(),
    })
def agent_messages_page_view(request, agent_id):
    """Show every message alongside one contact; requires login."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('users:home'))
    return render(request, 'website/agent-messages.html', {
        'headerText': "Contact Messages",
        'agent': Agent.objects.get(pk=agent_id),
        'messages': Message.objects.all(),
    })
def agent_edit_page_view(request, agent_id):
    """Edit an existing contact via AgentForm; requires login."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('users:home'))
    agent = Agent.objects.get(pk=agent_id)
    form = AgentForm(request.POST or None, instance=agent)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('website:agents'))
    return render(request, 'website/agent-edit.html', {
        'headerText': "Edit Contact",
        'agent': agent,
        'agent_id': agent_id,
        'form': form,
    })
def agent_add_page_view(request):
    """Create a new contact via AgentForm; requires login."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('users:home'))
    form = AgentForm(request.POST or None)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('website:agents'))
    return render(request, 'website/agent-add.html', {
        'headerText': "Add Contact",
        'agentForm': form,
    })
def message_add_page_view(request):
    """Create a new message via MessageForm; requires login."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('users:home'))
    form = MessageForm(request.POST or None)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('website:agents'))
    return render(request, 'website/message-add.html', {
        'headerText': "Add Message",
        'messageForm': form,
    })
def agent_delete_page_view(request, agent_id):
    """Delete one contact and return to the list; requires login."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('users:home'))
    Agent.objects.get(pk=agent_id).delete()
    return HttpResponseRedirect(reverse('website:agents'))
def message_delete_page_view(request, message_id):
    """Delete one message, then bounce back to the referring page."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('users:home'))
    Message.objects.get(pk=message_id).delete()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def previous_page_view(request):
    """Redirect back to the referring page, or the site root if unknown."""
    referer = request.META.get('HTTP_REFERER', '/')
    return HttpResponseRedirect(referer)
def quiz_page_view(request):
    """Render the quiz landing page with results ranked best-first."""
    ranked = Quiz.objects.all().order_by("correctAnswers").reverse()
    return render(request, 'website/quiz.html', {
        'headerText': "Quiz",
        'quizzes': ranked,
    })
def quiz_scores_page_view(request):
    """Render the score board, highest score first."""
    ranked = Quiz.objects.all().order_by("correctAnswers").reverse()
    return render(request, 'website/quiz-scores.html', {
        'headerText': "Quiz Scores",
        'quizzes': ranked,
    })
def do_quiz_page_view(request):
    """Display the quiz form and grade a submission.

    Grading is data-driven: each answer field on the saved Quiz instance
    is compared against ANSWER_KEY and one point per match is added to
    ``correctAnswers`` before saving.  Redirects to the quiz page on a
    valid submission; re-renders the form otherwise.
    """
    # form/model field name -> correct answer value (replaces ten copies
    # of the same if/increment pattern in the original)
    ANSWER_KEY = {
        'most_popular_family_games_developer': "NINTENDO",
        'assassins_Creed_franchise_developer': "UBISOFT",
        'pokemon_Yellow_release_year': "1998",
        'pokemon_Yellow_developer': "GAME FREAK",
        'most_sold_game_of_all_time': "MINECRAFT",
        'most_sold_console_of_all_time': "PLAYSTATION 2",
        'first_commercial_game': "PONG",
        'most_sold_game_on_PlayStation_1': "GRAN TURISMO",
        'most_popular_arcade_game': "SPACE INVADERS",
        'name_of_the_first_main_character_from_Minecraft': "STEVE",
    }
    form = QuizForm(request.POST or None)
    if form.is_valid():
        current_form = form.save(commit=False)
        for field, correct in ANSWER_KEY.items():
            if getattr(current_form, field) == correct:
                current_form.correctAnswers += 1
        current_form.save()
        return HttpResponseRedirect(reverse('website:quiz'))
    context = {
        'headerText': "Quiz",
        'quizForm': form
    }
    return render(request, 'website/do-quiz.html', context)
def comments_page_view(request):
    """List every comment posted so far."""
    return render(request, 'website/comments.html', {
        'headerText': "Comments",
        'comments': Comment.objects.all(),
    })
def comment_add_page_view(request):
    """Show the comment form; persist valid submissions and redirect."""
    form = CommentForm(request.POST or None)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('website:comments'))
    return render(request, 'website/comment-add.html', {
        'headerText': "Add comment",
        'commentForm': form,
    })
|
12,832 | 942fef326fed51cdc7635076c45f07af02b8b7f2 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import re
import nltk
from sklearn.datasets import load_files
# nltk.download('stopwords')
import pickle
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
# In[2]:
# Load the movie-review corpus: one document per file, label from folder name.
movie_data = load_files("txt_sentoken/")
X, y = movie_data.data, movie_data.target
# In[3]:
print(X[0])
# In[4]:
print(y,len(y))
# In[5]:
# --- text preprocessing: clean each review and lemmatize its tokens --------
documents = []
stemmer = WordNetLemmatizer()  # NOTE: named "stemmer" but is a lemmatizer
for sen in range(0, len(X)):
    # Remove all the special characters
    document = re.sub(r'\W', ' ', str(X[sen]))
    # remove all single characters
    document = re.sub(r'\s+[a-zA-Z]\s+', ' ', document)
    # Remove single characters from the start
    # BUG FIX: the original pattern was r'\^[a-zA-Z]\s+', which escapes the
    # caret and therefore matches a literal '^' character instead of
    # anchoring at the start of the string as the comment intends.
    document = re.sub(r'^[a-zA-Z]\s+', ' ', document)
    # Substituting multiple spaces with single space
    document = re.sub(r'\s+', ' ', document, flags=re.I)
    # Removing prefixed 'b' (bytes-literal artifact of str(X[sen]))
    document = re.sub(r'^b\s+', '', document)
    # Converting to Lowercase
    document = document.lower()
    # Lemmatization
    document = document.split()
    document = [stemmer.lemmatize(word) for word in document]
    document = ' '.join(document)
    documents.append(document)
# In[6]:
# --- feature extraction: bag-of-words counts re-weighted by TF-IDF ---------
vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7, stop_words=stopwords.words('english'))
X = vectorizer.fit_transform(documents).toarray()
tfidfconverter = TfidfTransformer()
X = tfidfconverter.fit_transform(X).toarray()
# OR the below script can be used instead
# from sklearn.feature_extraction.text import TfidfVectorizer
# tfidfconverter = TfidfVectorizer(max_features=1500, min_df=5, max_df=0.7, stop_words=stopwords.words('english'))
# X = tfidfconverter.fit_transform(documents).toarray()
# In[7]:
print(X[0],len(X))
# In[8]:
# --- train a random forest on an 80/20 split (fixed seed for repeatability)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
classifier = RandomForestClassifier(n_estimators=1000, random_state=0)
classifier.fit(X_train, y_train)
# In[9]:
y_pred = classifier.predict(X_test)
# In[10]:
# --- evaluation ------------------------------------------------------------
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
print(accuracy_score(y_test, y_pred))
|
12,833 | ac3f9f6cd21790993da3193abad9cd8d67644392 | def re2sqrt(s):
j = s.find("^(1/2)")
while j != -1:
j -= 1
while '0'<=s[j]<='9':
j=j-1
if (j+1) < 0:
s = 'sqrt('+s
else:
s = s[:j+1]+'sqrt('+s[j+1:]
s = s.replace("^(1/2)",")",1)
j = s.find("^(1/2)")
return s |
12,834 | 09529f2290d3434bea5e41beac7d74d42a5b41c4 | #coding:UTF8
#字典排序
#统计目录下占用空间最大的前10个文件
import os
import sys
import operator
def get_dic(topdir):
    """Walk *topdir* recursively and map every file path to its size in bytes."""
    sizes = {}
    for dirpath, _dirnames, filenames in os.walk(topdir):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            sizes[full_path] = os.path.getsize(full_path)
    return sizes
# Report the ten largest files under the directory given on the command line.
# Python 2 only: uses the `print k,v` statement and dict.iteritems().
if __name__ =='__main__':
    # scan the tree rooted at argv[1]
    dic = get_dic(sys.argv[1])
    # sort (path, size) pairs by size, largest first
    sorted_dic = sorted(dic.iteritems(),key=operator.itemgetter(1),reverse=True)
    for k,v in sorted_dic[:10]:
        print k,v
|
12,835 | 7957fbe702b87873d0bb00dc4637f1c881f3c081 | """
146. LRU Cache
Medium
Design a data structure that follows the constraints of a Least Recently Used (LRU) cache.
Implement the LRUCache class:
LRUCache(int capacity) Initialize the LRU cache with positive size capacity.
int get(int key) Return the value of the key if the key exists, otherwise return -1.
void put(int key, int value) Update the value of the key if the key exists. Otherwise, add the key-value pair to the cache. If the number of keys exceeds the capacity from this operation, evict the least recently used key.
The functions get and put must each run in O(1) average time complexity.
Example 1:
Input
["LRUCache", "put", "put", "get", "put", "get", "put", "get", "get", "get"]
[[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]
Output
[null, null, null, 1, null, -1, null, -1, 3, 4]
Explanation
LRUCache lRUCache = new LRUCache(2);
lRUCache.put(1, 1); // cache is {1=1}
lRUCache.put(2, 2); // cache is {1=1, 2=2}
lRUCache.get(1); // return 1
lRUCache.put(3, 3); // LRU key was 2, evicts key 2, cache is {1=1, 3=3}
lRUCache.get(2); // returns -1 (not found)
lRUCache.put(4, 4); // LRU key was 1, evicts key 1, cache is {4=4, 3=3}
lRUCache.get(1); // return -1 (not found)
lRUCache.get(3); // return 3
lRUCache.get(4); // return 4
Constraints:
1 <= capacity <= 3000
0 <= key <= 10^4
0 <= value <= 10^5
At most 2 * 10^5 calls will be made to get and put.
"""
# V0
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/100800072
class ListNode:
    """Node of the circular doubly-linked list backing the LRU cache."""
    def __init__(self, key, value):
        self.key, self.value = key, value
        # a fresh node is self-linked in both directions, which lets a
        # single node serve as the cache's circular sentinel
        self.prev = self.next = self
class LRUCache:
    """LRU cache with O(1) get/put.

    Backed by a dict (key -> node) plus a circular doubly-linked list of
    ListNode: root.next is the most-recently used node, root.prev the
    least-recently used.  root itself is a sentinel and never evicted.
    """
    def __init__(self, capacity):
        self.dic = dict()          # key -> ListNode
        self.capacity = capacity   # maximum number of entries
        self.size = 0              # current number of entries
        self.root = ListNode(0, 0) # sentinel node of the circular list
    def get(self, key):
        """Return key's value and mark it most-recently used, or -1."""
        if key in self.dic:
            node = self.dic[key]
            # move the node to the head (most-recently-used position)
            self.removeFromList(node)
            self.insertIntoHead(node)
            return node.value
        else:
            return -1
    def put(self, key, value):
        """Insert/update key, evicting the LRU entry when over capacity."""
        if key in self.dic:
            node = self.dic[key]
            self.removeFromList(node)
            self.insertIntoHead(node)
            node.value = value
        else:
            if self.size >= self.capacity:
                # evict the least-recently-used entry before inserting
                self.removeFromTail()
                self.size -= 1
            node = ListNode(key, value)
            self.insertIntoHead(node)
            self.dic[key] = node
            self.size += 1
    def removeFromList(self, node):
        """Unlink node from the list (sentinel is never removed)."""
        if node == self.root: return
        prev_node = node.prev
        next_node = node.next
        prev_node.next = next_node
        next_node.prev = prev_node
        node.prev = node.next = None
    def insertIntoHead(self, node):
        """Splice node in right after the sentinel (MRU position)."""
        head_node = self.root.next
        head_node.prev = node
        node.prev = self.root
        self.root.next = node
        node.next = head_node
    def removeFromTail(self):
        """Drop the LRU node (root.prev) from list and dict."""
        if self.size == 0: return
        tail_node = self.root.prev
        del self.dic[tail_node.key]
        self.removeFromList(tail_node)
# V1'
# https://blog.csdn.net/laughing2333/article/details/70231547
class LRUCache(object):
    """LRU cache meeting the problem's O(1) requirement via OrderedDict.

    The original version kept a plain list of keys and used list.remove()
    and list slicing on every operation, making both get and put O(n) —
    in violation of the stated O(1) constraint.  collections.OrderedDict
    tracks recency order natively: move_to_end() refreshes a key and
    popitem(last=False) evicts the least-recently-used entry, both O(1).
    """
    def __init__(self, capacity):
        # local import keeps this self-contained reference implementation
        # free of module-level dependencies
        from collections import OrderedDict
        self.capacity = capacity
        self._cache = OrderedDict()  # key -> value, least-recent first

    def get(self, key):
        """Return key's value (refreshing its recency), or -1 if absent."""
        if key not in self._cache:
            return -1
        self._cache.move_to_end(key)
        return self._cache[key]

    def put(self, key, value):
        """Insert/update key, evicting the LRU entry when at capacity."""
        if key in self._cache:
            self._cache[key] = value
            self._cache.move_to_end(key)
            return
        if len(self._cache) == self.capacity:
            self._cache.popitem(last=False)  # evict least-recently used
        self._cache[key] = value
# V2
|
12,836 | 8a2cdff66b39835b752ac1f32623bebb9893449e | #
# Python GUI - Colors - Gtk
#
from gtk import gdk
from GUI import export
from GUI.GColors import Color as GColor
class Color(GColor):
    """Gtk backend colour: wraps a gdk.Color plus a separate alpha value
    (gdk.Color itself has no alpha channel)."""
    _alpha = 1.0  # class-level default; overridden per instance in __init__
    def _from_gdk_color(cls, _gdk_color):
        """Alternate constructor wrapping an existing gdk.Color
        (alpha keeps the class default of 1.0)."""
        # __new__ bypasses __init__, which would build a fresh gdk.Color
        c = cls.__new__(cls)
        c._gdk_color = _gdk_color
        return c
    # pre-decorator (Python < 2.4) spelling of @classmethod
    _from_gdk_color = classmethod(_from_gdk_color)
    def __init__(self, red, green, blue, alpha = 1.0):
        """Build a colour from 0.0-1.0 component values."""
        self._rgba = (red, green, blue, alpha)
        # gdk stores channels as 16-bit integers (0..65535)
        gdk_color = gdk.Color()
        gdk_color.red = int(red * 65535)
        gdk_color.green = int(green * 65535)
        gdk_color.blue = int(blue * 65535)
        self._gdk_color = gdk_color
        self._alpha = alpha
    def get_red(self):
        # convert the 16-bit channel back to the 0.0-1.0 range
        return self._gdk_color.red / 65535.0
    def get_green(self):
        return self._gdk_color.green / 65535.0
    def get_blue(self):
        return self._gdk_color.blue / 65535.0
    def get_alpha(self):
        return self._alpha
# register this backend Color class with the GUI framework
export(Color)
|
12,837 | 0a1c816109520270ad0c0f9588c5927c64f68ee8 | #!/usr/bin/env python2
#
# git-deps - automatically detect dependencies between git commits
# Copyright (C) 2013 Adam Spiers <git@adamspiers.org>
#
# The software in this repository is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 2 of the
# License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import argparse
import json
import logging
import os
import re
import sys
import subprocess
import types
def abort(msg, exitcode=1):
    """Write *msg* to stderr and terminate the process with *exitcode*."""
    sys.stderr.write(msg + "\n")
    sys.exit(exitcode)
# Import pygit2, aborting with distro-specific install hints when missing.
try:
    import pygit2
except ImportError:
    msg = "pygit2 not installed; aborting."
    install_guide = None
    import platform
    if platform.system() == 'Linux':
        # NOTE(review): platform.linux_distribution() was removed in
        # Python 3.8 — fine here since this script targets Python 2.
        distro, version, d_id = platform.linux_distribution()
        distro = distro.strip()  # why are there trailing spaces??
        if distro == 'openSUSE':
            install_guide = \
                "You should be able to install it with something like:\n\n" \
                "    sudo zypper install python-pygit2"
        elif distro == 'debian':
            install_guide = \
                "You should be able to install it with something like:\n\n" \
                "    sudo apt-get install python-pygit2"
    # no known recipe for this platform: ask the user to report back
    if install_guide is None:
        msg += "\n\nIf you figure out a way to install it on your platform,\n" \
               "please submit a new issue with the details at:\n\n" \
               "    https://github.com/aspiers/git-config/issues/new\n\n" \
               "so that it can be documented to help other users."
    else:
        msg += "\n\n" + install_guide
    abort(msg)
class DependencyListener(object):
    """Observer base class for DependencyDetector result events.

    Attach an instance via DependencyDetector.add_listener(); the
    detector then invokes the hooks below as commits and dependencies
    are discovered.  Every hook is a no-op here — subclasses override
    only the events they care about.
    """

    def __init__(self, options):
        self.options = options

    def set_detector(self, detector):
        # back-reference filled in when the listener is registered
        self.detector = detector

    def repo(self):
        """Convenience accessor for the detector's repository."""
        return self.detector.repo

    # --- event hooks (all intentionally empty) --------------------------

    def new_commit(self, commit):
        pass

    def new_dependent(self, dependent):
        pass

    def new_dependency(self, dependent, dependency, path, line_num):
        pass

    def new_path(self, dependent, dependency, path, line_num):
        pass

    def new_line(self, dependent, dependency, path, line_num):
        pass

    def dependent_done(self, dependent, dependencies):
        pass

    def all_done(self):
        pass
class CLIDependencyListener(DependencyListener):
    """Dependency listener for use when running in CLI mode.
    This allows us to output dependencies as they are discovered,
    rather than waiting for all dependencies to be discovered before
    outputting anything; the latter approach can make the user wait
    too long for useful output if recursion is enabled.
    """
    def __init__(self, options):
        super(CLIDependencyListener, self).__init__(options)
        # Count each mention of each revision, so we can avoid duplicating
        # commits in the output.
        self._revs = {}
    def new_commit(self, commit):
        """Record one more mention of this commit's SHA1."""
        rev = commit.hex
        if rev not in self._revs:
            self._revs[rev] = 0
        self._revs[rev] += 1
    def new_dependency(self, dependent, dependency, path, line_num):
        """Print a newly discovered dependency, honoring --multi/--log.

        The `<= 1` checks suppress repeat output for commits that have
        already been mentioned (counted in new_commit).
        """
        dependent_sha1 = dependent.hex
        dependency_sha1 = dependency.hex
        if self.options.multi:
            if self.options.log:
                print("%s depends on:" % dependent_sha1)
            else:
                print("%s %s" % (dependent_sha1, dependency_sha1))
        else:
            if not self.options.log and self._revs[dependency_sha1] <= 1:
                print(dependency_sha1)
        if self.options.log and self._revs[dependency_sha1] <= 1:
            # show the dependency's full log entry (with colour) inline
            cmd = [
                'git',
                '--no-pager',
                '-c', 'color.ui=always',
                'log', '-n1',
                dependency_sha1
            ]
            print(subprocess.check_output(cmd))
            # dependency = detector.get_commit(dependency_sha1)
            # print(dependency.message + "\n")
            # for path in self.dependencies[dependency]:
            #     print("  %s" % path)
            #     keys = sorted(self.dependencies[dependency][path].keys()
            #     print("    %s" % ", ".join(keys)))
class JSONDependencyListener(DependencyListener):
    """Dependency listener for use when compiling graph data in a JSON
    format which can be consumed by WebCola / d3. Each new commit has
    to be added to a 'commits' array.
    """
    def __init__(self, options):
        super(JSONDependencyListener, self).__init__(options)
        # Map commit names to indices in the commits array. This is used
        # to avoid the risk of duplicates in the commits array, which
        # could happen when recursing, since multiple commits could
        # potentially depend on the same commit.
        self._commits = {}
        self._json = {
            'commits': [],
            'dependencies': [],
        }
    def get_commit(self, sha1):
        """Return the stored commit entry for *sha1* (must exist)."""
        i = self._commits[sha1]
        return self._json['commits'][i]
    def add_commit(self, commit):
        """Adds the commit to the commits array if it doesn't already exist,
        and returns the commit's index in the array.
        """
        sha1 = commit.hex
        if sha1 in self._commits:
            return self._commits[sha1]
        # split the commit message into title / body for display
        title, separator, body = commit.message.partition("\n")
        commit = {
            'explored': False,
            'sha1': sha1,
            'name': GitUtils.abbreviate_sha1(sha1),
            'describe': GitUtils.describe(sha1),
            'refs': GitUtils.refs_to(sha1, self.repo()),
            'author_name': commit.author.name,
            'author_mail': commit.author.email,
            'author_time': commit.author.time,
            'author_offset': commit.author.offset,
            'committer_name': commit.committer.name,
            'committer_mail': commit.committer.email,
            'committer_time': commit.committer.time,
            'committer_offset': commit.committer.offset,
            # 'message': commit.message,
            'title': title,
            'separator': separator,
            'body': body.lstrip("\n"),
        }
        self._json['commits'].append(commit)
        self._commits[sha1] = len(self._json['commits']) - 1
        return self._commits[sha1]
    def add_link(self, source, target):
        """Record a parent/child dependency edge by SHA1."""
        # BUG FIX: the original read `self._json['dependencies'].append`
        # without calling it, so this method silently did nothing.  The
        # appended record now mirrors the shape used by new_dependency.
        self._json['dependencies'].append({
            'parent': source,
            'child': target,
        })
    def new_commit(self, commit):
        self.add_commit(commit)
    def new_dependency(self, parent, child, path, line_num):
        """Append a parent -> child dependency edge."""
        ph = parent.hex
        ch = child.hex
        new_dep = {
            'parent': ph,
            'child': ch,
        }
        if self.options.log:
            pass  # FIXME
        self._json['dependencies'].append(new_dep)
    def dependent_done(self, dependent, dependencies):
        # mark the commit's dependencies as fully explored
        commit = self.get_commit(dependent.hex)
        commit['explored'] = True
    def json(self):
        """Return the accumulated graph structure."""
        return self._json
class GitUtils(object):
    """Stateless helpers shelling out to git for operations not yet
    (or not conveniently) exposed by pygit2."""
    @classmethod
    def abbreviate_sha1(cls, sha1):
        """Uniquely abbreviates the given SHA1."""
        # For now we invoke git-rev-parse(1), but hopefully eventually
        # we will be able to do this via pygit2.
        cmd = ['git', 'rev-parse', '--short', sha1]
        # cls.logger.debug(" ".join(cmd))
        out = subprocess.check_output(cmd).strip()
        # cls.logger.debug(out)
        return out
    @classmethod
    def describe(cls, sha1):
        """Returns a human-readable representation of the given SHA1.

        Returns '' when no ref can describe the commit; re-raises any
        other git-describe failure.
        """
        # For now we invoke git-describe(1), but eventually we will be
        # able to do this via pygit2, since libgit2 already provides
        # an API for this:
        #   https://github.com/libgit2/pygit2/pull/459#issuecomment-68866929
        #   https://github.com/libgit2/libgit2/pull/2592
        cmd = [
            'git', 'describe',
            '--all',        # look for tags and branches
            '--long',       # remotes/github/master-0-g2b6d591
            # '--contains',
            # '--abbrev',
            sha1
        ]
        # cls.logger.debug(" ".join(cmd))
        out = None
        try:
            out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # "No tags can describe" is an expected, benign outcome
            if e.output.find('No tags can describe') != -1:
                return ''
            raise
        out = out.strip()
        # strip the ref-namespace prefix git-describe --all adds
        out = re.sub(r'^(heads|tags|remotes)/', '', out)
        # We already have the abbreviated SHA1 from abbreviate_sha1()
        out = re.sub(r'-g[0-9a-f]{7,}$', '', out)
        # cls.logger.debug(out)
        return out
    @classmethod
    def refs_to(cls, sha1, repo):
        """Returns all refs pointing to the given SHA1."""
        matching = []
        for refname in repo.listall_references():
            symref = repo.lookup_reference(refname)
            # resolve symbolic refs (e.g. HEAD) to their direct target
            dref = symref.resolve()
            oid = dref.target
            commit = repo.get(oid)
            if commit.hex == sha1:
                matching.append(symref.shorthand)
        return matching
    @classmethod
    def rev_list(cls, rev_range):
        """Return the SHA1s in *rev_range* as a list (newest first)."""
        cmd = ['git', 'rev-list', rev_range]
        return subprocess.check_output(cmd).strip().split('\n')
class InvalidCommitish(StandardError):
    """Raised when a commit-ish argument cannot be resolved to a commit."""

    def __init__(self, commitish):
        # Keep the offending commit-ish around for error reporting.
        self.commitish = commitish

    def message(self):
        """Return a human-readable description of the failure."""
        return "Couldn't resolve commitish %s" % (self.commitish,)
class DependencyDetector(object):
    """Class for automatically detecting dependencies between git commits.
    A dependency is inferred by diffing the commit with each of its
    parents, and for each resulting hunk, performing a blame to see
    which commit was responsible for introducing the lines to which
    the hunk was applied.
    Dependencies can be traversed recursively, building a dependency
    tree represented (conceptually) by a list of edges.
    """
    def __init__(self, options, repo_path=None, logger=None):
        self.options = options
        if logger is None:
            self.logger = self.default_logger()
        else:
            # BUGFIX: a caller-supplied logger was previously dropped,
            # leaving self.logger unset and crashing on first log call.
            self.logger = logger
        if repo_path is None:
            try:
                repo_path = pygit2.discover_repository('.')
            except KeyError:
                abort("Couldn't find a repository in the current directory.")
        self.repo = pygit2.Repository(repo_path)
        # Nested dict mapping dependents -> dependencies -> files
        # causing that dependency -> numbers of lines within that file
        # causing that dependency. The first two levels form edges in
        # the dependency graph, and the latter two tell us what caused
        # those edges.
        self.dependencies = {}
        # A TODO list (queue) and dict of dependencies which haven't
        # yet been recursively followed. Only useful when recursing.
        self.todo = []
        self.todo_d = {}
        # An ordered list and dict of commits whose dependencies we
        # have already detected.
        self.done = []
        self.done_d = {}
        # A cache mapping SHA1s to commit objects
        self.commits = {}
        # Memoization for branch_contains()
        self.branch_contains_cache = {}
        # Callbacks to be invoked when a new dependency has been
        # discovered.
        self.listeners = []
    def add_listener(self, listener):
        """Register a DependencyListener to receive detection events."""
        if not isinstance(listener, DependencyListener):
            raise RuntimeError("Listener must be a DependencyListener")
        self.listeners.append(listener)
        listener.set_detector(self)
    def notify_listeners(self, event, *args):
        """Invoke the `event`-named method on every registered listener."""
        for listener in self.listeners:
            fn = getattr(listener, event)
            fn(*args)
    def default_logger(self):
        """Build the default logger; verbose stdout logging under --debug."""
        if not self.options.debug:
            return logging.getLogger(self.__class__.__name__)
        log_format = '%(asctime)-15s %(levelname)-6s %(message)s'
        date_format = '%b %d %H:%M:%S'
        formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setFormatter(formatter)
        # logger = logging.getLogger(__name__)
        logger = logging.getLogger(self.__class__.__name__)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        return logger
    def seen_commit(self, rev):
        """Return True if `rev` is already in the commit cache."""
        return rev in self.commits
    def get_commit(self, rev):
        """Resolve `rev` to a commit object, caching the result.

        Raises InvalidCommitish if the revision cannot be resolved.
        """
        if rev in self.commits:
            return self.commits[rev]
        try:
            self.commits[rev] = self.repo.revparse_single(rev)
        except (KeyError, ValueError):
            raise InvalidCommitish(rev)
        return self.commits[rev]
    def find_dependencies(self, dependent_rev, recurse=None):
        """Find all dependencies of the given revision, recursively traversing
        the dependency tree if requested.
        """
        if recurse is None:
            recurse = self.options.recurse
        try:
            dependent = self.get_commit(dependent_rev)
        except InvalidCommitish as e:
            abort(e.message())
        self.todo.append(dependent)
        self.todo_d[dependent.hex] = True
        while self.todo:
            sha1s = [commit.hex[:8] for commit in self.todo]
            self.logger.debug("TODO list: %s" % " ".join(sha1s))
            dependent = self.todo.pop(0)
            del self.todo_d[dependent.hex]
            self.logger.debug("Processing %s from TODO list" %
                              dependent.hex[:8])
            self.notify_listeners('new_commit', dependent)
            # Merge commits have several parents; check each diff.
            for parent in dependent.parents:
                self.find_dependencies_with_parent(dependent, parent)
            self.done.append(dependent.hex)
            self.done_d[dependent.hex] = True
            self.logger.debug("Found all dependencies for %s" %
                              dependent.hex[:8])
            # A commit won't have any dependencies if it only added new files
            dependencies = self.dependencies.get(dependent.hex, {})
            self.notify_listeners('dependent_done', dependent, dependencies)
        self.notify_listeners('all_done')
    def find_dependencies_with_parent(self, dependent, parent):
        """Find all dependencies of the given revision caused by the given
        parent commit. This will be called multiple times for merge
        commits which have multiple parents.
        """
        self.logger.debug("  Finding dependencies of %s via parent %s" %
                          (dependent.hex[:8], parent.hex[:8]))
        diff = self.repo.diff(parent, dependent,
                              context_lines=self.options.context_lines)
        for patch in diff:
            path = patch.delta.old_file.path
            self.logger.debug("    Examining hunks in %s" % path)
            for hunk in patch.hunks:
                self.blame_hunk(dependent, parent, path, hunk)
    def blame_hunk(self, dependent, parent, path, hunk):
        """Run git blame on the parts of the hunk which exist in the older
        commit in the diff. The commits generated by git blame are
        the commits which the newer commit in the diff depends on,
        because without the lines from those commits, the hunk would
        not apply correctly.
        """
        first_line_num = hunk.old_start
        line_range_before = "-%d,%d" % (hunk.old_start, hunk.old_lines)
        line_range_after = "+%d,%d" % (hunk.new_start, hunk.new_lines)
        self.logger.debug("      Blaming hunk %s @ %s" %
                          (line_range_before, parent.hex[:8]))
        if not self.tree_lookup(path, parent):
            # This is probably because dependent added a new directory
            # which was not previously in the parent.
            return
        cmd = [
            'git', 'blame',
            '--porcelain',
            '-L', "%d,+%d" % (hunk.old_start, hunk.old_lines),
            parent.hex, '--', path
        ]
        blame = subprocess.check_output(cmd)
        dependent_sha1 = dependent.hex
        if dependent_sha1 not in self.dependencies:
            self.logger.debug('          New dependent: %s (%s)' %
                              (dependent_sha1[:8], self.oneline(dependent)))
            self.dependencies[dependent_sha1] = {}
            self.notify_listeners('new_dependent', dependent)
        line_to_culprit = {}
        for line in blame.split('\n'):
            # self.logger.debug('    !' + line.rstrip())
            # Raw string for the regex (avoids relying on '\d' passing
            # through as a literal backslash escape).
            m = re.match(r'^([0-9a-f]{40}) (\d+) (\d+)( \d+)?$', line)
            if not m:
                continue
            dependency_sha1, orig_line_num, line_num = m.group(1, 2, 3)
            line_num = int(line_num)
            dependency = self.get_commit(dependency_sha1)
            line_to_culprit[line_num] = dependency.hex
            if self.is_excluded(dependency):
                self.logger.debug(
                    '        Excluding dependency %s from line %s (%s)' %
                    (dependency_sha1[:8], line_num,
                     self.oneline(dependency)))
                continue
            if dependency_sha1 not in self.dependencies[dependent_sha1]:
                # Don't re-process commits already queued or finished.
                if dependency_sha1 in self.todo_d:
                    self.logger.debug(
                        '        Dependency %s via line %s already in TODO' %
                        (dependency_sha1[:8], line_num,))
                    continue
                if dependency_sha1 in self.done_d:
                    self.logger.debug(
                        '        Dependency %s via line %s already done' %
                        (dependency_sha1[:8], line_num,))
                    continue
                self.logger.debug(
                    '        New dependency %s via line %s (%s)' %
                    (dependency_sha1[:8], line_num, self.oneline(dependency)))
                self.dependencies[dependent_sha1][dependency_sha1] = {}
                self.notify_listeners('new_commit', dependency)
                self.notify_listeners('new_dependency',
                                      dependent, dependency, path, line_num)
                if dependency_sha1 not in self.dependencies:
                    if self.options.recurse:
                        self.todo.append(dependency)
                        self.todo_d[dependency.hex] = True
                        self.logger.debug('        added to TODO')
            dep_sources = self.dependencies[dependent_sha1][dependency_sha1]
            if path not in dep_sources:
                dep_sources[path] = {}
                self.notify_listeners('new_path',
                                      dependent, dependency, path, line_num)
            if line_num in dep_sources[path]:
                abort("line %d already found when blaming %s:%s" %
                      (line_num, parent.hex[:8], path))
            dep_sources[path][line_num] = True
            self.notify_listeners('new_line',
                                  dependent, dependency, path, line_num)
        # Debug-dump the hunk annotated with the culprit of each old line.
        diff_format = '          |%8.8s %5s %s%s'
        hunk_header = '@@ %s %s @@' % (line_range_before, line_range_after)
        self.logger.debug(diff_format % ('--------', '-----', '', hunk_header))
        line_num = hunk.old_start
        for line in hunk.lines:
            if "\n\\ No newline at end of file" == line.content.rstrip():
                break
            if line.origin == '+':
                rev = ln = ''
            else:
                rev = line_to_culprit[line_num]
                ln = line_num
                line_num += 1
            self.logger.debug(diff_format % (rev, ln, line.origin, line.content.rstrip()))
    def oneline(self, commit):
        """Return the first line of the commit message."""
        return commit.message.split('\n', 1)[0]
    def is_excluded(self, commit):
        """True if `commit` is an ancestor of any --exclude-commits arg."""
        if self.options.exclude_commits is not None:
            for exclude in self.options.exclude_commits:
                if self.branch_contains(commit, exclude):
                    return True
        return False
    def branch_contains(self, commit, branch):
        """Return True if `branch` contains `commit` (memoized)."""
        sha1 = commit.hex
        branch_commit = self.get_commit(branch)
        branch_sha1 = branch_commit.hex
        self.logger.debug("        Does %s (%s) contain %s?" %
                          (branch, branch_sha1[:8], sha1[:8]))
        if sha1 not in self.branch_contains_cache:
            self.branch_contains_cache[sha1] = {}
        if branch_sha1 in self.branch_contains_cache[sha1]:
            memoized = self.branch_contains_cache[sha1][branch_sha1]
            self.logger.debug("          %s (memoized)" % memoized)
            return memoized
        # branch contains commit iff their merge-base is the commit itself.
        cmd = ['git', 'merge-base', sha1, branch_sha1]
        # self.logger.debug(" ".join(cmd))
        out = subprocess.check_output(cmd).strip()
        self.logger.debug("        merge-base returned: %s" % out[:8])
        result = out == sha1
        self.logger.debug("          %s" % result)
        self.branch_contains_cache[sha1][branch_sha1] = result
        return result
    def tree_lookup(self, target_path, commit):
        """Navigate to the tree or blob object pointed to by the given target
        path for the given commit. This is necessary because each git
        tree only contains entries for the directory it refers to, not
        recursively for all subdirectories.
        """
        segments = target_path.split("/")
        tree_or_blob = commit.tree
        path = ''
        while segments:
            dirent = segments.pop(0)
            if isinstance(tree_or_blob, pygit2.Tree):
                if dirent in tree_or_blob:
                    tree_or_blob = self.repo[tree_or_blob[dirent].oid]
                    # self.logger.debug('%s in %s' % (dirent, path))
                    if path:
                        path += '/'
                    path += dirent
                else:
                    # This is probably because we were called on a
                    # commit whose parent added a new directory.
                    self.logger.debug('      %s not in %s in %s' %
                                      (dirent, path, commit.hex[:8]))
                    return None
            else:
                self.logger.debug('      %s not a tree in %s' %
                                  (tree_or_blob, commit.hex[:8]))
                return None
        return tree_or_blob
    def edges(self):
        """Return the dependency graph as a nested list of edge tuples."""
        return [
            [(dependent, dependency)
             for dependency in self.dependencies[dependent]]
            for dependent in self.dependencies.keys()
        ]
def parse_args():
    """Parse command-line options; return (options, positional args).

    Aborts via parser.error() if no commit-ish argument is supplied.
    """
    parser = argparse.ArgumentParser(
        description='Auto-detects commits on which the given '
                    'commit(s) depend.',
        usage='%(prog)s [options] COMMIT-ISH [COMMIT-ISH...]',
        add_help=False
    )
    parser.add_argument('-h', '--help', action='help',
                        help='Show this help message and exit')
    parser.add_argument('-l', '--log', dest='log', action='store_true',
                        help='Show commit logs for calculated dependencies')
    parser.add_argument('-j', '--json', dest='json', action='store_true',
                        help='Output dependencies as JSON')
    parser.add_argument('-r', '--recurse', dest='recurse', action='store_true',
                        help='Follow dependencies recursively')
    parser.add_argument('-e', '--exclude-commits', dest='exclude_commits',
                        action='append', metavar='COMMITISH',
                        help='Exclude commits which are ancestors of the '
                             'given COMMITISH (can be repeated)')
    parser.add_argument('-c', '--context-lines', dest='context_lines',
                        type=int, metavar='NUM', default=1,
                        help='Number of lines of diff context to use '
                             '[%(default)s]')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true',
                        help='Show debugging')
    options, args = parser.parse_known_args()
    # Are we potentially detecting dependencies for more than one commit?
    # Even if we're not recursing, the user could specify multiple commits
    # via CLI arguments.
    options.multi = options.recurse
    if len(args) == 0:
        parser.error('You must specify at least one commit-ish.')
    return options, args
def cli(options, args):
    """Drive dependency detection for each commit-ish given on the CLI.

    Attaches a JSON or plain-CLI listener depending on --json, then runs
    the detector over every revision expanded from the arguments.
    """
    detector = DependencyDetector(options)
    if options.json:
        listener = JSONDependencyListener(options)
    else:
        listener = CLIDependencyListener(options)
    detector.add_listener(listener)
    if len(args) > 1:
        options.multi = True
    for revspec in args:
        # A single argument may expand to several revisions (e.g. a range).
        revs = GitUtils.rev_list(revspec)
        if len(revs) > 1:
            options.multi = True
        for rev in revs:
            try:
                detector.find_dependencies(rev)
            except KeyboardInterrupt:
                # Ctrl-C stops detection early but still emits the partial
                # results gathered so far.
                pass
    if options.json:
        print(json.dumps(listener.json(), sort_keys=True, indent=4))
def main():
    """Program entry point: parse args and run the CLI driver."""
    options, args = parse_args()
    # rev_list = sys.stdin.readlines()
    try:
        cli(options, args)
    except InvalidCommitish as e:
        abort(e.message())
if __name__ == "__main__":
    main()
|
12,838 | 48196432260c0e3c6ab9f578417de03671c20932 | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from dipy.io.streamline import load_tractogram
from dipy.align.streamlinear import set_number_of_points
from dipy.viz import window, actor, colormap as cmap
import numpy as np
import time
import os
from models.tf_tools.transformer.transformer import Transformer
# HERE ARE ALL THE HYPERPARAMETERS OF THE TRANSFORMER
from models.tf_tools.parameters import *
start_time = time.time()
def plot_graphs(history, metric):
    """Plot a Keras History metric next to its validation counterpart."""
    val_key = 'val_' + metric
    plt.plot(history.history[metric])
    plt.plot(history.history[val_key], '')
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend([metric, val_key])
# Our small recurrent model
class tfClassifier(Transformer):
    """Transformer-based classifier for streamline (tractography) data.

    Loads per-subject .tck tractograms, resamples each streamline to 20
    points, trains on all subjects except `test_subject`, and checkpoints
    the weights under ./checkpoints-TF-<test_subject>.
    """
    def __init__(self, classes):
        # d_model, num_layers, num_heads, dff come from the star import of
        # models.tf_tools.parameters at file level.
        super(tfClassifier, self).__init__(d_model, num_layers, num_heads, len(classes), dff, rate=0.1)
        self.classes = classes

    def train(self, subjects, test_subject, path_files, retrain=True):
        """Train the classifier, or restore the last checkpoint.

        subjects: list of subject directory names under path_files.
        test_subject: subject held out of training.
        retrain: when False, only restore the latest checkpoint.
        """
        self.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
        # Checkpoints
        checkpoint_dir = './checkpoints-TF'+'-'+test_subject
        checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
        checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, model=self)
        if retrain == True:
            train_trajs = []
            train_labels = []
            val_trajs = []
            val_labels = []
            for k, subject in enumerate(subjects):
                print('[INFO] Reading subject:', subject)
                # Reads the .tck files from each specified class
                for i, c in enumerate(self.classes):
                    # Load tractogram
                    #filename = path_files+'auto'+c+'.tck'
                    filename = path_files+subject+'/'+c+'_20p.tck'
                    if not os.path.isfile(filename):
                        continue
                    print('[INFO] Reading file:', filename)
                    #tractogram = load_tractogram(filename, path_files+fNameRef, bbox_valid_check=False)
                    tractogram = load_tractogram(filename, './utils/t1.nii.gz', bbox_valid_check=False)
                    # Get all the streamlines
                    STs = tractogram.streamlines
                    # Resample every streamline to a fixed 20 points.
                    scaledSTs = set_number_of_points(STs, 20)
                    if subject == test_subject:
                        val_trajs.extend(scaledSTs)
                        val_labels.extend(len(scaledSTs)*[i])
                    else:
                        train_trajs.extend(scaledSTs)
                        train_labels.extend(len(scaledSTs)*[i])
            print('[INFO] Used for testing: ', test_subject)
            print('[INFO] Total number of streamlines for training:', len(train_trajs))
            print('[INFO] Total number of streamlines for validation:', len(val_trajs))
            train_trajs = np.array(train_trajs)
            val_trajs = np.array(val_trajs)
            train_labels = np.array(train_labels)
            # One-hot encode the integer class labels.
            aux = np.zeros([train_labels.shape[0], len(self.classes)])
            for i in range(train_labels.shape[0]): aux[i, train_labels[i]] = 1
            train_labels = aux
            # NOTE(review): val_trajs/val_labels are collected but never
            # passed to fit(); validation uses validation_split on the
            # training set instead -- confirm this is intentional.
            # Training
            history = self.fit(train_trajs, train_labels, batch_size=32, epochs=25, validation_split=0.3)
            checkpoint.save(file_prefix=checkpoint_prefix)
            # Plots
            plt.figure(figsize=(16, 8))
            plt.subplot(1, 2, 1)
            plot_graphs(history, 'accuracy')
            plt.ylim(None, 1)
            plt.subplot(1, 2, 2)
            plot_graphs(history, 'loss')
            plt.ylim(0, None)
            plt.show()
        else:
            # To avoid training, we can just load the parameters we saved in the previous session
            print("[INFO] Restoring last model")
            status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
|
12,839 | 2f8f4eb9c22fa400d89eed9fb3a700f3624d789f | from datetime import timedelta, datetime
import pytest
from tests.conftest import assert_equal_objects, obj_id
from saana_lib.recommendation import MinimizeRecommendation, \
AvoidRecommendation, PrioritizeRecommendation, RecipeRecommendation
# Aliases: the tests use the same ObjectId factory (from conftest) for both
# patient and recipe identifiers.
patient_id = obj_id
recipe_id = obj_id
@pytest.fixture
def tags_find_patch(mocker):
    """Patch the mongo `tags` collection so find() yields two canned tags
    (a comorbidity and a symptom) without touching a real database."""
    tags_mock = mocker.patch(
        'saana_lib.connectMongo.db.tags'
    )
    tags_mock.find.return_value = [{
        '_id': 'b09779e',
        'minimize': {},
        'name': 'hypertension',
        'avoid': [],
        'prior': {},
        'type': 'comorbitities',
    }, {
        '_id': '5cab57647',
        'minimize': {},
        'name': 'dry mouth',
        'avoid': [],
        'prior': {},
        'type': 'symptoms',
    }]
    return tags_mock
@pytest.fixture
def recommendation_save_mock(mocker):
    """Patched Recommendation.save, so tests never write to the database."""
    return mocker.patch('saana_lib.recommendation.Recommendation.save')
@pytest.mark.usefixtures("datetime_mock")
class TestCaseRecommendation(object):
    """
    The method being called in the first two tests is the same,
    so tests are meant to cover two different aspects (not to
    be repetitive)
    """
    def test_minimize_recommendation(self, mocker):
        """Verify that the proper recommendation class is used
        (in this case MinimizeIngredients)
        """
        all_prop = mocker.patch(
            'saana_lib.recommendation.MinimizeIngredients.all',
            new_callable=mocker.PropertyMock,
            return_value={"onion": 0, "flax": 2}
        )
        _ = MinimizeRecommendation(patient_id()).as_list()
        all_prop.assert_called_once_with()

    def test_prioritize_recommendation(self, mocker):
        """Verify the content of the list being returned"""
        mocker.patch(
            'saana_lib.recommendation.PrioritizeIngredients.all',
            new_callable=mocker.PropertyMock,
            return_value={"onion": 0, "flax": 2}
        )
        get_ingr_mock = mocker.patch(
            'saana_lib.recommendation.Recommendation.get_or_create_ingredient'
        )
        _ = PrioritizeRecommendation(patient_id()).as_list()
        # First positional argument of every call should be an ingredient name.
        assert_equal_objects(
            [arg[0][0] for arg in get_ingr_mock.call_args_list],
            ["onion", "flax"]
        )

    def test_avoid_recommendation_values(self, mocker, datetime_mock):
        """
        This test verifies both, that the correct property is called,
        and check the content values being returned
        """
        all_prop = mocker.patch(
            'saana_lib.patient.AvoidIngredients.all',
            new_callable=mocker.PropertyMock,
            return_value={"onion", "flax"}
        )
        get_ingr_mock = mocker.patch(
            'saana_lib.recommendation.Recommendation.get_or_create_ingredient'
        )
        _ = AvoidRecommendation(patient_id()).as_list()
        assert_equal_objects(
            [arg[0][0] for arg in get_ingr_mock.call_args_list],
            ["onion", "flax"]
        )
        all_prop.assert_called_once_with()
        # datetime.now() is expected to be consulted four times per run.
        assert datetime_mock.now.call_count == 4
class TestCaseRecipeRecommendation:
    """Tests for RecipeRecommendation time-window queries and the
    repetition scoring penalty."""

    # Shared instance under test; recipe=None is fine because these tests
    # only exercise query-building and pure scoring methods.
    klass = RecipeRecommendation(
        recipe=None,
        patient_id=patient_id(),
        recipe_id=recipe_id()
    )

    def test_recommendations_in_time_frame(self, mocker):
        """Explicit start/end dates are passed straight to the query."""
        m = mocker.patch(
            'saana_lib.recommendation.db.patient_recipe_recommendation',
        )
        start = datetime.now()
        end = start - timedelta(days=1)
        _ = self.klass.recommendations_in_time_frame(start, end)
        m.find.assert_called_once_with({
            'patient_id': obj_id(),
            'created_at': {'$lte': start, "$gte": end}},
            {'recipe_id': 1, '_id': 0}
        )

    def test_recommendations_in_time_frame_default_end(self, mocker):
        """Omitting the end date defaults to a 7-day window before start."""
        m = mocker.patch(
            'saana_lib.recommendation.db.patient_recipe_recommendation',
        )
        start = datetime.now()
        _ = self.klass.recommendations_in_time_frame(start)
        m.find.assert_called_once_with({
            'patient_id': obj_id(),
            'created_at': {'$lte': start, "$gte": start - timedelta(days=7)}},
            {'recipe_id': 1, '_id': 0}
        )

    def test_recommendations_in_time_frame_values(self, mocker):
        """The method unwraps the raw documents into a list of recipe ids."""
        m = mocker.patch(
            'saana_lib.recommendation.db.patient_recipe_recommendation',
        )
        m.find.return_value = [{'recipe_id': obj_id()}]
        start = datetime.now()
        assert_equal_objects(
            self.klass.recommendations_in_time_frame(start),
            [obj_id()]
        )

    def test_repetition_deduct_gt_threshold(self):
        """Recommendations older than the threshold incur no penalty."""
        current = datetime.now()
        last_time = current - timedelta(days=21 + current.weekday() + 1)
        assert self.klass.repetition_deduct_points(last_time) == 0

    def test_repetition_deduct_points(self):
        """A two-week-old recommendation costs 20 points."""
        current = datetime.now()
        last_time = current - timedelta(days=14)
        assert self.klass.repetition_deduct_points(last_time) == 20
|
12,840 | 8774c7dc21ae8a9aa6f1ce413ececdd90b632c9a | import numpy as np
import matplotlib.pyplot as plt
# Evaluation point for the derivative study.
t0=2.0
# Analytic derivative of x(t) = 1/(1+9e^-t) evaluated at t0:
# x'(t) = 9e^-t / (1+9e^-t)^2.
xprime=9.0*(np.exp(-t0))/(1.0+9.0*(np.exp(-t0)))/(1.0+9.0*(np.exp(-t0)))
# Output file for (delta-t, deviation) pairs; closed at end of script.
q1file=open('q1py.txt','w')
def x(t):
    """Logistic-type curve x(t) = 1 / (1 + 9 e^-t)."""
    denominator = 1.0 + 9.0 * (np.exp(-t))
    return 1.0 / denominator
def xprimeest(x0, x, deltat):
    """Forward-difference estimate of the derivative over step deltat."""
    rise = x - x0
    return rise / deltat
def derivdiff(prime, estprime):
    """Absolute deviation between the exact and estimated derivatives."""
    deviation = prime - estprime
    return np.abs(deviation)
# Sweep step sizes, comparing the finite-difference estimate of x'(t0)
# against the analytic value, logging each deviation and plotting on
# log-log axes.
x0=x(t0)
for t in range(0,-20,-1):
    # NOTE(review): with t running 0, -1, ..., -19, np.power(10.0, -t)
    # produces delta-t = 10^0 .. 10^19 (growing steps). A convergence
    # study usually wants shrinking steps (10^t, i.e. 10^0 .. 10^-19) --
    # confirm the intended sign of the exponent.
    xnew=x(t0+np.power(10.0,-t))
    xest=xprimeest(x0,xnew,np.power(10.0,-t))
    xvals=np.power(10.0,-t)
    yvals=derivdiff(xprime,xest)
    plt.loglog(xvals,yvals,'b.')
    plt.suptitle('Deviation of Derivative Estimate')
    plt.xlabel('$\Delta$t')
    plt.ylabel('$\delta(\Delta t)$')
    q1file.write(str(np.power(10.0,-t))+","+str(derivdiff(xprime,xest))+"\n")
plt.savefig('q1.png')
plt.show()
q1file.close()
|
12,841 | 6e49c2b0e7f2772f61c1dc17b9d6962b352f0694 | from random import randint
def start_game():
    """Interactive 1-10 number guessing game.

    Fixes over the original:
    - guesses below 1 are now rejected as out-of-range (previously e.g. 0
      or -5 fell through to the misleading "It's higher!" hint);
    - replaying loops instead of recursing, so long sessions cannot grow
      the call stack.
    Exits the interpreter via quit() when the player declines a rematch.
    """
    print('Welcome to the number guessing game!')
    while True:  # one iteration per round
        attempts = 0
        random_number = randint(1, 10)
        while True:
            attempts += 1
            try:
                guess = int(input("Guess a number 1-10 "))
            except ValueError:
                print("Oops, thats not a number")
                continue
            if guess < 1 or guess > 10:
                print("Whoops thats not a number 1-10!")
            elif guess > random_number:
                print("It's lower!")
            elif guess < random_number:
                print("It's higher!")
            else:
                print("Congrats!! You guessed the right number")
                print("You took {} tries to guess the right number.".format(attempts))
                break
        ask = input("Would you like to play again? (Yes/No) ")
        if ask.upper() == "YES":
            continue
        if ask.upper() == "NO":
            print("Thanks for playing!")
        quit()
start_game()
|
12,842 | 3e444e78871a0b200fbfec93ad3e3a7ece3ecffa | from geopy.distance import geodesic
from django.shortcuts import render
from django.shortcuts import redirect
from django.shortcuts import HttpResponse
from sklearn import metrics
from applications.user.models import AdminInfo
from applications.work.models import CarWorkDay, Route
from applications.work.models import RouteInfo
import json
import datetime
import math
import numpy as np
import matplotlib.pyplot as plt
import sklearn.cluster as skc # 密度聚类
# Constants apparently intended for a hand-rolled DBSCAN labelling scheme
# (unused in the code visible here -- TODO confirm before removing).
UNCLASSIFIED = False
NOISE = 0
# Create your views here.
class DateEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime/date values as plain strings."""

    def default(self, obj):
        # datetime must be tested first: datetime is a subclass of date.
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        if isinstance(obj, datetime.date):
            return obj.strftime("%Y-%m-%d")
        # Anything else falls back to the base class (which raises TypeError).
        return json.JSONEncoder.default(self, obj)
# 跳转
def manage(request):
    """Render the admin dashboard; bounce unauthenticated users to login."""
    if request.session.get('isLogin', False):
        return render(request, 'background.html')
    # Not logged in
    return redirect('/user/login')
def worklist(request):
    """Render the work-list page; bounce unauthenticated users to login."""
    if request.session.get('isLogin', False):
        return render(request, 'worklist.html')
    # Not logged in
    return redirect('/user/login')
def workdetail(request, id):
    """Render the work-detail page; bounce unauthenticated users to login."""
    if request.session.get('isLogin', False):
        return render(request, 'workdetail.html')
    # Not logged in
    return redirect('/user/login')
def worksearch(request):
    """Render the work-search page; bounce unauthenticated users to login."""
    if request.session.get('isLogin', False):
        return render(request, 'worksearch.html')
    # Not logged in
    return redirect('/user/login')
def workstatistics(request):
    """Render the statistics page; bounce unauthenticated users to login."""
    if request.session.get('isLogin', False):
        return render(request, 'workstatistics.html')
    # Not logged in
    return redirect('/user/login')
def workroute(request):
    """Render the route page; bounce unauthenticated users to login."""
    if request.session.get('isLogin', False):
        return render(request, 'workroute.html')
    # Not logged in
    return redirect('/user/login')
def routedetail(request, id):
    """Render the route-detail page; bounce unauthenticated users to login."""
    if request.session.get('isLogin', False):
        return render(request, 'routedetail.html')
    # Not logged in
    return redirect('/user/login')
# api
def admin_info(request):
    """Return the current admin's profile (looked up by the session aid)
    as a JSON payload with state 200."""
    aid = request.session['admin']
    user = AdminInfo.objects.filter(aid=aid).first()
    # NOTE(review): if the session aid matches no AdminInfo row, `user`
    # is None and the attribute access below raises -- confirm the session
    # always holds a valid aid.
    info = {'aid': user.aid, 'username': user.username, 'email': user.email,
            'phone': user.phone, 'wechat': user.wechat}
    result = {
        "state": 200,
        "data": info
    }
    return HttpResponse(json.dumps(result))
def work_list(request):
    """Paginated (10 per page) JSON list of daily work records.

    GET params: page (0-based, defaults to 0).
    Replaces the original duplicated build loops guarded by a bare
    except: queryset slicing handles full pages, the partial last page,
    and out-of-range pages (empty list) uniformly -- the original raised
    an uncaught IndexError for an out-of-range page whenever
    len(work_query) was not a multiple of 10.
    """
    if request.method == 'POST':
        return HttpResponse('请使用GET请求')
    try:
        page = int(request.GET['page'])
    except (KeyError, ValueError):
        page = 0
    work_query = CarWorkDay.objects.all()
    pages = math.ceil(len(work_query) / 10)
    # Slicing a queryset translates to LIMIT/OFFSET in SQL.
    worklist = [
        {'id': w.id, 'carId': w.carId,
         'workType': w.workType,
         'workDate': w.workDate}
        for w in work_query[page * 10:(page + 1) * 10]
    ]
    result = {
        "state": 200,
        "data": worklist,
        "page": page,
        "pages": pages
    }
    return HttpResponse(json.dumps(result, cls=DateEncoder))
def work_detail(request):
    """Return the full detail of one work record as JSON.

    GET params: id (defaults to 1).
    """
    if request.method == 'POST':
        return HttpResponse('请使用GET请求')
    id = request.GET.get('id', 1)
    work_query = CarWorkDay.objects.filter(id=id).first()
    # NOTE(review): if no record matches `id`, work_query is None and the
    # attribute access below raises -- confirm callers always pass a valid id.
    work = {'carId': work_query.carId,
            'deviceId': work_query.deviceId,
            'disWork': work_query.disWork,
            'workType': work_query.workType,
            'beginTime': work_query.beginTime,
            'endTime': work_query.endTime,
            'workLocate': work_query.loc4}
    result = {
        "state": 200,
        "data": work
    }
    return HttpResponse(json.dumps(result, cls=DateEncoder))
def show_by_date(request):
    """Paginated JSON list of work records within a date range.

    GET params: workDate1, workDate2 ('%Y-%m-%d'), page. Missing or
    malformed parameters fall back to today's date and page 0 (matching
    the original behavior, with the bare except narrowed).
    Pagination uses queryset slicing instead of the original duplicated
    build loops, which raised an uncaught IndexError on out-of-range
    pages when the result count was not a multiple of 10.
    """
    if request.method == 'POST':
        return HttpResponse('请使用GET请求')
    try:
        workDate_date1 = datetime.datetime.strptime(
            request.GET['workDate1'], '%Y-%m-%d').date()
        print(workDate_date1)
        workDate_date2 = datetime.datetime.strptime(
            request.GET['workDate2'], '%Y-%m-%d').date()
        print(workDate_date2)
        page = int(request.GET['page'])
    except (KeyError, ValueError):
        # Any missing/invalid parameter resets the whole query to today.
        workDate_date1 = datetime.datetime.today().date()
        workDate_date2 = datetime.datetime.today().date()
        page = 0
    work_query = CarWorkDay.objects.filter(
        workDate__range=(workDate_date1, workDate_date2)).all()
    pages = math.ceil(len(work_query) / 10)
    worklist = [
        {'id': w.id, 'carId': w.carId,
         'workType': w.workType,
         'workDate': w.workDate}
        for w in work_query[page * 10:(page + 1) * 10]
    ]
    result = {
        "state": 200,
        "data": worklist,
        "page": page,
        "pages": pages
    }
    return HttpResponse(json.dumps(result, cls=DateEncoder))
def work_search(request):
    """Search work records by car id and/or work type, paginated 10 per page.

    GET params: carid, worktype (each optional filter), page. If any of
    the three is missing the original fell back to no filters and page 0;
    that behavior is preserved with the bare except narrowed.
    Pagination uses queryset slicing instead of the original duplicated
    build loops, which raised an uncaught IndexError on out-of-range
    pages when the result count was not a multiple of 10.
    """
    if request.method == 'POST':
        return HttpResponse('请使用GET请求')
    try:
        carid = request.GET['carid']
        worktype = request.GET['worktype']
        page = int(request.GET['page'])
    except (KeyError, ValueError):
        page = 0
        carid = None
        worktype = None
    # Build the filter kwargs only from the parameters actually supplied.
    search_dict = dict()
    if carid:
        search_dict['carId'] = carid
    if worktype:
        search_dict['workType'] = worktype
    work_query = CarWorkDay.objects.filter(**search_dict)
    pages = math.ceil(len(work_query) / 10)
    worklist = [
        {'id': w.id, 'carId': w.carId,
         'workType': w.workType,
         'workDate': w.workDate}
        for w in work_query[page * 10:(page + 1) * 10]
    ]
    result = {
        "state": 200,
        "data": worklist,
        "page": page,
        "pages": pages
    }
    return HttpResponse(json.dumps(result, cls=DateEncoder))
def work_statistics(request):
    """Return per-month counts (Jan..Dec) of all work records as JSON."""
    if request.method == 'POST':
        return HttpResponse('请使用GET请求')
    month_counts = [0] * 12
    for record in CarWorkDay.objects.all():
        # workDate.month is 1-based; bucket index is 0-based.
        month_counts[record.workDate.month - 1] += 1
    result = {
        "state": 200,
        "data": month_counts
    }
    return HttpResponse(json.dumps(result, cls=DateEncoder))
def route_list(request):
    """Paginated (10 per page) JSON list of routes, lazily filling in each
    route's point count and total distance (km) from its GPS points and
    persisting them back to the database."""
    if request.method == 'POST':
        return HttpResponse('请使用GET请求')
    try:
        page = request.GET['page']
        page = int(page)
    except:
        page = 0
    route_query = Route.objects.all()
    pages = math.ceil(len(route_query) / 10)
    for i in range(len(route_query)):
        # Backfill missing aggregates the first time a route is listed.
        if (route_query[i].pointnum == None or route_query[i].distance == None):
            # NOTE(review): this assumes route ids are sequential starting
            # at 1 (workid=i + 1); presumably it should use
            # route_query[i].id -- confirm against the data.
            points = RouteInfo.objects.filter(workid=i + 1).all()
            num = len(points)
            distance = 0
            # Sum the geodesic distance between consecutive GPS fixes.
            for j in range(len(points) - 1):
                distance = distance + (geodesic((points[j].latitude, points[j].longitude),
                                                (points[j + 1].latitude, points[j + 1].longitude)).m)
            route_query[i].pointnum = num
            route_query[i].distance = round(distance / 1000, 2)
            route_query[i].save()
    try:
        routelist = []
        for i in range(10):
            route = {'id': route_query[page * 10 + i].id, 'machineid': route_query[page * 10 + i].machineid,
                     'pointnum': route_query[page * 10 + i].pointnum, 'distance': route_query[page * 10 + i].distance,
                     'date': route_query[page * 10 + i].date, 'worktype': route_query[page * 10 + i].worktype}
            routelist.append(route)
    except:
        # Partial last page: rebuild with the remaining len % 10 entries.
        routelist = []
        for i in range(len(route_query) % 10):
            route = {'id': route_query[page * 10 + i].id, 'machineid': route_query[page * 10 + i].machineid,
                     'pointnum': route_query[page * 10 + i].pointnum, 'distance': route_query[page * 10 + i].distance,
                     'date': route_query[page * 10 + i].date, 'worktype': route_query[page * 10 + i].worktype}
            routelist.append(route)
    result = {
        "state": 200,
        "data": routelist,
        "page": page,
        "pages": pages
    }
    return HttpResponse(json.dumps(result, cls=DateEncoder))
def route_split_1(X):  # rice transplanting ("插秧")
    """Cluster feature vectors with DBSCAN (eps=11, min_samples=9) and
    return (labels, number_of_clusters). Label -1 marks noise points.

    NOTE(review): identical to route_split_2 except for the DBSCAN
    parameters; the two could share a parameterized helper.
    """
    db = skc.DBSCAN(eps=11, min_samples=9).fit(X)  # DBSCAN clustering; also accepts metric=""
    #db = skc.KMeans(n_clusters=4, random_state=9).fit(X)
    labels = db.labels_  # same length as X; labels[i] is point i's cluster id, -1 = noise
    # print('cluster label of each sample:')
    raito = len(labels[labels[:] == -1]) / len(labels)  # fraction of noise points
    print('噪声比:', format(raito, '.2%'))
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)  # number of clusters found
    print('分簇的数目: %d' % n_clusters_)
    print('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))
    # clusters = [[] for i in range(n_clusters_)]
    # for i in range(len(labels)):
    #     for j in range(n_clusters_):
    #         if (labels[i] == j):
    #             clusters[j].append(point[i])
    return labels, n_clusters_
def route_split_2(X):  # subsoiling ("深松")
    """Cluster feature vectors with DBSCAN (eps=4.5, min_samples=7) and
    return (labels, number_of_clusters). Label -1 marks noise points.

    NOTE(review): duplicate of route_split_1 apart from the parameters.
    """
    db = skc.DBSCAN(eps=4.5, min_samples=7).fit(X)  # DBSCAN clustering; also accepts metric=""
    labels = db.labels_  # same length as X; labels[i] is point i's cluster id, -1 = noise
    # print('cluster label of each sample:')
    raito = len(labels[labels[:] == -1]) / len(labels)  # fraction of noise points
    print('噪声比:', format(raito, '.2%'))
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)  # number of clusters found
    print('分簇的数目: %d' % n_clusters_)
    print('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))
    # clusters = [[] for i in range(n_clusters_)]
    # for i in range(len(labels)):
    #     for j in range(n_clusters_):
    #         if (labels[i] == j):
    #             clusters[j].append(point[i])
    return labels, n_clusters_
def evaluate(labels, time):
    """Compute (working seconds, total seconds) from cluster labels.

    The dominant (most frequent) label is treated as "working"; an
    interval between two consecutive samples counts as work time only
    when both carry that label. `labels` is a numpy array aligned with
    the `time` list of datetimes.
    """
    label_list = labels.tolist()
    dominant = max(label_list, key=label_list.count)
    work_seconds = 0
    for idx in range(len(label_list) - 1):
        if label_list[idx] == dominant == label_list[idx + 1]:
            work_seconds += (time[idx + 1] - time[idx]).total_seconds()
    total_seconds = (time[len(label_list) - 1] - time[0]).total_seconds()
    return work_seconds, total_seconds
def route_detail(request):
    """Return one route's GPS track, cluster labels and work-time stats
    as JSON, picking the clustering parameters by work type.

    GET params: id (route id, defaults to 1).
    """
    if request.method == 'POST':
        return HttpResponse('请使用GET请求')
    workid = request.GET.get('id', 1)
    address_point = RouteInfo.objects.filter(workid=workid).all()
    work = Route.objects.filter(id=workid).first()
    address_longitude = []
    address_latitude = []
    #distance = []
    course = []
    speed = []
    deep = []
    time = []
    # Collect per-point features; course is the heading change between
    # consecutive points, so the last point gets a trailing 0 below.
    for i in range(len(address_point)):
        if (i != len(address_point) - 1):
            course.append(abs(address_point[i].course - address_point[i + 1].course))
            #distance.append(geodesic((address_point[i].latitude, address_point[i].longitude),
            #(address_point[i + 1].latitude, address_point[i + 1].longitude)).m)
        speed.append(address_point[i].speed)
        address_longitude.append(address_point[i].longitude)
        address_latitude.append(address_point[i].latitude)
        deep.append(address_point[i].deep)
        time.append(address_point[i].time)
    #distance.append(0)
    course.append(0)
    print(course)
    point = list(zip(address_longitude, address_latitude))
    # Feature choice depends on the work type: (speed, heading change) for
    # rice transplanting, (speed, depth) for subsoiling.
    if (work.worktype == '插秧'):
        data = list(zip(speed, course))
        X = np.array(data)
        (labels, n_clusters_) = route_split_1(X)
    elif (work.worktype == '深松'):
        data = list(zip(speed, deep))
        X = np.array(data)
        (labels, n_clusters_) = route_split_2(X)
    # NOTE(review): if worktype is neither value above, `labels`, `data`
    # and `n_clusters_` are undefined and the next line raises NameError.
    (timework, timetotal) = evaluate(labels, time)
    clusters = [[] for i in range(n_clusters_)]
    # for i in range(len(labels)):
    #     for j in range(n_clusters_):
    #         if (labels[i] == j):
    #             clusters[j].append(point[i])
    for i in range(n_clusters_):
        # print('簇 ', i, '的所有样本:')
        #3one_cluster = clusters[i]
        one_cluster=X[labels==i]
        #print(one_cluster)
        one_cluster = np.array(one_cluster)
        plt.plot(one_cluster[:, 0], one_cluster[:, 1], 'o')
    plt.xlabel(u'作业速度,单位:km/h',fontproperties='SimHei')
    plt.ylabel(u'作业深度',fontproperties='SimHei')
    # NOTE(review): plt.show() opens a GUI window and blocks the request
    # thread -- presumably leftover debugging; confirm before deploying.
    plt.show()
    result = {'point': json.dumps(point),
              'data': json.dumps(data),
              'speed': json.dumps(speed),
              'labels': json.dumps(labels.tolist()),
              'n_clusters_': json.dumps(n_clusters_),
              'timework': json.dumps(timework),
              'timetotal': json.dumps(timetotal),
              }
    return HttpResponse(json.dumps(result, cls=DateEncoder))
|
12,843 | f9f5c976ad01450d91ab7f9b959d7b4f9d10b76c | '''
Welcome.
In this kata you are required to, given a string, replace every letter with its position in the alphabet.
If anything in the text isn't a letter, ignore it and don't return it.
a being 1, b being 2, etc.
As an example:
alphabet_position("The sunset sets at twelve o' clock.")
Should return "20 8 5 19 21 14 19 5 20 19 5 20 19 1 20 20 23 5 12 22 5 15 3 12 15 3 11" as a string.
'''
def alphabet_position(text):
    """Return the 1-based alphabet positions of the letters in *text*,
    space-separated; non-letter characters are skipped."""
    # ord('a') == 97, so lowering and subtracting 96 maps 'a' -> 1 ... 'z' -> 26
    return ' '.join(str(ord(c.lower())-96) for c in text if c.isalpha()) |
12,844 | 576b5618f1cde5101559a242e4643f947e34ab9d | dict = {'name': 'John', 'age': 49}
# str.join over a dict iterates its KEYS only, so this prints "name****age"
# (the values are ignored).
# NOTE(review): `str` here (and `dict` above) shadow the builtins of the
# same name; rename them in anything beyond a throwaway snippet.
str = '****'.join(dict)
print(str) |
12,845 | 03e888a7b2b6e31b85ec53ee77aa679ff0b2a79f | class Addition:
    @staticmethod
    def sum(augend,addend=None):
        """Add two values, or sum a list: if *augend* is a list, *addend*
        is ignored and the list is folded via sumList."""
        if isinstance(augend,list):
            return Addition.sumList(augend)
        return augend + addend
    @staticmethod
    def sumList (valueList):
        """Sum the elements of *valueList* by repeatedly applying Addition.sum."""
        result = 0
        for element in valueList:
            result = Addition.sum(result, element)
        return result |
12,846 | 34fe60eec9e6f6f9f08e09a7498cf3062eb9b855 | # Generated by Django 2.2.4 on 2019-08-07 07:32
from django.db import migrations, models
import main.models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the `products` table for
    the Product model (photo + metadata + pricing fields)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('photo', models.ImageField(blank=True, height_field='height', null=True, upload_to='',
                                            validators=[main.models.validate_name], verbose_name='Изображение',
                                            width_field='width')),
                ('alt', models.TextField(blank=True, max_length=300, null=True, verbose_name='Описание фото')),
                ('height', models.PositiveIntegerField(blank=True, null=True)),
                ('width', models.PositiveIntegerField(blank=True, null=True)),
                ('binary_image', models.BinaryField(null=True)),
                ('ext', models.CharField(blank=True, max_length=10, null=True)),
                ('name', models.CharField(max_length=100, verbose_name='Название')),
                ('about', models.TextField(max_length=500, verbose_name='Краткое описание')),
                ('author', models.TextField(max_length=500, verbose_name='Про автора')),
                ('description', models.TextField(blank=True, verbose_name='Полное описание')),
                ('contacts', models.TextField(blank=True, verbose_name='Другие контактные данные: ')),
                ('price', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=8, null=True,
                                              verbose_name='Цена')),
            ],
            options={
                'verbose_name': 'Товар',
                'verbose_name_plural': 'Товары',
                'db_table': 'products',
            },
        ),
    ]
|
12,847 | e8f7926e9a3e9c1a6413b049a601c393075c810f | import random
def lottery(names):
    """Pick a winner uniformly at random among the *distinct* names.

    Duplicates are collapsed first, so entering several times does not
    improve anyone's odds.
    """
    unique_names = set(names)
    return random.choice(tuple(unique_names))
|
12,848 | 70c495c2bd975235fab0f993553776df1ea626a8 | # I have created this file - Akanshu
from django.http import HttpResponse
from django.shortcuts import render
from .methods import *
def index(request):
    """Render the home page with the text-analysis form."""
    return render(request, 'index.html')
def analyse(request):
    """Apply the text operations selected in the POSTed form and render the result.

    Each enabled operation is applied to the output of the previous one, so
    several checkboxes compose; ``params`` describes the last operation applied.
    Returns a plain error response when no operation was selected.
    """
    # getting the text
    djtext = request.POST.get("text", "default")
    # applied operation flags ("on"/"off" checkbox values)
    djpunc = request.POST.get("removepunc", "off")
    djcaps = request.POST.get("fullcaps", "off")
    newlineremover = request.POST.get("newlineremover", "off")
    extraspaceremover = request.POST.get("extraspaceremover", "off")
    capfirst = request.POST.get("capfirst", "off")
    if djpunc == "on":
        analysed = removepunc(djtext)
        params = {'purpose': 'Remove punctuations', 'analysed_text': analysed }
        djtext = analysed
    if capfirst == "on":
        analysed = capitalisefirst(djtext)
        params = {'purpose': 'Capitalised the first letter.', 'analysed_text': analysed}
        # BUG FIX: djtext was not updated here, so later operations
        # silently discarded the capitalisation result.
        djtext = analysed
    if djcaps == "on":
        analysed = toUpper(djtext)
        params = {'purpose': 'All capitalised', 'analysed_text': analysed}
        djtext = analysed
    if newlineremover == "on":
        analysed = newlineremove(djtext)
        params = {'purpose': 'New lines removed', 'analysed_text': analysed}
        djtext = analysed
    if extraspaceremover == "on":
        analysed = extraspaceremove(djtext)
        params = {'purpose': 'Space removed', 'analysed_text': analysed}
        # BUG FIX: keep the chained value consistent with the other branches.
        djtext = analysed
    if capfirst != "on" and extraspaceremover != "on" and newlineremover != "on" and djcaps != "on" and djpunc != "on":
        return HttpResponse("Please select any operation and try again..!!")
    return render(request, "analyse.html", params)
12,849 | 4352ee21338f1b09b20ec2f4a513225696ec1869 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 02:49:31 2020
@author: User
"""
#Candies and two sisters
'''
import math
t = int(input())
ans = []
for i in range(t):
n = int(input())
if n%2 == 0:
ans.append(int(n/2-1))
else:
mid = math.ceil(n/2)
ans.append(n-mid)
for j in range(t):
print(ans[j]) |
12,850 | 2e7ca75adfcd5f8d017689aca34b70ae8bf4f3ac | #this script handles the operations to be performed on user accounts
#for example adding user or deleting user
class Userr(object):
    """Simple value object holding one user account's details."""
    def __init__(self, username, email, f_name, l_name, password):
        self.username = username
        self.email = email
        self.f_name = f_name
        self.l_name = l_name
        # NOTE(review): the password is kept in plain text here; hash it
        # before persisting anywhere.
        self.password = password |
12,851 | 3ce5e4c703da51461914620cc1f2cb7f9fe69fe9 | import sqlite3
from app.api.classes import Flashcard
from flask import current_app
import json
def create_flashcard(title: str, description='', source='', image_url='', tags=None):
    """Insert a new flashcard row and return its rowid.

    ``tags`` is stored JSON-encoded; ``None`` (the default) means an empty
    tag list.  Using ``None`` instead of the original mutable ``[]`` default
    avoids the shared-default-argument pitfall while keeping the same
    behaviour for callers.
    """
    if tags is None:
        tags = []
    with sqlite3.connect(current_app.config['DB']) as db:
        c = db.cursor()
        c.execute(
            """
            INSERT INTO
                flashcards
            (
                title,
                description,
                source,
                image_url,
                tags
            )
            VALUES
                (?, ?, ?, ?, ?)
            """,
            (
                title,
                description,
                source,
                image_url,
                json.dumps(tags)
            )
        )
        return c.lastrowid
def retrieve_all_flashcards(start: int=0, qty:int=None):
    """
    Retrieve all flashcards in ascending id order (max 250 at a time) or,
    with basic pagination, return `qty` flashcards whose id is >= `start`.
    """
    # FIX: compare to None with `is`, not `==` (PEP 8; `==` can be hijacked
    # by operator overloading).
    qty = 250 if qty is None else qty
    with sqlite3.connect(current_app.config['DB']) as db:
        c = db.cursor()
        c.execute("""
            SELECT
                id,
                title,
                description,
                source,
                image_url,
                tags
            FROM
                flashcards
            WHERE
                id >= ?
            ORDER BY
                id ASC
            LIMIT
                ?
            """, (start, qty)
        )
        raw_cards = c.fetchall()
        # Hydrate each row into a Flashcard (tags are stored JSON-encoded).
        cards = []
        for card in raw_cards:
            cards.append(
                Flashcard(
                    id=card[0],
                    title=card[1],
                    description=card[2],
                    source=card[3],
                    image_url=card[4],
                    tags=json.loads(card[5])
                )
            )
        return cards
def retrieve_flashcard(id):
    """
    Return the Flashcard with the given id, or None if no row matches.
    """
    # NOTE(review): the parameter name `id` shadows the builtin; renaming
    # would change the keyword-argument interface, so only flagged here.
    with sqlite3.connect(current_app.config['DB']) as db:
        c = db.cursor()
        c.execute("""
            SELECT
                id,
                title,
                description,
                source,
                image_url,
                tags
            FROM
                flashcards
            WHERE
                id=?
            """, (id,)
        )
        r = c.fetchone()
        if not r:
            return None
        return Flashcard(
            id=r[0],
            title=r[1],
            description=r[2],
            source=r[3],
            image_url=r[4],
            tags=json.loads(r[5])
        )
|
12,852 | 284c0c7705406f30a4d245e5a41597b1ef9b53dd | # main_train.py
import os
import torch
import argparse
import numpy as np
from torch import nn
from sklearn.model_selection import train_test_split
import utils as u
import model as m
from preprocess import Preprocess
from data import TwitterDataset
from train import training
path_prefix = './data/'
# Whether to freeze the embedding, the batch size, number of training
# epochs, learning rate and the model checkpoint directory.
fix_embedding = True # fix embedding during training
batch_size = 256
epoch = 20
lr = 0.001
print("loading data ...")
X_train = torch.load(os.path.join(path_prefix, 'X_train.pt'))
X_val = torch.load(os.path.join(path_prefix, 'X_val.pt'))
y_train = torch.load(os.path.join(path_prefix, 'y_train.pt'))
y_val = torch.load(os.path.join(path_prefix, 'y_val.pt'))
embedding = torch.load(os.path.join(path_prefix, 'embedding.pt'))
# Use torch.cuda.is_available() to decide whether a GPU is present:
# device becomes "cuda" if so, "cpu" otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_dir = os.path.join(path_prefix, 'model/') # model directory for checkpoint model
# Instantiate the model.
# model = m.LSTM_Net(embedding, embedding_dim=250, hidden_dim=150, num_layers=1, dropout=0.5, fix_embedding=fix_embedding)
model = m.LSTM_Net(embedding, embedding_dim=250, hidden_dim=200, num_layers=4, dropout=0.5, fix_embedding=fix_embedding)
model = model.to(device) # if device is "cuda" the model trains on GPU (the inputs fed in must be cuda tensors too)
# Wrap the data in Datasets for the DataLoaders to consume.
train_dataset = TwitterDataset(X=X_train, y=y_train)
val_dataset = TwitterDataset(X=X_val, y=y_val)
# Turn the data into batches of tensors.
train_loader = torch.utils.data.DataLoader(dataset = train_dataset,
                                            batch_size = batch_size,
                                            shuffle = True,
                                            num_workers = 8)
val_loader = torch.utils.data.DataLoader(dataset = val_dataset,
                                            batch_size = batch_size,
                                            shuffle = False,
                                            num_workers = 8)
print('start training...')
# Start training.
training(batch_size, epoch, lr, model_dir, train_loader, val_loader, model, device)
print('done.')
|
12,853 | d4c22e7ff93cd8b5b2c19da34d16c4e7299c88dd | from django import forms
from django.contrib.auth.models import User
from django.forms.extras.widgets import SelectDateWidget
class RegistrationForm(forms.Form):
    """Sign-up form: validates matching passwords and unique username/email."""
    username = forms.CharField(max_length = 50,
                    widget = forms.TextInput(attrs={'class':'form-control',
                                                    'placeholder':'Username',
                                                    'autofocus': 'autofocus'}))
    firstname = forms.CharField(max_length = 20,
                    widget = forms.TextInput(attrs={'class':'form-control',
                                                    'placeholder':'First Name'}))
    lastname = forms.CharField(max_length = 20,
                    widget = forms.TextInput(attrs={'class':'form-control',
                                                    'placeholder':'Last Name'}))
    email = forms.EmailField(max_length = 50,
                widget = forms.EmailInput(attrs={'class':'form-control',
                                                 'placeholder':'Email'}))
    password1 = forms.CharField(max_length = 200,
                    label = 'Password',
                    widget = forms.PasswordInput(attrs={'class':'form-control',
                                                        'placeholder':'Password'}))
    password2 = forms.CharField(max_length = 200,
                    label = 'Confirm Password',
                    widget = forms.PasswordInput(attrs={'class':'form-control',
                                                        'placeholder':'Confirm Password'}))
    def clean(self):
        """Cross-field validation: the two password entries must match."""
        cleaned_data = super(RegistrationForm, self).clean()
        password1 = cleaned_data.get('password1')
        password2 = cleaned_data.get('password2')
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords did not match.")
        return cleaned_data
    def clean_username(self):
        """Reject usernames that already exist."""
        new_username = self.cleaned_data.get('username')
        # IMPROVED: .exists() issues a cheap EXISTS query instead of
        # evaluating (fetching) the whole queryset for a truthiness test.
        if User.objects.filter(username = new_username).exists():
            raise forms.ValidationError("Username is already taken.")
        return new_username
    def clean_email(self):
        """Reject email addresses that already exist."""
        new_email = self.cleaned_data.get('email')
        if User.objects.filter(email = new_email).exists():
            raise forms.ValidationError("Email is already taken.")
        return new_email
class ActivityForm(forms.Form):
    """Event-creation form: title/description, start & end date/time,
    hidden map coordinates, optional media uploads and category tags."""
    title = forms.CharField(label="", max_length = 50,
                widget = forms.Textarea(attrs={'class':'form-control',
                                               'placeholder':'Event name',
                                               'rows':'1',
                                               'autofocus': 'autofocus'}))
    content = forms.CharField(label="", max_length = 200,
                widget = forms.Textarea(attrs={'class':'form-control',
                                               'rows':'2',
                                               'cols':'20',
                                               'placeholder':'Tell people more about the event',
                                               'data-emojiable':'true'}))
    event_date = forms.DateField(label="Start Date ",widget = forms.SelectDateWidget())
    event_time = forms.TimeField(label="Start Time",widget=forms.TimeInput(attrs={'placeholder':'00:00:00'}))
    event_end_date = forms.DateField(label="End Date ",widget = forms.SelectDateWidget())
    event_end_time = forms.TimeField(label="End Time",widget=forms.TimeInput(attrs={'placeholder':'00:00:00'}))
    # Coordinates are filled by the map widget on the client side.
    lat = forms.FloatField(widget = forms.HiddenInput())
    lng = forms.FloatField(widget=forms.HiddenInput())
    picture = forms.ImageField(label="Upload Image",required=False, widget=forms.FileInput())
    videos = forms.FileField(label="Upload Video", required=False, widget=forms.FileInput())
    tag1 = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={'data-val':'true', 'value':'true'}), label="Study")
    tag2 = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={'data-val':'true', 'value':'true'}), label="Music")
    tag3 = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={'data-val':'true', 'value':'true'}), label="Sports")
|
12,854 | 28021f8a73db4f3d33e21216396a2705e6c023f0 | N,M=map(int,input().split())
# Closed-form arithmetic series replaces the two O(N)/O(M) loops:
# sum(range(N)) == N*(N-1)//2 (and 0 for non-positive N) — same output.
even = N * (N - 1) // 2 if N > 0 else 0
odd = M * (M - 1) // 2 if M > 0 else 0
print(odd + even)
12,855 | d78bc7c085ad64c3628c03c0717dcbae323b68c9 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
def index(request):
    """Public landing page."""
    context ={}
    return render(request, 'index.html', context)
@login_required
def test_admin(request):
    """Test page; only reachable by authenticated users (login_required)."""
    context = {}
    return render(request, 'test.html', context) |
12,856 | 7b01dfd98b2bdf532a63860514ec57f1d8eb306d | # this function is what calculates the account that has the most similar rating
# to the account logged in
from file_manip import *
import constants as c
def get_similar(account_name):
    """Return the name of the account whose song ratings are closest to
    *account_name*'s, using the Manhattan (taxicab) distance over the
    ratings of songs both accounts have in common."""
    # declare variables
    filename = 0
    sorted_distances = []
    distance = 0
    distances = []
    variant = 0
    i = 0
    j = 0
    # get songs from account file
    user_songs = get_songs(account_name)
    files = get_filenames()
    # this loop will open each account file get the songs, compare it to
    # the user's account and then add it to the manhatten distance.
    # NOTE: 'filename' is the account name. ex: ben hannah, shannon
    # NOTE(review): this hardcoded get_songs('ben') call is immediately
    # overwritten inside the loop — looks like leftover debugging; confirm
    # and remove.
    variant = get_songs('ben')
    for filename in files:
        if filename != account_name: # get songs from the accounts that are not itself
            # get the songs from what it is comparing the user to
            variant = get_songs(filename)
            # cycles through all of the users songs
            for i in range(len(user_songs)):
                # for each song in the user_songs it compares it to each one of the songs
                # in the other account (variant)
                for j in range(len(variant)):
                    if user_songs[i][c.TITLE] == variant[j][c.TITLE]:
                        #Calculate manhattan distance using Mink distance metric
                        distance += taxicab(int(user_songs[i][c.RATING]),int(variant[j][c.RATING]))
            # add the new distance to the 'distances list'
            variant = []
            distances.append([distance,filename])
            distance = 0
    # sort the accounts form least to greatest
    sorted_distances = sorted(distances, key = lambda pair: pair[0])
    # return most similar account
    return(sorted_distances[0][1])
|
12,857 | f95bba57463087af0ec777e0affa9031c52ac32b | from LExprVisitor import LExprVisitor
from LExprParser import LExprParser
class CalcVisitor(LExprVisitor):
    """ANTLR visitor that evaluates parsed LExpr trees to integers."""
    def visitAdd(self, ctx: LExprParser.AddContext):
        """Evaluate `e + e` by summing both operand subtrees."""
        return self.visit(ctx.e(0)) + self.visit(ctx.e(1))
    def visitMult(self, ctx: LExprParser.MultContext):
        """Evaluate `e * e` by multiplying both operand subtrees."""
        return self.visit(ctx.e(0)) * self.visit(ctx.e(1))
    # BUG FIX: the first parameter was misspelled "seladdf"; it only worked
    # because `self` is positional, and it broke any use of self inside.
    def visitInt(self, ctx: LExprParser.IntContext):
        """Evaluate an integer literal node."""
        return int(ctx.INT().getText())
|
12,858 | 8a333a216fcc6c4c32cafd942586e9306fb4ea25 | # calculating gcd of two numbers in python
def gcd(a,b):
    """Greatest common divisor via the iterative Euclidean algorithm.

    Equivalent to the recursive form: keep replacing (a, b) with
    (b, a % b) until b reaches 0, at which point a holds the gcd.
    """
    while b != 0:
        a, b = b, a % b
    return a
# Read two space-separated integers from stdin and print their gcd.
a,b=map(int,input().split())
print(gcd(a,b))
|
12,859 | bf58db0be13530e9c42c3a6ac4f80939365b356b | import os
import groot
import numpy as np
import PIL
import torch
import torchvision
class GroceryDataset(torch.utils.data.Dataset):
    """Hierarchically-labelled grocery image dataset.

    Reads image paths from ``<root>/<split>.txt``; each path's directory
    components form a tuple of labels (coarse-to-fine).  Labels can be
    returned either as strings or as (optionally frequency-adjusted)
    one-hot vectors taken from ``<root>/label_arrays.npz``.
    """
    def __init__(
        self,
        root,
        split="train",
        string_labels=True,          # return label strings instead of vectors
        transforms=None,             # optional torchvision transform applied to the image
        adjust_freq=True,            # divide one-hot labels by per-level class frequency
        eps=1e-5,                    # frequency clamp to avoid division blow-up
        use_all_labels=False,        # average label vectors over all hierarchy levels
        label_level=-1,              # which level to use when not averaging
    ):
        self.root = root
        self.label_file_path = os.path.join(root, split + ".txt")
        self.image_paths = self._load_label_path_file()
        # Directory components between the root and the filename are the labels.
        self.label_tuples = [p.split("/")[1:-1] for p in self.image_paths]
        self.string_labels = string_labels
        # NOTE(review): this open() is never closed; fine for a one-shot
        # read but consider a `with` block.
        self.concept_tree = groot.Tree.from_yaml(
            open(os.path.join(root, "concept_tree.yaml"), "r").read()
        )
        self.float_onehot_label_array = np.load(os.path.join(root, "label_arrays.npz"))[
            "onehot"
        ]
        self.transforms = transforms
        self.adjust_freq = adjust_freq
        self.use_all_labels = use_all_labels
        self.label_level = label_level
        if adjust_freq:
            # Per-level relative class frequencies, clamped to [eps, 1/eps],
            # used to down-weight frequent classes in the one-hot targets.
            self.class_counts = self._count_labels()
            levels = self.concept_tree.levels
            self.counts_by_level = [
                {cat: self.class_counts[cat] for cat in level} for level in levels
            ]
            name_to_id = self.concept_tree.node_label
            freq = np.zeros_like(self.float_onehot_label_array)
            level_total_counts = [
                sum(v for k, v in level.items()) for level in self.counts_by_level
            ]
            self.freq_by_level = [
                {cat: counts[cat] / tot for cat in counts}
                for tot, counts in zip(level_total_counts, self.counts_by_level)
            ]
            for d in self.freq_by_level:
                for k, v in d.items():
                    freq[name_to_id[k]] += max(v, eps)
            freq = np.clip(a=freq, a_min=eps, a_max=1 / eps)
            self.frequency_array = freq
            self.freq_adjusted_onehot_array = self.float_onehot_label_array / freq
    def _count_labels(self):
        """Count how often each label appears; the 'Grocery' root counts every sample."""
        labels = self.label_tuples
        counts = dict(Grocery=len(labels))
        for label in labels:
            for lab in label:
                if lab in counts:
                    counts[lab] += 1
                else:
                    counts[lab] = 1
        return counts
    def _load_label_path_file(self):
        """Read image paths (first comma-separated field per line) from the split file."""
        with open(self.label_file_path, "r") as f:
            paths = [ln.split(",")[0] for ln in f.readlines()]
        return paths
    def __getitem__(self, idx):
        """Return a dict with the (transformed) image plus string or vector labels."""
        img_path = os.path.join(self.root, self.image_paths[idx])
        lab_tups = self.label_tuples[idx]
        img = PIL.Image.open(img_path)
        if self.transforms:
            img = self.transforms(img)
        if self.string_labels:
            return dict(image=img, string_labels=lab_tups)
        else:
            int_label = [self.concept_tree.node_label[l] for l in lab_tups]
            if self.adjust_freq:
                onehot = self.freq_adjusted_onehot_array
            else:
                onehot = self.float_onehot_label_array
            vecs = onehot[int_label]  # [n_label, onehot_dim]
            if self.use_all_labels:
                vec = vecs.mean(0)
            else:
                vec = vecs[self.label_level]
            vec = torch.from_numpy(vec).float()
            return dict(image=img, label=vec)
    def __len__(self):
        return len(self.image_paths)
    def collect_statistics(self, stats_lambda, summary_lambda):
        """Map stats_lambda over every item and reduce with summary_lambda."""
        return summary_lambda([stats_lambda(item) for item in self])
# Training-time augmentation: AutoAugment, optional (possibly nested) random
# crops, then resize to a 256x256 square and convert to tensor.
train_augmentation = torchvision.transforms.Compose(
    [
        torchvision.transforms.AutoAugment(),
        torchvision.transforms.RandomApply(
            [
                torchvision.transforms.RandomCrop(300, pad_if_needed=True),
                torchvision.transforms.RandomCrop(256, pad_if_needed=True),
            ]
        ),
        torchvision.transforms.Resize((256, 256)),
        torchvision.transforms.ToTensor(),
    ]
)
# Evaluation: deterministic resize only.
# NOTE(review): Resize(256) keeps the aspect ratio (shorter side = 256),
# unlike training's square Resize((256, 256)) — confirm this is intended.
eval_augmentation = torchvision.transforms.Compose(
    [torchvision.transforms.Resize(256), torchvision.transforms.ToTensor()]
)
def pil_image_size(item):
    """Return the width/height (as a dict) of the PIL image stored under item["image"]."""
    width, height = item["image"].size
    return dict(width=width, height=height)
def get_size_stats(sizes):
    """Aggregate min/max/mean over a list of {"height": h, "width": w} dicts."""
    heights = [entry["height"] for entry in sizes]
    widths = [entry["width"] for entry in sizes]
    return {
        "max height": max(heights),
        "min height": min(heights),
        "max width": max(widths),
        "min width": min(widths),
        "mean height": sum(heights) / len(heights),
        "mean width": sum(widths) / len(widths),
    }
|
12,860 | a13b60dbb71aad02cb565ca86f9cca2406395502 | from distutils.core import setup
import py2exe
# Dependencies are automatically detected, but it might need fine tuning.
#buildOptions = {"packages": [], "excludes": [""]}
# GUI applications require a different base on Windows (the default is for a
# console application).
#
#expectstub = Target(
# script="expectstub.py"
# )
# Build a console executable for winpexpect_test.py with py2exe;
# zipfile=None bundles the library into the executable itself.
setup(name="name",
      # console based executables
      console=['winpexpect_test.py'],
      # windows subsystem executables (no console)
      windows=[],
      # py2exe options
      zipfile=None,
      # options={"py2exe": py2exe_options},
      ) |
12,861 | 725f87903c3319614fbe9e54e49129b78cd194f7 | import torch
import torch.utils.data
from .. import utils
import numpy as np
import math
from typing import Dict, Any, List, Optional
class InfiniteSampler(torch.utils.data.Sampler):
    """Sampler that yields dataset indices forever.

    With replacement it draws uniform random indices; without, it walks a
    fresh random permutation per pass, back to back.
    """
    def __init__(self, data_source: torch.utils.data.Dataset, replacement=True, seed=None):
        super().__init__(data_source)
        self.data_source = data_source
        self.replacement = replacement
        # presumably a numpy RandomState derived from `seed` — confirm in utils.seed
        self.seed = utils.seed.get_randstate(seed)
    def __iter__(self):
        n = len(self.data_source)
        if self.replacement:
            while True:
                yield self.seed.randint(0, n, dtype=np.int64)
        else:
            i_list = None
            pos = n
            while True:
                if pos >= n:
                    # pass exhausted: reshuffle and start over
                    i_list = self.seed.permutation(n).tolist()
                    pos = 0
                sample = i_list[pos]
                pos += 1
                yield sample
    def __len__(self):
        # effectively "infinite" for consumers that insist on a length
        return 0x7FFFFFFF
class FixedRandomSampler(torch.utils.data.Sampler):
    """Yields the dataset indices in one fixed pseudo-random order.

    The permutation comes from a hard-coded seed, so every instance over a
    same-sized dataset reproduces exactly the same order on every pass.
    """
    def __init__(self, data_source: torch.utils.data.Dataset):
        super().__init__(data_source)
        self.data_source = data_source
        rng = utils.seed.get_randstate(0xB0C1FA53)
        self.order = rng.permutation(len(self.data_source)).tolist()
    def __iter__(self):
        yield from self.order
    def __len__(self):
        return len(self.data_source)
class SubsetSampler(torch.utils.data.Sampler):
    """Yields a fixed random subset of at most n_max dataset indices.

    The subset is drawn once, without replacement, from a hard-coded seed,
    so repeated iteration always visits the same indices in the same order.
    """
    def __init__(self, data_source: torch.utils.data.Dataset, n_max: int):
        super().__init__(data_source)
        self.data_source = data_source
        self._len = min(len(self.data_source), n_max)
        self.order = utils.seed.get_randstate(0xB0C1FA53).choice(len(self.data_source), self._len, replace=False)
    def __iter__(self):
        for i in self.order:
            yield i
    def __len__(self):
        return self._len
class MultibatchSequentialSampler:
    """Sequential sampler over `batch_size` parallel streams.

    The dataset is viewed as `batch_size` contiguous chunks of equal
    length; batch p contains element p of every chunk.  The iteration
    position is checkpointable via state_dict()/load_state_dict() and
    resets to None once a full pass completes.
    """
    def __init__(self, data_source: torch.utils.data.Dataset, batch_size: int):
        self.ds_len = len(data_source)
        self.batch_size = batch_size
        self.len = self.ds_len // self.batch_size
        self.pos = None
    def __iter__(self):
        if self.pos is None:
            self.pos = 0
        while self.pos < self.len:
            step = self.pos
            self.pos = step + 1
            yield [chunk * self.len + step for chunk in range(self.batch_size)]
        self.pos = None
    def __len__(self):
        return self.len
    def state_dict(self) -> Dict[str, Any]:
        return {"pos": self.pos}
    def load_state_dict(self, state: Dict[str, Any]):
        self.pos = state["pos"]
class BucketedSampler(torch.utils.data.Sampler):
    """Batches indices of similar sample length together to reduce padding.

    Samples are shuffled, optionally trimmed to a multiple of batch_size,
    stably sorted by length (so equal lengths stay shuffled) and sliced
    into batches; batches are then yielded in random, sequential, or
    longest-first order, once or forever.
    """
    def __init__(self, data_source: torch.utils.data.Dataset, batch_size: int, length_key_name: str = "in_len",
                 infinite: bool = False, seed: Optional[int] = None, drop_last: bool = False, long_first: bool = False,
                 random_order: bool = True):
        super().__init__(data_source)
        self.lens = [data_source[i][length_key_name] for i in range(len(data_source))]
        self.batch_size = batch_size
        self.seed = utils.seed.get_randstate(seed)
        self.infinite = infinite
        self.drop_last = drop_last
        self.reverse = long_first
        self.random_order = random_order
        # longest-first implies a deterministic batch order
        assert (not long_first) or (not self.random_order)
    def makebins(self) -> List[List[int]]:
        # First shuffle all
        order = self.seed.permutation(len(self.lens)).tolist()
        if self.drop_last:
            # BUG FIX: the original `order[:-(len(order) % self.batch_size)]`
            # becomes order[:-0] == [] when the length is an exact multiple
            # of batch_size, silently dropping every sample.
            remainder = len(order) % self.batch_size
            if remainder:
                order = order[:-remainder]
        # Sort preserves the order of the same-length elements, thus the previous shuffle makes random elements to be
        # binned together
        order = list(sorted(order, key=lambda i: self.lens[i], reverse=self.reverse))
        return [order[i: i + self.batch_size] for i in range(0, len(order), self.batch_size)]
    def __iter__(self):
        while True:
            batches = self.makebins()
            t = self.seed.permutation if self.random_order else range
            for o in t(len(batches)):
                yield batches[o]
            if not self.infinite:
                break
    def __len__(self):
        return math.ceil(len(self.lens) / self.batch_size)
|
12,862 | 2dd84cc7c225967a15a898297939e879521d5b79 | """from difference_of_squares import difference, square_of_sum, sum_of_squares
"""
def difference(i):
""" calculate the difference """
return square_of_sum(i) - sum_of_squares(i)
def sum_of_squares(i):
""" sum of squares """
return sum([j**2 for j in range(1, i+1)])
def square_of_sum(i):
""" square the sum """
return sum([j for j in range(1, i+1)])**2
|
12,863 | 435eb0706395e7aa736f6480f71cccebc7bb6bed | import sys, os, time
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import tensorflow as tf
# Import model builder
from model import build_neurosat
# Import tools
from cnf import ensure_datasets
import instance_loader
import itertools
from util import timestamp, memory_usage
from logutil import run_and_log_batch
if __name__ == '__main__':
    # Training entry point: ensures the CNF datasets exist, builds the
    # NeuroSAT model, restores any saved checkpoint, then alternates
    # training and test epochs with a simple early-stopping window.
    print( "{timestamp}\t{memory}\tMaking sure ther datasets exits ...".format( timestamp = timestamp(), memory = memory_usage() ) )
    ensure_datasets()
    if not os.path.isdir( "tmp" ):
        os.makedirs( "tmp" )
    #end if
    # Hyperparameters: epochs, embedding size d, message-passing steps,
    # batch size and batches per epoch.
    epochs = 2**10
    d = 128
    time_steps = 26
    batch_size = 128
    batches_per_epoch = 128
    # Stop when the last 3 test accuracies all exceed the threshold.
    early_stopping_window = [ 0 for _ in range(3) ]
    early_stopping_threshold = 0.85
    # Build model
    print( "{timestamp}\t{memory}\tBuilding model ...".format( timestamp = timestamp(), memory = memory_usage() ) )
    solver = build_neurosat( d )
    # Create batch loader
    print( "{timestamp}\t{memory}\tLoading instances ...".format( timestamp = timestamp(), memory = memory_usage() ) )
    generator = instance_loader.InstanceLoader( "./instances" )
    # If you want to use the entire dataset on each epoch, use:
    # batches_per_epoch = len(generator.filenames) // batch_size
    test_generator = instance_loader.InstanceLoader( "./test-instances" )
    # Create model saver
    saver = tf.train.Saver()
    # Disallow GPU use
    config = tf.ConfigProto(
        #device_count = {"GPU":0},
        gpu_options = tf.GPUOptions( allow_growth = True ),
    )
    with tf.Session(config=config) as sess:
        # Initialize global variables
        print( "{timestamp}\t{memory}\tInitializing global variables ... ".format( timestamp = timestamp(), memory = memory_usage() ) )
        sess.run( tf.global_variables_initializer() )
        if os.path.exists( "./tmp/neurosat.ckpt" ):
            # Restore saved weights
            print( "{timestamp}\t{memory}\tRestoring saved model ... ".format( timestamp = timestamp(), memory = memory_usage() ) )
            saver.restore(sess, "./tmp/neurosat.ckpt")
        #end if
        # Run for a number of epochs
        print( "{timestamp}\t{memory}\tRunning for {} epochs".format( epochs, timestamp = timestamp(), memory = memory_usage() ) )
        for epoch in range( epochs ):
            # Save current weights
            save_path = saver.save(sess, "./tmp/neurosat.ckpt")
            print( "{timestamp}\t{memory}\tMODEL SAVED IN PATH: {save_path}".format( timestamp = timestamp(), memory = memory_usage(), save_path=save_path ) )
            if all( [ early_stopping_threshold < v for v in early_stopping_window ] ):
                print( "{timestamp}\t{memory}\tEARLY STOPPING because the test accuracy on the last {epochs} epochs were above {threshold:.2f}% accuracy.".format( timestamp = timestamp(), memory = memory_usage(), epochs = len( early_stopping_window ), threshold = early_stopping_threshold * 100 ) )
                break
            #end if
            # Reset training generator and run with a sample of the training instances
            print( "{timestamp}\t{memory}\tTRAINING SET BEGIN".format( timestamp = timestamp(), memory = memory_usage() ) )
            generator.reset()
            epoch_loss = 0.0
            epoch_accuracy = 0.0
            for b, batch in itertools.islice( enumerate( generator.get_batches( batch_size ) ), batches_per_epoch ):
                l, a, p = run_and_log_batch( sess, solver, epoch, b, batch, time_steps )
                epoch_loss += l
                epoch_accuracy += a
            #end for
            epoch_loss /= batches_per_epoch
            epoch_accuracy /= batches_per_epoch
            print( "{timestamp}\t{memory}\tTRAINING SET END Mean loss: {loss:.4f} Mean Accuracy = {accuracy:.4f}".format(
                loss = epoch_loss,
                accuracy = epoch_accuracy,
                timestamp = timestamp(),
                memory = memory_usage()
                )
            )
            # Summarize results and print epoch summary
            test_loss = 0.0
            test_accuracy = 0.0
            test_batches = 0
            # Reset test generator and run with the test instances
            print( "{timestamp}\t{memory}\tTEST SET BEGIN".format( timestamp = timestamp(), memory = memory_usage() ) )
            test_generator.reset()
            for b, batch in enumerate( test_generator.get_batches( batch_size ) ):
                l, a, p = run_and_log_batch( sess, solver, epoch, b, batch, time_steps, train = False )
                test_loss += l
                test_accuracy += a
                test_batches += 1
            #end for
            # Summarize results and print test summary
            test_loss /= test_batches
            test_accuracy /= test_batches
            print( "{timestamp}\t{memory}\tTEST SET END Mean loss: {loss:.4f} Mean Accuracy = {accuracy:.4f}".format(
                loss = test_loss,
                accuracy = test_accuracy,
                timestamp = timestamp(),
                memory = memory_usage()
                )
            )
            # Slide the early-stopping window forward by one epoch.
            early_stopping_window = early_stopping_window[1:] + [ test_accuracy ]
        #end for
    #end Session
|
12,864 | a6e329e74c5b0145d753d3ca3ff8c2a600fdf1d3 | from django.conf.urls import url
from . import views
app_name = 'annotations'
# URL routes for the annotations app.  Order matters: more specific export
# sub-paths are listed before the bare export route so they match first.
urlpatterns = [
    url(r'^export/(\d+)/create/$', views.create_exportformat, name='create_exportformat'),
    url(r'^export/(\d+)/auth/$', views.export_auth, name='export_auth'),
    url(r'^export/(\d+)/download/$', views.download_export, name='download_export'),
    url(r'^export/(\d+)/$', views.create_export, name='create_export'),
    url(r'^manage/annotation/(\d+)/$', views.manage_annotations, name='manage_annotations'),
    url(r'^(\d+)/delete/$', views.delete_annotation, name='delete_annotation'),
    url(r'^(\d+)/$', views.annotate, name='annotate'),
    url(r'^(\d+)/edit/save/$', views.edit_annotation_save, name='edit_annotation_save'),
    url(r'^(\d+)/edit/$', views.edit_annotation, name='edit_annotation'),
    url(r'^(\d+)/verify/$', views.verify, name='verify'),
    url(r'^api/annotation/create/$', views.create_annotation, name='create_annotation'),
    url(r'^api/annotation/delete/$', views.api_delete_annotation, name='delete_annotation'),
    url(r'^api/annotation/load/$', views.load_annotations, name='load_annotations'),
    url(r'^api/annotation/update/$', views.update_annotation, name='update_annotations'),
]
|
12,865 | e5c85ac93e1c87fe37cc08346fd0d79095560b16 | import sqlite3
import logging
import time
__version__ = "0.1.0"
initial_sql = """CREATE TABLE IF NOT EXISTS log(
Id INTEGER PRIMARY KEY AUTOINCREMENT,
TimeStamp TEXT,
Source TEXT,
LogLevel INT,
LogLevelName TEXT,
Message TEXT,
Module TEXT,
FuncName TEXT,
LineNo INT,
Exception TEXT,
Process INT,
Thread TEXT,
ThreadName TEXT
)"""
insertion_sql = """INSERT INTO log(
TimeStamp,
Source,
LogLevel,
LogLevelName,
Message,
Module,
FuncName,
LineNo,
Exception,
Process,
Thread,
ThreadName
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
"""
class SQLiteHandler(logging.Handler):
    """
    Thread-safe logging handler for SQLite.

    Each emitted LogRecord is inserted as one row of the `log` table; the
    table is created on construction if it does not exist.
    """
    def __init__(self, db='app.db'):
        logging.Handler.__init__(self)
        self.db = db
        # Create the table up front so emit() can insert immediately.
        with sqlite3.connect(self.db) as conn:
            conn.execute(initial_sql)
            conn.commit()
        return
    def format_time(self, record):
        """
        Attach a human-readable local-time stamp to the record as `dbtime`.
        """
        record.dbtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
        return
    @staticmethod
    def prepared_params(record):
        """Build the parameter tuple for insertion_sql from a LogRecord
        (expects `dbtime` and `exc_text` to be set already)."""
        d = record.__dict__
        return (
            d['dbtime'],
            d['name'],
            d['levelno'],
            d['levelname'],
            # BUG FIX: the original used d['msg'] % d['args'], which raises
            # when args is empty and the message contains a literal '%'.
            # getMessage() implements logging's lazy-formatting rules.
            record.getMessage(),
            d['module'],
            d['funcName'],
            d['lineno'],
            d['exc_text'],
            d['process'],
            str(d['thread']),
            d['threadName']
        )
    def emit(self, record):
        """Insert one formatted log record into the database."""
        self.format(record)
        self.format_time(record)
        if record.exc_info:  # for exceptions
            # NOTE(review): logging._defaultFormatter is a private API;
            # self.formatter (when set) would be the supported route.
            record.exc_text = logging._defaultFormatter.formatException(record.exc_info)
        else:
            record.exc_text = ""
        # Insert the log record
        params = self.prepared_params(record)
        with sqlite3.connect(self.db) as conn:
            conn.execute(insertion_sql, params)
            conn.commit()  # not efficient, but hopefully thread-safe
        return
12,866 | a09025429e0d60a6ea00153469cb498e913feb29 | # from flask import render_template
# from myapp import application, db
#
#
# @application.errorhandler(404)
# def not_found_error(error):
# return render_template('errors/404.html'), 404
#
#
# @application.errorhandler(500)
# def internal_error(error):
# db.session.rollback()
# return render_template('errors/500.html'), 500
|
12,867 | e04116e697f9aaaef9d2d101f0492d67f02347c4 | import parsing
parsing.parser.add_option("--timed-repeat", metavar="SECONDS", type="float", help="Repeat each test, for a limited time")
def process_options(options):
if options.timed_repeat is not None:
from testoob.running import fixture_decorators
parsing.kwargs["fixture_decorators"].append(
fixture_decorators.get_timed_fixture(options.timed_repeat))
parsing.option_processors.append(process_options)
|
12,868 | 1e7ced31fcb03df750e30ddb7aa03f135f2075b7 | from django.conf.urls import patterns, include, url
from rest_framework import routers
from api import views
from django.contrib import admin
# Register ModelAdmins from all installed apps.
admin.autodiscover()
# NOTE(review): patterns() with dotted-string views is the pre-Django-1.10
# style; matches this project's Django version, so left as-is.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'evemonline.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url('^apikeyinfo/$', 'api.views.api_key_info_list'),
    url('^apikeyinfo/(?P<api_token_id>[0-9]+)/$', 'api.views.api_key_info_retrieve'),
    url('^apitoken/(?P<user_id>[0-9]+)$', 'api.views.api_token_list'),
    url(r'^admin/', include(admin.site.urls)),
)
|
12,869 | 21510265a27134984ca4dd70be2cc51b15bf4264 | import numpy as np
from basemodel import BaseModel
from sklearn import svm, cross_validation
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
class SVM_QUANT(BaseModel):
    """Support-vector machine classifier (sklearn defaults: RBF kernel)."""

    def __init__(self, hyperParams=None):
        # NOTE(review): hyperParams is accepted but never used -- confirm intended.
        self.model = svm.SVC()
class DT_Quant(BaseModel):
    """Decision-tree classifier with a fixed random seed for reproducibility."""

    def __init__(self, hyperParams=None):
        # NOTE(review): hyperParams is accepted but never used -- confirm intended.
        self.model = DecisionTreeClassifier(random_state=4)
class RandomForest(BaseModel):
    """Random-forest classifier with 50 trees."""

    def __init__(self, hyperParams=None):
        # NOTE(review): hyperParams is accepted but never used -- confirm intended.
        self.model = RandomForestClassifier(n_estimators=50)
class Adaboost(BaseModel):
    """AdaBoost over a random-forest base estimator."""

    def __init__(self, hyperParams=None):
        # The positional 100 is RandomForestClassifier's n_estimators, so the
        # base estimator is itself a 100-tree forest boosted 100 times.
        self.model = AdaBoostClassifier(base_estimator=RandomForestClassifier(100), n_estimators=100)
class Multinomial(BaseModel):
    """K-nearest-neighbours classifier.

    NOTE(review): the class name says "Multinomial" but the model is
    KNeighborsClassifier, not MultinomialNB -- confirm which was intended.
    """

    def __init__(self, hyperParams=None):
        # NOTE(review): hyperParams is accepted but never used -- confirm intended.
        self.model = KNeighborsClassifier()
class ExtraTrees(BaseModel):
    """Extremely-randomized-trees classifier with 200 trees."""

    def __init__(self, hyperParams=None):
        # NOTE(review): hyperParams is accepted but never used -- confirm intended.
        self.model = ExtraTreesClassifier(n_estimators=200)
|
12,870 | a95dd29ec685fc5b7146c38593e46fb9b4a4dd17 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@Author: Fabian Fey
"""
import os
from logzero import logger
COLORPATH = '/etc/regolith/styles/costum-theme/color'
def ChangeColors(colorsList):
    """Rewrite the Regolith color-#define file from a seven-entry color list.

    colorsList[0:5] become base03..base0 and colorsList[5:7] become
    base2/base3; base1 and every accent color are pinned to the highlight
    yellow. The file at COLORPATH is overwritten in place.
    """
    highlight = "#fffb00"
    base_names = ["base03", "base02", "base01", "base00",
                  "base0", "base1", "base2", "base3"]
    base_values = [colorsList[0], colorsList[1], colorsList[2], colorsList[3],
                   colorsList[4], highlight, colorsList[5], colorsList[6]]
    accent_names = ["yellow", "orange", "red", "magenta",
                    "violet", "blue", "cyan", "green"]
    colors = ["#define color_" + name + " " + value
              for name, value in zip(base_names, base_values)]
    colors += ["#define color_" + name + " " + highlight
               for name in accent_names]
    with open(COLORPATH, 'w') as colorFile:
        logger.info("Updating color file.")
        for line in colors:
            colorFile.write(line + "\n")
def SetWallpaper(path, file):
    """Set the desktop wallpaper to ``path``/``file`` using feh.

    Security fix: the previous implementation interpolated the filename into
    an ``os.system`` shell string, so a name containing quotes or shell
    metacharacters could execute arbitrary commands. A list-form subprocess
    call hands the name to feh verbatim with no shell involved.
    """
    import subprocess  # local import keeps the module's import block unchanged
    wallpaper = path + "/" + file
    logger.info("Updating wallpaper.")
    subprocess.call(["feh", "--bg-max", wallpaper])
def ResetI3():
    """Merge the Regolith Xresources and reload i3 so new colors take effect."""
    logger.info("Resetting i3")
    os.system('xrdb -merge ~/.Xresources-regolith && i3 reload')
|
12,871 | 9de009b51146daed062c267950754576eb557833 | import torch
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
import matplotlib as mpl
from datetime import datetime
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import shutil, sys
import mutil
import mnist_model as model
import data_prepare
out_dir = './out/mnist_2000_no_decay{}'.format(datetime.now())
out_dir = out_dir.replace(" ", "_")
print(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
shutil.copyfile(sys.argv[0], out_dir + '/training_script.py')
shutil.copyfile("./toy_model.py", out_dir + "/toy_model.py")
shutil.copyfile("./data_prepare.py", out_dir + "/data_prepare.py")
shutil.copyfile("./mnist_10.py", out_dir+ "/mnist_10.py")
# shutil.copyfile("./gan_reproduce.py", out_dir+ "/gan_reproduce.py")
shutil.copyfile("./gan.py", out_dir+ "/gan.py")
sys.stdout = mutil.Logger(out_dir)
gpu = 2
torch.cuda.set_device(gpu)
mb_size = 100 # mini-batch_size
# mode_num = 2
sample_point = 10000
# distance = 10
# start_points = np.array([[0,0],[0,1],[0,2]])
# end_points = np.array([[1,0],[1,1],[1,2]])
start_points = np.array([[0,0]])
end_points = np.array([[1,0]])
Z_dim = 100
X_dim = 2
h_dim = 128
# data = data_prepare.Straight_Line(90, start_points, end_points, type=1)
# data = data_prepare.Data_2D_Circle(mb_size,R = 2)
data = data_prepare.Mnist_10(mb_size, dataset_size = 1000)
tmp_data = data.batch_next(10*10, shuffle=False)
mutil.save_picture_numpy(tmp_data,"./test.png")
#itmp_data = data.batch_next_fixed()
#mutil.save_picture_numpy(tmp_data,out_dir+"./test.png")
#z_draw = Variable(torch.randn(mb_size, Z_dim)).cuda()
# c_dim = mode_num * mode_num
cnt = 0
num = '0'
# else:
# print("you have already creat one.")
# exit(1)
grid_num = 100
G = model.G_Net_conv(in_channel=Z_dim).cuda()
D = model.D_Net_conv(inchannel=1).cuda()
def weights_init(m):
    """DCGAN-style initializer: draw Conv-layer weights from N(0, 0.02).

    Intended for use with ``module.apply(weights_init)``; modules whose class
    name does not contain 'Conv' are left untouched.
    """
    if 'Conv' in m.__class__.__name__:
        m.weight.data.normal_(0.0, 0.02)
G.apply(weights_init)
# G.load_state_dict(torch.load('./out_conv_part/G_20000.model'))
D.apply(weights_init)
# D.load_state_dict(torch.load('./out_conv_part/D_20000.model'))
# G_fake = model.Direct_Net(X_dim+c_dim, 1, h_dim).cuda()
# G.apply(model.weights_init)
# D.apply(model.weights_init)
""" ===================== TRAINING ======================== """
lr = 1e-4
G_solver = optim.Adam(G.parameters(), lr=1e-4,betas=[0.5,0.999])
D_solver = optim.Adam(D.parameters(), lr=1e-4,betas=[0.5,0.999])
ones_label = Variable(torch.ones(mb_size)).cuda()
zeros_label = Variable(torch.zeros(mb_size)).cuda()
criterion = nn.BCELoss()
for it in range(100000):
# Sample data
z = Variable(torch.randn(mb_size, Z_dim,1,1)).cuda()
X = data.batch_next(mb_size, shuffle=False) # with label
X = Variable(torch.from_numpy(X.astype('float32'))).cuda()
# c = Variable(torch.from_numpy(mutil.label_num2vec(c.astype('int')).astype('float32'))).cuda()
D_solver.zero_grad()
# Dicriminator forward-loss-backward-update
G_sample = G(z).detach()
D_real = D(X)
D_fake = D(G_sample)
D_loss_real = criterion(D_real, ones_label)
D_loss_fake = criterion(D_fake, zeros_label)
D_loss = D_loss_real + D_loss_fake
D_loss.backward()
D_solver.step()
# Housekeeping - reset gradient
D.zero_grad()
G.zero_grad()
# Generator forward-loss-backward-update
z = Variable(torch.randn(mb_size, Z_dim,1,1).cuda(), requires_grad=True)
# print(c.cpu().data.numpy().shape)
G_sample = G(z)
# G_sample.register_hook(save_grad('G'))
# G_sample.requires_grad= True
D_fake = D(G_sample)
G_loss = criterion(D_fake, ones_label)
G_loss.backward()
G_solver.step()
# Housekeeping - reset gradient
D.zero_grad()
G.zero_grad()
# if it % 5000 == 0:
# for param_group in G_solver.param_groups:
# param_group['lr'] = param_group['lr'] * 0.8
# for param_group in D_solver.param_groups:
# param_group['lr'] = param_group['lr'] * 0.5
# Print and plot every now and then
if it % 500 == 0:
fig, ax = plt.subplots()
print('Iter-{}; D_accuracy_real/fake: {}/{}; G_accuracy: {}'.format(it, np.round(np.exp(-D_loss_real.data.tolist()[0]), 5),
np.round(1 - np.exp(-D_loss_fake.data.tolist()[0]), 5), np.round(np.exp(-G_loss.data.tolist()[0]), 5)))
X = X.cpu().data.numpy()
G_sample = G(z)
mutil.save_picture(G_sample,'{}/hehe_{}.png'.format(out_dir, str(cnt).zfill(3)),column=10)
cnt += 1
# test_command = os.system("convert -quality 100 -delay 20 {}/*.png {}/video.mp4".format(out_dir, out_dir))
torch.save(G.state_dict(), "{}/G.model".format(out_dir))
torch.save(D.state_dict(), "{}/D.model".format(out_dir))
|
12,872 | 247e7ec49f8cda539d2189ca06573638098b3f4f | from LAFitness import LAFitness
from Member import Member
from datetime import datetime
if __name__ == "__main__":
    # load from config file...
    # parameters: flat monthly fee and one-time initiation fee (USD).
    monthlyFee = 25
    initFee = 25
    # main: create the gym, register three members together with the
    # payments they have made so far, then print aggregate statistics.
    laFitness = LAFitness(monthlyFee, initFee)
    LDai= Member("Dai Lian", datetime(2016,12,15))
    LDai.pay(150)
    LDai.pay(150)
    KHuang = Member("Huang Kun", datetime(2017,5,13))
    KHuang.pay(75)
    JWang = Member("Wang Jinghao", datetime(2017,7,13))
    JWang.pay(150)
    laFitness.addMember(LDai)
    laFitness.addMember(KHuang)
    laFitness.addMember(JWang)
    laFitness.showAllStatics()
|
12,873 | a1b37349c0daa801a705e632a97e5fb1f12dce4e | # Cats with hats
# Cats with hats: on pass i, every i-th cat's hat is toggled. A cat ends
# the game wearing a hat iff its number has an odd number of divisors,
# i.e. iff its number is a perfect square.

# Create 1000 hatless cats in the theCats dictionary.
theCats = {}
for number in range(1, 1001):
    theCats["Cat " + str(number)] = False

loops = 0
for _ in range(1, 1001):
    loops += 1
    for position, cat in enumerate(theCats, start=1):
        if position % loops == 0:
            theCats[cat] = not theCats[cat]

# print results
for cat, has_hat in theCats.items():
    print(f"{cat} has a hat!" if has_hat else f"{cat} is hatless!")
|
def dodawanie(a, b):
    """Return the sum of a and b (Polish: 'addition')."""
    return a + b
def get_info():
    """Print the program banner (Polish: 'Calculator program. Author: Kasia')."""
    print("Program kalkulator. Autor: Kasia")
get_info()

# Read two integers from stdin and print their sum.
try:
    l1 = int(input())
    l2 = int(input())
    print(dodawanie(l1, l2))
except Exception:
    # Fix: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt. Catching Exception keeps the friendly message for
    # real failures (e.g. ValueError on non-numeric input) without trapping
    # interpreter-exit signals.
    print("Program zakończył się nieoczekiwanym błędem")
    print("Możesz go zgłosic pod adresem autor.pl")
|
12,875 | 9bad1a06a5cc083d12cd42d9b67d35a279e7e5e5 | # -*- encoding=utf8 -*-
__author__ = "xsl"
import pytest
from tools.tool import *
from tools.config import *
from airtest.core.api import *
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
poco = AndroidUiautomationPoco(use_airtest_input=True, screenshot_each_action=False)
auto_setup(__file__, logdir=air_log_path, devices=devices)
def test_emoji_wifi(ini):
    # Test case: connect to a Wi-Fi network whose SSID contains an emoji.
    poco("com.fdage.eight:id/iv_my").click()
    sleep(1.0)
    if poco("com.fdage.eight:id/tv_camera_storage", text="未连接").exists():
        # Camera not connected yet: pick it from the Wi-Fi list first.
        logging.info("相机未连接")
        logging.info("连接相机wifi")
        poco("com.fdage.eight:id/tv_battery").click()
        sleep(1.0)
        poco(text=camera_name).click()
        sleep(3.0)
        # Confirm whichever dialog-button variant this Android build shows.
        if poco("android:id/button1").exists():
            poco("android:id/button1").click()
        else:
            poco("android:id/button3").click()
        sleep(10.0)
        poco("向上导航").click()
        sleep(3.0)
    else:
        logging.info("相机已连接")
    poco("com.fdage.eight:id/tv_network").click()
    sleep(1.0)
    # Wait up to 60 s for the emoji SSID to appear, then select it.
    poco("com.fdage.eight:id/tv_wifi_name", text="/xushilin😀").wait_for_appearance(60)
    poco(text="/xushilin😀").click()
    sleep(2.0)
    if poco("com.fdage.eight:id/et_input", text="请输入密码").exists():
        # NOTE(review): set_text() with no argument likely fails or does
        # nothing -- if the intent is to clear the field first, this should
        # probably be set_text("").
        poco("com.fdage.eight:id/et_input").set_text()
        poco("com.fdage.eight:id/et_input").set_text("Aa111111")
        sleep(5.0)
    else:
        sleep(5.0)
    poco("com.fdage.eight:id/tv_network_desc", text="已连接到/xushilin😀").wait_for_appearance(60)
    logging.info("成功连接emoji表情WiFi!")
    # NOTE(review): this selects the back button but never calls .click() --
    # confirm whether navigating back was intended here.
    poco("com.fdage.eight:id/iv_left")
if __name__ == '__main__':
pytest.main() |
12,876 | 3fd0383b6aef00ca2c300da292c545dc3b1d64c1 | import matplotlib.pyplot as plt
import numpy as np
import h5py
import caesar
import os
import sys
from plotting_methods import *
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=18)
palette_name = 'tol'
min_mass = 9.
max_mass = 12.
dm = 0.25 # dex
snap = '151'
model = sys.argv[1]
wind = sys.argv[2]
# Box size in ckpc/h for each supported simulation volume.
if model == 'm100n1024':
    boxsize = 100000.
elif model == 'm50n512':
    boxsize = 50000.
elif model == 'm25n512':
    boxsize = 25000.
else:
    # Fix: without this branch an unrecognized model left ``boxsize``
    # undefined, producing a confusing NameError much later in the script.
    raise ValueError('unknown model: {}'.format(model))
massdata_dir = '/disk01/sapple/cgm/budgets/data/'+model+'_'+wind+'_'+snap+'/'
savedir = '/disk01/sapple/cgm/budgets/plots/'
all_phases = ['Cool CGM (T < Tphoto)', 'Warm CGM (Tphoto < T < 0.5Tvir)', 'Hot CGM (T > 0.5Tvir)',
'Cool CGM (T < 10^5)', 'Warm CGM (10^5 < T < 10^6)', 'Hot CGM (T > 10^6)',
'ISM', 'Wind', 'Dust', 'Stars', 'Dark matter', 'Total baryons']
plot_phases = ['Hot CGM (T > 0.5Tvir)', 'Warm CGM (Tphoto < T < 0.5Tvir)', 'Cool CGM (T < Tphoto)',
'Wind', 'Dust', 'ISM', 'Stars']
plot_phases_labels = [r'Hot CGM $(T > 0.5T_{\rm vir})$', r'Warm CGM $(T_{\rm photo} < T < 0.5T_{\rm vir})$',
r'Cool CGM $(T < T_{\rm photo})$', 'Wind', 'Dust', 'ISM', 'Stars']
colours = ['m', 'b', 'c', 'g', 'tab:orange', 'tab:pink', 'r']
colours = get_cb_colours(palette_name)[::-1]
stats = ['median', 'percentile_25_75', 'std', 'cosmic_median', 'cosmic_std']
mass_stats_file = massdata_dir+model+'_'+wind+'_'+snap+'_mass_budget_stats.h5'
if os.path.isfile(mass_stats_file):
mass_stats = read_phase_stats(mass_stats_file, plot_phases, stats)
else:
caesarfile = '/home/rad/data/'+model+'/'+wind+'/Groups/'+model+'_'+snap+'.hdf5'
sim = caesar.quick_load(caesarfile)
quench = -1.8 + 0.3*sim.simulation.redshift
central = np.array([i.central for i in sim.galaxies])
gal_sm = np.array([i.masses['stellar'].in_units('Msun') for i in sim.galaxies])[central]
gal_sfr = np.array([i.sfr.in_units('Msun/Gyr') for i in sim.galaxies])[central]
gal_ssfr = np.log10(gal_sfr / gal_sm)
gal_pos = np.array([i.pos.in_units('kpc/h') for i in sim.galaxies])[central]
# get the mass budget data:
mass_budget = read_phases(massdata_dir+'mass_budget.h5', all_phases)
mass_stats = {}
mass_bins = get_bin_edges(min_mass, max_mass, dm)
mass_stats['smass_bins'] = get_bin_middle(np.append(mass_bins, mass_bins[-1] + dm))
mask = np.array([True] * len(gal_sm))
mass_stats['all'] = get_phase_stats(gal_sm, gal_pos, mass_budget, mask, all_phases, mass_bins, boxsize, logresults=True)
mask = gal_ssfr > quench
mass_stats['star_forming'] = get_phase_stats(gal_sm, gal_pos, mass_budget, mask, all_phases, mass_bins, boxsize, logresults=True)
mask = gal_ssfr < quench
mass_stats['quenched'] = get_phase_stats(gal_sm, gal_pos, mass_budget, mask, all_phases, mass_bins, boxsize, logresults=True)
write_phase_stats(mass_stats_file, mass_stats, all_phases, stats)
fig, ax = plt.subplots(1, 3, figsize=(15, 6), sharey=True, sharex=True)
ax = ax.flatten()
for i, phase in enumerate(plot_phases):
ax[0].errorbar(mass_stats['smass_bins'], mass_stats['all'][phase]['median'], yerr=mass_stats['all'][phase]['percentile_25_75'],
capsize=3, color=colours[i], label=plot_phases_labels[i])
for i, phase in enumerate(plot_phases):
ax[1].errorbar(mass_stats['smass_bins'], mass_stats['star_forming'][phase]['median'], yerr=mass_stats['star_forming'][phase]['percentile_25_75'],
capsize=3, color=colours[i], label=plot_phases_labels[i])
for i, phase in enumerate(plot_phases):
ax[2].errorbar(mass_stats['smass_bins'], mass_stats['quenched'][phase]['median'], yerr=mass_stats['quenched'][phase]['percentile_25_75'],
capsize=3, color=colours[i], label=plot_phases_labels[i])
ann_labels = ['All', 'Star forming', 'Quenched']
ann_x = [0.88, 0.63, 0.7]
for i in range(3):
ax[i].annotate(ann_labels[i], xy=(ann_x[i], 0.05), xycoords='axes fraction',size=18,
bbox=dict(boxstyle='round', fc='white'))
for i in range(3):
ax[i].set_xlim(min_mass, mass_stats['smass_bins'][-1]+0.5*dm)
ax[i].set_ylim(6.5, 14.5)
ax[i].set_xlabel(r'$\textrm{log} (M_{\star} / \textrm{M}_{\odot})$')
ax[0].set_ylabel(r'$\textrm{log} (M / \textrm{M}_{\odot})$')
ax[0].legend(loc=2, fontsize=14, framealpha=0.)
fig.subplots_adjust(wspace=0.)
plt.savefig(savedir+model+'_'+wind+'_'+snap+'_mass_actual.png', bbox_inches = 'tight')
plt.clf()
|
def get1(nums):
    """Return the smallest integer in 0..len(nums) that is missing from nums.

    Fixes two defects in the original:
    - the loop ran over ``range(m)`` only, so for a complete sequence such as
      [0, 1, 2] it fell through and returned None instead of 3;
    - ``i not in nums`` scanned the list on every iteration (O(n^2) overall);
      a set makes each membership test O(1).
    """
    m = len(nums)
    seen = set(nums)
    for i in range(m + 1):
        if i not in seen:
            return i
# Quick demo: the missing number in [3, 0, 1] is 2.
l1 = [3, 0, 1]
print(get1(l1))
12,878 | 0991db10ec09ff1006dd959adb3911c943d913f7 | """
Gif.py
David Merrell
2018-06-28
This module defines the Gif class.
Gif objects correspond to whole GIF files;
i.e., for each GIF you want to make, a Gif object
must be constructed and acted upon.
"""
import matplotlib.pyplot as plt
import subprocess as sp
import math
import os
class Gif:
"""
Each instance of this class represents a GIF file.
A Gif object has methods for building individual frames
and then composing them into a GIF file.
The operating principle is this: each frame of the GIF corresponds to
a matplotlib Figure object. This Gif class just keeps things tidy
while you're making a GIF.
"""
def __init__(self, filename, width, height,
max_frames=1000, stride=1,
frame_suff=".png", **kwargs):
"""
Constructor.
:param filename: a filename/path for the GIF we're making
:param width: the width of the GIF, in inches
:param height: the height of the GIF, in inches
Possible keyword arguments:
:param max_frames: the maximum number of frames the GIF may contain.
:param stride: an integer. Every stride-th frame is created;
the others are skipped. Initialized at 1 (i.e., all frames
are created).
:param frame_suff: the kind of image file we want our frames to be
rendered as. Default is .png.
Other than these, the keyword arguments available to Matplotlib
Figure objects are appropriate for this constructor.
"""
# Set basic, necessary attributes
self.filename = filename
self.width = width
self.height = height
self.max_frames = max_frames
# Store the keyword arguments for later,
# when we construct frames
self.kwargs = kwargs
# This is the name of a temporary directory where we'll
# keep intermediate stuff (e.g. frame images)
self.tmp_dir = "__mgl_tmp__"
if not os.path.exists(self.tmp_dir):
res = sp.call(["mkdir", self.tmp_dir])
if res != 0:
print("Error: unable to make temporary directory at {}".format(self.tmp_dir))
raise OSError
else: # If the temporary directory already exists, make sure it's empty
sp.call("rm {}".format(os.path.join(self.tmp_dir,"*")), shell=True)
# Set some other attributes that allow this class
# to do its job
self.stride = stride
self.file_basename = os.path.basename(self.filename)
self.tmp_prefix = os.path.join(self.tmp_dir,self.file_basename.split('.')[0])
self.tmp_suffix = frame_suff
self.frame_count = 0 # keep track of the number of frames
self.in_scope = False # are we currently making a frame?
self.current_frame = None # This will store the figure we are currently building
return
def start_frame(self):
"""
Indicates that we are beginning a new frame for the GIF.
A new Figure object is created, using specifications provided to the
Gif's constructor.
Note that you are constrained to make one frame at a time---for every
start_frame, there must be a end_frame without another start_frame
in between.
:return: fig, a Matplotlib figure object
"""
# Check whether we're supposed to make a frame on this iteration:
if self.frame_count % self.stride != 0:
return
# Check whether we're already making a frame.
if self.in_scope:
print("The Gif object for {} has encountered 'start_frame' twice\
without an intervening 'end_frame'".format(self.filename))
raise SyntaxError
# Construct a new figure
fig = plt.figure(figsize=(self.width,self.height), **(self.kwargs))
self.current_frame = fig
# Set the "in_scope" member True
self.in_scope = True
return self.current_frame
def end_frame(self, **kwargs):
"""
Render, save, and close this frame.
Keyword arguments: all of those available to the
figure.savefig(...) method.
:return: nothing
"""
# Check whether we're supposed to make a frame on this iteration:
if self.frame_count % self.stride != 0:
self.frame_count += 1
return
# Check whether we're still making another frame
if not self.in_scope:
print("The Gif object for {} has encountered 'end_frame' twice\
without an intervening 'start_frame'".format(self.filename))
raise SyntaxError
# Save the frame to the temporary directory
count_width = str(int(math.log10(self.max_frames) + 1))
label = "{:0>"+count_width+"d}"
label = label.format(self.frame_count)
file_path = "{}_{}{}".format(self.tmp_prefix, label, self.tmp_suffix)
self.current_frame.savefig(file_path,**kwargs)
# Close the figure
plt.close(self.current_frame)
# Update some relevant attributes
self.current_frame = None
self.in_scope = False
self.frame_count += 1
return
def close(self):
"""
Call this when all the desired frames have been created.
It creates the GIF and cleans up all temporary files
"""
sp.call(["convert", "{}_*".format(self.tmp_prefix),
self.filename])
sp.call("rm {}_*".format(self.tmp_prefix), shell=True)
sp.call(["rmdir", self.tmp_dir])
|
12,879 | 60d404dc38ecef65f8df6994ba9c27866463de92 | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod, abstractproperty
class AbstractClient(object):
    """Abstract interface that every concrete scheduling client implements."""

    # Python 2 style ABC declaration (this file predates the Python 3
    # ``class AbstractClient(metaclass=ABCMeta)`` syntax).
    __metaclass__ = ABCMeta

    @abstractmethod
    def findOrgStructureByAddress(self):
        pass

    @abstractmethod
    def getScheduleInfo(self):
        pass

    @abstractmethod
    def getPatientQueue(self):
        pass

    @abstractmethod
    def getPatientInfo(self):
        pass

    @abstractmethod
    def getWorkTimeAndStatus(self):
        # Fix: the original declared this method twice; the identical
        # duplicate definition has been removed.
        pass

    @abstractmethod
    def enqueue(self):
        pass
12,880 | cbc14dc91b7b4cb71110d0c9a65fa645cd874113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import csv
from bs4 import BeautifulSoup
if len(sys.argv) <= 1:
sys.exit()
cols = ["id_row","return","name","city","prov","postalcode"]
cw = csv.DictWriter(sys.stdout, cols)
f = open(sys.argv[1], "r")
intext = f.read()
intext = re.sub(r" ", " ", intext)
soup = BeautifulSoup(intext)
r = dict()
m = re.search(r"(\d+)", sys.argv[1])
if m is not None:
r["id_row"] = m.group(1)
#r["link"] = soup.find("form")["action"]
link = soup.find("form")["action"]
m = re.search(r"return=(\d)", link)
if m is not None:
r["return"] = m.group(1)
r["name"] = soup.find(id="lblFullName").string
r["city"] = soup.find(id="lblCity").string
r["prov"] = soup.find(id="lblProvince").string
r["postalcode"] = soup.find(id="lblPostalCode").string
cw.writerow(r)
|
12,881 | a7aaee3dd664cbdb877d1b9b3e4a74891c1f9ba4 | from app import db
from sqlalchemy.orm import relationship
from sqlalchemy_utils.types import PasswordType
from sqlalchemy_searchable import Searchable, SearchQueryMixin
from sqlalchemy_utils.types import TSVectorType
from flask.ext.sqlalchemy import SQLAlchemy, BaseQuery
from wtforms_alchemy import ModelForm
ROLE_USER = 0
ROLE_ADMIN = 1
class User(db.Model):
__tablename__ = "user"
id = db.Column(db.Integer, autoincrement=True, primary_key=True)
nickname = db.Column(db.String(32), index=True,
nullable=False, unique=True)
email = db.Column(db.String(120), index=True, nullable=False, unique=True)
password = db.Column(PasswordType(schemes=['pbkdf2_sha512']),
nullable=False) # Storing encrypted password
role = db.Column(db.SmallInteger, default=ROLE_USER)
posts = relationship("Post", backref="author")
def __repr__(self):
return '<Id %r, Nickname %r, Email %r, Admin %r, posts %r >' % \
(self.id, self.nickname, self.email, self.role, self.posts)
def is_authenticated(self):
# Methods for user managment in Flask-login
# Returns True if the user is authenticated, i.e. they have provided valid
# credentials.
#(Only authenticated users will fulfill the criteria of login_required.)
# TODO
return True
def is_active(self):
# Returns True if this is an active user - in addition to being
# authenticated, they also have activated their account, not been suspended
#, or any condition your application has for rejecting an account.
# Inactive accounts may not log in (without being forced of course).
# TODO
return True
def is_anonymous(self):
# Returns True if this is an anonymous user.
#(Actual users should return False instead.)
return False
def get_id(self):
# Returns a unicode that uniquely identifies this user, and can be used to
# load the user from the user_loader callback. Note that this must be a uni
# code - if the ID is natively an int or some other type, you will need to
# convert it to unicode.
return unicode(self.id)
association_table = db.Table('association',
db.Column('post_id', db.Integer,
db.ForeignKey('post.id')),
db.Column('tag_id', db.Integer,
db.ForeignKey('tag.id')))
class PostQuery(BaseQuery, SearchQueryMixin):
pass
class Post(db.Model, Searchable):
query_class = PostQuery
__tablename__ = "post"
__searchable_columns__ = ['title', 'text']
__search_options__ = {
'catalog': 'pg_catalog.english'
}
id = db.Column(db.Integer, autoincrement=True, primary_key=True)
title = db.Column(db.String(128), nullable=False)
text = db.Column(db.Text, nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
tags = relationship("Tag", secondary=association_table, backref="posts")
timestamp = db.Column(db.DateTime)
# search_vector is the default name of the var vector in SQLalchemy
search_vector = db.Column(TSVectorType)
def __repr__(self):
return '<Id %r, Title %r, Author %r, tags %r> ' % \
(self.id, self.title, self.author, self.tags)
class Tag(db.Model):
    """A unique label attachable to many posts via the ``association`` table."""
    __tablename__ = "tag"
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    name = db.Column(db.String(32), unique=True, nullable=False)

    def __repr__(self):
        # NOTE(review): __repr__ conventionally returns an unambiguous
        # "<Tag ...>" form; returning the bare name reads more like __str__.
        return self.name
|
12,882 | be42717a8bd736378c5ac3921ce701fdc92ff683 | # snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[deeplens_inference_lambda.py demonstrates how to create an inference Lambda function on an AWS DeepLens model.]
# snippet-service:[deeplens]
# snippet-keyword:[AWS DeepLens]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-07]
# snippet-sourceauthor:[AWS]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[deeplens.python.deeplens_inference_lambda.import]
from threading import Thread, Event
import os
import json
import numpy as np
import awscam
import cv2
import greengrasssdk
# snippet-end:[deeplens.python.deeplens_inference_lambda.import]
#snippet-start:[deeplens.python.deeplens_inference_lambda.lambda_handler]
def lambda_handler(event, context):
"""Empty entry point to the Lambda function invoked from the edge."""
return
#snippet-end:[deeplens.python.deeplens_inference_lambda.lambda_handler]
# snippet-start:[deeplens.python.deeplens_inference_lambda.class_LocalDisplay]
class LocalDisplay(Thread):
    """ Class for facilitating the local display of inference results
        (as images). The class is designed to run on its own thread. In
        particular the class dumps the inference results into a FIFO
        located in the tmp directory (which lambda has access to). The
        results can be rendered using mplayer by typing:
        mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 /tmp/results.mjpeg
    """
    def __init__(self, resolution):
        """ resolution - Desired resolution of the project stream """
        # Initialize the base class, so that the object can run on its own
        # thread.
        super(LocalDisplay, self).__init__()
        # List of valid resolutions
        RESOLUTION = {'1080p' : (1920, 1080), '720p' : (1280, 720), '480p' : (858, 480)}
        if resolution not in RESOLUTION:
            raise Exception("Invalid resolution")
        self.resolution = RESOLUTION[resolution]
        # Initialize the default image to be a white canvas. Clients
        # will update the image when ready.
        self.frame = cv2.imencode('.jpg', 255*np.ones([640, 480, 3]))[1]
        self.stop_request = Event()

    def run(self):
        """ Overridden method that continually dumps images to the desired
            FIFO file.
        """
        # Path to the FIFO file. The lambda only has permissions to the tmp
        # directory. Pointing to a FIFO file in another directory
        # will cause the lambda to crash.
        result_path = '/tmp/results.mjpeg'
        # Create the FIFO file if it doesn't exist.
        if not os.path.exists(result_path):
            os.mkfifo(result_path)
        # This call will block until a consumer is available
        with open(result_path, 'w') as fifo_file:
            # NOTE(review): Event.isSet() is the deprecated alias of is_set().
            while not self.stop_request.isSet():
                try:
                    # Write the data to the FIFO file. This call will block
                    # meaning the code will come to a halt here until a consumer
                    # is available.
                    fifo_file.write(self.frame.tobytes())
                except IOError:
                    continue

    def set_frame_data(self, frame):
        """ Method updates the image data. This currently encodes the
            numpy array to jpg but can be modified to support other encodings.
            frame - Numpy array containing the image data of the next frame
            in the project stream.
        """
        ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))
        if not ret:
            raise Exception('Failed to set frame data')
        self.frame = jpeg

    def join(self):
        # NOTE(review): this only signals run() to stop; it never calls
        # super().join(), so callers do not actually wait for the thread to
        # exit (and run() may still be blocked writing to the FIFO).
        self.stop_request.set()
# snippet-end:[deeplens.python.deeplens_inference_lambda.class_LocalDisplay]
# snippet-start:[deeplens.python.deeplens_inference_lambda.inference_loop]
def infinite_infer_run():
""" Run the DeepLens inference loop frame by frame"""
try:
# This cat-dog model is implemented as binary classifier, since the number
# of labels is small, create a dictionary that converts the machine
# labels to human readable labels.
model_type = 'classification'
output_map = {0: 'dog', 1: 'cat'}
# Create an IoT client for sending to messages to the cloud.
client = greengrasssdk.client('iot-data')
iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
# Create a local display instance that will dump the image bytes to a FIFO
# file that the image can be rendered locally.
local_display = LocalDisplay('480p')
local_display.start()
# The sample projects come with optimized artifacts, hence only the artifact
# path is required.
model_path = '/opt/awscam/artifacts/mxnet_resnet18-catsvsdogs_FP32_FUSED.xml'
# Load the model onto the GPU.
client.publish(topic=iot_topic, payload='Loading action cat-dog model')
model = awscam.Model(model_path, {'GPU': 1})
client.publish(topic=iot_topic, payload='Cat-Dog model loaded')
# Since this is a binary classifier only retrieve 2 classes.
num_top_k = 2
# The height and width of the training set images
input_height = 224
input_width = 224
# Do inference until the lambda is killed.
while True:
# inference loop to add. See the next step
...
except Exception as ex:
client.publish(topic=iot_topic, payload='Error in cat-dog lambda: {}'.format(ex))
# snippet-end:[deeplens.python.deeplens_inference_lambda.inference_loop]
# snippet-start:[deeplens.python.deeplens_inference_lambda.inference_step]
# Get a frame from the video stream
ret, frame = awscam.getLastFrame()
if not ret:
raise Exception('Failed to get frame from the stream')
# Resize frame to the same size as the training set.
frame_resize = cv2.resize(frame, (input_height, input_width))
# Run the images through the inference engine and parse the results using
# the parser API, note it is possible to get the output of doInference
# and do the parsing manually, but since it is a classification model,
# a simple API is provided.
parsed_inference_results = model.parseResult(model_type,
model.doInference(frame_resize))
# Get top k results with highest probabilities
top_k = parsed_inference_results[model_type][0:num_top_k]
# Add the label of the top result to the frame used by local display.
# See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
# for more information about the cv2.putText method.
# Method signature: image, text, origin, font face, font scale, color, and thickness
cv2.putText(frame, output_map[top_k[0]['label']], (10, 70),
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 165, 20), 8)
# Set the next frame in the local display stream.
local_display.set_frame_data(frame)
# Send the top k results to the IoT console via MQTT
cloud_output = {}
for obj in top_k:
cloud_output[output_map[obj['label']]] = obj['prob']
client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
# snippet-end:[deeplens.python.deeplens_inference_lambda.inference_step]
# snippet-start:[deeplens.python.deeplens_inference_lambda.complete]
#*****************************************************
# *
# Copyright 2018 Amazon.com, Inc. or its affiliates. *
# All Rights Reserved. *
# *
#*****************************************************
""" A sample lambda for cat-dog detection"""
from threading import Thread, Event
import os
import json
import numpy as np
import awscam
import cv2
import greengrasssdk
def lambda_handler(event, context):
"""Empty entry point to the Lambda function invoked from the edge."""
return
class LocalDisplay(Thread):
    """ Class for facilitating the local display of inference results
        (as images). The class is designed to run on its own thread. In
        particular the class dumps the inference results into a FIFO
        located in the tmp directory (which lambda has access to). The
        results can be rendered using mplayer by typing:
        mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 /tmp/results.mjpeg
    """
    def __init__(self, resolution):
        """ resolution - Desired resolution of the project stream,
            one of '1080p', '720p' or '480p'.
        """
        # Initialize the base class, so that the object can run on its own
        # thread.
        super(LocalDisplay, self).__init__()
        # List of valid resolutions
        RESOLUTION = {'1080p' : (1920, 1080), '720p' : (1280, 720), '480p' : (858, 480)}
        if resolution not in RESOLUTION:
            raise Exception("Invalid resolution")
        self.resolution = RESOLUTION[resolution]
        # Initialize the default image to be a white canvas. Clients
        # will update the image when ready.
        self.frame = cv2.imencode('.jpg', 255*np.ones([640, 480, 3]))[1]
        self.stop_request = Event()

    def run(self):
        """ Overridden method that continually dumps images to the desired
            FIFO file.
        """
        # Path to the FIFO file. The lambda only has permissions to the tmp
        # directory. Pointing to a FIFO file in another directory
        # will cause the lambda to crash.
        result_path = '/tmp/results.mjpeg'
        # Create the FIFO file if it doesn't exist.
        if not os.path.exists(result_path):
            os.mkfifo(result_path)
        # Bug fix: open in *binary* mode — the frames are raw JPEG bytes
        # (self.frame.tobytes()), and writing bytes to a text-mode handle
        # raises TypeError on Python 3.
        # This call will block until a consumer is available.
        with open(result_path, 'wb') as fifo_file:
            # is_set() is the non-deprecated spelling of isSet().
            while not self.stop_request.is_set():
                try:
                    # Write the data to the FIFO file. This call will block
                    # meaning the code will come to a halt here until a
                    # consumer is available.
                    fifo_file.write(self.frame.tobytes())
                except IOError:
                    # A consumer disconnecting mid-write is expected; just
                    # retry with the next frame.
                    continue

    def set_frame_data(self, frame):
        """ Method updates the image data. This currently encodes the
            numpy array to jpg but can be modified to support other encodings.
            frame - Numpy array containing the image data of the next frame
            in the project stream.
        """
        ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))
        if not ret:
            raise Exception('Failed to set frame data')
        self.frame = jpeg

    def join(self, timeout=None):
        """ Signal the run loop to stop. Accepts the Thread.join timeout
        argument for signature compatibility, but deliberately does not
        wait on the thread: the writer may be blocked forever in open()
        or write() when no FIFO consumer is attached, so waiting here
        could hang the caller.
        """
        self.stop_request.set()
def infinite_infer_run():
    """ Run the DeepLens inference loop frame by frame"""
    try:
        # This cat-dog model is implemented as binary classifier, since the number
        # of labels is small, create a dictionary that converts the machine
        # labels to human readable labels.
        model_type = 'classification'
        output_map = {0: 'dog', 1: 'cat'}
        # Create an IoT client for sending to messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_resnet18-catsvsdogs_FP32_FUSED.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload='Loading action cat-dog model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic, payload='Cat-Dog model loaded')
        # Since this is a binary classifier only retrieve 2 classes.
        num_top_k = 2
        # The height and width of the training set images
        input_height = 224
        input_width = 224
        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is a classification model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(model_type,
                                                         model.doInference(frame_resize))
            # Get top k results with highest probabilities
            top_k = parsed_inference_results[model_type][0:num_top_k]
            # Add the label of the top result to the frame used by local display.
            # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
            # for more information about the cv2.putText method.
            # Method signature: image, text, origin, font face, font scale, color, and thickness
            cv2.putText(frame, output_map[top_k[0]['label']], (10, 70),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 165, 20), 8)
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send the top k results to the IoT console via MQTT
            cloud_output = {}
            for obj in top_k:
                cloud_output[output_map[obj['label']]] = obj['prob']
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        # NOTE(review): if the failure happened before `client` was created,
        # this publish itself raises NameError and masks the original error.
        client.publish(topic=iot_topic, payload='Error in cat-dog lambda: {}'.format(ex))

# Kick off the inference loop at module import time (Greengrass
# long-lived lambda pattern: lambda_handler itself is a no-op).
infinite_infer_run()
# snippet-end:[deeplens.python.deeplens_inference_lambda.complete]
|
12,883 | 6be0fee7e208872b03b152bb1953bf1dde74ce88 | import sys,os
import data_processing as dp
import ecc_tools as tools
import timeit
# import pydca-ER module
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
from pydca.erdca import erdca
from pydca.sequence_backmapper import sequence_backmapper
from pydca.msa_trimmer import msa_trimmer
from pydca.msa_trimmer.msa_trimmer import MSATrimmerException
from pydca.dca_utilities import dca_utilities
import numpy as np
import pickle
from gen_ROC_jobID_df import add_ROC
# Import Bio data processing features
import Bio.PDB, warnings
from Bio.PDB import *
pdb_list = Bio.PDB.PDBList()
pdb_parser = Bio.PDB.PDBParser()
from scipy.spatial import distance_matrix
from Bio import BiopythonWarning
from pydca.sequence_backmapper import sequence_backmapper
from pydca.msa_trimmer import msa_trimmer
from pydca.contact_visualizer import contact_visualizer
from pydca.dca_utilities import dca_utilities
warnings.filterwarnings("error")
warnings.simplefilter('ignore', BiopythonWarning)
warnings.simplefilter('ignore', DeprecationWarning)
warnings.simplefilter('ignore', FutureWarning)
warnings.simplefilter('ignore', ResourceWarning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
#========================================================================================
# Cluster (biowulf) data paths — immediately overridden by the local paths below.
data_path = '/data/cresswellclayec/hoangd2_data/Pfam-A.full'
preprocess_path = '/data/cresswellclayec/DCA_ER/biowulf/pfam_ecc/'
# NOTE(review): these reassignments make the two lines above dead code;
# presumably the active pair is toggled by hand between machines.
data_path = '/home/eclay/Pfam-A.full'
preprocess_path = '/home/eclay/DCA_ER/biowulf/pfam_ecc/'
# Command line: Pfam family id, total CPUs for this job, and the job id (logging only).
#pfam_id = 'PF00025'
pfam_id = sys.argv[1]
cpus_per_job = int(sys.argv[2])
job_id = sys.argv[3]
print("Calculating DI for %s using %d (of %d) threads (JOBID: %s)"%(pfam_id,cpus_per_job-4,cpus_per_job,job_id))
# Read in Reference Protein Structure
pdb = np.load('%s/%s/pdb_refs.npy'%(data_path,pfam_id))
# convert bytes to str (python 2 to python 3)
pdb = np.array([pdb[t,i].decode('UTF-8') for t in range(pdb.shape[0]) for i in range(pdb.shape[1])]).reshape(pdb.shape[0],pdb.shape[1])
# Use the first PDB reference row; column 1 holds the MSA row index of the
# reference sequence (tpdb).
ipdb = 0
tpdb = int(pdb[ipdb,1])
print('Ref Sequence # should be : ',tpdb-1)
# Load Multiple Sequence Alignment
s = dp.load_msa(data_path,pfam_id)
# Load Polypeptide Sequence from PDB as reference sequence
print(pdb[ipdb,:])
# Columns 5-8 of a reference row: PDB id, chain id, start and end residue numbers.
pdb_id = pdb[ipdb,5]
pdb_chain = pdb[ipdb,6]
pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8])
pdb_range = [pdb_start-1, pdb_end]
#print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1)
#print('download pdb file')
pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb')
#pdb_file = pdb_list.retrieve_pdb_file(pdb_id)
pfam_dict = {}
#---------------------------------------------------------------------------------------------------------------------#
#--------------------------------------- Create PDB-PP Reference Sequence --------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
# Write the MSA reference row to FASTA, then pull the chain sequence that the
# ERDCA visualizer derives from the PDB file itself.
msa_file, ref_file = tools.write_FASTA(s[tpdb], s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='orig')
erdca_visualizer = contact_visualizer.DCAVisualizer('protein', pdb[ipdb,6], pdb[ipdb,5],refseq_file=ref_file)
biomol_info,er_pdb_seq = erdca_visualizer.pdb_content.pdb_chain_sequences[erdca_visualizer.pdb_chain_id]
print('\n\nERDCA-Visualizer pdb seq')
print(er_pdb_seq)
erdca_msa_file, erdca_ref_file = tools.write_FASTA(er_pdb_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
#---------------------------------------------------------------------------------------------------------------------#
# NOTE(review): indentation was lost in this dump; the `if 1:` grouping below is
# reconstructed.  The first half duplicates the MSA/PDB loading done above.
if 1: # DCA read in
    # Load Multiple Sequence Alignment
    s = dp.load_msa(data_path,pfam_id)
    # Load Polypeptide Sequence from PDB as reference sequence
    print(pdb[ipdb,:])
    pdb_id = pdb[ipdb,5]
    pdb_chain = pdb[ipdb,6]
    pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8])
    pdb_range = [pdb_start-1, pdb_end]
    #print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1)
    #print('download pdb file')
    pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb')
    #pdb_file = pdb_list.retrieve_pdb_file(pdb_id)
    pfam_dict = {}
    #---------------------------------------------------------------------------------------------------------------------#
    # Build the polypeptide sequence for the chain directly from the PDB file.
    chain = pdb_parser.get_structure(str(pdb_id),pdb_file)[0][pdb_chain]
    ppb = PPBuilder().build_peptides(chain)
    # print(pp.get_sequence())
    print('peptide build of chain produced %d elements'%(len(ppb)))
    matching_seq_dict = {}
    poly_seq = list()
    # Concatenate every peptide fragment into one residue-character list.
    for i,pp in enumerate(ppb):
        for char in str(pp.get_sequence()):
            poly_seq.append(char)
    print('PDB Polypeptide Sequence: \n',poly_seq)
    #check that poly_seq matches up with given MSA
    poly_seq_range = poly_seq[pdb_range[0]:pdb_range[1]]
    print('PDB Polypeptide Sequence (In Proteins PDB range len=%d): \n'%len(poly_seq_range),poly_seq_range)
    if len(poly_seq_range) < 10:
        print('PP sequence overlap with PDB range is too small.\nWe will find a match\nBAD PDB-RANGE')
        # fall back to the full polypeptide sequence
        poly_seq_range = poly_seq
    else:
        pp_msa_file_range, pp_ref_file_range = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='range')
        erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
    # Always also write FASTA files for the full polypeptide sequence.
    pp_msa_file, pp_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
    erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------- PreProcess FASTA Alignment -------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
preprocessed_data_outfile = preprocess_path+'MSA_%s_PreProcessed.fa'%pfam_id
print(preprocessed_data_outfile)
print('\n\nPre-Processing MSA with Range PP Seq\n\n')
trimmer = msa_trimmer.MSATrimmer(
    erdca_msa_file, biomolecule='PROTEIN',
    refseq_file = erdca_ref_file
)
pfam_dict['ref_file'] = erdca_ref_file
try:
    preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False)
except(MSATrimmerException):
    ERR = 'PPseq-MSA'
    print('Error with MSA trimms\n%s\n'%ERR)
    sys.exit()
print('\n\n\n',s[s_ipdb])
#write trimmed msa to file in FASTA format
with open(preprocessed_data_outfile, 'w') as fh:
    for seqid, seq in preprocessed_data:
        fh.write('>{}\n{}\n'.format(seqid, seq))
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
#----------------------------------------- Run Simulation ERDCA ------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
try:
    print('Initializing ER instance\n\n')
    # Compute DI scores using Expectation Reflection algorithm
    erdca_inst = erdca.ERDCA(
        preprocessed_data_outfile,
        'PROTEIN',
        s_index = s_index,
        pseudocount = 0.5,
        num_threads = cpus_per_job-4,
        seqid = 0.8)
except:
    # NOTE(review): bare except — ANY failure above falls through to
    # re-trimming with the MSA-row reference sequence instead of the
    # PDB-derived one, which can hide unrelated errors.
    ref_seq = s[tpdb,:]
    print('Using PDB defined reference sequence from MSA:\n',ref_seq)
    msa_file, ref_file = tools.write_FASTA(ref_seq, s, pfam_id, number_form=False,processed=False,path=preprocess_path)
    pfam_dict['ref_file'] = ref_file
    print('Re-trimming MSA with pdb index defined ref_seq')
    # create MSATrimmer instance
    trimmer = msa_trimmer.MSATrimmer(
        msa_file, biomolecule='protein',
        refseq_file=ref_file
    )
    preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False)
    #write trimmed msa to file in FASTA format
    with open(preprocessed_data_outfile, 'w') as fh:
        for seqid, seq in preprocessed_data:
            fh.write('>{}\n{}\n'.format(seqid, seq))
    erdca_inst = erdca.ERDCA(
        preprocessed_data_outfile,
        'PROTEIN',
        s_index = s_index,
        pseudocount = 0.5,
        num_threads = cpus_per_job-4,
        seqid = 0.8)
# Save processed data dictionary and FASTA file
pfam_dict['processed_msa'] = preprocessed_data
pfam_dict['msa'] = s
pfam_dict['s_index'] = s_index
pfam_dict['s_ipdb'] = s_ipdb
pfam_dict['cols_removed'] = cols_removed
input_data_file = preprocess_path+"%s_DP_ER.pickle"%(pfam_id)
with open(input_data_file,"wb") as f:
    pickle.dump(pfam_dict, f)
    # redundant close: the with-block already closes f
    f.close()
print('Running ER simulation\n\n')
# Compute average product corrected Frobenius norm of the couplings
start_time = timeit.default_timer()
erdca_DI = erdca_inst.compute_sorted_DI()
run_time = timeit.default_timer() - start_time
print('ER run time:',run_time)
# Show the five strongest DI pairs, then persist the full ranking.
for site_pair, score in erdca_DI[:5]:
    print(site_pair, score)
with open('DI/ER/er_DI_%s.pickle'%(pfam_id), 'wb') as f:
    pickle.dump(erdca_DI, f)
    # redundant close: the with-block already closes f
    f.close()
#---------------------------------------------------------------------------------------------------------------------#
# Optional diagnostics: contact map and true-positive-rate plots (off by default).
plotting = False
if plotting:
    # Print Details of protein PDB structure Info for contact visualizeation
    print('Using chain ',pdb_chain)
    print('PDB ID: ', pdb_id)
    from pydca.contact_visualizer import contact_visualizer
    visualizer = contact_visualizer.DCAVisualizer('protein', pdb_chain, pdb_id,
                                                  refseq_file = pp_ref_file,
                                                  sorted_dca_scores = erdca_DI,
                                                  linear_dist = 4,
                                                  contact_dist = 8.)
    contact_map_data = visualizer.plot_contact_map()
    #plt.show()
    #plt.close()
    tp_rate_data = visualizer.plot_true_positive_rates()
    #plt.show()
    #plt.close()
    #print('Contact Map: \n',contact_map_data[:10])
    #print('TP Rates: \n',tp_rate_data[:10])
    with open(preprocess_path+'ER_%s_contact_map_data.pickle'%(pfam_id), 'wb') as f:
        pickle.dump(contact_map_data, f)
        f.close()
    with open(preprocess_path+'ER_%s_tp_rate_data.pickle'%(pfam_id), 'wb') as f:
        pickle.dump(tp_rate_data, f)
        f.close()
|
12,884 | 30ff09278265f131b92ef8d937bf8034942c657b | #! /usr/bin/env python3
import io
import sys
import json
from itertools import tee, chain, groupby
from difflib import ndiff
from pprint import pprint
from colorama import Fore, Style
def windows(iterable):
    """Yield overlapping (prev, curr, next) triples over *iterable*,
    padded with one leading None and two trailing Nones."""
    padded = [None] + list(iterable) + [None, None]
    for i in range(len(padded) - 2):
        yield (padded[i], padded[i + 1], padded[i + 2])
def check_timing(o, next):
    """Return a list of human-readable timing problems for span *o*
    given its successor *next*; empty list when nothing is wrong."""
    if o is None:
        return []
    issues = []
    if o['start'] > o['end']:
        issues.append(
            '* begins after it ends: %s' % (o['start'] - o['end']))
    if next and o['start'] > next['start']:
        issues.append(
            '* begins after successor starts: %s'
            % (o['start'] - next['start']))
    return issues
def merge(objects):
    """Yield a copy of each object with start/end widened to cover the
    union of every object's temporal range; yields nothing for no input."""
    if not objects:
        return
    bounds = [t for o in objects for t in (o['start'], o['end'])]
    lo, hi = min(bounds), max(bounds)
    for o in objects:
        combined = dict(o)
        combined['start'] = lo
        combined['end'] = hi
        yield combined
def downscale_time_resolution(objects, factor=1000):
    """Yield copies of *objects* with start/end divided by *factor* and
    rounded (banker's rounding); None entries pass through unchanged."""
    for o in objects:
        if o is None:
            yield None
            continue
        scaled = dict(o)
        scaled['start'] = round(o['start'] / factor)
        scaled['end'] = round(o['end'] / factor)
        yield scaled
def exclude(o, *keys):
    """Return a shallow copy of mapping *o* with *keys* removed.
    Raises KeyError if any key is absent, matching dict deletion."""
    trimmed = dict(o)
    for key in keys:
        trimmed.pop(key)
    return trimmed
def show(o, highlight=False):
    """Pretty-print span *o* (minus its bulky keys) to stderr, optionally
    wrapped in red ANSI color; a None span prints nothing."""
    if o is None:
        return
    err = sys.stderr
    if highlight:
        print(Fore.RED, file=err, end='')
    print('START:\t%s' % o['start'], file=err)
    pprint(exclude(o, 'sentences', 'tokens'), stream=err)
    print('END:\t%s' % o['end'], file=err)
    if highlight:
        print(Style.RESET_ALL, file=err, end='')
def tell(o=''):
    """Write *o* (default: an empty line) to stderr."""
    sys.stderr.write('%s\n' % (o,))
def lines_of(buf):
    """Return the contents of StringIO-like *buf* as a list of lines,
    keeping line terminators."""
    text = buf.getvalue()
    return text.splitlines(keepends=True)
def diff(a, b):
    """Pretty-print an ndiff of *a* versus *b* to stderr; silent when equal."""
    if a == b:
        return
    dump_a = io.StringIO('')
    dump_b = io.StringIO('')
    pprint(a, stream=dump_a)
    pprint(b, stream=dump_b)
    delta = list(ndiff(lines_of(dump_a), lines_of(dump_b)))
    if delta:
        tell('----------------------------------------')
        sys.stderr.writelines(delta)
def diff_on_key(a, b, key):
    """Diff the *key* entries of two optional dicts; a falsy dict or a
    missing key contributes None."""
    left = a.get(key, None) if a else None
    right = b.get(key, None) if b else None
    diff(left, right)
def speech_of(o):
    """Yield each speech entry of transcript *o*, flattened across turns,
    with the owning turn's speaker and sentences copied onto the entry.

    Fix: the original rebound the parameter name ``o`` inside the loop,
    shadowing the transcript object mid-iteration; a fresh local name is
    used instead so the code no longer relies on the already-bound
    iterator to keep working.
    """
    for turn in o['turns']:
        for speech in turn['speech']:
            entry = {}
            entry.update(speech)
            entry['speaker'] = turn['speaker']
            entry['sentences'] = turn['sentences']
            yield entry
def adjust_timings(speech):
    """Scan speech entries in (prev, curr, next) windows, report timing
    problems, and repair them by merging the three neighbors into one
    shared temporal range; healthy entries stream through unchanged.

    ``skip`` suppresses the next two windows after a merge so the merged
    entries are not re-emitted.
    """
    skip = 0
    prev, curr, next = None, None, None
    # each window is downscaled from microsecond-ish units to seconds
    for prev, curr, next in [downscale_time_resolution(w)
                             for w in windows(speech)]:
        if skip > 0:
            skip = skip - 1
            continue
        problems = check_timing(curr, next)
        if problems:
            tell('\n----------------------------------------')
            tell('\nProblems found in speech:')
            for problem in problems:
                tell(problem)
            tell()
            show(prev)
            show(curr, highlight=True)
            show(next)
            tell('\nMerging speech:\n')
            # NOTE(review): merge() subscripts each member, so a None
            # prev/next at the stream boundary would raise here — appears
            # to rely on problems never occurring at the very edges.
            for m in merge((prev, curr, next)):
                show(m)
                if m is not None:
                    yield m
            # the two upcoming windows re-contain curr/next; drop them
            skip = 2
        elif prev is not None:
            # emit entries one window late so a future merge can still claim them
            yield prev
    # flush the final pending entry unless it was consumed by a merge
    if not skip:
        if curr is not None:
            yield curr
def prepare(o):
    """Build the output transcript: media link, title-cased speaker names,
    and turns (grouped by speaker) with repaired speech timings."""
    result = {}
    # link to the audio file consumed by the player
    result['media'] = 'media/audio.mp3'
    # normalize speaker capitalization
    result['speakers'] = [name.title() for name in o['speakers']]
    result['turns'] = []
    repaired = adjust_timings(speech_of(o))
    for who, entries in groupby(repaired, key=lambda x: x['speaker']):
        entries = list(entries)
        first_sentences = entries[0]['sentences']
        stripped = [exclude(e, 'speaker', 'sentences') for e in entries]
        turn = {'speaker': who,
                'sentences': first_sentences,
                'start': stripped[0]['start'],
                'end': stripped[-1]['end']}
        turn['speech'] = stripped
        result['turns'].append(turn)
    return result
# Script entry: read the raw transcript JSON named on the command line,
# repair/prepare it, and emit the result on stdout.
o = json.load(open(sys.argv[1]))
p = prepare(o)
print(json.dumps(p, indent=4))
|
12,885 | 7cdbf060bcfb6306b1d4b1661180177deba22f9f |
# Demonstrate manual iteration over a list with an explicit iterator object.
d = [1, 2, 3, 4, 5]  # list of int declaration
list_iterator_obj = iter(d)
# Fix: the original issued six __next__() calls on a five-element list, so
# the last one crashed with an uncaught StopIteration.  Use the next()
# builtin (the idiomatic spelling of __next__()) and stop cleanly when the
# iterator is exhausted.
try:
    while True:
        print(next(list_iterator_obj))
except StopIteration:
    # iterator exhausted after the five elements
    pass
|
12,886 | 27d14d2518e1fced23952ca7e8224b48816d9657 | #
# classifiers.py
# define classifiers as basis for Neighborhood Graph Analysis
#
# Copyright (c) 2018 Wesley Reinhart.
# This file is part of the crayon project, released under the Modified BSD License.
from __future__ import print_function
import _crayon
import numpy as np
class Classifier:
    R""" abstract class defining a neighborhood topology

    Subclasses are expected to set ``self.s`` to a hashable string
    signature and to override ``__sub__`` with a numeric distance.
    """
    def __init__(self):
        # signature string; None until a subclass computes it
        self.s = None
    def __sub__(self,other):
        # abstract distance between classifiers; base implementation
        # returns None, which makes the base __eq__ always False
        return None
    def __eq__(self,other):
        R""" equality comparison between this and another Classifier,
        simply checks if A - B == 0
        """
        return (self - other == 0.)
    def __ne__(self,other):
        R""" inequality comparison between this and another Classifier,
        simply checks if A - B > 0
        """
        return not self == other
    def __str__(self):
        R""" hashable representation of the Classifier, as specified
        by the constructor
        """
        # NOTE(review): for the bare base class self.s is None, so str()
        # on an instance raises TypeError; only meaningful once a
        # subclass has set self.s.
        return self.s
class Graph(Classifier):
    R""" evaluates topology of neighborhood as presented in a single graph
    Args:
        A (array or tuple): adjacency matrix defining the neighborhood graph,
            or a precomputed (sgdv, ngdv) pair of graphlet degree vectors
        k (int,optional): maximum graphlet size (default 5)
    """
    def __init__(self,A,k=5):
        if type(A) == tuple:
            # precomputed (summed GDV, normalized GDV) pair — skip the build
            self.sgdv = A[0]
            self.ngdv = A[1]
        else:
            self.build(A,k)
        # build a hashable representation of the graph
        self.s = str(self.sgdv.tolist()).replace(' ','')
    def build(self,A,k=5):
        R""" builds Graph from adjacency matrix and computes necessary quantities for NGA
        Args:
            A (array): adjacency matrix
            k (int,optional): maximum graphlet size (default 5)
        """
        # instantiate a Crayon::Graph object
        self.cpp = _crayon.neighborhood(A,k)
        # retrieve adjacency matrix
        self.adj = self.cpp.adj()
        # compute its Graphlet Degree Vector
        self.gdv = self.cpp.gdv()
        # convert node-wise to graph-wise graphlet frequencies
        self.sgdv = np.sum(self.gdv,axis=0)
        # weight GDV according to dependencies between orbits
        # (fix: dtype=np.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float is the documented equivalent)
        o = np.array([1, 2, 2, 2, 3, 4, 3, 3, 4, 3,
                      4, 4, 4, 4, 3, 4, 6, 5, 4, 5,
                      6, 6, 4, 4, 4, 5, 7, 4, 6, 6,
                      7, 4, 6, 6, 6, 5, 6, 7, 7, 5,
                      7, 6, 7, 6, 5, 5, 6, 8, 7, 6,
                      6, 8, 6, 9, 5, 6, 4, 6, 6, 7,
                      8, 6, 6, 8, 7, 6, 7, 7, 8, 5,
                      6, 6, 4],dtype=float)
        w = 1. - o / 73.
        self.ngdv = self.sgdv * w[:self.sgdv.shape[0]]
        # normalize; the max() guard avoids division by zero for empty graphs
        self.ngdv = self.ngdv / max(float(np.sum(self.ngdv)),1.)
    def __sub__(self,other):
        R""" difference between this and another Graph, just the norm
        between graph-wide Graphlet Degree Vectors
        """
        return np.linalg.norm(self.ngdv-other.ngdv)
class Library:
    R""" handles sets of generic signatures from snapshots and ensembles of snapshots

    Attributes:
        sigs (list of str): hashable signature, one per unique item
        items (list): the unique items themselves
        counts (array): occurrence count per unique item
        sizes (array): largest observed size per unique item
        index (dict): signature -> position in the parallel lists
        lookup (dict): populated by subclasses (signature -> member indices)
    """
    def __init__(self):
        self.sigs = []
        self.items = []
        # fix: dtype=np.int was removed in NumPy 1.24; builtin int is equivalent
        self.counts = np.array([],dtype=int)
        self.sizes = np.array([],dtype=int)
        self.index = {}
        self.lookup = {}
    def build(self):
        # hook for subclasses; the base library builds nothing
        return
    def find(self,item):
        R""" locate an object's signature in the Library
        Args:
            item (object): object to be located
        Returns:
            index (int): index of the object's signature, or None if absent
        """
        sig = str(item)
        try:
            return self.index[sig]
        except KeyError:
            # fix: catch only the missing-key case instead of a bare
            # except, so unrelated errors are not silently swallowed
            return None
    def encounter(self,item,count=1,size=0,add=True):
        R""" adds an object to the library and returns its index
        Args:
            item (object): object to consider
            count (int,optional): count to add to the library (e.g., frequency from Snapshot) (default 1)
            size (int,optional): size associated with the item; the stored size is the max seen (default 0)
            add (bool,optional): should the item be added to the Library? (alternative is only find) (default True)
                NOTE(review): currently unused — new items are always added.
        Returns:
            idx (int): the index of the item's signature in the library
        """
        sig = str(item)
        try:
            idx = self.index[sig]
            self.counts[idx] += count
            self.sizes[idx] = max(self.sizes[idx],size)
        except KeyError:
            # fix: bare except narrowed to the first-encounter case
            idx = len(self.items)
            self.sigs.append(sig)
            self.items.append(item)
            self.counts = np.append(self.counts,count)
            self.sizes = np.append(self.sizes,size)
            self.index[sig] = idx
        return idx
    def collect(self,others,counts=True,sizes=True):
        R""" merges other Library objects into this one
        Args:
            others (list of Library objects): Library objects to merge into this one
            counts (bool,optional): should the counts of the others be added together? (default True)
            sizes (bool,optional): should the maximum size of the others replace this one? (default True)
        """
        if type(others) != list:
            others = list([others])
        if type(others[0]) != type(self):
            raise TypeError('Library.collect expects a list of Library objects, but got %s != %s'%(str(type(others[0])),str(type(self))))
        # iterate over supplied library instances
        for other in others:
            for idx in range(len(other.items)):
                self.encounter(other.items[idx],
                               count=(other.counts[idx] if counts else 0),
                               size=(other.sizes[idx] if sizes else 0))
class GraphLibrary(Library):
    R""" handles sets of graphs from snapshots and ensembles of snapshots
    """
    def build(self,neighborhoods,k=5):
        R""" builds the GraphLibrary from neighborhoods
        Args:
            neighborhoods (list): list of neighborhoods (adjacency matrices) to build from
            k (int,optional): maximum graphlet size (default 5)
        """
        # fix: dtype=np.int was removed in NumPy 1.24; builtin int is equivalent
        g_idx = np.zeros(len(neighborhoods),dtype=int)
        # deduplicate graphs through Library.encounter; g_idx maps each
        # neighborhood to its unique-graph index
        for i, nn in enumerate(neighborhoods):
            G = Graph(nn,k)
            g_idx[i] = self.encounter(G)
        # map each signature to the indices of the neighborhoods sharing it
        for i, sig in enumerate(self.sigs):
            if sig not in self.lookup:
                self.lookup[sig] = np.array([],dtype=int)
            self.lookup[sig] = np.hstack((self.lookup[sig],np.argwhere(g_idx==self.index[sig]).flatten()))
|
12,887 | 4c1a751d59046189ee999b2d7b6d20dcd28b12c3 | from evdev import InputDevice
from select import select
# Open the input device to watch and print every scroll-wheel delta.
dev = InputDevice('/dev/input/event0') # This can be any other event number. On my Raspi it turned out to be event0
while True:
    # block until the device has events ready to read
    r,w,x = select([dev], [], [])
    for event in dev.read():
        # The event.code for a scroll wheel event is 8, so I do the following
        if event.code == 8:
            # +1 / -1 per wheel notch
            print(event.value)
12,888 | 2a53702620c887bbeaa459346af6fcd42dbd48ac | # Create your views here.
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from feed.models import JsonFeeds
from rest_api.serializer import FeedSerializer
from datetime import datetime
from rest_framework import permissions
@api_view(['GET', 'POST'])
def feed_list(request):
    """List the 25 most recent feeds, or create a new feed entry.

    GET  -> serialized list of up to 25 JsonFeeds rows.
    POST -> validates and saves the payload; 201 on success, 400 on errors.
    """
    #List all snippets, or create a new snippet.
    if request.method == 'GET':
        feeds = JsonFeeds.objects.all()[:25]
        serializer = FeedSerializer(feeds, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        # NOTE(review): request.DATA is the Django REST framework 2.x API;
        # DRF 3 renamed it to request.data — confirm the pinned DRF version.
        serializer = FeedSerializer(data=request.DATA)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
12,889 | d0d18f5395040fba9f1294814dd8a67936d4a455 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import InputRequired, Length, Email
class LoginForm(FlaskForm):
    """Login form: a required name (min 5 chars) and password (6-15 chars)."""
    name = StringField('name', validators=[InputRequired(message='Please enter a name'),Length(min=5)])
    password = PasswordField('pass', validators=[InputRequired(), Length(min=6, max=15)])
class SignupForm(FlaskForm):
    """Signup form for a driver/vehicle account: name, e-mail, vehicle
    model, vehicle license number, and password."""
    name = StringField('name', validators=[InputRequired(message='Please enter a name'),Length(min=5)])
    # Bug fix: Length(min=50) required e-mail addresses to be at least 50
    # characters long, rejecting virtually every real address; the intent
    # was clearly an upper bound, so cap the length at 50 instead.
    email = StringField('email', validators=[InputRequired(), Email(message='Invalid Email'), Length(max=50)])
    model = StringField('model', validators=[InputRequired(message='Vehicle model required')])
    vid = StringField('vid', validators=[InputRequired(message='Vehicle License Number')])
    password = PasswordField('pass', validators=[InputRequired(message='Enter a password'), Length(min=6, max=15)])
|
12,890 | 74a2101351c155f40b6a0018a45d63df7ec32987 | # -*- coding: utf-8 -*-
# Conventions are according to NumPy Docstring.
"""
n! means n * (n − 1) * ... * 3 * 2 * 1
For example, 10! = 10 * 9 * ... * 3 * 2 * 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
"""
import time
import math
if __name__ == '__main__':
# input factorial number
inputNum = 100
# initialize running time
start = time.time()
numFactorial = math.factorial(inputNum)
numFactorialString = str(numFactorial)
# sum of the digits initialized
sumDigits = 0
for char_idx in range(len(numFactorialString)):
sumDigits += int(numFactorialString[char_idx])
# finalize running time
stop = time.time()
# print results
print "Sum of Digits of %s!: \t %s" %(inputNum, sumDigits)
print "Running Time: \t %.2f sec" %(stop - start)
|
12,891 | adaefd6068a1d558f79d0badc920d14fed4b745b | from selenium import webdriver
from selenium.webdriver.common.by import By # 选择器
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
import time
# Drive Chrome to search Taobao for "macpro", page to the next results
# page, and print one product link.
browser = webdriver.Chrome()
wait_obj = WebDriverWait(browser, timeout=5) # create a wait object with a fixed maximum wait time
try:
    browser.get('https://www.taobao.com')
    input_ele = browser.find_element_by_id('q')
    input_ele.send_keys('macpro')
    # locate the search button
    # option 1
    # btn_ele = browser.find_elements_by_class_name('btn-search')[0]
    # option 2
    btn_ele = browser.find_element(By.CSS_SELECTOR, '.btn-search')
    btn_ele.click()
    # wait until the pagination controls in the target area have loaded
    wait_obj.until(EC.presence_of_element_located((By.CLASS_NAME, 'next')))
    next_ele = browser.find_element(By.CSS_SELECTOR,'.next a')
    next_ele.click()
    ele = browser.find_element(By.ID, 'J_Itemlist_PLink_15348329368')
    # read an attribute off the element
    print(ele.get_attribute('href'))
    time.sleep(100)
    browser.close()
except:
    # NOTE(review): bare except hides every failure (including
    # KeyboardInterrupt); it only guarantees the browser gets closed.
    browser.close()
|
12,892 | 90f96a17a8c1b8dca11176d33468c8d6425aa361 | __author__ = 'student'
import cv2
import numpy as np
from detector_node import getHistParams
import pickle
def draw_mask(event,x,y,flags,param):
    """Mouse callback: paint a circular sample region onto the image and
    an accompanying binary mask while the left button is held down.

    Uses module globals: frame (source image), img (painted copy),
    mask (single-channel sample mask), drawing (button-held flag).
    """
    global frame,drawing,img,mask
    # radius in pixels of the sampling brush
    samplesize = 100
    if (event == cv2.EVENT_LBUTTONDOWN):
        # start a fresh annotation: reset the painted copy and the mask
        img = np.copy(frame)
        w,h,c=img.shape
        mask = np.zeros((w,h,1),np.uint8)
        drawing = True
        point = (x, y)
        cv2.circle(img, point, samplesize, (0, 255, 0), -1)
        cv2.circle(mask, point, samplesize, 255, -1)
        # draw the brush outline on a throwaway copy so it does not
        # accumulate in img
        img2 = np.copy(img)
        cv2.circle(img2, point, samplesize, (255, 0, 0), 1)
        cv2.imshow('image', img2)
    if (event == cv2.EVENT_MOUSEMOVE):
        point = (x, y)
        if drawing:
            # extend the painted region while the button is held
            cv2.circle(img, point, samplesize, (0, 255, 0), -1)
            cv2.circle(mask, point, samplesize, 255, -1)
            img2 = np.copy(img)
            cv2.circle(img2, point, samplesize, (255, 0, 0), 1)
            cv2.imshow('image', img2)
    if (event == cv2.EVENT_LBUTTONUP and drawing):
        drawing = False
# Load the sample image and let the user paint a sample region with the mouse.
frame=cv2.imread("balls.png")
cv2.imshow("image",frame)
drawing=False
img = np.copy(frame)
cv2.setMouseCallback('image',draw_mask)
cv2.waitKey(0)
# Build an HSV histogram over the painted mask and pickle it for the detector.
CHANNELS,RANGES,BUCKETS=getHistParams()
hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
print mask.shape,hsv.shape
roihist = cv2.calcHist([hsv], CHANNELS, mask, BUCKETS, RANGES) #3rd parameter is a mask!!
# NOTE(review): Python 2 code; the pickle file is opened in text mode ('w'),
# which only round-trips on platforms where text and binary modes coincide.
f=open('hist.pyo', 'w')
pickle.dump(roihist, f)
cv2.destroyAllWindows()
|
12,893 | c087f63b6e922734f80d0ab1345e610dd810539e | import sqlite3
# Open (or create) the shop database; this single shared connection and
# cursor are used by every page of the console app.
conn=sqlite3.connect('shopDb.db', timeout=10);
chooseDb=conn.cursor();
# NOTE(review): sqlite3.connect raises on failure rather than returning a
# falsy object, so the else branch below is effectively dead code.
if(conn):
    print('Baglanti Başarılı!')
else:
    print('Bağlantı Başarısız!')
def registerPage():
    """Prompt for account details (name, surname, password, e-mail, a
    security question/answer and phone number) and insert them as one
    row into the userInfo table."""
    print('--REGISTER PAGE---\n');
    print('Name: ');
    userName=input();
    print('Surname: ');
    userSurname=input();
    print('Password: ')
    userPass=input();
    print('e-mail: ');
    userEmail=input();
    print('Choose one question type and answer: (1-2-3-4)');
    a='Where were you born?';
    b='Where did your parents meet?';
    c='What is your favourite musician?';
    d='What makes you different?';
    print(a+'(1)\n'+b+'(2)\n'+c+'(3)\n'+d+'(4)\n')
    userQues=input();
    # map the menu choice onto the question text that gets stored
    if(userQues=='1'):
        userQues=a;
    elif(userQues=='2'):
        userQues=b;
    elif(userQues=='3'):
        userQues=c;
    elif(userQues=='4'):
        userQues=d;
    print('Enter your Answer: ');
    userAnswer=input();
    print('Enter your phone number: ');
    userPhoneNum=input();
    # parameterized insert keeps the user input safely escaped
    sql ="INSERT INTO userInfo VALUES (?,?,?,?,?,?,?);"
    chooseDb.execute(sql,(userName,userSurname,userPass,userEmail,userQues,userAnswer,userPhoneNum));
    conn.commit();
def loginpage():
    """Interactive login loop: up to three attempts, then a security-question
    password reset; offers registration when the e-mail is unknown.

    NOTE(review): indentation was lost in this dump — the nesting below is a
    reconstruction of the control flow.
    """
    attempt = 0;
    # loop flag is a string ('true'/'false'), matching the style elsewhere
    loop='true';
    while loop == 'true':
        print('Please enter your e-mail: ');
        logEmail=input();
        print('Please enter your password: ');
        logPass=input();
        with sqlite3.connect('shopDb.db') as db:
            cursor=db.cursor()
            # parameterized lookup; NOTE(review): passwords are stored and
            # compared in plain text
            finduser = ("SELECT * FROM userInfo WHERE userEmail=? AND userPassword=?")
            cursor.execute(finduser, [logEmail, logPass])
            results = cursor.fetchall()
            if results:
                for i in results:
                    print("Welcome "+i[0]+"!")
                    loop = 'false'
                    categories()
                    return "exit"
            else:
                print("Username and password not recognised!")
                attempt = attempt+1
                if attempt==3:
                    # third failure: switch to the password-reset flow
                    print('Please re-enter your e-mail: ')
                    logEmail=input()
                    with sqlite3.connect('shopDb.db') as db:
                        cursor = db.cursor()
                        finduser = ("SELECT * FROM userInfo WHERE userEmail=?")
                        cursor.execute(finduser, [logEmail])
                        results = cursor.fetchall()
                        if results:
                            for i in results:
                                # i[4] is the stored security question,
                                # i[5] the stored answer
                                print('Please write your security answer: \n')
                                print(i[4]+': ')
                                secAnswer = input();
                                if secAnswer==i[5]:
                                    print('Change your password: ')
                                    newPass=input();
                                    sql = '''UPDATE userInfo SET userPassword=? WHERE userEmail=?'''
                                    chooseDb.execute(sql, (newPass, logEmail));
                                    conn.commit();
                                else:
                                    print('uncorrect answer')
                                return "exit"
                        else:
                            print('There is no such e-mail in the Database!\n\n')
                            print('Would you like to sign up? (Y/N)')
                            choice=input();
                            if choice=='Y':
                                loop = 'false'
                                registerPage();
                            else:
                                break
def categories():
    """Top-level category menu; loops until a valid choice (1-4) is made,
    then hands off to subCategories()."""
    loop='true'
    while loop=='true':
        print('1-COMPUTERS 2-WHITE APPLIANCES 3-SUPERMARKET 4-ELECTRONICS')
        mainCateg = input()
        # NOTE(review): non-numeric input makes int() raise ValueError here
        if 0 < int(mainCateg) < 5:
            subCategories(mainCateg)
            loop = 'false'
        else:
            print ('You entered a false choice!')
def subCategories(mainCateg):
    """Show the sub-category menu for *mainCateg* ('1'..'4') and list products.

    Prompts for a sub-choice, queries the matching product table in
    shopDb.db and prints every row found.

    Returns:
        "exit" when at least one product row was printed, otherwise None.
    """
    # Per category: (prompt, (header, query) for sub-choice '1',
    #               (header, query) for any other sub-choice).
    # All user-visible strings are kept exactly as in the original menus.
    menus = {
        '1': ('Laptops or Desktops? (1-2)',
              ('---Models for Laptops---\n', "SELECT * FROM laptops"),
              ('---Models for Desktops---', "SELECT * FROM desktops")),
        '2': ('washing machine or dishwashing machine?(1-2)',
              ('Models for washers', "SELECT * FROM washers"),
              ('Models for dishwashers', "SELECT * FROM dishwashers")),
        '3': ('Teah or Water?(1-2)',
              ('Models for tea', "SELECT * FROM tea"),
              ('Models for water', "SELECT * FROM water")),
        '4': ('Cell Phone or TV?(1-2)',
              ('Models for phones', "SELECT * FROM cellphone"),
              ('Models for TVs', "SELECT * FROM TV")),
    }
    if mainCateg not in menus:
        # BUG FIX: the original left `find` unbound for unexpected input
        # and crashed with NameError at cursor.execute().
        return
    prompt, first_option, other_option = menus[mainCateg]
    print(prompt)
    subCateg = input()
    header, find = first_option if subCateg == '1' else other_option
    print(header)
    with sqlite3.connect('shopDb.db') as db:
        cursor = db.cursor()
        cursor.execute(find)
        results = cursor.fetchall()
    if results:
        for i in results:
            print(i)
        return "exit"
# ---- Entry point: top-level shop menu -------------------------------------
# Offers shopping (register/login) or plain browsing, then closes the
# module-level database handles (`chooseDb`/`conn` are opened elsewhere
# in this file).
print('Please make a choice: ')
print('I would like to do shopping(--1--)')
print('I would just like to surf(--2--)')
x = input()
if x == '1':
    print('Please register(1)')
    print('Please login if you have an account(2)')
    x = input()
    if x == '1':
        registerPage()
    elif x == '2':
        loginpage()
elif x == '2':
    categories()
else:
    # BUG FIX: user-facing typo 'uncorrect key'.
    print('You entered incorrect key!')
chooseDb.close()
conn.close()
class PID:
    """A simple discrete PID controller.

    Keeps the running error terms per instance and feeds each control
    output to a caller-supplied callback.
    """

    def __init__(self, Kp, Ki, Kd):
        # BUG FIX: error/cumErr/derErr were class attributes; although
        # assignment via self masked them per instance, keeping mutable
        # controller state at class level is a shared-state antipattern.
        # They are now true per-instance state initialised here.
        self.Kp = Kp
        self.Ki = Ki
        self.Kd = Kd
        self.error = 0
        self.cumErr = 0
        self.derErr = 0

    def control(self, desired, mesured, controlTarget):
        """Run one PID step and call controlTarget(output).

        Output is the feed-forward `desired` value plus the P, I and D
        correction terms.  (`mesured` keeps the original spelling so any
        keyword callers continue to work.)
        """
        olderr = self.error
        self.error = desired - mesured
        self.derErr = self.error - olderr
        self.cumErr = self.cumErr + self.error
        controlTarget(desired
                      + self.Kp * self.error
                      + self.Ki * self.cumErr
                      + self.Kd * self.derErr)
12,895 | 86e522dbe54bdd414f9537062925c6a1906c15e6 | import sys
import os
from dotenv import load_dotenv
from pathlib import Path
# Resolve this package's directory and load environment variables from the
# repository-level .env file (one directory above this file).
basedir = os.path.dirname(os.path.abspath(__file__))
env_path = Path(os.path.join(basedir, ".")) / '../.env'
load_dotenv(dotenv_path=env_path)
# Parent directory, added to sys.path so sibling packages import cleanly.
# NOTE(review): slicing on '/' assumes POSIX path separators -- confirm
# Windows is not a supported target.
prevdir = basedir[:basedir.rfind('/')]
sys.path.append(basedir)
sys.path.append(prevdir)
def is_linux_system():
    """Return True when running on a Linux platform (sys.platform check)."""
    return sys.platform in ("linux", "linux2")
# Outside Linux (i.e. outside the containerised environment) point the app
# at a locally forwarded database instead of the compose service hostname.
if not is_linux_system():
    os.environ['DB_SERVICE'] = "localhost"
    os.environ['DB_PORT'] = "45432"
class BaseConfig(object):
    """Base application configuration, read from the environment at import time.

    A missing required variable raises KeyError immediately, surfacing
    misconfiguration as early as possible.
    """
    APP_NAME = 'CORE'
    # Required database connection settings (no defaults on purpose).
    DB_NAME = os.environ['DB_NAME']
    DB_USER = os.environ['DB_USER']
    DB_PASS = os.environ['DB_PASS']
    DB_SERVICE = os.environ['DB_SERVICE']
    DB_PORT = os.environ['DB_PORT']
    SQLALCHEMY_DATABASE_URI = 'postgresql://{0}:{1}@{2}:{3}/{4}'.format(
        DB_USER, DB_PASS, DB_SERVICE, DB_PORT, DB_NAME
    )
    # Env var is expected to be "0"/"1"; int() then bool() converts it.
    SQLALCHEMY_TRACK_MODIFICATIONS = bool(int(os.environ[
        'SQLALCHEMY_TRACK_MODIFICATIONS'
    ]))
|
12,896 | 718f88433e67abfa8379fbc539ee7199d81dbb67 | #! /usr/bin/env python3 -b
# extract markdown documentation from cpp
# complete hack
class Item:
    """One documented declaration extracted from a C++ header.

    Holds the declaration's name, raw declaration text, accumulated
    `///` comment text, classified type, and any nested declarations.
    """

    def __init__(self):
        # All textual fields start empty; nested items start as an empty list.
        self.name = ""
        self.decl = ""
        self.comment = ""
        self.type = ""
        self.children = []
def make_doc(cpp, first_only=False):
    """Extract markdown documentation from `///` comment blocks in C++ source.

    Scans `cpp` line by line, pairing each run of `///` comments with the
    declaration that follows it, classifies the declaration (namespace,
    typedef, enum, struct, function, variable, ...) and renders everything
    as markdown.  When `first_only` is True, only the first (file-level)
    comment block is returned.
    """
    # Pass 1: collect comment blocks and the declarations they document.
    # Indented `///` comments become children of the previous top-level item.
    # comment blocks
    items = []
    cur_item = None
    first = True
    indented = False
    enum_last = False
    for line in cpp.splitlines(True):
        if cur_item:
            if '///' in line:
                cur_item.comment += line
            elif first:
                cur_item = None
                first = False
            else:
                cur_item.decl += line
                if ';' in line or '{' in line:
                    cur_item = None
                if enum_last and ',' in line:
                    cur_item = None
        else:
            if not '///' in line: continue
            # NOTE(review): the leading-whitespace checks below appear to
            # have lost repeated spaces in transit (likely originally a
            # 4-space indent test) -- confirm against the original file.
            if line.startswith(" "):
                items[-1].children += [Item()]
                cur_item = items[-1].children[-1]
                cur_item.comment += line
                indented = True
                enum_last = 'enum ' in items[-1].decl
            else:
                items += [Item()]
                cur_item = items[-1]
                cur_item.comment += line
                indented = False
                enum_last = False
    # Strip the `///` comment markers (and indent) from a comment block.
    def clean_comment(comment):
        if comment.startswith(" "):
            comment = comment.replace(" ///", "")
            comment = comment.replace(" /// ", "")
            comment = comment.strip()
        else:
            comment = comment.replace("/// ", "")
            comment = comment.replace("///", "")
            comment = comment.strip()
        return comment
    # main namespace
    main_namespace = ''
    # hack find: like str.find but returns a large sentinel when absent,
    # so `<` comparisons treat "not found" as "after everything".
    def hack_find(str, s):
        return str.find(s) if str.find(s) >= 0 else 10000
    # Pass 2: classify each item and normalise its name/decl/comment.
    # fix type
    for item in items:
        if item.decl == "":
            item.comment = clean_comment(item.comment)
        elif "namespace " in item.decl:
            item.type = "Namespace"
            item.name = item.decl
            item.name = item.name.replace("namespace ", "").replace("{", "").strip()
            if (main_namespace == ""):
                main_namespace = item.name
            else:
                item.name = main_namespace + "::" + item.name
            item.decl = ""
            item.comment = clean_comment(item.comment)
        elif "using " in item.decl:
            if " = " in item.decl:
                item.type = "Typedef"
                item.name = item.decl
                item.name = item.name.partition(" = ")[0].replace("using ", "").strip()
                item.comment = clean_comment(item.comment)
            else:
                item.type = "Function Alias"
                item.name = item.decl.partition("::")[2].replace(";", "").strip() + "()"
                item.comment = clean_comment(item.comment)
        elif 'enum ' in item.decl:
            item.type = "Enum"
            item.name = item.decl.replace("enum ", "").replace("struct ", "").replace("{", "").strip()
            item.comment = clean_comment(item.comment)
            if item.children:
                item.comment += "\n\n- Values:\n"
                for child in item.children:
                    child.decl = child.decl.replace(";", "").replace("}", "")
                    child.name = child.decl.partition("=")[0].split()[-1].replace(",", "")
                    item.comment += " - " + child.name + ": " + child.comment.replace("///", "")
                    item.decl += child.decl
                item.decl += "}\n"
        elif 'struct ' in item.decl:
            item.type = "Struct"
            item.name = item.decl
            if "template " in item.name:
                item.name = item.name.partition("\n")[2]
            if " : " in item.name:
                item.name = item.name.partition(" : ")[0]
            item.name = item.name.replace("struct ", "").replace("{", "").replace(";", "").strip()
            item.comment = clean_comment(item.comment)
            if item.children:
                item.comment += "\n\n- Members:\n"
                for child in item.children:
                    # Heuristic: a member is a variable when there is no
                    # operator and any '=' precedes any '(' in its decl.
                    isvar = " operator" not in child.decl and ("(" not in child.decl or hack_find(child.decl, "=") < hack_find(child.decl, "("))
                    if isvar:
                        child.name = child.decl.partition("=")[0].split()[-1].replace(";", "")
                        item.comment += " - " + child.name + ": " + child.comment.replace("///", "")
                        item.decl += child.decl
                    else:
                        if "{" in child.decl:
                            child.decl = child.decl.partition("{")[0] + ";\n"
                        if " : " in child.decl:
                            child.decl = child.decl.partition(" : ")[0] + ";\n"
                        child.decl = child.decl.replace("\n", " ") + "\n"
                        # NOTE(review): presumably collapses runs of double
                        # spaces ("  " -> " "); as transcribed the loop
                        # would not terminate -- confirm original literals.
                        while ' ' in child.decl:
                            child.decl = child.decl.replace(" ", " ")
                        child.decl = " " + child.decl.replace(" ;", ";")
                        child.name = child.decl.partition("(")[0].split()[-1] + "()"
                        if "operator " in child.decl:
                            child.name = "operator " + child.name
                        item.comment += " - " + child.name + ": " + child.comment.replace("///", "")
                        item.decl += child.decl
                item.decl += "}\n"
        else:
            isvar = " operator" not in item.decl and ("(" not in item.decl or hack_find(item.decl, "=") < hack_find(item.decl, "("))
            if isvar:
                if "const " in item.decl:
                    item.type = "Constant"
                else:
                    item.type = "Variable"
                item.name = item.decl.partition("=")[0].split()[-1]
                item.comment = clean_comment(item.comment)
            else:
                item.type = "Function"
                if "{" in item.decl:
                    item.decl = item.decl.partition("{")[0] + ";\n"
                if " : " in item.decl:
                    item.decl = item.decl.partition(" : ")[0] + ";\n"
                # item.decl = replace_str(item.decl, "\n", " ") + "\n"
                # while(contains(item.decl, " ")) item.decl =
                # replace_str(item.decl, " ", " ")
                item.decl = item.decl.replace("( ", "(").replace(" ;", ";")
                item.name = item.decl.partition("(")[0].split()[-1] + "()"
                if "operator " in item.decl:
                    item.name = "operator " + item.name
                item.comment = clean_comment(item.comment)
    # Pass 3: render each classified item as a markdown section.
    # render comment blocks
    md = ''
    first = True
    for item in items:
        if item.name != "":
            md += "#### "
            md += item.type + " "
            md += item.name.replace("<", " <").replace(">", " \\>")
            # md += item.name
            md += "\n\n"
        if item.decl != "":
            md += "~~~ .cpp\n"
            md += item.decl.replace("{{", "{ {").replace("}}", "} }")
            md += "~~~\n\n"
        md += item.comment + "\n\n"
        if first:
            if first_only: return md
            md += "## API Documentation\n\n"
            first = False
    return md
template = '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Yocto/GL</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
<link rel="stylesheet" href="style.css">
</head>
<body>
<header>
<nav>
<img src="images/logo_white.png">
<a href="index.html">about</a>
<a href="yocto_gl.html">api</a>
<a href="https://github.com/xelatihy/yocto-gl">github</a>
</nav>
</header>
<article>
$body$
<article>
<footer></footer>
</body>
</html>
'''
def make_html(md):
    """Render markdown `md` into a full HTML page using the site template.

    Also rewrites links from docs/*.md files to their .html counterparts
    and wraps image-only paragraphs in <figure> blocks.
    """
    import markdown
    import glob
    # BUG FIX: Python-Markdown 3.x no longer accepts the extension list as
    # a positional argument; it must be passed via the `extensions` keyword.
    html = markdown.markdown(md,
                             extensions=['markdown.extensions.extra',
                                         'markdown.extensions.codehilite'],
                             output_format='html5')
    html = html.replace('<pre>', '<pre><code>')
    html = html.replace('</pre>', '</code></pre>')
    # Link local markdown sources to their generated HTML pages.
    for link in glob.glob('docs/*.md'):
        link = link.replace('docs/', '')
        hlink = link.replace('.md', '.html')
        html = html.replace(link, hlink)
    # Promote paragraphs that start with an image to <figure> blocks.
    while '<p><img' in html:
        before, _, remainder = html.partition('<p><img')
        middle, _, after = remainder.partition('</p>')
        html = before + '<figure><img' + middle + '</figure>' + after
    html = template.replace('$body$', html)
    return html
# Generate full API docs (markdown + HTML) for each public header.
for filename in ["yocto/yocto_gl.h", "yocto/yocto_gltf.h"]:
    with open(filename) as f: cpp = f.read()
    md = make_doc(cpp)
    html = make_html(md)
    filename_md = filename.replace(".h", ".md").replace("yocto/", "docs/")
    with open(filename_md, 'wt') as f: f.write(md)
    filename_html = filename_md.replace(".md", ".html")
    with open(filename_html, 'wt') as f: f.write(html)
# The main header's file-level comment block doubles as the readme and
# the docs landing page (first_only=True).
for filename in ["yocto/yocto_gl.h"]:
    with open(filename) as f: cpp = f.read()
    md = make_doc(cpp, True)
    html = make_html(md)
    filename_md = filename.replace(".h", ".md").replace("yocto/", "docs/")
    with open('readme.md', 'wt') as f: f.write(md)
    with open('docs/index.html', 'wt') as f: f.write(html)
|
import cv2  # OpenCV

# Live camera preview: read frames from the default camera, draw a fixed
# rectangle on each frame and display it until 'q' is pressed.
cap = cv2.VideoCapture(0)  # default camera device
while True:
    status_camera, img = cap.read()
    # Stop cleanly if the camera failed to deliver a frame.
    if not status_camera:
        break
    print(type(img))
    cv2.rectangle(img, (0, 200), (100, 400), (255, 0, 0), 2)
    cv2.imshow('live2', img)
    # BUG FIX: the original compared the masked key code (an int) to the
    # string 'q', which is never equal, so 'q' could never exit the loop.
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break
# Release the camera handle and close the preview window.
cap.release()
cv2.destroyAllWindows()
#!/usr/bin/env python
# coding=utf-8

# A dictionary describing a familiar person; print each stored fact in order.
familiar_person = {
    'first_name': 'Kobe',
    'last_name': 'Bryant',
    'age': 31,
    'city': 'Los Angeles',
}

for field in ('first_name', 'last_name', 'age', 'city'):
    print(familiar_person[field])
12,899 | c263c95f105601ea9bcf0c554b08ade02dcc4b4f | import re
def parse_ranges(string):
    """Yield every integer covered by a range spec such as '1-5, 20, 30->exit'.

    Each token is either a single number ('20') or an inclusive span
    ('1-5'); non-numeric trailing text after a number ('->exit') is
    ignored.  Values are yielded lazily in order of appearance.
    """
    pattern = re.compile(r'(?P<start>\d+)-?(?P<end>\d+)?')
    for m in pattern.finditer(string):
        start = int(m.group('start'))
        stop = int(m.group('end')) + 1 if m.group('end') is not None else start + 1
        # FIX: the original accumulated generator expressions in a list
        # (flagged by its own "somehow a list of generators" comment) and
        # drained them in a nested loop; yield the values directly.
        yield from range(start, stop)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.