text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python3
import getopt
import select
import socket
import ssl
import sys
def usage(f=sys.stdout):
    """Write the command-line usage text for this program to stream *f*."""
    help_text = (
        "Usage: %s --tls-cert=FILENAME --tls-key=FILENAME HOST PORT\n"
        "--disable-tls run without TLS\n"
        "--tls-cert=FILENAME use this TLS certificate (required without --disable-tls)\n"
        "--tls-key=FILENAME use this TLS private key (required without --disable-tls)\n"
        "--help show this help\n"
    )
    f.write(help_text % sys.argv[0])
class options(object):
    """Namespace holding the parsed command-line settings."""
    use_tls = True              # TLS is on unless --disable-tls is given
    tls_cert_filename = None    # set by --tls-cert
    tls_key_filename = None     # set by --tls-key
# Sockets of all currently-connected clients.
CLIENT_SOCKETS = set()

def socket_to_username(s):
    """Derive a display name for a client from its file descriptor."""
    return "user %d" % s.fileno()

# Send msg to all connected clients.
def broadcast(msg):
    """Send *msg* (bytes) to every client; drop any client whose send fails."""
    for client in tuple(CLIENT_SOCKETS):
        try:
            client.sendall(msg)
        except socket.error:
            CLIENT_SOCKETS.remove(client)
# Parse command line options.
opts, args = getopt.gnu_getopt(sys.argv[1:], "", ["disable-tls", "tls-cert=", "tls-key=", "help"])
for o, a in opts:
    if o == "--disable-tls":
        options.use_tls = False
    elif o == "--tls-cert":
        options.tls_cert_filename = a
    elif o == "--tls-key":
        options.tls_key_filename = a
    elif o == "--help":
        usage()
        sys.exit()

# Positional arguments: HOST PORT.
try:
    listen_hostname, listen_port = args
    listen_port = int(listen_port)
except ValueError:
    usage(sys.stderr)
    sys.exit(1)

if options.use_tls and (options.tls_cert_filename is None or options.tls_key_filename is None):
    print("--tls-cert and --tls-key are required unless --disable-tls is used", file=sys.stderr)
    sys.exit(1)

# Open the listening socket.
listen_socket = socket.socket(socket.AF_INET)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind((listen_hostname, listen_port))
listen_socket.listen(0)

if options.use_tls:
    # Wrap listen_socket in TLS; accept() then returns ssl.SSLSocket objects.
    # (This replaces the previous NotImplementedError placeholder.)
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(options.tls_cert_filename, options.tls_key_filename)
    listen_socket = context.wrap_socket(listen_socket, server_side=True)

while True:
    # select.select will notify us which sockets are ready to read.
    rset, _, _ = select.select([listen_socket] + list(CLIENT_SOCKETS), [], [])
    for s in rset:
        if s == listen_socket:
            # s is listen_socket, accept a connection.
            # A failed TLS handshake surfaces here as ssl.SSLError.
            try:
                client_socket, _ = listen_socket.accept()
            except ssl.SSLError:
                continue
            CLIENT_SOCKETS.add(client_socket)
            broadcast(("*** %s entered the room.\n" % socket_to_username(client_socket)).encode())
        else:
            # s is a client socket, read and broadcast.
            try:
                data = s.recv(1024)
            except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                # TLS layer needs more I/O before data is available; retry later.
                continue
            except socket.error:
                data = None
            if data:
                broadcast(("<%s> " % socket_to_username(s)).encode() + data.rstrip(b"\n") + b"\n")
            else:
                # Empty read or error: the client disconnected.
                CLIENT_SOCKETS.remove(s)
                broadcast(("*** %s left the room.\n" % socket_to_username(s)).encode())
# python
import sys
import os
import time
import importlib
import argparse
import random
import re
# numpy
import numpy as np
# torch
import torch
from torch import nn, optim
# ours
from data import MonoTextData
from modules import VAE
from modules import LSTMEncoder, LSTMDecoder
# constants
# Separator line used in the generated sample files.
DIVIDER = '------------------------------------------\n'
# Dataset length statistics, filled lazily by calc_length_stats().
AVG_LENGTH = None
MIN_LENGTH = None
MAX_LENGTH = None
############################################
# MAIN SAMPLING CODE #
############################################
def test_generation(vae, vocab, args, data, epoch):
    """Generate samples from the model and write them to a file in args.save_dir.

    When *epoch* is None the output is named after the dataset (final test
    samples); otherwise it is named after the epoch.
    """
    # model to eval mode, just in case
    vae.eval()
    # build the header
    if epoch is None:
        title = 'Samples generated from model at {}\n'.format(args.model_path)
    else:
        title = 'Samples generated at the end of Epoch {}\n'.format(epoch)
    samples = [title, DIVIDER, '\n']
    # sentences sampled from the prior
    samples += sample_sentences(vae, vocab, args)
    # sentence reconstructions (z's sampled from posterior distributions)
    samples += reconstruction(vae, vocab, args, data)
    # write the results to file
    if epoch is None:
        file_name = '{}_test_samples'.format(args.dataset)
    else:
        file_name = 'samples_epoch{}'.format(epoch)
    with open(os.path.join(args.save_dir, file_name), 'w') as out:
        out.writelines(samples)
def sample_sentences(vae, vocab, args):
    """Decode args.num_sentences sentences from z's drawn from the prior.

    Returns a list of strings ready for ``writelines()``.

    Fixes: removed the unused local ``device`` and the aliasing of the header
    list (the old ``res = header`` mutated ``header`` in place).
    """
    vae.eval()
    res = [DIVIDER, '\tSamples from the prior\n', DIVIDER]
    for i in range(args.num_sentences):
        # sample z from the prior and decode it into a sentence string
        z = vae.sample_from_prior(1)
        sent = z2sent(vae, vocab, z, args)
        # prepend the sample number for convenience
        res.append('({}): '.format(i) + sent)
    res.append('\n\n')
    return res
def cold_interpolation(vae, vocab, args):
    """Interpolate between two z's drawn from the prior and decode each step.

    NOTE(review): this function references several names that are not defined
    in this module (`to_var`, `model`, `idx2word`, `i2w`, `w2i`) and ignores
    its `vae`/`vocab` parameters — it will raise NameError if called. It looks
    ported from another codebase; confirm before use.
    """
    res = ''
    # generate latent code endpoints
    z1 = torch.randn([args.nz]).numpy()
    z2 = torch.randn([args.nz]).numpy()
    # create the interpolations (interpolate() returns steps+2 points total)
    z = to_var(torch.from_numpy(interpolate(
        start=z1,
        end=z2,
        steps=(args.num_steps - 2)
    ))).float()
    # sample sentences from each of the interpolations
    samples, _ = model.inference(z=z)
    # create result string
    res += '------- COLD INTERPOLATION --------'
    for line in idx2word(samples, i2w=i2w, pad_idx=w2i['<pad>']):
        line = ''.join(line)
        line = clean_sample(line)
        res += '\n' + line
    res += '\n\n'
    return res
def warm_interpolation(vae, vocab, args, data):
    """Interpolate between the posterior codes of two random test sentences.

    NOTE(review): this function references names that are not defined in this
    module (`datasets`, `device`, `util`, `model`, `to_var`, `idx2word`,
    `i2w`, `w2i`) and ignores its parameters — it will raise NameError if
    called. It looks ported from another codebase; confirm before use.
    """
    res = ''
    # pick two random sentences
    i = random.randint(0, len(datasets['test']))
    j = random.randint(0, len(datasets['test']))
    # convert the sentences to tensors
    s_i = torch.tensor([datasets['test'][i]['input']], device=device)
    s_i_length = torch.tensor([datasets['test'][i]['length']], device=util.DEVICE)
    s_j = torch.tensor([datasets['test'][j]['input']], device=device)
    s_j_length = torch.tensor([datasets['test'][j]['length']], device=util.DEVICE)
    # encode the two sentences into latent space
    with torch.no_grad():
        _, _, _, z_i = model(s_i, s_i_length)
        _, _, _, z_j = model(s_j, s_j_length)
        z_i, z_j = z_i.cpu(), z_j.cpu()
    # create the interpolation between the two posterior codes
    z1, z2 = z_i.squeeze().numpy(), z_j.squeeze().numpy()
    z = to_var(torch.from_numpy(interpolate(start=z1, end=z2, steps=8)).float())
    # generate samples from each code point
    samples, _ = model.inference(z=z)
    # create the result string
    res += '------- WARM INTERPOLATION --------\n'
    res += '(Original 1): '
    line = ''.join(idx2word(
        s_i,
        i2w=i2w,
        pad_idx=w2i['<pad>']
    ))
    line = clean_sample(line)
    res += line + '\n'
    for line in idx2word(samples, i2w=i2w, pad_idx=w2i['<pad>']):
        line = ''.join(line)
        line = clean_sample(line)
        res += line + '\n'
    res += '(Original 2): '
    line = ''.join(idx2word(
        s_j,
        i2w=i2w,
        pad_idx=w2i['<pad>']
    ))
    line = clean_sample(line)
    res += line + '\n\n'
    return res
def reconstruction(vae, vocab, args, data):
    """Encode sampled sentences and decode them back from the posterior.

    For each of the short / random / long groups (args.num_reconstructions
    sentences each), decodes the posterior mean plus three decodes of z's
    sampled around the mean, and returns the formatted report as a list of
    strings ready for ``writelines()``.
    """
    # model to eval, just in case
    vae.eval()
    # initialize the results list
    res = []
    # initialize the sentences data structure we'll use
    sentences = []
    device = args.device  # NOTE(review): unused local; kept as-is
    # initialize the header
    header = [DIVIDER]
    header.append('\tReconstructed sentences\n')
    header.append(DIVIDER)
    # set results string to the header to start with
    res = header
    # pick args.num_reconstructions sentences of each type (short, random, long)
    for _ in range(args.num_reconstructions):
        # pick short sentences
        s_short = get_random_short(data, args)
        sentences.append({
            'original': s_short,
            'type': 'short'
        })
    for _ in range(args.num_reconstructions):
        # pick random sentences
        s_rand = get_random(data, args)
        sentences.append({
            'original': s_rand,
            'type': 'random'
        })
    for _ in range(args.num_reconstructions):
        # pick long sentences
        s_long = get_random_long(data, args)
        sentences.append({
            'original': s_long,
            'type': 'long'
        })
    # reconstruct each sentence
    for i, sentence in enumerate(sentences):
        # get the mean and logvar of each encoded sentence
        with torch.no_grad():
            sent = torch.tensor(sentence['original'], device=args.device)
            sent = sent.unsqueeze(0)  # add a batch dimension
            mean, log_v = vae.encoder(sent)
            mean, log_v = mean.cpu()[0], log_v.cpu()[0]
            stdev = torch.exp(0.5 * log_v)
        # decode the mean sentence
        mean_sentence = z2sent(vae, vocab, mean, args)
        # save the mean sentence
        sentence['mean'] = mean
        sentence['mean_sentence'] = mean_sentence
        # decode a number of sentences sampled from the latent code
        # (1) sample a latent code: z ~ N(mean, stdev^2) via reparameterization
        sentence['random_samples'] = []
        for j in range(3):
            z = torch.randn(args.nz)
            z = z * stdev + mean
            # (2) decode the latent code
            random_sentence = z2sent(vae, vocab, z, args)
            # save the random sentences with their z values
            sentence['random_samples'].append((
                random_sentence,
                z.cpu()
            ))
    # create the result string
    for i, sentence in enumerate(sentences):
        ###############################
        # PRINT THE ORIGINAL SENTENCE #
        ###############################
        res.append('\nSentence [{}] ({})\n'.format(i, sentence['type']))
        res.append('(Original):\n\t')
        res.append(idx2sent(sentence['original'], vocab))
        ###################################
        # PRINT THE MEAN DECODED SENTENCE #
        ###################################
        res.append('(Mean):\n\t')
        res.append(sentence['mean_sentence'])
        ########################################
        # PRINT THE RANDOMLY SAMPLED SENTENCES #
        ########################################
        res.append('(Random Samples):\n')
        for sent, z in sentence['random_samples']:
            # get the distance from the mean and print it in parens
            mean = sentence['mean']
            res.append('({:.3f} from mean)\n\t'.format(np.linalg.norm(mean - z)))
            res.append(sent)
            res.append('\n')
        res.append('\n\n')
    return res
############################################
# HELPER METHODS #
############################################
def interpolate(start, end, steps):
    """Linearly interpolate between 1-D arrays *start* and *end*.

    Returns an array of shape ``(steps + 2, ndim)`` whose first and last rows
    are *start* and *end* (i.e. *steps* intermediate points plus endpoints).
    """
    n_points = steps + 2
    per_dim = [np.linspace(s, e, n_points) for s, e in zip(start, end)]
    return np.stack(per_dim, axis=0).T
def idx2sent(idx, vocab):
    """Convert a sequence of token ids into one cleaned sentence string."""
    # idx -> word strings -> single space-joined, cleaned line
    words = vocab.decode_sentence(idx)
    return clean_sample(' '.join(words)) + '\n'
def z2sent(vae, vocab, z, args):
    """Decode latent code *z* into a cleaned, newline-terminated sentence."""
    device = args.device
    # decoder expects z shaped (1, 1, nz)
    z = z.view(1, 1, -1).to(device)
    # decoding starts at '<s>' and stops at '</s>'
    start_id = vocab.word2id['<s>']
    end_id = vocab.word2id['</s>']
    start_tok = torch.tensor([[start_id]]).to(device)
    # decode z into token ids, then ids into words
    token_ids = vae.decoder.sample_text(start_tok, z, end_id, device)
    words = vocab.decode_sentence(token_ids)
    # join with spaces, clean the line, and append '\n'
    return clean_sample(' '.join(words)) + '\n'
def get_random(data, args):
    """Return a uniformly random sentence from *data* as a tensor on args.device."""
    idx = random.randint(0, len(data) - 1)
    return torch.tensor(data[idx], device=args.device)
def calc_length_stats(data, args):
    """Estimate dataset length statistics by random sampling.

    Sets the module globals AVG_LENGTH, MAX_LENGTH (threshold below which a
    sentence counts as "short") and MIN_LENGTH (threshold above which it
    counts as "long"), used by get_random_short / get_random_long.

    Bug fix: the running minimum previously started at 0, so for non-empty
    sentences it could never decrease and was always 0.
    """
    global AVG_LENGTH
    global MAX_LENGTH
    global MIN_LENGTH
    # warm up -- get a stochastic average of length, plus a stochastic min/max
    running_length = 0
    running_min = float('inf')
    running_max = 0
    for _ in range(args.sample_warmup_period):
        rand_sent = data[random.randint(0, len(data) - 1)]
        rand_length = len(rand_sent)
        running_length += rand_length
        running_min = min(running_min, rand_length)
        running_max = max(running_max, rand_length)
    # derive the short/long thresholds from the sampled statistics
    AVG_LENGTH = running_length / args.sample_warmup_period
    MAX_LENGTH = (AVG_LENGTH + running_min) / 3
    MIN_LENGTH = (AVG_LENGTH + running_max) / 2
    print("Stochastic average of dataset length:\t{}".format(AVG_LENGTH))
    print("\tworking max for short sequences:\t{}".format(MAX_LENGTH))
    print("\tworking min for long sequences:\t{}".format(MIN_LENGTH))
def get_random_short(data, args):
    """Return a random sentence shorter than MAX_LENGTH as a tensor.

    NOTE(review): loops forever if no sentence in *data* is short enough.
    """
    # compute the length thresholds on first use
    if AVG_LENGTH is None:
        calc_length_stats(data, args)
    # rejection-sample until a short enough sentence is found
    while True:
        idx = random.randint(0, len(data) - 1)
        if len(data[idx]) < MAX_LENGTH:
            return torch.tensor(data[idx], device=args.device)
def get_random_long(data, args):
    """Return a random sentence longer than MIN_LENGTH as a tensor.

    NOTE(review): loops forever if no sentence in *data* is long enough.
    """
    # compute the length thresholds on first use
    if AVG_LENGTH is None:
        calc_length_stats(data, args)
    # rejection-sample until a long enough sentence is found
    while True:
        idx = random.randint(0, len(data) - 1)
        if len(data[idx]) > MIN_LENGTH:
            return torch.tensor(data[idx], device=args.device)
def clean_sample(line):
    """Strip whitespace and '<sos>'/'<eos>' markers, then fix punctuation spacing."""
    line = line.strip()
    # remove a leading start-of-sentence / trailing end-of-sentence marker
    if line.startswith('<sos>'):
        line = line[len('<sos>'):]
    if line.endswith('<eos>'):
        line = line[:-len('<eos>')]
    # re-strip (the markers may have been padded) and fix the punctuation
    return clean_punctuation(line.strip())
def clean_punctuation(line):
    """Normalize spacing around punctuation in a detokenized sentence."""
    # join digit pairs separated by a spaced colon, e.g. "3 : 45" -> "3:45"
    line = re.sub(r'(\d+)\s+:\s+(\d+)', r"\1:\2", line)
    # "word , word" -> "word, word" (also for ':' and '.')
    line = re.sub(r"\s+(,|:|\.)\s+", r"\1 ", line)
    # "don ' t" -> "don't"
    line = re.sub(r"\s+'\s+", r"'", line)
    # drop space before a final period
    line = re.sub(r"\s+\.", r".", line)
    return line
#!/usr/bin/env python3
""" Basic Annotations """
def concat(str1: str, str2: str) -> str:
    """Return *str1* immediately followed by *str2*."""
    return f"{str1}{str2}"
|
import IceRayPy
import camera
def make( P_object_cargo ):
    # Build a render engine wired to a fresh camera and the given object cargo,
    # returning a cargo dict: {'this': engine, '0': camera cargo, '1': object cargo}.
    # NOTE(review): cargo dicts are assumed to expose the wrapped native object
    # under the 'this' key — confirm against camera.make() / IceRayPy.
    engine = IceRayPy.core.render.Engine()
    cargo_camera = camera.make()
    engine.camera( cargo_camera['this'] )
    cargo_object = P_object_cargo
    engine.object( cargo_object['this'] )
    return { 'this': engine, '0': cargo_camera, '1': cargo_object }
|
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet
from rasa_sdk.forms import FormAction
from rasa.core.events import Restarted
import random
import logging
import request_api
logger = logging.getLogger(__name__)
REQUESTED_SLOT = "requested_slot"
# Bucket index -> clothing size label.
_map_int2size = {'0': 'S', '1': 'M', '2': 'L', '3': 'XL', '4': 'XXL'}

def mapping_size(weight, height, style):
    """Map a (weight, height) pair onto a clothing size label.

    NOTE(review): *height* values like '1m68' are compared lexically as
    strings, which happens to order correctly only for these particular
    thresholds; *style* is accepted but unused.
    """
    w = int(weight)
    if w <= 49 and height <= '1m6':
        bucket = '0'
    elif w <= 55 and height <= '1m68':
        bucket = '1'
    elif w <= 62 and height <= '1m74':
        bucket = '2'
    elif w <= 67 and height <= '1m8':
        bucket = '3'
    else:
        bucket = '4'
    return _map_int2size[bucket]
def map_arr_to_arrDict(urls):
    """Wrap each url string in a ``{'url': url}`` dict and return the list."""
    return [{"url": u} for u in urls]
def get_imgs_tryon():
    """Return the hard-coded try-on preview image as a list of {'url': ...} dicts."""
    url = "https://mtv-fashion.com/wp-content/uploads/2017/05/ao-thun-co-tron-nam-mau-den-3.jpg"
    return map_arr_to_arrDict([url])
def get_clothes_recommend(type_item="phông", gender="nam", color="đen"):
    """Fetch recommended clothes images and attach a try-on button to each card."""
    imgs = request_api.get_clothes_recommend(type_item=type_item, gender=gender, color=color)
    cards = map_arr_to_arrDict(list(imgs))
    # every card gets the same try-on button payload
    button = {"title": "Thử mặc", "payload": "/affirm_agree"}
    for card in cards:
        card["buttons"] = button
    return cards
def get_suggest_stores():
    """Pick 4 random store links (from the first 5 listed) with random prices.

    NOTE(review): only the first 5 of the 9 listed shops can ever be chosen;
    confirm whether the sample range should cover the whole list.
    """
    shops = ["https://shopee.vn/search?keyword=áo%20phông",
             "https://tiki.vn/search?q=áo%20phông",
             "https://www.zanado.com/thoi-trang.html?run=1&q=áo+phông",
             "https://h2tshop.com/tim?q=áo+phông",
             "https://shop.steholmes.studio/t-shirts/",
             "https://www.lazada.vn/catalog/?q=áo+phông",
             "https://canifa.com/catalogsearch/result/?q=áo+phông",
             "https://www.zalora.com.my",
             "https://shop.steholmes.studio/t-shirts/"
             ]
    # one random price per listed shop
    prices = [request_api.random_price() for _ in range(9)]
    chosen = random.sample([i for i in range(0, 5)], k=4)
    shops = [shops[i] for i in chosen]
    sprices = [prices[i] for i in chosen]
    return shops, sprices
def form_fillsize(numbers):
    """Re-join whitespace-separated numbers with ' - ' separators."""
    return " - ".join(numbers.split())
class ItemForm(FormAction):
    """
    This form action collects the item-search slots from the user
    (type_item, gender, color); after all slots are filled, submit()
    acknowledges that the search is running.
    """

    def name(self):
        # type: () -> Text
        """Unique identifier of the form"""
        return "item_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """A list of required slots that the form has to fill"""
        return ["type_item", "gender", "color"]

    @staticmethod
    def type_items()-> List[Text]:
        # Candidate clothing types used when suggesting a type_item.
        return ["phông", "thể thao", 'sơ mi', 'váy']

    def request_next_slot(
        self,
        dispatcher,  # type: CollectingDispatcher
        tracker,  # type: Tracker
        domain,  # type: Dict[Text, Any]
    ):  # type: (...) -> Optional[List[Dict]]
        """Request the next unfilled slot, uttering a template if needed; return None when done"""
        for slot in self.required_slots(tracker):
            if self._should_request_slot(tracker, slot):
                logger.debug("Request next slot '{}'".format(slot))
                # Before asking for the item type, suggest three random types.
                if slot == 'type_item':
                    type_items = random.sample(self.type_items(), k=3)
                    dispatcher.utter_template('utter_suggest_type_item', tracker, type1=type_items[0].upper(),
                                              type2=type_items[1].upper(), type3=type_items[2])
                dispatcher.utter_template("utter_ask_{}".format(slot), tracker, silent_fail=False, **tracker.slots)
                return [SlotSet(REQUESTED_SLOT, slot)]
        # All required slots are already filled.
        return None

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        """Define what the form has to do after all required slots are filled"""
        dispatcher.utter_template("utter_item_finding", tracker)
        return []
class ActionRecommendClothes(Action):
    """Recommend clothes images matching the user's requested color."""

    def name(self):  # type: () -> Text
        return "action_recommend_clothes"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any]
    ) -> List[Dict[Text, Any]]:
        # Look up recommendations for the requested color and send them
        # as image cards.
        color = tracker.get_slot('color')
        clothes = get_clothes_recommend(color=color)
        # print(clothes)
        dispatcher.utter_template("utter_recommend_item_base_img",
                                  tracker,
                                  image=clothes)
        # dispatcher.utter_template("utter_button_tryon", tracker)
        return []
class ActionSuggestItem(Action):
    """Utter three hard-coded item suggestions."""

    def name(self):  # type: () -> Text
        return "action_suggest_item"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],) -> List[Dict[Text, Any]]:
        # Placeholder item names; upper-cased for display in the template.
        item_1 = "Áo 1"
        item_2 = "Áo 2"
        item_3 = "Áo 3"
        dispatcher.utter_template("utter_suggest_item", tracker, item1=item_1.upper(), item2=item_2.upper(), item3=item_3.upper())
        return []
class ActionRespondTryon(Action):
    """Send a hard-coded try-on result image to the user."""

    def name(self):  # type: () -> Text
        return "action_respond_tryon"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]:
        imgs = get_imgs_tryon()  # NOTE(review): unused local; kept as-is
        url = "https://fakerpbc247.files.wordpress.com/2019/08/87803.png"
        arrUrl = [url]
        tmps = map_arr_to_arrDict(arrUrl)
        # size = tracker.get_slot("size")
        dispatcher.utter_template("utter_respond_trying", tracker)
        dispatcher.utter_template("utter_respond_title", tracker)
        dispatcher.utter_template("utter_respond_tryon", tracker, image=tmps)
        return []
class ActionRecommendSize(Action):
    """Query the size-recommendation API with the user's measurements and
    reply with the returned image."""

    def name(self):  # type: () -> Text
        return "action_recommend_size"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]:
        # Measurements arrive as a whitespace-separated string of numbers;
        # reformat them as "a - b - c" for the API.
        numbers = tracker.get_slot('numbers')
        tmp = form_fillsize(numbers)
        url = request_api.get_response_recommend_size(tmp)
        # print(url)
        # url = "https://fakerpbc247.files.wordpress.com/2019/08/87803.png"
        # url = "http://" + url
        arrUrl = [url]
        tmps = map_arr_to_arrDict(arrUrl)
        # print(tmps)
        dispatcher.utter_template("utter_recommend_size", tracker, image=tmps)
        return []
class ActionSuggestStores(Action):
    """Suggest a few online stores (with prices) where the user can buy the item."""

    def name(self):  # type: () -> Text
        return "action_suggest_stores"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]:
        shops, sprices = get_suggest_stores()
        dispatcher.utter_template("utter_suggest_stores_title", tracker)
        # one utterance per suggested shop/price pair
        for shop, price in zip(shops, sprices):
            dispatcher.utter_template("utter_suggest_stores", tracker, shop=shop, price=price)
        # Bug fix: run() must return a list of events; it previously fell off
        # the end and returned None.
        return []
class ActionSuggestRecommendSize(Action):
    """Fetch the generic size-recommendation chart and send it as an image."""

    def name(self):  # type: () -> Text
        return "action_suggest_recommend_size"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]:
        url = request_api.get_recommend_size()
        print(url)
        # url = "fakerpbc247.files.wordpress.com/2019/08/recommend_size1.png"
        # url = "http://" + url
        # print(url)
        arrUrl = [url]
        tmps = map_arr_to_arrDict(arrUrl)
        # print(tmps)
        dispatcher.utter_template("utter_suggest_recommend_size", tracker, image=tmps)
        # Bug fix: run() must return a list of events; it previously fell off
        # the end and returned None.
        return []
class ActionRestart(Action):
    """Confirm the restart to the user and reset the whole conversation."""

    def name(self):
        return 'action_restart'

    def run(self, dispatcher, tracker, domain):
        dispatcher.utter_template('utter_restart', tracker)
        # Restarted() resets the conversation tracker.
        return [Restarted()]
class ActionResetSlot(Action):
    """Clear all item-search slots so a new search starts fresh."""

    def name(self):
        return 'action_reset_slot'

    def run(self, dispatcher, tracker, domain):
        # Reset every slot this bot fills during an item search.
        return [SlotSet('style', None),
                SlotSet('numbers', None),
                SlotSet('type_item', None),
                SlotSet('gender', None),
                SlotSet('color', None)]
from sys import stdin
from itertools import repeat
def merge(decks):
    """Interleave several decks of (number, letter) cards into one string.

    Total deck length is the sum of each input deck's highest card number;
    each card lands at index number-1 and unfilled positions are skipped.
    """
    total = 0
    for deck in decks:
        highest = 0
        for card in deck:
            if card[0] > highest:
                highest = card[0]
        total += highest
    slots = [None] * total
    for deck in decks:
        for card in deck:
            slots[card[0] - 1] = card
    return "".join(card[1] for card in slots if card is not None)
# Read decks from stdin, one per line, formatted "index:1,2,3".
# Fixes: Python-2-only `print` statement replaced with print() (works on 2
# and 3); no longer shadows the builtin `list`; zip() is materialized with
# list() so merge() can iterate each deck twice under Python 3.
decks = []
for line in stdin:
    index, cards = line.split(':')
    decks.append(list(zip(map(int, cards.split(',')), repeat(index))))
print(merge(decks))
import os
from datetime import datetime, timedelta
from sys import version_info as info
from typing import Iterable, List, Optional
import pytest
import requests
from auth0.v3.authentication import GetToken
from auth0.v3.management import Auth0 as Auth0sdk
from fastapi.security.http import HTTPAuthorizationCredentials
from jose import jwt
from starlette.status import HTTP_401_UNAUTHORIZED
from fastapi_cloudauth.auth0 import Auth0, Auth0Claims, Auth0CurrentUser
from fastapi_cloudauth.messages import NOT_VERIFIED
from tests.helpers import (
Auths,
BaseTestCloudAuth,
_assert_verifier,
_assert_verifier_no_error,
decode_token,
)
DOMAIN = os.getenv("AUTH0_DOMAIN")
MGMT_CLIENTID = os.getenv("AUTH0_MGMT_CLIENTID")
MGMT_CLIENT_SECRET = os.getenv("AUTH0_MGMT_CLIENT_SECRET")
CLIENTID = os.getenv("AUTH0_CLIENTID")
CLIENT_SECRET = os.getenv("AUTH0_CLIENT_SECRET")
AUDIENCE = os.getenv("AUTH0_AUDIENCE")
CONNECTION = "Username-Password-Authentication"
def assert_env():
    """Fail fast if any required AUTH0_* environment variable is missing."""
    required = [
        (DOMAIN, "AUTH0_DOMAIN"),
        (MGMT_CLIENTID, "AUTH0_MGMT_CLIENTID"),
        (MGMT_CLIENT_SECRET, "AUTH0_MGMT_CLIENT_SECRET"),
        (CLIENTID, "AUTH0_CLIENTID"),
        (CLIENT_SECRET, "AUTH0_CLIENT_SECRET"),
        (AUDIENCE, "AUTH0_AUDIENCE"),
    ]
    for value, name in required:
        assert value, "'{}' is not defined. Set environment variables".format(name)
def init() -> Auth0sdk:
    """
    instantiate Auth0 SDK class

    Goes to Auth0 dashboard and get followings.
    DOMAIN: domain of Auth0 Dashboard Backend Management Client's Applications
    MGMT_CLIENTID: client ID of Auth0 Dashboard Backend Management Client's Applications
    MGMT_CLIENT_SECRET: client secret of Auth0 Dashboard Backend Management Client's Applications
    """
    # Exchange the management client's credentials for a Management API token.
    get_token = GetToken(DOMAIN)
    token = get_token.client_credentials(
        MGMT_CLIENTID, MGMT_CLIENT_SECRET, f"https://{DOMAIN}/api/v2/",
    )
    mgmt_api_token = token["access_token"]
    auth0 = Auth0sdk(DOMAIN, mgmt_api_token)
    return auth0
def add_test_user(
    auth0: Auth0sdk,
    username=f"test_user{info.major}{info.minor}@example.com",
    password="testPass1-",
    scopes: Optional[List[str]] = None,
):
    """create test user with Auth0 SDK

    Requirements:
    CLIENTID: client id of `Default App`. See Applications in Auth0 dashboard
    AUDIENCE: create custom API in Auth0 dashboard and add custom permission (`read:test`).
    Then, assign that identifier as AUDIENCE.
    """
    # Sign the user up through the public database-connection signup endpoint.
    resp = requests.post(
        f"https://{DOMAIN}/dbconnections/signup",
        {
            "client_id": CLIENTID,
            "email": username,
            "password": password,
            "connection": CONNECTION,
            "username": username,
        },
    )
    # Auth0 user ids for DB-connection users are prefixed with "auth0|".
    user_id = f"auth0|{resp.json()['_id']}"
    if scopes:
        # Grant the requested custom-API permissions directly to the user.
        auth0.users.add_permissions(
            user_id,
            [
                {"permission_name": scope, "resource_server_identifier": AUDIENCE}
                for scope in scopes
            ],
        )
def delete_user(
    auth0: Auth0sdk,
    username=f"test_user{info.major}{info.minor}@example.com",
    password="testPass1-",
):
    """delete test user with Auth0 SDK"""
    # Log in as the user to discover its user id; if login fails there is
    # no matching user (or wrong password), so there is nothing to delete.
    access_token = get_access_token(username=username, password=password)
    if not access_token:
        return
    # The token's `sub` claim is the Auth0 user id.
    user_id = jwt.get_unverified_claims(access_token)["sub"]
    auth0.users.delete(user_id)
def get_access_token(
    username=f"test_user{info.major}{info.minor}@example.com", password="testPass1-",
) -> Optional[str]:
    """Log in with the password grant and return an access token (or None on failure).

    Requirements:
        DOMAIN: domain of Auth0 Dashboard Backend Management Client's Applications
        CLIENTID: Set client id of `Default App` in environment variable. See Applications in Auth0 dashboard
        CLIENT_SECRET: Set client secret of `Default App` in environment variable
        AUDIENCE: In Auth0 dashboard, create custom applications and API,
            and add permission `read:test` into that API,
            and then copy the audience (identifier) in environment variable.

    NOTE: the followings setting in Auth0 dashboard is required
        - sidebar > Applications > settings > Advanced settings > grant: click `password` on
        - top right icon > Set General > API Authorization Settings > Default Directory to Username-Password-Authentication
    """
    # Resource Owner Password grant against the custom API (AUDIENCE).
    resp = requests.post(
        f"https://{DOMAIN}/oauth/token",
        headers={"content-type": "application/x-www-form-urlencoded"},
        data={
            "grant_type": "password",
            "username": username,
            "password": password,
            "client_id": CLIENTID,
            "client_secret": CLIENT_SECRET,
            "audience": AUDIENCE,
        },
    )
    # .get() yields None when the login failed (e.g. the user does not exist).
    access_token = resp.json().get("access_token")
    return access_token
def get_id_token(
    username=f"test_user{info.major}{info.minor}@example.com", password="testPass1-",
) -> Optional[str]:
    """Log in with the password grant and return an ID token (or None on failure).

    Same as get_access_token() but without an `audience`, so the response's
    `id_token` field is used instead.

    Requirements:
        DOMAIN: domain of Auth0 Dashboard Backend Management Client's Applications
        CLIENTID: Set client id of `Default App` in environment variable. See Applications in Auth0 dashboard
        CLIENT_SECRET: Set client secret of `Default App` in environment variable

    NOTE: the followings setting in Auth0 dashboard is required
        - sidebar > Applications > settings > Advanced settings > grant: click `password` on
        - top right icon > Set General > API Authorization Settings > Default Directory to Username-Password-Authentication
    """
    resp = requests.post(
        f"https://{DOMAIN}/oauth/token",
        headers={"content-type": "application/x-www-form-urlencoded"},
        data={
            "grant_type": "password",
            "username": username,
            "password": password,
            "client_id": CLIENTID,
            "client_secret": CLIENT_SECRET,
        },
    )
    # .get() yields None when the login failed.
    id_token = resp.json().get("id_token")
    return id_token
class Auth0Client(BaseTestCloudAuth):
    """
    Test fixture that provisions real Auth0 users and builds the token /
    authenticator bundle used by the integration tests.
    NOTE: RBAC setting must be able
    """

    # Test credentials; the username embeds the Python version so parallel
    # CI runs on different interpreter versions do not collide.
    username = f"test_user{info.major}{info.minor}@example.com"
    password = "testPass1-"

    def setup(self, scope: Iterable[str]) -> None:
        """Create two users (first-scope-only vs fully scoped) and collect their tokens."""
        assert_env()
        auth0sdk = init()
        self.scope = scope
        # Separate username for the fully-scoped user, derived from the scopes.
        self.scope_username = (
            f"{'-'.join(self.scope).replace(':', '-')}{self.username}"
            if self.scope
            else self.username
        )
        # Recreate the base user with only the first scope, then fetch tokens.
        delete_user(auth0sdk, username=self.username, password=self.password)
        add_test_user(
            auth0sdk,
            username=self.username,
            password=self.password,
            scopes=[self.scope[0]],
        )
        self.ACCESS_TOKEN = get_access_token(
            username=self.username, password=self.password
        )
        self.ID_TOKEN = get_id_token(username=self.username, password=self.password)
        # Recreate the scoped user with all requested scopes.
        delete_user(auth0sdk, username=self.scope_username)
        add_test_user(
            auth0sdk,
            username=self.scope_username,
            password=self.password,
            scopes=self.scope,
        )
        self.SCOPE_ACCESS_TOKEN = get_access_token(
            username=self.scope_username, password=self.password
        )
        self.auth0sdk = auth0sdk

        # Claims model with a field real tokens never carry -> always invalid.
        class Auth0InvalidClaims(Auth0Claims):
            fake_field: str

        class Auth0FakeCurrentUser(Auth0CurrentUser):
            user_info = Auth0InvalidClaims

        assert DOMAIN and AUDIENCE and CLIENTID
        self.TESTAUTH = Auths(
            protect_auth=Auth0(domain=DOMAIN, customAPI=AUDIENCE),
            protect_auth_ne=Auth0(domain=DOMAIN, customAPI=AUDIENCE, auto_error=False),
            ms_auth=Auth0CurrentUser(domain=DOMAIN, client_id=CLIENTID),
            ms_auth_ne=Auth0CurrentUser(
                domain=DOMAIN, client_id=CLIENTID, auto_error=False
            ),
            invalid_ms_auth=Auth0FakeCurrentUser(domain=DOMAIN, client_id=CLIENTID),
            invalid_ms_auth_ne=Auth0FakeCurrentUser(
                domain=DOMAIN, client_id=CLIENTID, auto_error=False
            ),
            valid_claim=Auth0Claims,
            invalid_claim=Auth0InvalidClaims,
        )

    def teardown(self):
        """Remove both provisioned test users."""
        delete_user(self.auth0sdk, self.username)
        delete_user(self.auth0sdk, self.scope_username)

    def decode(self):
        """Sanity-check the captured tokens' headers and claims."""
        # access token
        header, payload, *_ = decode_token(self.ACCESS_TOKEN)
        assert header.get("typ") == "JWT"
        assert [self.scope[0]] == payload.get("permissions")
        # scope access token
        scope_header, scope_payload, *_ = decode_token(self.SCOPE_ACCESS_TOKEN)
        assert scope_header.get("typ") == "JWT"
        assert set(self.scope) == set(scope_payload.get("permissions"))
        # id token
        id_header, id_payload, *_ = decode_token(self.ID_TOKEN)
        assert id_header.get("typ") == "JWT"
        assert id_payload.get("email") == self.username
@pytest.mark.unittest
def test_extra_verify_access_token():
    """
    Testing for access token validation:
    - validate standard claims: Token expiration (exp) and Token issuer (iss)
    - verify token audience (aud) claims
    Ref: https://auth0.com/docs/tokens/access-tokens/validate-access-tokens
    """
    domain = DOMAIN
    customAPI = "https://dummy-domain"
    issuer = "https://dummy"
    # one verifier that raises on failure, one with auto_error disabled
    auth = Auth0(domain=domain, customAPI=customAPI, issuer=issuer)
    verifier = auth._verifier
    auth_no_error = Auth0(
        domain=domain, customAPI=customAPI, issuer=issuer, auto_error=False
    )
    verifier_no_error = auth_no_error._verifier

    # correct: all claims consistent with the verifier's configuration
    token = jwt.encode(
        {
            "sub": "dummy-ID",
            "exp": datetime.utcnow() + timedelta(hours=10),
            "iat": datetime.utcnow() - timedelta(hours=10),
            "aud": customAPI,
            "iss": issuer,
        },
        "dummy_secret",
        headers={"alg": "HS256", "typ": "JWT", "kid": "dummy-kid"},
    )
    verifier._verify_claims(HTTPAuthorizationCredentials(scheme="a", credentials=token))
    verifier_no_error._verify_claims(
        HTTPAuthorizationCredentials(scheme="a", credentials=token)
    )

    # Testing for validation of JWT standard claims
    # invalid iss
    token = jwt.encode(
        {
            "sub": "dummy-ID",
            "exp": datetime.utcnow() + timedelta(hours=10),
            "iat": datetime.utcnow() - timedelta(hours=10),
            "aud": customAPI,
            "iss": "invalid" + issuer,
        },
        "dummy_secret",
        headers={"alg": "HS256", "typ": "JWT", "kid": "dummy-kid"},
    )
    e = _assert_verifier(token, verifier)
    assert e.status_code == HTTP_401_UNAUTHORIZED and e.detail == NOT_VERIFIED
    _assert_verifier_no_error(token, verifier_no_error)

    # invalid expiration (token expired 5 hours ago)
    token = jwt.encode(
        {
            "sub": "dummy-ID",
            "exp": datetime.utcnow() - timedelta(hours=5),
            "iat": datetime.utcnow() - timedelta(hours=10),
            "aud": customAPI,
            "iss": issuer,
        },
        "dummy_secret",
        headers={"alg": "HS256", "typ": "JWT", "kid": "dummy-kid"},
    )
    e = _assert_verifier(token, verifier)
    assert e.status_code == HTTP_401_UNAUTHORIZED and e.detail == NOT_VERIFIED
    _assert_verifier_no_error(token, verifier_no_error)

    # Testing for access token specific verification
    # invalid aud
    # aud must be same as custom API
    token = jwt.encode(
        {
            "sub": "dummy-ID",
            "exp": datetime.utcnow() + timedelta(hours=10),
            "iat": datetime.utcnow() - timedelta(hours=10),
            "aud": customAPI + "incorrect",
            "iss": issuer,
        },
        "dummy_secret",
        headers={"alg": "HS256", "typ": "JWT", "kid": "dummy-kid"},
    )
    e = _assert_verifier(token, verifier)
    assert e.status_code == HTTP_401_UNAUTHORIZED and e.detail == NOT_VERIFIED
    _assert_verifier_no_error(token, verifier_no_error)
@pytest.mark.unittest
def test_extra_verify_id_token():
    """
    Testing for ID token validation:
    - validate standard claims: Token expiration (exp) and Token issuer (iss)
    - verify token audience (aud) claims: same as Client ID
    - verify Nonce
    Ref: https://auth0.com/docs/tokens/id-tokens/validate-id-tokens
    """
    domain = DOMAIN
    client_id = "dummy-client-ID"
    nonce = "dummy-nonce"
    issuer = "https://dummy"
    auth = Auth0CurrentUser(
        domain=domain, client_id=client_id, nonce=nonce, issuer=issuer
    )
    verifier = auth._verifier
    auth_no_error = Auth0CurrentUser(
        domain=domain, client_id=client_id, nonce=nonce, issuer=issuer, auto_error=False
    )
    verifier_no_error = auth_no_error._verifier

    def make_token(**claims):
        """Build an HS256 JWT from valid default claims plus overrides.

        Claims passed as keyword arguments are merged over the defaults, so
        each scenario only states what differs from a valid token.  Note the
        defaults deliberately omit ``nonce`` (matching the non-nonce cases).
        """
        payload = {
            "sub": "dummy-ID",
            "exp": datetime.utcnow() + timedelta(hours=10),
            "iat": datetime.utcnow() - timedelta(hours=10),
            "aud": client_id,
            "iss": issuer,
        }
        payload.update(claims)
        return jwt.encode(
            payload,
            "dummy_secret",
            headers={"alg": "HS256", "typ": "JWT", "kid": "dummy-kid"},
        )

    def assert_rejected(token):
        """Strict verifier must raise 401/NOT_VERIFIED; lenient one must not."""
        e = _assert_verifier(token, verifier)
        assert e.status_code == HTTP_401_UNAUTHORIZED and e.detail == NOT_VERIFIED
        _assert_verifier_no_error(token, verifier_no_error)

    # correct: all claims valid, including the nonce
    token = make_token(nonce=nonce)
    verifier._verify_claims(HTTPAuthorizationCredentials(scheme="a", credentials=token))
    verifier_no_error._verify_claims(
        HTTPAuthorizationCredentials(scheme="a", credentials=token)
    )
    # Testing for validation of JWT standard claims
    # invalid iss
    assert_rejected(make_token(iss="invalid" + issuer))
    # invalid expiration
    assert_rejected(make_token(exp=datetime.utcnow() - timedelta(hours=5)))
    # Testing for ID token specific verification
    # invalid aud: aud must be same as Client ID
    assert_rejected(make_token(aud=client_id + "incorrect"))
    # invalid nonce
    assert_rejected(make_token(nonce=nonce + "invalid"))
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import multiprocessing
import os
import re
import socket
import subprocess
import sys
import warnings
from django.conf import settings
from django.core.management import base
from django import template
# Suppress DeprecationWarnings which clutter the output to the point of
# rendering it unreadable.
warnings.simplefilter('ignore')
# Name of this management command: the last component of the module path.
cmd_name = __name__.split('.')[-1]
# Directory containing this module.
CURDIR = os.path.realpath(os.path.dirname(__file__))
# Project root: two levels above this file.
PROJECT_PATH = os.path.realpath(os.path.join(CURDIR, '../..'))
# Default static-assets directory, a sibling of the project tree.
STATIC_PATH = os.path.realpath(os.path.join(PROJECT_PATH, '../static'))
# Known apache regular expression to retrieve its version
APACHE_VERSION_REG = r'Apache/(?P<version>[\d.]*)'
# Known apache commands to retrieve its version
APACHE2_VERSION_CMDS = (
    (('/usr/sbin/apache2ctl', '-V'), APACHE_VERSION_REG),
    (('/usr/sbin/apache2', '-v'), APACHE_VERSION_REG),
)
# Known apache log directory locations
APACHE_LOG_DIRS = (
    '/var/log/httpd',  # RHEL / Red Hat / CentOS / Fedora Linux
    '/var/log/apache2',  # Debian / Ubuntu Linux
)
# Default log directory
DEFAULT_LOG_DIR = '/var/log'
def _getattr(obj, name, default):
"""Like getattr but return `default` if None or False.
By default, getattr(obj, name, default) returns default only if
attr does not exist, here, we return `default` even if attr evaluates to
None or False.
"""
value = getattr(obj, name, default) or default
return value
# Template context shared by the WSGI-file and apache-vhost templates.
# NOTE(review): assumes DJANGO_SETTINGS_MODULE is set in the environment
# (management commands normally guarantee this) -- KeyError otherwise.
context = template.Context({
    'DJANGO_SETTINGS_MODULE': os.environ['DJANGO_SETTINGS_MODULE'],
    'HOSTNAME': socket.getfqdn(),
    'PROJECT_PATH': os.path.realpath(
        _getattr(settings, 'ROOT_PATH', PROJECT_PATH)),
    'STATIC_PATH': os.path.realpath(
        _getattr(settings, 'STATIC_ROOT', STATIC_PATH)),
    'SSLCERT': '/etc/pki/tls/certs/ca.crt',
    'SSLKEY': '/etc/pki/tls/private/ca.key',
    'CACERT': None,
    'PROCESSES': multiprocessing.cpu_count() + 1,
    'PYTHON_EXEC': sys.executable,
})
# Derive the project directory names from the (possibly overridden) path.
context['PROJECT_ROOT'] = os.path.dirname(context['PROJECT_PATH'])
context['PROJECT_DIR_NAME'] = os.path.basename(
    context['PROJECT_PATH'].split(context['PROJECT_ROOT'])[1])
context['PROJECT_NAME'] = context['PROJECT_DIR_NAME']
context['DEFAULT_WSGI_FILE'] = os.path.join(
    context['PROJECT_PATH'], 'wsgi.py')
context['WSGI_FILE'] = os.path.join(
    context['PROJECT_PATH'], 'horizon_wsgi.py')
# Virtual host name: replace the host's short name with the project name.
VHOSTNAME = context['HOSTNAME'].split('.')
VHOSTNAME[0] = context['PROJECT_NAME']
context['VHOSTNAME'] = '.'.join(VHOSTNAME)
if len(VHOSTNAME) > 1:
    context['DOMAINNAME'] = '.'.join(VHOSTNAME[1:])
else:
    # Unqualified hostname: fall back to a generic domain.
    context['DOMAINNAME'] = 'openstack.org'
context['ADMIN'] = 'webmaster@%s' % context['DOMAINNAME']
# When running inside a virtualenv, the generated WSGI file must activate it.
context['ACTIVATE_THIS'] = None
virtualenv = os.environ.get('VIRTUAL_ENV')
if virtualenv:
    activate_this = os.path.join(
        virtualenv, 'bin/activate_this.py')
    if os.path.exists(activate_this):
        context['ACTIVATE_THIS'] = activate_this
# Try to detect apache's version
# We fallback on 2.4.
context['APACHE2_VERSION'] = 2.4
APACHE2_VERSION = None
for cmd in APACHE2_VERSION_CMDS:
    if os.path.exists(cmd[0][0]):
        try:
            reg = re.compile(cmd[1])
            output = subprocess.check_output(cmd[0], stderr=subprocess.STDOUT)
            if isinstance(output, bytes):
                output = output.decode()
            res = reg.search(output)
            if res:
                APACHE2_VERSION = res.group('version')
                break
        except subprocess.CalledProcessError:
            # Command exists but failed to run; try the next candidate.
            pass
if APACHE2_VERSION:
    # Keep only "major.minor" as a float, e.g. '2.4.41' -> 2.4.
    ver_nums = APACHE2_VERSION.split('.')
    if len(ver_nums) >= 2:
        try:
            context['APACHE2_VERSION'] = float('.'.join(ver_nums[:2]))
        except ValueError:
            pass
def find_apache_log_dir():
    """Return the first known apache log directory that exists, else the
    system default log directory."""
    for log_dir in APACHE_LOG_DIRS:
        if os.path.exists(log_dir) and os.path.isdir(log_dir):
            return log_dir
    return DEFAULT_LOG_DIR
context['LOGDIR'] = find_apache_log_dir()
class Command(base.BaseCommand):
    """Generate the project's WSGI file or an apache vhost configuration."""
    args = ''
    help = """Create %(wsgi_file)s
or the contents of an apache %(p_name)s.conf file (on stdout).
The apache configuration is generated on stdout because the place of this
file is distribution dependent.
examples::
    manage.py %(cmd_name)s --wsgi # creates %(wsgi_file)s
    manage.py %(cmd_name)s --apache # creates an apache vhost conf file (on \
stdout).
    manage.py %(cmd_name)s --apache --ssl --mail=%(admin)s \
--project=%(p_name)s --hostname=%(hostname)s
To create an apache configuration file, redirect the output towards the
location you desire, e.g.::
    manage.py %(cmd_name)s --apache > \
/etc/httpd/conf.d/openstack_dashboard.conf
""" % {
        'cmd_name': cmd_name,
        'p_name': context['PROJECT_NAME'],
        'wsgi_file': context['WSGI_FILE'],
        'admin': context['ADMIN'],
        'hostname': context['VHOSTNAME'], }

    def add_arguments(self, parser):
        """Register the command-line options for this command."""
        # TODO(ygbo): Add an --nginx option.
        parser.add_argument(
            "-a", "--apache",
            default=False, action="store_true", dest="apache",
            help="generate an apache vhost configuration"
        )
        parser.add_argument(
            "--cacert",
            dest="cacert",
            help=("Use with the --apache and --ssl option to define the path"
                  " to the SSLCACertificateFile"),
            metavar="CACERT"
        )
        parser.add_argument(
            "-f", "--force",
            default=False, action="store_true", dest="force",
            help="force overwriting of an existing %s file" %
            context['WSGI_FILE']
        )
        parser.add_argument(
            "-H", "--hostname",
            dest="hostname",
            help=("Use with the --apache option to define the server's"
                  " hostname (default : %s)") % context['VHOSTNAME'],
            metavar="HOSTNAME"
        )
        parser.add_argument(
            "--logdir",
            dest="logdir",
            help=("Use with the --apache option to define the path to "
                  "the apache log directory(default : %s)"
                  % context['LOGDIR']),
            # Fixed: metavar previously said "CACERT" (copy-paste error).
            metavar="LOGDIR"
        )
        parser.add_argument(
            "-m", "--mail",
            dest="mail",
            help=("Use with the --apache option to define the web site"
                  " administrator's email (default : %s)") %
            context['ADMIN'],
            metavar="MAIL"
        )
        parser.add_argument(
            "-n", "--namedhost",
            default=False, action="store_true", dest="namedhost",
            help=("Use with the --apache option. The apache vhost "
                  "configuration will work only when accessed with "
                  "the proper hostname (see --hostname).")
        )
        parser.add_argument(
            "--processes",
            dest="processes",
            help=("Use with the --apache option to define the number of "
                  "apache processes (by default the number of cpus +1 which "
                  "is %s on this machine).") % context['PROCESSES'],
            metavar="PROCESSES"
        )
        parser.add_argument(
            "-p", "--project",
            dest="project",
            help=("Use with the --apache option to define the project "
                  "name (default : %s)") % context['PROJECT_NAME'],
            metavar="PROJECT"
        )
        parser.add_argument(
            "-s", "--ssl",
            default=False, action="store_true", dest="ssl",
            help=("Use with the --apache option. The apache vhost "
                  "configuration will use an SSL configuration")
        )
        parser.add_argument(
            "--sslcert",
            dest="sslcert",
            help=("Use with the --apache and --ssl option to define "
                  "the path to the SSLCertificateFile (default : %s)"
                  ) % context['SSLCERT'],
            metavar="SSLCERT"
        )
        parser.add_argument(
            "--sslkey",
            dest="sslkey",
            help=("Use with the --apache and --ssl option to define "
                  "the path to the SSLCertificateKeyFile "
                  "(default : %s)") % context['SSLKEY'],
            metavar="SSLKEY"
        )
        parser.add_argument(
            "--apache-version",
            dest="apache_version",
            type=float,
            help=("Use with the --apache option to define the apache "
                  "major (as a floating point number) version "
                  "(default : %s)."
                  % context['APACHE2_VERSION']),
            metavar="APACHE_VERSION"
        )
        parser.add_argument(
            "-w", "--wsgi",
            default=False, action="store_true", dest="wsgi",
            help="generate the horizon.wsgi file"
        )

    def handle(self, *args, **options):
        """Render the WSGI file and/or the apache vhost configuration.

        Command-line options override the auto-detected module-level
        ``context`` defaults before rendering the templates.
        """
        force = options.get('force')
        context['SSL'] = options.get('ssl')
        if options.get('mail'):
            context['ADMIN'] = options['mail']
        if options.get('cacert'):
            context['CACERT'] = options['cacert']
        if options.get('logdir'):
            context['LOGDIR'] = options['logdir'].rstrip('/')
        if options.get('processes'):
            context['PROCESSES'] = options['processes']
        if options.get('project'):
            context['PROJECT_NAME'] = options['project']
        if options.get('hostname'):
            context['VHOSTNAME'] = options['hostname']
        if options.get('sslcert'):
            context['SSLCERT'] = options['sslcert']
        if options.get('sslkey'):
            context['SSLKEY'] = options['sslkey']
        if options.get('apache_version'):
            context['APACHE2_VERSION'] = options['apache_version']
        if options.get('namedhost'):
            context['NAMEDHOST'] = context['VHOSTNAME']
        else:
            # Match any host when --namedhost is not requested.
            context['NAMEDHOST'] = '*'
        # Generate the WSGI.
        if options.get('wsgi'):
            with open(
                os.path.join(CURDIR, 'horizon.wsgi.template'), 'r'
            ) as fp:
                wsgi_template = template.Template(fp.read())
            if not os.path.exists(context['WSGI_FILE']) or force:
                with open(context['WSGI_FILE'], 'w') as fp:
                    fp.write(wsgi_template.render(context))
                print('Generated "%s"' % context['WSGI_FILE'])
            else:
                sys.exit('"%s" already exists, use --force to overwrite' %
                         context['WSGI_FILE'])
        # Generate the apache configuration.
        elif options.get('apache'):
            # first check if custom wsgi file exists, if not, use default:
            if not os.path.exists(context['WSGI_FILE']):
                context['WSGI_FILE'] = context['DEFAULT_WSGI_FILE']
            with open(
                os.path.join(CURDIR, 'apache_vhost.conf.template'), 'r'
            ) as fp:
                wsgi_template = template.Template(fp.read())
            sys.stdout.write(wsgi_template.render(context))
        else:
            # Neither --wsgi nor --apache given: show usage.
            self.print_help('manage.py', cmd_name)
|
from django.core.management.base import BaseCommand
import layzer.startup
from layzer.layzerjobs import UpdateFeedsJob
class Command(BaseCommand):
    """Management command that refreshes every feed subscription."""

    args = ""
    help = "Refresh all subscriptions"

    def handle(self, *args, **options):
        # Delegate to the scheduled-job implementation so the command and
        # the scheduler share a single code path.
        job = UpdateFeedsJob()
        job.run()
|
# -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
import re
import os
import logging
_LOGGER = logging.getLogger(__name__)
# Directory holding the image files to garbage-collect.
_IMAGE_DIR = "_images"
# Matches references like ``_images/foo.png``; group 1 is the file name.
_IMAGE_PATTERN = re.compile(r"_images/(.+\.(png|jpg))")
def iter_image_names():
    """Yield image names (paths relative to ``_images/``) referenced by any
    ``.rst``/``.md`` document under the current directory.

    Directories whose names start with ``_``, ``.`` or ``scripts`` are
    pruned from the walk so hidden/generated trees are not scanned.
    """
    for dirpath, dirnames, filenames in os.walk("."):
        # Prune in place so os.walk does not descend into skipped dirs.
        dirnames[:] = [
            i for i in dirnames if not (i.startswith(("_", ".", "scripts")))
        ]
        for i in filenames:
            if not i.endswith((".rst", ".md")):
                continue
            p = os.path.join(dirpath, i)
            _LOGGER.debug("read: %s", p)
            with open(p, encoding="utf-8") as f:
                for line in f:
                    # finditer only yields successful matches, so the former
                    # ``if match:`` guard was redundant and has been removed.
                    for match in _IMAGE_PATTERN.finditer(line):
                        yield match.group(1)
def main():
    """Print and delete files in the image directory that no document uses."""
    referenced = set(iter_image_names())
    for name in os.listdir(_IMAGE_DIR):
        if name in referenced:
            continue
        if not name.endswith((".png", ".jpg")):
            # Not an image we manage; report and keep it.
            _LOGGER.warning("ignore file: %s", name)
            continue
        victim = os.path.join(_IMAGE_DIR, name)
        print(victim)
        os.unlink(victim)
if __name__ == "__main__":
    # Debug-level logging so every scanned document is visible when run
    # directly as a script.
    logging.basicConfig(level=logging.DEBUG)
    main()
|
#
# Shared methods and classes for testing
#
import pybamm
import numpy as np
class SpatialMethodForTesting(pybamm.SpatialMethod):
    """Identity operators, no boundary conditions."""

    def __init__(self, mesh):
        # Tests broadcast using the plain number of points in each domain.
        for dom in mesh.keys():
            mesh[dom].npts_for_broadcast = mesh[dom].npts
        super().__init__(mesh)

    def spatial_variable(self, symbol):
        """Discretise a spatial variable onto the cell centres of its mesh."""
        submesh = self.mesh.combine_submeshes(*symbol.domain)
        return pybamm.Vector(submesh.nodes)

    def broadcast(self, symbol, domain):
        """Broadcast ``symbol`` onto ``domain``, collapsing constant results."""
        result = pybamm.NumpyBroadcast(symbol, domain, self.mesh)
        if result.is_constant():
            # Pre-evaluate constants so later steps work on a plain array
            # instead of a symbol-Vector multiplication.
            result = pybamm.Array(result.evaluate(), domain=result.domain)
        return result

    def gradient(self, symbol, discretised_symbol, boundary_conditions):
        """'Gradient' for testing: multiply by an identity matrix."""
        size = sum(self.mesh[dom].npts for dom in symbol.domain)
        return pybamm.Matrix(np.eye(size)) @ discretised_symbol

    def divergence(self, symbol, discretised_symbol, boundary_conditions):
        """'Divergence' for testing: multiply by an identity matrix."""
        size = sum(self.mesh[dom].npts for dom in symbol.domain)
        return pybamm.Matrix(np.eye(size)) @ discretised_symbol

    def compute_diffusivity(self, symbol):
        """Diffusivity is the identity: return the symbol unchanged."""
        return symbol
def get_mesh_for_testing(npts=None):
    """Build a 1D macro + 1D micro pybamm mesh with uniform submeshes.

    If ``npts`` is given, electrode/separator domains get ``3 * round(npts/3)``
    x-points and particle domains get ``npts`` r-points; otherwise fixed
    defaults are used.
    """
    param = pybamm.ParameterValues(
        base_parameters={
            "Negative electrode width": 0.3,
            "Separator width": 0.3,
            "Positive electrode width": 0.3,
        }
    )
    geometry = pybamm.Geometry("1D macro", "1D micro")
    param.process_geometry(geometry)
    domains = (
        "negative electrode",
        "separator",
        "positive electrode",
        "negative particle",
        "positive particle",
    )
    # Every domain uses the same uniform 1D submesh type.
    submesh_types = {dom: pybamm.Uniform1DSubMesh for dom in domains}
    if npts is None:
        submesh_pts = {
            "negative electrode": {"x": 40},
            "separator": {"x": 25},
            "positive electrode": {"x": 35},
            "negative particle": {"r": 10},
            "positive particle": {"r": 10},
        }
    else:
        # Round the macroscale point count to a multiple of three.
        n = 3 * round(npts / 3)
        submesh_pts = {
            "negative electrode": {"x": n},
            "separator": {"x": n},
            "positive electrode": {"x": n},
            "negative particle": {"r": npts},
            "positive particle": {"r": npts},
        }
    return pybamm.Mesh(geometry, submesh_types, submesh_pts)
def get_discretisation_for_testing(npts=None):
    """Return a Discretisation over the testing mesh that uses the identity
    spatial method on every domain."""
    spatial_methods = {
        dom: SpatialMethodForTesting
        for dom in ("macroscale", "negative particle", "positive particle")
    }
    return pybamm.Discretisation(get_mesh_for_testing(npts), spatial_methods)
|
# coding=utf-8
from ..settler import HopfieldSettler
from ..utils import binary_array
from bases import HopfieldTestCase
from mocking import make_net_from_lecture_slides
class HopfieldSettlerTests(HopfieldTestCase):
    def test_finding_deep_minimum(self):
        """Settling from a shallow local minimum reaches the deepest one."""
        net = make_net_from_lecture_slides()
        net.set_nodes(binary_array('01010'))
        # The starting configuration sits at energy -3.
        self.assertEqual(net.get_total_energy(), -3)
        settler = HopfieldSettler(net)
        settler.settle()
        # Settling must reach the global minimum at energy -5.
        self.assertEqual(net.get_total_energy(), -5)
        self.assertArrayEqual(net.get_nodes(), binary_array('01011'))
|
## imports
import os, time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.path as mplPath
from scipy.misc import imresize
import skimage.io as io
from . import utilities
def backgroundFalseNegErrors( coco_analyze, imgs_info, saveDir ):
    """Analyze background false negatives: ground-truth annotations missed
    at every OKS threshold. Writes a text report and a set of PDF plots under
    ``saveDir/background_errors/false_negatives`` and returns a dict mapping
    plot names to the generated file paths.
    """
    loc_dir = saveDir + '/background_errors/false_negatives'
    if not os.path.exists(loc_dir):
        os.makedirs(loc_dir)
    f = open('%s/std_out.txt'%loc_dir, 'w')
    f.write("Running Analysis: [False Negatives]\n\n")
    tic = time.time()
    paths = {}
    # Evaluate over the 'all' area range at every OKS threshold, with ignore
    # flags disabled, checking only background errors.
    oksThrs = [.5,.55,.6,.65,.7,.75,.8,.85,.9,.95]
    areaRngs = [[32**2,1e5**2]]
    areaRngLbls = ['all']
    coco_analyze.params.areaRng = areaRngs
    coco_analyze.params.areaRngLbl = areaRngLbls
    coco_analyze.params.oksThrs = oksThrs
    coco_analyze.cocoEval.params.useGtIgnore = 0
    coco_analyze.cocoEval.params.gtIgnoreIds = []
    coco_analyze.analyze(check_kpts=False, check_scores=False, check_bckgd=True)
    # Intersect the false-negative sets across all thresholds.
    badFalseNeg = coco_analyze.false_neg_gts['all',str(.5)]
    for tind, t in enumerate(coco_analyze.params.oksThrs):
        badFalseNeg = badFalseNeg & coco_analyze.false_neg_gts['all',str(t)]
    # bad false negatives are those that are false negatives at all oks thresholds
    fn_gts = [coco_analyze.cocoGt.loadAnns(b)[0] for b in badFalseNeg]
    f.write("Num. annotations: [%d]\n"%len(coco_analyze.cocoGt.loadAnns(coco_analyze.cocoGt.getAnnIds())))
    for oks in oksThrs:
        f.write("OKS thresh: [%f]\n"%oks)
        f.write(" - Matches: [%d]\n"%len(coco_analyze.bckgd_err_matches[areaRngLbls[0], str(oks), 'gts']))
        f.write(" - Bckgd. FN: [%d]\n"%len(coco_analyze.false_neg_gts[areaRngLbls[0],str(oks)]))
    # Show the four false negatives with the most and the four with the
    # fewest (non-zero) labeled keypoints.
    sorted_fns = sorted(fn_gts, key=lambda k: -k['num_keypoints'])
    sorted_fns = [fff for fff in sorted_fns if fff['num_keypoints']>0]
    show_fn = sorted_fns[0:4] + sorted_fns[-4:]
    f.write("\nBackground False Negative Errors:\n")
    for tind, t in enumerate(show_fn):
        name = 'bckd_false_neg_%d'%tind
        paths[name] = "%s/%s.pdf"%(loc_dir,name)
        f.write("Image_id, ground_truth id, num_keypoints: [%d][%d][%d]\n"%(t['image_id'],t['id'],t['num_keypoints']))
        utilities.show_dets([],[t],imgs_info[t['image_id']],paths[name])
    max_height = max([d['bbox'][3] for d in fn_gts])
    min_height = min([d['bbox'][3] for d in fn_gts])
    max_width = max([d['bbox'][2] for d in fn_gts])
    min_width = min([d['bbox'][2] for d in fn_gts])
    f.write("\nBackground False Negatives Bounding Box Dimenstions:\n")
    f.write(" - Min width: [%d]\n"%min_width)
    f.write(" - Max width: [%d]\n"%max_width)
    f.write(" - Min height: [%d]\n"%min_height)
    f.write(" - Max height: [%d]\n"%max_height)
    # Accumulators for the aspect-ratio heatmaps at three resolutions.
    ar_pic = np.zeros((int(max_height)+1,int(max_width)+1))
    ar_pic_2 = np.zeros((30,30))
    ar_bins = list(range(10))+list(range(10,100,10))+list(range(100,1000,100))+[1000]
    ar_pic_3 = np.zeros((10,10))
    ar_bins_3 = [np.power(2,x) for x in range(11)]
    num_fn_keypoints = {}
    # NOTE: areaRngs/areaRngLbls are rebound here with a different meaning
    # (histogram buckets) than the evaluation ranges used above.
    areaRngs = [[0, 32 ** 2],[32 ** 2, 64 ** 2],[64 ** 2, 96 ** 2],[96 ** 2, 128 ** 2],[128 ** 2, 1e5 ** 2]]
    areaRngLbls = ['small','medium','large','xlarge','xxlarge']
    small = 0; medium = 0; large = 0; xlarge = 0; xxlarge = 0
    num_people_ranges = [[0,0],[1,1],[2,4],[5,8],[9,100]]
    num_people_labels = ['none','one','small grp.','large grp.', 'crowd']
    no_people = 0; one = 0; small_grp = 0; large_grp = 0; crowd = 0
    segm_heatmap = np.zeros((128,128))
    for i,b in enumerate(fn_gts):
        # Histogram of labeled-keypoint counts.
        if b['num_keypoints'] in num_fn_keypoints:
            num_fn_keypoints[b['num_keypoints']] += 1
        else:
            num_fn_keypoints[b['num_keypoints']] = 1
        if b['num_keypoints']==0: continue
        b_width = int(b['bbox'][2])
        b_height = int(b['bbox'][3])
        ar_pic[0:b_height,0:b_width] += 1
        if b_width < 1024 and b_height < 1024:
            # NOTE(review): strict inequalities mean a width/height exactly on
            # a bin edge falls into no bin -- confirm this is intended.
            col = [i for i in range(len(ar_bins)-1) if ar_bins[i]<b_width<ar_bins[i+1]]
            row = [i for i in range(len(ar_bins)-1) if ar_bins[i]<b_height<ar_bins[i+1]]
            ar_pic_2[row,col] += 1
            col = [i for i in range(len(ar_bins_3)-1) if ar_bins_3[i]<b_width<ar_bins_3[i+1]]
            row = [i for i in range(len(ar_bins_3)-1) if ar_bins_3[i]<b_height<ar_bins_3[i+1]]
            ar_pic_3[row,col] += 1
        else:
            print("False Positive bbox has a side larger than 1024 pixels.")
            print("Change lists ar_bins_2 and ar_bins_3 to include larger bins.")
            assert(False)
        # Bucket the (halved) bbox area into the size histogram.
        area = b_width * b_height * .5
        if areaRngs[0][0] <= area < areaRngs[0][1]:
            small += 1
        elif areaRngs[1][0] <= area < areaRngs[1][1]:
            medium += 1
        elif areaRngs[2][0] <= area < areaRngs[2][1]:
            large += 1
        elif areaRngs[3][0] <= area < areaRngs[3][1]:
            xlarge += 1
        elif areaRngs[4][0] <= area < areaRngs[4][1]:
            xxlarge += 1
        # Bucket by the number of people in the image; any crowd annotation
        # pushes the image into the 'crowd' bucket (count forced to 100).
        anns = coco_analyze.cocoGt.loadAnns(coco_analyze.cocoGt.getAnnIds(b['image_id']))
        iscrowd = [ann['iscrowd'] for ann in anns]
        num_people = len(anns) if sum(iscrowd)==0 else 100
        if num_people_ranges[0][0] <= num_people <= num_people_ranges[0][1]:
            no_people += 1
        elif num_people_ranges[1][0] <= num_people <= num_people_ranges[1][1]:
            one += 1
        elif num_people_ranges[2][0] <= num_people <= num_people_ranges[2][1]:
            small_grp += 1
        elif num_people_ranges[3][0] <= num_people <= num_people_ranges[3][1]:
            large_grp += 1
        elif num_people_ranges[4][0] <= num_people <= num_people_ranges[4][1]:
            crowd += 1
        # Rasterize the segmentation polygons into a mask and accumulate a
        # downsampled location heatmap.
        if b['iscrowd']==1: continue
        nx, ny = imgs_info[b['image_id']]['width'],imgs_info[b['image_id']]['height']
        the_mask = np.zeros((ny,nx))
        # Create vertex coordinates for each grid cell...
        # (<0,0> is at the top left of the grid in this system)
        x, y = np.meshgrid(np.arange(nx), np.arange(ny))
        x, y = x.flatten(), y.flatten()
        points = np.vstack((x,y)).T
        for poly_verts in b['segmentation']:
            path = mplPath.Path(np.array([[x,y] for x,y in zip(poly_verts[0::2],poly_verts[1::2])]))
            grid = path.contains_points(points)
            grid = grid.reshape((ny,nx))
            the_mask += np.array(grid, dtype=int)
        # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3 --
        # confirm the pinned SciPy version still provides it.
        segm_heatmap += imresize(the_mask,(128,128))
    fig = plt.figure(figsize=(10,10))
    ax = plt.subplot(111)
    ax.imshow(segm_heatmap)
    path = "%s/bckd_false_neg_heatmaps.pdf"%(loc_dir)
    paths['false_neg_hm'] = path
    plt.axis('off')
    plt.savefig(path, bbox_inches='tight')
    plt.close()
    f.write("\nNumber of people in images with Background False Negatives:\n")
    f.write(" - No people: [%d]\n"%no_people)
    f.write(" - One person: [%d]\n"%one)
    f.write(" - Small group (2-4): [%d]\n"%small_grp)
    f.write(" - Large Group (5-8): [%d]\n"%large_grp)
    f.write(" - Crowd (>=9): [%d]\n"%crowd)
    f.write("\nArea size (in pixels) of Background False Negatives:\n")
    f.write(" - Small (%d,%d): [%d]\n"%(areaRngs[0][0],areaRngs[0][1],small))
    f.write(" - Medium (%d,%d): [%d]\n"%(areaRngs[1][0],areaRngs[1][1],medium))
    f.write(" - Large (%d,%d): [%d]\n"%(areaRngs[2][0],areaRngs[2][1],large))
    f.write(" - X-Large (%d,%d): [%d]\n"%(areaRngs[3][0],areaRngs[3][1],xlarge))
    f.write(" - XX-Large (%d,%d): [%d]\n"%(areaRngs[4][0],areaRngs[4][1],xxlarge))
    f.write("\nNumber of visible keypoints for Background False Negatives:\n")
    for k in list(num_fn_keypoints.keys()):
        if k == 0: continue
        f.write(" - [%d] kpts: [%d] False Neg.\n"%(k,num_fn_keypoints[k]))
    # Aspect-ratio heatmap at full pixel resolution.
    plt.figure(figsize=(10,10))
    plt.imshow(ar_pic,origin='lower')
    plt.colorbar()
    plt.title('BBox Aspect Ratio',fontsize=20)
    plt.xlabel('Width (px)',fontsize=20)
    plt.ylabel('Height (px)',fontsize=20)
    path = "%s/bckd_false_neg_bbox_aspect_ratio.pdf"%(loc_dir)
    plt.savefig(path, bbox_inches='tight')
    plt.close()
    # Aspect-ratio heatmap on the linear/decade bins.
    fig, ax = plt.subplots(figsize=(10,10))
    plt.imshow(ar_pic_2,origin='lower')
    plt.xticks(range(1,len(ar_bins)+1),["%d"%(x) for x in ar_bins],rotation='vertical')
    plt.yticks(range(1,len(ar_bins)+1),["%d"%(x) for x in ar_bins])
    plt.colorbar()
    plt.grid()
    plt.title('BBox Aspect Ratio',fontsize=20)
    plt.xlabel('Width (px)',fontsize=20)
    plt.ylabel('Height (px)',fontsize=20)
    path = "%s/bckd_false_neg_bbox_aspect_ratio_2.pdf"%(loc_dir)
    plt.savefig(path, bbox_inches='tight')
    plt.close()
    # Aspect-ratio heatmap on power-of-two bins.
    fig, ax = plt.subplots(figsize=(10,10))
    plt.imshow(ar_pic_3,origin='lower')
    plt.xticks([-.5 + x for x in range(11)],["%d"%(x) for x in ar_bins_3])
    plt.yticks([-.5 + x for x in range(11)],["%d"%(x) for x in ar_bins_3])
    plt.colorbar()
    plt.grid()
    plt.title('BBox Aspect Ratio',fontsize=20)
    plt.xlabel('Width (px)',fontsize=20)
    plt.ylabel('Height (px)',fontsize=20)
    path = "%s/bckd_false_neg_bbox_aspect_ratio_3.pdf"%(loc_dir)
    paths['false_neg_bbox_ar'] = path
    plt.savefig(path, bbox_inches='tight')
    plt.close()
    fig, ax = plt.subplots(figsize=(10,10))
    # NOTE(review): Axes.set_axis_bgcolor was removed in matplotlib 2.2
    # (replaced by set_facecolor) -- confirm the pinned matplotlib version.
    ax.set_axis_bgcolor('lightgray')
    plt.bar(range(5),[small,medium,large,xlarge,xxlarge],color='g',align='center')
    plt.xticks(range(5),areaRngLbls)
    plt.grid()
    plt.title('Histogram of Area Size',fontsize=20)
    path = "%s/bckd_false_neg_bbox_area_hist.pdf"%(loc_dir)
    paths['false_neg_bbox_area_hist'] = path
    plt.savefig(path, bbox_inches='tight')
    plt.close()
    fig, ax = plt.subplots(figsize=(10,10))
    ax.set_axis_bgcolor('lightgray')
    plt.bar(range(5),[no_people,one,small_grp,large_grp,crowd],color='g',align='center')
    plt.xticks(range(5),num_people_labels)
    plt.grid()
    plt.title('Histogram of Num. of People in Images',fontsize=20)
    path = "%s/bckd_false_neg_num_people_histogram.pdf"%(loc_dir)
    paths['false_neg_num_ppl_hist'] = path
    plt.savefig(path, bbox_inches='tight')
    plt.close()
    plt.figure(figsize=(10,10))
    plt.bar([k for k in list(num_fn_keypoints.keys()) if k!=0],[num_fn_keypoints[k] for k in list(num_fn_keypoints.keys()) if k!= 0],align='center')
    plt.title("Histogram of Number of Keypoints",fontsize=20)
    path = "%s/bckd_false_neg_num_keypoints_histogram.pdf"%(loc_dir)
    paths['false_neg_num_kpts_hist'] = path
    plt.savefig(path, bbox_inches='tight')
    plt.close()
    f.write("\nDone, (t=%.2fs)."%(time.time()-tic))
    f.close()
    return paths
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Support for containment constraints
Either a container or an object can provide constraints on the
containment relationship.
A container expresses constraints through a precondition on its
`__setitem__` method in its interface.
Preconditions can be simple callable objects, like functions. They
should raise a ``zope.interface.Invalid`` exception to indicate that a
constraint isn't satisfied:
>>> def preNoZ(container, name, ob):
... "Silly precondition example"
... if name.startswith("Z"):
... raise zope.interface.Invalid("Names can not start with Z")
>>> class I1(zope.interface.Interface):
... def __setitem__(name, on):
... "Add an item"
... __setitem__.precondition = preNoZ
>>> from zope.container.interfaces import IContainer
>>> @zope.interface.implementer(I1, IContainer)
... class C1(object):
... def __repr__(self):
... return 'C1'
Given such a precondition, we can then check whether an object can be
added:
>>> c1 = C1()
>>> checkObject(c1, "bob", None)
>>> checkObject(c1, "Zbob", None)
Traceback (most recent call last):
...
zope.interface.exceptions.Invalid: Names can not start with Z
We can also express constraints on the containers an object can be
added to. We do this by setting a field constraint on an object's
`__parent__` attribute:
>>> import zope.schema
A field constraint is a callable object that returns a boolean value:
>>> def con1(container):
... "silly container constraint"
... if not hasattr(container, 'x'):
... return False
... return True
>>> class I2(zope.interface.Interface):
... __parent__ = zope.schema.Field(constraint = con1)
>>> @zope.interface.implementer(I2)
... class O(object):
... pass
If the constraint isn't satisfied, we'll get a validation error when we
check whether the object can be added:
>>> checkObject(c1, "bob", O())
Traceback (most recent call last):
...
zope.schema._bootstrapinterfaces.ConstraintNotSatisfied: (C1, '__parent__')
Note that the validation error isn't very informative. For that
reason, it's better for constraints to raise Invalid errors when they
aren't satisfied:
>>> def con1(container):
... "silly container constraint"
... if not hasattr(container, 'x'):
... raise zope.interface.Invalid("What, no x?")
... return True
>>> class I2(zope.interface.Interface):
... __parent__ = zope.schema.Field(constraint = con1)
>>> @zope.interface.implementer(I2)
... class O(object):
... pass
>>> checkObject(c1, "bob", O())
Traceback (most recent call last):
...
zope.interface.exceptions.Invalid: What, no x?
>>> c1.x = 1
>>> checkObject(c1, "bob", O())
The `checkObject` function is handy when checking whether we can add an
existing object to a container, but, sometimes, we want to check
whether an object produced by a factory can be added. To do this, we
use `checkFactory`:
>>> class Factory(object):
... def __call__(self):
... return O()
... def getInterfaces(self):
... return zope.interface.implementedBy(O)
>>> factory = Factory()
>>> checkFactory(c1, "bob", factory)
True
>>> del c1.x
>>> checkFactory(c1, "bob", factory)
False
Unlike `checkObject`, `checkFactory`:
- Returns a boolean value
- Takes a factory (e.g. a class) rather than an argument.
The container constraint we defined for C1 isn't actually used to
check the factory:
>>> c1.x = 1
>>> checkFactory(c1, "Zbob", factory)
True
To work with `checkFactory`, a container precondition has to
implement a factory method. This is because a factory, rather than
an object is passed. To illustrate this, we'll make preNoZ its own
factory method:
>>> preNoZ.factory = preNoZ
We can do this (silly thing) because preNoZ doesn't use the object
argument.
>>> checkFactory(c1, "Zbob", factory)
False
"""
__docformat__ = 'restructuredtext'
import sys

import zope.interface
import zope.location.interfaces
import zope.schema
from zope.cachedescriptors.property import readproperty
from zope.dottedname.resolve import resolve
from zope.interface import providedBy

from zope.container.i18n import ZopeMessageFactory as _
from zope.container.interfaces import IContainer
from zope.container.interfaces import InvalidContainerType
from zope.container.interfaces import InvalidItemType
def checkObject(container, name, object):
    """Check containment constraints for an object and container

    Raises ``zope.interface.Invalid`` (from the ``__setitem__``
    precondition or the ``__parent__`` field validation) or ``TypeError``
    when the object may not be added to the container.
    """
    # check __setitem__ precondition
    containerProvided = providedBy(container)
    __setitem__ = containerProvided.get('__setitem__')
    if __setitem__ is not None:
        precondition = __setitem__.queryTaggedValue('precondition')
        if precondition is not None:
            precondition(container, name, object)
    # check that object is not being pasted into itself or its children.
    target = container
    while target is not None:
        if target is object:
            raise TypeError("Cannot add an object to itself or its children.")
        if zope.location.interfaces.ILocation.providedBy(target):
            # Walk up the location tree towards the root.
            target = target.__parent__
        else:
            target = None
    # check the constraint on __parent__
    __parent__ = providedBy(object).get('__parent__')
    try:
        validate = __parent__.validate
    except AttributeError:
        # No __parent__ field (or no validate method): nothing to check.
        pass
    else:
        validate(container)
    if not containerProvided.extends(IContainer):
        # If it doesn't implement IContainer, it can't contain stuff.
        raise TypeError(
            _('Container is not a valid Zope container.')
        )
def checkFactory(container, name, factory):
    """Check containment constraints for a factory and container

    Like checkObject, but a factory (not an object) is passed, so the
    precondition's ``factory`` method is used, and failures are reported
    as a boolean: returns False if either the precondition or the
    ``__parent__`` constraint raises zope.interface.Invalid, else True.
    """
    __setitem__ = providedBy(container).get('__setitem__')
    try:
        precondition = __setitem__.queryTaggedValue('precondition')
        precondition = precondition.factory
    except AttributeError:
        # No __setitem__ field, no precondition tagged value (then
        # queryTaggedValue is called on None), or a precondition without a
        # factory method: skip this check.
        pass
    else:
        try:
            precondition(container, name, factory)
        except zope.interface.Invalid:
            return False
    # check the constraint on __parent__
    __parent__ = factory.getInterfaces().get('__parent__')
    try:
        validate = __parent__.validate
    except AttributeError:
        # No __parent__ field declared by the factory's interfaces.
        pass
    else:
        try:
            validate(container)
        except zope.interface.Invalid:
            return False
    return True
class IItemTypePrecondition(zope.interface.Interface):
    """Specification of a ``__setitem__`` precondition restricting item types."""

    def __call__(container, name, object):
        """Test whether container setitem arguments are valid.

        Raise zope.interface.Invalid if the object is invalid.
        """

    def factory(container, name, factory):
        """Test whether objects provided by the factory are acceptable

        Return a boolean value.
        """
class _TypesBased:
    """Mix-in managing a ``types`` tuple/list that may be given as dotted names.

    Dotted names are resolved lazily, on first access of ``types``, relative
    to the module captured at construction time.
    """

    @readproperty
    def types(self):
        # Resolve dotted names to real objects, then cache the result by
        # shadowing this readproperty with a plain instance attribute.
        raw_types, module = self.raw_types
        types = []
        for t in raw_types:
            if isinstance(t, str):
                t = resolve(t, module)
            types.append(t)
        self.types = types
        return types

    def __init__(self, *types, **kw):
        # :param types: interfaces, or dotted names of interfaces.
        # :keyword module: module to resolve dotted names against; defaults
        #     to the *caller's* module (frame depth 1 -- do not wrap this
        #     constructor without accounting for that).
        if [t for t in types if isinstance(t, str)]:
            # have dotted names
            module = kw.get('module', sys._getframe(1).f_globals['__name__'])
            self.raw_types = types, module
        else:
            self.types = types
@zope.interface.implementer(IItemTypePrecondition)
class ItemTypePrecondition(_TypesBased):
    """Specify a `__setitem__` precondition that restricts item types

    Items must be one of the given types.

    >>> class I1(zope.interface.Interface):
    ...     pass
    >>> class I2(zope.interface.Interface):
    ...     pass

    >>> precondition = ItemTypePrecondition(I1, I2)

    >>> class Ob(object):
    ...     pass
    >>> ob = Ob()

    >>> class Factory(object):
    ...     def __call__(self):
    ...         return Ob()
    ...     def getInterfaces(self):
    ...         return zope.interface.implementedBy(Ob)

    >>> factory = Factory()

    >>> try:
    ...     precondition(None, 'foo', ob)
    ... except InvalidItemType as v:
    ...     v.args[0], (v.args[1] is ob), (v.args[2] == (I1, I2))
    ... else:
    ...     print('Should have failed')
    (None, True, True)

    >>> try:
    ...     precondition.factory(None, 'foo', factory)
    ... except InvalidItemType as v:
    ...     v.args[0], (v.args[1] is factory), (v.args[2] == (I1, I2))
    ... else:
    ...     print('Should have failed')
    (None, True, True)

    >>> zope.interface.classImplements(Ob, I2)
    >>> precondition(None, 'foo', ob)
    >>> precondition.factory(None, 'foo', factory)
    """

    def __call__(self, container, name, object):
        # Accept the object if it provides any of the allowed interfaces.
        for iface in self.types:
            if iface.providedBy(object):
                return
        raise InvalidItemType(container, object, self.types)

    def factory(self, container, name, factory):
        # Accept the factory if its declared interfaces include (or extend)
        # any of the allowed interfaces.
        implemented = factory.getInterfaces()
        for iface in self.types:
            if implemented.isOrExtends(iface):
                return
        raise InvalidItemType(container, factory, self.types)
def contains(*types):
    """Declare that a container type contains only the given types

    This is used within a class suite defining an interface to create
    a __setitem__ specification with a precondition allowing only the
    given types:

    >>> class IFoo(zope.interface.Interface):
    ...     pass
    >>> class IBar(zope.interface.Interface):
    ...     pass
    >>> class IFooBarContainer(IContainer):
    ...     contains(IFoo, IBar)

    >>> __setitem__ = IFooBarContainer['__setitem__']
    >>> __setitem__.getTaggedValue('precondition').types == (IFoo, IBar)
    True

    It is invalid to call contains outside a class suite:

    >>> contains(IFoo, IBar)
    Traceback (most recent call last):
    ...
    TypeError: contains not called from suite
    """
    frame = sys._getframe(1)
    f_locals = frame.f_locals
    f_globals = frame.f_globals
    # Only meaningful inside a class (interface) suite: there the frame's
    # locals are the class namespace, distinct from the module globals.
    if not (f_locals is not f_globals
            and f_locals.get('__module__')
            and f_locals.get('__module__') == f_globals.get('__name__')
            ):
        raise TypeError("contains not called from suite")

    def __setitem__(key, value):
        """
        This serves as a copy of IContainer.__setitem__ to hold
        the ``precondition`` attribute. Note that it replaces a local
        __setitem__ defined before.
        """
    __setitem__.__doc__ = IContainer['__setitem__'].__doc__
    __setitem__.precondition = ItemTypePrecondition(
        *types,
        **dict(module=f_globals['__name__'])
    )
    # Injecting into f_locals makes this the class suite's __setitem__.
    f_locals['__setitem__'] = __setitem__
class IContainerTypesConstraint(zope.interface.Interface):
    """Constraint checking that a container is of an allowed type."""

    def __call__(object):
        """Test whether object is valid.

        Return True if valid.
        Raise zope.interface.Invalid if the object is invalid.
        """
@zope.interface.implementer(IContainerTypesConstraint)
class ContainerTypesConstraint(_TypesBased):
    """Constrain a container to be one of a number of types

    >>> class I1(zope.interface.Interface):
    ...     pass
    >>> class I2(zope.interface.Interface):
    ...     pass
    >>> class Ob(object):
    ...     pass
    >>> ob = Ob()
    >>> constraint = ContainerTypesConstraint(I1, I2)
    >>> try:
    ...     constraint(ob)
    ... except InvalidContainerType as v:
    ...     (v.args[0] is ob), (v.args[1] == (I1, I2))
    ... else:
    ...     print('Should have failed')
    (True, True)

    >>> zope.interface.classImplements(Ob, I2)
    >>> constraint(Ob())
    True
    """

    def __call__(self, object):
        # Accept the container if it provides any of the allowed interfaces.
        for iface in self.types:
            if iface.providedBy(object):
                return True
        raise InvalidContainerType(object, self.types)
def containers(*types):
    """Declare the container types a type can be contained in

    This is used within a class suite defining an interface to create
    a __parent__ specification with a constraint allowing only the
    given types:

    >>> class IFoo(IContainer):
    ...     pass
    >>> class IBar(IContainer):
    ...     pass
    >>> from zope.location.interfaces import IContained
    >>> class IFooBarContained(IContained):
    ...     containers(IFoo, IBar)

    >>> __parent__ = IFooBarContained['__parent__']
    >>> __parent__.constraint.types == (IFoo, IBar)
    True

    It is invalid to call containers outside a class suite:

    >>> containers(IFoo, IBar)
    Traceback (most recent call last):
    ...
    TypeError: containers not called from suite
    """
    frame = sys._getframe(1)
    f_locals = frame.f_locals
    f_globals = frame.f_globals
    # Only meaningful inside a class (interface) suite: there the frame's
    # locals are the class namespace, distinct from the module globals.
    if not (f_locals is not f_globals
            and f_locals.get('__module__')
            and f_locals.get('__module__') == f_globals.get('__name__')
            ):
        raise TypeError("containers not called from suite")
    __parent__ = zope.schema.Field(
        constraint=ContainerTypesConstraint(
            *types,
            **dict(module=f_globals['__name__'])
        )
    )
    # Injecting into f_locals makes this the class suite's __parent__ field.
    f_locals['__parent__'] = __parent__
|
from lib.blockchain import Blockchain
from lib.network import Network
from lib.http_server import MasterServer
if __name__ == '__main__':
    # Bootstrap the master node: local chain, master network identity,
    # then serve HTTP requests until interrupted.
    chain = Blockchain('master')
    master_node = Network.get_master()
    MasterServer(master_node, chain).serve_forever()
|
class Vector:
    """A simple 2-D vector supporting + and str().

    Also demonstrates the three attribute-visibility conventions:
    name-mangled ("private"), single-underscore ("protected"), and public.
    """
    __secretCount = 0        # "private": name-mangled to _Vector__secretCount
    _protectedParm = 100     # "protected" by convention: class and subclasses
    publicCount = 1          # public class attribute

    def __init__(self, a, b):
        """Store the two components.

        :param a: first component
        :param b: second component
        """
        self.a = a
        self.b = b

    def __str__(self):
        # %d matches the original integer-style rendering.
        return 'Vector(%d, %d)' % (self.a, self.b)

    def __add__(self, other):
        """Component-wise addition; returns a new Vector."""
        return Vector(self.a + other.a, self.b + other.b)
# Demonstrate vector addition and attribute visibility.
v1 = Vector(2,10)
v2 = Vector(5,-2)
print (v1 + v2)
print(v1.publicCount)
# A "private" attribute cannot be accessed directly; use the name-mangled form.
print (v1._Vector__secretCount)
from game import Game
import os
import sys
if len(sys.argv) < 2:
    print("Usage: python3 run.py <filename> [play]")
    sys.exit()

filename = sys.argv[1]

# Game() may raise any of these on a bad file; all three were handled
# identically in separate except blocks, so merge them: report and quit.
try:
    game_object = Game(filename)
except (FileNotFoundError, ValueError, TypeError) as e:
    print(e)
    sys.exit()

game_object.print_grid_string()
print()

while True:
    try:
        user_move = input("Input a move: ").lower().strip()
    except EOFError:
        sys.exit()
    if user_move in ['w', 'a', 's', 'd', 'e']:
        # Update player location and move history based on the cell the player
        # will be moving into
        # message = any message from the cell
        # end_game_well: None = game doesn't end. False = Lose game. True = Win game.
        message, end_game_well = game_object.game_move(user_move)
        game_object.print_grid_string()
        print()
        # Use identity tests for None/False (the original compared with ==).
        if message is not None and end_game_well is None:
            print(message, end="\n\n")
        if end_game_well:
            print()
            print(message, end='\n\n')
            game_object.print_moves()
            print()
            print("""=====================
====== YOU WIN! =====
=====================""")
            sys.exit()
        elif end_game_well is False:
            print()
            print(message, end='\n\n')
            bad_mes = 'The Fire Nation triumphs! The Honourable Furious Forest is' \
                ' reduced to a pile of ash and is scattered to the winds by the next' \
                ' storm... You have been roasted.'
            print(bad_mes, end='\n\n')
            game_object.print_moves()
            print()
            print("""=====================
===== GAME OVER =====
=====================""")
            sys.exit()
    elif user_move == 'q':
        print()
        print("Bye!")
        sys.exit()
    else:
        game_object.print_grid_string()
        print()
        print("Please enter a valid move (w, a, s, d, e, q).", end="\n\n")
|
import asyncio
from abc import ABC, abstractmethod
class BaseConsumer(ABC):
    """Abstract base for queue consumers.

    Fix: without inheriting from ABC (or using ABCMeta), the
    @abstractmethod decorator has no effect and the class could be
    instantiated with an unimplemented consume().
    """

    def __init__(self, queue: asyncio.Queue):
        # The shared task queue this consumer pulls dicts from.
        self._queue = queue

    @abstractmethod
    async def consume(self, task: dict):
        """Process a single task pulled from the queue."""
|
import requests
api = 'https://ubcexplorer.io/'
allCourseData = {}
# methods for finding course pre-requisites ---------------------
# returns all course data
def courses():
    """Fetch and return the full course list from the UBC Explorer API."""
    response = requests.get(api + 'getAllCourses/')
    return response.json()
# returns all the courses for a subject
def courseInfo(code):
    """Fetch the info dict for one course code.

    Returns an empty dict when the API response is not valid JSON.
    """
    resp = requests.get(api + 'getCourseInfo/' + code)
    try:
        return resp.json()
    except ValueError:
        return {}
# returns the whole prerequisite tree for a subject, including course information
## course return form:
# {
# "dept": "MATH",
# "code": "MATH 100",
# "name": "MATH",
# "name": "Differential Calculus with Applications to Physical Sciences and Engineering",
# "cred": 3,
# "desc": "Derivatives of elementary functions. Applications and modelling: graphing, optimization. Consult the Faculty of Science Credit Exclusion List: www.calendar.ubc.ca/vancouver/index.cfm?tree=12,215,410,414. [3-0-0]",
# "prer": "High-school calculus and one of (a) a score of 80% or higher in BC Principles of Mathematics 12 or Pre-calculus 12, or (b) a satisfactory score in the UBC Mathematics Basic Skills Test.",
# "link": "https://courses.students.ubc.ca/cs/courseschedule?pname=subjarea&tname=subj-course&dept=MATH&course=100,
# "creq": [],
# "depn": [],
# "preq": [], - will contain more course objects (nested)
# }
def generateCoursePrereqTree(code):
    """Return the course info dict for ``code`` with its ``preq`` list
    recursively expanded into nested course dicts.

    Results are memoized in the module-level ``allCourseData`` cache so
    each course is fetched from the API at most once.
    """
    # obtain information on the course
    course = courseInfo(code)
    # cache information so we don't need to query the same course again
    allCourseData[code] = course
    preq = []
    if course:
        for pr in course['preq']:
            # recursively obtain the same information for each prereq course
            # (the original comments on this branch were inverted)
            if pr in allCourseData:  # already cached: reuse it
                info = allCourseData[pr]
            else:  # not cached yet: fetch (and cache) recursively
                info = generateCoursePrereqTree(pr)
            preq.append(info)
        course['preq'] = preq
    return course
|
class TreeNode(object):
    """A binary-tree node holding a value and optional children."""

    def __init__(self, x):
        self.val = x       # payload value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
def maxPathSum(root):
    """
    :type root: TreeNode
    :rtype: int

    Maximum sum over all paths (any node to any node, downward links only).
    """
    best = [float('-inf')]  # best path sum so far; list so the closure can mutate it

    def downward_gain(node):
        # Largest sum of a path that starts at `node` and continues down one
        # side only; negative contributions are clamped to zero.
        if node is None:
            return 0
        left_gain = max(downward_gain(node.left), 0)
        right_gain = max(downward_gain(node.right), 0)
        # A path "bending" at this node may use both sides.
        best[0] = max(best[0], node.val + left_gain + right_gain)
        return node.val + max(left_gain, right_gain)

    downward_gain(root)
    return best[0]
# Build the example tree:
#        -10
#        /  \
#       9    20
#           /  \
#          15   7
# Expected maximum path sum: 15 + 20 + 7 = 42.
node1 = TreeNode(9)
node2 = TreeNode(-10)
node3 = TreeNode(20)
node4 = TreeNode(15)
node5 = TreeNode(7)
node2.left = node1
node2.right = node3
node3.left = node4
node3.right = node5
print(maxPathSum(node2))
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe module dependencies resolved by the recipe engine.
DEPS = [
    'step',
]
def GenSteps(api):
    """Yield the demo steps for this recipe."""
    # We are going to have steps with the same name, so fix it automagically.
    api.step.auto_resolve_conflicts = True

    # The api.step object is directly callable.
    yield api.step('hello', ['echo', 'Hello World'])
    yield api.step('hello', ['echo', 'Why hello, there.'])

    # You can also manipulate various aspects of the step, such as env.
    # These are passed straight through to subprocess.Popen.
    # Also, abusing bash -c in this way is a TERRIBLE IDEA DON'T DO IT.
    yield api.step('goodbye', ['bash', '-c', 'echo Good bye, $friend.'],
                   env={'friend': 'Darth Vader'})
def GenTests(_api):
    """Yield (test name, test properties) pairs for recipe simulation."""
    yield 'basic', {}
|
from django.shortcuts import render
from django.views.generic import TemplateView
def index(request):
    """Render the landing page template."""
    return render(request, 'index-du.html')
class Paths(TemplateView):
    """Static page listing learning paths."""
    template_name = 'front/tracks/paths/index.html'
class PathDetail(TemplateView):
    """Static detail page for a single learning path."""
    template_name = 'front/tracks/path/detail/index.html'
class Courses(TemplateView):
    """Static page listing courses."""
    template_name = 'front/tracks/courses/index.html'
class CourseDetail(TemplateView):
    """Static detail page for a single course."""
    template_name = 'front/tracks/course/detail/index.html'
|
from pages.models import Page
def list_pages(request):
    """Context processor providing all pages, ordered by title descending.

    (``Manager.order_by`` returns the same queryset as ``.all().order_by``.)
    """
    return {"pages": Page.objects.order_by('-title')}
|
import redis
REDIS_HOST = "localhost"
REDIS_PORT = 6379
REDIS_PASSWORD = None
REDIS_KEY = "proxies"
class PoolEmptyError(Exception):
    """Raised when the proxy pool has no usable proxy to hand out."""

    def __init__(self, error_info='IP代理池为空,无法提供有效代理'):
        # Pass the message to Exception (the original commented this out,
        # leaving .args empty), and keep it on the instance for __str__.
        super().__init__(error_info)
        self.error_info = error_info

    def __str__(self):
        return self.error_info
class RedisClient(object):
    """Thin wrapper around a StrictRedis connection used as a proxy pool.

    Each proxy is stored as its own key with a short expiry, so stale
    proxies age out of the pool automatically.
    """

    def __init__(self, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, db_no=0):
        """Initialize the Redis connection.

        :param host: Redis host
        :param port: Redis port
        :param password: Redis password
        :param db_no: Redis database number
        """
        self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True, db=db_no)

    def add(self, proxy, value=1, ex=120):
        """Store ``proxy`` as a key valid for ``ex`` seconds (120 by default).

        :param proxy: proxy to store (used as the Redis key)
        :param value: value stored under the key
        :param ex: expiry in seconds
        :return: True/False
        """
        return self.db.set(proxy, value, ex=ex)

    def random(self):
        """Return a random proxy key, or raise PoolEmptyError if the pool is empty."""
        proxies_number = self.count()
        if proxies_number:
            return self.db.randomkey()
        else:
            raise PoolEmptyError

    def delete(self, proxy):
        """Remove ``proxy`` from the pool."""
        print('代理', proxy, "移除")
        return self.db.delete(proxy)

    def exist(self, proxy):
        """Return whether ``proxy`` is present in the pool.

        Fix: StrictRedis.exists returns the number of existing keys (an
        int, 0 or 1 here) and never None, so the previous
        ``is not None`` test was always True.
        """
        return self.db.exists(proxy) > 0

    def count(self):
        """Return the number of keys (proxies) in the database."""
        return self.db.dbsize()

    def all(self):
        """Return every proxy key in the pool."""
        return self.db.keys('*')

    def value(self, key):
        """Return the value stored under ``key``."""
        return self.db.get(key)
|
#
# Programming Assignment 2, CS640
#
# A Gomoku (Gobang) Game
#
# Adapted from CS111
# By Yiwen Gu
#
# You need to implement an AI Player for Gomoku
# A Random Player is provided for you
#
#
from pa2_gomoku import Player
import random
# numpy is used to accelerate matrix computing
import numpy as np
# Pattern strength levels used to index the per-player count arrays in the
# evaluator.  The "S"-prefixed names are the blocked ("sleeping") variants of
# the open shapes (see the M/X/P pattern comments in AIPlayer.__scan).
FIVE = 7
FOUR = 6
THREE = 4
TWO = 2
SFOUR = 5
STHREE = 3
STWO = 1
class AIPlayer(Player):
    """ a subclass of Player that looks ahead some number of moves and
    strategically determines its best next move.
    """
    from pa2_gomoku import Board

    def __init__(self, checker, difficulty=1, use_accum=True):
        """
        :param checker: This player's checker symbol
        :param difficulty: specify difficulty
        :param use_accum: Specify when calculating internally, whether the function should use
        accumulate intermediate values. instead of values w.r.t each single slots.
        """
        super().__init__(checker)
        depth = 0
        seconds = -1
        assert (difficulty >= 0)
        difficulty = int(difficulty)
        if difficulty == 0:  # level 0: 1-depth
            depth = 1
        elif difficulty == 1:  # level 1: 3-depth
            depth = 3
        elif difficulty == 2:  # level 2: 7-depth
            depth = 7
        elif difficulty == 3:  # level 3: think 1 seconds
            seconds = 1
        else:
            seconds = pow(2, difficulty - 3)
        self.depth = depth
        self.seconds = seconds
        self.__next_moves = []
        self.use_accum = use_accum
        self.__temp_record = None

    def __init_nextMove(self):
        # Reset the candidate best-move list before a new search.
        self.__next_moves.clear()

    def next_move(self, board: Board):
        """ returns the called AIPlayer's next move for a game on
        the specified Board object.
        input: board is a Board object for the game that the called
        Player is playing.
        return: row, col are the coordinates of a vacant location on the board
        """
        assert (not board.is_full())
        assert (self.depth % 2 == 1)
        self.num_moves += 1
        self.__init_nextMove()
        if board.is_empty():
            # if it's the 1st move, and it's AI's turn
            # then ai will randomly choose one in the center of the board.
            cent_row = board.width // 2
            cent_col = board.height // 2
            buff_row = round(board.width / 20)
            buff_col = round(board.width / 20)
            self.__next_moves.append(
                (random.randint(-buff_row, buff_row) + cent_row,
                 random.randint(-buff_col, buff_col) + cent_col))
        else:
            # Fix: np.bool was removed from NumPy's namespace (deprecated in
            # 1.20, removed in 1.24); the builtin bool is the replacement.
            self.__temp_record = np.zeros((board.height, board.width, 4), dtype=bool)
            self.__my_max(board)
        # in the most case, AI will choose the 1st move
        row, col = self.__next_moves[0]
        # TODO: in the rare case, AI will choose other moves
        # row, col = random.choice(self.__next_moves)
        return row, col

    ################################################################
    # Implement your strategy here.
    # Feel free to call as many as helper functions as you want.
    # We only cares the return of this function
    ################################################################
    def __my_max(self, board: Board,
                 depth=0, alpha=float("-inf"), beta=float("inf")):
        """Negamax search with alpha-beta pruning.

        Returns the best score for the side to move; at depth 0 it also
        records the best root move(s) in self.__next_moves.
        """
        # quit condition:
        if depth >= self.depth:
            # here the negative symbol is a key point.
            return -self.__evaluate(board)
        me_chk_id = board.get_checker_id(self.checker)
        op_chk_id = board.get_checker_id(self.opponent_checker)
        for n_row, n_col in board.iter_recent_empty():
            if not board.has_neighbor(n_row, n_col):  # TODO: this step can also be optimized!
                continue
            # Even depths are this player's moves, odd depths the opponent's.
            if depth % 2 == 0:
                board.add_checker_id(me_chk_id, n_row, n_col)
            else:
                board.add_checker_id(op_chk_id, n_row, n_col)
            value = -self.__my_max(board, depth + 1, -beta, -alpha)
            board.delete_checker(n_row, n_col)
            if value > alpha:
                if depth == 0:
                    self.__next_moves.clear()
                    self.__next_moves.append((n_row, n_col))
                if value >= beta:
                    return beta
                alpha = value
            elif value == alpha:
                if depth == 0:
                    self.__next_moves.append((n_row, n_col))
        return alpha

    def __calc_score(self, score_count: dict, me_chk_id: int, op_chk_id: int):
        """Fold the per-pattern counts for both players into a single score.

        Positive favors this player; +/-10000 and the 90xx values are the
        win / forced-win tiers, everything else is a weighted sum.

        :param score_count: {checker_id: counts list indexed by pattern constant}
        :param me_chk_id: this player's checker id
        :param op_chk_id: opponent's checker id
        :return: numeric score of the position
        """
        my_count = score_count[me_chk_id]
        oppo_count = score_count[op_chk_id]
        if oppo_count[FIVE] > 0:
            return -10000
        if my_count[FIVE] > 0:
            return 10000
        # Two sleeping fours are as strong as a live four.
        if my_count[SFOUR] >= 2:
            my_count[FOUR] += 1
        if oppo_count[SFOUR] >= 2:
            oppo_count[FOUR] += 1
        if oppo_count[FOUR] > 0:
            return -9050
        if oppo_count[SFOUR] > 0:
            return -9040
        if my_count[FOUR] > 0:
            return 9030
        if my_count[SFOUR] > 0 and my_count[THREE] > 0:
            return 9020
        if oppo_count[THREE] > 0 and my_count[SFOUR] == 0:
            return -9010
        if my_count[THREE] > 1 and oppo_count[THREE] == 0 and oppo_count[STHREE] == 0:
            return 9000
        my_score = op_score = 0
        if my_count[SFOUR] > 0:
            my_score += 400
        if oppo_count[THREE] > 1:
            op_score += 500
        elif oppo_count[THREE] > 0:
            op_score += 100
        if my_count[THREE] > 1:
            my_score += 2000
        elif my_count[THREE] > 0:
            my_score += 400
        if oppo_count[STHREE] > 0:
            op_score += oppo_count[STHREE] * 10
        if my_count[STHREE] > 0:
            my_score += my_count[STHREE] * 10
        if oppo_count[TWO] > 0:
            op_score += oppo_count[TWO] * 6
        if my_count[TWO] > 0:
            my_score += my_count[TWO] * 6
        if oppo_count[STWO] > 0:
            op_score += oppo_count[STWO] * 2
        if my_count[STWO] > 0:
            my_score += my_count[STWO] * 2
        return my_score - op_score

    def __evaluate(self, board: Board):
        """
        Evaluate the score of the current board.
        :param board: The board object
        :return: A score, numeric.
        """
        assert (self.__temp_record is not None)
        self.__temp_record.fill(0)
        # score_board has 3 dims: row, col, and 4 different directions.
        me_chk_id = board.get_checker_id(self.checker)
        op_chk_id = board.get_checker_id(self.opponent_checker)
        score_count = {me_chk_id: [0] * 8, op_chk_id: [0] * 8}
        # we only iterate slots which has been used
        nz = board.slots != 0
        rows, cols = np.where(nz)
        for row, col in zip(rows, cols):
            # Fix: the original branched on the slot's checker id but both
            # branches performed this identical call, so the test was
            # redundant and is dropped.
            self.__check_point(board, row, col, score_count)
        return self.__calc_score(score_count, me_chk_id, op_chk_id)

    def __check_point(self, board: Board, row: int, col: int, score_count: dict):
        """Scan the four lines through (row, col) that were not yet counted,
        updating score_count in place."""
        b_width = board.width
        b_height = board.height
        # mostly left, top, right, bottom edge
        col_lower = max(0, col - 4)
        col_upper = min(b_width - 1, col + 4)
        row_lower = max(0, row - 4)
        row_upper = min(b_height - 1, row + 4)
        my_record = self.__temp_record[row, col]
        # direction (1, 0)
        if b_width >= 5 and my_record[0] == 0:
            indices = (row, range(col_lower, col_upper + 1))
            self.__scan(board, indices, col - col_lower, score_count, 0)
        # direction (0, 1)
        if b_height >= 5 and my_record[1] == 0:
            indices = (range(row_lower, row_upper + 1), col)
            self.__scan(board, indices, row - row_lower, score_count, 1)
        if b_width >= 5 and b_height >= 5:
            # direction (1, 1)
            if my_record[2] == 0:
                offset = col - row
                row_bgn = max(row_lower, -offset)
                row_end = min(row_upper, b_width - offset - 1)
                indices = (range(row_bgn, row_end + 1), range(row_bgn + offset, row_end + offset + 1))
                self.__scan(board, indices, row - row_bgn, score_count, 2)
            # direction (1, -1)
            if my_record[3] == 0:
                offset = col + row
                row_bgn = max(row_lower, offset - b_width + 1)
                row_end = min(row_upper, offset)
                indices = (range(row_bgn, row_end + 1), range(offset - row_bgn, offset - row_end - 1, -1))
                self.__scan(board, indices, row - row_bgn, score_count, 3)

    def __scan(self, board: Board, indices, self_idx, score_count: dict, direction: int):
        """Classify the run of checkers around ``self_idx`` on one board line
        and bump the matching pattern counter in ``score_count``.
        (In the pattern comments: M = own checker, X = empty, P = blocked.)"""
        line = board.slots[indices]
        me_chk_id = line[self_idx]
        len_line = len(line)
        # initialize the left_idx and right_idx
        left_idx = self_idx - 1
        right_idx = self_idx + 1
        while right_idx < len_line:
            if line[right_idx] != me_chk_id:
                break
            right_idx += 1
        right_idx -= 1
        while left_idx >= 0:
            if line[left_idx] != me_chk_id:
                break
            left_idx -= 1
        left_idx += 1
        # how many are the same as the self slot.
        me_range = right_idx - left_idx + 1
        # initialize the left_range and right_range
        left_range = left_idx - 1
        right_range = right_idx + 1
        while right_range < len_line:
            if line[right_range] != 0:
                break
            right_range += 1
        right_range -= 1
        while left_range >= 0:
            if line[left_range] != 0:
                break
            left_range -= 1
        left_range += 1
        # how many are available slots
        chess_range = right_range - left_range + 1
        self.__set_record(indices, left_idx, right_idx, direction)
        if me_range == 5:
            score_count[me_chk_id][FIVE] += 1
        # Live Four : XMMMMX
        # Incoming Four : XMMMMP, PMMMMX
        elif me_range == 4:
            left_empty = (left_idx > 1 and line[left_idx - 1] == 0)
            right_empty = (right_idx < len_line - 1 and line[right_idx + 1] == 0)
            if left_empty and right_empty:
                score_count[me_chk_id][FOUR] += 1
            elif left_empty or right_empty:
                score_count[me_chk_id][SFOUR] += 1
        # Incoming Four : MXMMM, MMMXM, the two types can both exist
        # Live Three : XMMMXX, XXMMMX
        # Sleep Three : PMMMX, XMMMP, PXMMMXP
        elif me_range == 3:
            left_empty = right_empty = left_four = right_four = False
            if left_idx > 1 and line[left_idx - 1] == 0:
                if left_idx > 2 and line[left_idx - 2] == me_chk_id:  # MXMMM
                    self.__set_record(indices, left_idx - 2, left_idx - 1, direction)
                    left_four = True
                left_empty = True
            if right_idx < len_line - 1 and line[right_idx + 1] == 0:
                if right_idx < len_line - 2 and line[right_idx + 2] == me_chk_id:  # MMMXM
                    self.__set_record(indices, right_idx + 1, right_idx + 2, direction)
                    right_four = True
                right_empty = True
            if left_four or right_four:
                score_count[me_chk_id][SFOUR] += 1
            elif left_empty and right_empty:
                if chess_range > 5:  # XMMMXX, XXMMMX
                    score_count[me_chk_id][THREE] += 1
                else:  # PXMMMXP
                    score_count[me_chk_id][STHREE] += 1
            elif left_empty or right_empty:  # PMMMX, XMMMP
                score_count[me_chk_id][STHREE] += 1
        # Incoming Four: MMXMM, only check right direction
        # Live Three: XMXMMX, XMMXMX the two types can both exist
        # Slept Three: PMXMMX, XMXMMP, PMMXMX, XMMXMP
        # Live Two: XMMX
        # Slept Two: PMMX, XMMP
        elif me_range == 2:
            left_empty = left_three = right_three = False
            right_empty = right_idx < len_line - 1 and line[right_idx + 1] == 0
            if left_idx > 1 and line[left_idx - 1] == 0:
                if left_idx > 2 and line[left_idx - 2] == me_chk_id:
                    self.__set_record(indices, left_idx - 2, left_idx - 1, direction)
                    has_left_3 = left_idx > 3
                    if has_left_3 and line[left_idx - 3] == 0:
                        if right_empty:  # XMXMMX
                            score_count[me_chk_id][THREE] += 1
                        else:  # XMXMMP
                            score_count[me_chk_id][STHREE] += 1
                        left_three = True
                    elif not has_left_3 or line[left_idx - 3] != me_chk_id:  # PMXMMX
                        if right_empty:
                            score_count[me_chk_id][STHREE] += 1
                            left_three = True
                left_empty = True
            if right_empty:
                if right_idx < len_line - 2 and line[right_idx + 2] == me_chk_id:
                    has_right_3 = right_idx < len_line - 3
                    if has_right_3 and line[right_idx + 3] == me_chk_id:  # MMXMM
                        self.__set_record(indices, right_idx + 1, right_idx + 2, direction)
                        score_count[me_chk_id][SFOUR] += 1
                        right_three = True
                    elif has_right_3 and line[right_idx + 3] == 0:
                        # setRecord(self, x, y, right_idx+1, right_idx+2, dir_index, dir)
                        if left_empty:  # XMMXMX
                            score_count[me_chk_id][THREE] += 1
                        else:  # PMMXMX
                            score_count[me_chk_id][STHREE] += 1
                        right_three = True
                    elif left_empty:  # XMMXMP
                        score_count[me_chk_id][STHREE] += 1
                        right_three = True
            if left_three or right_three:
                pass
            elif left_empty and right_empty:  # XMMX
                score_count[me_chk_id][TWO] += 1
            elif left_empty or right_empty:  # PMMX, XMMP
                score_count[me_chk_id][STWO] += 1
        # Live Two: XMXMX, XMXXMX only check right direction
        # Slept Two: PMXMX, XMXMP
        elif me_range == 1:
            left_empty = False
            has_right_1 = right_idx < len_line - 1
            right_empty = has_right_1 and line[right_idx + 1] == 0
            if left_idx > 1 and line[left_idx - 1] == 0:
                if left_idx > 2 and line[left_idx - 2] == me_chk_id:
                    if left_idx > 3 and line[left_idx - 3] == 0:
                        if right_empty:
                            pass
                        elif not has_right_1 or line[right_idx + 1] != me_chk_id:  # XMXMP
                            score_count[me_chk_id][STWO] += 1
                left_empty = True
            if right_empty:
                if right_idx < len_line - 2:
                    if line[right_idx + 2] == me_chk_id:
                        if right_idx < len_line - 3 and line[right_idx + 3] == 0:
                            if left_empty:  # XMXMX
                                # setRecord(self, x, y, left_idx, right_idx+2, dir_index, dir)
                                score_count[me_chk_id][TWO] += 1
                            else:  # PMXMX
                                score_count[me_chk_id][STWO] += 1
                    elif line[right_idx + 2] == 0:
                        if right_idx < len_line - 4 and line[right_idx + 3] == me_chk_id and line[right_idx + 4] == 0:
                            # XMXXMX
                            score_count[me_chk_id][TWO] += 1

    def __set_record(self, indices, left: int, right: int, direction: int):
        # Mark slots [left, right] of this line as already counted for
        # ``direction`` so __check_point will not rescan them.
        assert (type(indices) == tuple)
        tr, tc = indices
        new_tr = self.__filter_range(tr, left, right) if type(tr) == range else tr
        new_tc = self.__filter_range(tc, left, right) if type(tc) == range else tc
        self.__temp_record[new_tr, new_tc, direction] = 1

    def __filter_range(self, r: range, left: int, right: int):
        # Slice a range object to positions [left, right], preserving step
        # direction. (A range's step is always a non-zero int; the None
        # check below is defensive only.)
        step = r.step
        if r.step is None:
            step = 1
        if step > 0:
            return range(r[left], r[right] + 1, step)
        elif step < 0:
            return range(r[left], r[right] - 1, step)
|
from airflow.models import DAG
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.operators.python import ShortCircuitOperator
from airflow.utils.dates import days_ago
from datetime import datetime
from call_back.notify import succes_callback,failure_callback
from call_back.test_call_back import test_callback
# Default arguments applied to every task in the DAG.
args = {
    'owner': 'airflow',
    'depends_on_past': False,
    "email":["abhinavk1236@gmail.com"],
    "email_on_failure": True
}
# Daily DAG demonstrating trigger rules downstream of a short-circuited task.
# NOTE(review): BranchPythonOperator and days_ago are imported above but
# unused in this module -- confirm before removing.
dags=DAG(
    dag_id='dag_trigger_rule',
    schedule_interval='@daily',
    start_date=datetime(2021,8,8,0,0,0),
    default_args=args,
    tags=['example'],
    on_success_callback=succes_callback,
    on_failure_callback=failure_callback,
    catchup=False
)
with dags as dag:
    # ``start`` always returns False, so everything downstream is skipped;
    # join_1's "none_failed_or_skipped" trigger rule then decides whether
    # the rest of the chain runs.
    start = ShortCircuitOperator(task_id="start", python_callable=lambda: False)
    skip_task=DummyOperator(task_id="skip_task")
    trigger_rule = DummyOperator(task_id="join_1", trigger_rule="none_failed_or_skipped")
    task_1 = DummyOperator(task_id="true_1")
    end = DummyOperator(task_id="false_1")
    start>>skip_task>>trigger_rule>>task_1>>end
|
# Generated by Django 3.1.1 on 2020-09-21 12:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Squashed migration replacing 0004-0006: points the membership FKs at
    related_name='memberships', enforces one membership per (project, member),
    and adds the Project.members many-to-many through ProjectMembership."""

    replaces = [('projects', '0004_auto_20200920_1853'), ('projects', '0005_auto_20200921_1210'), ('projects', '0006_project_members')]

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('projects', '0003_auto_20200920_1512'),
    ]

    operations = [
        migrations.AlterField(
            model_name='projectmembership',
            name='member',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='projectmembership',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='projects.project'),
        ),
        migrations.AlterUniqueTogether(
            name='projectmembership',
            unique_together={('project', 'member')},
        ),
        migrations.AddField(
            model_name='project',
            name='members',
            field=models.ManyToManyField(through='projects.ProjectMembership', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
import sys, os
sys.path.append(os.path.abspath(".."))
import spidev
import time
import audioop
import pyaudio
import numpy as np
from math import trunc
from matplotlib import pyplot as plt
from utils import getInputDeviceID, getMicDeviceID, getTimeValues
# Open SPI bus 0, chip-select 1 for the digital pot.
spi = spidev.SpiDev()
spi.open(0, 1)
spi.max_speed_hz = 7629  # conservative SPI clock rate
# Split an integer input into a two byte array to send via SPI
# Split an integer input into a two byte array to send via SPI
def write_pot(input):
    """Send a 16-bit value to the digital pot as [high byte, low byte]."""
    high_byte = input >> 8
    low_byte = input & 0xFF
    spi.xfer([high_byte, low_byte])
def mic_callback(mic_data, frame_count, time_info, status):
    """PyAudio stream callback: record the RMS power of each mic buffer."""
    # 2 = sample width in bytes (paInt16 samples).
    micPower = audioop.rms(mic_data, 2)
    micPowerData.append(micPower)  # module-level accumulator
    return(mic_data, pyaudio.paContinue)
def calibrate(plot=False):
    """Step the pot through six levels while recording mic power, then print
    the average mic power measured for each level.

    :param plot: when True, also plot mic power over time.
    """
    # Begin main thread of code
    audio = pyaudio.PyAudio()
    # FOR MAC - built-in mic has device ID 0, USB Audio device has device ID 2
    # FOR PI - input audio has device ID 2, mic audio has device ID 3
    # Open input stream source
    # Open mic stream souce
    micStream = audio.open(format=FORMAT,
                           input_device_index=getMicDeviceID(audio),
                           channels=1,
                           rate=RATE,
                           input=True,
                           output=True,
                           frames_per_buffer=CHUNK,
                           stream_callback=mic_callback)
    micStream.start_stream()
    while micStream.is_active():
        print("Beginning signal...")
        # Six pot levels, two seconds each, stepping from 0x11ff down to 0x1100.
        write_pot(0x11ff)
        time.sleep(2)
        write_pot(0x11cc)
        time.sleep(2)
        write_pot(0x1199)
        time.sleep(2)
        write_pot(0x1166)
        time.sleep(2)
        write_pot(0x1133)
        time.sleep(2)
        write_pot(0x1100)
        time.sleep(2)
        # Stopping the stream makes is_active() False, so the loop runs once.
        # NOTE(review): nesting reconstructed -- confirm against the original
        # file's indentation.
        micStream.stop_stream()
    micStream.close()
    audio.terminate()
    print("Finished listening to calibration signal...")
    #m, b = np.polyfit(calibratePowerData, micPowerData[0:len(calibratePowerData)], 1)
    # Average the recorded mic power over six equal time slices, one per level.
    interval = trunc(len(micPowerData)/6)
    averages = []
    index = 0
    for i in range (0, 6):
        section = micPowerData[index:index+interval]
        averages.append(sum(section)/len(section))
        index += interval
    print(averages)
    print(interval)
    if plot:
        micTimeValues = getTimeValues(RATE, CHUNK, len(micPowerData))
        # Mic power over time plot
        micPowerFig = plt.figure(figsize=(30,4))
        micPowerFig.suptitle('Mic Power over Time', fontsize=14, fontweight='bold')
        plt.plot(micTimeValues, micPowerData)
        plt.xlabel('Time (s)')
        plt.ylabel('UNITS')
        plt.show()
# Audio capture configuration shared by the callback and calibrate().
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
CHUNK = 1024
RECORD_SECONDS = 10
calibratePowerData = []
micPowerData = []  # filled by mic_callback while the stream runs
calibrate(plot=True)
# Repeatedly switch a MCP4151 digital pot off then on
|
import requests
from bs4 import BeautifulSoup
from random import choice
from Proxy import proxies
import xlsxwriter
from time import sleep
def get_html(url):
    """GET ``url`` using the module-level ``user`` headers and ``proxy``
    settings; return the page text decoded as UTF-8."""
    r = requests.get(url, headers=user, proxies=proxy)
    r.encoding = 'utf8'
    return r.text
def get_soup(url):
    """Parse an HTML string into a BeautifulSoup tree.

    NOTE(review): the parameter receives raw HTML despite its name ``url``,
    and ``print(soup)`` looks like leftover debug output -- confirm before
    renaming/removing.
    """
    soup = BeautifulSoup(url, 'html.parser')
    print(soup)
    return soup
def parser(url):
    """Fetch ``url`` and return all ``div.main`` elements on the page."""
    soup = get_soup(get_html(url))
    elems = soup.find_all('div', class_='main')
    return elems
def page_data():
    """Return (as a string) the text of the 7th anchor after the pager block
    on page 2 of the Banggood electronics listing."""
    sleep(1)  # throttle requests
    url = 'https://ru.banggood.com/Wholesale-Electronics-c-1091-0-1-1-36-0_page2.html'
    soup = get_soup(get_html(url))
    elems = soup.find('div', class_='page_num').find_all_next('a')[6].getText()
    return str(elems)
def write_excel(array):
    """Write two parallel columns of scraped data to Banggood.xlsx.

    :param array: sequence whose first two items are the values for
        spreadsheet columns A and B respectively.
    """
    # Fix: the original filename had a trailing space ('Banggood.xlsx '),
    # producing an awkwardly named file.
    workbook = xlsxwriter.Workbook('Banggood.xlsx')
    worksheet = workbook.add_worksheet()
    worksheet.write_column('A1', array[0])  # write down column A
    worksheet.write_column('B1', array[1])  # write down column B
    workbook.close()
# Load the user-agent rotation list and proxy settings.
useragents = open('useragents.txt').read().split('\n')
proxy = proxies()
for i in range(0, 5):
    # Rotate the user-agent each pass.
    # NOTE(review): the visible loop body only rebuilds the headers -- the
    # scraping calls appear to be missing or live beyond this excerpt.
    user = {'user-agent': choice(useragents), 'accept': ''}
import redis
import os
import sdk
r = redis.StrictRedis(host=sdk.config.state.host,
port=sdk.config.state.port, db=0)
def load(appname, state):
    """Loads app state from Jarvis into |state|.

    Returns:
        Flag indicating whether or not a saved state was found.
    """
    serialized = r.get(appname)
    if not serialized:
        # Nothing stored under this app name.
        return False
    state.ParseFromString(serialized)
    return True
def update(appname, new_state):
    """Persist |new_state| for |appname| into Jarvis."""
    r.set(appname, new_state.SerializeToString())
|
# OWASP-Why-Random-Matters demo
from MT19937 import Random
if __name__ == "__main__":
    # Seed the demo PRNG with a fixed value so runs are reproducible.
    rnd = Random(100)
    # Bug fix: the original used the Python 2 print statement
    # (`print rnd.get()`), which is a SyntaxError under Python 3.
    print(rnd.get())
def expand_attrs_dict(attrs_dict, *keys):
    """Return a copy of *attrs_dict* where each named key's value is
    replaced by that value's ``__dict__``.

    Keys whose values have no ``__dict__`` (e.g. ints, strings) are left
    untouched; a key missing from *attrs_dict* raises KeyError.
    """
    expanded = attrs_dict.copy()
    for name in keys:
        value = attrs_dict[name]
        if hasattr(value, '__dict__'):
            expanded[name] = value.__dict__
    return expanded
def deepupdate_attrs(obj, attr_dict):
    """Recursively overwrite *obj*'s attributes from the (possibly nested)
    plain dict *attr_dict* and return *obj*.

    Bug fix: the original rebound the local ``obj`` to the recursive
    call's return value, so after the first nested attribute every later
    iteration read/wrote the *child* object's __dict__, and the function
    returned the last nested child instead of the object passed in.
    """
    for attr in obj.__dict__:
        try:
            # Recurse when the value is itself an object with a __dict__;
            # the recursive call raises AttributeError for plain values
            # (int, str, ...), which is handled below.
            deepupdate_attrs(obj.__dict__[attr], attr_dict[attr])
        except AttributeError:
            # Leaf value: replace it wholesale from attr_dict.
            # NOTE(review): a key missing from attr_dict raises KeyError
            # here (unchanged from the original) -- confirm intended.
            obj.__dict__[attr] = attr_dict[attr]
    return obj
|
# Scene fragment: a fatal "monster attack" sequence (death, ending #4).
# The description below is player-facing Russian prose; it is runtime
# data, so it is intentionally left untranslated.
{
"description": """
Внезапно руки мишки превратились в длинные щупальца и ринулись в вашу сторону.
От страха все мысли улетучились из вашей головы и вы могли лишь стоять и смотреть в глаза приближающейся старухе с косой.
Склизкие щупальца обвили вас и потянули к себе.
В этот момент вы очнулись и стали вырываться, брыкаться, царапаться и громко кричать, но пришельцу было все равно.
Монстр притягивал вас к себе, щупальца сжимались все сильнее, затрудняя дыхание, а из пасти на вас повеяло жутким смрадом.
Вы зажмурились и пытались думать о чем-нибудь приятном, но любая мысль всегда прерывалась яркими глазами и жуткой улыбкой.
Боль резко пронзила вас.
Она почти моментально охватило все ваше тело выше живота, быстро перерастая в настоящую агонию.
Вы открыли глаза и увидели лишь темноту и смыкающиеся лезвия зубов. Потом была новая вспышка боли и пустота...
""",
"actions": {
"death": True,
"ending": 4
},
}
|
import json
import urllib
import stripe
import requests
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.conf import settings
from django.views import View
from django.views.generic import ListView, DetailView
from django.http import JsonResponse
from django.shortcuts import redirect
from .models import Subscription,CustomUser
class SubscriptionListView(ListView):
    """List all subscriptions; anonymous visitors are sent to login."""
    model = Subscription
    template_name = 'subscriptions/subscription_list.html'

    def get_context_data(self, *args, **kwargs):
        return super().get_context_data(*args, **kwargs)

    def render_to_response(self, context, **response_kwargs):
        # Gate the page behind authentication.
        if not self.request.user.is_authenticated:
            return HttpResponseRedirect(reverse('login'))
        return super().render_to_response(context, **response_kwargs)
class SubscriptionDetailView(DetailView):
    """Show one subscription; exposes the Stripe publishable key to the
    template as 'key'."""
    model = Subscription
    template_name = 'subscriptions/subscription_detail.html'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context['key'] = settings.STRIPE_PUBLISHABLE_KEY
        return context

    def render_to_response(self, context, **response_kwargs):
        # Gate the page behind authentication.
        if not self.request.user.is_authenticated:
            return HttpResponseRedirect(reverse('login'))
        return super().render_to_response(context, **response_kwargs)
class SubscriptionChargeView(View):
    """Charge the logged-in user for a subscription via Stripe.

    Expects a JSON body with: subscription_id, user, token, amount,
    description. Returns {'status': 'success'} (202) on success and
    {'status': 'error'} (500) on any failure.

    NOTE(review): the charge amount comes from the client-supplied JSON;
    consider looking it up server-side from the Subscription instead.
    """

    def post(self, request, *args, **kwargs):
        stripe.api_key = settings.STRIPE_SECRET_KEY
        json_data = json.loads(request.body)
        subscription_id = Subscription.objects.filter(id=json_data['subscription_id']).first()
        print("Subscription--", subscription_id)
        user_id = json_data['user']
        try:
            customer = get_or_create_customer(
                self.request.user.email,
                json_data['token'],
            )
            subscription_amount = json_data['amount']
            # Bug fix: `charge` was previously left unbound when
            # stripe.Charge.create raised, so the `if charge:` check
            # produced a NameError instead of a clean error response.
            charge = None
            try:
                charge = stripe.Charge.create(
                    amount=subscription_amount,
                    currency='inr',
                    customer=customer.id,
                    description=json_data['description']
                )
            except Exception as e:
                print('Error while charge create is:', e)
            if charge is None:
                # Charge failed: report an error (the old code reached the
                # same 500 response via the NameError fallthrough).
                return JsonResponse({'status': 'error'}, status=500)
            # update user with subscription
            customUser = CustomUser.objects.get(id=user_id)
            print('Retrieved user for id= ', customUser)
            customUser.subscription = subscription_id
            customUser.is_subscription_active = True
            customUser.save(update_fields=['subscription', 'is_subscription_active'])
            return JsonResponse({
                'status': 'success'
            }, status=202)
        except Exception as e:
            print(e)
            return JsonResponse({'status': 'error'}, status=500)
# helpers
def get_or_create_customer(email, token):
    # Create a Stripe customer with the given card token as its source.
    # NOTE(review): despite the name, this always *creates* a new Stripe
    # customer -- no lookup of an existing customer is performed here.
    stripe.api_key = settings.STRIPE_SECRET_KEY
    return stripe.Customer.create(
        email=email,
        source=token,
    )
class StripeAuthorizeView(View):
    """Redirect the logged-in user to Stripe Connect's OAuth page."""

    def get(self, request):
        if not self.request.user.is_authenticated:
            return HttpResponseRedirect(reverse('login'))
        query = urllib.parse.urlencode({
            'response_type': 'code',
            'scope': 'read_write',
            'client_id': settings.STRIPE_CONNECT_CLIENT_ID,
            # NOTE(review): hard-coded dev callback -- confirm for production.
            'redirect_uri': 'http://localhost:8000/users/oauth/callback',
        })
        return redirect('https://connect.stripe.com/oauth/authorize' + '?' + query)
class StripeAuthorizeCallbackView(View):
    """Handle Stripe Connect's OAuth redirect and store the seller tokens."""

    def get(self, request):
        code = request.GET.get('code')
        if code:
            data = {
                'client_secret': settings.STRIPE_SECRET_KEY,
                'grant_type': 'authorization_code',
                'client_id': settings.STRIPE_CONNECT_CLIENT_ID,
                'code': code
            }
            url = 'https://connect.stripe.com/oauth/token'
            # NOTE(review): params= places the client secret in the query
            # string; Stripe's token endpoint expects form data (data=) --
            # confirm before changing, as both have worked historically.
            resp = requests.post(url, params=data)
            # Fix: parse the response body once instead of re-parsing the
            # same JSON three times.
            resp_data = resp.json()
            print(resp_data)
            # add stripe info to the seller
            stripe_user_id = resp_data['stripe_user_id']
            stripe_access_token = resp_data['access_token']
            seller = CustomUser.objects.filter(user_id=self.request.user.id).first()
            # NOTE(review): .first() can return None, which would raise
            # AttributeError below -- confirm a CustomUser always exists.
            seller.stripe_access_token = stripe_access_token
            seller.stripe_user_id = stripe_user_id
            seller.save()
        url = reverse('home')
        response = redirect(url)
        return response
class CancelSubscriptions(View):
    """Deactivate a user's subscription flag (PATCH with JSON body)."""

    def patch(self, request):
        payload = json.loads(request.body)
        user_id = payload['user']
        print('userid=', user_id)
        account = CustomUser.objects.get(id=user_id)
        account.is_subscription_active = False
        account.save(update_fields=['is_subscription_active'])
        return render(request, 'home.html')
class ResumeSubscriptions(View):
    """Re-activate a user's subscription flag (PATCH with JSON body)."""

    def patch(self, request):
        payload = json.loads(request.body)
        user_id = payload['user']
        print('userid=', user_id)
        account = CustomUser.objects.get(id=user_id)
        account.is_subscription_active = True
        account.save(update_fields=['is_subscription_active'])
        return render(request, 'home.html')
class RegisterUser(View):
    """Create a staff CustomUser from the posted registration form."""

    def post(self, request):
        data = request.POST
        user = CustomUser.objects.create_user(data['email'], data['password'])
        # Copy the remaining profile fields verbatim from the form.
        for field in ('firstname', 'lastname', 'address', 'company', 'dob'):
            setattr(user, field, data[field])
        user.is_staff = True
        user.save()
        msg = 'You are registered successfully...!'
        return render(request, 'register.html', {"msg": msg})
class SignUp(View):
    """Serve the registration page."""

    def get(self, request):
        template = 'register.html'
        return render(request, template)
def email_verify(request):
    """Return {'msg': bool} indicating whether the posted email exists."""
    payload = json.loads(request.body)
    match = CustomUser.objects.filter(email=payload['email']).first()
    return JsonResponse({'msg': bool(match)})
|
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
enable_verbose_mode, is_verbose_mode, get_target_arch
from lib.util import execute_stdout, scoped_cwd
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
def main():
    # Bootstrap the checkout: submodules, optional local libcc build,
    # clang, vendored python libs, brightray, version header, gyp update.
    os.chdir(SOURCE_ROOT)
    args = parse_args()
    defines = args_to_defines(args)
    if args.verbose:
        enable_verbose_mode()
    update_submodules()
    # libcc paths default to whatever was passed on the CLI; they are
    # overridden below when we build libchromiumcontent ourselves.
    libcc_source_path = args.libcc_source_path
    libcc_shared_library_path = args.libcc_shared_library_path
    libcc_static_library_path = args.libcc_static_library_path
    if args.build_libchromiumcontent:
        build_libchromiumcontent(args.verbose, args.target_arch)
        dist_dir = os.path.join(VENDOR_DIR, 'brightray', 'vendor',
                                'libchromiumcontent', 'dist', 'main')
        libcc_source_path = os.path.join(dist_dir, 'src')
        libcc_shared_library_path = os.path.join(dist_dir, 'shared_library')
        libcc_static_library_path = os.path.join(dist_dir, 'static_library')
    if PLATFORM != 'win32':
        # Download prebuilt clang binaries.
        update_clang()
    setup_python_libs()
    bootstrap_brightray(args.dev, BASE_URL, args.target_arch,
                        libcc_source_path, libcc_shared_library_path,
                        libcc_static_library_path)
    create_chrome_version_h()
    run_update(defines, args.msvs)
def update_submodules():
    """Sync and (re)initialise every git submodule recursively."""
    for cmd in (['git', 'submodule', 'sync', '--recursive'],
                ['git', 'submodule', 'update', '--init', '--recursive']):
        execute_stdout(cmd)
def setup_python_libs():
    """Build the vendored pure-Python dependencies in place."""
    for lib in ('requests',):
        build_dir = os.path.join(VENDOR_DIR, lib)
        with scoped_cwd(build_dir):
            execute_stdout([sys.executable, 'setup.py', 'build'])
def bootstrap_brightray(is_dev, url, target_arch, libcc_source_path,
                        libcc_shared_library_path,
                        libcc_static_library_path):
    """Run brightray's bootstrap script, forwarding the libcc paths when
    all three of them were provided."""
    bootstrap = os.path.join(VENDOR_DIR, 'brightray', 'script', 'bootstrap')
    args = [
        '--commit', LIBCHROMIUMCONTENT_COMMIT,
        '--target_arch', target_arch,
        url,
    ]
    if is_dev:
        args = ['--dev'] + args
    # Idiom fix: compare against None with `is not None` rather than `!=`.
    # All three libcc paths must be supplied together to take effect.
    if (libcc_source_path is not None and
            libcc_shared_library_path is not None and
            libcc_static_library_path is not None):
        args += ['--libcc_source_path', libcc_source_path,
                 '--libcc_shared_library_path', libcc_shared_library_path,
                 '--libcc_static_library_path', libcc_static_library_path]
    execute_stdout([sys.executable, bootstrap] + args)
def run_update(defines, msvs):
    """Invoke script/update.py, forwarding gyp defines and the msvs flag."""
    cmd = [sys.executable, os.path.join(SOURCE_ROOT, 'script', 'update.py')]
    if defines:
        cmd.extend(['--defines', defines])
    if msvs:
        cmd.append('--msvs')
    execute_stdout(cmd)
def create_chrome_version_h():
    """Generate src/app/common/chrome_version.h from libcc's VERSION file."""
    version_file = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                                'libchromiumcontent', 'VERSION')
    target_file = os.path.join(SOURCE_ROOT, 'src', 'app', 'common', 'chrome_version.h')
    template_file = os.path.join(SOURCE_ROOT, 'script', 'chrome_version.h.in')
    with open(version_file, 'r') as f:
        version = f.read()
    with open(template_file, 'r') as f:
        template = f.read()
    content = template.replace('{PLACEHOLDER}', version.strip())
    # We update the file only if the content has changed (ignoring line ending
    # differences).
    should_write = True
    if os.path.isfile(target_file):
        with open(target_file, 'r') as f:
            # Bug fix: the comparison previously stripped the letter 'r'
            # (replace('r', '')) instead of carriage returns ('\r'), so any
            # content containing 'r' compared incorrectly.
            should_write = f.read().replace('\r', '') != content.replace('\r', '')
    if should_write:
        with open(target_file, 'w') as f:
            f.write(content)
def build_libchromiumcontent(verbose, target_arch):
    """Run script/build-libchromiumcontent.py for the given architecture."""
    cmd = [sys.executable,
           os.path.join(SOURCE_ROOT, 'script', 'build-libchromiumcontent.py')]
    if verbose:
        cmd.append('-v')
    cmd += ['--target_arch', target_arch]
    execute_stdout(cmd)
def update_clang():
    """Run the helper script that fetches prebuilt clang binaries."""
    script = os.path.join(SOURCE_ROOT, 'script', 'update_clang.sh')
    execute_stdout([script])
def parse_args():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='Bootstrap this project')
    add = parser.add_argument  # local alias; option definitions unchanged
    add('-v', '--verbose', action='store_true',
        help='Prints the output of subprocesses')
    add('-d', '--dev', action='store_true',
        help='Do not download static_library build')
    add('--msvs', action='store_true',
        help='Generate Visual Studio project')
    add('--target_arch', default=get_target_arch(),
        help='Manually specify the arch to build for')
    add('--clang_dir', default='', help='Path to clang binaries')
    add('--disable_clang', action='store_true',
        help='Use compilers other than clang for building')
    add('--build_libchromiumcontent', action='store_true',
        help='Build local version of libchromiumcontent')
    add('--libcc_source_path', required=False,
        help='The source path of libchromiumcontent. '
             'NOTE: All options of libchromiumcontent are '
             'required OR let electron choose it')
    add('--libcc_shared_library_path', required=False,
        help='The shared library path of libchromiumcontent.')
    add('--libcc_static_library_path', required=False,
        help='The static library path of libchromiumcontent.')
    add('--defines', default='',
        help='The build variables passed to gyp')
    return parser.parse_args()
def args_to_defines(args):
    """Translate parsed CLI options into a space-separated gyp `defines`
    string (seeded with any user-supplied --defines)."""
    parts = [args.defines]
    if args.disable_clang:
        parts.append('clang=0')
    if args.clang_dir:
        parts.append('make_clang_dir=' + args.clang_dir)
        parts.append('clang_use_chrome_plugins=0')
    return ' '.join(parts)
if __name__ == '__main__':
    # main() returns None on success, which sys.exit treats as status 0.
    sys.exit(main())
|
from core.models import Todo
from core.forms import Todoform
from django.shortcuts import redirect, render
from core.forms import Todoform
def home(request):
    """List all todos and handle creation of a new one via POST."""
    todos = Todo.objects.all()
    form = Todoform()
    if request.method == 'POST':
        form = Todoform(request.POST)
        if form.is_valid():
            form.save()
            return redirect('home')
    # GET, or invalid POST: re-render with the (possibly bound) form.
    return render(request, 'home.html', {'form': form, 'todos': todos})
def update(request, todo_id):
    """Edit the todo identified by *todo_id*."""
    todo = Todo.objects.get(id=todo_id)
    if request.method == 'POST':
        form = Todoform(request.POST, instance=todo)
        if form.is_valid():
            form.save()
            return redirect('home')
    else:
        # GET: pre-populate the form from the existing instance.
        form = Todoform(instance=todo)
    return render(request, 'update.html', {'form': form})
def delete(request, todo_id):
    """Delete a todo (POST only) and return to the home page.

    Bug fix: non-POST requests previously fell off the end and returned
    None, which Django rejects (a view must return an HttpResponse).
    Now every request ends with a redirect to 'home'.
    """
    if request.method == 'POST':
        Todo.objects.get(id=todo_id).delete()
    return redirect('home')
|
"""
Class representing transactions in Bank System.
"""
from mysql_engine import *
class Transactions:
    """Represents transactions in the Bank System and prints reports."""

    # Transaction record fields (populated elsewhere).
    transaction_type = None
    date = None
    amount = None
    account_number = None
    person = None

    def create_transaction(self):
        # Not implemented yet.
        pass

    def get_all_transactions(self):
        """Print every row of the `transaction` table, one line per row."""
        mysql_obj = MySqlEngine()
        result = mysql_obj.get_data("transaction")
        for i in result:
            # Consistency fix: labels now carry the same leading space and
            # trailing colon as get_transactions(), so fields no longer run
            # together (end="" suppresses the newline between prints).
            print("Account Number:", i[5], end="")
            print(" Transaction Type:", i[1], end="")
            print(" Amount:", i[2], end="")
            print(" Date:", i[3], end="")
            print(" Person:", i[4], end="")
            print(" Transaction Id Is:", i[0])

    def get_transactions(self, search, filter):
        """Print transactions where `search`(date) equals `filter`.

        NOTE(review): the condition is interpolated unescaped into SQL via
        str.format -- only pass trusted values here (parameterise the query
        inside MySqlEngine if possible).
        """
        mysql_obj = MySqlEngine()
        condition = " where {}(date)={}".format(search, filter)
        result = mysql_obj.get_data("transaction", "*", condition)
        if result is None:
            print("No Transaction Found")
        else:
            for i in result:
                print("Account Number:", i[5], end="")
                print(" Transaction Type:", i[1], end="")
                print(" Amount:", i[2], end="")
                print(" Date:", i[3], end="")
                print(" Person:", i[4], end="")
                print(" Transaction Id Is:", i[0])
|
#!/usr/bin/python
#
# Ceph - scalable distributed file system
#
# Copyright (C) Inktank
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1, as published by the Free Software
# Foundation. See file COPYING.
#
"""
This is intended to be a simulation of processor speed and throughput
"""
from units import MEG, GIG, SECOND
class CPU:
    """ Performance Modeling NIC or HBA Simulation """

    def __init__(self, name, cores=1, mem=2 * GIG, speed=3 * GIG, ddr=1600):
        """ create an interface simulation
            name -- name of the simulated processor
            cores -- number of cores per processor
            mem -- number of bytes of memory per processor
            speed -- clock speed in hz
            ddr -- memory transfer rate (in MT/s)
        """
        HYPER_T = 1.3           # hyper-threading effectivieness
        BUS_WIDTH = 64          # bus width (bits)
        self.desc = "%d-core %3.1fGhz %s" % (cores, speed / GIG, name)
        self.cores = cores          # cores per processor
        self.mem_size = mem         # memory per processor
        self.clock = speed          # processor clock speed
        self.mem_speed = ddr        # memory speed
        self.hyperthread = HYPER_T  # hyperthreading multiplier
        width = BUS_WIDTH / 8       # bus width (bytes)
        # estimated time for key operations
        self.bus_bw = speed * width      # max bus transfer rate (B/s)
        self.mem_bw = ddr * MEG * width  # max mem xfr (B/s)
        # CPU calibration constants ... expect these to be over-ridden
        self.I_PER_HZ = 6.0  # scaling instruction speed to hz
        self.THREAD = 10     # FIX - thread switch time (us)
        self.PROC = 20       # FIX - process switch time (us)
        self.DMA = 30        # FIX - DMA setup/completion time (us)

    def mem_read(self, bytes):
        """ return the elapsed time to read that amount of uncached data """
        bw = min(self.bus_bw, self.mem_bw)
        return float(bytes) * SECOND / bw

    def mem_write(self, bytes):
        """ return the elapsed time to write that amount of data """
        bw = min(self.bus_bw, self.mem_bw)
        return float(bytes) * SECOND / bw

    def process(self, bytes):
        """ return the elapsed time to process that amount of data """
        return float(bytes) * SECOND / self.bus_bw

    def execute(self, instrs):
        """ return the elapsed time to execute # random instructions """
        return float(instrs) * SECOND / (self.clock * self.I_PER_HZ)

    def thread_us(self):
        """ return the elapsed time for a thread switch """
        return self.THREAD

    def proc_us(self):
        """ return the elapsed time for a process switch """
        return self.PROC

    def dma_us(self):
        """ return the elapsed time to set-up/complete a DMA operation"""
        return self.DMA

    def queue_length(self, rho, max_depth=1000):
        """ expected average queue depth as a function of load
            rho -- average fraction of time CPU is busy
            max_depth -- the longest the queue can possibly be
        """
        if (rho >= 1):
            return max_depth
        else:
            # M/M/1 expected queue length, capped at max_depth
            avg = rho / (1 - rho)
            return avg if avg < max_depth else max_depth

    #
    # these operations would normally be a function of CPU speed, but
    # may also be off-loaded, improving speed and reducing CPU loading
    #
    def sha_time(self, bytes, width=40):
        """ return the elapsed time for a SHA computation
            bytes -- bytes to be hashed
            width -- desired output hash width
        """
        x = 43  # FIX - recalibrate SHA
        t_cpu = self.execute(x * bytes)
        t_read = self.mem_read(bytes)
        t_write = self.mem_write(width)
        return t_cpu + t_read + t_write

    def sha_cpu(self, bytes, width=40):
        """ return the CPU time for a SHA computation
            bytes -- bytes to be hashed
            width -- desired output hash width
        """
        # w/o acceleration CPU time = clock time
        return self.sha_time(bytes, width)

    def compress_time(self, bytes, comp=2):
        """ return the elapsed time for an LZW-like compression
            bytes -- input block size
            comp -- expected compression factor
        """
        x = 68  # FIX - recalibrate compression
        t_cpu = self.execute(x * bytes)
        t_read = self.mem_read(bytes)
        t_write = self.mem_write(bytes / comp)
        return t_cpu + t_read + t_write

    def compress_cpu(self, bytes, comp=2):
        """ return the cpu time for an LZW-like compression """
        # w/o acceleration CPU time = clock time
        return self.compress_time(bytes, comp)

    def decompress_time(self, bytes, comp=2):
        """ return the elapsed time for an LZW-like decompression
            bytes -- expected output block size
            comp -- expected compression factor
        """
        x = 33  # FIX - recalibrate decompression
        t_cpu = self.execute(x * bytes)
        t_read = self.mem_read(bytes / comp)
        t_write = self.mem_write(bytes)
        return t_cpu + t_read + t_write

    def decompress_cpu(self, bytes, comp=2):
        """ return the cpu time for an LZW-like decompression """
        # w/o acceleration CPU time = clock time
        # Bug fix: `comp` was previously dropped when delegating, so
        # decompress_cpu(b, c) disagreed with decompress_time(b, c)
        # for any c != 2.
        return self.decompress_time(bytes, comp)

    def raid6_time(self, bytes, n=6, m=2):
        """ return the elapsed time for a RAID-6 write computation """
        x = 10  # FIX - recalibrate RAID-6 computation
        t_cpu = self.execute(x * n * bytes)
        t_read = self.mem_read(n * bytes)
        t_write = self.mem_write(m * bytes)
        return t_cpu + t_read + t_write

    def raid6_cpu(self, bytes, n=6, m=2):
        """ return the cpu time for a RAID-6 write computation """
        # w/o acceleration CPU time = clock time
        return self.raid6_time(bytes, n, m)
def makeCPU(dict):
    """ handy function to instantiate a CPU from parameters in a dict """
    defaults = {
        'cpu': 'Essex',
        'speed': 3 * GIG,
        'cores': 1,
        'mem': 2 * GIG,
        'ddr': 1600,
    }
    # Fall back to the default for any parameter not present; membership
    # tests (rather than .get) keep this working for any container that
    # supports `in`, e.g. the empty list used by the unit-test harness.
    params = {}
    for key in ('cpu', 'speed', 'cores', 'mem', 'ddr'):
        params[key] = dict[key] if key in dict else defaults[key]
    return CPU(params['cpu'], speed=params['speed'], cores=params['cores'],
               mem=params['mem'], ddr=params['ddr'])
#
# basic unit test exerciser
#
if __name__ == '__main__':
    # Exercise the model with a default CPU and print latency tables.
    cpu = makeCPU([])  # empty mapping -> all defaults
    print("%s w/%dGB of DDR3-%d RAM" %
          (cpu.desc, cpu.mem_size / GIG, cpu.mem_speed))
    # Bug fix: a bare `print` is a no-op expression under Python 3 (the
    # file otherwise uses print() calls); use print() for blank lines.
    print()
    print(" thread switch %dus" % (cpu.thread_us()))
    print(" process switch %dus" % (cpu.proc_us()))
    print(" DMA start/intr %dus" % (cpu.dma_us()))
    from Report import Report
    r = Report(("mem-rd", "mem-wrt", "process", "instrs"))
    print()
    r.printHeading()
    sizes = [1024, 4096, 128*1024, 1024*1024]
    for bs in sizes:
        mem_r = cpu.mem_read(bs)
        mem_w = cpu.mem_write(bs)
        mem_p = cpu.process(bs)
        mem_x = cpu.execute(bs)
        r.printLatency(bs, (mem_r, mem_w, mem_p, mem_x))
    r = Report(("sha-1", "comp", "decomp", "RAID-6"))
    print()
    r.printHeading()
    sizes = [1024, 4096, 128*1024, 1024*1024]
    for bs in sizes:
        sha_t = cpu.sha_time(bs)
        sha_c = cpu.sha_cpu(bs)
        lzwc_t = cpu.compress_time(bs)
        lzwc_c = cpu.compress_cpu(bs)
        lzwd_t = cpu.decompress_time(bs)
        lzwd_c = cpu.decompress_cpu(bs)
        raid_t = cpu.raid6_time(bs)
        raid_c = cpu.raid6_cpu(bs)
        r.printLatency(bs, (sha_t, lzwc_t, lzwd_t, raid_t))
        r.printLatency(1, (sha_c, lzwc_c, lzwd_c, raid_c))
|
from __future__ import print_function
import os, glob, shutil, re
import numpy as np
# Strings to replace in jdl template
EXEC = '__EXEC__'
INPUTS = '__INPUTS__'
ARGS = '__ARGS__'
MEM = '__MEM__'
# Input maNtuple campaign
indir = '../maNtuples'
#input_campaign = 'Era04Dec2020v1' # updated later with 2018A
input_campaign = 'Era22Jun2021v1' # data, h4g, hgg: redo with mgg95 trgs. [Note:new EB-only AOD skims]
print('>> Input campaign: maNtuples-%s'%input_campaign)
# Input pt wgts campaign
#ptwgts_campaign = 'bkgNoPtWgts-Era04Dec2020v1' # no 2018A, 2016H+2018 failed lumis
#ptwgts_campaign = 'bkgNoPtWgts-Era04Dec2020v2' # 2016H+2018 failed lumis
#ptwgts_campaign = 'bkgNoPtWgts-Era04Dec2020v3' # redo v2 with nVtx, nPU plots
#ptwgts_campaign = 'bkgNoPtWgts-Era22Jun2021v1' # data, h4g, hgg: redo with mgg95 trgs. [Note:new EB-only AOD skims]
#ptwgts_campaign = 'bkgNoPtWgts-Era22Jun2021v2' # v1 but with bin 50MeV [not used]
#ptwgts_campaign = 'bkgNoPtWgts-Era22Jun2021v3' # duplicate of v1 but with SFs on hgg template
ptwgts_campaign = 'bkgNoPtWgts-Era22Jun2021v4' # duplicate of v3, but with fhgg derived from SM br(hgg). Added later: ptwgts shifted down/up by stat uncerts
print('>> Input pt wgts campaign: %s'%ptwgts_campaign)
#ptwgts_subcampaign = 'bdtgtm0p98_relChgIsolt0p05_etalt1p44/nom-nom' # a0nom-a1nom
#ptwgts_subcampaign = 'bdtgtm0p98_relChgIsolt0p05_etalt1p44/nom-inv' # a0nom-a1nom
#ptwgts_subcampaign = 'bdtgtm0p99_relChgIsolt0p05_etalt1p44/nom-nom' # bdt > -0.99
#ptwgts_subcampaign = 'bdtgtm0p96_relChgIsolt0p05_etalt1p44/nom-nom' # bdt > -0.96
#ptwgts_subcampaign = 'bdtgtm0p98_relChgIsolt0p03_etalt1p44/nom-nom' # relChgIso < 0.03
#ptwgts_subcampaign = 'bdtgtm0p98_relChgIsolt0p07_etalt1p44/nom-nom' # relChgIso < 0.07
#ptwgts_subcampaign = 'bdtgtm0p99_relChgIsolt0p03_etalt1p44/nom-nom' # bdt > -0.99, relChgIso < 0.03
#ptwgts_subcampaign = 'bdtgtm0p96_relChgIsolt0p07_etalt1p44/nom-nom' # bdt > -0.96, relChgIso < 0.07 !! optimal
#ptwgts_subcampaign = 'bdtgtm0p97_relChgIsolt0p06_etalt1p44/nom-nom' # bdt > -0.96, relChgIso < 0.07
#ptwgts_subcampaign = 'bdtgtm0p96_relChgIsolt0p09_etalt1p44/nom-nom' # bdt > -0.96, relChgIso < 0.09
#ptwgts_subcampaign = 'bdtgtm0p96_relChgIsolt0p08_etalt1p44/nom-nom' # bdt > -0.96, relChgIso < 0.08
ptwgts_subcampaign = 'bdtgtm0p96_relChgIsolt0p07_etalt1p44/nom-nom' # bdt > -0.96, relChgIso < 0.07 !! optimal
#ptwgts_subcampaign = 'bdtgtm0p96_relChgIsolt0p07_etalt1p44/nom-inv' # bdt > -0.96, relChgIso < 0.07 !! optimal
print('>> Input pt wgts sub-campaign: %s'%ptwgts_subcampaign)
# Output bkg campaign
#this_campaign = 'bkgPtWgts-Era04Dec2020v1' # using bkgNoPtWgts-Era04Dec2020v1/bdtgtm0p98_relChgIsolt0p05_etalt1p44
#this_campaign = 'bkgPtWgts-Era04Dec2020v2' # using bkgNoPtWgts-Era04Dec2020v2/bdtgtm0p98_relChgIsolt0p05_etalt1p44
#this_campaign = 'bkgPtWgts-Era04Dec2020v3' # using bkgNoPtWgts-Era04Dec2020v3/bdtgtm0p98_relChgIsolt0p05_etalt1p44 [same as v2 + nVtx, nPU plots]
#this_campaign = 'bkgPtWgts-Era22Jun2021v1' # maNtuples-Era22Jun2021v1 + bkgNoPtWgts-Era22Jun2021v1/bdtgtm0p96_relChgIsolt0p07_etalt1p44 [mgg95 trgs]
#this_campaign = 'bkgPtWgts-Era22Jun2021v2' # bkgPtWgts-Era22Jun2021v1, bin50MeV
#this_campaign = 'bkgPtWgts-Era22Jun2021v3' # duplicate of v1 but with SFs on hgg template
#this_campaign = 'bkgPtWgts-Era22Jun2021v4' # duplicate of v3, but with fhgg from SM br(hgg)
#this_campaign = 'bkgPtWgts-Era22Jun2021v5' # v4, but with addtl ptwgts shifted down/up by stat uncerts
this_campaign = 'bkgPtWgts-Era22Jun2021v6' # v4, but with pt wgts smoothing (no shifting anymore)
print('>> Output campaign:',this_campaign)
exec_file = 'run_bkg_ptwgts.sh'
#tar_file = 'h2aa.tgz'
#tar_file = 'h2aa-inv.tgz'
#tar_file = 'h2aa_%s.tgz'%ptwgts_subcampaign.split('/')[-1]
#tar_file = 'h2aa_%s_%s.tgz'%(ptwgts_subcampaign.split('/')[-1], ptwgts_subcampaign.split('_')[0]) # bdt scan
#tar_file = 'h2aa_%s_%s.tgz'%(ptwgts_subcampaign.split('/')[-1], ptwgts_subcampaign.split('_')[1]) # relChgIso scan
# Tarball of analysis code shipped with each job; name encodes the
# sub-campaign plus the bdt and relChgIso working points being scanned.
tar_file = 'h2aa_%s_%s_%s.tgz'%(ptwgts_subcampaign.split('/')[-1], ptwgts_subcampaign.split('_')[0], ptwgts_subcampaign.split('_')[1]) # bdt,relChgIso scan
assert os.path.isfile(tar_file), ' !! input tarfile not found: %s'%tar_file
# Use full-Run2-derived pt weights (outputs still produced year by year).
doRun2 = True
run2dir = 'Run2'
# Output jdl directory
cwd = os.getcwd()+'/'
#jdl_folder = 'jdls/%s'%this_campaign
jdl_folder = 'jdls/%s/%s'%(this_campaign, ptwgts_subcampaign)
if doRun2: jdl_folder += '/%s'%run2dir
if not os.path.isdir(jdl_folder):
    os.makedirs(jdl_folder)
print('>> jdl folder:',jdl_folder)
runs = ['2016', '2017', '2018'] # Full Run2 bkg should still by yr-by-yr except using full Run2 pt wgts.
# Main generation loop: one condor jdl per (year, sample, mH region,
# f_SBlow scenario), filled in from the jdls/condor_runbkg.jdl template.
for r in runs:
    print('>> For run:',r)
    #if r != '2016': continue
    # pt weights
    ptwgts_indir = '/store/group/lpchaa4g/mandrews/%s/%s/%s/Weights'%(run2dir if doRun2 else r, ptwgts_campaign, ptwgts_subcampaign)
    print(' .. pt wgts indir:', ptwgts_indir)
    # ntuple IO
    eos_tgtdir = '/store/user/lpchaa4g/mandrews/%s/%s/%s'%(r, this_campaign, ptwgts_subcampaign)
    if doRun2: eos_tgtdir += '/%s'%run2dir
    print(' .. eos tgtdir:', eos_tgtdir)
    # Define samples
    # pt reweighting only applied to data mH-SBlo, mH-SBhi
    #yr = '' if doRun2 else r
    yr = r
    samples = ['data%s'%yr]
    mhregions = {'data%s'%yr: ['sblo', 'sbhi']}
    for sample in samples:
        print(' >> For sample:',sample)
        in_list = '%s/%s/%s_file_list.txt'%(indir, input_campaign, sample)
        print(' .. using input list: %s'%(in_list))
        assert os.path.isfile(in_list), ' !! input maNtuple list not found!'
        for mhregion in mhregions[sample]:
            print(' >> For mhregion:',mhregion)
            # Get f_SBlow scenarios and pt wgt files
            # Each scenario will use different pt wgts
            #flos = [None]
            flo_files = glob.glob('/eos/uscms/%s/*ptwgts.root'%ptwgts_indir)
            # flo_dict: key: f_SBlow, value: LFN filepath to pt wgts file
            # e.g. flo_dict['0.6413'] = /store/.../..flo0.6413_ptwgts.root
            flo_dict = {flo.split('_')[-2].strip('flo'):flo.replace('/eos/uscms/','') for flo in flo_files}
            print(' .. found %d f_SBlow scenarios: %s'%(len(flo_dict), ' '.join(flo_dict.keys())))
            for flo in flo_dict:
                print(' >> For f_SBlow: %s'%flo)
                #flo_str = '%.3f'%flo if flo is not None else str(flo)
                flo_str = flo
                #input_ptwgts = '%s_sb2sr_blind_None_flo%s_ptwgts'%(sample, flo_str)
                #ptwgts_file = '%s/%s.root'%(ptwgts_indir, input_ptwgts)
                #print(' .. using pt wgts: %s'%ptwgts_file)
                ptwgts_file = flo_dict[flo]
                print(' .. using pt wgts: %s'%flo_dict[flo])
                #'''
                # Read in condor config template
                with open('jdls/condor_runbkg.jdl', "r") as template_file:
                    file_data = template_file.read()
                # Replace condor config template strings with sample-specific values
                file_data = file_data.replace(EXEC, cwd+exec_file)
                file_data = file_data.replace(INPUTS, '%s, %s, %s'%(cwd+exec_file, cwd+tar_file, cwd+in_list))
                file_data = file_data.replace(ARGS, '%s %s %s %s %s %s %s'
                    %(sample, mhregion, in_list.split('/')[-1], eos_tgtdir, flo_str, ptwgts_file, tar_file))
                file_data = file_data.replace(MEM, '4800')
                # Write out sample-specific condor config
                with open('%s/condor_runbkg_%s_mh%s_flo%s.jdl'%(jdl_folder, sample, mhregion, flo_str), "w") as sample_file:
                    sample_file.write(file_data)
                #'''
print('>> jdl folder:',jdl_folder)
|
# User Preference Structure & Recommendation Effectiveness Analysis 3.5
import pathlib
import pandas as pd
import numpy as np
from scipy.stats import entropy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_distances
import re
import matplotlib as mpl
import matplotlib.pyplot as plt
'''
# input format
# candidate_user_profile: | User_ID | Song_ID |
# song_profile: | Song_ID | Title | Artist | Genre | Release_Date | Plays_count | Likes_count
# target_user_profile: | User_ID | Song_ID |
# scale_data: | User_ID | Pref_Ques01:scale_score |...| Pref_Ques20:scale_score | 生理性別 | 年齡 | 教育程度 | 專業領域 |
# evaluation_data: | User_ID | Rec_Type | Latin_Square_Order
# | Song_01_Known:known_or_unknown_statement | Song_01_Satisfy:rating |...
# | Song_30_Known:known_or_unknown_statement | Song_30_Satisfy:rating |
# | Song_ID | Title | Artist | Genre | Release_Date | Plays_count | Likes_count |
'''
def profile_processing(profile):
    """Attach authority-controlled artist/genre index columns plus a
    combined 'Keywords' column ("<art>, <gen>") to a song profile."""
    def _authority_index(column, label_fmt, out_col):
        # Map each unique keyword to a positional label like '3_Art'.
        idx = pd.DataFrame({'keyword': pd.unique(profile[column])})
        idx.rename(label_fmt, inplace=True)
        idx.reset_index(level=0, inplace=True)
        idx.set_index('keyword', drop=True, inplace=True)
        idx.rename(columns={'index': out_col}, inplace=True)
        return idx

    artist_idx = _authority_index('Artist', '{0}_Art'.format, 'Art_Idx')
    genre_idx = _authority_index('Genre', '{0}_Gen'.format, 'Gen_Idx')
    # Merge both authority tables into the profile (join on index name).
    profile = profile.merge(artist_idx, left_on='Artist', right_on='keyword')
    profile = profile.merge(genre_idx, left_on='Genre', right_on='keyword')
    profile['Keywords'] = profile.apply(
        lambda row: "{0}, {1}".format(str(row['Art_Idx']), str(row['Gen_Idx'])), axis=1)
    return profile
def user_evaluation(evaluation_data):
    # Reshape the wide per-user evaluation sheet into long form: one row
    # per (User_ID, Rec_Type, song) with columns Song_ID/Known/Satisfy.
    evaluation_data.sort_values(by='User_ID', inplace=True)
    evaluation_data.set_index(['User_ID', 'Rec_Type'], inplace=True)
    # split [Song_ID: known_val] to ['Song_ID', 'Known'] & [Song_ID: satisfy_val] to ['Song_ID', 'Rating']
    known = evaluation_data.filter(regex='Known').stack().str.split(':', expand=True)
    rating = evaluation_data.filter(regex='Satisfy').stack().str.split(':', expand=True).iloc[:, 1].astype(float)
    # drop & reset deprecated index
    # (level 2 is the stacked original column name, no longer needed)
    known.reset_index(level=2, drop=True, inplace=True)
    rating.reset_index(level=2, drop=True, inplace=True)
    # concat ['Known'] & ['Satisfy']
    evaluation_data = pd.concat([known, rating], axis=1, sort=False)
    evaluation_data.columns = ['Song_ID', 'Known', 'Satisfy']
    evaluation_data['Song_ID'] = evaluation_data['Song_ID'].astype(int)
    return evaluation_data
def profile_description(profile):
    """Print summary statistics for a song profile (returns None)."""
    summary = (
        "Song Count: {0}\nGenre Count: {1}\nArtist Count: {2}\n"
        "Plays Count:\n{3}\nLikes Count:\n{4}\nRelease Date:\n{5}\n".format(
            len(pd.unique(profile['Song_ID'])),
            len(pd.unique(profile['Genre'])),
            len(pd.unique(profile['Artist'])),
            profile['Plays_count'].describe().map("{:.2f}".format),
            profile['Likes_count'].describe().map("{:.2f}".format),
            profile['Release_Date'].describe(),
        ))
    return print(summary)
def preference_structure_scale(scale_data):
    # Convert the raw questionnaire sheet into a User_ID x Pref_Ques
    # matrix of float scale scores.
    scale_data.set_index('User_ID', inplace=True)
    scale_list = []
    # split [Pref_ID: pref_val] to ['Pref_ID', 'Pref_Val'] & transform to {'User_ID': 'Pref_Val'}
    for user_scale in scale_data.filter(regex='Pref').iterrows():
        # Each row becomes a one-row frame indexed by question id,
        # transposed so the user id is the row label.
        scale_values = user_scale[1].sort_values().str.split(':', expand=True)
        scale_values = pd.DataFrame(
            scale_values[1].astype(float).to_numpy(), index=scale_values[0].to_numpy(), columns=[user_scale[0]]).T
        scale_list.append(scale_values)
    scale_data = pd.concat(scale_list, sort=False)
    return scale_data
def unknown_filter(input_data, mask, original_col, revised_col, revised_value):
    """Copy *original_col* into *revised_col*, substituting
    *revised_value* wherever *mask* is False (mutates *input_data*)."""
    input_data[revised_col] = input_data[original_col].where(mask, revised_value)
    return
def metrics_calc(metric_name, input_data, key_col, data_col, metric_func):
    """Apply metric_func to data_col inside each key_col group.

    Returns the per-group Series renamed to metric_name so it can later be
    concatenated as a named column.
    """
    grouped_values = input_data.groupby(key_col)[data_col]
    return grouped_values.apply(metric_func).rename(metric_name)
def metrics_df(metrics_param, metrics_index):
    """Evaluate a batch of metrics and collect them as columns of one DataFrame.

    metrics_param: mapping metric_name -> [input_data, key_col, data_col,
        metric_func], i.e. the positional arguments of metrics_calc.
    metrics_index: reference frame whose groups (by each metric's key_col)
        define the full index; a metric that produced fewer groups is
        reindexed against it and zero-filled, so every column covers the
        same set of groups.
    """
    metric_columns = []
    for metric_name, (input_data, key_col, data_col, metric_func) in metrics_param.items():
        metric_series = metrics_calc(metric_name, input_data, key_col, data_col, metric_func)
        # Full group index this metric should cover (computed once per metric,
        # instead of twice as before).
        full_index = metrics_index.groupby(key_col).count().index
        if metric_series.shape[0] != full_index.shape[0]:
            # Some groups produced no rows: align and fill the gaps with 0.
            metric_series = metric_series.reindex(full_index, fill_value=0)
        metric_columns.append(metric_series)
    return pd.concat(metric_columns, axis=1)
def metrics_interact(input_data, variable_matrix, metrics_col_list, judgement_metric):
    """Add interaction columns metric * per-group judgement sum to
    variable_matrix (in place; returns None).

    judgement_metric names either a known judgement metric (mapped to its
    source column below) or directly a column of input_data.
    """
    # map judgement metric name -> source column; unknown names pass through
    judgement_col_by_metric = {
        'Satisfaction_RE': 'Satisfy',
        'Satisfaction_Unknown_RE': 'Unknown_Satisfy_all',
        'Serendipity_RE': 'Unknown_Satisfy_3',
    }
    judgement_col = judgement_col_by_metric.get(judgement_metric, judgement_metric)
    judgement_sum = metrics_calc(
        '{0}_sum'.format(judgement_col), input_data, ['User_ID', 'Rec_Type'],
        judgement_col, lambda rec: rec.sum())
    for metric in metrics_col_list:
        interact_col = '{0}_X_{1}'.format(metric, judgement_metric)
        variable_matrix[interact_col] = variable_matrix[metric] * judgement_sum
    return None
def entropy_based_diversity(profile):
    """Shannon entropy (base 10) of the combined genre + artist label counts.

    A higher value means the profile's labels are spread more evenly, i.e.
    a more diverse profile.
    """
    combined_labels = pd.concat(
        [profile['Gen_Idx'], profile['Art_Idx']], axis=0, ignore_index=True, sort=True)
    _, label_counts = np.unique(list(combined_labels), return_counts=True)
    return entropy(label_counts, base=10)
def similarity_based_diversity(profile, calc_func):
    """Diversity from pairwise cosine distances between songs' keyword vectors.

    Builds a bag-of-words count matrix over the 'Keywords' column (case kept
    as-is), computes all pairwise cosine distances, and reduces the distance
    matrix to a scalar with calc_func.
    """
    keyword_vectorizer = CountVectorizer(lowercase=False)
    keyword_counts = keyword_vectorizer.fit_transform(profile['Keywords'])
    keyword_frame = pd.DataFrame(
        keyword_counts.toarray(),
        columns=keyword_vectorizer.get_feature_names(),
        index=profile.index)
    pairwise_distances = cosine_distances(keyword_frame)
    return calc_func(pairwise_distances)
def minmax_popularity_based_novelty(profile, min_pop, max_pop, calc_func):
    """Min-max novelty: 1 at min_pop (least popular), 0 at max_pop (most
    popular), aggregated with calc_func (e.g. 'mean', 'sum', 'median')."""
    normalized = (max_pop - profile) / (max_pop - min_pop)
    return normalized.agg(calc_func)
def log_popularity_based_novelty(profile, calc_func):
    """Log-scale novelty: -log10(popularity) per item, aggregated with
    calc_func; more popular items score lower (more negative)."""
    return (-np.log10(profile)).agg(calc_func)
def minmax_time_aware_novelty(profile, first_release, latest_release, calc_func):
    """Recency score in [0, 1]: 0 at first_release, 1 at latest_release,
    aggregated with calc_func. Works on datetime values (timedelta ratio)."""
    release_span = latest_release - first_release
    return ((profile - first_release) / release_span).agg(calc_func)
def log_time_aware_novelty(profile, first_release, calc_func):
    """log10 of the release age in days relative to first_release, aggregated
    with calc_func. Callers offset first_release so the day count stays > 0."""
    days_since_first = (profile - first_release) / np.timedelta64(1, 'D')
    return np.log10(days_since_first).agg(calc_func)
def distance_based_novelty(profile, user_profile, calc_func):
    """Novelty as keyword-space distance between a user's own profile and the
    songs recommended to that user.

    profile: recommendation rows for a single user (must contain 'User_ID').
    user_profile: full target-user profile table covering all users.
    calc_func: reduction applied to the (user rows x recommendation rows)
        cosine-distance matrix.
    """
    # get target user's User Profile
    user_profile = user_profile[user_profile['User_ID'] == pd.unique(profile['User_ID'])[0]]
    # unify Keywords index from User Profile & Recommendation Profile
    all_profile = pd.concat([profile, user_profile], axis=0, ignore_index=True, sort=False)
    all_profile.drop(columns=['Art_Idx', 'Gen_Idx', 'Keywords'], inplace=True)
    # NOTE(review): profile_processing is defined elsewhere in this file and
    # presumably rebuilds the 'Keywords' column just dropped — confirm, since
    # 'Keywords' is read again below.
    all_profile = profile_processing(all_profile)
    # distinguish between User Profile & Recommendation Profile
    all_profile.rename(columns={'Rec_Type': 'Profile_Type'}, inplace=True)
    all_profile.sort_values(by='Profile_Type', inplace=True)
    # rows that came from user_profile have no Rec_Type, so NaN marks them
    all_profile['Profile_Type'].fillna(value='UserProfile', inplace=True)
    all_profile.set_index(['Profile_Type', 'Song_ID'], inplace=True)
    # Keywords Count matrix
    vectorizer = CountVectorizer(lowercase=False)
    text_matrix = vectorizer.fit_transform(all_profile['Keywords'])
    text_matrix = pd.DataFrame(text_matrix.toarray(), columns=vectorizer.get_feature_names(), index=all_profile.index)
    # pairwise distances: user-profile rows (axis 0) vs recommendation rows
    profile_sim_list = cosine_distances(
        text_matrix.loc[text_matrix.index.get_level_values(0) == 'UserProfile', :],
        text_matrix.loc[text_matrix.index.get_level_values(0) != 'UserProfile', :]
    )
    return calc_func(profile_sim_list)
def long_tail_plots(candidate_likes):
    """Draw a long-tail scatter of song popularity: popularity rank on x,
    occurrence count on y; colour and point size follow popularity.

    Only creates the figure — the caller is responsible for plt.show().
    Returns None.
    """
    popularity = pd.DataFrame(candidate_likes.value_counts().to_numpy(), columns=['Likes'])
    # ten equal-width popularity buckets drive the colour scale
    popularity['Size'] = pd.cut(popularity['Likes'].to_numpy(), bins=10, labels=range(1, 11)).codes
    fig, ax = plt.subplots(dpi=200, constrained_layout=True)
    ax.scatter(
        popularity.index, popularity['Likes'].to_numpy(),
        c=popularity['Size'].to_numpy(), s=np.sqrt(popularity['Likes'].to_numpy())*3)
    return None
def main():
    """End-to-end analysis pipeline.

    Loads candidate/target user profiles, song profiles, recommendation lists
    and questionnaire exports from the current working directory; builds the
    moderator-variable matrix (user preference structure) and the
    dependent-variable matrix (recommendation effectiveness); writes both,
    their descriptive statistics, and combined long/full tables out as
    tab-separated CSV files; finally draws long-tail plots.
    """
    # ----------------------------------------------------------------------
    # Initialization
    print("------- User Preference Structure & Recommendation Effectiveness Analysis 3.0 -------")
    # read 2000 Candidate User Profile
    candidate_user_profile = pd.read_csv(
        pathlib.Path.cwd().joinpath("Candidate User Profile", "ALL_USER_PROFILE_can_user_profile.csv"),
        sep=',', encoding='utf8')
    print("------- Candidate Profile Count -------")
    print("Candidate Profile include:\n {0} users & {1} songs\n every profile has {2} songs at least".format(
        len(pd.unique(candidate_user_profile['User_ID'])), len(pd.unique(candidate_user_profile['Song_ID'])),
        pd.unique(candidate_user_profile['Count']).min()))
    # read User List for mapping
    user_list = pd.read_csv(
        pathlib.Path.cwd().joinpath("User Evaluation", "user_list.csv"), sep=',', encoding='utf8', dtype=str)
    # read Song Evaluation Form Data
    evaluation_data = pd.read_csv(
        pathlib.Path.cwd().joinpath("User Evaluation", "SongEvalForm_Value.csv"), sep=',', encoding='utf8', dtype=str)
    # read Preference Structure Scale Data
    scale_data = pd.read_csv(
        pathlib.Path.cwd().joinpath("User Evaluation", "PrefStruScale_Value.csv"), sep=',', encoding='utf8', dtype=str)
    # read 126 Target User Profile
    print("------- 126 Target User Profile description -------")
    target_user_profile = pd.read_csv(
        pathlib.Path.cwd().joinpath("User Evaluation", "TARGET_USER_user_profile.csv"),
        sep=',', encoding='utf8', dtype={'User_ID': str})
    # NOTE(review): profile_processing is defined elsewhere in this file —
    # presumably normalizes the profile columns; confirm against its def.
    target_user_profile = profile_processing(target_user_profile)
    target_user_profile['Release_Date'] = pd.to_datetime(target_user_profile['Release_Date'],
                                                         infer_datetime_format=True)
    profile_description(target_user_profile)
    # read 15333 Candidate Songs Profile
    print("------- 15333 Candidate Songs Profile description -------")
    candidate_song_profile = pd.read_csv(
        pathlib.Path.cwd().joinpath("Song Information Profile", "ALL_SONG_PROFILE_song_profile_full.csv"),
        sep=',', encoding='utf8')
    candidate_song_profile['Release_Date'] = pd.to_datetime(
        candidate_song_profile['Release_Date'], infer_datetime_format=True)
    profile_description(candidate_song_profile)
    # read all Recommendation List
    recommendation_list = pd.read_csv(
        pathlib.Path.cwd().joinpath("User Evaluation", "ALL_USER_rec_list.csv"),
        sep=',', encoding='utf8', dtype={'User_ID': str})
    recommendation_list.drop(columns=['Art_Idx', 'Gen_Idx', 'Keywords'], inplace=True)
    recommendation_list = profile_processing(recommendation_list)
    recommendation_list['Release_Date'] = pd.to_datetime(recommendation_list['Release_Date'],
                                                         infer_datetime_format=True)
    # Recommendation Evaluation Profile processing
    recommendation_evaluation = user_evaluation(evaluation_data)
    recommendation_evaluation_profile = pd.merge(
        recommendation_evaluation.reset_index(), recommendation_list,
        left_on=['User_ID', 'Rec_Type', 'Song_ID'], right_on=['User_ID', 'Strategy_Type', 'Song_ID'])
    # concat global song profile (Target User + Recommendation Candidate)
    print("------- global song profile description -------")
    global_profile = pd.concat([target_user_profile, candidate_song_profile], axis=0, sort=False)
    # get global Plays_count (drop_duplicates by chosen largest Plays_count)
    global_profile['Plays_count'] = global_profile['Plays_count'].replace(0, 1)  # avoid 0 Plays_count
    global_plays = global_profile.groupby('Song_ID')['Plays_count'].max()
    global_min_plays = global_plays.min()
    global_max_plays = global_plays.max()
    log_global_plays = -np.log10(global_plays)
    print("global Plays_count:\n{0}\nglobal log(Plays_count):\n{1}".format(
        global_plays.describe().map("{:.2f}".format), log_global_plays.describe().map("{:.5f}".format)))
    # get global Likes_count (drop_duplicates by chosen largest Likes_count)
    global_likes = global_profile.groupby('Song_ID')['Likes_count'].max()
    global_min_likes = global_likes.min()
    global_max_likes = global_likes.max()
    log_global_likes = -np.log10(global_likes)
    print("global Likes_count:\n{0}\nglobal log(Likes_count):\n{1}".format(
        global_likes.describe().map("{:.2f}".format), log_global_likes.describe().map("{:.5f}".format)))
    # get global Release_Date
    global_release = global_profile.groupby('Song_ID')['Release_Date'].max()
    global_first_release = global_profile['Release_Date'].min() - np.timedelta64(1, 'D')  # avoid 0 Release_Date_period
    global_latest_release = global_profile['Release_Date'].max()
    global_release_period = (global_release - global_first_release) / np.timedelta64(1, 'D')  # type conversion to int
    log_global_release_period = np.log10(global_release_period)
    print("global Release_Date:\n{0}\nRelease_period:\n{1}\nglobal log(Release_period):\n{2}".format(
        global_release.describe(),
        global_release_period.describe().map("{:.2f}".format),
        log_global_release_period.describe().map("{:.5f}".format)))
    # ----------------------------------------------------------------------
    # Preference Structure (Moderator Variables)
    # Preference Scale: Diversity; Openness; Identity; Involvement
    scale_up = preference_structure_scale(scale_data)
    # reverse-keyed items: 5-point scale, so reversed value = 6 - raw value
    scale_up['rev_Invol_05'] = 6 - scale_up['Invol_05']
    scale_up['rev_Invol_06'] = 6 - scale_up['Invol_06']
    scale_up['Diversity_Scale_UP'] = (scale_up['Dive_04'] + scale_up['Dive_03']) / 2
    scale_up['Openness_Scale_UP'] = (scale_up['Open_05'] + scale_up['Open_06'] + scale_up['Open_03']) / 3
    scale_up['Identity_Scale_UP'] = (scale_up['Iden_04'] + scale_up['Iden_03']) / 2
    scale_up['Involvement_Scale_UP'] = (scale_up['rev_Invol_06'] + scale_up['Invol_04'] + scale_up['Invol_02']) / 3
    # NOTE(review): self-assignment is a no-op — possibly leftover from a
    # removed transformation step.
    scale_up = scale_up
    # Demographic Statistics data
    print("------- Demographic Statistics -------")
    demog = scale_data.filter(regex='生理性別|年齡|教育程度|專業領域')
    print(pd.value_counts(demog['生理性別']))
    print(pd.value_counts(demog['年齡']))
    print(pd.value_counts(demog['教育程度']))
    print(pd.value_counts(demog['專業領域']))
    # User Profile Metrics
    # 1. Diversity
    up_diversity_param = {
        # entropy_based
        'Div_Entropy_UP': [
            target_user_profile, 'User_ID', target_user_profile.columns, entropy_based_diversity],
        # similarity_based
        'Div_AvgSim_UP': [
            target_user_profile, 'User_ID', target_user_profile.columns,
            lambda up: similarity_based_diversity(
                up, lambda song_sim: song_sim.sum() / (song_sim.shape[0] * (song_sim.shape[1] - 1)))],
        'Div_SumSim_UP': [
            target_user_profile, 'User_ID', target_user_profile.columns,
            lambda up: similarity_based_diversity(
                up, lambda song_sim: np.tril(song_sim).sum())],
        # genre_count
        'Div_GenreCount_UP': [
            target_user_profile, 'User_ID', 'Gen_Idx', lambda genre_keyword: genre_keyword.nunique()],
        # artist_count
        'Div_ArtistCount_UP': [
            target_user_profile, 'User_ID', 'Art_Idx', lambda artist_keyword: artist_keyword.nunique()],
    }
    # 2. Openness
    up_openness_param = {
        # min_max_normalization_plays_popularity_based
        'Opn_MMAvgPopPlays_UP': [
            target_user_profile, 'User_ID', 'Plays_count',
            lambda up: minmax_popularity_based_novelty(up, global_min_plays, global_max_plays, 'mean')],
        'Opn_MMSumPopPlays_UP': [
            target_user_profile, 'User_ID', 'Plays_count',
            lambda up: minmax_popularity_based_novelty(up, global_min_plays, global_max_plays, 'sum')],
        'Opn_MMMedPopPlays_UP': [
            target_user_profile, 'User_ID', 'Plays_count',
            lambda up: minmax_popularity_based_novelty(up, global_min_plays, global_max_plays, 'median')],
        # log_plays_popularity_based
        'Opn_LogAvgPopPlays_UP': [
            target_user_profile, 'User_ID', 'Plays_count', lambda up: log_popularity_based_novelty(up, 'mean')],
        'Opn_LogSumPopPlays_UP': [
            target_user_profile, 'User_ID', 'Plays_count', lambda up: log_popularity_based_novelty(up, 'sum')],
        'Opn_LogMedPopPlays_UP': [
            target_user_profile, 'User_ID', 'Plays_count', lambda up: log_popularity_based_novelty(up, 'median')],
        # min_max_normalization_likes_popularity_based
        'Opn_MMAvgPopLikes_UP': [
            target_user_profile, 'User_ID', 'Likes_count',
            lambda up: minmax_popularity_based_novelty(up, global_min_likes, global_max_likes, 'mean')],
        'Opn_MMSumPopLikes_UP': [
            target_user_profile, 'User_ID', 'Likes_count',
            lambda up: minmax_popularity_based_novelty(up, global_min_likes, global_max_likes, 'sum')],
        'Opn_MMMedPopLikes_UP': [
            target_user_profile, 'User_ID', 'Likes_count',
            lambda up: minmax_popularity_based_novelty(up, global_min_likes, global_max_likes, 'median')],
        # log_likes_popularity_based
        'Opn_LogAvgPopLikes_UP': [
            target_user_profile, 'User_ID', 'Likes_count', lambda up: log_popularity_based_novelty(up, 'mean')],
        'Opn_LogSumPopLikes_UP': [
            target_user_profile, 'User_ID', 'Likes_count', lambda up: log_popularity_based_novelty(up, 'sum')],
        'Opn_LogMedPopLikes_UP': [
            target_user_profile, 'User_ID', 'Likes_count', lambda up: log_popularity_based_novelty(up, 'median')],
        # min_max_normalization_time_aware
        'Opn_MMAvgTime_UP': [
            target_user_profile, 'User_ID', 'Release_Date',
            lambda up: minmax_time_aware_novelty(up, global_first_release, global_latest_release, 'mean')],
        'Opn_MMSumTime_UP': [
            target_user_profile, 'User_ID', 'Release_Date',
            lambda up: minmax_time_aware_novelty(up, global_first_release, global_latest_release, 'sum')],
        'Opn_MMMedTime_UP': [
            target_user_profile, 'User_ID', 'Release_Date',
            lambda up: minmax_time_aware_novelty(up, global_first_release, global_latest_release, 'median')],
        # log_time_aware
        'Opn_LogAvgTime_UP': [
            target_user_profile, 'User_ID', 'Release_Date',
            lambda up: log_time_aware_novelty(up, global_first_release, 'mean')],
        'Opn_LogSumTime_UP': [
            target_user_profile, 'User_ID', 'Release_Date',
            lambda up: log_time_aware_novelty(up, global_first_release, 'sum')],
        'Opn_LogMedTime_UP': [
            target_user_profile, 'User_ID', 'Release_Date',
            lambda up: log_time_aware_novelty(up, global_first_release, 'median')],
        # count_user_profile
        'Opn_UserProfCount_UP': [
            target_user_profile, 'User_ID', 'Song_ID', lambda like_song: like_song.nunique()]
    }
    # metrics output
    up_diversity_df = metrics_df(up_diversity_param, target_user_profile)
    up_openness_df = metrics_df(up_openness_param, target_user_profile)
    # Moderator Variables
    mo_matrix = pd.concat([scale_up, up_diversity_df, up_openness_df, demog], axis=1, sort=False)
    mo_matrix = mo_matrix.round(5)
    # user data De-identification & Anonymization
    mo_matrix.index.names = ['User_ID']
    mo_matrix = pd.merge(mo_matrix, user_list, on=['User_ID'], right_index=True, sort=False)
    mo_matrix.set_index('User_SN', inplace=True)
    print("------- User Preference Structure (Moderator Variables) -------")
    mo_matrix.to_csv(pathlib.Path.cwd() / "ModeratorVar.csv", sep='\t', encoding='utf8')
    print(mo_matrix)
    # ----------------------------------------------------------------------
    # Recommendation Effectiveness (Dependent Variables)
    # Coverage
    recommendation_total_count = len(pd.unique(recommendation_evaluation_profile['Song_ID']))
    recommendation_each_count = recommendation_evaluation_profile.groupby(['Rec_Type'])['Song_ID'].apply(
        lambda x: len(pd.unique(x)))
    print("------- Recommendation Coverage -------")
    print(
        "All Strategies totally recommend: {0} songs\n"
        "          UserCF recommend: {1} songs\n"
        "             CBF recommend: {2} songs\n"
        "         TopPlay recommend: {3} songs".format(
            recommendation_total_count, recommendation_each_count['UserCF'],
            recommendation_each_count['CBF'], recommendation_each_count['TopPlay']))
    # User Judgement Metrics
    # Serendipity masks & filters
    # unknown & satisfaction rated all
    unknown_satisfy_all_mask = \
        (recommendation_evaluation_profile['Known'] == 'unknown') & (recommendation_evaluation_profile['Satisfy'] > -1)
    unknown_filter(recommendation_evaluation_profile, unknown_satisfy_all_mask, 'Satisfy', 'Unknown_Satisfy_all', 0)
    # unknown & satisfaction rated 3~4
    unknown_satisfy_3_mask = \
        (recommendation_evaluation_profile['Known'] == 'unknown') & (recommendation_evaluation_profile['Satisfy'] > 2)
    unknown_filter(recommendation_evaluation_profile, unknown_satisfy_3_mask, 'Satisfy', 'Unknown_Satisfy_3', 0)
    # strict version: unknown & satisfaction rated 4
    unknown_satisfy_4_mask = \
        (recommendation_evaluation_profile['Known'] == 'unknown') & (recommendation_evaluation_profile['Satisfy'] > 3)
    unknown_filter(recommendation_evaluation_profile, unknown_satisfy_4_mask, 'Satisfy', 'Unknown_Satisfy_4', 0)
    re_judgement_param = {
        # 1. Unknown
        # count
        'Unknown_Count_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Known',
            lambda rec: rec.value_counts(dropna=False)['unknown']
            if rec.value_counts(dropna=False).index.isin(['unknown']).any() else 0],
        # ratio
        'Unknown_Ratio_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Known',
            lambda rec: rec.value_counts(dropna=False)['unknown'] / rec.value_counts(dropna=False).sum()
            if rec.value_counts(dropna=False).index.isin(['unknown']).any() else 0],
        # 2. Satisfaction
        'Satisfaction_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Satisfy',
            lambda rec: rec.mean()],
        # 3. Serendipity
        # unknown & satisfaction rated 3~4
        'Serendipity_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Satisfy_3',
            lambda rec: rec.mean()],
        # unknown & satisfaction rated 4
        'strict_Serendipity_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Satisfy_4',
            lambda rec: rec.mean()],
    }
    # Recommendation Evaluation Metrics
    # 1. Diversity
    re_diversity_param = {
        # entropy_based
        'Div_Entropy_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], recommendation_evaluation_profile.columns,
            entropy_based_diversity],
        # similarity_based
        'Div_AvgSim_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], recommendation_evaluation_profile.columns,
            lambda rec: similarity_based_diversity(
                rec, lambda song_sim: song_sim.sum() / (song_sim.shape[0] * (song_sim.shape[1] - 1)))],
        'Div_SumSim_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], recommendation_evaluation_profile.columns,
            lambda rec: similarity_based_diversity(
                rec, lambda song_sim: np.tril(song_sim).sum())]
    }
    # 2. Novelty(unknown)
    # Novelty(unknown) masks & filters
    unknown_mask = recommendation_evaluation_profile['Known'] == 'unknown'
    # plays_popularity_based(unknown)
    unknown_filter(recommendation_evaluation_profile, unknown_mask,
                   'Plays_count', 'Unknown_Plays_count', global_max_plays)
    # likes_popularity_based(unknown)
    unknown_filter(recommendation_evaluation_profile, unknown_mask,
                   'Likes_count', 'Unknown_Likes_count', global_max_likes)
    # time_aware(unknown)
    unknown_filter(recommendation_evaluation_profile, unknown_mask,
                   'Release_Date', 'Unknown_Release_Date', (global_first_release + np.timedelta64(1, 'D')))
    re_novelty_unknown_param = {
        # min_max_normalization_plays_popularity_based(unknown)
        'Nov_MMAvgPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Plays_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_plays, global_max_plays, 'mean')],
        'Nov_MMSumPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Plays_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_plays, global_max_plays, 'sum')],
        'Nov_MMMedPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Plays_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_plays, global_max_plays, 'median')],
        # log_plays_popularity_based(unknown)
        'Nov_LogAvgPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Plays_count',
            lambda rec: log_popularity_based_novelty(rec, 'mean')],
        'Nov_LogSumPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Plays_count',
            lambda rec: log_popularity_based_novelty(rec, 'sum')],
        'Nov_LogMedPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Plays_count',
            lambda rec: log_popularity_based_novelty(rec, 'median')],
        # min_max_normalization_likes_popularity_based(unknown)
        'Nov_MMAvgPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Likes_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_likes, global_max_likes, 'mean')],
        'Nov_MMSumPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Likes_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_likes, global_max_likes, 'sum')],
        'Nov_MMMedPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Likes_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_likes, global_max_likes, 'median')],
        # log_likes_popularity_based(unknown)
        'Nov_LogAvgPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Likes_count',
            lambda rec: log_popularity_based_novelty(rec, 'mean')],
        'Nov_LogSumPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Likes_count',
            lambda rec: log_popularity_based_novelty(rec, 'sum')],
        'Nov_LogMedPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Likes_count',
            lambda rec: log_popularity_based_novelty(rec, 'median')],
        # min_max_normalization_time_aware(unknown)
        'Nov_MMAvgTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Release_Date',
            lambda rec: minmax_time_aware_novelty(rec, global_first_release, global_latest_release, 'mean')],
        'Nov_MMSumTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Release_Date',
            lambda rec: minmax_time_aware_novelty(rec, global_first_release, global_latest_release, 'sum')],
        'Nov_MMMedTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Release_Date',
            lambda rec: minmax_time_aware_novelty(rec, global_first_release, global_latest_release, 'median')],
        # log_time_aware(unknown)
        'Nov_LogAvgTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Release_Date',
            lambda rec: log_time_aware_novelty(rec, global_first_release, 'mean')],
        'Nov_LogSumTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Release_Date',
            lambda rec: log_time_aware_novelty(rec, global_first_release, 'sum')],
        'Nov_LogMedTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Unknown_Release_Date',
            lambda rec: log_time_aware_novelty(rec, global_first_release, 'median')],
        # distance_based(unknown)
        'Nov_AvgDist_RE': [
            recommendation_evaluation_profile[unknown_mask], ['User_ID', 'Rec_Type'],
            recommendation_evaluation_profile.columns,
            lambda rec: distance_based_novelty(
                rec, target_user_profile, lambda song_sim: song_sim.sum() / (song_sim.shape[0] * 30))],
        'Nov_SumDist_RE': [
            recommendation_evaluation_profile[unknown_mask], ['User_ID', 'Rec_Type'],
            recommendation_evaluation_profile.columns,
            lambda rec: distance_based_novelty(
                rec, target_user_profile, lambda song_sim: song_sim.sum())]
    }
    # 3. Novelty(all)
    re_novelty_all_param = {
        # min_max_normalization_plays_popularity_based(all)
        'NovAll_MMAvgPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Plays_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_plays, global_max_plays, 'mean')],
        'NovAll_MMSumPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Plays_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_plays, global_max_plays, 'sum')],
        'NovAll_MMMedPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Plays_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_plays, global_max_plays, 'median')],
        # log_plays_popularity_based(all)
        'NovAll_LogAvgPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Plays_count',
            lambda rec: log_popularity_based_novelty(rec, 'mean')],
        'NovAll_LogSumPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Plays_count',
            lambda rec: log_popularity_based_novelty(rec, 'sum')],
        'NovAll_LogMedPopPlays_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Plays_count',
            lambda rec: log_popularity_based_novelty(rec, 'median')],
        # min_max_normalization_likes_popularity_based(all)
        'NovAll_MMAvgPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Likes_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_likes, global_max_likes, 'mean')],
        'NovAll_MMSumPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Likes_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_likes, global_max_likes, 'sum')],
        'NovAll_MMMedPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Likes_count',
            lambda rec: minmax_popularity_based_novelty(rec, global_min_likes, global_max_likes, 'median')],
        # log_likes_popularity_based(all)
        'NovAll_LogAvgPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Likes_count',
            lambda rec: log_popularity_based_novelty(rec, 'mean')],
        'NovAll_LogSumPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Likes_count',
            lambda rec: log_popularity_based_novelty(rec, 'sum')],
        'NovAll_LogMedPopLikes_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Likes_count',
            lambda rec: log_popularity_based_novelty(rec, 'median')],
        # min_max_normalization_time_aware(all)
        'NovAll_MMAvgTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Release_Date',
            lambda rec: minmax_time_aware_novelty(rec, global_first_release, global_latest_release, 'mean')],
        'NovAll_MMSumTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Release_Date',
            lambda rec: minmax_time_aware_novelty(rec, global_first_release, global_latest_release, 'sum')],
        'NovAll_MMMedTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Release_Date',
            lambda rec: minmax_time_aware_novelty(rec, global_first_release, global_latest_release, 'median')],
        # log_time_aware(all)
        'NovAll_LogAvgTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Release_Date',
            lambda rec: log_time_aware_novelty(rec, global_first_release, 'mean')],
        'NovAll_LogSumTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Release_Date',
            lambda rec: log_time_aware_novelty(rec, global_first_release, 'sum')],
        'NovAll_LogMedTime_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], 'Release_Date',
            lambda rec: log_time_aware_novelty(rec, global_first_release, 'median')],
        # distance_based(all)
        'NovAll_AvgDist_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], recommendation_evaluation_profile.columns,
            lambda rec: distance_based_novelty(
                rec, target_user_profile, lambda song_sim: song_sim.sum() / (song_sim.shape[0] * 30))],
        'NovAll_SumDist_RE': [
            recommendation_evaluation_profile, ['User_ID', 'Rec_Type'], recommendation_evaluation_profile.columns,
            lambda rec: distance_based_novelty(
                rec, target_user_profile, lambda song_sim: song_sim.sum())]
    }
    # metrics output
    re_judgement = metrics_df(re_judgement_param, recommendation_evaluation_profile)
    re_diversity = metrics_df(re_diversity_param, recommendation_evaluation_profile)
    re_novelty_unknown = metrics_df(re_novelty_unknown_param, recommendation_evaluation_profile)
    re_novelty_all = metrics_df(re_novelty_all_param, recommendation_evaluation_profile)
    # Dependent Variables
    dv_matrix = pd.concat([re_judgement, re_diversity, re_novelty_unknown, re_novelty_all], axis=1, sort=False)
    # metrics interaction
    novelty_unknown_list = [
        'Nov_MMAvgPopPlays_RE', 'Nov_LogAvgPopPlays_RE', 'Nov_MMAvgPopLikes_RE', 'Nov_LogAvgPopLikes_RE',
        'Nov_MMAvgTime_RE', 'Nov_LogAvgTime_RE', 'Nov_AvgDist_RE'
    ]
    novelty_all_list = [
        'NovAll_MMAvgPopPlays_RE', 'NovAll_LogAvgPopPlays_RE', 'NovAll_MMAvgPopLikes_RE', 'NovAll_LogAvgPopLikes_RE',
        'NovAll_MMAvgTime_RE', 'NovAll_LogAvgTime_RE', 'NovAll_AvgDist_RE'
    ]
    # metrics_interact mutates dv_matrix in place (adds *_X_* columns)
    # Novelty(unknown) X Satisfaction_RE
    metrics_interact(recommendation_evaluation_profile, dv_matrix, novelty_unknown_list, 'Satisfaction_Unknown_RE')
    # Novelty(unknown) X Serendipity_RE
    metrics_interact(recommendation_evaluation_profile, dv_matrix, novelty_unknown_list, 'Serendipity_RE')
    # Novelty(all) X Satisfaction_RE
    metrics_interact(recommendation_evaluation_profile, dv_matrix, novelty_all_list, 'Satisfaction_RE')
    dv_matrix = dv_matrix.round(5)
    # user data De-identification & Anonymization
    dv_matrix = pd.merge(dv_matrix, user_list, on=['User_ID'], right_index=True, sort=False)
    dv_matrix.reset_index(level=0, drop=True, inplace=True)
    dv_matrix.set_index(['User_SN', dv_matrix.index], inplace=True)
    # get Dummy Variables
    dv_matrix['Rec_Type_SN'] = dv_matrix.rename(
        level='Rec_Type', index={'UserCF': 0, 'CBF': 1, 'TopPlay': 2}).index.get_level_values('Rec_Type')
    rec_type_dummy = \
        pd.get_dummies(dv_matrix.index.get_level_values('Rec_Type')).add_prefix('dummy_').set_index(dv_matrix.index)
    dv_matrix = dv_matrix.join(rec_type_dummy, sort=False)
    # get Latin Square Order
    # NOTE(review): [user]{4} is a character class — it matches any four
    # characters drawn from u/s/e/r, not the literal string "user". Works for
    # SNs like "usera001" but is looser than probably intended.
    user_a_pattern = r'[user]{4}[a]{1}\d{3}'
    user_b_pattern = r'[user]{4}[b]{1}\d{3}'
    user_c_pattern = r'[user]{4}[c]{1}\d{3}'
    strategy_order = ['UserCF', 'CBF', 'TopPlay']
    latin_square = dv_matrix.reset_index()['User_SN'].drop_duplicates().to_frame()
    latin_square['strategy_order'] = None
    # each user group (a/b/c) gets a rotated copy of the strategy order
    for index, row in latin_square.iterrows():
        if re.search(user_a_pattern, row['User_SN']):
            latin_square.at[index, 'strategy_order'] = np.roll(strategy_order, 0)
        elif re.search(user_b_pattern, row['User_SN']):
            latin_square.at[index, 'strategy_order'] = np.roll(strategy_order, -1)
        elif re.search(user_c_pattern, row['User_SN']):
            latin_square.at[index, 'strategy_order'] = np.roll(strategy_order, -2)
    latin_square[['1', '2', '3']] = pd.DataFrame(
        latin_square['strategy_order'].to_numpy().tolist(), index=latin_square.index)
    latin_square = latin_square.drop(columns='strategy_order').set_index('User_SN')
    latin_square = latin_square.stack(0).reset_index()
    latin_square.columns = ['User_SN', 'LatinSquare_SN', 'Rec_Type']
    latin_square['LatinSquare_SN'] = latin_square['LatinSquare_SN'].astype(int)
    latin_square.set_index(['User_SN', 'Rec_Type'], inplace=True)
    dv_matrix = pd.merge(dv_matrix, latin_square, on=['User_SN', 'Rec_Type'])
    print("------- Recommendation Effectiveness (Dependent Variables) -------")
    dv_matrix.to_csv(pathlib.Path.cwd() / "DependentVar.csv", sep='\t', encoding='utf8')
    print(dv_matrix)
    # ----------------------------------------------------------------------
    # Long Format Variables combine
    dv_matrix.reset_index(level=1, drop=False, inplace=True)
    all_matrix_long = pd.merge(mo_matrix, dv_matrix, on=['User_SN'], sort=False)
    print("------- Long Format -------")
    all_matrix_long.to_csv(pathlib.Path.cwd() / "All_LongFormat.csv", sep='\t', encoding='utf8')
    print(all_matrix_long)
    # ----------------------------------------------------------------------
    # All Data combine
    # processing & clean
    recommendation_evaluation_profile.drop(columns=['Art_Idx', 'Gen_Idx', 'Keywords', 'Strategy_Type'], inplace=True)
    recommendation_evaluation_profile = profile_processing(recommendation_evaluation_profile)
    recommendation_evaluation_profile.rename(
        columns={'Prediction_Rating': 'UserCF_Pred', 'Profile_Sim': 'CBF_Pred'}, inplace=True)
    recommendation_evaluation_profile['UserCF_Pred'].fillna(np.NaN, inplace=True)
    recommendation_evaluation_profile['CBF_Pred'].fillna(np.NaN, inplace=True)
    recommendation_evaluation_profile = \
        pd.merge(recommendation_evaluation_profile, user_list, on=['User_ID'], sort=False)
    recommendation_evaluation_profile.set_index(['User_SN', 'Rec_Type'], inplace=True)
    recommendation_evaluation_profile.sort_index(inplace=True)
    # running song number within each user (1-based)
    recommendation_evaluation_profile['repeated_Song_SN'] = \
        recommendation_evaluation_profile.groupby(['User_SN']).cumcount() + 1
    # merge Recommendation Song Profile
    all_matrix = pd.merge(recommendation_evaluation_profile, all_matrix_long, on=['User_SN', 'Rec_Type'], sort=False)
    print("------- All Data -------")
    all_matrix.to_csv(pathlib.Path.cwd() / "All_Data.csv", sep='\t', encoding='utf8')
    print(all_matrix)
    # ----------------------------------------------------------------------
    # Descriptive Statistics
    # Moderator Variables
    print("------- Descriptive Statistics: User Preference Structure (Moderator Variables) -------")
    mo_matrix_ds = pd.concat(
        [mo_matrix.describe(), mo_matrix.agg([pd.DataFrame.kurtosis, pd.DataFrame.skew])],
        axis=0, join='outer', sort=False).T
    mo_matrix_ds.to_csv(pathlib.Path.cwd() / "ModeratorVar_StatsDesc.csv", sep='\t', encoding='utf8')
    print(mo_matrix_ds)
    # Dependent Variables
    print("------- Descriptive Statistics: Recommendation Effectiveness (Dependent Variables) -------")
    dv_matrix_ds = pd.concat(
        [dv_matrix.groupby('Rec_Type').describe(),
         dv_matrix.groupby('Rec_Type').agg([pd.DataFrame.kurtosis, pd.DataFrame.skew])],
        axis=1, join='outer', sort=False).T.sort_index(level=0, sort_remaining=False)
    dv_matrix_ds['ALL'] = pd.concat(
        [dv_matrix.describe(), dv_matrix.agg([pd.DataFrame.kurtosis, pd.DataFrame.skew])],
        axis=0, join='outer', sort=False).unstack()
    dv_matrix_ds = dv_matrix_ds.stack().unstack(1)
    dv_matrix_ds.to_csv(pathlib.Path.cwd() / "DependentVar_StatsDesc.csv", sep='\t', encoding='utf8')
    print(dv_matrix_ds)
    # ----------------------------------------------------------------------
    # Plots Area
    print("------- Plots -------")
    # plots font style
    mpl.rcParams['font.family'] = 'sans-serif'
    mpl.rcParams['font.sans-serif'] = ['Cambria']
    # Long Tail plots
    usercf_rec_list = recommendation_evaluation_profile.groupby(['Rec_Type'])['Song_ID'].apply(
        lambda x: pd.unique(x))['UserCF']
    usercf_candidate_likes = candidate_user_profile.loc[
        candidate_user_profile['Song_ID'].isin(usercf_rec_list), 'Song_ID']
    cbf_rec_list = recommendation_evaluation_profile.groupby(['Rec_Type'])['Song_ID'].apply(
        lambda x: pd.unique(x))['CBF']
    cbf_candidate_likes = candidate_user_profile.loc[
        candidate_user_profile['Song_ID'].isin(cbf_rec_list), 'Song_ID']
    topplay_rec_list = recommendation_evaluation_profile.groupby(['Rec_Type'])['Song_ID'].apply(
        lambda x: pd.unique(x))['TopPlay']
    topplay_candidate_likes = candidate_user_profile.loc[
        candidate_user_profile['Song_ID'].isin(topplay_rec_list), 'Song_ID']
    long_tail_plots(candidate_user_profile['Song_ID'])
    long_tail_plots(usercf_candidate_likes)
    long_tail_plots(cbf_candidate_likes)
    long_tail_plots(topplay_candidate_likes)
    plt.show()
    return
# Script entry point: run the full analysis pipeline when executed directly.
if __name__ == "__main__":
    main()
|
"""144. Binary Tree Preorder Traversal"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    """LeetCode 144: preorder (root, left, right) traversal of a binary tree.

    Fixed relative to the original: everything after the first ``return res``
    was unreachable, and the two leftover ``dfs`` alternatives defined the same
    method name twice (the second silently shadowed the first and would raise
    AttributeError on ``self.res``).  Only the working iterative version is kept.
    """

    def preorderTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        # Iterative DFS with an explicit stack; push right before left so the
        # left subtree is visited first (LIFO order).
        stack = [root]
        res = []
        while stack:
            node = stack.pop()
            if not node:
                # None children are pushed unconditionally and skipped here.
                continue
            res.append(node.val)
            stack.append(node.right)
            stack.append(node.left)
        return res
|
from shared_matrix import SharedMatrix
from custom_barrier import CustomBarrier
from threading import Thread, current_thread
from typing import Tuple, Union
from statistics import mean
class Worker(Thread):
    """Worker thread that repeatedly replaces its matrix cell with the mean
    of its four orthogonal neighbours.

    Each iteration is split into two phases separated by a shared barrier so
    that every worker reads the old matrix state (phase 1) before any worker
    writes its new value (phase 2).
    """

    # NOTE: project types are annotated as forward-reference strings so the
    # annotations are resolved lazily (equivalent at runtime via
    # typing.get_type_hints) instead of at class-creation time.
    def __init__(self, barrier: "CustomBarrier", observer_barrier: Union["CustomBarrier", None], shared_matrix: "SharedMatrix", pos: Tuple[int, int], iterations: int, verbose_logging: bool) -> None:
        super().__init__(daemon=True)
        if verbose_logging:
            self.log = lambda msg: print(f'[{current_thread()}]{msg}')
        else:
            # No-op logger keeps call sites unconditional.
            self.log = lambda _: None
        self.observer_barrier = observer_barrier
        self.barrier = barrier
        self.matrix = shared_matrix
        self.cell = pos
        self.iterations = iterations

    def phase_1(self):
        """Read the four orthogonal neighbours and return their mean (read-only phase)."""
        i, j = self.cell
        neighbour_positions = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]
        cells = []
        for pos in neighbour_positions:
            # Skip negative indices explicitly: Python indexing would
            # otherwise wrap them around to the far side of the matrix.
            if pos[0] < 0 or pos[1] < 0:
                continue
            try:
                cells.append(self.matrix.get(pos))
            except IndexError:
                # Neighbour lies beyond the matrix bounds (edge/corner cell).
                continue
        return mean(cells)

    def phase_2(self, average) -> None:
        """Write the previously computed average into this worker's cell."""
        self.matrix.set(self.cell, average)

    def run(self):
        for _ in range(self.iterations):
            avg = self.phase_1()
            self.log(f'Phase 1 complete. Average: {avg}')
            self.barrier.wait()
            # Bug fix: compare to None with identity ("is not None"), not "!=".
            if self.observer_barrier is not None:
                self.observer_barrier.wait()
            self.phase_2(avg)
            self.barrier.wait()
            self.log('Phase 2 complete')
        self.log('Done')
|
# Generated by Django 2.2.7 on 2020-01-21 13:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``type`` choice field to Product."""

    dependencies = [
        ('shop', '0002_product_size'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='type',
            # Bug fix: max_length was 1, which rejects every choice value
            # except 'X' ('Dresses' is 7 chars, 'Bracelets' is 9).
            field=models.CharField(blank=True, choices=[('X', 'Select type'), ('Dresses', 'Dresses'), ('Bracelets', 'Bracelets')], default='X', max_length=9, null=True),
        ),
    ]
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Sheets API hook."""
from __future__ import annotations
from typing import Any, Sequence
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class GSheetsHook(GoogleBaseHook):
    """
    Interact with Google Sheets via Google Cloud connection.
    Reading and writing cells in Google Sheet: https://developers.google.com/sheets/api/guides/values
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :param api_version: API Version
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account.
    """

    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        api_version: str = "v4",
        delegate_to: str | None = None,
        impersonation_chain: str | Sequence[str] | None = None,
    ) -> None:
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        self.gcp_conn_id = gcp_conn_id
        self.api_version = api_version
        self.delegate_to = delegate_to
        self._conn = None

    def get_conn(self) -> Any:
        """
        Retrieves connection to Google Sheets.
        :return: Google Sheets services object.
        """
        # Lazily build and cache the service client on first use.
        if not self._conn:
            http_authorized = self._authorize()
            # cache_discovery=False avoids a noisy oauth2client file-cache warning.
            self._conn = build("sheets", self.api_version, http=http_authorized, cache_discovery=False)
        return self._conn

    def get_values(
        self,
        spreadsheet_id: str,
        range_: str,
        major_dimension: str = "DIMENSION_UNSPECIFIED",
        value_render_option: str = "FORMATTED_VALUE",
        date_time_render_option: str = "SERIAL_NUMBER",
    ) -> list:
        """
        Gets values from Google Sheet from a single range.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get
        :param spreadsheet_id: The Google Sheet ID to interact with
        :param range_: The A1 notation of the values to retrieve.
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :return: An array of sheet values from the specified sheet.
        """
        service = self.get_conn()
        response = (
            service.spreadsheets()
            .values()
            .get(
                spreadsheetId=spreadsheet_id,
                range=range_,
                majorDimension=major_dimension,
                valueRenderOption=value_render_option,
                dateTimeRenderOption=date_time_render_option,
            )
            .execute(num_retries=self.num_retries)
        )
        # The API omits "values" entirely for an empty range.
        return response.get("values", [])

    def batch_get_values(
        self,
        spreadsheet_id: str,
        ranges: list,
        major_dimension: str = "DIMENSION_UNSPECIFIED",
        value_render_option: str = "FORMATTED_VALUE",
        date_time_render_option: str = "SERIAL_NUMBER",
    ) -> dict:
        """
        Gets values from Google Sheet from a list of ranges.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet
        :param spreadsheet_id: The Google Sheet ID to interact with
        :param ranges: The A1 notation of the values to retrieve.
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :return: Google Sheets API response.
        """
        service = self.get_conn()
        response = (
            service.spreadsheets()
            .values()
            .batchGet(
                spreadsheetId=spreadsheet_id,
                ranges=ranges,
                majorDimension=major_dimension,
                valueRenderOption=value_render_option,
                dateTimeRenderOption=date_time_render_option,
            )
            .execute(num_retries=self.num_retries)
        )
        return response

    def update_values(
        self,
        spreadsheet_id: str,
        range_: str,
        values: list,
        major_dimension: str = "ROWS",
        value_input_option: str = "RAW",
        include_values_in_response: bool = False,
        value_render_option: str = "FORMATTED_VALUE",
        date_time_render_option: str = "SERIAL_NUMBER",
    ) -> dict:
        """
        Updates values from Google Sheet from a single range.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update
        :param spreadsheet_id: The Google Sheet ID to interact with.
        :param range_: The A1 notation of the values to retrieve.
        :param values: Data within a range of the spreadsheet.
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :param value_input_option: Determines how input data should be interpreted.
            RAW or USER_ENTERED
        :param include_values_in_response: Determines if the update response should
            include the values of the cells that were updated.
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :return: Google Sheets API response.
        """
        service = self.get_conn()
        body = {"range": range_, "majorDimension": major_dimension, "values": values}
        response = (
            service.spreadsheets()
            .values()
            .update(
                spreadsheetId=spreadsheet_id,
                range=range_,
                valueInputOption=value_input_option,
                includeValuesInResponse=include_values_in_response,
                responseValueRenderOption=value_render_option,
                responseDateTimeRenderOption=date_time_render_option,
                body=body,
            )
            .execute(num_retries=self.num_retries)
        )
        return response

    def batch_update_values(
        self,
        spreadsheet_id: str,
        ranges: list,
        values: list,
        major_dimension: str = "ROWS",
        value_input_option: str = "RAW",
        include_values_in_response: bool = False,
        value_render_option: str = "FORMATTED_VALUE",
        date_time_render_option: str = "SERIAL_NUMBER",
    ) -> dict:
        """
        Updates values from Google Sheet for multiple ranges.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchUpdate
        :param spreadsheet_id: The Google Sheet ID to interact with
        :param ranges: The A1 notation of the values to retrieve.
        :param values: Data within a range of the spreadsheet.
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :param value_input_option: Determines how input data should be interpreted.
            RAW or USER_ENTERED
        :param include_values_in_response: Determines if the update response should
            include the values of the cells that were updated.
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :return: Google Sheets API response.
        """
        if len(ranges) != len(values):
            # Bug fix: the message previously said "'Lists'", a parameter
            # name that does not exist; the compared argument is 'values'.
            raise AirflowException(
                "'Ranges' and 'Values' must be of equal length. "
                f"'Ranges' is of length: {len(ranges)} and 'Values' is of length: {len(values)}."
            )
        service = self.get_conn()
        # Pair each range with its value block in a ValueRange object.
        data = []
        for idx, range_ in enumerate(ranges):
            value_range = {"range": range_, "majorDimension": major_dimension, "values": values[idx]}
            data.append(value_range)
        body = {
            "valueInputOption": value_input_option,
            "data": data,
            "includeValuesInResponse": include_values_in_response,
            "responseValueRenderOption": value_render_option,
            "responseDateTimeRenderOption": date_time_render_option,
        }
        response = (
            service.spreadsheets()
            .values()
            .batchUpdate(spreadsheetId=spreadsheet_id, body=body)
            .execute(num_retries=self.num_retries)
        )
        return response

    def append_values(
        self,
        spreadsheet_id: str,
        range_: str,
        values: list,
        major_dimension: str = "ROWS",
        value_input_option: str = "RAW",
        insert_data_option: str = "OVERWRITE",
        include_values_in_response: bool = False,
        value_render_option: str = "FORMATTED_VALUE",
        date_time_render_option: str = "SERIAL_NUMBER",
    ) -> dict:
        """
        Append values from Google Sheet from a single range.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/append
        :param spreadsheet_id: The Google Sheet ID to interact with
        :param range_: The A1 notation of the values to retrieve.
        :param values: Data within a range of the spreadsheet.
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :param value_input_option: Determines how input data should be interpreted.
            RAW or USER_ENTERED
        :param insert_data_option: Determines how existing data is changed when new data is input.
            OVERWRITE or INSERT_ROWS
        :param include_values_in_response: Determines if the update response should
            include the values of the cells that were updated.
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :return: Google Sheets API response.
        """
        service = self.get_conn()
        body = {"range": range_, "majorDimension": major_dimension, "values": values}
        response = (
            service.spreadsheets()
            .values()
            .append(
                spreadsheetId=spreadsheet_id,
                range=range_,
                valueInputOption=value_input_option,
                insertDataOption=insert_data_option,
                includeValuesInResponse=include_values_in_response,
                responseValueRenderOption=value_render_option,
                responseDateTimeRenderOption=date_time_render_option,
                body=body,
            )
            .execute(num_retries=self.num_retries)
        )
        return response

    def clear(self, spreadsheet_id: str, range_: str) -> dict:
        """
        Clear values from Google Sheet from a single range.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear
        :param spreadsheet_id: The Google Sheet ID to interact with
        :param range_: The A1 notation of the values to retrieve.
        :return: Google Sheets API response.
        """
        service = self.get_conn()
        response = (
            service.spreadsheets()
            .values()
            .clear(spreadsheetId=spreadsheet_id, range=range_)
            .execute(num_retries=self.num_retries)
        )
        return response

    def batch_clear(self, spreadsheet_id: str, ranges: list) -> dict:
        """
        Clear values from Google Sheet from a list of ranges.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchClear
        :param spreadsheet_id: The Google Sheet ID to interact with
        :param ranges: The A1 notation of the values to retrieve.
        :return: Google Sheets API response.
        """
        service = self.get_conn()
        body = {"ranges": ranges}
        response = (
            service.spreadsheets()
            .values()
            .batchClear(spreadsheetId=spreadsheet_id, body=body)
            .execute(num_retries=self.num_retries)
        )
        return response

    def get_spreadsheet(self, spreadsheet_id: str):
        """
        Retrieves spreadsheet matching the given id.
        :param spreadsheet_id: The spreadsheet id.
        :return: An spreadsheet that matches the sheet filter.
        """
        response = (
            self.get_conn()
            .spreadsheets()
            .get(spreadsheetId=spreadsheet_id)
            .execute(num_retries=self.num_retries)
        )
        return response

    def get_sheet_titles(self, spreadsheet_id: str, sheet_filter: list[str] | None = None):
        """
        Retrieves the sheet titles from a spreadsheet matching the given id and sheet filter.
        :param spreadsheet_id: The spreadsheet id.
        :param sheet_filter: List of sheet title to retrieve from sheet.
        :return: An list of sheet titles from the specified sheet that match
            the sheet filter.
        """
        response = self.get_spreadsheet(spreadsheet_id=spreadsheet_id)
        if sheet_filter:
            titles = [
                sh["properties"]["title"]
                for sh in response["sheets"]
                if sh["properties"]["title"] in sheet_filter
            ]
        else:
            titles = [sh["properties"]["title"] for sh in response["sheets"]]
        return titles

    def create_spreadsheet(self, spreadsheet: dict[str, Any]) -> dict[str, Any]:
        """
        Creates a spreadsheet, returning the newly created spreadsheet.
        :param spreadsheet: an instance of Spreadsheet
            https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
        :return: An spreadsheet object.
        """
        self.log.info("Creating spreadsheet: %s", spreadsheet["properties"]["title"])
        response = (
            self.get_conn().spreadsheets().create(body=spreadsheet).execute(num_retries=self.num_retries)
        )
        self.log.info("Spreadsheet: %s created", spreadsheet["properties"]["title"])
        return response
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Vehicle(object):
    """Plain value object describing a vehicle by brand, model and year."""
    def __init__(self, brand, model, year):
        self.brand = brand
        self.model = model
        self.year = year
class InputWidget(QtWidgets.QWidget):
    """Fixed-size form widget that builds a Vehicle from user input.

    Bug fixes: in PyQt5 QWidget lives in QtWidgets (QtGui.QWidget raises
    AttributeError), and ``save`` referenced an undefined name ``params``.
    """

    def __init__(self):
        super(InputWidget, self).__init__()
        self.setFixedSize(240, 300)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.ui.save_pushButton.clicked.connect(self.save)
        self.ui.cancel_pushButton.clicked.connect(self.cancel)

    def save(self):
        # Build the Vehicle from the three line edits of the form.
        self.vehicle = Vehicle(
            self.ui.brand_lineEdit.text(),
            self.ui.model_lineEdit.text(),
            self.ui.year_lineEdit.text(),
        )

    def cancel(self):
        # Discard any previously entered vehicle.
        self.vehicle = None
class Ui_Form(object):
    """Generated-style UI layout for the vehicle input form.

    Bug fixes: PyQt5 moved all widget classes from QtGui to QtWidgets, and
    the PyQt4 helper ``_fromUtf8`` no longer exists — plain str literals are
    used for object names instead.
    """

    def setupUi(self, Form):
        Form.setObjectName("Form")
        self.layoutWidget = QtWidgets.QWidget(Form)
        self.layoutWidget.setGeometry(QtCore.QRect(0, 20, 193, 103))
        self.layoutWidget.setObjectName("layoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
        self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
        self.gridLayout.setObjectName("gridLayout")
        # Row 0: brand label + line edit.
        self.brand_label = QtWidgets.QLabel(self.layoutWidget)
        self.brand_label.setObjectName("brand_label")
        self.gridLayout.addWidget(self.brand_label, 0, 0, 1, 1)
        self.brand_lineEdit = QtWidgets.QLineEdit(self.layoutWidget)
        self.brand_lineEdit.setObjectName("brand_lineEdit")
        self.gridLayout.addWidget(self.brand_lineEdit, 0, 3, 1, 1)
        # Row 1: model label + line edit.
        self.model_label = QtWidgets.QLabel(self.layoutWidget)
        self.model_label.setObjectName("model_label")
        self.gridLayout.addWidget(self.model_label, 1, 0, 1, 1)
        self.model_lineEdit = QtWidgets.QLineEdit(self.layoutWidget)
        self.model_lineEdit.setObjectName("model_lineEdit")
        self.gridLayout.addWidget(self.model_lineEdit, 1, 3, 1, 1)
        # Row 2: year label + line edit.
        self.year_label = QtWidgets.QLabel(self.layoutWidget)
        self.year_label.setObjectName("year_label")
        self.gridLayout.addWidget(self.year_label, 2, 0, 1, 1)
        self.year_lineEdit = QtWidgets.QLineEdit(self.layoutWidget)
        self.year_lineEdit.setObjectName("year_lineEdit")
        self.gridLayout.addWidget(self.year_lineEdit, 2, 3, 1, 1)
        # Row 3: save button, two expanding spacers, cancel button.
        self.save_pushButton = QtWidgets.QPushButton(self.layoutWidget)
        self.save_pushButton.setObjectName("save_pushButton")
        self.gridLayout.addWidget(self.save_pushButton, 3, 0, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 3, 2, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem1, 3, 1, 1, 1)
        self.cancel_pushButton = QtWidgets.QPushButton(self.layoutWidget)
        self.cancel_pushButton.setObjectName("cancel_pushButton")
        self.gridLayout.addWidget(self.cancel_pushButton, 3, 3, 1, 1)
        QtCore.QMetaObject.connectSlotsByName(Form)
from socket import *
import threading
from config import *
class Server:
    """Threaded TCP chat server: one accept loop plus one thread per client.

    Clients are tracked in ``self.clients`` keyed by username; each entry is
    a dict with 'conn' (socket), 'username', 'thread' and 'keep_alive'.
    """

    def __init__(self, hostname, port):
        self.hostname = hostname
        self.port = port
        self.clients = {}
        self.threads = []
        self.keep_alive = True
        self.socket = socket(AF_INET, SOCK_STREAM)
        # Short timeout lets the accept loop re-check keep_alive regularly.
        self.socket.settimeout(2.0)
        self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.socket.bind((hostname, port))
        self.socket.listen(5)
        self.main_thread = threading.Thread(target=self.update, args=(lambda:self.keep_alive,))

    def sendMessage(self, author, message_raw, destiny=None):
        """Route a raw message: broadcast, user listing, quit, or private delivery."""
        def send(message, destiny=None):
            # destiny=None -> broadcast to everyone except the author;
            # otherwise deliver to a single user (error reply if unknown).
            if not destiny:
                for username, connection in list(self.clients.items()):
                    if username != author:
                        connection['conn'].send(message.encode())
            else:
                if destiny in self.clients:
                    self.clients[destiny]['conn'].send(message.encode())
                else:
                    error_message = build_message_text('SERVER', 'Chat', f'Ops, usuário {destiny} não encontrato...')
                    self.clients[author]['conn'].send(error_message.encode())
        if author == 'server':
            # Server announcements are broadcast as-is.
            send(message_raw)
        elif message_raw.startswith(COMMANDS['LIST']):
            # /l command: reply to the author with all online usernames.
            message_text = f'{author} aqui está uma lista de todos os usuários disponíveis:'
            for username, connection in list(self.clients.items()):
                if username != author:
                    message_text += f"\n{build_message_text('SERVER', username, '')}"
            message = build_message_text('SERVER', 'Chat', message_text)
            send(message + "\n", author)
        elif message_raw.startswith(COMMANDS['QUIT']):
            # /q command: announce departure, then tear down the connection.
            message = build_message_text('SERVER', 'Chat', f'{author} está saindo do chat...')
            send(message)
            self.endConnection(author)
        elif message_raw.startswith(COMMANDS['PRIVATE']):
            # /p command: "<cmd> <destiny> <words...>" -> private message.
            message_split = message_raw.split()
            action = message_split.pop(0)
            destiny = message_split.pop(0)
            message = build_message_text('PRIVATE', author, " ".join(message_split))
            send(message, destiny)
        else:
            # Anything else is a global chat message.
            message = build_message_text('GLOBAL', author, message_raw)
            send(message, destiny)

    def handleConnection(self, keep_alive, username):
        """Per-client receive loop; runs on that client's dedicated thread."""
        connection = self.clients[username]
        print(build_message_text('USER', username, f'Inicializando...'))
        while connection['keep_alive']:
            try:
                message = connection['conn'].recv(4026).decode()
                self.sendMessage(username, message)
            except timeout:
                # recv timed out; loop again so keep_alive is re-checked.
                pass
            except Exception as e:
                print(build_message_text('SERVER', 'ERROR', f'{username} - {e}'))
                print(build_message_text('SERVER', 'ERROR', f'Interrompendo conexão de {username}'))
                self.endConnection(username)
                break
        return

    def endConnection(self, username):
        """Stop a client's loop, join its thread, close its socket and forget it."""
        # NOTE(review): when this is invoked from the client's own handler
        # thread (QUIT/error path), thread.join() joins the current thread
        # and raises RuntimeError; cleanup then happens in the except branch.
        try:
            print(build_message_text('SERVER', 'Chat', f'{username} - Desligando KeepAlive ...'))
            self.clients[username]['keep_alive'] = False
            print(build_message_text('SERVER', 'Chat', f'{username} - Unindo Thread...'))
            self.clients[username]['thread'].join()
            print(build_message_text('SERVER', 'Chat', f'{username} - Fechando conexão'))
            self.clients[username]['conn'].close()
            print(build_message_text('SERVER', 'Chat', f'{username} - Apagando dados'))
            del self.clients[username]['thread']
            del self.clients[username]['conn']
            del self.clients[username]
        except Exception as e:
            # Best-effort cleanup when the happy path failed part-way.
            del self.clients[username]['thread']
            del self.clients[username]['conn']
            del self.clients[username]
            print(build_message_text('SERVER', 'ERROR', f'{username} - {e}'))

    def update(self, keep_alive):
        """Accept loop: greet each new client, pick a unique username, spawn its thread."""
        print(build_message_text('SERVER', 'Chat', f'Servidor aberto em {self.hostname}:{self.port}'))
        while keep_alive():
            try:
                connection, addr = self.socket.accept()
                message = build_message_text('SERVER', 'Chat', 'Digite seu apelido')
                connection.send(message.encode())
                username = connection.recv(1024).decode()
                # Keep asking until the nickname is not already taken.
                while username in self.clients:
                    message = build_message_text('SERVER', 'Chat', 'Esse apelido já está sendo usado, tente outro')
                    connection.send(message.encode())
                    username = connection.recv(1024).decode()
                connection.settimeout(0.2)
                message = build_message_text('SERVER', 'Chat', 'Seja bem vindo! \n\tUtilize o comando /l para listar os usuários onlines \n\tUtilize o comando /p para mandar mensagens privadas \n\t Utilize o comando /r responder a última mensagem privada recebida \n\t Utilize o comado /q para sair\n')
                connection.send(message.encode())
                self.clients[username] = {
                    'conn': connection,
                    'username': username,
                    'thread': {},
                    'keep_alive': True
                }
                print(build_message_text('USER', username, f'Entrou no chat'))
                self.sendMessage('server', build_message_text('USER', username, 'entrou no bate-papo'))
                self.clients[username]['thread'] = threading.Thread(target=self.handleConnection, args=(lambda:self.clients[username]['keep_alive'], username))
                self.clients[username]['thread'].start()
            except timeout:
                # accept timed out; loop again so keep_alive is re-checked.
                pass
            except Exception as e:
                print(build_message_text('SERVER', 'ERROR', f'{e}'))
        return

    def start(self):
        """Run the accept loop and block until any key is pressed, then shut down."""
        print(build_message_text('SERVER', 'Chat', 'Inicializando servidor...'))
        self.main_thread.start()
        print(build_message_text('SERVER', 'Chat', 'Pressione qualquer tecla para parar a execução...'))
        input()
        print(build_message_text('SERVER', 'Chat', 'Parando thread principal...'))
        self.keep_alive = False
        self.main_thread.join()
        # Disconnect every remaining client before exiting.
        for username, connection in list(self.clients.items()):
            self.endConnection(username)
        print(build_message_text('SERVER', 'Chat', 'Saindo...'))
# Only start the server when executed as a script, not when imported.
if __name__ == "__main__":
    server = Server(SERVER_HOST, SERVER_PORT)
    server.start()
import numpy as np
import DateTimeTools as TT
def UTPlotLabel(fig, axis='x', seconds=False):
    """
    Replace the major tick labels of a time axis with 'HH:MM' (or
    'HH:MM:SS') strings, interpreting tick values as decimal hours mod 24.

    Inputs
    ======
    fig : matplotlib figure or axes instance.
    axis : str
        'x' or 'y' — which axis to relabel.  (Bug fix: this parameter was
        previously accepted but ignored; the x axis was always relabelled.)
    seconds : bool
        Include seconds in the labels instead of rounding to the minute.
    """
    # Accept either a figure (use its current axes) or an axes object.
    if hasattr(fig, 'gca'):
        ax = fig.gca()
    else:
        ax = fig
    # Remember the current view limits; setting ticks can change them.
    R = ax.axis()
    axobj = ax.yaxis if axis == 'y' else ax.xaxis
    mt = axobj.get_majorticklocs()
    labels = []
    for t in mt:
        # Wrap into a 0-24 h day before converting to HH:MM:SS.
        tmod = t % 24.0
        hh, mm, ss, ms = TT.DectoHHMM(tmod, True, True, Split=True)
        if seconds:
            utstr = u'{:02n}:{:02n}:{:02n}'.format(hh, mm, ss)
        else:
            # Round to the nearest minute, carrying into hours and
            # wrapping at midnight.
            if ss >= 30:
                mm += 1
                ss = 0
            if mm > 59:
                hh += 1
                mm = 0
            if hh > 23:
                hh = 0
            utstr = u'{:02n}:{:02n}'.format(hh, mm)
        labels.append(utstr)
    if axis == 'y':
        ax.set_yticks(mt)
        ax.set_yticklabels(labels)
    else:
        ax.set_xticks(mt)
        ax.set_xticklabels(labels)
    # Restore the original axis limits.
    ax.axis(R)
|
import cv2
import numpy as np
# Demo of OpenCV bitwise operations on a synthetic mask and a loaded image.
img1 = np.zeros((250, 500, 3), np.uint8)
img1 = cv2.rectangle(img1, (200, 0), (300, 100), (255, 255, 255), -1)

img2 = cv2.imread("image.jpg")
# Robustness: cv2.imread returns None (no exception) when the file is
# missing or unreadable; fail early instead of crashing inside cv2.resize.
if img2 is None:
    raise FileNotFoundError("Could not read 'image.jpg'")
img2 = cv2.resize(img2, (500, 250))

bitNot1 = cv2.bitwise_not(img1)
bitNot2 = cv2.bitwise_not(img2)

cv2.imshow("img1", img1)
cv2.imshow("img2", img2)
cv2.imshow('bitNot1', bitNot1)
cv2.imshow('bitNot2', bitNot2)

print("Size of image1:", img1.shape)
print("Size of image2:", img2.shape)

# Block until a key is pressed, then close all windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter Document.entry_type: optional 13-char choice field of BibTeX entry types."""

    dependencies = [
        ('library', '0022_auto_20190918_1425'),
    ]

    operations = [
        migrations.AlterField(
            model_name='document',
            name='entry_type',
            # max_length=13 matches the longest choice value ('inproceedings').
            field=models.CharField(blank=True, max_length=13, null=True, verbose_name=b'Document type', choices=[(b'undefined', 'Undefined'), (b'article', 'Article'), (b'book', 'Book'), (b'booklet', 'Booklet'), (b'conference', 'Conference'), (b'inbook', 'Inbook'), (b'incollection', 'Incollection'), (b'inproceedings', 'Inproceedings'), (b'manual', 'Manual'), (b'mastersthesis', "Master's Thesis"), (b'misc', 'Misc'), (b'phdthesis', 'Ph.D. Thesis'), (b'proceedings', 'Proceedings'), (b'techreport', 'Tech Report'), (b'unpublished', 'Unpublished')]),
        ),
    ]
|
from lr.tests import *
class TestPublisherController(TestController):
    """Route-level smoke tests for the publish controller.

    NOTE(review): these tests only issue the requests and never assert on
    the response, so they fail only if the route raises.  Consider adding
    status/body assertions.
    """

    def test_index(self):
        response = self.app.get(url('publish'))
        # Test response...

    def test_index_as_xml(self):
        response = self.app.get(url('formatted_publish', format='xml'))

    def test_create(self):
        response = self.app.post(url('publish'))

    def test_new(self):
        response = self.app.get(url('new_publish'))

    def test_new_as_xml(self):
        response = self.app.get(url('formatted_new_publish', format='xml'))

    def test_update(self):
        response = self.app.put(url('publish', id=1))

    def test_update_browser_fakeout(self):
        # Simulates browsers that cannot send PUT: POST with _method=put.
        response = self.app.post(url('publish', id=1), params=dict(_method='put'))

    def test_delete(self):
        response = self.app.delete(url('publish', id=1))

    def test_delete_browser_fakeout(self):
        # Simulates browsers that cannot send DELETE: POST with _method=delete.
        response = self.app.post(url('publish', id=1), params=dict(_method='delete'))

    def test_show(self):
        response = self.app.get(url('publish', id=1))

    def test_show_as_xml(self):
        response = self.app.get(url('formatted_publish', id=1, format='xml'))

    def test_edit(self):
        response = self.app.get(url('edit_publish', id=1))

    def test_edit_as_xml(self):
        response = self.app.get(url('formatted_edit_publish', id=1, format='xml'))
|
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def longestUnivaluePath(self, root: TreeNode) -> int:
        """Return the number of edges in the longest path on which every node
        shares the same value (the path need not pass through the root)."""
        best = [0]  # mutable cell so the nested helper can update it

        def arm(node):
            # Longest same-value "arm" (edge count) extending downward from node.
            if node is None:
                return 0
            left_arm = arm(node.left)
            right_arm = arm(node.right)
            down_left = left_arm + 1 if node.left and node.left.val == node.val else 0
            down_right = right_arm + 1 if node.right and node.right.val == node.val else 0
            # A full path through this node joins both arms.
            best[0] = max(best[0], down_left + down_right)
            # Only one arm can be extended by the parent.
            return max(down_left, down_right)

        arm(root)
        return best[0]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 27 06:06:55 2018
@author: mromiario
"""
import operator #library untuk mengambil value tertinggi dictionary
#Dibuat oleh Ibu Ade Romadhony
#Dikembangkan/dilengkapi oleh Muhammad Romi Ario Utomo - 1301154311
def read_file_init_table(fname):  # read the first 1000 sentences as training data
    """
    Scan the tagged corpus and build the three count tables the HMM tagger
    needs: per-tag counts, (word,tag) pair counts and (prev_tag,tag)
    transition counts.  Sentences are delimited by <kalimat>/</kalimat>
    markers; each data line is 'word<TAB>tag'.
    """
    tag_count = {}
    tag_count['<start>'] = 0
    word_tag = {}
    tag_trans = {}
    with open(fname) as f:
        content = f.readlines()
        # you may also want to remove whitespace characters like `\n` at the end of each line
        content = [x.strip() for x in content]
    idx_line = 0
    is_first_word = 0
    counter = 1
    while (idx_line < len(content)) and (counter <= 1000):  # limit processing to the first 1000 sentences
        prev_tag = '<start>'
        while (not content[idx_line].startswith('</kalimat')):
            if not content[idx_line].startswith('<kalimat'):
                content_part = content[idx_line].split('\t')
                # Count this tag's occurrences.
                if content_part[1] in tag_count:
                    tag_count[content_part[1]] += 1
                else:
                    tag_count[content_part[1]] = 1
                # Count the (word, tag) emission pair, keyed 'word,tag'.
                current_word_tag = content_part[0]+','+content_part[1]
                if current_word_tag in word_tag:
                    word_tag[current_word_tag] += 1
                else:
                    word_tag[current_word_tag] = 1
                # First word of a sentence transitions from <start>.
                if is_first_word == 1:
                    current_tag_trans = '<start>,'+content_part[1]
                    is_first_word = 0
                else:
                    current_tag_trans = prev_tag+','+content_part[1]
                if current_tag_trans in tag_trans:
                    tag_trans[current_tag_trans] += 1
                else:
                    tag_trans[current_tag_trans] = 1
                prev_tag = content_part[1]
            else:
                # Opening <kalimat ...> marker: new sentence begins.
                tag_count['<start>'] += 1
                is_first_word = 1
            idx_line = idx_line + 1
        idx_line = idx_line+1
        counter+=1
    return tag_count, word_tag, tag_trans
def create_trans_prob_table(tag_trans, tag_count):
    """Build the transition-probability table P(tag2 | tag1).

    Keys have the form 'tag1,tag2'; tag pairs never observed in tag_trans
    are simply omitted from the result.
    """
    trans_prob = {}
    for prev_tag in tag_count:
        for next_tag in tag_count:
            pair_key = '{},{}'.format(prev_tag, next_tag)
            if pair_key in tag_trans:
                # Relative frequency: count(prev -> next) / count(prev).
                trans_prob[pair_key] = tag_trans[pair_key] / tag_count[prev_tag]
    return trans_prob
def create_emission_prob_table(word_tag, tag_count):
    """Build the emission-probability table P(word | tag).

    word_tag keys look like 'word,TAG'.  The word ',' itself is special-cased
    because splitting on ',' would destroy it.
    """
    emission_prob = {}
    for entry, pair_count in word_tag.items():
        if entry[0] == ',':
            # Entry is ',,TAG': the word is the comma itself.
            current_word = entry[0]
            current_tag = entry[2:]
        else:
            parts = entry.split(',')
            current_word = parts[0]
            current_tag = parts[-1]  # the tag occupies the last slot
        emission_prob[current_word + ',' + current_tag] = pair_count / tag_count[current_tag]
    return emission_prob
def viterbi(trans_prob, emission_prob, tag_count, sentence):  # decode one sentence
    """
    Greedy Viterbi-style decoder: for each word in turn, score every tag as
    (best previous score) * P(tag|prev_tag) * P(word|tag) and keep the
    highest-scoring tag.  Returns the score matrix and the tag sequence.
    """
    # NOTE(review): viterbi_mat is keyed by the word string, so repeated
    # words in one sentence share (and overwrite) a single entry — confirm
    # this is intended.
    #initialization
    viterbi_mat = {}
    tag_sequence = []
    prev_word = ''
    sentence_words = sentence.split()
    for i in range(len(sentence_words)):
        viterbi_mat[sentence_words[i]] = {}  # nested dictionary: word -> {tag: score}
        for tag in tag_count.keys() :
            viterbi_mat[sentence_words[i]][tag]=0  # every tag starts at score 0 for this word
    for i in range(len(sentence_words)):
        if i==0 :
            prev_key = str('<start>')  # the first word follows the <start> tag
            max_value = 1  # maximum score of the <start> tag
        for key in tag_count.keys() :
            trans = str(prev_key+','+key)
            emis = str(sentence_words[i]+','+key)
            if (trans in trans_prob) and (emis in emission_prob):
                viterbi_mat[sentence_words[i]][key] = max_value*trans_prob[trans]*emission_prob[emis]  # previous best score times transition and emission probabilities
        prev_word=sentence_words[i]
        prev_key = max(viterbi_mat[prev_word].items(), key=operator.itemgetter(1))[0]  # pick the highest-scoring tag for this word
        if (viterbi_mat[prev_word][prev_key] != 0) :
            max_value = viterbi_mat[prev_word][prev_key]
            tag_sequence.append(prev_key)  # record the chosen tag
        else :
            tag_sequence.append('NN')  # unseen word: fall back to tag NN
    return viterbi_mat, tag_sequence
def read_dataset(fname): # read the dataset used as test data
    """Read up to 20 sentences (and their gold tags) from a <kalimat>-delimited file.

    Each sentence is wrapped in <kalimat ...> / </kalimat ...> lines; token
    lines are "word\ttag". Returns (sentences, tags) as parallel lists.
    NOTE(review): despite the "1001 - 1020" comment, counting starts at the top
    of the file, so this takes the FIRST 20 sentences - confirm intent.
    NOTE(review): a missing closing </kalimat tag would raise IndexError in the
    inner loop.
    """
    sentences = []
    tags = []
    with open(fname) as f:
        content = f.readlines()
    # you may also want to remove whitespace characters like `\n` at the end of each line
    content = [x.strip() for x in content]
    idx_line = 0
    counter = 1001
    while (idx_line < len(content)) and (counter<=1020): # take sentences 1001 - 1020
        sent = []
        tag = []
        while not content[idx_line].startswith('</kalimat'):
            if not content[idx_line].startswith('<kalimat'):
                content_part = content[idx_line].split('\t')
                sent.append(content_part[0])
                tag.append(content_part[1])
            idx_line = idx_line + 1
        sentences.append(sent)
        tags.append(tag)
        idx_line = idx_line+2
        counter+=1
    return sentences, tags
####DATA TRAINING
tag_count, word_tag, tag_trans = read_file_init_table('Dataset.txt') # read and preprocess the training data
#print(tag_count)
#print(word_tag)
trans_prob = create_trans_prob_table(tag_trans, tag_count) # build the transition-probability table
#print(trans_prob)
emission_prob = create_emission_prob_table(word_tag, tag_count) # build the emission-probability table
#print(emission_prob)
#DATA TESTING
# NOTE(review): the test sentences are read from the same file as the training
# data - confirm the train/test overlap is intentional.
kalimat,tag_aktual = read_dataset('Dataset.txt') # read the test data
JumBenar = 0
TotalTag = 0
for i in range(len(kalimat)) :
    print('')
    str1 = ' '.join(kalimat[i])
    print('Kalimat:',str1)
    matriks_viterbi, tag_uji = viterbi(trans_prob, emission_prob, tag_count, str1)
    print('Tag sebenarnya :',tag_aktual[i])
    print('Tag uji :',tag_uji)
    for j in range(len(tag_aktual[i])) :
        if tag_aktual[i][j] == tag_uji[j] :
            JumBenar += 1 # count correctly predicted tags
        else:
            print('KATA YANG SALAH :',kalimat[i][j])
            print('tag aktual :', tag_aktual[i][j])
            print('tag yang diuji salah :',tag_uji[j])
        TotalTag +=1 # count all tags
    print('')
print('Akurasi Viterbi :',JumBenar/TotalTag*100) # compute tag accuracy (percent)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 10:05:29 2019
@author: zoescrewvala
"""
import os
import cartopy
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
import xarray as xr
import pygemfxns_gcmbiasadj as gcmbiasadj
#%% X-Y PLOT
# Plot regional annual runoff (glacier + off-glacier) from a PyGEM simulation
# NetCDF and save the figure under ../Output/plots/.
#ds = Dataset(os.getcwd() + '/Output/simulations/CanESM2/R1_CanESM2_rcp26_c1_ba1_1sets_2000_2100.nc')
ds = xr.open_dataset(os.getcwd() + '/../Output/simulations/CanESM2/R1_CanESM2_rcp45_c1_ba1_1sets_1950_2000.nc')
#dens_ice = 917 # in kg/m^3
#mb = ds.loc[:,'mb_mwea']
#area = ds.loc[:,'area']
#mb_uncertainty = ds.loc[:,'mb_mwea_sigma']
# variables for vol over time plot
# variables
time = ds.variables['year'].values[:]
glac_runoff = ds.variables['runoff_glac_monthly'].values[:]
offglac_runoff = ds.variables['offglac_runoff_monthly'].values[:]
total_runoff = glac_runoff + offglac_runoff
# keep only the first slice of the trailing axis, then aggregate over glaciers
# (NOTE(review): trailing axis assumed to hold statistics variants - confirm)
total_runoff = total_runoff[:,:,0]
runoff_region = np.sum(total_runoff, axis=0)
runoff_init = np.sum(total_runoff[:,0])
runoff_norm = runoff_region/runoff_init
# monthly -> annual totals per glacier, then sum across the region
runoff_region_annual = gcmbiasadj.annual_sum_2darray(total_runoff)
runoff_region_annual = np.sum(runoff_region_annual, axis=0)
# X,Y values
x_values = time
y_values = runoff_region_annual
#y2_values = ds.loc[...]
# Set up your plot (and/or subplots)
fig, ax = plt.subplots(1, 1, squeeze=False, sharex=False, sharey=False, gridspec_kw = {'wspace':0.4, 'hspace':0.15})
# Plot
# zorder controls the order of the plots (higher zorder plots on top)
# label used to automatically generate legends (legends can be done manually for more control)
ax[0,0].plot(x_values, y_values, color='k', linewidth=1, zorder=2, label='plot1')
#ax[0,0].scatter(x_values, y_values, color='k', zorder=2, s=2)
#ax[0,0].scatter(x_values, y_values[7,:], color='m', zorder=2, s=2)
#ax[0,0].plot(x_values, y2_values, color='b', linewidth=1, zorder=2, label='plot2')
# Fill between
# fill between is useful for putting colors between plots (e.g., error bounds)
#ax[0,0].fill_between(x, y_low, y_high, facecolor='k', alpha=0.2, zorder=1)
# Text
# text can be used to manually add labels or to comment on plot
# transform=ax.transAxes means the x and y are between 0-1
ax[0,0].text(0.5, 1.03, 'Test glaciers', size=10, horizontalalignment='center', verticalalignment='top',
             transform=ax[0,0].transAxes)
# X-label
ax[0,0].set_xlabel('Year', size=12)
#ax[0,0].set_xlim(time_values_annual[t1_idx:t2_idx].min(), time_values_annual[t1_idx:t2_idx].max())
#ax[0,0].xaxis.set_tick_params(labelsize=12)
#ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(50))
#ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(10))
#ax[0,0].set_xticklabels(['2015','2050','2100'])
# Y-label
ax[0,0].set_ylabel('Runoff [m^3]', size=12)
#ax[0,0].set_ylim(0,1.1)
#ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(0.2))
#ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(0.05))
# Tick parameters
# controls the plotting of the ticks
#ax[0,0].yaxis.set_ticks_position('both')
#ax[0,0].tick_params(axis='both', which='major', labelsize=12, direction='inout')
#ax[0,0].tick_params(axis='both', which='minor', labelsize=12, direction='inout')
# Example Legend
# Option 1: automatic based on labels
#ax[0,0].legend(loc=(0.05, 0.05), fontsize=10, labelspacing=0.25, handlelength=1, handletextpad=0.25, borderpad=0,
#               frameon=False)
# Option 2: manually define legend
#leg_lines = []
#labels = ['plot1', 'plot2']
#label_colors = ['k', 'b']
#for nlabel, label in enumerate(labels):
#    line = Line2D([0,1],[0,1], color=label_colors[nlabel], linewidth=1)
#    leg_lines.append(line)
#ax[0,0].legend(leg_lines, labels, loc=(0.05,0.05), fontsize=10, labelspacing=0.25, handlelength=1,
#               handletextpad=0.25, borderpad=0, frameon=False)
# Save figure
# figures can be saved in any format (.jpg, .png, .pdf, etc.)
fig.set_size_inches(4, 4)
figure_fp = os.getcwd() + '/../Output/plots/'
if os.path.exists(figure_fp) == False:
    os.makedirs(figure_fp)
figure_fn = 'runoff_plot_sample.png'
fig.savefig(figure_fp + figure_fn, bbox_inches='tight', dpi=300)
|
# example_publisher.py
# Publish a single message to the 'pdfprocess' queue on a CloudAMQP broker.
import pika, os, logging
logging.basicConfig()
# Parse CLOUDAMQP_URL (fallback to a hardcoded broker URL)
# SECURITY(review): the fallback URL embeds real-looking credentials in source
# control - rotate them and supply the URL via the environment only.
url = os.environ.get('CLOUDAMQP_URL', 'amqp://oivaegml:LFBj9SGoVYhWqr14OJRrBY8KoI-NxXie@prawn.rmq.cloudamqp.com/oivaegml/%2f')
params = pika.URLParameters(url)
params.socket_timeout = 5
connection = pika.BlockingConnection(params) # Connect to CloudAMQP
channel = connection.channel() # start a channel
channel.queue_declare(queue='pdfprocess') # Declare a queue (idempotent)
# send a message
channel.basic_publish(exchange='', routing_key='pdfprocess', body='User information')
print ("[x] Message sent to consumer")
connection.close()
# Read the rectangle dimensions and the character used to draw the border.
width = int(input())
height = int(input())
symbol = input()

# Draw a hollow rectangle: border cells get the symbol, interior cells a space.
for row in range(height):
    for col in range(width):
        on_border = row in (0, height - 1) or col in (0, width - 1)
        print(symbol if on_border else " ", end="")
    print()
|
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse, HttpResponseBadRequest, HttpResponse
from django.core import serializers
from .forms import IncidentForm, UpdateForm, DeveloperForm
from .models import Incident,Developers
# Create your views here.
def indexView(request):
    """Render the main page with every incident, its developers, and the forms."""
    form = IncidentForm()
    update_form = UpdateForm()
    developer_form = DeveloperForm()
    incidents = Incident.objects.all()
    developers = []
    dev_info = Developers.objects.all()
    for i in incidents:
        # one developer queryset per incident, kept parallel to `incidents`
        developers += [i.developer.all()]
    zip_incident = zip(incidents,developers)
    return render(request, 'index.html', {"form": form, "incident": zip_incident, "update_form":update_form, "dev_info":dev_info, "developer_form": developer_form})
def postIncident(request):
    """AJAX endpoint: create an Incident from POSTed form data.

    Returns the serialized incident plus its developers on success, or the
    form errors (HTTP 400) on failure.
    """
    # NOTE(review): `request.is_ajax` is referenced without being called, so
    # this operand is always truthy (and is_ajax() was removed in Django 4.0)
    # - confirm the intended Django version/behavior.
    if request.is_ajax and request.method == "POST":
        form = IncidentForm(request.POST)
        if form.is_valid():
            instance = form.save()
            dev_data = instance.developer.all()
            ser_instance = serializers.serialize('json',[instance,])
            ser_dev = serializers.serialize('json', dev_data)
            return JsonResponse({"instance": ser_instance, 'dev_instance':ser_dev},status=200)
        else:
            return JsonResponse({"error": form.errors},status=400)
    return JsonResponse({"error":"error"},status=400)
def checkName(request):
    """AJAX endpoint: report whether a company name is still unused.

    Returns {"valid": false} when an Incident with that company_name exists.
    """
    # NOTE(review): `request.is_ajax` referenced without calling it - always truthy.
    if request.is_ajax and request.method=="GET":
        company_name = request.GET.get("company_name",None)
        if Incident.objects.filter(company_name=company_name).exists():
            return JsonResponse({"valid":False},status = 200)
        else:
            return JsonResponse({"valid":True},status = 200)
    return JsonResponse({},status = 400)
def delete_post(request, test_id):
    """AJAX endpoint: delete the Incident with primary key `test_id`.

    Returns {"valid": True} on success, 400 for non-DELETE methods, and 404
    when the incident does not exist.
    """
    # Bug fix: Incident.objects.get raised an unhandled DoesNotExist (HTTP 500)
    # for unknown ids; get_object_or_404 (already imported) returns a 404.
    remv_post = get_object_or_404(Incident, id=test_id)
    if request.method=='DELETE':
        remv_post.delete()
        return JsonResponse({
            'valid':True
        })
    return HttpResponseBadRequest('invalid')
def update_post(request, test_id):
    """AJAX endpoint: update an Incident (and its developer links) via PUT.

    The urlencoded PUT body is parsed by hand: field order is assumed to be
    company_name, first_name, last_name, developer ids.
    NOTE(review): manual '&'/'='/'+' splitting is fragile (breaks on any '&',
    '=' or '%xx' escape inside a value) - consider django.http.QueryDict.
    NOTE(review): the join loops append a trailing space to both names - confirm
    this is intended before it is persisted.
    """
    if request.method == "PUT":
        all_data = request.body.decode('utf-8').split('&')
        dev_team = list(filter(None,all_data[3].split('=')[1].split('+')))
        spc_name = all_data[1].split('=')[1].split('+')
        spc_comp = all_data[0].split('=')[1].split('+')
        #allows to add names with spaces
        str_spc_name = ''
        str_comp_name = ''
        for val in spc_name:
            str_spc_name += val + ' '
        for dal in spc_comp:
            str_comp_name += dal + ' '
        clean_data = {
            'company_name': str_comp_name,
            'first_name': str_spc_name,
            'last_name': all_data[2].split('=')[1],
        }
        form = UpdateForm(clean_data)
        if form.is_valid():
            obj, was_created = Incident.objects.update_or_create(id = test_id, defaults = clean_data)
            obj.developer.clear()
            # NOTE(review): update_or_create never returns None, so this check
            # is always true.
            if obj != None:
                for i in dev_team:
                    dev_obj = Developers.objects.get(id = i)
                    obj.developer.add(dev_obj)
                obj.save()
            dev_data = obj.developer.all()
            ser_dev = serializers.serialize('json', dev_data)
            ser_instance = serializers.serialize('json',[obj])
            return JsonResponse({"instance": ser_instance, 'dev_instance':ser_dev},status=200)
        else:
            return JsonResponse({"error": form.errors},status=400)
    else:
        return JsonResponse({"error":"error"},status=400)
#Requests for the developer model starts here
def create_developer(request):
    """AJAX endpoint: create a Developers entry and link it to the selected
    incidents (ids posted under 'incidents').

    Returns the serialized developer on success, or the form errors / a
    generic error with HTTP 400.
    """
    # NOTE(review): `request.is_ajax` referenced without calling it - always truthy
    # (and is_ajax() was removed in Django 4.0).
    if request.is_ajax and request.method == "POST":
        form = DeveloperForm(request.POST)
        if form.is_valid():
            id_list = request.POST.getlist('incidents')
            instance = form.save()
            # Renamed the loop variable: `id` shadowed the builtin. Also removed
            # the leftover debug print of developer_teams.
            for incident_id in id_list:
                incident = Incident.objects.get(id=incident_id)
                instance.developer_teams.add(incident)
            instance.save()
            ser_instance = serializers.serialize('json',[instance,])
            return JsonResponse({"instance": ser_instance},status=200)
        else:
            return JsonResponse({"error": form.errors},status=400)
    return JsonResponse({"error":"error"},status=400)
def delete_developer(request, test_id):
    """AJAX endpoint: delete the Developers entry with primary key `test_id`.

    Returns {"valid": True} on success, 400 for non-DELETE methods, and 404
    when the developer does not exist.
    """
    # Bug fix: Developers.objects.get raised an unhandled DoesNotExist (HTTP
    # 500) for unknown ids; get_object_or_404 (already imported) returns a 404.
    remv_dev = get_object_or_404(Developers, id=test_id)
    if request.method=='DELETE':
        remv_dev.delete()
        return JsonResponse({
            'valid':True
        })
    return HttpResponseBadRequest('invalid')
def checkTeamName(request):
    """AJAX endpoint: report whether a developer team name is still unused.

    Returns {"valid": false} when a Developers row with that team_name exists.
    """
    # NOTE(review): `request.is_ajax` referenced without calling it - always truthy.
    if request.is_ajax and request.method=="GET":
        team_name = request.GET.get("team_name",None)
        if Developers.objects.filter(team_name=team_name).exists():
            return JsonResponse({"valid":False},status = 200)
        else:
            return JsonResponse({"valid":True},status = 200)
    return JsonResponse({},status = 400)
def update_developer(request, test_id):
    """AJAX endpoint: update a Developers entry via PUT.

    The urlencoded PUT body is parsed by hand: field order is assumed to be
    team_name, team_email, team_number.
    NOTE(review): the email parsing assumes exactly one '%40' in the value and
    raises IndexError otherwise - consider django.http.QueryDict instead.
    """
    if request.method == "PUT":
        all_data = request.body.decode('utf-8').split('&')
        spc_name = all_data[0].split('=')[1].split('+')[0]
        spc_email = all_data[1].split('=')[1].split('%40')
        #allows to add names with spaces
        str_spc_email = spc_email[0]+'@'+spc_email[1]
        clean_data = {
            'team_name': spc_name,
            'team_email': str_spc_email,
            'team_number': all_data[2].split('=')[1],
        }
        form = DeveloperForm(clean_data)
        if form.is_valid():
            obj, was_created = Developers.objects.update_or_create(id = test_id, defaults = clean_data)
            # NOTE(review): update_or_create never returns None; the extra
            # save() is also redundant after update_or_create.
            if obj != None:
                obj.save()
            ser_instance = serializers.serialize('json',[obj])
            return JsonResponse({"dev_instance": ser_instance,},status=200)
        else:
            return JsonResponse({"error": form.errors},status=400)
    else:
        return JsonResponse({"error":"error"},status=400)
class Morphs:
    """One morpheme parsed from a tab-separated dependency-parser line."""

    def __init__(self, morphs):
        # A line looks like "surface\tfeat0,feat1,...": split once on the first
        # tab, then split the feature string on commas.
        surface, feature_str = morphs.split('\t', 1)
        features = feature_str.split(',')
        self.surface = surface   # surface form
        self.base = features[5]  # base form (NOTE(review): index 5 per this data format - confirm)
        self.pos = features[0]   # part of speech
        self.pos1 = features[1]  # POS sub-category
class Chunk:
    """A dependency-parse phrase: its morphemes plus dependency links."""

    def __init__(self, morphs, dst):
        self.dst = dst        # index of the chunk this one depends on
        self.srcs = []        # indices of chunks depending on this one (filled later)
        self.morphs = morphs  # list of Morphs making up the chunk
class Sentence():
    """A sentence as a list of Chunks; wires up reverse dependency links."""

    def __init__(self, chunks):
        self.chunks = chunks
        # For every chunk that has a head (dst != -1), register this chunk's
        # index in the head chunk's source list.
        for index, phrase in enumerate(chunks):
            if phrase.dst != -1:
                chunks[phrase.dst].srcs.append(index)
if __name__ == '__main__':
    # Parse a CaboCha-style dependency-parsed corpus into Sentence objects,
    # then extract "sahen-noun + wo + verb" predicates with their particles.
    c = 0
    sentences = []
    morphs = []
    chunks = []
    fname = '../data/src/ai.ja.text.parsed'
    with open(fname, 'r') as f:
        # process one line at a time
        for line in f:
            # dependency header line (starts with '*')
            if line[0] == '*':
                if morphs != []:
                    chunks.append(Chunk(morphs, dst))
                    morphs = []
                dst = int(line.split()[2].rstrip('D'))
            # ignore lines that are only a newline
            elif line == '\n':
                continue
            # morpheme line (anything other than the end-of-sentence marker)
            elif line != 'EOS\n':
                morphs.append(Morphs(line))
            # end-of-sentence (EOS) line: close the chunk and the sentence
            else:
                chunks.append(Chunk(morphs, dst))
                sentences.append(Sentence(chunks))
                morphs = []
                chunks = []
                dst = None
    # everything above is the same as 41.py ====================================================================================================================
    with open('./ans47.txt', 'w') as f:
        for sentence in sentences:
            for chunk in sentence.chunks:
                for morph in chunk.morphs:
                    if morph.pos == '動詞': # scan the chunk left-to-right for a verb
                        for i, src in enumerate(chunk.srcs): # check whether a source chunk of the verb is "sahen-noun + を"
                            if len(sentence.chunks[src].morphs) == 2 and sentence.chunks[src].morphs[0].pos1 == 'サ変接続' and sentence.chunks[src].morphs[1].surface == 'を':
                                predicate = ''.join([sentence.chunks[src].morphs[0].surface, sentence.chunks[src].morphs[1].surface, morph.base])
                                cases = []
                                modi_chunks = []
                                for src_r in chunk.srcs[:i] + chunk.srcs[i + 1:]: # look for particles in the remaining source chunks
                                    case = [morph.surface for morph in sentence.chunks[src_r].morphs if morph.pos == '助詞']
                                    if len(case) > 0: # chunks containing a particle contribute the particle and the argument
                                        cases = cases + case
                                        modi_chunks.append(''.join(morph.surface for morph in sentence.chunks[src_r].morphs if morph.pos != '記号'))
                                if len(cases) > 0: # if any particle was found, de-duplicate, sort, and emit with the arguments
                                    cases = sorted(list(set(cases)))
                                    line = '{}\t{}\t{}'.format(predicate, ' '.join(cases), ' '.join(modi_chunks))
                                    print(line, file=f)
                        break
'''
Created on Dec 8, 2019
@author: slane
'''
# For each module mass in input.txt: fuel = mass // 3 - 2, then repeatedly add
# fuel for the fuel itself until the requirement drops to zero
# (looks like Advent of Code 2019 day 1 part 2 - unverified).
inputFile = open("input.txt","r")
totalFuel = 0
for line in inputFile.readlines():
    fuel = int(line)
    fuel = int(fuel/3) - 2
    fuelPlus = int(fuel/3) - 2
    while (fuelPlus > 0):
        totalFuel += fuelPlus
        fuelPlus = int(fuelPlus/3) - 2
    totalFuel += fuel
inputFile.close()
print ("Total Fuel Needed: ",totalFuel)
def flattenList(x):
    """Flatten arbitrarily nested lists/tuples/sets into one flat list.

    Non-container elements (including strings) are kept as-is; every list,
    tuple, or set encountered is flattened recursively.
    """
    flattened = []
    for item in x:
        # isinstance is the idiomatic type check and, unlike the original
        # `type(i) in (...)` comparison, also handles subclasses.
        if isinstance(item, (list, tuple, set)):
            flattened.extend(flattenList(item))
        else:
            flattened.append(item)
    return flattened
if __name__ == "__main__":
for test_case in ([1, 2, 3], [1, [2, [3]]], ['1', '2', 'python', (3, 4), (3, 4, [5, [6]])], []):
z = flattenList(test_case)
print(z)
|
"""
Use DMatrix api to train and test
Cross-validation hasn't support DMatrix yet, but will do soon.
"""
import os
import xlearn as xl
import pandas as pd
data_path = r"F:\for learn\Python\Repo_sources\xlearn\demo\regression\house_price"
train_file = os.path.join(data_path, "house_price_train.txt")
test_file = os.path.join(data_path, "house_price_test.txt")
output_model = os.path.join(data_path, "temp.model")
output_file = os.path.join(data_path, "output.txt")
# FM hyper-parameters: regression task, learning rate, L2 lambda, MAE metric.
param = {"task": "reg", "lr": 0.2, "lambda": 0.002, "metric": "mae"}
if __name__ == '__main__':
    # Tab-separated files; target is column 0, features are the rest.
    train_data = pd.read_csv(train_file, sep="\t", header=None)
    test_data = pd.read_csv(test_file, sep="\t", header=None)
    columns = train_data.columns
    X_train = train_data[columns[1:]]
    y_train = train_data[0]
    X_test = test_data[columns[1:]]
    y_test = test_data[0]
    train_matrix = xl.DMatrix(X_train, y_train)
    test_matrix = xl.DMatrix(X_test, y_test)
    fm_model = xl.create_fm()
    # The held-out set doubles as the validation set during training.
    fm_model.setTrain(train_matrix)
    fm_model.setValidate(test_matrix)
    fm_model.fit(param, output_model)
    fm_model.setTest(test_matrix)
    fm_model.predict(output_model, output_file)
|
class Solution:
    def twoOutOfThree(self, nums1, nums2, nums3):
        """Return the set of values present in at least two of the three lists.

        Args:
            nums1, nums2, nums3: lists of hashable values.

        Returns:
            set: values appearing in two or more of the input lists.
        """
        # set() consumes an iterable directly - no need for the original
        # element-by-element add() loops.
        s1, s2, s3 = set(nums1), set(nums2), set(nums3)
        # A value present in at least two lists lies in some pairwise intersection.
        return (s1 & s2) | (s2 & s3) | (s1 & s3)
# Demo: 3 is shared by nums1/nums2, 1 by nums1/nums3, 2 by nums2/nums3.
sol = Solution()
print(sol.twoOutOfThree([3,1],[2,3],[1,2]))
import MetaTrader5 as mt5
# show data about the MetaTrader5 package
print("MetaTrader5 package author: ", mt5.__author__)
print("MetaTrader5 package version: ", mt5.__version__)
# establish a connection to the MetaTrader 5 terminal
if not mt5.initialize():
    print("initialize() failed, error code =", mt5.last_error())
    quit()
# fetch all trading symbols
symbols = mt5.symbols_get()
print('Symbols: ', len(symbols))
count = 0
# display the first five symbols
for s in symbols:
    count += 1
    print("{}. {}".format(count, s.name))
    if count == 5: break
print()
# fetch symbols whose names contain "RU"
ru_symbols = mt5.symbols_get("*RU*")
print('len(*RU*): ', len(ru_symbols))
for s in ru_symbols:
    print(s.name)
print()
# fetch symbols whose names contain none of USD, EUR, JPY, GBP
group_symbols = mt5.symbols_get(group="*,!*USD*,!*EUR*,!*JPY*,!*GBP*")
print('len(*,!*USD*,!*EUR*,!*JPY*,!*GBP*):', len(group_symbols))
for s in group_symbols:
    print(s.name, ":", s)
# disconnect from the MetaTrader 5 terminal
mt5.shutdown()
from mxnet import ndarray as nd
from mxnet import autograd as ag
import random
# yield the data in mini-batches
def load_data_iter():
    """Yield (features, labels) mini-batches in random order.

    Relies on module-level globals: X, y, sample_num, batch_size.
    """
    # index list over all samples
    idx = list(range(sample_num))
    # shuffle in place so batches are drawn in random order
    random.shuffle(idx)
    for i in range(0, sample_num, batch_size):
        j = nd.array(idx[i: min(i + batch_size, sample_num)])
        yield nd.take(X, j), nd.take(y, j)
# define the model - here just simple linear regression
def net(input_data):
    """Linear model: input_data @ learn_w + learn_b (module-level parameters)."""
    return nd.dot(input_data, learn_w) + learn_b
# loss function
def square_loss(yhat, real_y):
    """Per-sample squared error; real_y is reshaped to yhat's shape."""
    # reshape to prevent unintended broadcasting
    return (yhat - real_y.reshape(shape=yhat.shape)) ** 2
# gradient descent, updating parameters in place
def SGD(params, lr):
    """One SGD step: param <- param - lr * param.grad, written in place via param[:]."""
    for param in params:
        param[:] = param - lr * param.grad
if __name__ == '__main__':
    # number of passes over the training set
    epoch = 10
    # learning rate
    learning_rate = .001
    # number of samples
    sample_num = 1000
    # feature dimension
    input_dim = 2
    # mini-batch size
    batch_size = 10
    # generate the feature matrix
    X = nd.random_normal(0, 1, shape=(sample_num, input_dim))
    # true weights
    true_w = [2, -3.4]
    # true bias
    true_b = 4.5
    # generate the labels
    y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_b
    # add noise to the labels
    y += .01 * nd.random_normal(shape=y.shape)
    # inspect the first 10 samples
    # print(X[:10],y[:10])
    # test batched data loading
    # for data, label in load_data_iter():
    #     print(data, label)
    #     break
    # initialize the learnable weight and bias parameters
    learn_w = nd.random_normal(shape=(input_dim, 1))
    learn_b = nd.zeros((1,))
    params = [learn_w, learn_b]
    # allocate gradient buffers for the parameters
    for param in params:
        param.attach_grad()
    # start training
    for e in range(epoch):
        total_loss = 0
        for data, label in load_data_iter():
            with ag.record():
                # forward pass
                out = net(data)
                loss = square_loss(out, label)
            # backpropagate the loss
            loss.backward()
            # update the parameters along the gradient
            SGD(params, learning_rate)
            total_loss += nd.sum(loss).asscalar()
        print('Epoch %d , average loss is %f' % (e, total_loss / sample_num))
    # print(learn_w )
|
# coding:utf-8
################
#练习3:数字与数学计算
################
# 题目:企业发放的奖金根据利润提成。利润(I)低于或等于10万元时,奖金可提10%;利润高于10万元,低于20万元时,
# 低于10万元的部分按10%提成,高于10万元的部分,可可提成7.5%;20万到40万之间时,高于20万元的部分,可提成
# 5%;40万到60万之间时高于40万元的部分,可提成3%;60万到100万之间时,高于60万元的部分,可提成1.5%,高
# 于100万元时,超过100万元的部分按1%提成,从键盘输入当月利润I,求应发放奖金总数?
# 程序分析:条件语句的运用,if else
# bonus1 10万元时奖金
# bonus2 20万元时奖金
# bouns3 40万元时奖金
# bouns4 60万元时奖金
# bouns5 100万元时奖金
# 奖金= 固定奖金 + 超出固定绩效部分绩效*提成比例
# Cumulative bonus earned at each profit bracket boundary
# (10% up to 100k, then 7.5%, 5%, 3%, 1.5%, 1% for the marginal amounts).
bonus1 = 100000*0.1
bonus2 = bonus1 + 100000 * 0.075
bonus3 = bonus2 + 200000 * 0.05
bonus4 = bonus3 + 200000 * 0.03
bonus5 = bonus4 + 400000 * 0.015
# input() returns a string; convert to int before numeric comparison.
mon = int(input(" 请输入月利润:\n "))
# Flattened the original deeply nested if/else pyramid into an if/elif chain:
# each branch applies the marginal rate above its bracket floor and adds the
# cumulative bonus earned at that floor. The final else also removes the
# redundant inner `if mon < 100000` check, which could have left `bonus`
# unbound in principle.
if mon >= 1000000:
    bonus = (mon - 1000000) * 0.01 + bonus5
elif mon >= 600000:
    bonus = (mon - 600000) * 0.015 + bonus4
elif mon >= 400000:
    bonus = (mon - 400000) * 0.03 + bonus3
elif mon >= 200000:
    bonus = (mon - 200000) * 0.05 + bonus2
elif mon >= 100000:
    bonus = (mon - 100000) * 0.075 + bonus1
else:
    bonus = mon * 0.1
print(bonus)
|
import re
import math
from uuid import UUID
from .exceptions import Invalid
def str_validation(val, key=None, min_length=None, max_length=None, regex=None, choices=None, cast=None, *args, **kwargs):
    """Validate (and optionally cast) a string value.

    Checks, in order: optional cast to str, type, min/max length, regex
    match, and membership in `choices`. Returns the (possibly cast) value;
    raises Invalid on the first failed check.
    """
    if cast:
        try:
            val = str(val)
        except (ValueError, TypeError):
            raise Invalid(f'key: "{key}" contains invalid item "{type(val).__name__}": unable to convert from type to str')
    if not isinstance(val, str):
        raise Invalid(f'key: "{key}" contains invalid item "{val}" with type "{type(val).__name__}": not of type string')
    length = len(val)
    if min_length is not None and length < min_length:
        raise Invalid(f'key: "{key}" contains invalid item "{val}": less then minimal length of {min_length}')
    if max_length is not None and length > max_length:
        raise Invalid(f'key: "{key}" contains invalid item "{val}": more then maximal length of {max_length}')
    if regex is not None and re.match(regex, val) is None:
        raise Invalid(f'key: "{key}" contains invalid item "{val}": does not adhere to regex rules {regex}')
    if choices is not None and val not in choices:
        raise Invalid(f'key: "{key}" contains invalid item "{val}": not in valid choices {choices}')
    return val
def int_validation(val, key=None, min_amount=None, max_amount=None, cast=None, *args, **kwargs):
    """Validate (and optionally cast) an integer value.

    Checks, in order: optional cast to int, type, and min/max bounds.
    Returns the (possibly cast) value; raises Invalid on the first failure.
    NOTE(review): isinstance(val, int) also accepts bool - confirm whether
    booleans should be rejected.
    """
    if cast:
        try:
            val = int(val)
        except (ValueError, TypeError):
            raise Invalid(f'key: "{key}" contains invalid item "{type(val).__name__}": unable to convert from type "{val}" to integer')
    if not isinstance(val, int):
        raise Invalid(f'key: "{key}" contains invalid item "{val}" with type "{type(val).__name__}": not of type int')
    if min_amount is not None and val < min_amount:
        raise Invalid(f'key: "{key}" contains invalid item "{val}": integer is less then {min_amount}')
    if max_amount is not None and val > max_amount:
        # Bug fix: the message wrongly said "less then" for the upper bound.
        raise Invalid(f'key: "{key}" contains invalid item "{val}": integer is greater then {max_amount}')
    return val
def float_validation(val, key=None, min_amount=None, max_amount=None, cast=None, *args, **kwargs):
    """Validate (and optionally cast) a float value.

    Checks, in order: optional cast to float, type (NaN is rejected), and
    min/max bounds. Returns the (possibly cast) value; raises Invalid on the
    first failure.
    """
    if cast:
        try:
            val = float(val)
        except (ValueError, TypeError):
            raise Invalid(f'key: "{key}" contains invalid item "{type(val).__name__}": unable to convert from type "{val}" to float')
    if not isinstance(val, float) or math.isnan(val):
        raise Invalid(f'key: "{key}" contains invalid item "{val}" with type "{type(val).__name__}": not of type float')
    if min_amount is not None and val < min_amount:
        raise Invalid(f'key: "{key}" contains invalid item "{val}": float is less then {min_amount}')
    if max_amount is not None and val > max_amount:
        # Bug fix: the message wrongly said "less then" for the upper bound.
        raise Invalid(f'key: "{key}" contains invalid item "{val}": float is greater then {max_amount}')
    return val
def list_validation(val, key=None, min_amount=None, max_amount=None, *args, **kwargs):
    """Validate that val is a list whose length lies within [min_amount, max_amount].

    Returns val unchanged; raises Invalid on the first failed check.
    """
    if not isinstance(val, list):
        raise Invalid(f'key: "{key}" contains invalid item "{val}" with type "{type(val).__name__}": not of type list')
    size = len(val)
    if min_amount is not None and size < min_amount:
        raise Invalid(f'key: "{key}" contains invalid item "{val}": contains less then minimal amount of {min_amount}')
    if max_amount is not None and size > max_amount:
        # NOTE(review): this message's format differs from the others
        # (no key:/quoting) - confirm whether that is intentional.
        raise Invalid(f'{key} contains invalid item {val}: contains more then maximum amount of {max_amount}')
    return val
def dict_validation(val, key=None, min_amount=None, max_amount=None, key_regex=None, *args, **kwargs):
    """Validate that val is a dict, its size is within bounds, and every key
    (stringified) matches key_regex when one is given.

    Returns val unchanged; raises Invalid on the first failed check.
    """
    if not isinstance(val, dict):
        raise Invalid(f'"{key}": is not a dictionary')
    size = len(val)
    if min_amount is not None and size < min_amount:
        raise Invalid(f'key: "{key}" contains invalid item "{val}": {len(val)} contains less then minimal amount of {min_amount}')
    if max_amount is not None and size > max_amount:
        raise Invalid(f'key: "{key}" contains invalid item "{val}": {len(val)} contains more then maximum amount of {max_amount}')
    if key_regex is not None:
        # Report the first key that fails the regex, if any.
        failed = next((str(k) for k in val.keys() if not re.match(key_regex, str(k))), None)
        if failed is not None:
            raise Invalid(f'{key}: has dictionary key "{failed}" that does not adhere to regex {key_regex}')
    return val
def uuid_validation(val, key=None, *args, **kwargs):
    """Validate that val parses as a UUID; returns it unchanged on success.

    NOTE(review): per the stdlib docs, passing version=4 overrides the
    variant/version bits rather than rejecting non-v4 input, so this mainly
    checks that the string is a well-formed UUID.
    """
    try:
        _ = UUID(val, version=4)
    except (ValueError, AttributeError, TypeError):
        # Bug fix: the original message lacked the f prefix, so "{key}" and
        # "{val}" were emitted literally instead of being interpolated.
        raise Invalid(f'key: "{key}" contains invalid item "{val}": invalid UUID4')
    return val
# Maps type-name strings to their validator callable; several names share a
# validator ('list_dicts' -> list_validation, 'aso_array' -> dict_validation).
types = {
    'int': int_validation,
    'str': str_validation,
    'float': float_validation,
    'uuid': uuid_validation,
    'list': list_validation,
    'list_dicts': list_validation,
    'dict': dict_validation,
    'aso_array': dict_validation,
}
|
# Read an integer and report its tens digit.
num = int(input("Digite um número inteiro: "))
dezena = (num // 10) % 10
print("O dígito das dezenas é", dezena)
# -*- coding: utf-8 -*-
import random
import hashlib
def get_chars(length=None, mode=2):
    """Return a random alphabetic string.

    :param length: number of characters; when falsy, a random length in [1, 20] is used
    :param mode: 0 = lowercase only, 1 = uppercase only, anything else = mixed case
    :return: the generated string
    """
    if not length:
        length = random.randint(1, 20)
    lowercase = 'abcdefghijklmnopqrstuvwxyz'
    if mode == 0:
        pool = lowercase
    elif mode == 1:
        pool = lowercase.upper()
    else:
        pool = lowercase + lowercase.upper()
    # sample with replacement from the chosen alphabet
    return "".join(random.choices(list(pool), k=length))
def hash_obj(obj):
    """Return the hex SHA-256 digest of obj (str values are UTF-8 encoded first)."""
    payload = obj.encode("utf-8") if isinstance(obj, str) else obj
    return hashlib.sha256(payload).hexdigest()
if __name__ == "__main__":
print(hash_obj("hello world"))
|
from gtts import gTTS
from playsound import playsound
import requests
import random
import webbrowser
def noticias():
    """Fetch Brazil's top headlines from NewsAPI and print the first ten."""
    # SECURITY(review): the API key is hardcoded in source - move it to an
    # environment variable and rotate the exposed key.
    url = ('https://newsapi.org/v2/top-headlines?'
           'country=br&'
           'apiKey=05d5ce74721c41698d58009213297db9')
    # NOTE(review): requests' timeout is in seconds, so 3000 is ~50 minutes -
    # this was likely meant to be 3.
    req = requests.get(url, timeout=3000)
    json = req.json()
    # walk through the first 10 news items
    for c in range(10):
        print('Notícia ' + str(c + 1))
        print(json['articles'][c]['title'])
        print(json['articles'][c]['description'])
noticias()
# Count how many times the user answers "Y"/"y"; stop on "N"/"n" and warn on
# any other entry.
count=0
entry ='Y'
while entry !='N' and entry!= 'n':
    print(count)
    entry = input('please enter "Y" to continue or "N" to quit:')
    if entry == 'Y' or entry == 'y':
        count+= 1
    elif entry !='N'and entry != 'n':
        print('"'+ entry + '" is not valid choice')
|
from bs4 import BeautifulSoup
import requests
from Mail import sendMail
from writeAndRead import saveData, checkData
data = 0
source = requests.get('https://www.worldometers.info/coronavirus/').text
soup = BeautifulSoup(source, 'html.parser')
table = soup.find('tbody')
# Walk the text nodes after the table body; the number for Denmark appears two
# text nodes after the country name. NOTE(review): this is tightly coupled to
# the site's current markup and will silently keep data == 0 if it changes.
i = 3
for row in table.find_all_next(string=True):
    i = i + 1
    if 'Denmark' in row:
        i = 0
    if i == 2:
        data = row
        break
# strip thousands separators, e.g. "1,234" -> 1234
data = int((str(data).replace(',', '')))
if data > checkData():
    print('Sending email')
    sendMail(data)
print('Saving data')
saveData(str(data))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-01-30 23:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax BasicItem fields: image fields become nullable; text/char fields
    get blank=True with empty-string defaults."""
    dependencies = [
        ('core', '0011_content_position'),
    ]
    operations = [
        migrations.AlterField(
            model_name='basicitem',
            name='cover',
            field=models.ImageField(null=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='basicitem',
            name='discus_domain',
            field=models.CharField(blank=True, default='', max_length=256),
        ),
        migrations.AlterField(
            model_name='basicitem',
            name='html_description',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='basicitem',
            name='html_favicon',
            field=models.ImageField(null=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='basicitem',
            name='html_keywords',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='basicitem',
            name='html_title',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='basicitem',
            name='profile',
            field=models.ImageField(null=True, upload_to=''),
        ),
    ]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Zhangye"
# Date: 18-6-6
# Count down from 7 to 1, printing one number per line.
current = 7
while current >= 1:
    print(current)
    current -= 1
from collections import Counter # importing the dict subclass Counter to count how many times
# each color appears and store the colors and their counts in a dictionary as keys and values
def matchingSocks(n, ar):
    """Return how many complete pairs can be formed from the sock colors in ar.

    n is the declared number of socks (unused by the computation; kept for
    interface compatibility). Counter tallies occurrences per color, and each
    color contributes count // 2 pairs.
    """
    return sum(occurrences // 2 for occurrences in Counter(ar).values())
# Two sample runs of matchingSocks with fixed arrays.
n1 = 30
array1 = (30,1,9,94,5,9,7,5,7,29,5,7,9,1,2,5,8,22,10,9,9,15,2,9,10,10,2,7,3) # first array with 30 elements
print(matchingSocks(n1,array1))
n2 = 80
array2 = (8,19,9,15,10,11,16,19,7,8,3,13,17,17,8,4,9,14,15,11,12,20,20,\
7,12,6,18,9,20,9,15,13,16,4,2,10,7,16,14,5,18,1,13,7,11,9,8,9,2,\
10,8,3,11,9,2,7,17,13,19,2,19,6,7,19,8,20,10,18,15,3,16,19,1,9,13,17,14,4,6,15) # first array with 80 elements
print(matchingSocks(n2,array2))
|
# Generated by Django 3.2.4 on 2021-10-17 22:56
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make GuestLog.date_logged blank-able with a default of datetime.now.
    The default is the *callable* datetime.now (evaluated per save), not
    now() - which would have frozen the migration's own timestamp."""
    dependencies = [
        ('main', '0036_alter_guestlog_special_note'),
    ]
    operations = [
        migrations.AlterField(
            model_name='guestlog',
            name='date_logged',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now),
        ),
    ]
|
#!/usr/bin/env python
# coding=utf-8
import unittest
from epherousa.searchers.ExploitDB import ExploitDB
from epherousa.searchers.PacketStorm import PacketStorm
from epherousa.searchers.SecurityFocus import SecurityFocus
from epherousa.test.base_test import BaseTest
class TestSearcherCVE(BaseTest):
    """Smoke tests for CVE-based search: each searcher must return at least
    one exploit for a known CVE (network-dependent)."""
    def setUp(self):
        # Dirty COW CVE
        self.cve = 'CVE-2016-5195'
        # just for exploitdb
        self.exploitdb_cve = 'CVE-2016-5840'
        self.limit = 5
    def test_exploitdb(self):
        exploitdb = ExploitDB(_cve=self.exploitdb_cve, _limit=self.limit)
        exploitdb.find_exploits()
        self.assertGreater(len(exploitdb.exploits), 0,
                           'Exploit-db could not find any {} exploit'.format(self.exploitdb_cve))
    def test_packetstorm(self):
        packetstorm = PacketStorm(_cve=self.cve, _limit=self.limit)
        packetstorm.find_exploits()
        self.assertGreater(len(packetstorm.exploits), 0, 'PacketStorm could not find any Dirty COW exploit')
    # def test_zerodaytoday(self):
    #     zerodaytoday = ZeroDayToday(_cve=self.cve, _limit=self.limit)
    #     zerodaytoday.find_exploits()
    #     self.assertGreater(len(zerodaytoday.exploits), 0, '0day.today could not find any Dirty COW exploit')
    def test_securityfocus(self):
        securityfocus = SecurityFocus(_cve=self.cve, _limit=self.limit)
        securityfocus.find_exploits()
        self.assertGreater(len(securityfocus.exploits), 0, 'SecurityFocus could not find any Dirty COW exploit')
class TestSearcherPhrase(unittest.TestCase):
    """Smoke tests for free-text search: each searcher must return at least
    one exploit for the phrase 'Dirty COW' (network-dependent)."""
    def setUp(self):
        self.phrase = 'Dirty COW'
        self.limit = 5
    def test_exploitdb(self):
        exploitdb = ExploitDB(_search_string=self.phrase, _limit=self.limit)
        exploitdb.find_exploits()
        self.assertGreater(len(exploitdb.exploits), 0, 'Exploit-db could not find any Dirty COW exploit')
    def test_packetstorm(self):
        packetstorm = PacketStorm(_search_string=self.phrase, _limit=self.limit)
        packetstorm.find_exploits()
        self.assertGreater(len(packetstorm.exploits), 0, 'PacketStorm could not find any Dirty COW exploit')
    # def test_zerodaytoday(self):
    #     zerodaytoday = ZeroDayToday(_search_string=self.phrase, _limit=self.limit)
    #     zerodaytoday.find_exploits()
    #     self.assertGreater(len(zerodaytoday.exploits), 0, '0day.today could not find any Dirty COW exploit')
    def test_securityfocus(self):
        securityfocus = SecurityFocus(_search_string=self.phrase, _limit=self.limit)
        securityfocus.find_exploits()
        self.assertGreater(len(securityfocus.exploits), 0, 'SecurityFocus could not find any Dirty COW exploit')
|
class Employee(object):
    """A person employed somewhere, with a job title and a start date."""

    def __init__(self, name, job_title, start_date):
        self.start_date = start_date
        self.job_title = job_title
        self.name = name

    def set_name(self, name):
        """Replace this employee's name."""
        self.name = name

    def get_name(self):
        """Return this employee's name."""
        return self.name
class Company(object):
    """This represents a company in which people work"""

    def __init__(self, name, title, start_date):
        self.name = name
        self.title = title
        self.start_date = start_date
        self.employees = set()

    def get_name(self):
        """Returns the name of the company"""
        return self.name

    def __repr__(self):
        # Join employee names with a trailing ", " after each one, matching
        # the historical string-concatenation format.
        roster = ''.join(member.name + ', ' for member in self.employees)
        return ('%s, the %s, founded on %s employs %s' % (repr(self.name), repr(self.title), repr(self.start_date), repr(roster)))
# Demo: build a company, add three employees, and print its repr.
ThisCo = Company('ThisCo', 'Company of zees', 'apr 1 2017')
me = Employee('Billing', 'Billing', 'apr 1 2017')
you = Employee('Shipping', 'Shipping', 'apr 1 2017')
we = Employee('Accounting', 'Accounting', 'apr 1 2017')
ThisCo.employees.add(me)
ThisCo.employees.add(you)
ThisCo.employees.add(we)
print(ThisCo)
#-*- coding: UTF-8 -*-
import re
from flask import render_template, request, redirect, url_for, json
from xichuangzhu import app, db
from xichuangzhu.models.work_model import Work, WorkImage, WorkReview
from xichuangzhu.models.author_model import Author
from xichuangzhu.models.dynasty_model import Dynasty
# page home
#--------------------------------------------------
@app.route('/')
def index():
    """Render the home page: random works/authors, newest images and reviews,
    and every dynasty ordered chronologically."""
    context = {
        'works': Work.query.order_by(db.func.rand()).limit(4),
        'work_images': WorkImage.query.order_by(WorkImage.create_time.desc()).limit(18),
        'work_reviews': WorkReview.query.filter(WorkReview.is_publish == True).order_by(WorkReview.create_time.desc()).limit(4),
        'authors': Author.query.order_by(db.func.rand()).limit(5),
        'dynasties': Dynasty.query.order_by(Dynasty.start_year),
    }
    return render_template('site/index.html', **context)
# json - gene works data for index page
@app.route('/index_works', methods=['POST'])
def index_works():
    """Return the random-works widget markup (POSTed by the home page)."""
    random_works = Work.query.order_by(db.func.rand()).limit(4)
    return render_template('widget/index_works.widget', works=random_works)
# page about
#--------------------------------------------------
@app.route('/about')
def about():
    """Render the static about page."""
    template_name = 'site/about.html'
    return render_template(template_name)
# page 404
#--------------------------------------------------
@app.errorhandler(404)
def page_404(error):
    """Render the not-found page together with HTTP status 404."""
    body = render_template('site/404.html')
    return body, 404
# page 500
#--------------------------------------------------
@app.errorhandler(500)
def page_500(error):
    """Render the server-error page together with HTTP status 500."""
    body = render_template('site/500.html')
    return body, 500
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Data Enlighten Technology (Beijing) Co.,Ltd
__author__ = 'ada'
import Lib.Logger.log4py as log
class Pinyin():
    """Convert Chinese characters to pinyin via a code-point lookup table.

    The table file maps an uppercase hexadecimal code point to a
    space-separated list of tone-numbered readings, one entry per line,
    e.g. "6D4B<TAB>ce4".
    """

    def __init__(self, data_path='./Mandarin.dat'):
        """Load the code-point -> readings table from *data_path*.

        Fix: the original left the file handle open; a context manager now
        closes it deterministically.
        """
        self.dict = {}
        with open(data_path) as data_file:
            for line in data_file:
                k, v = line.split('\t')
                self.dict[k] = v
        self.splitter = ''   # separator inserted between converted characters

    def get_pinyin(self, chars=u"输入"):
        """Return the pinyin of *chars* joined by self.splitter.

        Only the first reading of each character is used, with its trailing
        tone digit stripped; characters absent from the table (punctuation,
        digits, latin letters, ...) are passed through unchanged.
        """
        result = []
        try:
            for char in chars:
                key = "%X" % ord(char)
                try:
                    # First reading; strip() then [:-1] drops the tone digit.
                    result.append(self.dict[key].split(" ")[0].strip()[:-1].lower())
                except KeyError:
                    # Fix: was a bare except; only a missing table entry
                    # should fall back to the raw character.
                    result.append(char)
        except Exception as e:
            # Unexpected failure (e.g. non-character input): log and return
            # whatever was converted so far, as the original did.
            log.e(e)
        return self.splitter.join(result)

    def get_initials(self, char=u'你好'):
        """Return the first letter of the first reading of a single character.

        Returns the input unchanged when *char* is not a single character
        (TypeError from ord()) or is missing from the table (KeyError).
        """
        try:
            return self.dict["%X" % ord(char)].split(" ")[0][0]
        except (KeyError, TypeError):
            # Fix: was a bare except, which also hid programming errors.
            return char
if __name__ == '__main__':
    # Smoke test with mixed Chinese, digits and latin characters.
    converter = Pinyin()
    print(converter.get_pinyin("测试2a论2台a.!"))
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import numpy
def distance(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2)."""
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)

def KNN(trainData, type, textData, K):
    """Classify *textData* by majority vote among its K nearest neighbours.

    trainData -- list of [x, y] points
    type      -- label of each training point (parallel to trainData)
    textData  -- [x, y] query point
    K         -- number of neighbours to vote

    Bug fix: the original returned `type` (the last label seen while
    iterating the vote dict) instead of `maxType`, so the majority vote was
    discarded whenever more than one label was among the K neighbours.
    Ties are still broken in favour of the label that first appeared among
    the sorted neighbours, as before.
    """
    dists = [distance(p[0], p[1], textData[0], textData[1]) for p in trainData]
    nearest = numpy.argsort(dists)[:K]
    # Count votes in neighbour order (insertion order matters for ties).
    votes = {}
    for idx in nearest:
        votes[type[idx]] = votes.get(type[idx], 0) + 1
    maxType = None
    best = 0
    for label, count in votes.items():
        if count > best:
            best = count
            maxType = label
    return maxType
if __name__ == "__main__":
    # Five labelled 2-D training points: three 'A's near the origin and
    # two 'B's near x = 10.
    trainData = [[1, 1], [1, 0], [0, 0], [10, 2], [10, 1]]
    type = ['A', 'A', 'A', 'B', 'B']
    textData = [2, 2]
    K = 2
    print(KNN(trainData, type, textData, K))
#<ImportSpecificModules>
import ShareYourSystem as SYS
import numpy as np
import scipy.stats
from tables import *
import time
import operator
import os
#</ImportSpecificModules>
#<DefineLocals>
HookStr="Mul"
#</DefineLocals>
#<DefineClass>
class DistanceClass(SYS.ObjectsClass):
	"""Toy SYS model: squares IntsList, sums the squares, and raises the sum
	to PowerFloat. Declares the parameter/result table layouts used by the
	attest_* functions below via the framework's hook methods."""
	#<DefineHookMethods>
	def initAfter(self):
		# Default instance state; SquaredIntsList/DistanceFloat are derived
		# again in outputAfter whenever the inputs change.
		#<DefineSpecificDo>
		self.IntsList=[1,4,3]
		self.PowerFloat=0.5
		self.SquaredIntsList=[1,16,3]
		self.UnitsInt=3
		self.DistanceFloat=np.sqrt(sum(self.SquaredIntsList))
		#</DefineSpecificDo>
		#Definition the features
		# Input-side table: column layout, plus the values scanned by attest_scan.
		self['App_Model_ParameterizingDict']={
			'ColumningTuplesList':
			[
				#ColumnStr #Col
				('PowerFloat', Float64Col()),
				('IntsList', (Int64Col,'UnitsInt'))
			],
			'IsFeaturingBool':True,
			'ScanningTuplesList':
			[
				('IntsList',[[1,2,3],[4,5]])
			]
		}
		#Definition the outputs
		# Output-side table, joined back to the parameter table.
		self['App_Model_ResultingDict']={
			'ColumningTuplesList':
			[
				#ColumnStr #Col
				('SquaredIntsList', (Int64Col,'UnitsInt')),
				('DistanceFloat', Float64Col()),
				('IntsList', (Int64Col,'UnitsInt'))
			],
			'JoiningTuple':("","Parameter")
		}
	def outputAfter(self,**_LocalOutputingVariablesDict):
		#set the SquaredIntsList
		# NOTE(review): under Python 3 map() is a lazy iterator that the sum()
		# below exhausts, leaving SquaredIntsList empty afterwards — this code
		# presumably targets Python 2; confirm before porting.
		self.SquaredIntsList=map(lambda __Int:__Int**2,self.IntsList)
		#set the SumInt
		self.DistanceFloat=np.power(sum(self.SquaredIntsList),self.PowerFloat)
	#</DefineHookMethods>
	#</DefineTriggeringHookMethods>
	def bindIntsListAfter(self):
		#Bind with UnitsInt setting
		# Keeps the per-row array column width in sync with the list length.
		self.UnitsInt=len(self.IntsList)
	#</DefineTriggeringHookMethods>
#<DefineAttestingFunctions>
def attest_insert():
	"""Insert several (IntsList, PowerFloat) parameter combinations and their
	derived results into the 'Result' table, close the HDF file, and return a
	printable report of the object plus the h5ls listing of the file.

	NOTE(review): assumes h5ls is installed at /usr/local/bin/h5ls."""
	#Insert the default output
	Distance=SYS.DistanceClass(
	).update(
		[
			('IntsList',[4,5]),
			('PowerFloat',0.5)
		]
	).insert('Result'
	).update(
		[
			('IntsList',[4,5]),
			('PowerFloat',1.)
		]
	).insert(
	).update(
		[
			('IntsList',[4,5]),
			('PowerFloat',2.)
		]
	).insert(
	).update(
		[
			('IntsList',[1,2,3]),
			('PowerFloat',0.5)
		]
	).insert(
	).update(
		[
			('IntsList',[4,6]),
			('PowerFloat',1.)
		]
	).insert(
	).update(
		[
			('IntsList',[1,2,3]),
			('PowerFloat',1.)
		]
	).insert(
	).update(
		[
			('IntsList',[0,1]),
			('PowerFloat',0.5)
		]
	).insert(
	).hdfclose()
	#Return the object and the h5py
	return "\n\n\n\n"+SYS.represent(
		Distance
	)+'\n\n\n'+SYS.represent(
		os.popen('/usr/local/bin/h5ls -dlr '+Distance.HdformatingPathStr).read()
	)
def attest_merge():
	"""Merge the 'Result' rows whose UnitsInt equals 2, close the HDF file,
	and return a printable report of the resulting object."""
	merged = SYS.DistanceClass().merge(
		'Result',
		[
			('UnitsInt',(operator.eq,2)),
		]
	).hdfclose()
	return "\n\n\n\n"+SYS.represent(merged)
def attest_retrieve():
	"""Configure merge and retrieval filters on the result table, retrieve the
	matching 'Result' rows, close the HDF file, and return a printable report."""
	#Retrieve
	Distance=SYS.DistanceClass(
	).update(
		[
			# Restrict the merge to rows whose UnitsInt is exactly 2.
			('/App_Model_ResultingDict/MergingTuplesList',
				[
					('UnitsInt',(operator.eq,2))
				]
			),
			# Row filters: DistanceFloat > 30, IntsList == [4,5], joined
			# parameter row equal to [0,1].
			('/App_Model_ResultingDict/RetrievingIndexesListsList',[
						('DistanceFloat',(operator.gt,30.)),
						('__IntsList',(SYS.getIsEqualBool,[4,5])),
						('ParameterizedJoinedList',(SYS.getIsEqualBool,[0,1]))
					])
		]
	).retrieve('Result'
	).hdfclose()
	#Return the object and the h5py
	return "\n\n\n\n"+SYS.represent(
		Distance
	)
def attest_recover():
	"""Configure merge plus result/parameter retrieval filters, recover the
	matching 'Result' state, close the HDF file, and return a printable report."""
	#Recover
	Distance=SYS.DistanceClass(
	).update(
		[
			# Restrict the merge to rows whose UnitsInt is exactly 2.
			('/App_Model_ResultingDict/MergingTuplesList',
				[
					('UnitsInt',(operator.eq,2))
				]
			),
			# Result-side filter: DistanceFloat > 30 (the stricter list filters
			# are intentionally commented out here).
			('/App_Model_ResultingDict/RetrievingIndexesListsList',[
						('DistanceFloat',(operator.gt,30.)),
						#('__IntsList',(SYS.getIsEqualBool,[4,5])),
						#('ParameterizedJoinedList',(SYS.getIsEqualBool,[0,1]))
					]),
			# Parameter-side filter: IntsList == [4,5].
			('/App_Model_ParameterizingDict/RetrievingIndexesListsList',[
						('IntsList',(SYS.getIsEqualBool,[4,5])),
					])
		]
	).recover('Result'
	).hdfclose()
	#Return the object and the h5py
	return "\n\n\n\n"+SYS.represent(
		Distance
	)
def attest_scan():
	"""Scan the 'Result' table over the declared ScanningTuplesList values,
	close the HDF file, and return a report of the object plus the h5ls
	listing of the file."""
	scanned = SYS.DistanceClass().scan('Result').hdfclose()
	report = "\n\n\n\n"+SYS.represent(scanned)+'\n\n\n'
	listing = os.popen('/usr/local/bin/h5ls -dlr '+scanned.HdformatingPathStr).read()
	return report+SYS.represent(listing)
#</DefineAttestingFunctions>
|
from django import forms
from geofr.constants import REGIONS, DEPARTMENTS
class RegionField(forms.ChoiceField):
    """Form field to select a single french region."""

    def __init__(self, *args, **kwargs):
        # The region list is fixed; callers never supply their own choices.
        kwargs["choices"] = REGIONS
        # Bug fix: positional arguments were accepted but silently dropped;
        # forward them to ChoiceField like the keyword arguments.
        super().__init__(*args, **kwargs)
class DepartmentField(forms.ChoiceField):
    """Form field to select a single french department."""

    def __init__(self, *args, **kwargs):
        # The department list is fixed; callers never supply their own choices.
        kwargs["choices"] = DEPARTMENTS
        # Bug fix: positional arguments were accepted but silently dropped;
        # forward them to ChoiceField like the keyword arguments.
        super().__init__(*args, **kwargs)
|
import torch
import time
from apex import amp
import os
import sys
import math
from utils import options, utils, criterions
from utils.ddp_trainer import DDPTrainer
from utils.meters import StopwatchMeter, TimeMeter
import data
from data import data_utils, load_dataset_splits
from models import build_model
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
MAX = 2147483647
def _gen_seeds(shape):
return np.random.uniform(1, MAX, size=shape).astype(np.float32)
seed_shape = (32 * 1024 * 12, )
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':f'):
        self.name = name   # label used by __str__
        self.fmt = fmt     # format spec for val/avg, e.g. ':6.3f'
        self.reset()

    def reset(self):
        """Clear the running statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Prints a '[batch/total]' prefix followed by each meter's summary."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for *batch*."""
        line = [self.prefix + self.batch_fmtstr.format(batch)]
        line.extend(str(meter) for meter in self.meters)
        print('\t'.join(line))

    def _get_batch_fmtstr(self, num_batches):
        """Build e.g. '[{:3d}/500]' so batch numbers align with the total."""
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def main(args):
    """End-to-end training driver: pick the device, load dictionaries and
    dataset splits, build the model/criterion/optimizer (optionally wrapped
    in apex AMP), then train and validate until args.max_epoch."""
    print(args)
    # Device selection; assumes args.device_id is a string — TODO confirm,
    # since it is concatenated directly into the device spec.
    if args.platform == "gpu":
        device = torch.device('cuda:' + args.device_id)
        device_func = torch.cuda
    elif args.platform == "npu":
        device = torch.device('npu:' + args.device_id)
        device_func = torch.npu
    # import any Python files in the optim/ directory
    # print(f"{os.path.dirname(__file__)}")
    # for file in os.listdir(os.path.dirname(__file__) + "/optim"):
    #     if file.endswith('.py') and not file.startswith('_'):
    #         module = file[:file.find('.py')]
    #         importlib.import_module('optim.' + module)
    else:
        device = torch.device('cpu')
    print("Running on device {}".format(device))
    args.device = device
    if args.max_tokens is None:
        args.max_tokens = 6000
    torch.manual_seed(args.seed)
    # Vocabulary + train/valid/test splits.
    src_dict, tgt_dict = data_utils.load_dictionaries(args)
    datasets = load_dataset_splits(args, ['train', 'valid', 'test'], src_dict, tgt_dict)
    # Per-element random seeds handed to the model (moved to the device).
    seed = _gen_seeds(seed_shape)
    seed = torch.from_numpy(seed)
    seed = seed.to(device)
    model = build_model(args, seed=seed)
    model = model.to(device)
    print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters())))
    criterion = criterions.LabelSmoothedCrossEntropyCriterion(args).to(device)
    # optimizer = optim.build_optimizer(args, model.parameters())
    # Build trainer
    # trainer = DDPTrainer(args, model)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| training on {} devices'.format(args.distributed_world_size))
    print('| max sentences per NPU = {}'.format(args.max_sentences))
    # Adam with the standard Transformer betas; LR left at the default.
    optimizer = torch.optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-9)
    if args.amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_level, loss_scale=8, verbosity=0)
    writer = SummaryWriter(args.save_dir)
    # Single-shard batch iterator over the training subset.
    epoch_itr = data.EpochBatchIterator(
        dataset=datasets[args.train_subset],
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences_valid,
        max_positions=args.max_positions,
        ignore_invalid_inputs=True,
        required_batch_size_multiple=8,
        seed=args.seed,
        num_shards=1,
        shard_id=0,
        max_positions_num=96,
    )
    # Train until the learning rate gets too small or model reaches target score
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    run_summary = {'loss': float('inf'),
                   'val_loss': float('inf'),
                   'speed': 0,
                   'accuracy': 0}
    # max_update
    while epoch_itr.epoch < max_epoch:
        # train for one epoch
        train(args, datasets, epoch_itr, model, criterion, optimizer)
        if epoch_itr.epoch % args.validate_interval == 0:
            # First element of the returned losses is logged to TensorBoard.
            valid_losses = validate(args, datasets, epoch_itr, model, criterion, optimizer)
            writer.add_scalar('loss/val', valid_losses[0], epoch_itr.epoch)
    writer.close()
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, datasets, epoch_itr, model, criterion, optimizer):
    """Train the model for one epoch.

    Loss is reported in bits per token (nat loss / ln 2). Bug fix: the
    original called optimizer.zero_grad() only once before the loop while
    optimizer.step() ran every batch, so gradients accumulated across the
    whole epoch; gradients are now cleared after every step.
    """
    model.train()
    optimizer.zero_grad()
    itr = epoch_itr.next_epoch_itr()
    # update parameters every N batches
    # NOTE(review): update_freq is computed and printed but never used to
    # gate optimizer.step() — confirm whether gradient accumulation was
    # intended here.
    if epoch_itr.epoch <= len(args.update_freq):
        update_freq = args.update_freq[epoch_itr.epoch - 1]
    else:
        update_freq = args.update_freq[-1]
    num_batches = len(epoch_itr)
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    sentence_s = AverageMeter('Sentence/s', ':6.3f')
    losses = AverageMeter('Loss', ':.4f')
    progress = ProgressMeter(int(num_batches),
                             [batch_time, data_time, sentence_s, losses],
                             prefix = "Epoch: [{}]".format(epoch_itr.epoch))
    print("Update Frequence is :", str(update_freq))
    end = time.time()
    for i, sample in enumerate(itr):
        data_time.update(time.time() - end)
        # move sample to device
        sample = utils.move_to_device(args, sample)
        # calculate loss and sample size
        # src_tokens, src_lengths, prev_output_tokens
        # npu only accept int32 tensors
        if args.platform == "npu":
            sample['net_input']['src_tokens'] = sample['net_input']['src_tokens'].to(torch.int32)
            sample['net_input']['prev_output_tokens'] = sample['net_input']['prev_output_tokens'].to(torch.int32)
            sample['target'] = sample['target'].to(torch.int32)
        elif args.platform == "gpu":
            sample['net_input']['src_tokens'] = sample['net_input']['src_tokens'].to(torch.int64)
            sample['net_input']['prev_output_tokens'] = sample['net_input']['prev_output_tokens'].to(torch.int64)
            sample['target'] = sample['target'].to(torch.int64)
        logits, _ = model(sample['net_input']['src_tokens'], sample['net_input']['src_lengths'], sample['net_input']['prev_output_tokens'])
        target = sample['target']
        probs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
        loss = criterion(probs, target)
        # Per-token loss in bits (divide by ln 2).
        losses.update(loss.item() / sample['ntokens'] / math.log(2))
        if args.amp:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        # Bug fix: clear gradients after each update so they do not
        # accumulate across batches.
        optimizer.zero_grad()
        if i % 10 == 0:
            progress.display(i)
        batch_time.update(time.time() - end)
        end = time.time()
    print("End of epoch, batch_size:", args.max_sentences, 'Time: {:.3f}'.format(batch_time.avg), ' Sentence/s@all {:.3f}'.format(
        args.max_sentences / batch_time.avg))
def validate(args, datasets, epoch_itr, model, criterion, optimizer):
    """Evaluate the model on the validation set(s) and return the losses.

    Returns a list with the average loss (bits per token) of each subset
    named in args.valid_subset, in order.

    Fixes vs. the original:
    - the signature now matches the call in main(), which passes epoch_itr
      (accepted here for compatibility, currently unused);
    - `subset` was read but never defined (NameError); every subset listed
      in args.valid_subset is now evaluated;
    - a list is returned instead of a scalar, since main() indexes [0].
    """
    model.eval()
    valid_losses = []
    for subset in args.valid_subset.split(','):
        # Initialize data iterator for this subset (single shard, no shuffle).
        itr = data.EpochBatchIterator(
            dataset=datasets[subset],
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences_valid,
            max_positions=args.max_positions,
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=8,
            seed=args.seed,
            num_shards=1,
            shard_id=0,
            max_positions_num=1024,
        ).next_epoch_itr(shuffle=False)
        num_batches = len(itr)
        batch_time = AverageMeter('Time', ':6.3f')
        losses = AverageMeter('Loss', ':.4e')
        progress = ProgressMeter(
            int(num_batches),
            [batch_time, losses],
            prefix='Test: ')
        for i, sample in enumerate(itr):
            # move sample to device
            sample = utils.move_to_device(args, sample)
            with torch.no_grad():
                # npu only accepts int32 tensors
                if args.platform == "npu":
                    sample['net_input']['src_tokens'] = sample['net_input']['src_tokens'].to(torch.int32)
                    sample['net_input']['prev_output_tokens'] = sample['net_input']['prev_output_tokens'].to(torch.int32)
                    sample['target'] = sample['target'].to(torch.int32)
                elif args.platform == "gpu":
                    sample['net_input']['src_tokens'] = sample['net_input']['src_tokens'].to(torch.int64)
                    sample['net_input']['prev_output_tokens'] = sample['net_input']['prev_output_tokens'].to(torch.int64)
                    sample['target'] = sample['target'].to(torch.int64)
                logits, _ = model(sample['net_input']['src_tokens'], sample['net_input']['src_lengths'], sample['net_input']['prev_output_tokens'])
                target = sample['target']
                probs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
                loss = criterion(probs, target)
                losses.update(loss.item() / sample['ntokens'] / math.log(2))
            if i % 10 == 0:
                progress.display(i)
        print(f'Validation loss: {losses.avg}')
        valid_losses.append(losses.avg)
    return valid_losses
if __name__ == '__main__':
    # Build the CLI parser, resolve architecture-specific defaults, and run.
    arg_parser = options.get_training_parser()
    ARGS = options.parse_args_and_arch(arg_parser)
    main(ARGS)
from App.Mysql.MysqlTool import MysqlTool
class Error(MysqlTool):
    """
    Error-information table ("错误信息表").

    Maps MysqlTool onto the db_error table; all query behaviour comes from
    the MysqlTool base class.
    """
    # Backing MySQL table name consumed by MysqlTool.
    _table = 'db_error'
|
class Node:
    """One element of a singly linked list."""

    def __init__(self, dataval):
        self.dataval = dataval   # payload stored in this node
        self.nextval = None      # successor node, or None at the tail
class Linkedlist:
    """A minimal singly linked list with print and linear search."""

    def __init__(self):
        self.headval = None   # first node, or None when the list is empty

    def printlist(self):
        """Print each node's payload, head to tail, one per line."""
        node = self.headval
        while node is not None:
            print(node.dataval)
            node = node.nextval

    def scarch(self, k):
        """Return True if any node's payload equals *k*, else False."""
        node = self.headval
        while node is not None:
            if node.dataval == k:
                return True
            node = node.nextval
        return False
# Demo: build sat -> Sun -> Mon, print the list, then search for "sat".
# Fix: the list variable was named `list`, shadowing the builtin.
week = Linkedlist()
week.headval = Node("sat")
e1 = Node("Sun")
e2 = Node("Mon")
week.headval.nextval = e1
e1.nextval = e2
week.printlist()
print("Given node is", week.scarch("sat"))
|
from functions.microphone import listen_microphone
from kym import Kym
from sentence import Sentence
import pandas as pd
import sys
import os
# Paths to the labelled training corpus and the plain-text evaluation set.
training_data_path = "C:/Users/vitor/Documents/KYM/src/database/traindata.csv"
test_data_path = "C:/Users/vitor/Documents/KYM/src/test.txt"

# Build and initialise the classifier once, at import time.
kym = Kym(data_path=training_data_path)
kym.init()

if __name__ == "__main__":
    # Classify every sentence in the test file and show its predicted label.
    resp = None
    for test in pd.read_csv(test_data_path)["sentences"]:
        resp = Sentence([test], kym)
        print(test, resp.label)
import sys
import math
#import mathutils
import random
import datetime
# Direction Map 1D
# =======
# 2 . 1
# =======
def nnX1D(i):
    """Map a 1-D direction code to an x step: 1 -> +1, 2 -> -1, else 0."""
    return {1: 1, 2: -1}.get(i, 0)
# Direction Map 2D
# =======
# 2
# 3 . 1
# 4
# =======
def nnX2D(i):
    """Map a 2-D direction code to an x step: 1 -> +1, 3 -> -1, else 0."""
    return {1: 1, 3: -1}.get(i, 0)
def nnY2D(i):
    """Map a 2-D direction code to a y step: 2 -> +1, 4 -> -1, else 0."""
    return {2: 1, 4: -1}.get(i, 0)
# Direction Map 3D
# =======
# 2
# 3 . 1 5 = +Z, 6 = -Z
# 4
# =======
def nnX3D(i):
    """Map a 3-D direction code to an x step: 1 -> +1, 2 -> -1, else 0.

    Note: -x is code 2 here (the original carries a '#was 3' comment),
    deviating from the 2-D mapping where 3 is -x.
    """
    return {1: 1, 2: -1}.get(i, 0)
def nnY3D(i):
    """Map a 3-D direction code to a y step: 2 -> +1, 4 -> -1, else 0."""
    return {2: 1, 4: -1}.get(i, 0)
def nnZ3D(i):
    """Map a 3-D direction code to a z step: 5 -> +1, 6 -> -1, else 0."""
    return {5: 1, 6: -1}.get(i, 0)
# Direction Map 4D
# =======
# 2
# 3 . 1 5 = +Z, 6 = -Z
# 4 7 = +W, 8 = +W
# =======
def nnX4D(i):
    """Map a 4-D direction code to an x step: 1 -> +1, 2 -> -1, else 0.

    Note: -x is code 2 here (the original carries a '#was 3' comment).
    """
    return {1: 1, 2: -1}.get(i, 0)
def nnY4D(i):
    """Map a 4-D direction code to a y step: 2 -> +1, 4 -> -1, else 0."""
    return {2: 1, 4: -1}.get(i, 0)
def nnZ4D(i):
    """Map a 4-D direction code to a z step: 5 -> +1, 6 -> -1, else 0."""
    return {5: 1, 6: -1}.get(i, 0)
def nnW4D(i):
    """Map a 4-D direction code to a w step: 7 -> +1, 8 -> -1, else 0."""
    return {7: 1, 8: -1}.get(i, 0)
#
# 0 - Wall
# 1 - Open
# 2 - Visited
#
def create3DGrid(porosity, size):
    """Build a size**3 grid of open (1) cells, then mark a `porosity`
    fraction of randomly chosen cells as walls (0).

    Cell codes: 0 = wall, 1 = open, 2 = visited. Random picks may repeat,
    so the actual wall fraction can be slightly below `porosity`.
    """
    grid = [[[1 for _ in range(size)] for _ in range(size)] for _ in range(size)]
    for _ in range(int(size ** 3 * porosity)):
        x = random.randint(0, size - 1)
        y = random.randint(0, size - 1)
        z = random.randint(0, size - 1)
        grid[x][y][z] = 0
    return grid
#
# 0 - Wall
# 1 - Open
# 2 - Visited
#
def create2DGrid(size):
    """Return a size x size grid of open (1) cells.

    Cell codes: 0 = wall, 1 = open, 2 = visited.

    Bug fix: the original appended a third nesting level (size**3 cells),
    apparently copy-pasted from the 3-D initializer. The only visible caller
    (selfAvoidingRandomWalk) indexes the grid as grid[x][y], compares to 2
    and assigns 2, so a genuine 2-D grid preserves its behaviour while using
    O(size**2) memory instead of O(size**3).
    """
    return [[1 for _ in range(size)] for _ in range(size)]
#
# simpleRandomWalk1D
#
def simpleRandomWalk1D( steps, repeat ):
    """Run `repeat` independent 1-D random walks of `steps` unit steps each.

    Writes one row per step to ./output/1D/ : the step index, every walk's
    position, then <x> and <x>**2 across walks; writes the fraction of walks
    that revisited the origin to the companion _P0_X file.

    NOTE(review): the second column is the square of the mean position, not
    the mean of x**2 — confirm which statistic was intended. Assumes the
    ./output/1D/ directory already exists.
    """
    row = 0
    col = 0
    stepGrid = []       # stepGrid[step][walk] = position after this step
    sumX = []           # sumX[step] = sum of positions across walks
    originFlag = False  # True once the current walk returns to x == 0
    originCount = 0     # number of walks that touched the origin
    # Initialize Grid
    for i in range( steps ):
        stepGrid.append([])
        sumX.append(0)
        #r.append([])
        for j in range( repeat ):
            stepGrid[i].append(0)
            #r[i].append(0)
    # One column per walk; row is reset after each walk finishes.
    while col < repeat:
        x = 0
        xDir = 0
        prevX = 0
        while row < steps:
            direction = random.randint(1, 2)
            xDir = nnX1D( direction )
            prevX = x
            x += xDir
            stepGrid[row][col] = x
            sumX[row] += x
            #r[row][col] = math.sqrt( x**2 )
            if x == 0:
                originFlag = True
            row += 1
        col += 1
        row = 0
        if originFlag:
            originCount += 1
            originFlag = False
    # output
    row = 0
    col = 0
    fileName = './output/1D/randWalk_1D_st' + str(steps) + '_sa' + str(repeat) + '.dat'
    fileNameP0_X = './output/1D/randWalk_1D_st' + str(steps) + '_sa' + str(repeat) + '_P0_X.dat'
    outFile = open(fileName, 'w')
    # Print out walks
    while row < steps:
        outFile.write( "%6d" % ( row ) )
        while col < repeat:
            outFile.write( "%6d" % ( stepGrid[row][col] ) )
            col += 1
        avgX = ( sumX[row] / repeat )
        outFile.write( "%8.2f" % ( avgX ) ) # Average X
        outFile.write( "%8.2f" % ( avgX**2 ) ) # Average X Squared
        outFile.write( "\n" )
        col = 0
        row += 1
    outFile.close()
    probFile = open(fileNameP0_X, 'w')
    probFile.write( "%8.2f\n" % (originCount / repeat) )
    probFile.close()
#### END OF 1D
#
# simpleRandomWalk2D
#
def simpleRandomWalk2D( steps, repeat ):
    """Run `repeat` independent 2-D lattice random walks of `steps` steps.

    Writes per-step x and y trajectories (one file per axis) with <x>, <x>**2
    columns appended, plus the fraction of walks that revisited the origin.

    NOTE(review): as in the 1-D variant, the squared column is the square of
    the mean, not the mean square. Assumes ./output/2D/ exists.
    """
    row = 0
    col = 0
    stepGridX = []   # stepGridX[step][walk] = x position
    stepGridY = []   # stepGridY[step][walk] = y position
    sumX = []
    sumY = []
    originFlag = False
    originCount = 0
    # Initialize Grid
    for i in range( steps ):
        stepGridX.append([])
        stepGridY.append([])
        sumX.append(0)
        sumY.append(0)
        #r.append([])
        for j in range( repeat ):
            stepGridX[i].append(0)
            stepGridY[i].append(0)
    # One column per walk; row is reset after each walk finishes.
    while col < repeat:
        x = 0
        y = 0
        xDir = 0
        yDir = 0
        prevX = 0
        prevY = 0
        while row < steps:
            direction = random.randint(1, 4)
            xDir = nnX2D( direction )
            yDir = nnY2D( direction )
            prevX = x
            prevY = y
            x += xDir
            y += yDir
            stepGridX[row][col] = x
            stepGridY[row][col] = y
            sumX[row] += x
            sumY[row] += y
            if x == 0 and y == 0:
                originFlag = True
            row += 1
        col += 1
        row = 0
        if originFlag:
            originCount += 1
            originFlag = False
    # output
    row = 0
    col = 0
    fileName_X = './output/2D/randWalk_2D_st' + str(steps) + '_sa' + str(repeat) + '_X.dat'
    fileName_Y = './output/2D/randWalk_2D_st' + str(steps) + '_sa' + str(repeat) + '_Y.dat'
    fileName_P0 = './output/2D/randWalk_2D_st' + str(steps) + '_sa' + str(repeat) + '_P0.dat'
    outFile_X = open(fileName_X, 'w')
    outFile_Y = open(fileName_Y, 'w')
    # Print out walks
    while row < steps:
        outFile_X.write( "%6d" % ( row ) )
        outFile_Y.write( "%6d" % ( row ) )
        while col < repeat:
            outFile_X.write( "%6d" % ( stepGridX[row][col] ) )
            outFile_Y.write( "%6d" % ( stepGridY[row][col] ) )
            col += 1
        avgX = ( sumX[row] / repeat )
        avgY = ( sumY[row] / repeat )
        outFile_X.write( "%8.2f" % ( avgX ) ) # Average X
        outFile_X.write( "%8.2f" % ( avgX**2 ) ) # Average X Squared
        outFile_X.write( "\n" )
        outFile_Y.write( "%8.2f" % ( avgY ) ) # Average Y
        outFile_Y.write( "%8.2f" % ( avgY**2 ) ) # Average Y Squared
        outFile_Y.write( "\n" )
        col = 0
        row += 1
    outFile_X.close()
    outFile_Y.close()
    probFile = open(fileName_P0, 'w')
    probFile.write( "%8.2f\n" % (originCount / repeat) )
    probFile.close()
#### END OF 2D
#
# simpleRandomWalk3D
#
def simpleRandomWalk3D( steps, repeat ):
    """Run `repeat` independent 3-D lattice random walks of `steps` steps.

    Writes per-step x, y and z trajectories (one file per axis) with the
    mean and squared-mean columns appended, plus the fraction of walks that
    revisited the origin. Assumes ./output/3D/ exists.
    """
    row = 0
    col = 0
    stepGridX = []   # stepGrid*[step][walk] = coordinate after this step
    stepGridY = []
    stepGridZ = []
    sumX = []
    sumY = []
    sumZ = []
    originFlag = False
    originCount = 0
    # Initialize Grid
    for i in range( steps ):
        stepGridX.append([])
        stepGridY.append([])
        stepGridZ.append([])
        sumX.append(0)
        sumY.append(0)
        sumZ.append(0)
        #r.append([])
        for j in range( repeat ):
            stepGridX[i].append(0)
            stepGridY[i].append(0)
            stepGridZ[i].append(0)
    # One column per walk; row is reset after each walk finishes.
    while col < repeat:
        x = 0
        y = 0
        z = 0
        xDir = 0
        yDir = 0
        zDir = 0
        prevX = 0
        prevY = 0
        prevZ = 0
        while row < steps:
            direction = random.randint(1, 6)
            xDir = nnX3D( direction )
            yDir = nnY3D( direction )
            zDir = nnZ3D( direction )
            prevX = x
            prevY = y
            prevZ = z
            x += xDir
            y += yDir
            z += zDir
            stepGridX[row][col] = x
            stepGridY[row][col] = y
            stepGridZ[row][col] = z
            sumX[row] += x
            sumY[row] += y
            sumZ[row] += z
            if x == 0 and y == 0 and z == 0:
                originFlag = True
            row += 1
        col += 1
        row = 0
        if originFlag:
            originCount += 1
            originFlag = False
    # output
    row = 0
    col = 0
    fileName_X = './output/3D/randWalk_3D_st' + str(steps) + '_sa' + str(repeat) + '_X.dat'
    fileName_Y = './output/3D/randWalk_3D_st' + str(steps) + '_sa' + str(repeat) + '_Y.dat'
    fileName_Z = './output/3D/randWalk_3D_st' + str(steps) + '_sa' + str(repeat) + '_Z.dat'
    fileName_P0 = './output/3D/randWalk_3D_st' + str(steps) + '_sa' + str(repeat) + '_P0.dat'
    outFile_X = open(fileName_X, 'w')
    outFile_Y = open(fileName_Y, 'w')
    outFile_Z = open(fileName_Z, 'w')
    # Print out walks
    while row < steps:
        outFile_X.write( "%6d" % ( row ) )
        outFile_Y.write( "%6d" % ( row ) )
        outFile_Z.write( "%6d" % ( row ) )
        while col < repeat:
            outFile_X.write( "%6d" % ( stepGridX[row][col] ) )
            outFile_Y.write( "%6d" % ( stepGridY[row][col] ) )
            outFile_Z.write( "%6d" % ( stepGridZ[row][col] ) )
            col += 1
        avgX = ( sumX[row] / repeat )
        avgY = ( sumY[row] / repeat )
        avgZ = ( sumZ[row] / repeat )
        outFile_X.write( "%8.2f" % ( avgX ) ) # Average X
        outFile_X.write( "%8.2f" % ( avgX**2 ) ) # Average X Squared
        outFile_X.write( "\n" )
        outFile_Y.write( "%8.2f" % ( avgY ) ) # Average Y
        outFile_Y.write( "%8.2f" % ( avgY**2 ) ) # Average Y Squared
        outFile_Y.write( "\n" )
        outFile_Z.write( "%8.2f" % ( avgZ ) ) # Average Z
        outFile_Z.write( "%8.2f" % ( avgZ**2 ) ) # Average Z Squared
        outFile_Z.write( "\n" )
        col = 0
        row += 1
    outFile_X.close()
    outFile_Y.close()
    outFile_Z.close()
    probFile = open(fileName_P0, 'w')
    probFile.write( "%8.2f\n" % (originCount / repeat) )
    probFile.close()
#### END OF 3D
#
# simpleRandomWalk4D
#
def simpleRandomWalk4D( steps, repeat ):
    """Run `repeat` independent 4-D lattice random walks of `steps` steps.

    Writes per-step x, y, z and w trajectories (one file per axis) with the
    mean and squared-mean columns appended, plus the fraction of walks that
    revisited the origin. Assumes ./output/4D/ exists.
    """
    row = 0
    col = 0
    stepGridX = []   # stepGrid*[step][walk] = coordinate after this step
    stepGridY = []
    stepGridZ = []
    stepGridW = []
    sumX = []
    sumY = []
    sumZ = []
    sumW = []
    originFlag = False
    originCount = 0
    # Initialize Grid
    for i in range( steps ):
        stepGridX.append([])
        stepGridY.append([])
        stepGridZ.append([])
        stepGridW.append([])
        sumX.append(0)
        sumY.append(0)
        sumZ.append(0)
        sumW.append(0)
        #r.append([])
        for j in range( repeat ):
            stepGridX[i].append(0)
            stepGridY[i].append(0)
            stepGridZ[i].append(0)
            stepGridW[i].append(0)
    # One column per walk; row is reset after each walk finishes.
    while col < repeat:
        x = 0
        y = 0
        z = 0
        w = 0
        xDir = 0
        yDir = 0
        zDir = 0
        wDir = 0
        prevX = 0
        prevY = 0
        prevZ = 0
        prevW = 0
        while row < steps:
            direction = random.randint(1, 8)
            xDir = nnX4D( direction )
            yDir = nnY4D( direction )
            zDir = nnZ4D( direction )
            wDir = nnW4D( direction )
            prevX = x
            prevY = y
            prevZ = z
            prevW = w
            x += xDir
            y += yDir
            z += zDir
            w += wDir
            stepGridX[row][col] = x
            stepGridY[row][col] = y
            stepGridZ[row][col] = z
            stepGridW[row][col] = w
            sumX[row] += x
            sumY[row] += y
            sumZ[row] += z
            sumW[row] += w
            if x == 0 and y == 0 and z == 0 and w == 0:
                originFlag = True
            row += 1
        col += 1
        row = 0
        if originFlag:
            originCount += 1
            originFlag = False
    # output
    row = 0
    col = 0
    fileName_X = './output/4D/randWalk_4D_st' + str(steps) + '_sa' + str(repeat) + '_X.dat'
    fileName_Y = './output/4D/randWalk_4D_st' + str(steps) + '_sa' + str(repeat) + '_Y.dat'
    fileName_Z = './output/4D/randWalk_4D_st' + str(steps) + '_sa' + str(repeat) + '_Z.dat'
    fileName_W = './output/4D/randWalk_4D_st' + str(steps) + '_sa' + str(repeat) + '_W.dat'
    fileName_P0 = './output/4D/randWalk_4D_st' + str(steps) + '_sa' + str(repeat) + '_P0.dat'
    outFile_X = open(fileName_X, 'w')
    outFile_Y = open(fileName_Y, 'w')
    outFile_Z = open(fileName_Z, 'w')
    outFile_W = open(fileName_W, 'w')
    # Print out walks
    while row < steps:
        outFile_X.write( "%6d" % ( row ) )
        outFile_Y.write( "%6d" % ( row ) )
        outFile_Z.write( "%6d" % ( row ) )
        outFile_W.write( "%6d" % ( row ) )
        while col < repeat:
            outFile_X.write( "%6d" % ( stepGridX[row][col] ) )
            outFile_Y.write( "%6d" % ( stepGridY[row][col] ) )
            outFile_Z.write( "%6d" % ( stepGridZ[row][col] ) )
            outFile_W.write( "%6d" % ( stepGridW[row][col] ) )
            col += 1
        avgX = ( sumX[row] / repeat )
        avgY = ( sumY[row] / repeat )
        avgZ = ( sumZ[row] / repeat )
        avgW = ( sumW[row] / repeat )
        outFile_X.write( "%8.2f" % ( avgX ) ) # Average X
        outFile_X.write( "%8.2f" % ( avgX**2 ) ) # Average X Squared
        outFile_X.write( "\n" )
        outFile_Y.write( "%8.2f" % ( avgY ) ) # Average Y
        outFile_Y.write( "%8.2f" % ( avgY**2 ) ) # Average Y Squared
        outFile_Y.write( "\n" )
        outFile_Z.write( "%8.2f" % ( avgZ ) ) # Average Z
        outFile_Z.write( "%8.2f" % ( avgZ**2 ) ) # Average Z Squared
        outFile_Z.write( "\n" )
        outFile_W.write( "%8.2f" % ( avgW ) ) # Average W (original comment said Z)
        outFile_W.write( "%8.2f" % ( avgW**2 ) ) # Average W Squared
        outFile_W.write( "\n" )
        col = 0
        row += 1
    outFile_X.close()
    outFile_Y.close()
    outFile_Z.close()
    outFile_W.close()
    probFile = open(fileName_P0, 'w')
    probFile.write( "%8.2f\n" % (originCount / repeat) )
    probFile.close()
#### END OF 4D
def nonReversalRandomWalk( shape, n ):
    """Perform an n-step 3-D walk that never immediately reverses its last
    step, moving *shape* (presumably a Blender object) along the path.

    NOTE(review): this function cannot run as written —
    - nnX/nnY/nnZ are undefined; presumably nnX3D/nnY3D/nnZ3D were meant;
    - mathutils is only imported in a commented-out line at the top of the
      file, so mathutils.Vector will raise NameError;
    - outFile is not defined in this scope; it relies on a global that is
      not visible here.
    Confirm the intended environment before fixing.
    """
    count = 0
    x = 0
    y = 0
    z = 0
    xDir = 0
    yDir = 0
    zDir = 0
    prevX = 0
    prevY = 0
    prevZ = 0
    rotX = 0.0
    rotY = 0.0
    rotZ = 0.0
    #outFile.write( "%s\n%s\n%s" %
    #               ( "Non-Reversal Random Walk",
    #                 "=========================",
    #                 "t     x(t)  y(t)  z(t)  X     Y     Z\n" ) )
    while count < n:
        direction = random.randint(1, 6)
        xDir = nnX( direction )
        yDir = nnY( direction )
        zDir = nnZ( direction )
        # Re-roll while the proposed step would undo the previous one.
        while (x + xDir == prevX) and (y + yDir == prevY) and (z + zDir == prevZ):
            direction = random.randint(1, 6)
            xDir = nnX( direction )
            yDir = nnY( direction )
            zDir = nnZ( direction )
        prevX = x
        prevY = y
        prevZ = z
        x += xDir
        y += yDir
        z += zDir
        #if x != 0:
        #    rotX = 1.57
        #if z != 0:
        #    rotY = 1.57
        shape.delta_location = mathutils.Vector((x,y,z))
        #addCylinder(mathutils.Vector((
        #             (x + prevX)/2.0,
        #             (y + prevY)/2.0,
        #             (z + prevZ)/2.0)),
        #             mathutils.Euler((rotX, rotY, rotZ)))
        outFile.write( "%6d%6d%6d%6d%6d%6d\n" % ( x, y, z, xDir, yDir, zDir ) )
        count += 1
#
# randomWalkInMedia
#
def randomWalkInMedia( steps, repeat, porosity, size ):
    """Run `repeat` 3-D walks of `steps` steps through a porous medium and
    record the displacement r and r**2 per step; averages are appended to
    each output row under ./output/porosity/.

    The medium keeps a `porosity` fraction of sites open (create3DGrid is
    called with wall fraction 1 - porosity).

    NOTE(review): the walker indexes grid[x+xDir][...] without bounds
    checks — walking past size-1 raises IndexError, and negative
    coordinates silently wrap around via Python's negative indexing.
    Also, if the walker becomes fully enclosed by walls the inner re-roll
    loop never terminates, and the start site (0,0,0) itself may be a wall.
    Assumes ./output/porosity/ exists.
    """
    row = 0
    col = 0
    rGrid = []    # rGrid[step][walk]  = |r| from the origin
    r2Grid = []   # r2Grid[step][walk] = r**2
    sumR = []
    sumR2 = []
    grid = create3DGrid( 1 - porosity, size )
    # Initialize Grid
    for i in range( steps ):
        rGrid.append([])
        r2Grid.append([])
        sumR.append(0)
        sumR2.append(0)
        #r.append([])
        for j in range( repeat ):
            rGrid[i].append(0)
            r2Grid[i].append(0)
    # One column per walk; row is reset after each walk finishes.
    while col < repeat:
        x = 0
        y = 0
        z = 0
        xDir = 0
        yDir = 0
        zDir = 0
        prevX = 0
        prevY = 0
        prevZ = 0
        while row < steps:
            direction = random.randint(1, 6)
            xDir = nnX3D( direction )
            yDir = nnY3D( direction )
            zDir = nnZ3D( direction )
            # If a site is blocked, move some where else.
            while (grid[x + xDir][y + yDir][z + zDir] == 0):
                direction = random.randint(1, 6)
                xDir = nnX3D( direction )
                yDir = nnY3D( direction )
                zDir = nnZ3D( direction )
            prevX = x
            prevY = y
            prevZ = z
            x += xDir
            y += yDir
            z += zDir
            # Standard Deviation
            rGrid[row][col] = math.sqrt((x**2) + (y**2) + (z**2))
            r2Grid[row][col] = rGrid[row][col]**2
            sumR[row] += rGrid[row][col]
            sumR2[row] += r2Grid[row][col]
            row += 1
        col += 1
        row = 0
    # output
    row = 0
    col = 0
    fileName_R = './output/porosity/randWalk_3D_' + str(size) + '_st' + str(steps) + '_sa' + str(repeat) + '_por' + str(porosity) + '_R.dat'
    fileName_R2 = './output/porosity/randWalk_3D_' + str(size) + '_st' + str(steps) + '_sa' + str(repeat) + '_por' + str(porosity) + '_R2.dat'
    outFile_R = open(fileName_R, 'w')
    outFile_R2 = open(fileName_R2, 'w')
    # Print out walks
    while row < steps:
        outFile_R.write( "%6d" % ( row ) )
        outFile_R2.write( "%6d" % ( row ) )
        while col < repeat:
            outFile_R.write( "%6d" % ( rGrid[row][col] ) )
            outFile_R2.write( "%6d" % ( r2Grid[row][col] ) )
            col += 1
        avgR = ( sumR[row] / repeat )
        avgR2 = ( sumR2[row] / repeat )
        outFile_R.write( "%8.2f" % ( avgR ) ) # Average R
        outFile_R.write( "%8.2f" % ( avgR2 ) ) # Average R Squared
        outFile_R.write( "\n" )
        outFile_R2.write( "%8.2f" % ( avgR ) ) # Average R
        outFile_R2.write( "%8.2f" % ( avgR2 ) ) # Average R Squared
        outFile_R2.write( "\n" )
        col = 0
        row += 1
    outFile_R.close()
    outFile_R2.close()
#### END OF 3D
#
# selfAvoidingRandomWalk
#
def selfAvoidingRandomWalk( steps, repeat, size ):
    """Run `repeat` self-avoiding random walks of up to `steps` steps on a
    2D lattice of side `size`, then write per-step positions, the success
    rate, and per-walk radii of gyration to ./output/avoiding/.

    Relies on module-level helpers create2DGrid, nnX2D, nnY2D and on the
    random/math modules.
    """
    row = 0
    col = 0
    # stepGridX/Y[step][walk] hold the walker position after each step.
    stepGridX = []
    stepGridY = []
    sumX = []
    sumY = []
    sumOfThisRunX = []
    sumOfThisRunY = []
    centerOfMassX = []
    centerOfMassY = []
    runStop = []  # last completed step index for each walk
    rgx = []
    rgy = []
    rg = []
    grid = create2DGrid( size )
    # Initialize Grid
    for i in range( steps ):
        stepGridX.append([])
        stepGridY.append([])
        sumX.append(0)
        sumY.append(0)
        #r.append([])
        for j in range( repeat ):
            stepGridX[i].append(0)
            stepGridY[i].append(0)
            # NOTE(review): the per-walk lists below only need `repeat`
            # entries but are appended steps*repeat times here; indexing by
            # col still works, the surplus entries are simply unused.
            sumOfThisRunX.append(0)
            sumOfThisRunY.append(0)
            centerOfMassX.append(0)
            centerOfMassY.append(0)
            runStop.append(0)
            rgx.append(0)
            rgy.append(0)
            rg.append(0)
    numSuccess = 0
    # One self-avoiding walk per column; a walk aborts early when it steps
    # onto an already-visited lattice site.
    while col < repeat:
        x = 0
        y = 0
        xDir = 0
        yDir = 0
        prevX = 0
        prevY = 0
        numSteps = 0
        while row < steps:
            direction = random.randint(1, 4)
            xDir = nnX2D( direction )
            yDir = nnY2D( direction )
            # Re-roll while the move would step straight back onto the
            # previous site.
            while (x + xDir == prevX) and (y + yDir == prevY):
                direction = random.randint(1, 4)
                xDir = nnX2D( direction )
                yDir = nnY2D( direction )
            prevX = x
            prevY = y
            x += xDir
            y += yDir
            stepGridX[row][col] = x
            stepGridY[row][col] = y
            sumX[row] += x
            sumY[row] += y
            if ( grid[x][y] == 2 ):
                # Hit a visited site: record where this walk stopped.
                # NOTE(review): grid is never reset between walks, so later
                # walks also avoid sites visited by earlier walks — confirm
                # this is intended.
                numSteps = row
                runStop[col] = row
                break
            else:
                grid[x][y] = 2
            row += 1
        if row == steps:
            # Walk completed all steps without self-intersecting.
            numSteps = steps
            runStop[col] = row - 1
            numSuccess += 1
        col += 1
        row = 0
    # Radius of Gyration
    row = 0
    col = 0
    # Center of Mass
    while col < repeat:
        while row <= runStop[col]:
            sumOfThisRunX[col] += stepGridX[row][col]
            sumOfThisRunY[col] += stepGridY[row][col]
            row += 1
        col += 1
        row = 0
    col = 0
    row = 0
    # Calculating Rg
    while col < repeat:
        # NOTE(review): divides by `steps` even for walks that stopped
        # early; runStop[col] + 1 may have been intended.
        centerOfMassX[col] = sumOfThisRunX[col] / steps
        centerOfMassY[col] = sumOfThisRunY[col] / steps
        sumOfDistanceX = 0
        sumOfDistanceY = 0
        while row <= runStop[col]:
            sumOfDistanceX += math.pow(stepGridX[row][col] - centerOfMassX[col], 2)
            sumOfDistanceY += math.pow(stepGridY[row][col] - centerOfMassY[col], 2)
            row += 1
        tSteps = runStop[col]
        if tSteps == 0:
            tSteps = 1  # avoid division by zero for walks that stopped at step 0
        rgx[col] = math.sqrt(sumOfDistanceX) / tSteps
        rgy[col] = math.sqrt(sumOfDistanceY) / tSteps
        rg[col] = math.sqrt( math.pow(rgx[col], 2) + math.pow(rgy[col], 2) )
        col += 1
        row = 0
    col = 0
    row = 0
    # output
    row = 0
    col = 0
    fileName_X = './output/avoiding/randWalk_2D_' + str(size) + '_st' + str(steps) + '_sa' + str(repeat) + '_X.dat'
    fileName_Y = './output/avoiding/randWalk_2D_' + str(size) + '_st' + str(steps) + '_sa' + str(repeat) + '_Y.dat'
    fileName_Success = './output/avoiding/randWalk_2D_' + str(size) + '_st' + str(steps) + '_sa' + str(repeat) + '_Success.dat'
    fileName_Rg = './output/avoiding/randWalk_2D_' + str(size) + '_st' + str(steps) + '_sa' + str(repeat) + '_Rg.dat'
    outFile_X = open(fileName_X, 'w')
    outFile_Y = open(fileName_Y, 'w')
    # Print out walks
    while row < steps:
        outFile_X.write( "%6d" % ( row ) )
        outFile_Y.write( "%6d" % ( row ) )
        while col < repeat:
            outFile_X.write( "%6d" % ( stepGridX[row][col] ) )
            outFile_Y.write( "%6d" % ( stepGridY[row][col] ) )
            col += 1
        avgX = ( sumX[row] / repeat )
        avgY = ( sumY[row] / repeat )
        outFile_X.write( "%8.2f" % ( avgX ) ) # Average X
        outFile_X.write( "%8.2f" % ( avgX**2 ) ) # Average X Squared
        outFile_X.write( "\n" )
        outFile_Y.write( "%8.2f" % ( avgY ) ) # Average Y
        outFile_Y.write( "%8.2f" % ( avgY**2 ) ) # Average Y Squared
        outFile_Y.write( "\n" )
        col = 0
        row += 1
    outFile_X.close()
    outFile_Y.close()
    outFile_success = open(fileName_Success, 'w')
    # NOTE(review): under Python 2 this is integer division and always
    # writes 0.00 or 1.00 — confirm a float success rate was intended.
    outFile_success.write("%8.2f\n" % (numSuccess / repeat))
    outFile_success.close()
    # Print Radius of Gyration
    row = 0
    col = 0
    outfile_Rg = open(fileName_Rg, 'w')
    while col < repeat:
        outfile_Rg.write( "%6d" % ( col ) )
        outfile_Rg.write( "%8.2f" % ( rgx[col] ) )
        outfile_Rg.write( "%8.2f" % ( rgy[col] ) )
        outfile_Rg.write( "%8.2f" % ( rg[col] ) )
        outfile_Rg.write( "\n" )
        col += 1
    outfile_Rg.close()
#### END OF 2D
|
from spikeextractors import RecordingExtractor
import numpy as np
from .basepreprocessorrecording import BasePreprocessorRecordingExtractor
from spikeextractors.extraction_tools import check_get_traces_args
class NormalizeByQuantileRecording(BasePreprocessorRecordingExtractor):
    """Preprocessor that linearly rescales traces so the median and the
    inter-quantile spread of a random sample match the requested values."""

    preprocessor_name = 'NormalizeByQuantile'

    def __init__(self, recording, scale=1.0, median=0.0, q1=0.01, q2=0.99, seed=0):
        BasePreprocessorRecordingExtractor.__init__(self, recording)
        sample = self._get_random_data_for_scaling(seed=seed).ravel()
        lo, sample_median, hi = np.quantile(sample, q=[q1, 0.5, q2])
        # Map the measured [lo, hi] spread onto `scale`, then shift the
        # measured median onto `median`.
        self._scalar = scale / abs(hi - lo)
        self._offset = median - sample_median * self._scalar
        self.has_unscaled = False
        self._kwargs = {'recording': recording.make_serialized_dict(), 'scale': scale, 'median': median,
                        'q1': q1, 'q2': q2, 'seed': seed}

    def _get_random_data_for_scaling(self, num_chunks=50, chunk_size=500, seed=0):
        """Gather `num_chunks` random trace chunks (reproducible via `seed`)
        used to estimate the amplitude distribution."""
        total_frames = self._recording.get_num_frames()
        starts = np.random.RandomState(seed=seed).randint(0, total_frames - chunk_size, size=num_chunks)
        chunks = [
            self._recording.get_traces(start_frame=start,
                                       end_frame=start + chunk_size)
            for start in np.sort(starts)
        ]
        return np.concatenate(chunks, axis=1)

    @check_get_traces_args
    def get_traces(self, channel_ids=None, start_frame=None, end_frame=None, return_scaled=True):
        assert return_scaled, "'normalize_by_quantile' only supports return_scaled=True"
        raw = self._recording.get_traces(channel_ids=channel_ids,
                                         start_frame=start_frame,
                                         end_frame=end_frame,
                                         return_scaled=return_scaled)
        return raw * self._scalar + self._offset
def normalize_by_quantile(recording, scale=1.0, median=0.0, q1=0.01, q2=0.99, seed=0):
    '''
    Rescale the traces from the given recording extractor with a scalar
    and offset. First, the median and quantiles of the distribution are estimated.
    Then the distribution is rescaled and offset so that the distance between
    the quantiles (1st and 99th by default) is set to `scale`, and the median
    is set to the given `median`.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor to be transformed
    scale: float
        Scale for the output distribution
    median: float
        Median for the output distribution
    q1: float (default 0.01)
        Lower quantile used for measuring the scale
    q2: float (default 0.99)
        Upper quantile used for measuring the scale
    seed: int
        Random seed for reproducibility

    Returns
    -------
    rescaled_traces: NormalizeByQuantileRecording
        The rescaled traces recording extractor object
    '''
    return NormalizeByQuantileRecording(
        recording=recording,
        scale=scale,
        median=median,
        q1=q1,
        q2=q2,
        seed=seed
    )
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for maximum_grad"""
from mindspore._extends.graph_kernel.model.model import GraphKernelUnsupportedException as GKException
from ._utils import Expander, ExpanderInfoValidator as VLD
@VLD.check_all_formats_same
class MaximumGrad(Expander):
    """MaximumGrad expander: routes the incoming gradient to whichever
    input held the maximum, element-wise."""

    def _check(self):
        want_dx = self.attrs.get('grad_x', True)
        want_dy = self.attrs.get('grad_y', True)
        if not (want_dx or want_dy):
            raise GKException("both grad_x and grad_y are False.")
        return super()._check()

    def _expand(self, graph_builder):
        x, y, dout = self.inputs
        # 1.0 where x >= y, else 0.0 (cast back to x's dtype).
        mask = graph_builder.emit('GreaterEqual', [x, y])
        mask = graph_builder.emit('Cast', [mask], attrs={'dst_type': x.dtype})
        dx = graph_builder.emit('Mul', [mask, dout])
        # dy is the remainder of the gradient: dout - dx.
        dy = graph_builder.emit('Sub', [dout, dx])
        # output two results, regardless of grad_x and grad_y
        return dx, dy
|
import unittest
from dedupe import common
class TestConf(unittest.TestCase):
    """Smoke test for the dedupe configuration object."""

    def test_not_null(self):
        # assertIsNotNone reports a clearer failure message than
        # assertTrue(x is not None).
        self.assertIsNotNone(common.conf)

if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os

# Map every file name found under the assignment folder to the list of
# directories that contain it.
d1 = {}
for dirpath, _subdirs, filenames in os.walk('C:\\Users\\Sujji\\Assignment'):
    for name in filenames:
        d1.setdefault(name, []).append(dirpath)

# Case-insensitive substring search over the collected file names; print
# every directory holding a match, separated by a dashed rule.
file_name = input('Enter the file name:')
for k, v in d1.items():
    if file_name.lower() in k.lower():
        for location in v:
            print(location)
        print('-' * 50)
import os

print(os.getcwd())
os.chdir("G:\\LetsUpgrade\\test")
print(os.getcwd())

# Rename every .png in the working directory so it carries a .jpg
# extension (only the name changes; the file bytes are untouched).
for entry in os.listdir("."):
    if entry.endswith(".png"):
        renamed = entry.rsplit(".", 1)[0] + ".jpg"
        print(renamed)
        os.rename(entry, renamed)
# In[ ]:
|
class Solution:
    """Valid Palindrome II: decide whether a string can be made a
    palindrome by deleting at most one character."""

    def validPalindrome(self, s):
        """Return True if s is a palindrome after at most one deletion."""
        last = len(s) - 1
        return self.even(s, 0, last) or self.odd(s, 0, last)

    def even(self, s, left, right):
        """Two-pointer scan that may skip one character on the LEFT side."""
        used_skip = False
        while left < right:
            if s[left] == s[right]:
                left += 1
                right -= 1
            elif used_skip:
                return False
            else:
                used_skip = True
                left += 1
        return True

    def odd(self, s, left, right):
        """Two-pointer scan that may skip one character on the RIGHT side."""
        used_skip = False
        while left < right:
            if s[left] == s[right]:
                left += 1
                right -= 1
            elif used_skip:
                return False
            else:
                used_skip = True
                right -= 1
        return True
# Ad-hoc smoke test: a 100-character near-palindrome that requires exactly
# one deletion.
checker = Solution()
print(checker.validPalindrome("aguokepatgbnvfqmgmlcupuufxoohdfpgjdmysgvhmvffcnqxjjxqncffvmhvgsymdjgpfdhooxfuupuculmgmqfvnbgtapekouga"))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 29 12:22:31 2021
@author: xies
"""
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from skimage import io, util
from os import path
from tqdm import tqdm
import pickle as pkl
from measureSemiauto import measure_track_timeseries_from_segmentations,cell_cycle_annotate,collate_timeseries_into_cell_centric_table
import warnings
with warnings.catch_warnings():
    # NOTE(review): the filter is installed only inside this (otherwise
    # empty) context manager, so it is reverted as soon as the block exits
    # and does not affect the code below — confirm whether a module-wide
    # filterwarnings call was intended.
    warnings.filterwarnings("ignore", category=DeprecationWarning)

# Region name -> image directory; commented entries are earlier datasets.
dirnames = {}
# dirnames['WT_R1'] = '/Users/xies/OneDrive - Stanford/Skin/Two photon/NMS/09-29-2022 RB-KO pair/WT/R1'
# dirnames['WT_R2'] = '/Users/xies/OneDrive - Stanford/Skin/Two photon/NMS/09-29-2022 RB-KO pair/WT/R2'
# dirnames['WT_R3'] = '/Users/xies/OneDrive - Stanford/Skin/Two photon/NMS/03-26-2023 RB-KO pair/M6 WT/R1'
# dirnames['WT_R4'] = '/Users/xies/OneDrive - Stanford/Skin/Two photon/NMS/03-26-2023 RB-KO pair/M6 WT/R2'
# dirnames['RBKO_R1'] = '/Users/xies/OneDrive - Stanford/Skin/Two photon/NMS/09-29-2022 RB-KO pair/RBKO/R1'
# dirnames['RBKO_R2'] = '/Users/xies/OneDrive - Stanford/Skin/Two photon/NMS/09-29-2022 RB-KO pair/RBKO/R2'
# dirnames['RBKO_R3'] = '/Users/xies/OneDrive - Stanford/Skin/Two photon/NMS/03-26-2023 RB-KO pair/M1 RBKO/R1'
# dirnames['RBKO_R4'] = '/Users/xies/OneDrive - Stanford/Skin/Two photon/NMS/03-26-2023 RB-KO pair/M1 RBKO/R2'
dirnames['RBKOp107het_R2'] = '/Users/xies/OneDrive - Stanford/Skin/Two photon/NMS/05-04-2023 RBKO p107het pair/F8 RBKO p107 het/R2'

# Pixel size (microns per pixel) for each region.
dx = {}
dx['WT_R1'] = 0.206814922817744/1.5
dx['WT_R2'] = 0.206814922817744/1.5
dx['WT_R3'] = 0.165243202683616
dx['WT_R4'] = 0.165243202683616
dx['RBKO_R1'] = 0.206814922817744/1.5
dx['RBKO_R2'] = 0.206814922817744/1.5
dx['RBKO_R3'] = 0.165243202683616
dx['RBKO_R4'] = 0.165243202683616
dx['RBKOp107het_R2'] = 0.165243202683616

# Region -> mouse, and mouse -> experimental pair labels.
mouse = {'WT_R1':'WT_M1','WT_R2':'WT_M1','RBKO_R1':'RBKO_M2','RBKO_R2':'RBKO_M2'
         ,'WT_R3':'WT_M3','WT_R4':'WT_M3','RBKO_R3':'RBKO_M4','RBKO_R4':'RBKO_M4'
         ,'RBKOp107het_R2':'RBKOp107het_M1'}
pairs = {'WT_M1':'Pair 1','RBKO_M2':'Pair 1','WT_M3':'Pair 2','RBKO_M4':'Pair 2','RBKOp107het_M1':'Pair 3'}
RECALCULATE = True

#%% Load and collate manual track+segmentations
# Dictionary of manual segmentation (there should be no first or last time point)
for name,dirname in dirnames.items():
    for mode in ['curated']:
        print(f'---- Working on {name} {mode} ----')
        if name == 'WT_R4' and mode == 'manual':
            continue
        genotype = name.split('_')[0]
        # Construct pathnames
        pathdict = {}
        pathdict['Segmentation'] = path.join(dirname,f'manual_tracking/{mode}_clahe.tif')
        pathdict['H2B'] = path.join(dirname,'master_stack/G.tif')
        pathdict['FUCCI'] = path.join(dirname,'master_stack/R.tif')
        pathdict['Frame averages'] = path.join(dirname,'high_fucci_avg_size.csv')
        pathdict['Cell cycle annotations'] = path.join(dirname,f'{name}_cell_cycle_annotations.xlsx')
        # Construct metadata
        metadata = {}
        metadata['um_per_px'] = dx[name]
        metadata['Region'] = name
        metadata['Mouse'] = mouse[name]
        metadata['Pair'] = pairs[mouse[name]]
        metadata['Genotype'] = genotype
        metadata['Mode'] = mode
        metadata['Dirname'] = dirname
        #% Re-construct tracks with manually fixed tracking/segmentation
        # if RECALCULATE:
        tracks = measure_track_timeseries_from_segmentations(name,pathdict,metadata)
        tracks = cell_cycle_annotate(tracks,pathdict,metadata)
        # Save to the manual tracking folder
        with open(path.join(dirname,'manual_tracking',f'{name}_complete_cycles_fixed_{mode}.pkl'),'wb') as file:
            pkl.dump(tracks,file)
        # Construct the cell-centric metadata dataframe
        df,tracks = collate_timeseries_into_cell_centric_table(tracks,metadata)
        df.to_csv(path.join(dirname,f'manual_tracking/{name}_dataframe_{mode}.csv'))
        # Save to the manual tracking folder (tracks may have been updated
        # by the collate step above, hence the second dump).
        with open(path.join(dirname,'manual_tracking',f'{name}_complete_cycles_fixed_{mode}.pkl'),'wb') as file:
            pkl.dump(tracks,file)
|
import struct

# sysfs GPIO numbers for the relay outputs.
# NOTE(review): pump and heater share GPIO 4, so toggling one also toggles
# the other — this looks like a copy/paste slip; confirm the real heater pin.
PUMP_GPIO_NUM = 4
HEATER_GPIO_NUM = 4
# SPI device node for the temperature ADC.
SPIDEV = "/dev/spidev0.0"
def gpio_output_enable(number):
    """Export GPIO `number` through sysfs (if needed) and set it to output.

    Exporting an already-exported GPIO raises IOError, which is ignored.
    """
    try:
        # `with` closes the handle even if the write fails (the original
        # leaked the descriptor on error).
        with open("/sys/class/gpio/export","w") as f:
            f.write(str(number))
    except IOError:
        # GPIO already exported
        pass
    # Set GPIO direction to output
    with open("/sys/class/gpio/gpio"+str(number)+"/direction","w") as f:
        f.write("out")
def gpio_set(number):
    """Drive GPIO `number` high (writes "1" to its sysfs value file)."""
    gpio_output_enable(number)
    # `with` guarantees the sysfs handle is closed even if the write fails.
    with open("/sys/class/gpio/gpio"+str(number)+"/value","w") as f:
        f.write("1")
def gpio_clear(number):
    """Drive GPIO `number` low (writes "0" to its sysfs value file)."""
    gpio_output_enable(number)
    # `with` guarantees the sysfs handle is closed even if the write fails.
    with open("/sys/class/gpio/gpio"+str(number)+"/value","w") as f:
        f.write("0")
# Reads the current temperature from the ADC IC
def read_temperature():
    """Read the SPI temperature ADC and return degrees Fahrenheit."""
    with open(SPIDEV,"rb") as f:
        data = f.read(4)
    # convert the first 16 bits to a signed big-endian short
    raw = struct.unpack(">h",data[0:2])[0]
    # Drop the 2 low status bits with an arithmetic shift. The original
    # `short / 4` only truncated them under Python 2 integer division; on
    # Python 3 true division kept the status bits as a fraction.
    shifted = raw >> 2
    # convert 1/4-degree units to degrees C, then to Fahrenheit
    tempC = shifted / 4.0
    tempF = tempC * 9 / 5 + 32
    return tempF
# Enables the circulating water pump
def enable_pump():
    """Turn the pump on. The output appears active-low (enable clears the
    pin) — confirm against the relay wiring."""
    gpio_clear(PUMP_GPIO_NUM)
# Disable the circulating water pump
def disable_pump():
    """Turn the pump off by driving its (apparently active-low) pin high."""
    gpio_set(PUMP_GPIO_NUM)
# Enable Heating Coil
def enable_heater():
    """Turn the heater on. The output appears active-low (enable clears
    the pin) — confirm against the relay wiring."""
    gpio_clear(HEATER_GPIO_NUM)
# Disable Heating Coil
def disable_heater():
    """Turn the heater off by driving its (apparently active-low) pin high."""
    gpio_set(HEATER_GPIO_NUM)
|
from setuptools import setup

# Packaging metadata for the uluplot map-plotting utility.
# NOTE(review): the description says the package is based on gmplot, but
# only `requests` is declared in install_requires — confirm whether gmplot
# is a missing runtime dependency.
setup(name='uluplot',
      version='0.1.0',
      description='Flexible map plot utility based on gmplot',
      url='http://github.com/ulu5/uluplot',
      author='ulu5',
      author_email='ulu_5@yahoo.com',
      license='MIT',
      packages=['uluplot'],
      install_requires=[
          'requests',
      ])
|
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import ibis
from ibis.expr import datatypes as dt
from ibis.expr import schema as sch
pytestmark = pytest.mark.pandas
@pytest.mark.parametrize(
    ('column', 'expected_dtype'),
    [
        ([True, False, False], dt.boolean),
        (np.int8([-3, 9, 17]), dt.int8),
        (np.uint8([3, 0, 16]), dt.uint8),
        (np.int16([-5, 0, 12]), dt.int16),
        (np.uint16([5569, 1, 33]), dt.uint16),
        (np.int32([-12, 3, 25000]), dt.int32),
        (np.uint32([100, 0, 6]), dt.uint32),
        (np.uint64([666, 2, 3]), dt.uint64),
        (np.int64([102, 67228734, -0]), dt.int64),
        (np.float32([45e-3, -0.4, 99.0]), dt.float),
        (np.float64([-3e43, 43.0, 10000000.0]), dt.double),
        (['foo', 'bar', 'hello'], dt.string),
        (
            [
                pd.Timestamp('2010-11-01 00:01:00'),
                pd.Timestamp('2010-11-01 00:02:00.1000'),
                pd.Timestamp('2010-11-01 00:03:00.300000'),
            ],
            dt.timestamp,
        ),
        (
            pd.date_range('20130101', periods=3, tz='US/Eastern'),
            dt.Timestamp('US/Eastern'),
        ),
        (
            [
                pd.Timedelta('1 days'),
                pd.Timedelta('-1 days 2 min 3us'),
                pd.Timedelta('-2 days +23:57:59.999997'),
            ],
            dt.Interval('ns'),
        ),
        (pd.Series(['a', 'b', 'c', 'a']).astype('category'), dt.Category()),
    ],
)
def test_infer_simple_dataframe(column, expected_dtype):
    """A single-column frame should infer to the matching ibis dtype."""
    df = pd.DataFrame({'col': column})
    assert sch.infer(df) == ibis.schema([('col', expected_dtype)])
def test_infer_exhaustive_dataframe():
    """Infer an ibis schema from one frame that covers every commonly used
    column dtype (fixed-width ints, bools with missing values, floats,
    strings, timestamps) and compare against the expected pairs."""
    df = pd.DataFrame(
        {
            'bigint_col': np.array(
                [0, 10, 20, 30, 40, 50, 60, 70, 80, 90], dtype='i8'
            ),
            'bool_col': np.array(
                [
                    True,
                    False,
                    True,
                    False,
                    True,
                    None,
                    True,
                    False,
                    True,
                    False,
                ],
                dtype=np.bool_,
            ),
            # Same booleans but boxed as objects with NaNs: still inferred
            # as boolean.
            'bool_obj_col': np.array(
                [
                    True,
                    False,
                    np.nan,
                    False,
                    True,
                    np.nan,
                    True,
                    np.nan,
                    True,
                    False,
                ],
                dtype=np.object_,
            ),
            'date_string_col': [
                '11/01/10',
                None,
                '11/01/10',
                '11/01/10',
                '11/01/10',
                '11/01/10',
                '11/01/10',
                '11/01/10',
                '11/01/10',
                '11/01/10',
            ],
            'double_col': np.array(
                [
                    0.0,
                    10.1,
                    np.nan,
                    30.299999999999997,
                    40.399999999999999,
                    50.5,
                    60.599999999999994,
                    70.700000000000003,
                    80.799999999999997,
                    90.899999999999991,
                ],
                dtype=np.float64,
            ),
            'float_col': np.array(
                [
                    np.nan,
                    1.1000000238418579,
                    2.2000000476837158,
                    3.2999999523162842,
                    4.4000000953674316,
                    5.5,
                    6.5999999046325684,
                    7.6999998092651367,
                    8.8000001907348633,
                    9.8999996185302734,
                ],
                dtype='f4',
            ),
            'int_col': np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i4'),
            'month': [11, 11, 11, 11, 2, 11, 11, 11, 11, 11],
            'smallint_col': np.array(
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i2'
            ),
            'string_col': [
                '0',
                '1',
                None,
                'double , whammy',
                '4',
                '5',
                '6',
                '7',
                '8',
                '9',
            ],
            'timestamp_col': [
                pd.Timestamp('2010-11-01 00:00:00'),
                None,
                pd.Timestamp('2010-11-01 00:02:00.100000'),
                pd.Timestamp('2010-11-01 00:03:00.300000'),
                pd.Timestamp('2010-11-01 00:04:00.600000'),
                pd.Timestamp('2010-11-01 00:05:00.100000'),
                pd.Timestamp('2010-11-01 00:06:00.150000'),
                pd.Timestamp('2010-11-01 00:07:00.210000'),
                pd.Timestamp('2010-11-01 00:08:00.280000'),
                pd.Timestamp('2010-11-01 00:09:00.360000'),
            ],
            'tinyint_col': np.array(
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i1'
            ),
            'year': [
                2010,
                2010,
                2010,
                2010,
                2010,
                2009,
                2009,
                2009,
                2009,
                2009,
            ],
        }
    )
    # Expected (name, dtype) pairs in column order.
    expected = [
        ('bigint_col', dt.int64),
        ('bool_col', dt.boolean),
        ('bool_obj_col', dt.boolean),
        ('date_string_col', dt.string),
        ('double_col', dt.double),
        ('float_col', dt.float),
        ('int_col', dt.int32),
        ('month', dt.int64),
        ('smallint_col', dt.int16),
        ('string_col', dt.string),
        ('timestamp_col', dt.timestamp),
        ('tinyint_col', dt.int8),
        ('year', dt.int64),
    ]
    assert sch.infer(df) == ibis.schema(expected)
def test_apply_to_schema_with_timezone():
    """apply_to should localize a naive datetime column to the timezone
    declared in the target schema."""
    data = {'time': pd.date_range('2018-01-01', '2018-01-02', freq='H')}
    df = pd.DataFrame(data)
    expected = df.assign(time=df.time.astype('datetime64[ns, EST]'))
    desired_schema = ibis.schema([('time', 'timestamp("EST")')])
    result = desired_schema.apply_to(df.copy())
    tm.assert_frame_equal(expected, result)
# TODO(kszucs): test_Schema_to_pandas
|
#!/usr/bin/python
import os
import sys
import time
# set django environment
from django.core.management import setup_environ
import settings
setup_environ(settings)
from django.template import Template, Context
from django.template.loader import *
def print_usage():
    """
    display the help message to the user
    """
    # `x in dict` replaces dict.has_key, which was removed in Python 3;
    # both forms behave identically on Python 2.
    if len(sys.argv) >= 3 and sys.argv[1].lower() == 'help' and sys.argv[2] in commands:
        # print doc string for method
        args = {
            'doc': commands[sys.argv[2]].__doc__,
            'program': sys.argv[0],
            'key': sys.argv[2],
        }
        print("""
Usage for %(key)s:
%(program)s %(key)s
%(doc)s
""" % args)
    else:
        # sorted() works on both the Python 2 keys() list and the Python 3
        # view; dict_keys has no .sort() method.
        sorted_keys = sorted(commands)
        args = {
            'program': sys.argv[0],
            'commands': "\n".join(sorted_keys),
        }
        print("""Usage:
%(program)s <command>
Where <command> is:
%(commands)s
For extra help, type:
%(program)s help <command>
""" % args)
def is_hidden(path):
    """
    Check if a file or folder is hidden (name starts with '.' or ends
    with '~').
    """
    title = file_title(path)
    # startswith/endswith also behave sanely for an empty title, where the
    # original title[-1]/title[0] indexing raised IndexError.
    return title.endswith('~') or title.startswith('.')
def file_title(path):
    """
    Get only the title of a path: its final os.sep-separated component.
    """
    # str.split always returns at least one element, so the old
    # `len(names) == 0` branch was dead code.
    return path.split(os.sep)[-1]
def walk(compile_func):
    """
    Invoke compile_func(source_abs_path, dest_abs_path, force) for every
    non-hidden file under settings.PREPARSE_DIR, mirroring the directory
    layout under settings.PREPARSE_OUTPUT.
    """
    force = examine_watchlist()
    for root, dirs, files in os.walk(settings.PREPARSE_DIR, followlinks=True):
        relative = root.replace(settings.PREPARSE_DIR, "")
        if relative.startswith(os.sep):
            relative = relative[1:]
        for name in files:
            in_file = os.path.join(root, name)
            if is_hidden(in_file):
                continue
            out_file = os.path.join(settings.PREPARSE_OUTPUT, relative, name)
            compile_func(in_file, out_file, force)
def compare_file_date(in_file, out_file):
    """
    standard file compare: return True when in_file is newer than out_file
    (a missing out_file counts as infinitely old).
    """
    if os.path.exists(out_file):
        out_modified = os.path.getmtime(out_file)
    else:
        out_modified = -1
    return os.path.getmtime(in_file) > out_modified
# Maps template file path -> last seen mtime, across calls.
watchlist = {}

def examine_watchlist():
    """
    if any template files have changed since this function was last called,
    return True and update the list
    """
    new_item = False
    for template_dir in settings.TEMPLATE_DIRS:
        for root, dirs, files in os.walk(template_dir):
            for file in files:
                full_path = os.path.join(root, file)
                file_modified = os.path.getmtime(full_path)
                # `in` replaces dict.has_key (removed in Python 3); a file
                # is "new" if unseen or modified since the last call.
                if full_path not in watchlist or file_modified > watchlist[full_path]:
                    new_item = True
                watchlist[full_path] = file_modified
    return new_item
def compile_file(source, dest, force):
    """
    parse source and write to dest, only if source is newer
    force will make it definitely compile
    """
    if not (force or compare_file_date(source, dest)):
        return
    print("Parsing %s." % file_title(source))
    # `with` closes the source handle (the original leaked it), and dest is
    # only opened after rendering succeeds, so a parse error no longer
    # leaves behind a truncated output file.
    with open(source, 'r') as src:
        # NOTE(review): .decode() assumes Python 2 byte strings; on
        # Python 3 `str` has no decode method — confirm target version.
        in_text = src.read().decode()
    template = Template(in_text)
    # manually add settings from settings.py :(
    template_vars = settings.PREPARSE_CONTEXT  # renamed from `hash` (shadowed builtin)
    template_vars.update({
        'MEDIA_URL': settings.MEDIA_URL,
    })
    context = Context(template_vars)
    with open(dest, 'w') as out:
        out.write(template.render(context))
def clean_file(source, dest, force):
    """Remove a previously generated dest file when it exists; the source
    and force arguments are accepted for walk() compatibility but unused."""
    if not os.path.exists(dest):
        return
    os.remove(dest)
    print("removing %s" % dest)
def compile():
    """
    compile every file, mirroring directory structure
    """
    # Delegates the traversal to walk(); compile_file decides per file
    # whether recompilation is needed.
    walk(compile_file)
def clean():
    """
    delete auto-generated files
    """
    # Same traversal as compile(), but removes the outputs instead.
    walk(clean_file)
def monitor():
    """
    Watches for new or changed files that are candidates for being preparsed,
    and automatically re-parses them.
    """
    # Poll twice a second; compile() itself skips files whose output is
    # already up to date.
    while True:
        compile()
        try:
            time.sleep(0.5)
        except KeyboardInterrupt:
            # NOTE(review): Ctrl-C is only converted to a clean exit while
            # sleeping; an interrupt during compile() propagates with a
            # traceback — confirm whether the try should wrap compile() too.
            sys.exit(0)
# Command-name -> handler mapping used by the CLI dispatcher below.
commands = {
    'help': print_usage,
    'parse': compile,
    'clean': clean,
    'monitor': monitor,
}

if __name__ == '__main__':
    # `not in` replaces dict.has_key (removed in Python 3); also dropped a
    # stray trailing semicolon.
    if len(sys.argv) < 2 or sys.argv[1] not in commands:
        print_usage()
        sys.exit(1)
    else:
        commands[sys.argv[1]]()
|
import taichi as ti
import numpy as np
from matplotlib import cm
import os
# Matplotlib colormap used to colorize the rendered scalar image.
cmap = cm.get_cmap('magma')
# Simulation grid resolution (x, y, z).
res = 256, 64, 64
#res = 1024, 256, 1
# Density and velocity fields filled from the .npz snapshots; `img` is the
# 2D image the render kernel accumulates into.
rho = ti.field(float, res)
vel = ti.Vector.field(3, float, res)
img = ti.field(float, (res[0], res[1]))
def load(frame):
    """Load the rho/vel snapshot for `frame` from /tmp into the taichi
    fields; return False when the file does not exist."""
    path = f'/tmp/{frame:06d}.npz'
    if not os.path.exists(path):
        return False
    with np.load(path) as data:
        rho.from_numpy(data['rho'])
        vel.from_numpy(data['vel'])
    return True
@ti.func
def color(x, y, z):
    # Scalar used for visualisation: velocity magnitude, scaled by 5.
    return vel[x, y, z].norm() * 5
@ti.kernel
def render():
    # For each pixel, average color() over the middle band of z slices.
    for x, y in img:
        ret = 0.0
        cnt = 0
        # max(1, ...) keeps the range non-empty when res[2] is tiny
        # (e.g. the commented-out 2D resolution where res[2] == 1).
        for z in range(res[2] // 4, max(1, res[2] * 3 // 4)):
            ret += color(x, y, z)
            cnt += 1
        img[x, y] = ret / cnt
# Render every consecutive snapshot found in /tmp until one is missing,
# showing each frame and also writing it back out as a PNG.
frame = 0
while load(frame):
    print('render for', frame)
    render()
    im = cmap(img.to_numpy())
    ti.imshow(im)
    ti.imwrite(im, f'/tmp/{frame:06d}.png')
    frame += 1
|
from typing import Iterable
from cimsparql.query_support import combine_statements, group_query
def _query_str(var_list: Iterable[str], rdf_type: str, connection: str) -> str:
    """Build a SPARQL query selecting ?mrid (via `connection`) plus one
    variable per entry of `var_list` for CIM objects of `rdf_type`."""
    # Materialise first: var_list may be a one-shot iterator, and it is
    # consumed twice below (SELECT clause and WHERE triples); the original
    # code silently produced an empty WHERE expansion in that case.
    names = list(var_list)
    select = "SELECT ?mrid " + " ".join([f"?{x}" for x in names])
    where = [
        f"?s rdf:type cim:{rdf_type}",
        f"?s cim:{rdf_type}.{connection} ?mrid",
        *[f"?s cim:{rdf_type}.{x} ?{x}" for x in names],
    ]
    return combine_statements(select, group_query(where))
def powerflow(power: Iterable[str] = ("p", "q")) -> str:
    """SPARQL query for SvPowerFlow values (p, q) keyed by Terminal mrid."""
    return _query_str(power, "SvPowerFlow", "Terminal")
def voltage(voltage_vars: Iterable[str] = ("v", "angle")) -> str:
    """SPARQL query for SvVoltage values keyed by TopologicalNode mrid."""
    return _query_str(voltage_vars, "SvVoltage", "TopologicalNode")
def tapstep() -> str:
    """SPARQL query for SvTapStep positions keyed by TapChanger mrid."""
    return """
SELECT ?mrid ?position
WHERE {
?t_mrid rdf:type cim:SvTapStep .
?t_mrid cim:SvTapStep.TapChanger ?mrid .
?t_mrid cim:SvTapStep.position ?position .
}
"""
|
import sqlitedatastore as datastore
from annoutil import find_x_including_y, find_xs_in_y
if __name__ == '__main__':
    # Emit the corpus in CoNLL-style IOB2 format: one token per line
    # (surface form, POS tag, chunk tag), blank line between sentences.
    datastore.connect()
    anno_name = 'affiliation'
    for doc_id in datastore.get_all_ids(limit=-1):
        row = datastore.get(doc_id, fl=['content'])
        text = row['content']
        sentences = datastore.get_annotation(doc_id, 'sentence')
        tokens = datastore.get_annotation(doc_id, 'token')
        annos = datastore.get_annotation(doc_id, anno_name)
        for sentence in sentences:
            annos_in_sentence = find_xs_in_y(annos, sentence)
            if annos_in_sentence == []:
                # Sentence contains no target annotation; skip it entirely.
                continue
            prev = False
            for token in find_xs_in_y(tokens, sentence):
                if find_x_including_y(annos_in_sentence, token) is None:
                    prev = False
                    print('{0}\t{1}\t{2}'.format(
                        text[token['begin']:token['end']], token['POS'], 'O'))
                else:
                    # B- opens an annotated span; I- continues the span
                    # begun by the previous token.
                    if prev:
                        print('{0}\t{1}\tI-{2}'.format(
                            text[token['begin']:token['end']], token['POS'], anno_name))
                    else:
                        print('{0}\t{1}\tB-{2}'.format(
                            text[token['begin']:token['end']], token['POS'], anno_name))
                    prev = True
            print()  # sentence boundary
    datastore.close()
|
#configuracion_module
from django.conf.urls import url
from django.contrib import admin
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
urlpatterns = [
    # Static configuration page, restricted to logged-in users.
    # NOTE(review): the regex has no '$' anchor or trailing slash, so any
    # URL beginning with "configuracion" matches — confirm this is intended.
    url(r'^configuracion',
        login_required(TemplateView.as_view(template_name="configuracion/configuracion.html")),
        name='config'),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.