blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3842504d29b91a2950c189a1b6ce977c040ce35c | ac1431fd24a9116709a74f19fcedb2bc0abec95b | /fizzbuzz.py | c6b8d97c1cea160d341cbd4176695385e2a6e996 | [] | no_license | fuscano/mi_primer_programa | 33ebc4a8fab6b3aca6e22cf56b59106d42197239 | 544fc9cd9d649a15540a541f5ea99b702f728dd3 | refs/heads/master | 2020-04-09T15:02:38.253466 | 2018-12-11T11:01:40 | 2018-12-11T11:01:40 | 160,414,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,297 | py |
# Classic FizzBuzz over a fixed list: multiples of 3 become 'Fizz', multiples
# of 5 become 'Buzz', multiples of both become 'FizzBuzz'; other values stay.
numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 20, 30, 60, 100]
for posicion, numero in enumerate(numeros):
    etiqueta = ''
    if numero % 3 == 0:
        etiqueta += 'Fizz'
    if numero % 5 == 0:
        etiqueta += 'Buzz'
    if etiqueta:
        numeros[posicion] = etiqueta
print(numeros)
'''
Realizar el FizzBuzz con las mismas reglas pero en el caso que el numero sea divisible entre 3 y 5, cambiar el texto por “Bazinga”.
'''
# Per the spec above: same FizzBuzz rules, except that a number divisible by
# BOTH 3 and 5 becomes 'Bazinga' (the original code replaced every multiple
# of 3 OR 5 with 'Bazinga', losing the Fizz/Buzz labels entirely).
numeros_bazinga = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 20, 30, 60, 100]
for indices in range(len(numeros_bazinga)):
    numero = numeros_bazinga[indices]
    if numero % 3 == 0 and numero % 5 == 0:
        numeros_bazinga[indices] = 'Bazinga'
    elif numero % 3 == 0:
        numeros_bazinga[indices] = 'Fizz'
    elif numero % 5 == 0:
        numeros_bazinga[indices] = 'Buzz'
print(numeros_bazinga)
'''
Crear un programa que encuentre el numero más grande de una lista (sin usar la función max).
'''
# Find the largest element without using max().
lista_numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 160, 151, 12, 13, 14, 15, 20, 30, 60, 120]
# Seed with the first element instead of 0 so the scan is also correct for
# lists that contain only negative numbers.
max_valor = lista_numeros[0]
for valor in lista_numeros[1:]:
    if valor > max_valor:
        max_valor = valor
print('El maximo valor es {}'.format(max_valor))
'''
Crear un programa que guarde e imprima varias listas con todos los números que estén dentro de una lista proporcionada por el usuario y sean múltiplos de 2, de 3, de 5 y de 7.
Ejemplo:
input = [1, 10, 70, 30, 50, 55]
multiplos_dos = [10, 70, 30, 50]
multiplos_tres = [30]
multiplos_cinco = [10, 70, 30, 50, 55]
multiplos_siete = [70]
'''
# Bucket every input value into the lists of multiples of 2, 3, 5 and 7
# (a value may land in several buckets).
entrada = [1, 10, 70, 30, 50, 55]
multiplos_dos = []
multiplos_tres = []
multiplos_cinco = []
multiplos_siete = []
# Pair each divisor with its destination bucket and test them all per value.
for valor in entrada:
    for divisor, destino in ((2, multiplos_dos), (3, multiplos_tres),
                             (5, multiplos_cinco), (7, multiplos_siete)):
        if valor % divisor == 0:
            destino.append(valor)
# NB: 'multipos' typo kept on purpose -- it is the original runtime output.
print('Los multipos de 2 son {}'.format(multiplos_dos))
print('Los multipos de 3 son {}'.format(multiplos_tres))
print('Los multipos de 5 son {}'.format(multiplos_cinco))
print('Los multipos de 7 son {}'.format(multiplos_siete))
"jonafuscano1@gmail.com"
] | jonafuscano1@gmail.com |
6169e0ffbb24b1abe7c04cb37a356c5e0dbdc47a | fd9b248c7a47bcc288ed466ad54aaddd4d70cc52 | /testsAutomates.py | bbff2c627e633ea61074a2eab97234292ca85fa5 | [] | no_license | IemProg/FSMs_with_Python | 3c5fa748bd20ee864fa91663a524aa510a715e49 | 909ef318220da1644a5713a955c18ad15d75337a | refs/heads/master | 2020-04-07T05:18:35.387392 | 2019-01-17T19:58:03 | 2019-01-17T19:58:03 | 158,091,880 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,183 | py | # -*- coding: utf-8 -*-
from transition import *
from state import *
import os
import copy
from sp import *
from parser import *
from itertools import product
from automateBase import AutomateBase
from automate import Automate
show = True # Mettre à True pour afficher graphiquement les tests de construction d'automates
def equalList(l, m):
    """Multiset equality: True iff l and m hold the same elements with the
    same multiplicities (order-independent, compared with ==).

    Neither input is modified.  Iterative rewrite of the original, which
    recursed once per element (risking RecursionError on long lists) and
    deep-copied both inputs even though only the list skeleton is mutated.
    """
    if len(l) != len(m):
        return False
    remaining = list(m)  # shallow copy: we only remove references
    for e in l:
        if e in remaining:
            remaining.remove(e)
        else:
            return False
    return True
#####
# Definition of the test automata used by all the tests below.
# State(n, i, f): n presumably the state id, i = initial?, f = final? -- TODO
# confirm against state.py.  Transition(src, letter, dst) links two states.
#####
# auto1 accepts words over {a,b} with an odd number of a's
# Deterministic / Complete
s01 = State(0,True,False)
s11 = State(1,False,True)
auto1 = Automate([Transition(s01,'b',s01),Transition(s01,'a',s11),Transition(s11,'b',s11),Transition(s11,'a',s01)])
# auto2 accepts words of the form a*b*
# Deterministic / Not complete
s02 = State(0,True,True)
s12 = State(1,False,True)
auto2 = Automate([Transition(s02,'a',s02),Transition(s02,'b',s12),Transition(s12,'b',s12)])
# auto3 (example from the assignment) accepts words with 3 consecutive a's
# Non-deterministic / Not complete
s03 = State(0,True,False)
s13 = State(1,False,False)
s23 = State(2,False,False)
s33 = State(3,False,True)
auto3 = Automate([Transition(s03,'a',s03),Transition(s03,'b',s03),Transition(s03,'a',s13),Transition(s13,'a',s23),Transition(s23,'a',s33),Transition(s33,'a',s33),Transition(s33,'b',s33)])
# auto4 accepts words containing (at least) one a
# Non-deterministic / Complete
s04 = State(0,True,False)
s14 = State(1,False,True)
auto4 = Automate([Transition(s04,'a',s04),Transition(s04,'b',s04),Transition(s04,'a',s14),Transition(s14,'a',s14),Transition(s14,'b',s14)])
# auto5 accepts words starting with a b (with two initial states)
# Non-deterministic / Complete
s05 = State(0,True,False)
s15 = State(1,True,False)
s25 = State(2,False,True)
s35 = State(3,False,False)
auto5 = Automate([Transition(s05,'a',s35),Transition(s15,'a',s35),Transition(s05,'b',s25),Transition(s15,'b',s35),Transition(s25,'a',s25),Transition(s25,'b',s25),Transition(s35,'a',s35),Transition(s35,'b',s35)])
# Commented-out graphical preview of the five automata (inert string).
"""
auto1.show("auto1")
auto2.show("auto2")
auto3.show("auto3")
auto4.show("auto4")
auto5.show("auto5")
"""
#####
# Tests des fonctions
#####
print("Début des tests :")

# Fonction succ -- each case pairs a deferred call with the expected
# successor set; results are compared as multisets via equalList.
if auto1.succ([s01],'a') == None:
    print("Succ non définie")
else:
    print("Tests fonction succ:")
    cas_succ = [
        (lambda: auto1.succ([s01], 'a'), [s11]),
        (lambda: auto4.succ([s04], 'a'), [s04, s14]),
        (lambda: auto3.succ([s03, s13], 'b'), [s03]),
        (lambda: auto5.succ([s05, s15], 'a'), [s35]),
        (lambda: auto3.succ([s13, s23], 'b'), []),
    ]
    cpt = 0
    for numero, (appel, target) in enumerate(cas_succ, 1):
        test = appel()
        if equalList(test, target):
            cpt = cpt + 1
        else:
            print("- Fail test {} : renvoie".format(numero), test, "au lieu de", target)
    print(cpt, "tests sur 5 réussis.")
# Fonction accepte -- boolean word-acceptance checks on the five automata.
if auto1.accepte(auto1,"a") == None:
    print("Accepte non définie")
else:
    print("Tests fonction accepte:")
    cas_accepte = [
        (lambda: auto1.accepte(auto1, "ababab"), True),
        (lambda: auto2.accepte(auto2, ""), True),
        (lambda: auto1.accepte(auto1, "abba"), False),
        (lambda: auto3.accepte(auto3, "abaaab"), True),
        (lambda: auto3.accepte(auto3, "abaab"), False),
        (lambda: auto5.accepte(auto5, "ba"), True),
    ]
    cpt = 0
    for numero, (appel, target) in enumerate(cas_accepte, 1):
        test = appel()
        if test == target:
            cpt = cpt + 1
        else:
            print("- Fail test {} : renvoie".format(numero), test, "au lieu de", target)
    print(cpt, "tests sur 6 réussis.")
# Fonction estComplet -- completeness of each automaton over alphabet "ab".
if auto1.estComplet(auto1,"ab") == None:
    print("estComplet non définie")
else:
    print("Tests fonction estComplet:")
    cas_complet = [
        (auto1, True),
        (auto2, False),
        (auto3, False),
        (auto4, True),
        (auto5, True),
    ]
    cpt = 0
    for numero, (aut, target) in enumerate(cas_complet, 1):
        test = auto1.estComplet(aut, "ab")
        if test == target:
            cpt = cpt + 1
        else:
            print("- Fail test {} : renvoie".format(numero), test, "au lieu de", target)
    print(cpt, "tests sur 5 réussis.")
# fonction estDeterministe -- determinism of each automaton.
if auto1.estDeterministe(auto1) == None:
    print("estDeterministe non définie")
else:
    print("Tests fonction estDeterministe:")
    cas_det = [
        (auto1, True),
        (auto2, True),
        (auto3, False),
        (auto4, False),
        (auto5, False),
    ]
    cpt = 0
    for numero, (aut, target) in enumerate(cas_det, 1):
        test = auto1.estDeterministe(aut)
        if test == target:
            cpt = cpt + 1
        else:
            print("- Fail test {} : renvoie".format(numero), test, "au lieu de", target)
    print(cpt, "tests sur 5 réussis.")
# fonction completeAutomate -- visual tests: render the automaton before and
# after completion (or print both when `show` is False).
if auto1.completeAutomate(auto1,"ab") == None:
    print("completeAutomate non définie")
else:
    for numero, (cible, nom) in enumerate([(auto1, "auto1"), (auto3, "auto3")], 1):
        val = input("Appuyer sur une touche pour test {} de completeAutomate".format(numero))
        test = auto1.completeAutomate(cible, "ab")
        if show:
            cible.show("cA{}_{}".format(numero, nom))
            test.show("cA{}_{}_complet".format(numero, nom))
        else:
            print(cible)
            print(test)
# fonction determinisation -- visual tests on auto1 (already deterministic)
# and auto3 (non-deterministic).
if auto1.determinisation(auto1) == None:
    print("determinisation non définie")
else:
    for numero, (cible, nom) in enumerate([(auto1, "auto1"), (auto3, "auto3")], 1):
        val = input("Appuyer sur une touche pour test {} de determinisation".format(numero))
        test = auto1.determinisation(cible)
        if show:
            cible.show("d{}_{}".format(numero, nom))
            test.show("d{}_{}_deterministe".format(numero, nom))
        else:
            print(cible)
            print(test)
# fonction complementaire -- visual tests of language complement over "ab".
if auto1.complementaire(auto1,"ab") == None:
    print("complementaire non définie")
else:
    for numero, (cible, nom) in enumerate([(auto1, "auto1"), (auto2, "auto2")], 1):
        val = input("Appuyer sur une touche pour test {} de complementaire".format(numero))
        test = auto1.complementaire(cible, "ab")
        if show:
            cible.show("c{}_{}".format(numero, nom))
            test.show("c{}_{}_complementaire".format(numero, nom))
        else:
            print(cible)
            print(test)
# fonction intersection -- visual test of the product automaton of auto1/auto2.
if auto1.intersection(auto1,auto2) == None:
    print("intersection non définie")
else:
    val = input("Appuyer sur une touche pour test 1 de intersection")
    resultat = auto1.intersection(auto1, auto2)
    if show:
        auto1.show("i1_auto1")
        auto2.show("i1_auto2")
        resultat.show("i1_intersection")
    else:
        for aut in (auto1, auto2, resultat):
            print(aut)
# fonction union (facultative) -- visual test of the union of auto1 and auto2.
if auto1.union(auto1,auto2) == None:
    print("union non définie")
else:
    val = input("Appuyer sur une touche pour test 1 de union")
    resultat = auto1.union(auto1, auto2)
    if show:
        auto1.show("u1_auto1")
        auto2.show("u1_auto2")
        resultat.show("u1_union")
    else:
        for aut in (auto1, auto2, resultat):
            print(aut)
# fonction concatenation -- visual tests on two (left, right) automaton pairs.
if auto1.concatenation(auto1,auto2) == None:
    print("concatenation non définie")
else:
    paires = [(auto1, "auto1", auto2, "auto2"), (auto2, "auto2", auto5, "auto5")]
    for numero, (gauche, nom_g, droite, nom_d) in enumerate(paires, 1):
        val = input("Appuyer sur une touche pour test {} de concatenation".format(numero))
        test = auto1.concatenation(gauche, droite)
        if show:
            gauche.show("conc{}_{}".format(numero, nom_g))
            droite.show("conc{}_{}".format(numero, nom_d))
            test.show("conc{}_concatenation".format(numero))
        else:
            print(gauche)
            print(droite)
            print(test)
# fonction etoile -- visual test of the Kleene star of auto5.
if auto1.etoile(auto1) == None:
    print("etoile non définie")
else:
    val = input("Appuyer sur une touche pour test 1 de etoile")
    resultat = auto1.etoile(auto5)
    if show:
        auto5.show("e1_auto5")
        resultat.show("e1_etoile")
    else:
        print(auto5)
        print(resultat)
| [
"btoxic24@gmail.com"
] | btoxic24@gmail.com |
1f7023a8396aaa1e95e2598545db12884d2f3eb9 | 5547486a73cb16d08a0f5400e02558d61706e2e8 | /csr_mhqa/argument_parser.py | 620be9180a8ae482c711b0e085a6f95c474ca7aa | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | jobine/HGN | 6cd64bd1b175a4ae6d412fca09e73fe45b1ea087 | 6d8dd5ebf2a5077652b09a00610ade89bfcd6b4b | refs/heads/master | 2023-05-11T21:31:37.289472 | 2021-05-31T08:59:29 | 2021-05-31T08:59:29 | 355,061,160 | 0 | 2 | MIT | 2021-04-21T17:11:51 | 2021-04-06T04:54:24 | Python | UTF-8 | Python | false | false | 8,692 | py | # coding=utf-8
#/usr/bin/env python3
import os
import argparse
import torch
import json
import logging
import random
import numpy as np
from os.path import join
from envs import DATASET_FOLDER, MODEL_FOLDER, OUTPUT_FOLDER
from model_envs import ALL_MODELS, MODEL_CLASSES
logger = logging.getLogger(__name__)
def boolean_string(s):
    """Parse 'true'/'false' (case-insensitive) into a bool.

    Raises ValueError for any other string; used as an argparse `type`.
    """
    lowered = s.lower()
    if lowered not in ('true', 'false'):
        raise ValueError('Not a valid boolean string')
    return lowered == 'true'
def json_to_argv(json_file):
    """Flatten a JSON config file into an argparse-style argv list.

    Each key/value pair becomes ['--key', str(value)]; a None value is
    kept as None so callers can detect unset options.
    """
    # Fix: the original `json.load(open(json_file))` leaked the file handle.
    with open(json_file) as f:
        j = json.load(f)
    argv = []
    for k, v in j.items():
        new_v = str(v) if v is not None else None
        argv.extend(['--' + k, new_v])
    return argv
def set_seed(args):
    """Seed the Python, NumPy and PyTorch RNGs (plus all CUDA devices when
    args.n_gpu > 0) with args.seed for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def complete_default_train_parser(args):
    """Post-process parsed CLI args in place: pick the device (single GPU,
    DataParallel or distributed NCCL), derive GNN layer/head counts from the
    --gnn spec, compute the effective batch size, seed the RNGs, create the
    experiment directory and snapshot the args there.  Returns args.
    """
    if args.gpu_id:
        # Restrict CUDA to the requested device(s) before torch touches them.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    # set n_gpu
    if args.local_rank == -1:
        # Non-distributed: one process, optionally DataParallel over all GPUs.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if args.data_parallel:
            args.n_gpu = torch.cuda.device_count()
        else:
            args.n_gpu = 1
    else:
        # Distributed: one GPU per process, NCCL backend.
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # --gnn is formatted "gat:<n_layers>,<n_heads>" (see default_train_parser).
    args.num_gnn_layers = int(args.gnn.split(':')[1].split(',')[0])
    args.num_gnn_heads = int(args.gnn.split(':')[1].split(',')[1])
    # --mask_edge_types arrives as a comma-separated string; convert to ints.
    if len(args.mask_edge_types):
        args.mask_edge_types = list(map(int, args.mask_edge_types.split(',')))
    args.max_doc_len = 512
    args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    # TODO: only support albert-xxlarge-v2 now
    # Encoder hidden size inferred from the checkpoint name string.
    args.input_dim = 768 if 'base' in args.encoder_name_or_path else (4096 if 'albert' in args.encoder_name_or_path else 1024)
    # output dir name
    if not args.exp_name:
        args.exp_name = '_'.join([args.encoder_name_or_path,
                                  'lr' + str(args.learning_rate),
                                  'bs' + str(args.batch_size)])
    args.exp_name = os.path.join(args.output_dir, args.exp_name)
    set_seed(args)
    os.makedirs(args.exp_name, exist_ok=True)
    # Persist the full configuration next to the model checkpoints.
    torch.save(args, join(args.exp_name, "training_args.bin"))
    return args
def default_train_parser():
    """Build the argparse parser with every training option: paths, encoder
    selection, batch sizes, environment (fp16/distributed), optimizer and
    schedule hyper-parameters, GNN/graph settings, bi-attention dimensions
    and per-task loss weights.  The result is expected to be post-processed
    by complete_default_train_parser().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_dir',
                        type=str,
                        default=OUTPUT_FOLDER,
                        help='Directory to save model and summaries')
    parser.add_argument("--exp_name",
                        type=str,
                        default=None,
                        help="If set, this will be used as directory name in OUTOUT folder")
    parser.add_argument("--config_file",
                        type=str,
                        default=None,
                        help="configuration file for command parser")
    parser.add_argument("--dev_gold_file",
                        type=str,
                        default=join(DATASET_FOLDER, 'data_raw', 'hotpot_dev_distractor_v1.json'))
    # model
    parser.add_argument("--model_type",
                        default='bert',
                        type=str,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--max_seq_length", default=512, type=int)
    parser.add_argument("--max_query_length", default=50, type=int)
    parser.add_argument("--encoder_name_or_path",
                        default='bert-base-uncased',
                        type=str,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--per_gpu_train_batch_size",
                        default=8,
                        type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--eval_batch_size", default=16, type=int)
    # eval
    parser.add_argument("--encoder_ckpt", default=None, type=str)
    parser.add_argument("--model_ckpt", default=None, type=str)
    # Environment
    parser.add_argument("--data_parallel",
                        default=False,
                        type=boolean_string,
                        help="use data parallel or not")
    parser.add_argument("--gpu_id", default=None, type=str, help="GPU id")
    parser.add_argument('--fp16',
                        type=boolean_string,
                        default='false',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    # learning and log
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument("--num_train_epochs", default=10.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=1e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X updates steps.")
    # hyper-parameter
    parser.add_argument('--q_update', type=boolean_string, default='False', help='Whether update query')
    parser.add_argument("--trans_drop", type=float, default=0.2)
    parser.add_argument("--trans_heads", type=int, default=3)
    # graph
    parser.add_argument('--num_edge_type', type=int, default=8)
    parser.add_argument('--mask_edge_types', type=str, default="0")
    parser.add_argument('--gnn', default='gat:1,2', type=str, help='gat:n_layer, n_head')
    parser.add_argument("--gnn_drop", type=float, default=0.3)
    parser.add_argument('--q_attn', type=boolean_string, default='True', help='whether use query attention in GAT')
    parser.add_argument("--lstm_drop", type=float, default=0.3)
    parser.add_argument("--max_para_num", default=4, type=int)
    parser.add_argument("--max_sent_num", default=40, type=int)
    parser.add_argument("--max_entity_num", default=60, type=int)
    parser.add_argument("--max_ans_ent_num", default=15, type=int)
    # bi attn
    parser.add_argument('--ctx_attn', type=str, default='gate_att_up', choices=['no_gate', 'gate_att_or', 'gate_att_up'])
    parser.add_argument("--ctx_attn_hidden_dim", type=int, default=300)
    parser.add_argument("--bi_attn_drop", type=float, default=0.3)
    parser.add_argument("--hidden_dim", type=int, default=300)
    # loss
    parser.add_argument("--ans_lambda", type=float, default=1)
    parser.add_argument("--type_lambda", type=float, default=1)
    parser.add_argument("--para_lambda", type=float, default=1)
    parser.add_argument("--sent_lambda", type=float, default=5)
    parser.add_argument("--ent_lambda", type=float, default=1)
    parser.add_argument("--sp_threshold", type=float, default=0.5)
    return parser
| [
"jobine_cn@hotmail.com"
] | jobine_cn@hotmail.com |
f11f52e01879d9755a5cd9d124a98efe4d68e7d2 | b75229c262e377c5e4dc6363be6e1b639afb85aa | /gist_backup.py | 1dbcff447b855079a1659f10fa119a5d6919143d | [] | no_license | foolishflyfox/fscript | e18ccf469c2b94ccf2d750812d314f4b17c73d79 | 7637b9b2fb614a255aa8668ddcde33f745876ab4 | refs/heads/master | 2021-08-10T23:46:53.277122 | 2020-04-21T01:54:14 | 2020-04-21T01:54:14 | 162,393,088 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,095 | py | #!env python
# Note: if you are in China, you should make sure your shell can't visit
# foreign websites, otherwise you may fail to use git api
import argparse
import urllib
import json
import requests
import os
from datetime import datetime
from urllib.request import urlopen
# CLI: the account to back up, entries per API page, and where to store the
# per-gist freshness metadata used to skip unchanged files on later runs.
parser = argparse.ArgumentParser(description='Pull gists from github')
parser.add_argument('git_account', type=str,
                    help="Specify the github account name you want pull gist")
parser.add_argument('-p', '--perpage', default=30, type=int,
                    help="Specify the number of entries per page")
parser.add_argument('-i', '--information_file', default='./gist_information.json',
                    type=str, help="The file of storing gist informations ")
opt = parser.parse_args()
user = opt.git_account
perpage = opt.perpage
information_file = opt.information_file
print('github user:', user)
# Ask the GitHub users API how many public gists the account has, then derive
# the number of pages to fetch (ceiling division by perpage).
root_url = 'https://api.github.com/users/'+user
userurl = urlopen(root_url)
public_gists = json.load(userurl)
gistcount = public_gists['public_gists']
print(f'Found gists : {gistcount}')
pages = (gistcount-1) // perpage + 1
print(f"Found pages : {pages}")
# NOTE: paths are relative to the current working directory at run time,
# not to the directory the script lives in.
if os.path.exists(information_file):
    with open(information_file, 'r') as f:
        gist_information = json.load(f)
else:
    gist_information = dict()
# Fresh metadata gathered during this run; written back at the end.
update_information = dict()
files_counter = 0
# Walk every API page, downloading each gist's first file unless the locally
# recorded 'updated_at' timestamp shows it is already current.
for page in range(pages):
    print(f"Processing page number {page+1} ...")
    # Fix: pass per_page explicitly.  `pages` is computed from `perpage`, but
    # the old URL omitted the parameter, so GitHub always served its default
    # 30 entries per page and any --perpage != 30 silently skipped gists.
    pageUrl = root_url + '/gists?page=' + str(page+1) + '&per_page=' + str(perpage)
    gisturl = urlopen(pageUrl)
    gist_entries = json.load(gisturl)
    for gist_info in gist_entries:
        files_counter += 1
        gist_file = gist_info['files']
        # NOTE(review): only the first file of each gist is backed up --
        # confirm multi-file gists are not expected for this account.
        gist_file_name = list(gist_file.keys())[0]
        gist_file_raw_url = gist_file[gist_file_name]['raw_url']
        gist_updated_time = gist_info['updated_at']
        gist_file_description = gist_info['description']
        update_information[gist_file_name] = {
            'updated_at': gist_updated_time,
            'description': gist_file_description
        }
        if (gist_file_name in gist_information and
            gist_information[gist_file_name]['updated_at'] == gist_updated_time):
            print(f'No.{files_counter} file {gist_file_name} is up to date')
            # Entries removed here leave gist_information holding only the
            # gists that no longer exist upstream (cleaned up below).
            del gist_information[gist_file_name]
        else:
            if gist_file_name in gist_information:
                del gist_information[gist_file_name]
            print(f"No.{files_counter} file {gist_file_name} is updating...", end= ' ')
            gist_content = requests.get(gist_file_raw_url).text
            with open(gist_file_name, 'w') as f:
                f.write(gist_content)
            print('OK')
# Whatever is still listed in gist_information was not seen during the crawl:
# the gist was deleted upstream, so remove its local copy as well.
for stale_name in gist_information:
    stale_path = os.path.join('.', stale_name)
    if os.path.exists(stale_path):
        os.remove(stale_path)
        print(f'File "{stale_name}" is deleted')

# Persist the fresh metadata so the next run can skip up-to-date gists.
with open(information_file, 'w') as f:
    json.dump(update_information, f)

print('Complete backup')
| [
"fenghuabin1992@163.com"
] | fenghuabin1992@163.com |
13ca807029ee668cc1db389b2189d8a365a30962 | 7e443af193d6c541d9e4ad45bd44255ac98c0f40 | /src/fastCrawler/test_api.py | e03325cbaa3e0ab189a9bc14a3a8652623f246ba | [] | no_license | aseempatni/MentionMe | 5334d4c7b12ab3c908e4684bdfd6d9d703064b6a | 1ed0600ba581ac77e4271e10d4145602550a2e2f | refs/heads/master | 2020-04-10T01:50:29.231003 | 2015-12-11T09:48:26 | 2015-12-11T09:48:26 | 40,751,832 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | # testing twitter APIs
import sys
import time
from twython import Twython, TwythonRateLimitError
#import config
import json
import thread
import os
# read config (expects a local config.json with 'client_args' and 'keys')
config_file = open('config.json','r')
configs = json.load(config_file)
config_file.close()
# apply config
client_args = configs['client_args']
keys = configs['keys']
# Numeric Twitter user id whose friend list will be crawled.
user = 16388972
# SECURITY(review): API credentials are hard-coded below even though
# config.json already carries a 'keys' section -- these committed values
# should be revoked and loaded from config instead.
key = {
    "app_key": "dI2Ed6y0U9pn0f1kEQvGLXnIl",
    "app_secret": "YdFcoL0R88iGfSaiFGVPY5YSx0ouG5xvHjgfwcvh7r0UOCkYJe"
}
def crawl_more_user_data(user):
    """Download the complete friends-ids list of `user`, appending each API
    page (one JSON object per page) to a file named after the user id.

    Pages through Twitter's cursored get_friends_ids endpoint; on a rate
    limit it sleeps until the window resets (plus a 10s pad) and retries.
    Any other exception aborts the crawl.
    NOTE(review): successive json.dump calls concatenate objects into one
    file, so the result is not a single valid JSON document -- confirm that
    downstream readers parse it record by record.  (Python 2 code.)
    """
    # Crawl user friends
    friendfile = open(str(user),'a+')
    cursor = ''
    while True:
        try:
            if cursor=='':
                # First request: omit the cursor to start from the beginning.
                friends = twitter.get_friends_ids(user_id=user,count=5000)
            else:
                friends = twitter.get_friends_ids(user_id=user,count=5000,cursor=cursor)
            json.dump(friends,friendfile)
            cursor = friends['next_cursor_str']
            print cursor
            if cursor=='0':
                # cursor '0' means the last page was reached
                break
        except TwythonRateLimitError:
            # rate limit reached: wait until the reset epoch reported by Twitter
            reset = int(twitter.get_lastfunction_header('x-rate-limit-reset'))
            msg = "waiting for "+str(reset - time.time())+ ' sec'
            print msg
            wait = max(reset - time.time(), 0) + 10 # adding 10 second pad
            time.sleep(wait)
        except Exception as e:
            # any other error: report and give up on this user
            print e.__doc__+" "+ e.message
            break
    friendfile.close()
# read sys args
app_key = key["app_key"]
app_secret = key["app_secret"]
# initialize twython: app-only OAuth2 in two steps -- obtain a bearer token,
# then rebuild the client authenticated with that token.
twitter = Twython(app_key, app_secret, oauth_version=2,client_args=client_args)
ACCESS_TOKEN = twitter.obtain_access_token()
twitter = Twython(app_key, access_token=ACCESS_TOKEN)
if __name__ == "__main__":
    crawl_more_user_data(user)
| [
"apatnip@gmail.com"
] | apatnip@gmail.com |
15ffd68d61b4a460ef95ddadae10b0d714791ef3 | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/models/dolby_digital_plus_downmixing_preferred_mode.py | dbe4b9349d8eabb5c12189d531b9a85ed63ac8e2 | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 268 | py | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class DolbyDigitalPlusDownmixingPreferredMode(Enum):
    """Preferred mode for downmixing a multichannel Dolby Digital Plus
    stream to two channels (values mirror the Bitmovin API strings).
    """
    LO_RO = "LO_RO"  # left-only / right-only stereo downmix
    LT_RT = "LT_RT"  # left-total / right-total (surround-compatible) downmix
    PRO_LOGIC_II = "PRO_LOGIC_II"  # Dolby Pro Logic II compatible downmix
| [
"openapi@bitmovin.com"
] | openapi@bitmovin.com |
2646f88f0590dd62b8ba725e67e06c4e9c20406e | b0fce7b572c78ee67ea0e2bd27e2837fffe66891 | /setup.py | 87b23bf3cfa596e1163e4cf81c7292a8ba217f97 | [
"MIT"
] | permissive | ZizhouJia/pyson | 300bd4c68cec3c0a42c5f3135e0447149ca86ebe | ba80336e6ec43456c0d1bf3e71109609b9489181 | refs/heads/master | 2020-08-15T07:55:24.704936 | 2019-11-29T08:46:35 | 2019-11-29T08:46:35 | 215,304,822 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import setuptools
# Packaging metadata for the 'pypyson' distribution (a JSON-like object
# notation for Python, parsed with the ANTLR4 runtime).
setuptools.setup(
    name="pypyson",
    version='0.01',
    description="A JSON like more powerful object notation for python",
    license="MIT License",
    author="ZizhouJia",
    author_email="jiazizhou@126.com",
    url="http://github.com/ZizhouJia/pyson",
    # Include every package found under the repository root.
    packages=setuptools.find_packages(),
    install_requires=["antlr4-python3-runtime"],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Operating System :: OS Independent'
    ],
    # Ship the default checker scheme alongside the package.
    data_files=[('pyson/init', ['pyson/init/checker_scheme.pyson'])],
    python_requires='>=3.6'
)
"jiazizhou@126.com"
] | jiazizhou@126.com |
50a5aac6db1eb1123552eab226989d962b71cf62 | 1865e18c5d8dbe3cf5b98324827316e24e0b0377 | /spark_analysis.py | 080c773a4cc29f5bca1162f4ffe8f47bfe7e6496 | [] | no_license | ortizcapetta/BigDataP3 | ff262329f0078d3aa66e0be2dc40e8df79452e65 | aa6652e1f0e661e1757f9ff4d40bc2617e877f06 | refs/heads/master | 2020-04-09T11:29:39.729049 | 2018-12-10T17:22:17 | 2018-12-10T17:22:17 | 160,311,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | # coding=utf-8
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# Load the prediction CSVs of both models; column _c1 holds the predicted
# sentiment label (headerless CSV, so Spark names columns _c0, _c1, ...).
model1res = spark.read.csv('Results/model1res.csv')
model2res = spark.read.csv('Results/model2res.csv')
# Count how many rows each model assigned to every sentiment value.
model1res.createOrReplaceTempView("model1")
model1 = spark.sql("select _c1 as sentiment, count(*) "
                   "from model1 "
                   "group by sentiment "
                   "order by _c1 asc ")
model1.show()
# Same aggregation for the second model's predictions.
model2res.createOrReplaceTempView("model2")
model2 = spark.sql("select _c1 as sentiment, count(*) "
                   "from model2 "
                   "group by sentiment "
                   "order by _c1 asc ")
model2.show()
| [
"alemariortiz@yahoo.com"
] | alemariortiz@yahoo.com |
690d9d675600e9866b2c9ca5f0c311a6a28b8792 | b4c1998e2b23d71ac5a884f5c4b4f7da1732ee13 | /plot-time_mmap_touch-time_per_op.py | be38e830d3b1fd1a9fbe823271e5b2dc0d08d3ec | [] | no_license | multifacet/0sim-plotting-scripts | 0ab3ece3aff002d5ced50f410f460f5821caaa02 | 1c5035e071b1ffb9d69e66bdd8c7537e88f9d424 | refs/heads/master | 2022-07-10T19:18:46.648948 | 2022-06-29T20:31:36 | 2022-06-29T20:31:52 | 200,109,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | #!/usr/bin/env python2
import matplotlib.pyplot as plt
import numpy as np
import re
import itertools
from collections import OrderedDict
from sys import argv, exit
from paperstyle import MARKERS, COLORS, IS_PDF, FIGSIZE
data = OrderedDict()
def rdtsc_to_msec(ticks, freq):
    # freq is given in kHz, i.e. TSC ticks per millisecond, so a single
    # division converts a tick delta straight to milliseconds.
    return float(ticks) / freq
# Each CLI argument is "label:filename:freq_khz".  The file starts with two
# header lines (first/last timestamps) followed by one raw TSC reading per
# operation; we store per-sample deltas converted to milliseconds.
for arg in argv[1:]:
    label, filename, freq = arg.split(":")
    data[label] = []
    freq = int(freq) # KHz
    with open(filename, 'r') as f:
        first = int(f.readline()[8:].strip())
        last = int(f.readline()[7:].strip())
        # Keep only 1 in 100 data points so the data fits in memory; the
        # curves are smooth, so the lost resolution is harmless.
        # NOTE(review): ops_to_gb below multiplies by 10, which matches a
        # 1-in-10 sampling rate -- confirm which factor is intended.
        j = 0
        prev = first
        for line in f.readlines():
            v = int(line.strip())
            if j % 100 == 0 and j > 0: # discard the first
                data[label].append(rdtsc_to_msec(v - prev, freq))
            j += 1
            prev = v
plt.figure(1, figsize=FIGSIZE)
# Cycle marker shapes and colors so each labelled series is distinguishable.
markers = itertools.cycle(MARKERS)
colors = itertools.cycle(COLORS)
handles = []
def ops_to_gb(x):
    # Convert a sample index to gigabytes touched, assuming one 4 KiB page
    # per operation.
    page_bytes = 1 << 12
    bytes_per_gb = 1 << 30
    # NOTE(review): the trailing *10 presumably compensates for the sampling
    # rate in the parsing loop -- confirm it matches the 1-in-N factor used.
    return float(x) * page_bytes / bytes_per_gb * 10
# Plot each series as unconnected markers: x = memory touched (GB),
# y = per-sample time delta (ms).  Python 2 idioms (.next(), list-returning
# map) are used throughout.
for label, ys in data.items():
    xs = np.arange(len(ys))
    xs = map(ops_to_gb, xs)
    h_plot, = plt.plot(xs, ys, label = label, linestyle = 'None', marker = markers.next(), color = colors.next())
    handles.append(h_plot)
plt.legend(handles=handles)
# Anchor both axes at the origin for comparable plots.
plt.gca().set_xlim(left=0)
plt.gca().set_ylim(bottom=0)
plt.xlabel('Memory Used (GB)')
plt.ylabel("$\Delta$ Time (msec)")
plt.grid(True)
plt.savefig("/tmp/figure.%s" % ("pdf" if IS_PDF else "png"), bbox_inches="tight")
plt.show()
"markm@cs.wisc.edu"
] | markm@cs.wisc.edu |
ce554e2695eb9840c6d0399b1f782c9eb8d9d10e | d30cb6a597f6a5fad9a01da77594a225daf9a211 | /Lesson 4 - File Handling/project/attempt_1/suffix.py | 34ed3d75c984c7bdfaaf0517676f8b2ac263c7dd | [] | no_license | jmwoloso/Python_2 | 290ef8b0c7db8347fa25cf39da26f39e218d9c68 | 06c45545ed064d0e9c4fd15cc81cf454cb079c9d | refs/heads/master | 2020-04-24T02:18:34.058148 | 2015-08-02T21:02:02 | 2015-08-02T21:02:02 | 37,082,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | #!/usr/bin/python3
# A Program Used for Assigning Suffixes
# suffix.py
#
# Created by: Jason Wolosonovich
# 02-24-2015
#
# Lesson 4 - Project 1, Attempt 1
"""
Contains a dict that houses the extensions of many common
file types.
"""
#global file_suffix_dict
file_suffix_dict = {
1 : ".txt",
2 : ".doc",
3 : ".docx",
4 : ".png",
5 : ".jpeg",
6 : ".py",
7 : ".pyc",
8 : ".rtf",
9 : ".log",
10 : ".csv",
11 : ".dat",
12 : ".ppt",
13 : ".tar",
14 : ".tar.gz",
15 : ".mpg",
16 : ".mpeg",
17 : ".mp4",
18 : ".wmv",
19 : ".svg",
20 : ".xls",
21 : ".xlsx",
22 : ".accdb",
23 : ".db",
24 : ".bat",
25 : ".sql",
26 : ".tar.bz2",
27 : ""
}
| [
"jmwoloso@asu.edu"
] | jmwoloso@asu.edu |
33765590730029bbe09c1508395f13525e87b192 | ac831e55c1b63cb7952d62110b774c0faabfc7b5 | /lib/pyasn1_modules/rfc2459.py | 78991fa0af4f523202bae7d9a97dde88b6b6dc16 | [
"Apache-2.0"
] | permissive | itielshwartz/BackendApi | 3d82713082868efc144635a612d43132c27363fe | bc21013f8d96bbf0fba7a99f1deb5486ad32b168 | refs/heads/master | 2016-09-06T06:16:17.827250 | 2015-10-05T14:11:44 | 2015-10-05T14:11:44 | 34,627,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,191 | py | #
# X.509 message syntax
#
# ASN.1 source from:
# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/x509.asn
# http://www.ietf.org/rfc/rfc2459.txt
#
# Sample captures from:
# http://wiki.wireshark.org/SampleCaptures/
#
from pyasn1.type import tag, namedtype, namedval, univ, constraint, char, useful
MAX = 64 # XXX ?
#
# PKIX1Explicit88
#
# Upper Bounds
ub_name = univ.Integer(32768)
ub_common_name = univ.Integer(64)
ub_locality_name = univ.Integer(128)
ub_state_name = univ.Integer(128)
ub_organization_name = univ.Integer(64)
ub_organizational_unit_name = univ.Integer(64)
ub_title = univ.Integer(64)
ub_match = univ.Integer(128)
ub_emailaddress_length = univ.Integer(128)
ub_common_name_length = univ.Integer(64)
ub_country_name_alpha_length = univ.Integer(2)
ub_country_name_numeric_length = univ.Integer(3)
ub_domain_defined_attributes = univ.Integer(4)
ub_domain_defined_attribute_type_length = univ.Integer(8)
ub_domain_defined_attribute_value_length = univ.Integer(128)
ub_domain_name_length = univ.Integer(16)
ub_extension_attributes = univ.Integer(256)
ub_e163_4_number_length = univ.Integer(15)
ub_e163_4_sub_address_length = univ.Integer(40)
ub_generation_qualifier_length = univ.Integer(3)
ub_given_name_length = univ.Integer(16)
ub_initials_length = univ.Integer(5)
ub_integer_options = univ.Integer(256)
ub_numeric_user_id_length = univ.Integer(32)
ub_organization_name_length = univ.Integer(64)
ub_organizational_unit_name_length = univ.Integer(32)
ub_organizational_units = univ.Integer(4)
ub_pds_name_length = univ.Integer(16)
ub_pds_parameter_length = univ.Integer(30)
ub_pds_physical_address_lines = univ.Integer(6)
ub_postal_code_length = univ.Integer(16)
ub_surname_length = univ.Integer(40)
ub_terminal_id_length = univ.Integer(24)
ub_unformatted_address_length = univ.Integer(180)
ub_x121_address_length = univ.Integer(16)
class UniversalString(char.UniversalString): pass
class BMPString(char.BMPString): pass
class UTF8String(char.UTF8String): pass
id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
id_qt = univ.ObjectIdentifier('1.3.6.1.5.5.7.2')
id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
id_ad = univ.ObjectIdentifier('1.3.6.1.5.5.7.48')
id_qt_cps = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.1')
id_qt_unotice = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.2')
id_ad_ocsp = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.1')
id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2')
class AttributeValue(univ.Any): pass
class AttributeType(univ.ObjectIdentifier): pass
class AttributeTypeAndValue(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('value', AttributeValue())
)
class Attribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
)
id_at = univ.ObjectIdentifier('2.5.4')
id_at_name = univ.ObjectIdentifier('2.5.4.41')
id_at_sutname = univ.ObjectIdentifier('2.5.4.4')
id_at_givenName = univ.ObjectIdentifier('2.5.4.42')
id_at_initials = univ.ObjectIdentifier('2.5.4.43')
id_at_generationQualifier = univ.ObjectIdentifier('2.5.4.44')
class X520name(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
)
id_at_commonName = univ.ObjectIdentifier('2.5.4.3')
class X520CommonName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
)
id_at_localityName = univ.ObjectIdentifier('2.5.4.7')
class X520LocalityName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
)
id_at_stateOrProvinceName = univ.ObjectIdentifier('2.5.4.8')
class X520StateOrProvinceName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
)
id_at_organizationName = univ.ObjectIdentifier('2.5.4.10')
class X520OrganizationName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
)
id_at_organizationalUnitName = univ.ObjectIdentifier('2.5.4.11')
class X520OrganizationalUnitName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
)
id_at_title = univ.ObjectIdentifier('2.5.4.12')
class X520Title(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
)
id_at_dnQualifier = univ.ObjectIdentifier('2.5.4.46')
class X520dnQualifier(char.PrintableString): pass
id_at_countryName = univ.ObjectIdentifier('2.5.4.6')
class X520countryName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(2, 2)
pkcs_9 = univ.ObjectIdentifier('1.2.840.113549.1.9')
emailAddress = univ.ObjectIdentifier('1.2.840.113549.1.9.1')
class Pkcs9email(char.IA5String):
subtypeSpec = char.IA5String.subtypeSpec + constraint.ValueSizeConstraint(1, ub_emailaddress_length)
# ----
class DSAPrivateKey(univ.Sequence):
"""PKIX compliant DSA private key structure"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 0)))),
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('g', univ.Integer()),
namedtype.NamedType('public', univ.Integer()),
namedtype.NamedType('private', univ.Integer())
)
# ----
class RelativeDistinguishedName(univ.SetOf):
componentType = AttributeTypeAndValue()
class RDNSequence(univ.SequenceOf):
componentType = RelativeDistinguishedName()
class Name(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('', RDNSequence())
)
class DirectoryString(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
# hm, this should not be here!? XXX
)
# certificate and CRL specific structures begin here
class AlgorithmIdentifier(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('parameters', univ.Any())
)
class Extension(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extnID', univ.ObjectIdentifier()),
namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
namedtype.NamedType('extnValue', univ.Any())
)
class Extensions(univ.SequenceOf):
componentType = Extension()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class SubjectPublicKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', AlgorithmIdentifier()),
namedtype.NamedType('subjectPublicKey', univ.BitString())
)
class UniqueIdentifier(univ.BitString): pass
class Time(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('utcTime', useful.UTCTime()),
namedtype.NamedType('generalTime', useful.GeneralizedTime())
)
class Validity(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('notBefore', Time()),
namedtype.NamedType('notAfter', Time())
)
class CertificateSerialNumber(univ.Integer): pass
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('v1', 0), ('v2', 1), ('v3', 2)
)
class TBSCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('serialNumber', CertificateSerialNumber()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('validity', Validity()),
namedtype.NamedType('subject', Name()),
namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('extensions', Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class Certificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertificate', TBSCertificate()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signatureValue', univ.BitString())
)
# CRL structures
class RevokedCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('userCertificate', CertificateSerialNumber()),
namedtype.NamedType('revocationDate', Time()),
namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
)
class TBSCertList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('version', Version()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('thisUpdate', Time()),
namedtype.OptionalNamedType('nextUpdate', Time()),
namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=RevokedCertificate())),
namedtype.OptionalNamedType('crlExtensions', Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class CertificateList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertList', TBSCertList()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
# Algorithm OIDs and parameter structures
pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
class Dss_Sig_Value(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('r', univ.Integer()),
namedtype.NamedType('s', univ.Integer())
)
dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
class ValidationParms(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('seed', univ.BitString()),
namedtype.NamedType('pgenCounter', univ.Integer())
)
class DomainParameters(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('g', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('j', univ.Integer()),
namedtype.OptionalNamedType('validationParms', ValidationParms())
)
id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
class Dss_Parms(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('g', univ.Integer())
)
# x400 address syntax starts here
teletex_domain_defined_attributes = univ.Integer(6)
class TeletexDomainDefinedAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.TeletexString())
)
class TeletexDomainDefinedAttributes(univ.SequenceOf):
componentType = TeletexDomainDefinedAttribute()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
terminal_type = univ.Integer(23)
class TerminalType(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, ub_integer_options)
namedValues = namedval.NamedValues(
('telex', 3),
('teletelex', 4),
('g3-facsimile', 5),
('g4-facsimile', 6),
('ia5-terminal', 7),
('videotex', 8)
)
class PresentationAddress(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3),
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
)
extended_network_address = univ.Integer(22)
class E163_4_address(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('number', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ExtendedNetworkAddress(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('e163-4-address', E163_4_address()),
namedtype.NamedType('psap-address', PresentationAddress().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class PDSParameter(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
)
local_postal_attributes = univ.Integer(21)
class LocalPostalAttributes(PDSParameter): pass
class UniquePostalName(PDSParameter): pass
unique_postal_name = univ.Integer(20)
poste_restante_address = univ.Integer(19)
class PosteRestanteAddress(PDSParameter): pass
post_office_box_address = univ.Integer(18)
class PostOfficeBoxAddress(PDSParameter): pass
street_address = univ.Integer(17)
class StreetAddress(PDSParameter): pass
class UnformattedPostalAddress(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_physical_address_lines)))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
)
physical_delivery_office_name = univ.Integer(10)
class PhysicalDeliveryOfficeName(PDSParameter): pass
physical_delivery_office_number = univ.Integer(11)
class PhysicalDeliveryOfficeNumber(PDSParameter): pass
extension_OR_address_components = univ.Integer(12)
class ExtensionORAddressComponents(PDSParameter): pass
physical_delivery_personal_name = univ.Integer(13)
class PhysicalDeliveryPersonalName(PDSParameter): pass
physical_delivery_organization_name = univ.Integer(14)
class PhysicalDeliveryOrganizationName(PDSParameter): pass
extension_physical_delivery_address_components = univ.Integer(15)
class ExtensionPhysicalDeliveryAddressComponents(PDSParameter): pass
unformatted_postal_address = univ.Integer(16)
postal_code = univ.Integer(9)
class PostalCode(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
namedtype.NamedType('printable-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
)
class PhysicalDeliveryCountryName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
class PDSName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_pds_name_length)
physical_delivery_country_name = univ.Integer(8)
class TeletexOrganizationalUnitName(char.TeletexString):
subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
pds_name = univ.Integer(7)
teletex_organizational_unit_names = univ.Integer(5)
class TeletexOrganizationalUnitNames(univ.SequenceOf):
componentType = TeletexOrganizationalUnitName()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
teletex_personal_name = univ.Integer(4)
class TeletexPersonalName(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
teletex_organization_name = univ.Integer(3)
class TeletexOrganizationName(char.TeletexString):
subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
teletex_common_name = univ.Integer(2)
class TeletexCommonName(char.TeletexString):
subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
class CommonName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
common_name = univ.Integer(1)
class ExtensionAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_extension_attributes),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('extension-attribute-value',
univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ExtensionAttributes(univ.SetOf):
componentType = ExtensionAttribute()
subtypeSpec = univ.SetOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_extension_attributes)
class BuiltInDomainDefinedAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
)
class BuiltInDomainDefinedAttributes(univ.SequenceOf):
componentType = BuiltInDomainDefinedAttribute()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
class OrganizationalUnitName(char.PrintableString):
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
class OrganizationalUnitNames(univ.SequenceOf):
componentType = OrganizationalUnitName()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
class PersonalName(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class NumericUserIdentifier(char.NumericString):
subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
class OrganizationName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
class PrivateDomainName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
)
class TerminalIdentifier(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_terminal_id_length)
class X121Address(char.NumericString):
subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_x121_address_length)
class NetworkAddress(X121Address): pass
class AdministrationDomainName(univ.Choice):
tagSet = univ.Choice.tagSet.tagExplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
)
class CountryName(univ.Choice):
tagSet = univ.Choice.tagSet.tagExplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
class BuiltInStandardAttributes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('country-name', CountryName()),
namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
)
class ORAddress(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
)
#
# PKIX1Implicit88
#
id_ce_invalidityDate = univ.ObjectIdentifier('2.5.29.24')
class InvalidityDate(useful.GeneralizedTime): pass
id_holdinstruction_none = univ.ObjectIdentifier('2.2.840.10040.2.1')
id_holdinstruction_callissuer = univ.ObjectIdentifier('2.2.840.10040.2.2')
id_holdinstruction_reject = univ.ObjectIdentifier('2.2.840.10040.2.3')
holdInstruction = univ.ObjectIdentifier('2.2.840.10040.2')
id_ce_holdInstructionCode = univ.ObjectIdentifier('2.5.29.23')
class HoldInstructionCode(univ.ObjectIdentifier): pass
id_ce_cRLReasons = univ.ObjectIdentifier('2.5.29.21')
class CRLReason(univ.Enumerated):
namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8)
)
id_ce_cRLNumber = univ.ObjectIdentifier('2.5.29.20')
class CRLNumber(univ.Integer):
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(0, MAX)
class BaseCRLNumber(CRLNumber): pass
id_kp_serverAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.1.1')
id_kp_clientAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.2')
id_kp_codeSigning = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.3')
id_kp_emailProtection = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.4')
id_kp_ipsecEndSystem = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.5')
id_kp_ipsecTunnel = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.6')
id_kp_ipsecUser = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.7')
id_kp_timeStamping = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.8')
id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1')
id_ce_extKeyUsage = univ.ObjectIdentifier('2.5.29.37')
# KeyPurposeId ::= OBJECT IDENTIFIER
class KeyPurposeId(univ.ObjectIdentifier): pass
# ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
class ExtKeyUsageSyntax(univ.SequenceOf):
    componentType = KeyPurposeId()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# ReasonFlags ::= BIT STRING -- CRL distribution-point reason flags (bits 0..6)
class ReasonFlags(univ.BitString):
    namedValues = namedval.NamedValues(
        ('unused', 0),
        ('keyCompromise', 1),
        ('cACompromise', 2),
        ('affiliationChanged', 3),
        ('superseded', 4),
        ('cessationOfOperation', 5),
        ('certificateHold', 6)
    )
# SkipCerts ::= INTEGER (0..MAX)
# Fix: an INTEGER takes a value-range constraint; ValueSizeConstraint is for
# sized (string/collection) types, as used by the SEQUENCE OF types above.
class SkipCerts(univ.Integer):
    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
id_ce_policyConstraints = univ.ObjectIdentifier('2.5.29.36')
# PolicyConstraints ::= SEQUENCE of two OPTIONAL, IMPLICIT-tagged SkipCerts.
# NOTE(review): the implicit tags use tagFormatConstructed although SkipCerts
# is a primitive INTEGER — confirm against the encoder before changing.
class PolicyConstraints(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('requireExplicitPolicy', SkipCerts().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('inhibitPolicyMapping', SkipCerts().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
id_ce_basicConstraints = univ.ObjectIdentifier('2.5.29.19')
# BasicConstraints ::= SEQUENCE { cA BOOLEAN DEFAULT FALSE,
#                                 pathLenConstraint INTEGER (0..MAX) OPTIONAL }
class BasicConstraints(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('cA', univ.Boolean(False)),
        namedtype.OptionalNamedType('pathLenConstraint',
            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
    )
id_ce_subjectDirectoryAttributes = univ.ObjectIdentifier('2.5.29.9')
# SubjectDirectoryAttributes ::= SEQUENCE SIZE (1..MAX) OF Attribute
# (Attribute is defined earlier in this module.)
class SubjectDirectoryAttributes(univ.SequenceOf):
    componentType = Attribute()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# EDIPartyName -- used by the GeneralName CHOICE below.
class EDIPartyName(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('partyName',
            DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
    )
# AnotherName ::= SEQUENCE { type-id OBJECT IDENTIFIER, value [0] EXPLICIT ANY }
class AnotherName(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type-id', univ.ObjectIdentifier()),
        namedtype.NamedType('value',
            univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
# GeneralName ::= CHOICE -- nine alternatives, context tags [0]..[8].
class GeneralName(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('otherName',
            AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('rfc822Name',
            char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('dNSName',
            char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.NamedType('x400Address',
            ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.NamedType('directoryName',
            Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
        namedtype.NamedType('ediPartyName',
            EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
        namedtype.NamedType('uniformResourceIdentifier',
            char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
        namedtype.NamedType('iPAddress', univ.OctetString().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
        namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
    )
# GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
class GeneralNames(univ.SequenceOf):
    componentType = GeneralName()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# AccessDescription -- one entry of the AuthorityInfoAccess extension.
class AccessDescription(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
        namedtype.NamedType('accessLocation', GeneralName())
    )
# AuthorityInfoAccessSyntax ::= SEQUENCE SIZE (1..MAX) OF AccessDescription
class AuthorityInfoAccessSyntax(univ.SequenceOf):
    componentType = AccessDescription()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_deltaCRLIndicator = univ.ObjectIdentifier('2.5.29.27')
# DistributionPointName ::= CHOICE { fullName [0], nameRelativeToCRLIssuer [1] }
class DistributionPointName(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('fullName', GeneralNames().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
# DistributionPoint ::= SEQUENCE of three OPTIONAL, context-tagged components.
class DistributionPoint(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
    )
# BaseDistance ::= INTEGER (0..MAX)
class BaseDistance(univ.Integer):
    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
id_ce_cRLDistributionPoints = univ.ObjectIdentifier('2.5.29.31')
# CRLDistPointsSyntax ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
# Fix: componentType must be an *instance* (DistributionPoint()), matching every
# other SequenceOf in this module; passing the class breaks pyasn1 decoding.
class CRLDistPointsSyntax(univ.SequenceOf):
    componentType = DistributionPoint()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_issuingDistributionPoint = univ.ObjectIdentifier('2.5.29.28')
# IssuingDistributionPoint -- CRL extension; boolean components default FALSE.
class IssuingDistributionPoint(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.NamedType('onlyContainsUserCerts', univ.Boolean(False).subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('onlyContainsCACerts', univ.Boolean(False).subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.NamedType('indirectCRL', univ.Boolean(False).subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
    )
# GeneralSubtree ::= SEQUENCE { base GeneralName, minimum [0] DEFAULT 0,
#                               maximum [1] OPTIONAL }
class GeneralSubtree(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('base', GeneralName()),
        namedtype.NamedType('minimum', BaseDistance(0).subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
# GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree
class GeneralSubtrees(univ.SequenceOf):
    componentType = GeneralSubtree()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_nameConstraints = univ.ObjectIdentifier('2.5.29.30')
# NameConstraints ::= SEQUENCE { permittedSubtrees [0] OPTIONAL,
#                                excludedSubtrees  [1] OPTIONAL }
# Fix: excludedSubtrees must carry implicit tag [1]; tagging both OPTIONAL
# components [0] made them indistinguishable on decode.
class NameConstraints(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
# DisplayText ::= CHOICE of three string types, each SIZE (1..200).
class DisplayText(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('visibleString',
            char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
        namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
        namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
    )
# NoticeReference -- organization plus a list of notice numbers.
class NoticeReference(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('organization', DisplayText()),
        namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
    )
# UserNotice -- both components OPTIONAL per the RFC.
class UserNotice(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('noticeRef', NoticeReference()),
        namedtype.OptionalNamedType('explicitText', DisplayText())
    )
# CPSuri ::= IA5String
class CPSuri(char.IA5String): pass
# PolicyQualifierId -- restricted to the CPS and user-notice qualifier OIDs
# (id_qt_cps / id_qt_unotice are defined earlier in this module).
class PolicyQualifierId(univ.ObjectIdentifier):
    subtypeSpec = univ.ObjectIdentifier.subtypeSpec + constraint.SingleValueConstraint(id_qt_cps, id_qt_unotice)
# CertPolicyId ::= OBJECT IDENTIFIER
class CertPolicyId(univ.ObjectIdentifier): pass
class PolicyQualifierInfo(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
        namedtype.NamedType('qualifier', univ.Any())
    )
id_ce_certificatePolicies = univ.ObjectIdentifier('2.5.29.32')
# PolicyInformation -- a policy OID with optional qualifiers.
class PolicyInformation(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('policyIdentifier', CertPolicyId()),
        namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
    )
# CertificatePolicies ::= SEQUENCE SIZE (1..MAX) OF PolicyInformation
class CertificatePolicies(univ.SequenceOf):
    componentType = PolicyInformation()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_policyMappings = univ.ObjectIdentifier('2.5.29.33')
# PolicyMapping -- issuer-domain to subject-domain policy OID pair.
class PolicyMapping(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
        namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
    )
class PolicyMappings(univ.SequenceOf):
    componentType = PolicyMapping()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_privateKeyUsagePeriod = univ.ObjectIdentifier('2.5.29.16')
# PrivateKeyUsagePeriod -- notBefore [0] / notAfter [1], both OPTIONAL.
class PrivateKeyUsagePeriod(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
    )
id_ce_keyUsage = univ.ObjectIdentifier('2.5.29.15')
# KeyUsage ::= BIT STRING -- nine named bits per RFC 2459.
class KeyUsage(univ.BitString):
    namedValues = namedval.NamedValues(
        ('digitalSignature', 0),
        ('nonRepudiation', 1),
        ('keyEncipherment', 2),
        ('dataEncipherment', 3),
        ('keyAgreement', 4),
        ('keyCertSign', 5),
        ('cRLSign', 6),
        ('encipherOnly', 7),
        ('decipherOnly', 8)
    )
id_ce = univ.ObjectIdentifier('2.5.29')
id_ce_authorityKeyIdentifier = univ.ObjectIdentifier('2.5.29.35')
# KeyIdentifier ::= OCTET STRING
class KeyIdentifier(univ.OctetString): pass
id_ce_subjectKeyIdentifier = univ.ObjectIdentifier('2.5.29.14')
# SubjectKeyIdentifier ::= KeyIdentifier
class SubjectKeyIdentifier(KeyIdentifier): pass
# AuthorityKeyIdentifier -- all three components OPTIONAL, context tags [0]..[2].
class AuthorityKeyIdentifier(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
    )
id_ce_certificateIssuer = univ.ObjectIdentifier('2.5.29.29')
# The remaining extensions are plain GeneralNames sequences.
class CertificateIssuer(GeneralNames): pass
id_ce_subjectAltName = univ.ObjectIdentifier('2.5.29.17')
class SubjectAltName(GeneralNames): pass
id_ce_issuerAltName = univ.ObjectIdentifier('2.5.29.18')
class IssuerAltName(GeneralNames): pass
| [
"ishwartz@ebay.com"
] | ishwartz@ebay.com |
a8ff977cc5ae9a4b5514fef8b6bc461e26babfbc | f613e2db37f3eefd67a05f09801b50212ddac1e7 | /shixin.py | 2aff705c8323f551ea8ec34c82e7954238c90963 | [] | no_license | crawlerwolf/shixin | d33b8060379086c2775ddfa450edfdc8a6c4ad97 | 59f21072c5dfb57105de9658c4d182202372e915 | refs/heads/master | 2021-07-12T19:23:06.052545 | 2020-08-31T02:56:06 | 2020-08-31T02:56:06 | 196,349,344 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,423 | py | # *_*coding=utf-8*_*
import requests
import random
from fake_useragent import UserAgent
import time
import re
import json
from Mongo import *
from urllib3.exceptions import InsecureRequestWarning
from urllib3 import disable_warnings
disable_warnings(InsecureRequestWarning)
ua = UserAgent()  # rotating User-Agent pool (fake_useragent)
# Fixed Baidu search referer; a random User-Agent is picked once at import time.
headers = {
    'Referer':'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=0&rsv_idx=1&ch=5&tn=98012088_4_dg&wd=%E5%A4%B1%E4%BF%A1%E8%A2%AB%E6%89%A7%E8%A1%8C%E4%BA%BA&rsv_pq=84302d5100007635&rsv_t=4491SqcV8fI0MyG46sVyco5fdkgMRmpq9fOwXzKnYxreD2sekv%2FkaUkKNHYth1Azb5V43Q&rqlang=cn&rsv_enter=1&rsv_sug3=12&rsv_sug1=9&rsv_sug7=101',
    'User-Agent': ua.random}
session = requests.session()  # shared session so paginated requests reuse connections
def get_info(info_s):  # extract records from one parsed API response
    """Extract every record from a parsed Baidu API response and upsert it.

    `info_s` is the decoded JSON payload; records live under
    info_s["data"][0]["result"]. A non-empty `regDate` (YYYYMMDD) is
    reformatted to a Chinese date string; a falsy one is stored as-is.
    Returns the status string '完成提取' when all records are stored.
    """
    print('提取数据...')
    for info in info_s["data"][0]["result"]:
        # Fix: the two original branches duplicated the whole dict/print/store
        # sequence and differed only in the regDate value — compute it once.
        reg_date = info['regDate']
        if reg_date:
            reg_date = '{}年{}月{}日'.format(reg_date[0:4], reg_date[4:6], reg_date[6:8])
        data = {
            'iname': info['iname'],
            'caseCode': info['caseCode'],
            'cardNum': info['cardNum'],
            'businessEntity': info['businessEntity'],
            'courtName': info['courtName'],
            'areaName': info['areaName'],
            'gistId': info['gistId'],
            'regDate': reg_date,
            'gistUnit': info['gistUnit'],
            'duty': info['duty'],
            'performance': info['performance'],
            'disruptTypeName': info['disruptTypeName'],
            'publishDate': info['publishDate']
        }
        print('存储中...')
        print(data)
        to_mongo(data)
        print('完成储存')
    print('完成数据提取')
    return '完成提取'
# get_http('大连阳光新世纪房地产开发有限公司')
def to_mongo(data):
    """Upsert one record into the ShiXin_info collection, keyed by caseCode.

    Retries forever while the database is unreachable; returns a status
    string once the upsert succeeds.
    """
    while True:
        try:
            ShiXin_info.update({'caseCode': data['caseCode']}, {'$set': data}, True)
            return '完成储存'
        except pymongo.errors.ServerSelectionTimeoutError:
            print('等待连接mongo数据库')
            time.sleep(30)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit, making this retry loop impossible to interrupt.
            time.sleep(30)
def get_http(name):  # query the dishonest-debtor API for one name
    """Query Baidu's dishonest-debtor mini-API for `name` (first result page).

    On success delegates record storage to get_info() and pagination to
    get_next(). Returns '未找到' when nothing is found, '完成提取' when
    extraction finished, or None when the outcome is indeterminate.
    """
    try:
        url = 'https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?'
        params = {'resource_id': 6899,
                  'query': '失信被执行人名单',
                  'cardNum': '',
                  'iname': name,
                  'areaName': '',
                  'ie': 'utf-8',
                  'oe': 'utf-8',
                  'format': 'json',
                  't': int(round(time.time()*1000)),
                  'cb': 'jQuery11020023017826865680435_1510236300897',
                  '_': int(round(time.time()*1000)) - random.randint(2000000, 4000001)}
        time.sleep(random.uniform(6, 8))  # random delay to avoid rate limiting
        print('查找:', name)
        web_data = requests.get(url, headers=headers, params=params, verify=False, timeout=50)
        web_data.encoding = web_data.apparent_encoding
        if web_data.status_code == 200:
            # Strip the JSONP wrapper and re-balance the trailing brace.
            info_s = web_data.text
            pattern = re.compile(r'.*?jQuery.*?\((.*?)}\);', re.S)  # fix: raw string for the escape
            if re.search(pattern, info_s) is not None:
                info_s = re.search(pattern, info_s).group(1) + '}'
                info_s = json.loads(info_s)
            # NOTE(review): if the JSONP pattern does not match, info_s stays a
            # plain string and the subscripts below raise TypeError — confirm
            # the endpoint always returns JSONP.
            if info_s["data"] == []:
                print('未找到相关失信信息:', name)
                return '未找到'
            if info_s["data"] != []:
                print('获取第1页')
                txt = get_info(info_s)  # store page-1 records
                get_next(name)  # walk the remaining pages
                return txt
        if web_data.status_code != 200:
            txt = get_http(name)  # retry recursively on a bad status
            if txt == '未找到':
                return '未找到'
            if txt == '完成提取':
                return '完成提取'
    except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError) as e:
        print('重新请求:', name)
        txt = get_http(name)
        if txt == '未找到':
            return '未找到'
        if txt == '完成提取':
            return '完成提取'
    # Fix: previously returned the function object itself (`return get_http`);
    # fall through with None so callers' string comparisons behave sensibly.
    return None
def get_next(name):  # fetch result pages 2..21 (pn = 10..200, step 10)
    """Walk the remaining result pages for `name`, storing each via get_info().

    Returns '未找到' / '完成提取' when a terminal condition is hit, or None
    after all pages have been attempted.
    """
    for pn in range(10, 201, 10):
        page_num = (pn + 10)//10
        print('请求下一页:第' + str(page_num) + '页')
        params = {'resource_id': 6899,
                  'query': '失信被执行人名单',
                  'cardNum': '',
                  'iname': name,
                  'areaName': '',
                  'ie': 'utf-8',
                  'oe': 'utf-8',
                  'format': 'json',
                  'pn': pn,
                  'rn': 10,
                  't': int(round(time.time() * 1000)),
                  'cb': 'jQuery11020023017826865680435_1510236300897',
                  '_': int(round(time.time() * 1000)) - random.randint(2000000, 4000001)
                  }
        try:
            url = 'https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?'
            time.sleep(random.uniform(5, 9))  # random delay to avoid rate limiting
            # NOTE(review): proxy() is not defined anywhere in this file, so this
            # call raises NameError at runtime — confirm where proxy() should
            # come from before relying on pagination.
            web_data = session.get(url, headers=headers, params=params, verify=False, proxies={'http': proxy()}, timeout=500)
            web_data.encoding = web_data.apparent_encoding
            info_s = web_data.text
            pattern = re.compile(r'.*?jQuery.*?\((.*?)}\);', re.S)  # fix: raw string for the escape
            if re.search(pattern, info_s) is not None:
                info_s = re.search(pattern, info_s).group(1) + '}'
                info_s = json.loads(info_s)
            if web_data.status_code == 200:
                if info_s["data"] == []:
                    print('未找到:', '第' + str(page_num) + '页', '完成提取')
                    return '未找到'
                if info_s["data"] != []:
                    get_info(info_s)  # store this page's records
            if web_data.status_code != 200:
                print('提取数据失败')
                # Up to three retries of this page before moving on.
                for num in range(1,4):
                    txt = get_next_again(name, pn)
                    if txt == '未找到':
                        return '未找到'
                    if txt == '完成提取':
                        return '完成提取'
                    continue
        except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError) as e:
            for num in range(1, 4):
                txt = get_next_again(name, pn)
                if txt == '未找到':
                    return '未找到'
                if txt == '完成提取':
                    return '完成提取'
                continue
    # Fix: previously returned the function object itself (`return get_next`).
    return None
def get_next_again(name, pn):  # single retry of one result page
    """Retry one result page (`pn`) for `name`.

    Returns get_info()'s status on success, '未找到' for an empty page,
    '提取数据失败' on a bad HTTP status, or None on a network error.
    """
    page_num = (pn + 10)//10
    print('重新请求:第' + str(page_num) + '页')
    params = {'resource_id': 6899,
              'query': '失信被执行人名单',
              'cardNum': '',
              'iname': name,
              'areaName': '',
              'ie': 'utf-8',
              'oe': 'utf-8',
              'format': 'json',
              'pn': pn,
              'rn': 10,
              't': int(round(time.time() * 1000)),
              'cb': 'jQuery11020023017826865680435_1510236300897',
              '_': int(round(time.time() * 1000)) - random.randint(2000000, 4000001)
              }
    try:
        url = 'https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?'
        time.sleep(random.uniform(5, 9))  # random delay to avoid rate limiting
        # NOTE(review): proxy() is undefined in this file — see get_next().
        web_data = session.get(url, headers=headers, params=params, verify=False, proxies={'http': proxy()}, timeout=500)
        web_data.encoding = web_data.apparent_encoding
        info_s = web_data.text
        pattern = re.compile(r'.*?jQuery.*?\((.*?)}\);', re.S)  # fix: raw string for the escape
        if re.search(pattern, info_s) is not None:
            info_s = re.search(pattern, info_s).group(1) + '}'
            info_s = json.loads(info_s)
        if web_data.status_code == 200:
            if info_s["data"] == []:
                print('未找到:', '第' + str(page_num) + '页', '完成提取')
                return '未找到'
            if info_s["data"] != []:
                txt = get_info(info_s)
                return txt
        elif web_data.status_code != 200:
            print('提取数据失败')
            return '提取数据失败'
    except(requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError) as e:
        # Fix: previously returned the function object (`return get_next_again`);
        # None makes the caller's string comparisons fail cleanly and retry.
        return None
if __name__ == '__main__':
    # Manual smoke test: look up a single company by name.
    get_http('湖南雁能配电设备有限公司')
| [
"154080724@qq.com"
] | 154080724@qq.com |
3db9ea2ba0d5dd5664802a1c9f2123f87ae6ebe3 | 07057ce73af021e67069d6fb4278c25136911645 | /Sock_merchant.py | 727ec779ec093b7ec19a26d5b2c712aa96731403 | [] | no_license | himraj123456789/competitive-programming- | cfe7bba53be9f1d9d66e0ec325eb3e30c9ff8169 | 186dbd7e9965a72cc11c8f4cba8e33beb8ea83da | refs/heads/master | 2022-11-02T06:30:49.647793 | 2020-06-19T03:18:03 | 2020-06-19T03:18:03 | 262,454,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the sockMerchant function below.
def sockMerchant(n, ar):
    """Return how many matching pairs of socks the pile `ar` contains.

    `n` (the declared pile size) is kept for interface compatibility with the
    HackerRank harness but is not needed — the pile itself is counted.
    Replaces the original sort-and-scan loop with a single Counter pass.
    """
    from collections import Counter
    return sum(count // 2 for count in Counter(ar).values())
if __name__ == '__main__':
    # HackerRank harness: read n and the sock list from stdin, then write the
    # pair count to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    ar = list(map(int, input().rstrip().split()))
    result = sockMerchant(n, ar)
    fptr.write(str(result) + '\n')
    fptr.close()
| [
"noreply@github.com"
] | noreply@github.com |
7e1dfb3b37336ba6d0aca323d37084f40f5cd797 | d9d09007bd52cdfb8394c4ef92ec02d206c678c8 | /Numbers.py | a3344fb694289c554365e4e6e08f7e8d654f8322 | [] | no_license | smithi35/Basic-Cryptography | b56e9d504a8256380d9f5113b32ea961ef013bc0 | 1ec27ac0a4e845db3fa6ff6d36b6d2679d36afd5 | refs/heads/master | 2021-01-17T12:49:41.011829 | 2016-06-18T20:11:53 | 2016-06-18T20:11:53 | 56,406,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | import fileinput
from Letter import Letter
alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def isDash(char):
    """Return True when `char` is the dash separator '-'."""
    # Idiom fix: compare the character directly instead of taking ord() and
    # checking against the magic ASCII code 45.
    return char == '-'
def isNumber(char):
    """Return True when `char` is an ASCII digit '0'..'9'."""
    # Idiom fix: one chained comparison replaces the flag variable and the
    # magic ASCII bounds (47 < code < 58).
    return 48 <= ord(char) <= 57
# Map a 1-based number string onto the alphabet ('1' -> 'A', ..., '26' -> 'Z').
def number_cypher(letter):
    """Return the alphabet letter for the 1-based number string `letter`."""
    import string
    # string.ascii_uppercase indexes exactly like the module-level `alphabet`
    # list, so behaviour (including IndexError for values > 26) is unchanged.
    return string.ascii_uppercase[int(letter) - 1]
def main():
    """Decode Numbers.txt (dash-separated numbers) and write output.txt.

    Digit runs are collected into `letter` and converted via number_cypher();
    dashes are consumed as separators, every other character is copied through.
    """
    with open("Numbers.txt", "r") as file:
        contents = file.read()
    contents = contents.upper()
    output = ""
    letter = ""
    for char in contents:
        # first collect the whole number
        if isNumber(char):
            letter = letter + char
        else:
            # Fix: the original used `letter is not ""` — an identity comparison
            # on str that only worked by CPython interning accident (and warns
            # on Python 3.8+). Truthiness is the correct test.
            if letter:
                output = output + number_cypher(letter)
            if not isDash(char):
                output = output + char
            letter = ""
    if letter:
        output = output + number_cypher(letter)
    print(output)
    with open("output.txt", "w") as file:
        file.write(output)
main() | [
"ian_smith_12a@mymts.net"
] | ian_smith_12a@mymts.net |
445dce17dcf61ccfcfaba0766ae79eafea84c864 | 5909a85db23b2dbe3da0ecf4460ee2d0ca784dbc | /v3surf/Lib/site-packages/buoyant/buoy.py | 5769cf175fa2abcb516265799aaf4788324e0ceb | [] | no_license | rafskov/flasksurfdiary1 | 56432e7836f44ce95c3d8f52a42f92a17c39ad5f | b593ec7626278fdb506dfa6f0bf32785005f8287 | refs/heads/master | 2022-12-12T03:52:58.930934 | 2020-03-21T05:15:47 | 2020-03-21T05:15:47 | 248,900,349 | 0 | 0 | null | 2022-12-08T03:51:06 | 2020-03-21T03:34:37 | Python | UTF-8 | Python | false | false | 7,032 | py | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import csv
import re
from io import BytesIO, StringIO
import requests
from pytz import utc
from . import properties, timezone
# Both take station as a GET argument.
OBS_ENDPOINT = "https://sdf.ndbc.noaa.gov/sos/server.php"  # NDBC SOS observation service
CAM_ENDPOINT = 'https://www.ndbc.noaa.gov/buoycam.php'  # buoy camera still images
'''
request=GetObservation
service=SOS
version=1.0.0
offering=urn:ioos:station:wmo:41012
observedproperty=air_pressure_at_sea_level
responseformat=text/csv
eventtime=latest
'''
def parse_unit(prop, dictionary, dt=None):
    '''Do a fuzzy match for `prop` in the dictionary, taking into account unit suffix.

    Returns an Observation (or list of Observations) when the matching column
    name carries a unit suffix like " (m)", a bare string/list when it does
    not, or None when no column matches or the value is empty.
    '''
    # add the observation's time
    # NOTE(review): the `dt` parameter is unconditionally overwritten here, so
    # callers cannot actually supply a time — confirm before removing it from
    # the signature.
    try:
        dt = timezone.parse_datetime(dictionary.get('date_time'))
    except TypeError:
        dt = None
    # 'prop' is a stub of the property's attribute key, so search for matches
    matches = [k for k in dictionary.keys() if prop in k]
    try:
        value = dictionary[matches[0]]
        unit = re.search(r' \(([^)]+)\)', matches[0])
    except IndexError:
        # No matches: fail out
        return None
    # Sometimes we get a list of values (e.g. waves)
    if ';' in value:
        # Ignore empty values
        values = [val for val in value.split(';') if val != '']
        if unit:
            return [Observation(v, unit.group(1), dt) for v in values]
        else:
            return values
    # Sometimes there's no value! Sometimes there's no unit!
    if not value or not unit:
        return value or None
    return Observation(value, unit.group(1), dt)
def _degroup(iterable, propertylist):
    """Parse each row into a {property: parsed value} dict via parse_unit()."""
    parsed_rows = []
    for row in iterable:
        entry = {}
        for prop in propertylist:
            entry[prop] = parse_unit(prop, row)
        parsed_rows.append(entry)
    return parsed_rows
'''
Response looks like:
station_id,sensor_id,"latitude (degree)","longitude (degree)",date_time,"depth (m)","air_pressure_at_sea_level (hPa)"
urn:ioos:station:wmo:41012,urn:ioos:sensor:wmo:41012::baro1,30.04,-80.55,2014-02-19T12:50:00Z,0.00,1022.1
'''
class Buoy(object):
    '''Wrapper for the NDBC Buoy information mini-API.

    Observations are fetched lazily from the NDBC SOS endpoint and cached on
    the instance; call refresh() to drop the cache.
    '''
    # NOTE(review): a class-level __dict__ assignment is unusual; instances
    # replace their __dict__ wholesale in refresh(), which is what matters here.
    __dict__ = {}

    # Base SOS query parameters shared by every request (class attribute).
    params = {
        'request': 'GetObservation',
        'service': 'SOS',
        'version': '1.0.0',
        'responseformat': 'text/csv',
    }

    def __init__(self, bouyid, eventtime=None):
        # `bouyid` is the WMO station id (e.g. 41012); `eventtime` may be a
        # datetime (normalised to UTC ISO format) or omitted for the latest.
        self.id = bouyid
        self.refresh()
        if eventtime:
            if eventtime.tzinfo:
                eventtime = eventtime.astimezone(utc)
            eventtime = timezone.iso_format(eventtime)
        self.eventtime = eventtime or 'latest'

    def refresh(self):
        '''Discard all cached observations and station metadata.'''
        self.__dict__ = {
            'lat': None,
            'lon': None,
            'datetime': None,
        }

    def _get(self, observation, as_group=None):
        # Fix: the previous `setdefault(..., self.fetch(...))` evaluated fetch()
        # — a full network round-trip — on every call, even on cache hits.
        # Only fetch when the observation is not cached yet.
        if observation not in self.__dict__:
            self.__dict__[observation] = self.fetch(observation, as_group)
        return self.__dict__[observation]

    def fetch(self, observation, as_group=None):
        '''Request one observed property from the SOS endpoint and parse it.

        Raises AttributeError when the service reports an error or returns
        no data rows, so missing observations look like missing attributes.
        '''
        params = {
            'offering': 'urn:ioos:station:wmo:{}'.format(self.id),
            'observedproperty': observation,
            'eventtime': self.eventtime
        }
        params.update(self.params)
        request = requests.get(OBS_ENDPOINT, params=params)
        try:
            reader = csv.DictReader(StringIO(request.text))
            if as_group:
                return _degroup(reader, getattr(properties, observation))
            else:
                result = next(reader)
                if 'ows:ExceptionReport' in str(result):
                    raise AttributeError(observation)
        except StopIteration:
            raise AttributeError(observation)
        # Cache station metadata carried on every response row.
        self.__dict__['station_id'] = result.get('station_id')
        self.__dict__['sensor_id'] = result.get('sensor_id')
        try:
            self.__dict__['lon'] = float(result.get('longitude (degree)'))
            self.__dict__['lat'] = float(result.get('latitude (degree)'))
        except TypeError:
            self.__dict__['lon'], self.__dict__['lat'] = None, None
        self.__dict__['depth'] = parse_unit('depth', result)
        return parse_unit(observation, result)

    @property
    def air_pressure_at_sea_level(self):
        return self._get('air_pressure_at_sea_level')

    @property
    def air_temperature(self):
        return self._get('air_temperature')

    @property
    def currents(self):
        try:
            return self._get('currents', as_group=True)
        except IndexError:
            pass

    @property
    def sea_floor_depth_below_sea_surface(self):
        return self._get('sea_floor_depth_below_sea_surface')

    @property
    def sea_water_electrical_conductivity(self):
        return self._get('sea_water_electrical_conductivity')

    @property
    def sea_water_salinity(self):
        return self._get('sea_water_salinity')

    @property
    def sea_water_temperature(self):
        return self._get('sea_water_temperature')

    @property
    def waves(self):
        try:
            return self._get('waves', as_group=True)[0]
        except IndexError:
            pass

    @property
    def winds(self):
        try:
            return self._get('winds', as_group=True)[0]
        except IndexError:
            pass

    @property
    def image_url(self):
        '''URL of this station's buoycam still image.'''
        return '{0}?station={id}'.format(CAM_ENDPOINT, id=self.id)

    def _write_img(self, handle):
        # Stream the camera image into an open binary file-like object.
        i = requests.get(CAM_ENDPOINT, params={'station': self.id})
        for chunk in i.iter_content():
            handle.write(chunk)

    @property
    def image(self):
        '''Camera image as an in-memory BytesIO, rewound to the start.'''
        output = BytesIO()
        self._write_img(output)
        output.seek(0)
        return output

    def save_image(self, filename):
        '''Write the camera image to `filename`.'''
        with open(filename, 'wb') as f:
            self._write_img(f)

    @property
    def coords(self):
        '''(lat, lon) tuple; both None until an observation has been fetched.'''
        return self.__dict__.get('lat'), self.__dict__.get('lon')

    @property
    def depth(self):
        return self.__dict__.get('depth')
class Observation(float):
    """A float annotated with a unit string and an optional observation time."""

    def __new__(cls, value, *args):
        # float is immutable, so the numeric value is fixed here; the extra
        # positional args are accepted and handled by __init__.
        return float.__new__(cls, value)

    def __init__(self, value, unit, datetime=None):
        self.value = value
        self._unit = unit
        self._datetime = datetime

    @property
    def unit(self):
        """The unit string this observation was recorded in."""
        return self._unit

    @property
    def datetime(self):
        """When the observation was taken (None if unknown)."""
        return self._datetime

    def __repr__(self):
        return f"Observation({float(self)}, '{self._unit}')"

    def __str__(self):
        return f"{float(self)} {self._unit}"
| [
"rskovron@gmail.com"
] | rskovron@gmail.com |
93da1323dcced8853a800e02af6b960aa14851cb | ad40f7dfa859d6d6033e5cee2c7f65479f5fd1ea | /Python Codes by NIYATI SINHA/app34.py | 0ab7f5efa604833aef6149ffa4ce109f87e3805d | [] | no_license | NiyatiSinha-yb/PYTHON-Codes-By-Niyati-Sinha | 97778b90e75c8d6071c5d49ebe956e6b6dae8f02 | 233f934ee4aa0a11b414d6fe0c6d43c7b916a74b | refs/heads/master | 2023-03-27T13:22:39.494873 | 2021-04-01T09:42:28 | 2021-04-01T09:42:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | for item in range(5):#for item in [0,1,2,3,4]
print(item)
# range function creates an object, its not a list but its a special kind of object that we can iterate over
# and with each iteration this object spit out a new no
print("_____")
for item in range(5,10):
print(item)
print("_____")
for any_name in range(5,10,2): #from 5 to 10 jump by 2 units # item is just a user given name to the object
print(any_name)# we can keep anyname instead of item as well | [
"niyati.sinha2999@gmail.com"
] | niyati.sinha2999@gmail.com |
f9380f7e2c34a2f3f43d1be2d550d2ef2660ec4a | 4e19cbed4595c98eb6ae9c764b7eb880ab4e8b36 | /trayectorias_parabolicas.py | 45e9098f031fd425518e70ccb1a8297372ad378f | [] | no_license | josueaguilar/SimParabolic | 74bd1a8b58e10bd8a3eae8ba2ebea84c0d241547 | 3bcda7d7c410e24f674d118cb93672bee808e4fa | refs/heads/master | 2022-10-15T15:10:03.045642 | 2020-06-13T22:35:02 | 2020-06-13T22:35:02 | 272,096,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,081 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 27 13:11:59 2019
@author: Josue
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 23 13:21:11 2019
@author: Josue
"""
#
import turtle as tur
import math
import numpy as np
#--- Calculo de la cinematica
def cinematica(theta, Vi, X, sampl, g):
    """Sample a projectile trajectory along its flight to horizontal distance X.

    theta: launch angle in degrees; Vi: initial speed (m/s); X: horizontal
    distance to cover (m); sampl: number of samples; g: gravity (negative,
    m/s^2). Returns (t, deltaX, deltaY) as numpy arrays.
    """
    angle = math.radians(theta)
    # Decompose the initial velocity into vertical and horizontal components.
    v_vertical = Vi * (math.sin(angle))
    v_horizontal = Vi * (math.cos(angle))
    # Total flight time needed to cover the horizontal distance X.
    total_time = X / v_horizontal
    step = total_time / sampl
    # Sampling instants: step, 2*step, ..., up to (and including) total_time.
    t = np.arange(step, total_time + step, step)
    # Horizontal displacement grows linearly; vertical follows v0*t + g*t^2/2.
    horizontal = v_horizontal * t
    vertical = (v_vertical * t) + ((0.5 * g) * (t * t))
    return t, horizontal, vertical
#----------Se comprueba si llega a cero la pelota
def midePique(X,sampl,deltaX,deltaY):
for i in range(0, len(deltaY)):
if deltaY[i] <= 0:
pique= i
dist_falt=X- deltaX[pique] #Distancia faltante
samp_falt= sampl - (pique+1) #muestras faltantes
pique+= 1 #para que
break
else:
pique= len(deltaY)-1
dist_falt=X- deltaX[pique] #Distancia faltante
samp_falt= sampl - (pique+1) #muestras faltantes
pique+= 1
return pique, dist_falt, samp_falt
def pintaEscenario():
    """Set up the turtle window and draw the pitch, the goal and the ball."""
    tur.setup(600, 600, 0, 0)
    tur.screensize(300, 150)
    tur.title("Simulador tiro parabolico")
    tur.hideturtle()
    tur.colormode(255)
    tur.pensize(3)
    tur.pencolor(0,255,0)
    # Grass line
    tur.penup()
    tur.goto(-250, 0)
    tur.pendown()
    tur.goto(220, 0)
    tur.pensize(5)
    tur.pencolor(0,0,0)
    # Goal posts
    tur.penup()
    tur.goto(220, 0)
    tur.pendown()
    tur.goto(220, 40)
    tur.goto(200,40)
    tur.goto(200,0)
    # Ball
    tur.penup()
    tur.goto(-250,0)
    tur.dot(10,0, 0, 0)
# Draw the scene, then run the bouncing-ball simulation below.
pintaEscenario()
#---------------------------------------
#----------- Kinematics parameters
#---------------------------------------
# Launch angle in degrees
theta= 60 #20
# Initial speed in m/s
Vi= 5 #15
# Goal-to-ball distance in metres
X= 11 #22
# Number of simulation samples
sampl= 50
# Gravity (negative = downwards)
g= -9.8
# Damping factor applied to the speed on every bounce
D= 0.2
# Compute the ball's initial trajectory
t, deltaX, deltaY= cinematica(theta,Vi,X,sampl,g)
# Locate the first bounce and how much distance/samples remain
pique,dist_falt,samp_falt= midePique(X,sampl,deltaX,deltaY)
Vi2=Vi
dist_falt2= dist_falt
samp_falt2=samp_falt
sumaPiques= pique
# Keep simulating damped rebounds until all remaining samples are filled.
while(samp_falt2>0):
    Vi2= Vi2*(1-D)
    t2, deltaX2, deltaY2= cinematica (theta,Vi2,dist_falt2,samp_falt2,g)
    pique2,dist_falt2,samp_falt2= midePique(deltaX2[len(deltaX2)-1],len(deltaX2),deltaX2,deltaY2)
    # Splice this rebound's heights into the main trajectory vector
    for i in range(0, (pique2)):
        deltaY[sumaPiques+i]=deltaY2[i]
    sumaPiques = sumaPiques+ pique2
    samp_falt2= sampl -sumaPiques
    dist_falt2=deltaX[0]*samp_falt2
#---------------------------------------
#---------------------------------------
#---------------------------------------
# Draw the trajectory
tur.pendown()
disTot = 450 # total ball-to-goal distance in pixels
posIni = -250 # starting x position in pixels
# Scale metres to pixels and plot each sample
for i in range(0, len(t)):
    ajusteX= int((deltaX[i] * disTot)/X) + posIni
    ajusteY= int((deltaY[i] * disTot)/X)
    if ajusteY >= -3: # only paint (near) non-negative heights
        tur.pencolor(0,0,0)
        tur.goto(ajusteX,ajusteY)
        tur.dot(10,0, 0, 0)
    #else:
        #tur.pencolor(255,255,255) # paint white
        #tur.goto(ajusteX,ajusteY)
        #tur.dot(10,0,0,0)
tur.done()
| [
"josue.aguilar.garrido@gmail.com"
] | josue.aguilar.garrido@gmail.com |
db4947dd7f21941b4aac995c4fe2285f661d7466 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy1448.py | ae49b0570c58881f82ac7b3f628b829ccd29533b | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,294 | py | # qubit number=5
# total number=51
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf for boolean function ``f`` on ``n`` qubits.

    For every n-bit string x with f(x) == "1", the circuit flips the phase
    of |x>: qubits that are '0' in x are conjugated with X gates around a
    multi-controlled phase gate mcu1(pi, ...) (equivalent to a multi-controlled Z).
    """
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    qubits = QuantumRegister(n, "ofc")
    circuit = QuantumCircuit(qubits, name="Zf")
    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) != "1":
            continue
        # Qubits whose bit is '0' must be X-conjugated so the marked state
        # becomes |11...1> for the controlled-phase below.
        flipped = [qubits[pos] for pos, bit in enumerate(bits) if bit == "0"]
        for qubit in flipped:
            circuit.x(qubit)
        # circuit.h(qubits[n])
        if n >= 2:
            circuit.mcu1(pi, qubits[1:], qubits[0])
        for qubit in flipped:
            circuit.x(qubit)
    # circuit.barrier()
    return circuit
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the full Grover-style search circuit for oracle function f.

    NOTE(review): this file looks auto-generated/mutated (the ``# number=NN``
    trailers are generator gate counters, and several interleaved single-qubit
    gates deviate from textbook Grover) -- do not "clean up" the gate sequence
    without consulting the generator; the exact order IS the behavior.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")  # classical bits for the final measurement
    prog = QuantumCircuit(input_qubit, classical)
    # Initial state preparation (Hadamards) plus generator-inserted mutations.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[1]) # number=26
    prog.cz(input_qubit[4],input_qubit[1]) # number=27
    prog.h(input_qubit[1]) # number=28
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    prog.h(input_qubit[1]) # number=34
    prog.cz(input_qubit[4],input_qubit[1]) # number=35
    prog.z(input_qubit[4]) # number=46
    prog.rx(0.8011061266653969,input_qubit[2]) # number=37
    prog.h(input_qubit[1]) # number=36
    Zf = build_oracle(n, f)
    # Standard Grover iteration count: floor(pi/4 * sqrt(2^n)).
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        # NOTE(review): the comprehension variable shadows the loop's `i`;
        # harmless here but confusing -- generator artifact.
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        # Diffusion-operator-like block (with mutations), repeated each iteration.
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.h(input_qubit[0]) # number=48
        prog.cz(input_qubit[1],input_qubit[0]) # number=49
        prog.h(input_qubit[0]) # number=50
        prog.x(input_qubit[0]) # number=39
        prog.cx(input_qubit[1],input_qubit[0]) # number=40
        prog.cx(input_qubit[0],input_qubit[1]) # number=42
        prog.x(input_qubit[1]) # number=43
        prog.cx(input_qubit[0],input_qubit[1]) # number=44
        prog.x(input_qubit[2]) # number=11
        prog.y(input_qubit[1]) # number=45
        prog.x(input_qubit[3]) # number=12
        prog.h(input_qubit[2]) # number=41
        if n>=2:
            # Multi-controlled phase(pi) = multi-controlled Z on qubit 0.
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0]) # number=22
        prog.x(input_qubit[4]) # number=47
        prog.x(input_qubit[0]) # number=23
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.cx(input_qubit[0],input_qubit[1]) # number=30
        prog.x(input_qubit[1]) # number=31
        prog.cx(input_qubit[0],input_qubit[1]) # number=32
        prog.x(input_qubit[2]) # number=15
        prog.h(input_qubit[4]) # number=29
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
    # circuit end
    # Measure every qubit into its matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Marked item for the Grover search: f(x) = "1" exactly when x == key.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Noisy mock backend (FakeVigo) for both execution and transpilation.
    # FIX(review): the original constructed FakeVigo() twice; one instance
    # serves both purposes.
    backend = FakeVigo()
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # FIX(review): use a context manager so the CSV file is closed even if
    # one of the prints raises (original used open()/close()).
    with open("../data/startQiskit_noisy1448.csv","w") as writefile:
        print(info,file=writefile)
        print("results end", file=writefile)
        print(circuit1.depth(),file=writefile)
        print(circuit1,file=writefile)
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
dd9f3feb5a59ede4b0691cb43f723fa131995daa | 348ec96c777a995f1b722b53d530c4867d9d6989 | /dao/dao/ayurveda_config.py | 6a7cf9e52d94d7505dd2879def24b77a050cda75 | [] | no_license | Ravall/daodiet | efbdcf4e7d92dbc36770da150fa92df712af3e0d | db233a9f643b82bda7f17dcd75616534a1b84f50 | refs/heads/master | 2020-05-20T11:30:47.539137 | 2013-05-20T14:21:37 | 2013-05-20T14:21:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | # -*- coding: utf-8 -*-
from datetime import time

# Ayurvedic daily-routine time constants.
# All values are naive datetime.time instances; two-element tuples are
# (earliest, latest) windows.

# Morning wake-up time.
WAKEUP_TIME = time(6, 0)
# Breakfast window.
BREAKFAST_TIME = (time(7, 0), time(9, 0))
# Lunch window.
LUNCH_TIME = (time(11, 0), time(14, 0))
# Midday reference point.  NOTE(review): name suggests a noon rise/rest
# marker -- confirm the intended meaning with callers.
WAKEUP_NOON = time(12, 0)
# Dinner window.
DINNER_TIME = (time(17, 0), time(19, 0))
# Bedtime.
SLEEP_TIME = time(22, 0)
| [
"valery.ravall@gmail.com"
] | valery.ravall@gmail.com |
15fc4653908e3156a68b8e3e0f193240883ea77a | ba5502b3df3cf4755f7d4b8c2eb40eb9fb467a35 | /btcturk-examples.py | 0b9d2907c1dcf38cd1b30e86973c5b283490a036 | [] | no_license | onurgozupek/btcturkpro-python | 8f67e277fd72f400fce3e5cc910fd707aaadf88b | 8e3da7b898758bf53a0aeb2b1ac2cc9221ae37e8 | refs/heads/master | 2021-02-15T00:25:29.659712 | 2021-01-07T05:36:59 | 2021-01-07T05:36:59 | 244,848,404 | 6 | 1 | null | 2021-01-07T05:37:00 | 2020-03-04T08:38:31 | Python | UTF-8 | Python | false | false | 1,351 | py | import btcturk-python
# Example usage of the btcturk helper functions.
# NOTE(review): the file's original import line was `import btcturk-python`,
# which is a SyntaxError (module names cannot contain hyphens); the helpers
# used below (createOrder, checkBalances, deleteOrder, checkOpenOrders,
# orderBook, userTransactions) must be imported from a validly named module.

# --- Create Order Example ---
pairSymbol = "BTC_TRY"
price = "53000"
quantity = "0.01"
orderType = "buy"
orderMethod = "limit"
stopPrice = "0"
createOrder(pairSymbol , price , quantity , orderType , orderMethod , stopPrice)

# --- Check Balances Example ---
checkBalances(pairSymbol)

# --- Delete Order Example ---
orderId = 1234567890
deleteOrder(orderId)

# --- Check Open Orders Example ---
checkOpenOrders(pairSymbol)

# --- Print OrderBook (last 10 orders) Example ---
# FIX(review): the original lines ended with a stray ':' (`orderBook(...):`),
# a SyntaxError; the colons are removed here and below.
orderBook(pairSymbol, 10, 1)

# --- Print OrderBook (last 30 orders) Example ---
orderBook(pairSymbol, 30, 1)

# --- Get User Transactions Example (buy orders) ---
numerator = "BTC"
denominator = "TRY"
orderType = "buy"
userTransactions(numerator, denominator, orderType)

# --- Get User Transactions Example (sell orders) ---
numerator = "BTC"
denominator = "TRY"
orderType = "sell"
userTransactions(numerator, denominator, orderType)

# --- Get User Transactions Example (all order types) ---
numerator = "BTC"
denominator = "TRY"
orderType = "all"
userTransactions(numerator, denominator, orderType)

# --- Get User Transactions Example (only crypto pair transactions) ---
# NOTE(review): the original never set `denominator` for this case, so the
# previous value ("TRY") is reused -- confirm against the API docs.
numerator = "crypto"
orderType = "all"
userTransactions(numerator, denominator, orderType)

# --- Get User Transactions Example (all pairs, both buy/sell) ---
numerator = "all"
orderType = "all"
userTransactions(numerator, denominator, orderType)
| [
"noreply@github.com"
] | noreply@github.com |
5e91bc42ff6c1921c12148e9ae79b7b64596285a | 438e77b35c1d9cfa0873da89fc1ad0567ff4179b | /Tools/Pendulum.py | 246b9c8445f75cdc83f22227a543550e02781922 | [
"MIT"
] | permissive | JordParma/Terminus | 3aa5b6fcaadab8798403f0bcf8b5f59be060531f | a44920c95b59cbbc45eb63e516131224be353e0a | refs/heads/master | 2020-07-23T10:32:56.753635 | 2017-12-19T20:44:07 | 2017-12-19T20:44:07 | 94,356,289 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | # -*- coding: utf-8 -*-
'''
Mass Spring Damper
'''
def double_pendulumn():
    """Simulate and animate a planar double pendulum with matplotlib.

    Integrates the equations of motion with scipy.integrate.odeint and shows
    a FuncAnimation of the two links.  NOTE(review): the trailing 'n' in the
    function name looks like a typo ("pendulumn"); kept because it is the
    public name called below.
    """
    # Double pendulum formula translated from the C code at
    # http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c
    from numpy import sin, cos, pi, array
    import numpy as np
    import matplotlib.pyplot as plt
    import scipy.integrate as integrate
    import matplotlib.animation as animation
    G = 9.8  # acceleration due to gravity, in m/s^2
    L1 = 1.0  # length of pendulum 1 in m
    L2 = 1.0  # length of pendulum 2 in m
    M1 = 1.0  # mass of pendulum 1 in kg
    M2 = 1.0  # mass of pendulum 2 in kg
    def derivs(state, t):
        # state = [theta1, omega1, theta2, omega2] (radians, rad/s);
        # returns the time derivative of the state vector.
        dydx = np.zeros_like(state)
        dydx[0] = state[1]
        del_ = state[2] - state[0]  # angle difference theta2 - theta1
        den1 = (M1 + M2) * L1 - M2 * L1 * cos(del_) * cos(del_)
        dydx[1] = (M2 * L1 * state[1] * state[1] * sin(del_) * cos(del_)
                   + M2 * G * sin(state[2]) * cos(del_) + M2 * L2 * state[3] * state[3] * sin(del_)
                   - (M1 + M2) * G * sin(state[0])) / den1
        dydx[2] = state[3]
        den2 = (L2 / L1) * den1
        dydx[3] = (-M2 * L2 * state[3] * state[3] * sin(del_) * cos(del_)
                   + (M1 + M2) * G * sin(state[0]) * cos(del_)
                   - (M1 + M2) * L1 * state[1] * state[1] * sin(del_)
                   - (M1 + M2) * G * sin(state[2])) / den2
        return dydx
    # create a time array from 0..20 sampled at 0.05 second steps
    # (original comment said 0..100 -- stale; the code integrates to t=20)
    dt = 0.05
    t = np.arange(0.0, 20, dt)
    # th1 and th2 are the initial angles (degrees)
    # w1 and w2 are the initial angular velocities (degrees per second)
    th1 = 120.0
    w1 = 0.0
    th2 = -10.0
    w2 = 0.0
    rad = pi / 180  # NOTE(review): unused -- the conversion is done inline below
    # initial state, converted from degrees to radians
    state = np.array([th1, w1, th2, w2]) * pi / 180.
    # integrate ODE using scipy.integrate.
    y = integrate.odeint(derivs, state, t)
    # Cartesian positions of the two bobs at every time step.
    x1 = L1 * sin(y[:, 0])
    y1 = -L1 * cos(y[:, 0])
    x2 = L2 * sin(y[:, 2]) + x1
    y2 = -L2 * cos(y[:, 2]) + y1
    fig = plt.figure()
    ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))
    ax.grid()
    line, = ax.plot([], [], 'o-', lw=2)
    time_template = 'time = %.1fs'
    time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
    def init():
        # Blank frame used by FuncAnimation before the animation starts.
        line.set_data([], [])
        time_text.set_text('')
        return line, time_text
    def animate(i):
        # Draw pivot -> bob1 -> bob2 for frame i and update the clock label.
        thisx = [0, x1[i], x2[i]]
        thisy = [0, y1[i], y2[i]]
        line.set_data(thisx, thisy)
        time_text.set_text(time_template % (i * dt))
        return line, time_text
    ani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)),
                                  interval=25, blit=True, init_func=init)
    # Save as mp4 file.
    # ani.save('double_pendulum.mp4', fps=15)
    plt.show()
if __name__ == '__main__':
    double_pendulumn()
| [
"johnnyfroehlich@gmail.com"
] | johnnyfroehlich@gmail.com |
f01a7b1bca0efc934505a42a9401c5c3639633ca | 930e0730dd1eb8802b07cf2a60dc2f285754c567 | /vae_v2.py | 608a24bdc3cbdb57e7bc0d65ec3a2e23e547be3c | [] | no_license | xiaoxianedwindu/ecg-multi-encoder | fb3f3d54d3d8dddd67f6bd3d38b84650d9982036 | c6a082f8e28af2706bdb5d994e741ae436ae17e9 | refs/heads/master | 2022-12-14T11:05:49.117357 | 2020-09-21T06:05:05 | 2020-09-21T06:05:05 | 296,981,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,287 | py | '''
VAE-2 script
'''
import keras
from keras.layers import Conv1D, Conv2DTranspose, Input, Flatten, Dense, Lambda, Reshape, Layer, LeakyReLU, UpSampling1D, AveragePooling1D, Activation
from keras.layers import BatchNormalization
from keras.models import Model
from keras.losses import binary_crossentropy
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import pad
from graph import ECG_model
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau, LearningRateScheduler
from utils import *
from config import get_config
from imblearn.over_sampling import SMOTE
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
def Conv1DTranspose(input_tensor, filters, kernel_size, strides=2, padding='same', activation='relu', name='conv12d'):
"""
input_tensor: tensor, with the shape (batch_size, time_steps, dims)
filters: int, output dimension, i.e. the output tensor will have the shape of (batch_size, time_steps, filters)
kernel_size: int, size of the convolution kernel
strides: int, convolution step size
padding: 'same' | 'valid'
"""
x = Lambda(lambda x: K.expand_dims(x, axis=2))(input_tensor)
x = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding, activation='relu', name = name)(x)
x = Lambda(lambda x: K.squeeze(x, axis=2))(x)
return x
class ReflectionPadding1D(Layer):
def __init__(self, padding=(64, 64), **kwargs):
self.padding = tuple(padding)
super(ReflectionPadding1D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[1] + self.padding[0] + self.padding[1]
def call(self, input_tensor, mask=None):
padding_left, padding_right = self.padding
return pad(input_tensor, [[0, 0], [padding_left, padding_right], [0, 0]], mode='REFLECT')
def get_config(self):
config = super(ReflectionPadding1D, self).get_config()
#print(config)
return config
class ReflectionPadding1D_decode(Layer):
def __init__(self, padding=(128, 128), **kwargs):
self.padding = tuple(padding)
super(ReflectionPadding1D_decode, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[1] + self.padding[0] + self.padding[1]
def call(self, input_tensor, mask=None):
padding_left, padding_right = self.padding
return pad(input_tensor, [[0, 0], [padding_left, padding_right], [0, 0]], mode='REFLECT')
def get_config(self):
config = super(ReflectionPadding1D_decode, self).get_config()
#print(config)
return config
# Load dataset
config = get_config()
(X,y) = loaddata_nosplit_scaled_std(config.input_size, config.feature)
classes = ['A', 'E', 'j', 'L', 'N', 'P', 'R', 'V']#['N','V','/','A','F','~']#,'L','R',f','j','E','a']#,'J','Q','e','S']
from sklearn.model_selection import train_test_split
X, Xval, y, yval = train_test_split(X, y, test_size=0.25, random_state=1)
if config.smote:
print(SMOTE)
sm = SMOTE(sampling_strategy = 'auto', random_state=12)
X, y = sm.fit_sample(X, y)
Xe = np.expand_dims(X, axis=2)
Xvale = np.expand_dims(Xval, axis=2)
import pandas as pd
y = np.array(pd.DataFrame(y).idxmax(axis=1))
yval = np.array(pd.DataFrame(yval).idxmax(axis=1))
target_train = y
target_test = yval
# Data & model configuration
batch_size = config.batch
no_epochs = config.ae_epochs
verbosity = 1
latent_dim = 2
num_channels = 1
# Reshape data
input_train = Xe
input_test = Xvale
input_shape = (config.input_size, 1)
# Parse numbers as floats
input_train = input_train.astype('float32')
input_test = input_test.astype('float32')
# # =================
# # Encoder
# # =================
kernel_size =16
s = 2
def encoder_conv_block(inputs, config):
kernel_size =16
s = 2
layer = Conv1D(filters=config.filter_length,
kernel_size=kernel_size,
padding='same',
strides=2,
kernel_initializer='he_normal',
activation=LeakyReLU(alpha=0.2))(inputs)
layer = ReflectionPadding1D()(layer)
layer = Conv1D(filters=config.filter_length,
kernel_size=kernel_size,
padding='same',
strides=2,
kernel_initializer='he_normal')(layer)
layer = Activation(LeakyReLU(alpha=0.2))(layer)
layer = ReflectionPadding1D()(layer)
layer = Conv1D(filters=config.filter_length,
kernel_size=kernel_size,
padding='same',
strides=2,
kernel_initializer='he_normal')(layer)
layer = Activation(LeakyReLU(alpha=0.2))(layer)
layer = ReflectionPadding1D()(layer)
layer = Conv1D(filters=config.filter_length,
kernel_size=kernel_size,
padding='same',
strides=2,
kernel_initializer='he_normal')(layer)
layer = Activation(LeakyReLU(alpha=0.2))(layer)
layer = ReflectionPadding1D()(layer)
return layer
def decoder_conv_block(inputs, config):
#layer = Dense(config.input_size* 32, activation =LeakyReLU(alpha=0.2))(inputs)
#layer = Reshape((config.input_size, 1))(layer)
kernel_size = 8
s = 2
layer = UpSampling1D(size=2)(inputs)
layer = Conv1D(filters=config.filter_length,
kernel_size=kernel_size,
padding='same',
strides=s,
kernel_initializer='he_normal',
)(layer)
layer = ReflectionPadding1D_decode()(layer)
layer = AveragePooling1D()(layer)
layer = Activation(LeakyReLU(alpha=0.2))(layer)
layer = UpSampling1D(size=2)(layer)
layer = Conv1D(filters=config.filter_length,
kernel_size=kernel_size,
padding='same',
strides=s,
kernel_initializer='he_normal',
)(layer)
layer = ReflectionPadding1D_decode()(layer)
layer = AveragePooling1D()(layer)
layer = Activation(LeakyReLU(alpha=0.2))(layer)
layer = UpSampling1D(size=2)(layer)
layer = Conv1D(filters=config.filter_length,
kernel_size=kernel_size,
padding='same',
strides=s,
kernel_initializer='he_normal',
)(layer)
layer = ReflectionPadding1D_decode()(layer)
layer = AveragePooling1D()(layer)
layer = Activation(LeakyReLU(alpha=0.2))(layer)
layer = UpSampling1D(size=2)(layer)
layer = Conv1D(filters=config.filter_length,
kernel_size=kernel_size,
padding='same',
strides=s,
kernel_initializer='he_normal',
)(layer)
layer = ReflectionPadding1D_decode()(layer)
layer = AveragePooling1D()(layer)
layer = Activation(LeakyReLU(alpha=0.2))(layer)
layer = UpSampling1D(size=2)(layer)
layer = Conv1D(filters=config.filter_length,
kernel_size=kernel_size,
padding='same',
strides=s,
kernel_initializer='he_normal',
)(layer)
from keras.layers.wrappers import TimeDistributed
layer = TimeDistributed(Dense(1, LeakyReLU(alpha=0.2)))(layer)
return layer
# Definition
i = Input(shape=input_shape, name='encoder_input')
layer = encoder_conv_block(i, config)
x = Flatten()(layer)
x = Dense(20, activation='relu')(x)
x = BatchNormalization()(x)
mu = Dense(latent_dim, name='latent_mu')(x)
sigma = Dense(latent_dim, name='latent_sigma')(x)
# Get Conv2D shape for Conv2DTranspose operation in decoder
conv_shape = K.int_shape(layer)
print(conv_shape)
# Define sampling with reparameterization trick
def sample_z(args):
mu, sigma = args
batch = K.shape(mu)[0]
dim = K.int_shape(mu)[1]
eps = K.random_normal(shape=(batch, dim))
return mu + K.exp(sigma / 2) * eps
# Use reparameterization trick
z = Lambda(sample_z, output_shape=(latent_dim, ), name='z')([mu, sigma])
# Instantiate encoder
encoder = Model(i, [mu, sigma, z], name='encoder')
encoder.summary()
# =================
# Decoder
# =================
# Definition
d_i = Input(shape=(latent_dim, ), name='decoder_input')
x = Dense(conv_shape[1] * conv_shape[2], activation='relu')(d_i)
x = BatchNormalization()(x)
x = Reshape((conv_shape[1], conv_shape[2]))(x)
o = decoder_conv_block(x, config)
# Instantiate decoder
decoder = Model(d_i, o, name='decoder')
decoder.summary()
# =================
# VAE as a whole
# =================
# Instantiate VAE
vae_outputs = decoder(encoder(i)[2])
vae = Model(i, vae_outputs, name='vae')
vae.summary()
# Define loss
def kl_reconstruction_loss(true, pred):
# Reconstruction loss
reconstruction_loss = binary_crossentropy(K.flatten(true), K.flatten(pred)) * 256
# KL divergence loss
kl_loss = 1 + sigma - K.square(mu) - K.exp(sigma)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
# Total loss = 50% rec + 50% KL divergence loss
return K.mean(reconstruction_loss + kl_loss)
# Compile VAE
vae.compile(optimizer='adam', loss=kl_reconstruction_loss)
# Train autoencoder
vae.fit(input_train, input_train, epochs = no_epochs, batch_size = batch_size, validation_data = (input_test, input_test))
#vae.fit(input_train, input_train, epochs = no_epochs, batch_size = batch_size, validation_split = validation_split)
# =================
# Results visualization
# Credits for original visualization code: https://keras.io/examples/variational_autoencoder_deconv/
# (François Chollet).
# Adapted to accomodate this VAE.
# =================
def viz_latent_space(encoder, data):
input_data, target_data = data
mu, _, _ = encoder.predict(input_data)
plt.figure(figsize=(8, 10))
scatter = plt.scatter(mu[:, 0], mu[:, 1], c=target_data)
plt.xlabel('z - dim 1')
plt.ylabel('z - dim 2')
plt.legend(handles = scatter.legend_elements()[0],labels= classes)
plt.show()
def plot_some_signals(vae, data):
x_vae_pred = vae.predict(data)
from matplotlib import pyplot as plt
xaxis = np.arange(0,config.input_size)
for count in range(5):
plt.plot(xaxis, x_vae_pred[count])
plt.title("vae reconstructed beats")
plt.xlabel("beat length")
plt.ylabel("signal")
plt.show()
# Plot results
data = (input_test, target_test)
viz_latent_space(encoder, data)
plot_some_signals(vae, input_test)
Xde = vae.predict(input_train)
Xvalde = vae.predict(input_test)
target_train = np.array(pd.get_dummies(target_train))
target_test = np.array(pd.get_dummies(target_test))
(m, n) = target_train.shape
target_train = target_train.reshape((m, 1, n ))
(mvl, nvl) = target_test.shape
target_test = target_test.reshape((mvl, 1, nvl))
callbacks = [
EarlyStopping(patience = config.patience, verbose=1),
ReduceLROnPlateau(factor = 0.5, patience = 3, min_lr = 0.01, verbose=1),
TensorBoard( log_dir='./logs', histogram_freq=0, write_graph = True, write_grads=False, write_images=True),
ModelCheckpoint('models/{}-vae-2-latest.hdf5'.format(config.feature), monitor='val_loss', save_best_only=False, verbose=1, period=10)
]
initial_epoch = 0
model = ECG_model(config)
model.fit(Xde, target_train,
validation_data=(Xvalde, target_test),
epochs=config.epochs,
batch_size=config.batch,
callbacks=callbacks,
initial_epoch=initial_epoch)
print_results(config, model, Xvalde, target_test, classes, "vae-2-") | [
"2467121d@student.gla.ac.uk"
] | 2467121d@student.gla.ac.uk |
0d1890cdb76f474288b918b5475c39e94b3fe831 | c9af677534dffbc3f72e6df3db82c2323d303a79 | /FirstTest.py | 7a5035df2444f12d71d8ed143da0e79a51bf2076 | [] | no_license | simple565/GuiTest | 9af9cc48697d686c088a9189467eb0b7388abced | 531c1639fee82abc210999ce0f180d3280c6b642 | refs/heads/master | 2020-07-10T20:19:35.999058 | 2019-08-25T23:23:29 | 2019-08-25T23:23:29 | 204,361,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from appium import webdriver
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '8.1.0'
desired_caps['deviceName'] = 'cc8acaca'
desired_caps['appPackage'] = 'com.android.settings'
desired_caps['appActivity'] = '.Settings'
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
driver.find_element_by_class_name()
driver.find_element_by_xpath()
driver.find_elements_by_id()
driver.find_elements_by_xpath()
driver.find_elements_by_class_name()
| [
"467914885@qq.com"
] | 467914885@qq.com |
de87b1312d042de25ad09e1c1273fad0b0bc68a4 | fa4b2b4ce915b4e58737f65efe7d18d1f45cbe27 | /home/migrations/0001_initial.py | 6f7e2af882d3cea72915015cbf036f11d7df263e | [] | no_license | Wishez/cosmeticsyou-v2.0 | 0fde09158944415b2471cb07dcf1e2cd1df85923 | a0f6a1b11622cb36a5084781ad35f4eed2778f66 | refs/heads/master | 2022-12-26T12:47:53.693887 | 2020-10-12T20:22:30 | 2020-10-12T20:27:54 | 293,092,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,946 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-10 15:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Callback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('callback_name', models.CharField(max_length=30, verbose_name=b'\xd0\x98\xd0\xbc\xd1\x8f')),
('callback_phone', models.CharField(max_length=30, verbose_name=b'\xd0\xa2\xd0\xb5\xd0\xbb\xd0\xb5\xd1\x84\xd0\xbe\xd0\xbd')),
('callback_message', models.TextField(max_length=250, verbose_name=b'\xd0\x9a\xd0\xbe\xd0\xbc\xd0\xbc\xd0\xb5\xd0\xbd\xd1\x82\xd0\xb0\xd1\x80\xd0\xb8\xd0\xb9')),
],
options={
'verbose_name': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0439 \u0432\u044b\u0437\u043e\u0432',
'verbose_name_plural': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0435 \u0432\u044b\u0437\u043e\u0432\u044b',
},
),
migrations.CreateModel(
name='Program',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.FileField(blank=True, null=True, upload_to=b'uploads/program/', verbose_name=b'\xd0\x98\xd0\xb7\xd0\xbe\xd0\xb1\xd1\x80\xd0\xb0\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xbf\xd1\x80\xd0\xbe\xd0\xb3\xd1\x80\xd0\xb0\xd0\xbc\xd0\xbc\xd1\x8b')),
('title', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x97\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba')),
('p', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x9f\xd0\xb0\xd1\x80\xd0\xb0\xd0\xb3\xd1\x80\xd0\xb0\xd1\x84 \xd0\xbf\xd0\xbe\xd0\xb4 \xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xba\xd0\xbe\xd0\xbc')),
('offer_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x9f\xd1\x80\xd0\xb5\xd0\xb4\xd0\xbb\xd0\xbe\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 1 (\xd0\xbf\xd0\xbe\xd0\xb4-\xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba)')),
('action_1_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_1_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_1_3', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_1_4', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('offer_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x9f\xd1\x80\xd0\xb5\xd0\xb4\xd0\xbb\xd0\xbe\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 (\xd0\xbf\xd0\xbe\xd0\xb4-\xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba)')),
('action_2_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_2_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_3', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_4', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_5', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_2_6', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_7', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_8', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_9', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_2_10', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_11', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_12', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('offer_3', models.CharField(blank=True, max_length=300, null=True, verbose_name=b'\xd0\x9f\xd1\x80\xd0\xb5\xd0\xb4\xd0\xbb\xd0\xbe\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 3 (\xd0\xbf\xd0\xbe\xd0\xb4-\xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba)')),
('action_3_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_3_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_3', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_4', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_5', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_3_6', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_7', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_8', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_9', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_3_10', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_11', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_12', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('offer_4', models.CharField(blank=True, max_length=300, null=True, verbose_name=b'\xd0\x9f\xd1\x80\xd0\xb5\xd0\xb4\xd0\xbb\xd0\xbe\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 4 (\xd0\xbf\xd0\xbe\xd0\xb4-\xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba)')),
('action_4_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_4_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_3', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_4', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_5', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_4_6', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_7', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_8', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_9', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_4_10', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_11', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_12', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
],
options={
'verbose_name': '\u0421\u0442\u0430\u0440\u0442\u043e\u0432\u0430\u044f \u043f\u0440\u043e\u0433\u0440\u0430\u043c\u043c\u0430',
'verbose_name_plural': '\u0421\u0442\u0430\u0440\u0442\u043e\u0432\u044b\u0435 \u043f\u0440\u043e\u0433\u0440\u0430\u043c\u043c\u044b',
},
),
migrations.CreateModel(
name='Slider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slide_1', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
('slide_2', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
('slide_3', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
('slide_4', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
('slide_5', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
],
options={
'verbose_name': '\u0421\u043b\u0430\u0439\u0434\u0435\u0440',
'verbose_name_plural': '\u0421\u043b\u0430\u0439\u0434\u044b',
},
),
]
| [
"shiningfinger@list.ru"
] | shiningfinger@list.ru |
247c913d852cbd4106982180f4eae2ce09706d8f | fa38f2f3ab4d1935e3a856eaa53729635f964034 | /wxpy_lenRec/04-askShowURL.py | 0ceda8427c856074668a79603e28a0c342ea7efb | [] | no_license | PlantainZ/MTCNNfaceRgnz_gratuationDesign | da5441ed8d5b862c801b9af57a7d7b1d0ee7e4ee | e64b1884ae788e9eb7e087ed001d8621d261ccca | refs/heads/master | 2020-05-24T15:36:35.564950 | 2019-05-18T08:52:53 | 2019-05-18T08:52:53 | 187,335,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | #codeing=utf-8
import wx
import wx.html2
class MyBrowser(wx.Dialog):
    """A minimal dialog that embeds a wx.html2.WebView browser widget."""

    def __init__(self, *args, **kwds):
        """Build the dialog: a single WebView filling the whole client area."""
        wx.Dialog.__init__(self, *args, **kwds)
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.browser = wx.html2.WebView.New(self)
        sizer.Add(self.browser, 1, wx.EXPAND, 10)
        self.SetSizer(sizer)
        self.SetSize((700, 700))

    def askURL(self):
        """Prompt the user for a URL and load it into the embedded browser.

        Returns the result of WebView.LoadURL on confirmation, or None when
        the user cancels the prompt.
        """
        dlg = wx.TextEntryDialog(self, 'Enter a URL', 'HTMLWindow')
        try:
            if dlg.ShowModal() == wx.ID_OK:
                return self.browser.LoadURL(dlg.GetValue())
        finally:
            # Fix: modal dialogs are not destroyed automatically; the original
            # code never called Destroy(), leaking the native window each call.
            dlg.Destroy()
if __name__ == '__main__':
    # Stand-alone demo: create the browser dialog and ask the user for a URL.
    app = wx.App()
    dialog = MyBrowser(None, -1)
    dialog.askURL()
    # dialog.browser.LoadURL("https://www.baidu.com/?tn=91960356_hao_pg")  # loads a page directly; to render an HTML string instead, use dialog.browser.SetPage(html_string, "")
    dialog.Show()
app.MainLoop() | [
"593525228@qq.com"
] | 593525228@qq.com |
f1852414a1506a6b10a1010751f35b8c44e0caba | 7104726233d98dd714a445f4f516bce954680f7f | /PuThresholdTuning/python/runForest_PbPb_MIX_75X_PUThresholdVarR020.py | 8fd43dffcd346d87d79593e728a23b559bab3f6a | [
"CC0-1.0"
] | permissive | mverwe/JetRecoValidation | 7b09dada9a797b0ccf39064bdbc801639a8dd229 | ee8b3fd94bac16390b367dc5030489738ff67958 | refs/heads/master | 2021-01-10T06:54:12.312670 | 2016-02-25T10:35:19 | 2016-02-25T10:35:19 | 43,553,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,822 | py | import FWCore.ParameterSet.Config as cms
# CMSSW job configuration building the "HiForest" heavy-ion analysis process.
process = cms.Process('HiForest')
process.options = cms.untracked.PSet(
    # wantSummary = cms.untracked.bool(True)
    #SkipEvent = cms.untracked.vstring('ProductNotFound')
)
################################################################################
# HiForest labelling info
################################################################################
process.load("HeavyIonsAnalysis.JetAnalysis.HiForest_cff")
process.HiForest.inputLines = cms.vstring("HiForest V3",)
import subprocess
# Record the git tag of the local CMSSW working area in the forest metadata so
# produced ntuples can be traced back to the exact code version.
version = subprocess.Popen(["(cd $CMSSW_BASE/src && git describe --tags)"], stdout=subprocess.PIPE, shell=True).stdout.read()
if version == '':
    version = 'no git info'
process.HiForest.HiForestVersion = cms.untracked.string(version)
################################################################################
# Input source
################################################################################
# Pyquen dijet (pt-hat 40) 5.02 TeV RECODEBUG sample with pileup mixed in.
process.source = cms.Source("PoolSource",
                            duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
                            fileNames = cms.untracked.vstring(
                                "/store/user/twang/Pyquen_DiJet_pt40_5020GeV_GEN_SIM_PU_20150813/Pyquen_DiJet_pt40_5020GeV_step3_RECODEBUG_20150813/3179e0200600a67eea51209589c07fdd/step3_RECODEBUG_RAW2DIGI_L1Reco_RECO_PU_100_1_ppt.root"
                                #"/store/relval/CMSSW_7_5_0_pre5/RelValPhotonJets_Pt_10_13_HI/GEN-SIM-RECO/MCHI2_75_V2-v2/00000/BAA0D4EC-AF0B-E511-95A6-02163E011865.root"
                            ))
#root://cmsxrootd.fnal.gov//
# Number of events we want to process, -1 = all events
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10))
#####################################################################################
# Load Global Tag, Geometry, etc.
#####################################################################################
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.Geometry.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.DigiToRaw_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
#process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
# PbPb 75X MC conditions (global tag); the commented toGet block below would
# override the centrality table from the conditions DB.
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '75X_mcRun2_HeavyIon_v5', '')
#process.GlobalTag.toGet.extend([
#    cms.PSet(record = cms.string("HeavyIonRcd"),
#        tag = cms.string("CentralityTable_HFtowers200_HydjetDrum5_v750x02_mc"),
#        connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS"),
#        label = cms.untracked.string("HFtowersHydjetDrum5")
#    ),
#])
from HeavyIonsAnalysis.Configuration.CommonFunctions_cff import *
#overrideGT_PbPb2760(process)
# Override jet-energy corrections with the PbPb 2.76 TeV set.
overrideJEC_PbPb2760(process)
process.load("RecoHI.HiCentralityAlgos.CentralityBin_cfi")
# Centrality binning: HF-towers variable with the HydjetDrum5 Glauber model.
process.centralityBin.Centrality = cms.InputTag("hiCentrality")
process.centralityBin.centralityVariable = cms.string("HFtowers")
process.centralityBin.nonDefaultGlauberModel = cms.string("HydjetDrum5")
################################################################################
# Define tree output
################################################################################
process.TFileService = cms.Service("TFileService",
                                   fileName=cms.string("HiForest.root"))
################################################################################
# Additional Reconstruction and Analysis: Main Body
################################################################################
#begin: MV edits
## PF jets: pileup-subtracted akPu R=0.2 PF jets, one analysis sequence per
## pileup-threshold variant (5, 10, 15, 20, 25).
process.load('HiRecoPFJets_PuThreshold_cff') ##creates sequence hiRecoPFJets
process.load('akPu2PFJetSequence5_cff')
process.load('akPu2PFJetSequence10_cff')
process.load('akPu2PFJetSequence15_cff')
process.load('akPu2PFJetSequence20_cff')
process.load('akPu2PFJetSequence25_cff')
process.jetSequencesPF = cms.Sequence(process.hiRecoPFJets2
                                      *process.akPu2PFJetSequence5
                                      +process.akPu2PFJetSequence10
                                      +process.akPu2PFJetSequence15
                                      +process.akPu2PFJetSequence20
                                      +process.akPu2PFJetSequence25
)
## Calo jets: same pattern for calorimeter jets, thresholds 2-10.
process.load('HiRecoCaloJets_PuThreshold_cff') ##creates sequence hiRecoCaloJets
process.load('akPu2CaloJetSequence2_cff')
process.load('akPu2CaloJetSequence4_cff')
process.load('akPu2CaloJetSequence6_cff')
process.load('akPu2CaloJetSequence8_cff')
process.load('akPu2CaloJetSequence10_cff')
process.jetSequencesCalo = cms.Sequence(process.hiRecoCaloJets2
                                        +process.akPu2CaloJetSequence2
                                        +process.akPu2CaloJetSequence4
                                        +process.akPu2CaloJetSequence6
                                        +process.akPu2CaloJetSequence8
                                        +process.akPu2CaloJetSequence10
)
#end: MV edits
process.load('HeavyIonsAnalysis.EventAnalysis.hievtanalyzer_mc_cfi')
process.hiEvtAnalyzer.doMC = cms.bool(False) #the gen info dataformat has changed in 73X, we need to update hiEvtAnalyzer code
process.load('HeavyIonsAnalysis.EventAnalysis.hltanalysis_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.HiGenAnalyzer_cfi')
#####################################################################################
# To be cleaned
process.load('HeavyIonsAnalysis.JetAnalysis.ExtraTrackReco_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_MC_cff')
process.load("HeavyIonsAnalysis.TrackAnalysis.METAnalyzer_cff")
process.load("HeavyIonsAnalysis.JetAnalysis.pfcandAnalyzer_cfi")
process.load('HeavyIonsAnalysis.JetAnalysis.rechitanalyzer_cfi')
process.rechitAna = cms.Sequence(process.rechitanalyzer+process.pfTowers)
# Keep charged PF candidates and apply no pT cut in the candidate tree.
process.pfcandAnalyzer.skipCharged = False
process.pfcandAnalyzer.pfPtMin = 0
#####################################################################################
#########################
# Track Analyzer
#########################
process.anaTrack.qualityStrings = cms.untracked.vstring(['highPurity','tight','loose'])
process.pixelTrack.qualityStrings = cms.untracked.vstring('highPurity')
process.hiTracks.cut = cms.string('quality("highPurity")')
# set track collection to iterative tracking
process.anaTrack.trackSrc = cms.InputTag("hiGeneralTracks")
# clusters missing in recodebug - to be resolved
process.anaTrack.doPFMatching = False
process.pixelTrack.doPFMatching = False
process.anaTrack.doSimVertex = True
process.anaTrack.doSimTrack = True
# process.ppTrack.fillSimTrack = True
process.load("SimTracker.TrackAssociation.trackingParticleRecoTrackAsssociation_cff")
process.tpRecoAssocGeneralTracks = process.trackingParticleRecoTrackAsssociation.clone()
process.tpRecoAssocGeneralTracks.label_tr = cms.InputTag("hiGeneralTracks")
process.quickTrackAssociatorByHits.ComponentName = cms.string('quickTrackAssociatorByHits')
#####################
# Photons
#####################
process.load('HeavyIonsAnalysis.PhotonAnalysis.ggHiNtuplizer_cfi')
process.ggHiNtuplizer.genParticleSrc = cms.InputTag("genParticles")
#####################
# muons (currently disabled)
######################
#process.load("HeavyIonsAnalysis.MuonAnalysis.hltMuTree_cfi")
#process.hltMuTree.doGen = cms.untracked.bool(True)
#process.load("RecoHI.HiMuonAlgos.HiRecoMuon_cff")
#process.muons.JetExtractorPSet.JetCollectionLabel = cms.InputTag("akVs3PFJets")
#process.globalMuons.TrackerCollectionLabel = "hiGeneralTracks"
#process.muons.TrackExtractorPSet.inputTrackCollection = "hiGeneralTracks"
#process.muons.inputCollectionLabels = ["hiGeneralTracks", "globalMuons", "standAloneMuons:UpdatedAtVtx", "tevMuons:firstHit", "tevMuons:picky", "tevMuons:dyt"]
# HYDJET RECO file didn't have ak2GenJets and ak6GenJets as input, so removed them
# and ran our own hiGenJets sequence
from RecoHI.HiJetAlgos.HiGenJets_cff import ak2HiGenJets, ak3HiGenJets, ak4HiGenJets
from RecoJets.Configuration.GenJetParticles_cff import genParticlesForJets
# Exclude neutrinos (PDG ids 12, 14, 16) from the gen-jet clustering inputs.
genParticlesForJets.ignoreParticleIDs += cms.vuint32( 12,14,16)
process.hiSelectGenJets = cms.Sequence(
    genParticlesForJets +
    ak2HiGenJets +
    ak3HiGenJets +
    ak4HiGenJets
)
process.anaTrack.doSimTrack = cms.untracked.bool(False)
process.HiGenParticleAna.genParticleSrc = cms.untracked.InputTag("genParticles")
process.load("GeneratorInterface.HiGenCommon.HeavyIon_cff")
# Main analysis path: gen-jet selection, jet reconstruction variants, photon /
# PF-candidate / rechit analyzers, the HiForest writer and the track analyzer.
process.ana_step = cms.Path(process.heavyIon*
                            process.hltanalysis *
                            #temp process.hltobject *
                            process.centralityBin *
                            process.hiEvtAnalyzer*
                            process.HiGenParticleAna*
                            #process.hiGenJetsCleaned*
                            process.quickTrackAssociatorByHits*
                            #process.tpRecoAssocGeneralTracks + #used in HiPFJetAnalyzer
                            process.hiSelectGenJets +
                            process.jetSequencesPF +
                            process.jetSequencesCalo +
                            process.ggHiNtuplizer +
                            process.pfcandAnalyzer +
                            process.rechitAna +
                            #temp process.hltMuTree +
                            process.HiForest +
                            # process.cutsTPForFak +
                            # process.cutsTPForEff +
                            process.anaTrack
                            #process.pixelTrack
)
# Additional event-selection / cleaning paths.
process.load('HeavyIonsAnalysis.JetAnalysis.EventSelection_cff')
process.phltJetHI = cms.Path( process.hltJetHI )
#process.pcollisionEventSelection = cms.Path(process.collisionEventSelection)
# process.pHBHENoiseFilter = cms.Path( process.HBHENoiseFilter ) #should be put back in later
#process.pHBHENoiseFilterResultProducer = cms.Path( process.HBHENoiseFilterResultProducer )
process.phfCoincFilter = cms.Path(process.hfCoincFilter )
process.phfCoincFilter3 = cms.Path(process.hfCoincFilter3 )
process.pprimaryVertexFilter = cms.Path(process.primaryVertexFilter )
#process.phltPixelClusterShapeFilter = cms.Path(process.siPixelRecHits*process.hltPixelClusterShapeFilter )
process.phiEcalRecHitSpikeFilter = cms.Path(process.hiEcalRecHitSpikeFilter )
process.pAna = cms.EndPath(process.skimanalysis)
# Customization
| [
"marta.verweij@cern.ch"
] | marta.verweij@cern.ch |
8921fb8ee476bb325fd7f6922aedb2688f7cf24b | 4cd08571b6139b503420ad91d49affbd5729abfb | /framework/executor/logic_form_util.py | 2e0f79cc7f710d9c37912401f0a3bcba16bfbf07 | [
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | PhoebusSi/rng-kbqa | 2609bed29fd0ca1dfe17f313f19fdc93d17ebfde | da7e655d337d6206b564bb82140082d52296f1a9 | refs/heads/main | 2023-08-21T16:47:11.208413 | 2021-10-12T19:11:54 | 2021-10-12T19:11:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,045 | py | import networkx as nx
from typing import List, Union
from collections import defaultdict
from pathlib import Path
from tqdm import tqdm
from executor.sparql_executor import execute_query
import json
REVERSE = True  # if REVERSE, then reverse relations are also taken into account for semantic EM
# Directory of this module; the ontology files are resolved relative to it.
path = str(Path(__file__).parent.absolute())
# Mapping relation -> its inverse relation (tab-separated, one pair per line).
reverse_properties = {}
with open(path + '/../ontology/reverse_properties', 'r') as f:
    for line in f:
        reverse_properties[line.split('\t')[0]] = line.split('\t')[1].replace('\n', '')
with open(path + '/../ontology/fb_roles', 'r') as f:
    content = f.readlines()
# relation_dr: relation -> (domain class, range class); relations: all relations.
relation_dr = {}
relations = set()
for line in content:
    fields = line.split()
    relation_dr[fields[1]] = (fields[0], fields[2])
    relations.add(fields[1])
with open(path + '/../ontology/fb_types', 'r') as f:
    content = f.readlines()
# upper_types: class -> set of related (presumably super-) types from fb_types
# (TODO confirm file format); types: all known classes.
upper_types = defaultdict(lambda: set())
types = set()
for line in content:
    fields = line.split()
    upper_types[fields[0]].add(fields[2])
    types.add(fields[0])
    types.add(fields[2])
# Surface forms of the comparison operators used when serializing to lisp.
function_map = {'le': '<=', 'ge': '>=', 'lt': '<', 'gt': '>'}
def lisp_to_nested_expression(lisp_string):
    """Parse a lisp-style logical form into nested Python lists.

    For example, "(count (division first))" becomes
    ['count', ['division', 'first']].
    """
    enclosing_stack: List = []
    current: List = []
    for token in lisp_string.split():
        # Each leading '(' opens a new nested list.
        while token.startswith('('):
            child: List = []
            current.append(child)
            enclosing_stack.append(current)
            current = child
            token = token[1:]
        # Count (and strip) trailing ')' before storing the symbol.
        closing = 0
        while token.endswith(')'):
            token = token[:-1]
            closing += 1
        current.append(token.replace(')', ''))
        # Each trailing ')' closes the innermost open list.
        for _ in range(closing):
            current = enclosing_stack.pop()
    return current[0]
def get_symbol_type(symbol: str) -> int:
    """Classify a logical-form symbol.

    Returns 2 for typed literals (containing '^^'), 3 for known classes,
    4 for known relations, and 1 for any other non-empty string (treated
    as an entity).  An empty string matches no branch and yields None.
    """
    if '^^' in symbol:
        return 2
    if symbol in types:
        return 3
    if symbol in relations:
        return 4
    if symbol:
        return 1
def same_logical_form(form1: str, form2: str) -> bool:
    """Decide whether two lisp logical forms are semantically equivalent.

    Both forms are parsed and converted to graphs; equivalence is then tested
    as graph isomorphism, where node ids/types/annotations must match and the
    multiset of relation labels on parallel edges must agree.  Forms that
    contain the @@UNKNOWN@@ placeholder or fail to parse compare as unequal.
    """
    if form1.__contains__("@@UNKNOWN@@") or form2.__contains__("@@UNKNOWN@@"):
        return False
    try:
        G1 = logical_form_to_graph(lisp_to_nested_expression(form1))
    except Exception:
        return False
    try:
        G2 = logical_form_to_graph(lisp_to_nested_expression(form2))
    except Exception:
        return False

    def node_match(n1, n2):
        # Nodes match when id and type agree, and their aggregation function
        # and time-constraint annotations (defaulting to 'none') agree too.
        # NOTE(review): pop() removes those annotations from the graphs' node
        # attribute dicts as a side effect of the comparison.
        if n1['id'] == n2['id'] and n1['type'] == n2['type']:
            func1 = n1.pop('function', 'none')
            func2 = n2.pop('function', 'none')
            tc1 = n1.pop('tc', 'none')
            tc2 = n2.pop('tc', 'none')
            if func1 == func2 and tc1 == tc2:
                return True
            else:
                return False
            # if 'function' in n1 and 'function' in n2 and n1['function'] == n2['function']:
            #     return True
            # elif 'function' not in n1 and 'function' not in n2:
            #     return True
            # else:
            #     return False
        else:
            return False

    def multi_edge_match(e1, e2):
        # Parallel edges match when the sorted relation labels coincide.
        if len(e1) != len(e2):
            return False
        values1 = []
        values2 = []
        for v in e1.values():
            values1.append(v['relation'])
        for v in e2.values():
            values2.append(v['relation'])
        return sorted(values1) == sorted(values2)

    return nx.is_isomorphic(G1, G2, node_match=node_match, edge_match=multi_edge_match)
def logical_form_to_graph(expression: List) -> nx.MultiGraph:
    """Build the graph for *expression* and flag its question node.

    By construction in ``_get_graph`` the question node always carries the
    largest node id, i.e. the current size of the graph.
    """
    graph = _get_graph(expression)
    question_id = len(graph.nodes())
    graph.nodes[question_id]['question_node'] = 1
    return graph
def _get_graph(
        expression: List) -> nx.MultiGraph: # The id of question node is always the same as the size of the graph
    """Recursively convert a nested lisp expression into a MultiDiGraph.

    Invariant: after every call the root ("question node") of the built
    sub-graph carries the largest node id, i.e. len(G.nodes()).
    """
    if isinstance(expression, str):
        # Base case: a bare symbol becomes a one-node graph (entity/literal/
        # class) or, for a relation, a two-node domain->range graph.
        G = nx.MultiDiGraph()
        if get_symbol_type(expression) == 1:
            G.add_node(1, id=expression, type='entity')
        elif get_symbol_type(expression) == 2:
            G.add_node(1, id=expression, type='literal')
        elif get_symbol_type(expression) == 3:
            G.add_node(1, id=expression, type='class')
            # G.add_node(1, id="common.topic", type='class')
        elif get_symbol_type(expression) == 4:  # relation or attribute
            domain, rang = relation_dr[expression]
            G.add_node(1, id=rang, type='class')  # if it's an attribute, the type will be changed to literal in arg
            G.add_node(2, id=domain, type='class')
            G.add_edge(2, 1, relation=expression)

            if REVERSE:
                # Also add the inverse edge when a reverse property is known.
                if expression in reverse_properties:
                    G.add_edge(1, 2, relation=reverse_properties[expression])

        return G

    if expression[0] == 'R':
        # Reverse: flip the graph by mirroring node ids, which swaps the
        # question node with the other endpoint.
        G = _get_graph(expression[1])
        size = len(G.nodes())
        mapping = {}
        for n in G.nodes():
            mapping[n] = size - n + 1
        G = nx.relabel_nodes(G, mapping)
        return G

    elif expression[0] in ['JOIN', 'le', 'ge', 'lt', 'gt']:
        # JOIN / comparison: attach the graph of arg1 onto the question node
        # of arg2; comparisons additionally annotate the value node.
        G1 = _get_graph(expression=expression[1])
        G2 = _get_graph(expression=expression[2])

        size = len(G2.nodes())
        qn_id = size
        if G1.nodes[1]['type'] == G2.nodes[qn_id]['type'] == 'class':
            # Specialize the shared node to the more specific class.
            if G2.nodes[qn_id]['id'] in upper_types[G1.nodes[1]['id']]:
                G2.nodes[qn_id]['id'] = G1.nodes[1]['id']
            # G2.nodes[qn_id]['id'] = G1.nodes[1]['id']
        mapping = {}
        for n in G1.nodes():
            mapping[n] = n + size - 1
        G1 = nx.relabel_nodes(G1, mapping)
        G = nx.compose(G1, G2)

        if expression[0] != 'JOIN':
            G.nodes[1]['function'] = function_map[expression[0]]

        return G

    elif expression[0] == 'AND':
        # Intersection: merge the two question nodes into one.
        G1 = _get_graph(expression[1])
        G2 = _get_graph(expression[2])

        size1 = len(G1.nodes())
        size2 = len(G2.nodes())
        if G1.nodes[size1]['type'] == G2.nodes[size2]['type'] == 'class':
            # if G2.nodes[size2]['id'] in upper_types[G1.nodes[size1]['id']]:
            G2.nodes[size2]['id'] = G1.nodes[size1]['id']
            # IIRC, in nx.compose, for the same node, its information can be overwritten by its info in the second graph
            # So here for the AND function we force it to choose the type explicitly provided in the logical form
        mapping = {}
        for n in G1.nodes():
            mapping[n] = n + size2 - 1
        G1 = nx.relabel_nodes(G1, mapping)
        G2 = nx.relabel_nodes(G2, {size2: size1 + size2 - 1})
        G = nx.compose(G1, G2)

        return G

    elif expression[0] == 'COUNT':
        # Aggregation: annotate the question node with a count function.
        G = _get_graph(expression[1])
        size = len(G.nodes())
        G.nodes[size]['function'] = 'count'

        return G

    elif expression[0].__contains__('ARG'):
        # Superlative (ARGMIN/ARGMAX): arg2's root becomes the compared
        # literal (id 0), annotated with the lower-cased function name.
        G1 = _get_graph(expression[1])
        size1 = len(G1.nodes())
        G2 = _get_graph(expression[2])
        size2 = len(G2.nodes())
        # G2.nodes[1]['class'] = G2.nodes[1]['id']   # not sure whether this is needed for sparql
        G2.nodes[1]['id'] = 0
        G2.nodes[1]['type'] = 'literal'
        G2.nodes[1]['function'] = expression[0].lower()
        if G1.nodes[size1]['type'] == G2.nodes[size2]['type'] == 'class':
            # if G2.nodes[size2]['id'] in upper_types[G1.nodes[size1]['id']]:
            G2.nodes[size2]['id'] = G1.nodes[size1]['id']

        mapping = {}
        for n in G1.nodes():
            mapping[n] = n + size2 - 1
        G1 = nx.relabel_nodes(G1, mapping)
        G2 = nx.relabel_nodes(G2, {size2: size1 + size2 - 1})
        G = nx.compose(G1, G2)

        return G

    elif expression[0] == 'TC':
        # Time constraint: record (relation, year) on the question node.
        G = _get_graph(expression[1])
        size = len(G.nodes())
        G.nodes[size]['tc'] = (expression[2], expression[3])

        return G
def graph_to_logical_form(G, start, count: bool = False):
    """Serialize graph query *G* rooted at node *start* into a lisp string.

    When *count* is True the whole expression is wrapped in COUNT.
    """
    body = none_function(G, start)
    return f'(COUNT {body})' if count else body
def get_end_num(G, s):
    """Count, for node *s*, how many outgoing edges lead to each neighbour.

    Returns a mapping target-node -> multiplicity (parallel edges counted).
    For a directed graph G.edges equals G.out_edges, so incoming edges of
    *s* are not included.
    """
    counts = defaultdict(int)
    for _, target in G.edges(s):
        counts[target] += 1
    return counts
def set_visited(G, s, e, relation):
    """Mark every parallel edge s -> e labelled *relation* as visited."""
    parallel = get_end_num(G, s)[e]
    for key in range(parallel):
        edge_data = G.edges[s, e, key]
        if edge_data['relation'] == relation:
            edge_data['visited'] = True
def binary_nesting(function: str, elements: List[str], types_along_path=None) -> str:
    """Right-fold *elements* into nested binary applications of *function*.

    e.g. AND over ['a', 'b', 'c'] yields '(AND a (AND b c))'.  When
    *types_along_path* is given, the i-th type token is inserted right
    after the function name at nesting depth i.
    """
    if len(elements) < 2:
        print("error: binary function should have 2 parameters!")
    if not types_along_path:
        if len(elements) == 2:
            tail = elements[1]
        else:
            tail = binary_nesting(function, elements[1:])
        return f'({function} {elements[0]} {tail})'
    if len(elements) == 2:
        tail = elements[1]
    else:
        tail = binary_nesting(function, elements[1:], types_along_path[1:])
    return f'({function} {types_along_path[0]} {elements[0]} {tail})'
def count_function(G, start):
    """Serialize the subgraph rooted at *start* and wrap it in COUNT."""
    return f'(COUNT {none_function(G, start)})'
def none_function(G, start, arg_node=None, type_constraint=True):
    """Serialize the subgraph of *G* reachable from *start* into lisp.

    Edges are marked 'visited' (in both directions) as they are emitted so
    each relation is serialized exactly once; this function therefore
    mutates *G*.  When *arg_node* is given, the path from *start* to it is
    emitted first as an ARGMIN/ARGMAX clause and its edges are removed.
    *type_constraint* controls whether the question node's class is emitted
    as an extra AND clause.
    """
    if arg_node is not None:
        arg = G.nodes[arg_node]['function']
        path = list(nx.all_simple_paths(G, start, arg_node))
        assert len(path) == 1
        arg_clause = []
        # Collect the relation chain from start to the superlative node.
        for i in range(0, len(path[0]) - 1):
            edge = G.edges[path[0][i], path[0][i + 1], 0]
            if edge['reverse']:
                relation = '(R ' + edge['relation'] + ')'
            else:
                relation = edge['relation']
            arg_clause.append(relation)

        # Delete edges until the first node with out-degree > 2 is met
        # (conceptually it should be 1, but remember that edges are added in both directions)
        while i >= 0:
            flag = False
            if G.out_degree[path[0][i]] > 2:
                flag = True
            G.remove_edge(path[0][i], path[0][i + 1], 0)
            i -= 1
            if flag:
                break

        if len(arg_clause) > 1:
            arg_clause = binary_nesting(function='JOIN', elements=arg_clause)
            # arg_clause = ' '.join(arg_clause)
        else:
            arg_clause = arg_clause[0]

        return '(' + arg.upper() + ' ' + none_function(G, start) + ' ' + arg_clause + ')'

    # arg = -1
    # for nei in G[start]:
    #     if G.nodes[nei]['function'].__contains__('arg'):
    #         arg = nei
    #         arg_function = G.nodes[nei]['function']
    # if arg != -1:
    #     edge = G.edges[start, arg, 0]
    #     if edge['reverse']:
    #         relation = '(R ' + edge['relation'] + ')'
    #     else:
    #         relation = edge['relation']
    #     G.remove_edge(start, arg, 0)
    #     return '(' + arg_function.upper() + ' ' + none_function(G, start) + ' ' + relation + ')'

    # Leaf case: entities and literals serialize to their own id.
    if G.nodes[start]['type'] != 'class':
        return G.nodes[start]['id']

    end_num = get_end_num(G, start)
    clauses = []

    # Optionally emit the class of the question node as a type constraint.
    if G.nodes[start]['question'] and type_constraint:
        clauses.append(G.nodes[start]['id'])
    for key in end_num.keys():
        for i in range(0, end_num[key]):
            # Skip edges already serialized from the other direction.
            if not G.edges[start, key, i]['visited']:
                relation = G.edges[start, key, i]['relation']
                G.edges[start, key, i]['visited'] = True
                set_visited(G, key, start, relation)
                if G.edges[start, key, i]['reverse']:
                    relation = '(R ' + relation + ')'
                # Comparison-annotated neighbours become gt/ge/lt/le clauses,
                # everything else becomes a plain JOIN.
                if G.nodes[key]['function'].__contains__('<') or G.nodes[key]['function'].__contains__('>'):
                    if G.nodes[key]['function'] == '>':
                        clauses.append('(gt ' + relation + ' ' + none_function(G, key) + ')')
                    if G.nodes[key]['function'] == '>=':
                        clauses.append('(ge ' + relation + ' ' + none_function(G, key) + ')')
                    if G.nodes[key]['function'] == '<':
                        clauses.append('(lt ' + relation + ' ' + none_function(G, key) + ')')
                    if G.nodes[key]['function'] == '<=':
                        clauses.append('(le ' + relation + ' ' + none_function(G, key) + ')')
                else:
                    clauses.append('(JOIN ' + relation + ' ' + none_function(G, key) + ')')

    if len(clauses) == 0:
        return G.nodes[start]['id']

    if len(clauses) == 1:
        return clauses[0]
    else:
        return binary_nesting(function='AND', elements=clauses)
def get_lisp_from_graph_query(graph_query):
    """Convert a graph query dict (with 'nodes' and 'edges') into a lisp form.

    Each edge is inserted in both directions ('reverse' marks the inverse
    orientation).  The node flagged as question node becomes the
    serialization root; a 'count' function triggers COUNT aggregation and
    an 'arg*' function marks the superlative node handled by none_function.
    """
    G = nx.MultiDiGraph()
    aggregation = 'none'
    arg_node = None
    for node in graph_query['nodes']:
        # G.add_node(node['nid'], id=node['id'].replace('.', '/'), type=node['node_type'], question=node['question_node'], function=node['function'])
        G.add_node(node['nid'], id=node['id'], type=node['node_type'], question=node['question_node'],
                   function=node['function'], cla=node['class'])
        if node['question_node'] == 1:
            qid = node['nid']
        if node['function'] != 'none':
            aggregation = node['function']
            if node['function'].__contains__('arg'):
                arg_node = node['nid']
    for edge in graph_query['edges']:
        G.add_edge(edge['start'], edge['end'], relation=edge['relation'], reverse=False, visited=False)
        G.add_edge(edge['end'], edge['start'], relation=edge['relation'], reverse=True, visited=False)
    if 'count' == aggregation:
        # print(count_function(G, qid))
        return count_function(G, qid)
    else:
        # print(none_function(G, qid))
        return none_function(G, qid, arg_node=arg_node)
def lisp_to_sparql(lisp_program: str):
clauses = []
order_clauses = []
entities = set() # collect entites for filtering
# identical_variables = {} # key should be smaller than value, we will use small variable to replace large variable
identical_variables_r = {} # key should be larger than value
expression = lisp_to_nested_expression(lisp_program)
superlative = False
if expression[0] in ['ARGMAX', 'ARGMIN']:
superlative = True
# remove all joins in relation chain of an arg function. In another word, we will not use arg function as
# binary function here, instead, the arity depends on the number of relations in the second argument in the
# original function
if isinstance(expression[2], list):
def retrieve_relations(exp: list):
rtn = []
for element in exp:
if element == 'JOIN':
continue
elif isinstance(element, str):
rtn.append(element)
elif isinstance(element, list) and element[0] == 'R':
rtn.append(element)
elif isinstance(element, list) and element[0] == 'JOIN':
rtn.extend(retrieve_relations(element))
return rtn
relations = retrieve_relations(expression[2])
expression = expression[:2]
expression.extend(relations)
sub_programs = _linearize_lisp_expression(expression, [0])
question_var = len(sub_programs) - 1
count = False
def get_root(var: int):
while var in identical_variables_r:
var = identical_variables_r[var]
return var
for i, subp in enumerate(sub_programs):
i = str(i)
if subp[0] == 'JOIN':
if isinstance(subp[1], list): # R relation
if subp[2][:2] in ["m.", "g."]: # entity
clauses.append("ns:" + subp[2] + " ns:" + subp[1][1] + " ?x" + i + " .")
entities.add(subp[2])
elif subp[2][0] == '#': # variable
clauses.append("?x" + subp[2][1:] + " ns:" + subp[1][1] + " ?x" + i + " .")
else: # literal (actually I think literal can only be object)
if subp[2].__contains__('^^'):
data_type = subp[2].split("^^")[1].split("#")[1]
if data_type not in ['integer', 'float', 'dateTime']:
subp[2] = f'"{subp[2].split("^^")[0] + "-08:00"}"^^<{subp[2].split("^^")[1]}>'
# subp[2] = subp[2].split("^^")[0] + '-08:00^^' + subp[2].split("^^")[1]
else:
subp[2] = f'"{subp[2].split("^^")[0]}"^^<{subp[2].split("^^")[1]}>'
clauses.append(subp[2] + " ns:" + subp[1][1] + " ?x" + i + " .")
else:
if subp[2][:2] in ["m.", "g."]: # entity
clauses.append("?x" + i + " ns:" + subp[1] + " ns:" + subp[2] + " .")
entities.add(subp[2])
elif subp[2][0] == '#': # variable
clauses.append("?x" + i + " ns:" + subp[1] + " ?x" + subp[2][1:] + " .")
else: # literal
if subp[2].__contains__('^^'):
data_type = subp[2].split("^^")[1].split("#")[1]
if data_type not in ['integer', 'float', 'dateTime']:
subp[2] = f'"{subp[2].split("^^")[0] + "-08:00"}"^^<{subp[2].split("^^")[1]}>'
else:
subp[2] = f'"{subp[2].split("^^")[0]}"^^<{subp[2].split("^^")[1]}>'
clauses.append("?x" + i + " ns:" + subp[1] + " " + subp[2] + " .")
elif subp[0] == 'AND':
var1 = int(subp[2][1:])
rooti = get_root(int(i))
root1 = get_root(var1)
if rooti > root1:
identical_variables_r[rooti] = root1
else:
identical_variables_r[root1] = rooti
root1 = rooti
# identical_variables[var1] = int(i)
if subp[1][0] == "#":
var2 = int(subp[1][1:])
root2 = get_root(var2)
# identical_variables[var2] = int(i)
if root1 > root2:
# identical_variables[var2] = var1
identical_variables_r[root1] = root2
else:
# identical_variables[var1] = var2
identical_variables_r[root2] = root1
else: # 2nd argument is a class
clauses.append("?x" + i + " ns:type.object.type ns:" + subp[1] + " .")
elif subp[0] in ['le', 'lt', 'ge', 'gt']: # the 2nd can only be numerical value
clauses.append("?x" + i + " ns:" + subp[1] + " ?y" + i + " .")
if subp[0] == 'le':
op = "<="
elif subp[0] == 'lt':
op = "<"
elif subp[0] == 'ge':
op = ">="
else:
op = ">"
if subp[2].__contains__('^^'):
data_type = subp[2].split("^^")[1].split("#")[1]
if data_type not in ['integer', 'float', 'dateTime']:
subp[2] = f'"{subp[2].split("^^")[0] + "-08:00"}"^^<{subp[2].split("^^")[1]}>'
else:
subp[2] = f'"{subp[2].split("^^")[0]}"^^<{subp[2].split("^^")[1]}>'
clauses.append(f"FILTER (?y{i} {op} {subp[2]})")
elif subp[0] == 'TC':
var = int(subp[1][1:])
# identical_variables[var] = int(i)
rooti = get_root(int(i))
root_var = get_root(var)
if rooti > root_var:
identical_variables_r[rooti] = root_var
else:
identical_variables_r[root_var] = rooti
year = subp[3]
if year == 'NOW':
from_para = '"2015-08-10"^^xsd:dateTime'
to_para = '"2015-08-10"^^xsd:dateTime'
else:
from_para = f'"{year}-12-31"^^xsd:dateTime'
to_para = f'"{year}-01-01"^^xsd:dateTime'
clauses.append(f'FILTER(NOT EXISTS {{?x{i} ns:{subp[2]} ?sk0}} || ')
clauses.append(f'EXISTS {{?x{i} ns:{subp[2]} ?sk1 . ')
clauses.append(f'FILTER(xsd:datetime(?sk1) <= {from_para}) }})')
if subp[2][-4:] == "from":
clauses.append(f'FILTER(NOT EXISTS {{?x{i} ns:{subp[2][:-4] + "to"} ?sk2}} || ')
clauses.append(f'EXISTS {{?x{i} ns:{subp[2][:-4] + "to"} ?sk3 . ')
else: # from_date -> to_date
clauses.append(f'FILTER(NOT EXISTS {{?x{i} ns:{subp[2][:-9] + "to_date"} ?sk2}} || ')
clauses.append(f'EXISTS {{?x{i} ns:{subp[2][:-9] + "to_date"} ?sk3 . ')
clauses.append(f'FILTER(xsd:datetime(?sk3) >= {to_para}) }})')
elif subp[0] in ["ARGMIN", "ARGMAX"]:
superlative = True
if subp[1][0] == '#':
var = int(subp[1][1:])
rooti = get_root(int(i))
root_var = get_root(var)
# identical_variables[var] = int(i)
if rooti > root_var:
identical_variables_r[rooti] = root_var
else:
identical_variables_r[root_var] = rooti
else: # arg1 is class
clauses.append(f'?x{i} ns:type.object.type ns:{subp[1]} .')
if len(subp) == 3:
clauses.append(f'?x{i} ns:{subp[2]} ?sk0 .')
elif len(subp) > 3:
for j, relation in enumerate(subp[2:-1]):
if j == 0:
var0 = f'x{i}'
else:
var0 = f'c{j - 1}'
var1 = f'c{j}'
if isinstance(relation, list) and relation[0] == 'R':
clauses.append(f'?{var1} ns:{relation[1]} ?{var0} .')
else:
clauses.append(f'?{var0} ns:{relation} ?{var1} .')
clauses.append(f'?c{j} ns:{subp[-1]} ?sk0 .')
if subp[0] == 'ARGMIN':
order_clauses.append("ORDER BY ?sk0")
elif subp[0] == 'ARGMAX':
order_clauses.append("ORDER BY DESC(?sk0)")
order_clauses.append("LIMIT 1")
elif subp[0] == 'COUNT': # this is easy, since it can only be applied to the quesiton node
var = int(subp[1][1:])
root_var = get_root(var)
identical_variables_r[int(i)] = root_var # COUNT can only be the outtermost
count = True
# Merge identical variables
for i in range(len(clauses)):
for k in identical_variables_r:
clauses[i] = clauses[i].replace(f'?x{k} ', f'?x{get_root(k)} ')
question_var = get_root(question_var)
for i in range(len(clauses)):
clauses[i] = clauses[i].replace(f'?x{question_var} ', f'?x ')
if superlative:
arg_clauses = clauses[:]
for entity in entities:
clauses.append(f'FILTER (?x != ns:{entity})')
clauses.insert(0,
f"FILTER (!isLiteral(?x) OR lang(?x) = '' OR langMatches(lang(?x), 'en'))")
clauses.insert(0, "WHERE {")
if count:
clauses.insert(0, f"SELECT COUNT DISTINCT ?x")
elif superlative:
clauses.insert(0, "{SELECT ?sk0")
clauses = arg_clauses + clauses
clauses.insert(0, "WHERE {")
clauses.insert(0, f"SELECT DISTINCT ?x")
else:
clauses.insert(0, f"SELECT DISTINCT ?x")
clauses.insert(0, "PREFIX ns: <http://rdf.freebase.com/ns/>")
clauses.append('}')
clauses.extend(order_clauses)
if superlative:
clauses.append('}')
clauses.append('}')
# for clause in clauses:
# print(clause)
return '\n'.join(clauses)
def _linearize_lisp_expression(expression: list, sub_formula_id):
sub_formulas = []
for i, e in enumerate(expression):
if isinstance(e, list) and e[0] != 'R':
sub_formulas.extend(_linearize_lisp_expression(e, sub_formula_id))
expression[i] = '#' + str(sub_formula_id[0] - 1)
sub_formulas.append(expression)
sub_formula_id[0] += 1
return sub_formulas
# Stub conversion kept for API compatibility; full lambda-DCS support was
# never completed.
def lisp_to_lambda(expressions: Union[List[str], str]):  # from lisp-grammar formula to lambda DCS
    """Convert a lisp-style formula into a lambda-DCS string (partial).

    Atomic (non-list) expressions are returned unchanged.  Only the ``AND``
    and ``JOIN`` operators are handled; any other operator yields ``None``.
    """
    if not isinstance(expressions, list):
        return expressions
    operator = expressions[0]
    if operator == 'AND':
        return '{} AND {}'.format(lisp_to_lambda(expressions[1]),
                                  lisp_to_lambda(expressions[2]))
    if operator == 'JOIN':
        return '{}*{}'.format(lisp_to_lambda(expressions[1]),
                              lisp_to_lambda(expressions[2]))
| [
"semihyavuz9091@gmail.com"
] | semihyavuz9091@gmail.com |
ddc7b72d60621d0532fe559144dc283ce61f27ab | de2cc3f7be2fd6644d4990f25f0d725da273addc | /14.py | a16d3329aae9533a874ef4034dcd9569f15d924c | [] | no_license | ll996075dd/xuexi | 8972565fbe19666e3e186f7980e3a67638113ad0 | b2dbbab95d0f8ed9efc98085984e017ee67dbd91 | refs/heads/main | 2023-01-23T00:30:41.746088 | 2020-11-30T00:56:35 | 2020-11-30T00:56:35 | 307,001,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | '''给一个不多于5位的正整数,
要求:一、求它是几位数,二、逆序打印出各位数字。
程序分析:学会分解出每一位数。'''
def output(num, l):
    """Print the first ``l`` characters of ``num`` in reverse order.

    Each character is printed on its own line; ``l == 0`` prints nothing.
    """
    for position in range(l, 0, -1):
        print(num[position - 1])
# Script entry: read a positive integer of at most 5 digits (as a string),
# print its digits in reverse order, then report how many digits it has.
num = input("请输入一个不多于5位的正整数:")  # prompt: "enter a positive integer of at most 5 digits"
l = len(num)  # digit count of the typed number
output(num,l)  # print the digits in reverse, one per line
print('\n长度是为%d' %l)  # "the length is %d"
# recursive call (translated from the original Chinese comment: 递归调用)
"noreply@github.com"
] | noreply@github.com |
3ae943c05939e10eb7593fa7d5be7c5f831a76c5 | 19d47d47c9614dddcf2f8d744d883a90ade0ce82 | /pynsxt/swagger_client/models/aws_gateway_amis_list_result.py | 5882e38c27c689298103233daa9be4d54349462e | [] | no_license | darshanhuang1/pynsxt-1 | 9ed7c0da9b3a64e837a26cbbd8b228e811cee823 | fb1091dff1af7f8b8f01aec715682dea60765eb8 | refs/heads/master | 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,184 | py | # coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.aws_gateway_ami_info import AwsGatewayAmiInfo # noqa: F401,E501
from swagger_client.models.list_result import ListResult # noqa: F401,E501
from swagger_client.models.resource_link import ResourceLink # noqa: F401,E501
from swagger_client.models.self_resource_link import SelfResourceLink # noqa: F401,E501
class AwsGatewayAmisListResult(object):
    """Paged list of AWS gateway AMI records returned by the NSX API.

    Mirrors the swagger-generated model shape: ``swagger_types`` maps
    attribute names to their swagger type strings, and ``attribute_map``
    maps attribute names to the JSON keys used on the wire.
    """
    swagger_types = {
        '_self': 'SelfResourceLink',
        'links': 'list[ResourceLink]',
        'schema': 'str',
        'cursor': 'str',
        'sort_ascending': 'bool',
        'sort_by': 'str',
        'result_count': 'int',
        'results': 'list[AwsGatewayAmiInfo]'
    }
    attribute_map = {
        '_self': '_self',
        'links': '_links',
        'schema': '_schema',
        'cursor': 'cursor',
        'sort_ascending': 'sort_ascending',
        'sort_by': 'sort_by',
        'result_count': 'result_count',
        'results': 'results'
    }

    def __init__(self, _self=None, links=None, schema=None, cursor=None,
                 sort_ascending=None, sort_by=None, result_count=None,
                 results=None):
        """Create a list result; only non-None arguments are assigned."""
        self.__self = None
        self._links = None
        self._schema = None
        self._cursor = None
        self._sort_ascending = None
        self._sort_by = None
        self._result_count = None
        self._results = None
        self.discriminator = None
        # Route every supplied argument through its property setter.
        for attr_name, arg in (('_self', _self), ('links', links),
                               ('schema', schema), ('cursor', cursor),
                               ('sort_ascending', sort_ascending),
                               ('sort_by', sort_by),
                               ('result_count', result_count),
                               ('results', results)):
            if arg is not None:
                setattr(self, attr_name, arg)

    @property
    def _self(self):
        """SelfResourceLink describing this resource."""
        return self.__self

    @_self.setter
    def _self(self, value):
        self.__self = value

    @property
    def links(self):
        """Server-populated related links; ignored on PUT and POST."""
        return self._links

    @links.setter
    def links(self, value):
        self._links = value

    @property
    def schema(self):
        """Schema identifier for this resource."""
        return self._schema

    @schema.setter
    def schema(self, value):
        self._schema = value

    @property
    def cursor(self):
        """Opaque cursor for fetching the next page of records."""
        return self._cursor

    @cursor.setter
    def cursor(self, value):
        self._cursor = value

    @property
    def sort_ascending(self):
        """Whether results are sorted in ascending order."""
        return self._sort_ascending

    @sort_ascending.setter
    def sort_ascending(self, value):
        self._sort_ascending = value

    @property
    def sort_by(self):
        """Field by which records are sorted."""
        return self._sort_by

    @sort_by.setter
    def sort_by(self, value):
        self._sort_by = value

    @property
    def result_count(self):
        """Count of results across all pages; set only on the first page."""
        return self._result_count

    @result_count.setter
    def result_count(self, value):
        self._result_count = value

    @property
    def results(self):
        """List of AwsGatewayAmiInfo entries for this page."""
        return self._results

    @results.setter
    def results(self, value):
        self._results = value

    def to_dict(self):
        """Serialize the model to a plain dict, converting nested models."""
        def _convert(item):
            # Nested generated models serialize themselves; anything else
            # passes through untouched.
            return item.to_dict() if hasattr(item, 'to_dict') else item

        serialized = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [_convert(element) for element in value]
            elif hasattr(value, 'to_dict'):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {key: _convert(val)
                                    for key, val in value.items()}
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for display."""
        return self.to_str()

    def __eq__(self, other):
        """Two results are equal when they share both type and state."""
        return (isinstance(other, AwsGatewayAmisListResult)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not (self == other)
| [
"tcraft@pivotal.io"
] | tcraft@pivotal.io |
7c50e42927a96ef3468d56e0da76e5c012bd5630 | 0d49ae03b0dc382ffeabd1e8e63f9c35487e9db3 | /index.py | 9a266474dba0123017a7e8a015749e5fcf9cef8a | [] | no_license | Chaitanya009/Linear_Regression_scratch | c38a13acb2d8064ffe57914416e127474abecd04 | d0b7b4d7c6fbf00eec1ca2b22ab8140023ea1fbb | refs/heads/master | 2020-03-07T00:45:07.916547 | 2018-03-28T16:02:04 | 2018-03-28T16:02:04 | 127,165,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | from numpy import *
def compute_error_for_given_points(b, m, points):
    """Mean squared error of the line ``y = m*x + b`` over ``points``.

    ``points`` is an N x 2 array-like of (x, y) rows supporting 2-D
    indexing (e.g. a numpy array).
    """
    squared_error_sum = 0
    for row in range(len(points)):
        x, y = points[row, 0], points[row, 1]
        residual = y - (m * x + b)
        squared_error_sum += residual ** 2
    return squared_error_sum / float(len(points))
def step_gradient(b_current, m_current, points, learning_rate):
    """Perform one batch gradient-descent step for the line ``y = m*x + b``.

    Computes the gradient of the mean-squared-error cost with respect to the
    intercept ``b`` and slope ``m`` over all ``points`` (an N x 2 array of
    (x, y) rows), then returns ``[new_b, new_m]`` after stepping against the
    gradient scaled by ``learning_rate``.
    """
    b_gradient = 0
    m_gradient = 0
    N = float(len(points))
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        # d(cost)/db = -(2/N) * sum(y - (m*x + b))
        b_gradient += -(2/N) * (y - ((m_current * x) + b_current))
        # d(cost)/dm = -(2/N) * sum(x * (y - (m*x + b)))
        # BUG FIX: this term previously accumulated into b_gradient,
        # leaving m_gradient permanently zero so the slope never updated.
        m_gradient += -(2/N) * x * (y - ((m_current * x) + b_current))
    new_b = b_current - (learning_rate * b_gradient)
    new_m = m_current - (learning_rate * m_gradient)
    return [new_b, new_m]
def gradient_descent_runner(points, starting_b, starting_m, learning_rate, num_iterations):
    """Run ``num_iterations`` gradient-descent steps and return ``[b, m]``."""
    b, m = starting_b, starting_m
    for _ in range(num_iterations):
        b, m = step_gradient(b, m, array(points), learning_rate)
    return [b, m]
def run():
    """Fit a line to ``data.csv`` via batch gradient descent and report progress.

    Reads comma-separated (x, y) rows from ``data.csv`` in the working
    directory, starts from b = m = 0, and prints the error before and after
    ``num_iterations`` descent steps.
    """
    points = genfromtxt('data.csv', delimiter=',')
    # Hyperparameters.
    learning_rate = 0.0001
    # Model: y = m*x + b
    initial_b = 0
    initial_m = 0
    num_iterations = 1000
    [b, m] = gradient_descent_runner(points, initial_b, initial_m, learning_rate, num_iterations)
    # FIX: parenthesized print calls so the script runs on both Python 2
    # and Python 3 (the original Python-2-only statements are a SyntaxError
    # on Python 3).
    print("Starting gradient descent at b = {0}, m = {1}, error = {2}".format(initial_b, initial_m, compute_error_for_given_points(initial_b, initial_m, points)))
    print("Running...")
    print("After {0} iterations b = {1}, m = {2}, error = {3}".format(num_iterations, b, m, compute_error_for_given_points(b, m, points)))

if __name__ == '__main__':
    run()
"chaitanya.thakre009@gmail.com"
] | chaitanya.thakre009@gmail.com |
353ab642a8ae08763c52fbf98af5efa618985a9d | fb8cbebdf034b2f478943752d5443afc82c6eef5 | /tuirer/venv/lib/python3.6/site-packages/pygments/lexers/int_fiction.py | 1265072250a996322b6fafbc8a93654c2281faf8 | [] | no_license | fariasjr/CitiTuirer | f64e0ec93ef088f8140bb0961d2ad4ed3b59448a | deb3f7a9c2d45b8a7f54639037f097b99abdac11 | refs/heads/master | 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,826 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.int_fiction
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for interactive fiction languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import (RegexLexer, bygroups, default, include, this,
using, words)
from pygments.token import (Comment, Error, Generic, Keyword, Name, Number,
Operator, Punctuation, String, Text)
__all__ = ['Inform6Lexer', 'Inform6TemplateLexer', 'Inform7Lexer',
'Tads3Lexer']
class Inform6Lexer(RegexLexer):
    """
    For `Inform 6 <http://inform-fiction.org/>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'Inform 6'
    aliases = ['inform6', 'i6']
    filenames = ['*.inf']
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    # Pattern for a bare Inform 6 identifier; interpolated into many of the
    # token regexes below.
    _name = r'[a-zA-Z_]\w*'
    # Inform 7 maps these four character classes to their ASCII
    # equivalents. To support Inform 6 inclusions within Inform 7,
    # Inform6Lexer maps them too.
    _dash = u'\\-\u2010-\u2014'
    _dquote = u'"\u201c\u201d'
    _squote = u"'\u2018\u2019"
    _newline = u'\\n\u0085\u2028\u2029'
    # Each key below names a lexer state; entries are tried in order, and
    # actions may push ('#push', state names) or pop ('#pop') states to
    # track Inform 6's nested directive/statement/expression syntax.
    # NOTE(review): alternative ORDER within each state is significant —
    # do not reorder entries.
    tokens = {
        'root': [
            (r'\A(!%%[^%s]*[%s])+' % (_newline, _newline), Comment.Preproc,
             'directive'),
            default('directive')
        ],
        '_whitespace': [
            (r'\s+', Text),
            (r'![^%s]*' % _newline, Comment.Single)
        ],
        'default': [
            include('_whitespace'),
            (r'\[', Punctuation, 'many-values'),  # Array initialization
            (r':|(?=;)', Punctuation, '#pop'),
            (r'<', Punctuation),  # Second angle bracket in an action statement
            default(('expression', '_expression'))
        ],
        # Expressions
        '_expression': [
            include('_whitespace'),
            (r'(?=sp\b)', Text, '#pop'),
            (r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text,
             ('#pop', 'value')),
            (r'\+\+|[%s]{1,2}(?!>)|~~?' % _dash, Operator),
            (r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop')
        ],
        'expression': [
            include('_whitespace'),
            (r'\(', Punctuation, ('expression', '_expression')),
            (r'\)', Punctuation, '#pop'),
            (r'\[', Punctuation, ('#pop', 'statements', 'locals')),
            (r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation),
            (r'\+\+|[%s]{2}(?!>)' % _dash, Operator),
            (r',', Punctuation, '_expression'),
            (r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash,
             Operator, '_expression'),
            (r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word,
             '_expression'),
            (r'sp\b', Name),
            (r'\?~?', Name.Label, 'label?'),
            (r'[@{]', Error),
            default('#pop')
        ],
        '_assembly-expression': [
            (r'\(', Punctuation, ('#push', '_expression')),
            (r'[\[\]]', Punctuation),
            (r'[%s]>' % _dash, Punctuation, '_expression'),
            (r'sp\b', Keyword.Pseudo),
            (r';', Punctuation, '#pop:3'),
            include('expression')
        ],
        '_for-expression': [
            (r'\)', Punctuation, '#pop:2'),
            (r':', Punctuation, '#pop'),
            include('expression')
        ],
        '_keyword-expression': [
            (r'(from|near|to)\b', Keyword, '_expression'),
            include('expression')
        ],
        '_list-expression': [
            (r',', Punctuation, '#pop'),
            include('expression')
        ],
        '_object-expression': [
            (r'has\b', Keyword.Declaration, '#pop'),
            include('_list-expression')
        ],
        # Values
        'value': [
            include('_whitespace'),
            # Strings
            (r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'),
            (r'([%s])(@\{[0-9a-fA-F]{1,4}\})([%s])' % (_squote, _squote),
             bygroups(String.Char, String.Escape, String.Char), '#pop'),
            (r'([%s])(@.{2})([%s])' % (_squote, _squote),
             bygroups(String.Char, String.Escape, String.Char), '#pop'),
            (r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')),
            (r'[%s]' % _dquote, String.Double, ('#pop', 'string')),
            # Numbers
            (r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' % (_dash, _dash),
             Number.Float, '#pop'),
            (r'\$[0-9a-fA-F]+', Number.Hex, '#pop'),
            (r'\$\$[01]+', Number.Bin, '#pop'),
            (r'[0-9]+', Number.Integer, '#pop'),
            # Values prefixed by hashes
            (r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'),
            (r'(#g\$)(%s)' % _name,
             bygroups(Operator, Name.Variable.Global), '#pop'),
            (r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')),
            (r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'),
            (r'#', Name.Builtin, ('#pop', 'system-constant')),
            # System functions
            (words((
                'child', 'children', 'elder', 'eldest', 'glk', 'indirect', 'metaclass',
                'parent', 'random', 'sibling', 'younger', 'youngest'), suffix=r'\b'),
             Name.Builtin, '#pop'),
            # Metaclasses
            (r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'),
            # Veneer routines
            (words((
                'Box__Routine', 'CA__Pr', 'CDefArt', 'CInDefArt', 'Cl__Ms',
                'Copy__Primitive', 'CP__Tab', 'DA__Pr', 'DB__Pr', 'DefArt', 'Dynam__String',
                'EnglishNumber', 'Glk__Wrap', 'IA__Pr', 'IB__Pr', 'InDefArt', 'Main__',
                'Meta__class', 'OB__Move', 'OB__Remove', 'OC__Cl', 'OP__Pr', 'Print__Addr',
                'Print__PName', 'PrintShortName', 'RA__Pr', 'RA__Sc', 'RL__Pr', 'R_Process',
                'RT__ChG', 'RT__ChGt', 'RT__ChLDB', 'RT__ChLDW', 'RT__ChPR', 'RT__ChPrintA',
                'RT__ChPrintC', 'RT__ChPrintO', 'RT__ChPrintS', 'RT__ChPS', 'RT__ChR',
                'RT__ChSTB', 'RT__ChSTW', 'RT__ChT', 'RT__Err', 'RT__TrPS', 'RV__Pr',
                'Symb__Tab', 'Unsigned__Compare', 'WV__Pr', 'Z__Region'),
                prefix='(?i)', suffix=r'\b'),
             Name.Builtin, '#pop'),
            # Other built-in symbols
            (words((
                'call', 'copy', 'create', 'DEBUG', 'destroy', 'DICT_CHAR_SIZE',
                'DICT_ENTRY_BYTES', 'DICT_IS_UNICODE', 'DICT_WORD_SIZE', 'false',
                'FLOAT_INFINITY', 'FLOAT_NAN', 'FLOAT_NINFINITY', 'GOBJFIELD_CHAIN',
                'GOBJFIELD_CHILD', 'GOBJFIELD_NAME', 'GOBJFIELD_PARENT',
                'GOBJFIELD_PROPTAB', 'GOBJFIELD_SIBLING', 'GOBJ_EXT_START',
                'GOBJ_TOTAL_LENGTH', 'Grammar__Version', 'INDIV_PROP_START', 'INFIX',
                'infix__watching', 'MODULE_MODE', 'name', 'nothing', 'NUM_ATTR_BYTES', 'print',
                'print_to_array', 'recreate', 'remaining', 'self', 'sender', 'STRICT_MODE',
                'sw__var', 'sys__glob0', 'sys__glob1', 'sys__glob2', 'sys_statusline_flag',
                'TARGET_GLULX', 'TARGET_ZCODE', 'temp__global2', 'temp__global3',
                'temp__global4', 'temp_global', 'true', 'USE_MODULES', 'WORDSIZE'),
                prefix='(?i)', suffix=r'\b'),
             Name.Builtin, '#pop'),
            # Other values
            (_name, Name, '#pop')
        ],
        # Strings
        'dictionary-word': [
            (r'[~^]+', String.Escape),
            (r'[^~^\\@({%s]+' % _squote, String.Single),
            (r'[({]', String.Single),
            (r'@\{[0-9a-fA-F]{,4}\}', String.Escape),
            (r'@.{2}', String.Escape),
            (r'[%s]' % _squote, String.Single, '#pop')
        ],
        'string': [
            (r'[~^]+', String.Escape),
            (r'[^~^\\@({%s]+' % _dquote, String.Double),
            (r'[({]', String.Double),
            (r'\\', String.Escape),
            (r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' %
             (_newline, _newline), String.Escape),
            (r'@(\\\s*[%s]\s*)*\{((\\\s*[%s]\s*)*[0-9a-fA-F]){,4}'
             r'(\\\s*[%s]\s*)*\}' % (_newline, _newline, _newline),
             String.Escape),
            (r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' % (_newline, _newline),
             String.Escape),
            (r'[%s]' % _dquote, String.Double, '#pop')
        ],
        'plain-string': [
            (r'[^~^\\({\[\]%s]+' % _dquote, String.Double),
            (r'[~^({\[\]]', String.Double),
            (r'\\', String.Escape),
            (r'[%s]' % _dquote, String.Double, '#pop')
        ],
        # Names
        '_constant': [
            include('_whitespace'),
            (_name, Name.Constant, '#pop'),
            include('value')
        ],
        '_global': [
            include('_whitespace'),
            (_name, Name.Variable.Global, '#pop'),
            include('value')
        ],
        'label?': [
            include('_whitespace'),
            (_name, Name.Label, '#pop'),
            default('#pop')
        ],
        'variable?': [
            include('_whitespace'),
            (_name, Name.Variable, '#pop'),
            default('#pop')
        ],
        # Values after hashes
        'obsolete-dictionary-word': [
            (r'\S\w*', String.Other, '#pop')
        ],
        'system-constant': [
            include('_whitespace'),
            (_name, Name.Builtin, '#pop')
        ],
        # Directives
        'directive': [
            include('_whitespace'),
            (r'#', Punctuation),
            (r';', Punctuation, '#pop'),
            (r'\[', Punctuation,
             ('default', 'statements', 'locals', 'routine-name?')),
            (words((
                'abbreviate', 'endif', 'dictionary', 'ifdef', 'iffalse', 'ifndef', 'ifnot',
                'iftrue', 'ifv3', 'ifv5', 'release', 'serial', 'switches', 'system_file',
                'version'), prefix='(?i)', suffix=r'\b'),
             Keyword, 'default'),
            (r'(?i)(array|global)\b', Keyword,
             ('default', 'directive-keyword?', '_global')),
            (r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')),
            (r'(?i)class\b', Keyword,
             ('object-body', 'duplicates', 'class-name')),
            (r'(?i)(constant|default)\b', Keyword,
             ('default', 'expression', '_constant')),
            (r'(?i)(end\b)(.*)', bygroups(Keyword, Text)),
            (r'(?i)(extend|verb)\b', Keyword, 'grammar'),
            (r'(?i)fake_action\b', Keyword, ('default', '_constant')),
            (r'(?i)import\b', Keyword, 'manifest'),
            (r'(?i)(include|link)\b', Keyword,
             ('default', 'before-plain-string')),
            (r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')),
            (r'(?i)message\b', Keyword, ('default', 'diagnostic')),
            (r'(?i)(nearby|object)\b', Keyword,
             ('object-body', '_object-head')),
            (r'(?i)property\b', Keyword,
             ('default', 'alias?', '_constant', 'property-keyword*')),
            (r'(?i)replace\b', Keyword,
             ('default', 'routine-name?', 'routine-name?')),
            (r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')),
            (r'(?i)stub\b', Keyword, ('default', 'routine-name?')),
            (r'(?i)trace\b', Keyword,
             ('default', 'trace-keyword?', 'trace-keyword?')),
            (r'(?i)zcharacter\b', Keyword,
             ('default', 'directive-keyword?', 'directive-keyword?')),
            (_name, Name.Class, ('object-body', '_object-head'))
        ],
        # [, Replace, Stub
        'routine-name?': [
            include('_whitespace'),
            (_name, Name.Function, '#pop'),
            default('#pop')
        ],
        'locals': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r'\*', Punctuation),
            (r'"', String.Double, 'plain-string'),
            (_name, Name.Variable)
        ],
        # Array
        'many-values': [
            include('_whitespace'),
            (r';', Punctuation),
            (r'\]', Punctuation, '#pop'),
            (r':', Error),
            default(('expression', '_expression'))
        ],
        # Attribute, Property
        'alias?': [
            include('_whitespace'),
            (r'alias\b', Keyword, ('#pop', '_constant')),
            default('#pop')
        ],
        # Class, Object, Nearby
        'class-name': [
            include('_whitespace'),
            (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
            (_name, Name.Class, '#pop')
        ],
        'duplicates': [
            include('_whitespace'),
            (r'\(', Punctuation, ('#pop', 'expression', '_expression')),
            default('#pop')
        ],
        '_object-head': [
            (r'[%s]>' % _dash, Punctuation),
            (r'(class|has|private|with)\b', Keyword.Declaration, '#pop'),
            include('_global')
        ],
        'object-body': [
            include('_whitespace'),
            (r';', Punctuation, '#pop:2'),
            (r',', Punctuation),
            (r'class\b', Keyword.Declaration, 'class-segment'),
            (r'(has|private|with)\b', Keyword.Declaration),
            (r':', Error),
            default(('_object-expression', '_expression'))
        ],
        'class-segment': [
            include('_whitespace'),
            (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
            (_name, Name.Class),
            default('value')
        ],
        # Extend, Verb
        'grammar': [
            include('_whitespace'),
            (r'=', Punctuation, ('#pop', 'default')),
            (r'\*', Punctuation, ('#pop', 'grammar-line')),
            default('_directive-keyword')
        ],
        'grammar-line': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r'[/*]', Punctuation),
            (r'[%s]>' % _dash, Punctuation, 'value'),
            (r'(noun|scope)\b', Keyword, '=routine'),
            default('_directive-keyword')
        ],
        '=routine': [
            include('_whitespace'),
            (r'=', Punctuation, 'routine-name?'),
            default('#pop')
        ],
        # Import
        'manifest': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'(?i)global\b', Keyword, '_global'),
            default('_global')
        ],
        # Include, Link, Message
        'diagnostic': [
            include('_whitespace'),
            (r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')),
            default(('#pop', 'before-plain-string', 'directive-keyword?'))
        ],
        'before-plain-string': [
            include('_whitespace'),
            (r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string'))
        ],
        'message-string': [
            (r'[~^]+', String.Escape),
            include('plain-string')
        ],
        # Keywords used in directives
        '_directive-keyword!': [
            include('_whitespace'),
            (words((
                'additive', 'alias', 'buffer', 'class', 'creature', 'data', 'error', 'fatalerror',
                'first', 'has', 'held', 'initial', 'initstr', 'last', 'long', 'meta', 'multi',
                'multiexcept', 'multiheld', 'multiinside', 'noun', 'number', 'only', 'private',
                'replace', 'reverse', 'scope', 'score', 'special', 'string', 'table', 'terminating',
                'time', 'topic', 'warning', 'with'), suffix=r'\b'),
             Keyword, '#pop'),
            (r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop')
        ],
        '_directive-keyword': [
            include('_directive-keyword!'),
            include('value')
        ],
        'directive-keyword?': [
            include('_directive-keyword!'),
            default('#pop')
        ],
        'property-keyword*': [
            include('_whitespace'),
            (r'(additive|long)\b', Keyword),
            default('#pop')
        ],
        'trace-keyword?': [
            include('_whitespace'),
            (words((
                'assembly', 'dictionary', 'expressions', 'lines', 'linker',
                'objects', 'off', 'on', 'symbols', 'tokens', 'verbs'), suffix=r'\b'),
             Keyword, '#pop'),
            default('#pop')
        ],
        # Statements
        'statements': [
            include('_whitespace'),
            (r'\]', Punctuation, '#pop'),
            (r'[;{}]', Punctuation),
            (words((
                'box', 'break', 'continue', 'default', 'give', 'inversion',
                'new_line', 'quit', 'read', 'remove', 'return', 'rfalse', 'rtrue',
                'spaces', 'string', 'until'), suffix=r'\b'),
             Keyword, 'default'),
            (r'(do|else)\b', Keyword),
            (r'(font|style)\b', Keyword,
             ('default', 'miscellaneous-keyword?')),
            (r'for\b', Keyword, ('for', '(?')),
            (r'(if|switch|while)', Keyword,
             ('expression', '_expression', '(?')),
            (r'(jump|save|restore)\b', Keyword, ('default', 'label?')),
            (r'objectloop\b', Keyword,
             ('_keyword-expression', 'variable?', '(?')),
            (r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'),
            (r'\.', Name.Label, 'label?'),
            (r'@', Keyword, 'opcode'),
            (r'#(?![agrnw]\$|#)', Punctuation, 'directive'),
            (r'<', Punctuation, 'default'),
            (r'move\b', Keyword,
             ('default', '_keyword-expression', '_expression')),
            default(('default', '_keyword-expression', '_expression'))
        ],
        'miscellaneous-keyword?': [
            include('_whitespace'),
            (r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b',
             Keyword, '#pop'),
            (r'(a|A|an|address|char|name|number|object|property|string|the|'
             r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo,
             '#pop'),
            (r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function,
             '#pop'),
            default('#pop')
        ],
        '(?': [
            include('_whitespace'),
            (r'\(', Punctuation, '#pop'),
            default('#pop')
        ],
        'for': [
            include('_whitespace'),
            (r';', Punctuation, ('_for-expression', '_expression')),
            default(('_for-expression', '_expression'))
        ],
        'print-list': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r':', Error),
            default(('_list-expression', '_expression', '_list-expression', 'form'))
        ],
        'form': [
            include('_whitespace'),
            (r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')),
            default('#pop')
        ],
        # Assembly
        'opcode': [
            include('_whitespace'),
            (r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')),
            (_name, Keyword, 'operands')
        ],
        'operands': [
            (r':', Error),
            default(('_assembly-expression', '_expression'))
        ]
    }

    def get_tokens_unprocessed(self, text):
        """Yield ``(index, token, value)`` triples, disambiguating ``in``."""
        # 'in' is either a keyword or an operator.
        # If the token two tokens after 'in' is ')', 'in' is a keyword:
        #   objectloop(a in b)
        # Otherwise, it is an operator:
        #   objectloop(a in b && true)
        objectloop_queue = []
        objectloop_token_count = -1
        previous_token = None
        for index, token, value in RegexLexer.get_tokens_unprocessed(self,
                                                                     text):
            if previous_token is Name.Variable and value == 'in':
                objectloop_queue = [[index, token, value]]
                objectloop_token_count = 2
            elif objectloop_token_count > 0:
                if token not in Comment and token not in Text:
                    objectloop_token_count -= 1
                objectloop_queue.append((index, token, value))
            else:
                if objectloop_token_count == 0:
                    if objectloop_queue[-1][2] == ')':
                        objectloop_queue[0][1] = Keyword
                    while objectloop_queue:
                        yield objectloop_queue.pop(0)
                    objectloop_token_count = -1
                yield index, token, value
                if token not in Comment and token not in Text:
                    previous_token = token
        while objectloop_queue:
            yield objectloop_queue.pop(0)
class Inform7Lexer(RegexLexer):
    """
    For `Inform 7 <http://inform7.com/>`_ source code.

    .. versionadded:: 2.0
    """

    name = 'Inform 7'
    aliases = ['inform7', 'i7']
    filenames = ['*.ni', '*.i7x']

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # Character classes shared with (and defined by) the Inform 6 lexer.
    _dash = Inform6Lexer._dash
    _dquote = Inform6Lexer._dquote
    _newline = Inform6Lexer._newline
    _start = r'\A|(?<=[%s])' % _newline

    # There are three variants of Inform 7, differing in how to
    # interpret at signs and braces in I6T. In top-level inclusions, at
    # signs in the first column are inweb syntax. In phrase definitions
    # and use options, tokens in braces are treated as I7. Use options
    # also interpret "{N}".
    tokens = {}
    token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']

    # Build one complete token table per I6T variant; __init__ below picks
    # which one is compiled, based on the 'i6t' lexer option.
    for level in token_variants:
        tokens[level] = {
            '+i6-root': list(Inform6Lexer.tokens['root']),
            '+i6t-root': [  # For Inform6TemplateLexer
                (r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc,
                 ('directive', '+p'))
            ],
            'root': [
                (r'(\|?\s)+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'[%s]' % _dquote, Generic.Heading,
                 ('+main', '+titling', '+titling-string')),
                default(('+main', '+heading?'))
            ],
            '+titling-string': [
                (r'[^%s]+' % _dquote, Generic.Heading),
                (r'[%s]' % _dquote, Generic.Heading, '#pop')
            ],
            '+titling': [
                (r'\[', Comment.Multiline, '+comment'),
                (r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading),
                (r'[%s]' % _dquote, Generic.Heading, '+titling-string'),
                (r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote),
                 Text, ('#pop', '+heading?')),
                (r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'),
                (r'[|%s]' % _newline, Generic.Heading)
            ],
            '+main': [
                (r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text),
                (r'[%s]' % _dquote, String.Double, '+text'),
                (r':', Text, '+phrase-definition'),
                (r'(?i)\bas\b', Text, '+use-option'),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
                 bygroups(Punctuation,
                          using(this, state=('+i6-root', 'directive'),
                                i6t='+i6t-not-inline'), Punctuation)),
                (r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' %
                 (_start, _dquote, _newline), Text, '+heading?'),
                (r'(?i)[a(|%s]' % _newline, Text)
            ],
            '+phrase-definition': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
                 bygroups(Punctuation,
                          using(this, state=('+i6-root', 'directive',
                                             'default', 'statements'),
                                i6t='+i6t-inline'), Punctuation), '#pop'),
                default('#pop')
            ],
            '+use-option': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
                 bygroups(Punctuation,
                          using(this, state=('+i6-root', 'directive'),
                                i6t='+i6t-use-option'), Punctuation), '#pop'),
                default('#pop')
            ],
            '+comment': [
                # Inform 7 comments nest; '#push' handles the nesting.
                (r'[^\[\]]+', Comment.Multiline),
                (r'\[', Comment.Multiline, '#push'),
                (r'\]', Comment.Multiline, '#pop')
            ],
            '+text': [
                (r'[^\[%s]+' % _dquote, String.Double),
                (r'\[.*?\]', String.Interpol),
                (r'[%s]' % _dquote, String.Double, '#pop')
            ],
            '+heading?': [
                (r'(\|?\s)+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'),
                (r'[%s]{1,3}' % _dash, Text),
                (r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline,
                 Generic.Heading, '#pop'),
                default('#pop')
            ],
            '+documentation-heading': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(?i)documentation\s+', Text, '+documentation-heading2'),
                default('#pop')
            ],
            '+documentation-heading2': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'[%s]{4}\s' % _dash, Text, '+documentation'),
                default('#pop:2')
            ],
            '+documentation': [
                (r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' %
                 (_start, _newline), Generic.Heading),
                (r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline),
                 Generic.Subheading),
                (r'((%s)\t.*?[%s])+' % (_start, _newline),
                 using(this, state='+main')),
                (r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text),
                (r'\[', Comment.Multiline, '+comment'),
            ],
            '+i6t-not-inline': [
                (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
                 Comment.Preproc),
                (r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline),
                 Comment.Preproc),
                (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
                 Generic.Heading, '+p')
            ],
            '+i6t-use-option': [
                include('+i6t-not-inline'),
                (r'(\{)(N)(\})', bygroups(Punctuation, Text, Punctuation))
            ],
            '+i6t-inline': [
                (r'(\{)(\S[^}]*)?(\})',
                 bygroups(Punctuation, using(this, state='+main'),
                          Punctuation))
            ],
            '+i6t': [
                (r'(\{[%s])(![^}]*)(\}?)' % _dash,
                 bygroups(Punctuation, Comment.Single, Punctuation)),
                (r'(\{[%s])(lines)(:)([^}]*)(\}?)' % _dash,
                 bygroups(Punctuation, Keyword, Punctuation, Text,
                          Punctuation), '+lines'),
                (r'(\{[%s])([^:}]*)(:?)([^}]*)(\}?)' % _dash,
                 bygroups(Punctuation, Keyword, Punctuation, Text,
                          Punctuation)),
                (r'(\(\+)(.*?)(\+\)|\Z)',
                 bygroups(Punctuation, using(this, state='+main'),
                          Punctuation))
            ],
            '+p': [
                (r'[^@]+', Comment.Preproc),
                (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
                 Comment.Preproc, '#pop'),
                (r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc),
                (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
                 Generic.Heading),
                (r'@', Comment.Preproc)
            ],
            '+lines': [
                (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
                 Comment.Preproc),
                (r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline),
                 Comment.Preproc),
                (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
                 Generic.Heading, '+p'),
                (r'(%s)@\w*[ %s]' % (_start, _newline), Keyword),
                (r'![^%s]*' % _newline, Comment.Single),
                (r'(\{)([%s]endlines)(\})' % _dash,
                 bygroups(Punctuation, Keyword, Punctuation), '#pop'),
                (r'[^@!{]+?([%s]|\Z)|.' % _newline, Text)
            ]
        }
        # Inform 7 can include snippets of Inform 6 template language,
        # so all of Inform6Lexer's states are copied here, with
        # modifications to account for template syntax. Inform7Lexer's
        # own states begin with '+' to avoid name conflicts. Some of
        # Inform6Lexer's states begin with '_': these are not modified.
        # They deal with template syntax either by including modified
        # states, or by matching r'' then pushing to modified states.
        for token in Inform6Lexer.tokens:
            if token == 'root':
                continue
            tokens[level][token] = list(Inform6Lexer.tokens[token])
            if not token.startswith('_'):
                tokens[level][token][:0] = [include('+i6t'), include(level)]

    def __init__(self, **options):
        # Select the token variant requested via the 'i6t' option and
        # compile it lazily, caching the result in _all_tokens.
        level = options.get('i6t', '+i6t-not-inline')
        if level not in self._all_tokens:
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]
        RegexLexer.__init__(self, **options)
class Inform6TemplateLexer(Inform7Lexer):
    """
    For `Inform 6 template
    <http://inform7.com/sources/src/i6template/Woven/index.html>`_ code.
    .. versionadded:: 2.0
    """
    name = 'Inform 6 template'
    aliases = ['i6t']
    filenames = ['*.i6t']

    def get_tokens_unprocessed(self, text, stack=('+i6t-root',)):
        # Reuse Inform7Lexer's machinery, but enter through the
        # '+i6t-root' state so the whole input is lexed as I6T template
        # code rather than as Inform 7 source.
        i7_tokenize = Inform7Lexer.get_tokens_unprocessed
        return i7_tokenize(self, text, stack)
class Tads3Lexer(RegexLexer):
    """
    For `TADS 3 <http://www.tads.org/>`_ source code.
    """

    name = 'TADS 3'
    aliases = ['tads3']
    filenames = ['*.t']

    flags = re.DOTALL | re.MULTILINE

    # Reusable regex fragments for comments, escapes, names and operators.
    _comment_single = r'(?://(?:[^\\\n]|\\+[\w\W])*$)'
    _comment_multiline = r'(?:/\*(?:[^*]|\*(?!/))*\*/)'
    _escape = (r'(?:\\(?:[\n\\<>"\'^v bnrt]|u[\da-fA-F]{,4}|x[\da-fA-F]{,2}|'
               r'[0-3]?[0-7]{1,2}))')
    _name = r'(?:[_a-zA-Z]\w*)'
    _no_quote = r'(?=\s|\\?>)'
    _operator = (r'(?:&&|\|\||\+\+|--|\?\?|::|[.,@\[\]~]|'
                 r'(?:[=+\-*/%!&|^]|<<?|>>?>?)=?)')
    _ws = r'(?:\\|\s|%s|%s)' % (_comment_single, _comment_multiline)
    _ws_pp = r'(?:\\\n|[^\S\n]|%s|%s)' % (_comment_single, _comment_multiline)

    def _make_string_state(triple, double, verbatim=None, _escape=_escape):
        """Build the rule list for a string state.

        *triple* selects triple-quoted strings, *double* selects the
        double-quote delimiter, and *verbatim* (e.g. ``'listing'`` or
        ``'xmp'``) builds the variant used inside a verbatim HTML tag.
        """
        if verbatim:
            verbatim = ''.join(['(?:%s|%s)' % (re.escape(c.lower()),
                                               re.escape(c.upper()))
                                for c in verbatim])
        char = r'"' if double else r"'"
        token = String.Double if double else String.Single
        escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
        prefix = '%s%s' % ('t' if triple else '', 'd' if double else 's')
        tag_state_name = '%sqt' % prefix
        state = []
        if triple:
            state += [
                (r'%s{3,}' % char, token, '#pop'),
                (r'\\%s+' % char, String.Escape),
                (char, token)
            ]
        else:
            state.append((char, token, '#pop'))
        state += [
            include('s/verbatim'),
            (r'[^\\<&{}%s]+' % char, token)
        ]
        if verbatim:
            # This regex can't use `(?i)` because escape sequences are
            # case-sensitive. `<\XMP>` works; `<\xmp>` doesn't.
            state.append((r'\\?<(/|\\\\|(?!%s)\\)%s(?=[\s=>])' %
                          (_escape, verbatim),
                          Name.Tag, ('#pop', '%sqs' % prefix, tag_state_name)))
        else:
            state += [
                (r'\\?<!([^><\\%s]|<(?!<)|\\%s%s|%s|\\.)*>?' %
                 (char, char, escaped_quotes, _escape), Comment.Multiline),
                (r'(?i)\\?<listing(?=[\s=>]|\\>)', Name.Tag,
                 ('#pop', '%sqs/listing' % prefix, tag_state_name)),
                (r'(?i)\\?<xmp(?=[\s=>]|\\>)', Name.Tag,
                 ('#pop', '%sqs/xmp' % prefix, tag_state_name)),
                (r'\\?<([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)*' %
                 (char, char, escaped_quotes, _escape), Name.Tag,
                 tag_state_name),
                include('s/entity')
            ]
        state += [
            include('s/escape'),
            (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
             (char, char, escaped_quotes, _escape), String.Interpol),
            (r'[\\&{}<]', token)
        ]
        return state

    def _make_tag_state(triple, double, _escape=_escape):
        """Build the rule list for an HTML tag inside a string.

        The state name encodes the enclosing string's quoting, so the
        correct number of states is popped when the string terminates
        inside the tag.
        """
        char = r'"' if double else r"'"
        quantifier = r'{3,}' if triple else r''
        state_name = '%s%sqt' % ('t' if triple else '', 'd' if double else 's')
        token = String.Double if double else String.Single
        escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
        return [
            (r'%s%s' % (char, quantifier), token, '#pop:2'),
            (r'(\s|\\\n)+', Text),
            (r'(=)(\\?")', bygroups(Punctuation, String.Double),
             'dqs/%s' % state_name),
            (r"(=)(\\?')", bygroups(Punctuation, String.Single),
             'sqs/%s' % state_name),
            (r'=', Punctuation, 'uqs/%s' % state_name),
            (r'\\?>', Name.Tag, '#pop'),
            (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
             (char, char, escaped_quotes, _escape), String.Interpol),
            (r'([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)+' %
             (char, char, escaped_quotes, _escape), Name.Attribute),
            include('s/escape'),
            include('s/verbatim'),
            include('s/entity'),
            (r'[\\{}&]', Name.Attribute)
        ]

    def _make_attribute_value_state(terminator, host_triple, host_double,
                                    _escape=_escape):
        """Build the rule list for a tag attribute's value.

        *terminator* is the quote ending the attribute value (or the
        unquoted-value lookahead), while *host_triple*/*host_double*
        describe the string that hosts the whole tag.
        """
        token = (String.Double if terminator == r'"' else
                 String.Single if terminator == r"'" else String.Other)
        host_char = r'"' if host_double else r"'"
        host_quantifier = r'{3,}' if host_triple else r''
        host_token = String.Double if host_double else String.Single
        escaped_quotes = (r'+|%s(?!%s{2})' % (host_char, host_char)
                          if host_triple else r'')
        return [
            (r'%s%s' % (host_char, host_quantifier), host_token, '#pop:3'),
            (r'%s%s' % (r'' if token is String.Other else r'\\?', terminator),
             token, '#pop'),
            include('s/verbatim'),
            include('s/entity'),
            (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
             (host_char, host_char, escaped_quotes, _escape), String.Interpol),
            (r'([^\s"\'<%s{}\\&])+' % (r'>' if token is String.Other else r''),
             token),
            include('s/escape'),
            (r'["\'\s&{<}\\]', token)
        ]

    tokens = {
        'root': [
            (u'\ufeff', Text),
            (r'\{', Punctuation, 'object-body'),
            (r';+', Punctuation),
            (r'(?=(argcount|break|case|catch|continue|default|definingobj|'
             r'delegated|do|else|for|foreach|finally|goto|if|inherited|'
             r'invokee|local|nil|new|operator|replaced|return|self|switch|'
             r'targetobj|targetprop|throw|true|try|while)\b)', Text, 'block'),
            (r'(%s)(%s*)(\()' % (_name, _ws),
             bygroups(Name.Function, using(this, state='whitespace'),
                      Punctuation),
             ('block?/root', 'more/parameters', 'main/parameters')),
            include('whitespace'),
            (r'\++', Punctuation),
            (r'[^\s!"%-(*->@-_a-z{-~]+', Error),  # Averts an infinite loop
            (r'(?!\Z)', Text, 'main/root')
        ],
        'main/root': [
            include('main/basic'),
            default(('#pop', 'object-body/no-braces', 'classes', 'class'))
        ],
        'object-body/no-braces': [
            (r';', Punctuation, '#pop'),
            (r'\{', Punctuation, ('#pop', 'object-body')),
            include('object-body')
        ],
        'object-body': [
            (r';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r':', Punctuation, ('classes', 'class')),
            (r'(%s?)(%s*)(\()' % (_name, _ws),
             bygroups(Name.Function, using(this, state='whitespace'),
                      Punctuation),
             ('block?', 'more/parameters', 'main/parameters')),
            (r'(%s)(%s*)(\{)' % (_name, _ws),
             bygroups(Name.Function, using(this, state='whitespace'),
                      Punctuation), 'block'),
            (r'(%s)(%s*)(:)' % (_name, _ws),
             bygroups(Name.Variable, using(this, state='whitespace'),
                      Punctuation),
             ('object-body/no-braces', 'classes', 'class')),
            include('whitespace'),
            (r'->|%s' % _operator, Punctuation, 'main'),
            default('main/object-body')
        ],
        'main/object-body': [
            include('main/basic'),
            (r'(%s)(%s*)(=?)' % (_name, _ws),
             bygroups(Name.Variable, using(this, state='whitespace'),
                      Punctuation), ('#pop', 'more', 'main')),
            default('#pop:2')
        ],
        'block?/root': [
            (r'\{', Punctuation, ('#pop', 'block')),
            include('whitespace'),
            (r'(?=[[\'"<(:])', Text,  # It might be a VerbRule macro.
             ('#pop', 'object-body/no-braces', 'grammar', 'grammar-rules')),
            # It might be a macro like DefineAction.
            default(('#pop', 'object-body/no-braces'))
        ],
        'block?': [
            (r'\{', Punctuation, ('#pop', 'block')),
            include('whitespace'),
            default('#pop')
        ],
        'block/basic': [
            (r'[;:]+', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r'default\b', Keyword.Reserved),
            (r'(%s)(%s*)(:)' % (_name, _ws),
             bygroups(Name.Label, using(this, state='whitespace'),
                      Punctuation)),
            include('whitespace')
        ],
        'block': [
            include('block/basic'),
            (r'(?!\Z)', Text, ('more', 'main'))
        ],
        'block/embed': [
            (r'>>', String.Interpol, '#pop'),
            include('block/basic'),
            (r'(?!\Z)', Text, ('more/embed', 'main'))
        ],
        'main/basic': [
            include('whitespace'),
            (r'\(', Punctuation, ('#pop', 'more', 'main')),
            (r'\[', Punctuation, ('#pop', 'more/list', 'main')),
            (r'\{', Punctuation, ('#pop', 'more/inner', 'main/inner',
                                  'more/parameters', 'main/parameters')),
            (r'\*|\.{3}', Punctuation, '#pop'),
            (r'(?i)0x[\da-f]+', Number.Hex, '#pop'),
            (r'(\d+\.(?!\.)\d*|\.\d+)([eE][-+]?\d+)?|\d+[eE][-+]?\d+',
             Number.Float, '#pop'),
            (r'0[0-7]+', Number.Oct, '#pop'),
            (r'\d+', Number.Integer, '#pop'),
            (r'"""', String.Double, ('#pop', 'tdqs')),
            (r"'''", String.Single, ('#pop', 'tsqs')),
            (r'"', String.Double, ('#pop', 'dqs')),
            (r"'", String.Single, ('#pop', 'sqs')),
            (r'R"""', String.Regex, ('#pop', 'tdqr')),
            (r"R'''", String.Regex, ('#pop', 'tsqr')),
            (r'R"', String.Regex, ('#pop', 'dqr')),
            (r"R'", String.Regex, ('#pop', 'sqr')),
            # Two-token keywords
            (r'(extern)(%s+)(object\b)' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Keyword.Reserved)),
            (r'(function|method)(%s*)(\()' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Punctuation),
             ('#pop', 'block?', 'more/parameters', 'main/parameters')),
            (r'(modify)(%s+)(grammar\b)' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Keyword.Reserved),
             ('#pop', 'object-body/no-braces', ':', 'grammar')),
            (r'(new)(%s+(?=(?:function|method)\b))' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'))),
            (r'(object)(%s+)(template\b)' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Keyword.Reserved), ('#pop', 'template')),
            (r'(string)(%s+)(template\b)' % _ws,
             bygroups(Keyword, using(this, state='whitespace'),
                      Keyword.Reserved), ('#pop', 'function-name')),
            # Keywords
            (r'(argcount|definingobj|invokee|replaced|targetobj|targetprop)\b',
             Name.Builtin, '#pop'),
            (r'(break|continue|goto)\b', Keyword.Reserved, ('#pop', 'label')),
            (r'(case|extern|if|intrinsic|return|static|while)\b',
             Keyword.Reserved),
            (r'catch\b', Keyword.Reserved, ('#pop', 'catch')),
            (r'class\b', Keyword.Reserved,
             ('#pop', 'object-body/no-braces', 'class')),
            (r'(default|do|else|finally|try)\b', Keyword.Reserved, '#pop'),
            (r'(dictionary|property)\b', Keyword.Reserved,
             ('#pop', 'constants')),
            (r'enum\b', Keyword.Reserved, ('#pop', 'enum')),
            (r'export\b', Keyword.Reserved, ('#pop', 'main')),
            (r'(for|foreach)\b', Keyword.Reserved,
             ('#pop', 'more/inner', 'main/inner')),
            (r'(function|method)\b', Keyword.Reserved,
             ('#pop', 'block?', 'function-name')),
            (r'grammar\b', Keyword.Reserved,
             ('#pop', 'object-body/no-braces', 'grammar')),
            (r'inherited\b', Keyword.Reserved, ('#pop', 'inherited')),
            (r'local\b', Keyword.Reserved,
             ('#pop', 'more/local', 'main/local')),
            (r'(modify|replace|switch|throw|transient)\b', Keyword.Reserved,
             '#pop'),
            (r'new\b', Keyword.Reserved, ('#pop', 'class')),
            (r'(nil|true)\b', Keyword.Constant, '#pop'),
            (r'object\b', Keyword.Reserved, ('#pop', 'object-body/no-braces')),
            (r'operator\b', Keyword.Reserved, ('#pop', 'operator')),
            (r'propertyset\b', Keyword.Reserved,
             ('#pop', 'propertyset', 'main')),
            (r'self\b', Name.Builtin.Pseudo, '#pop'),
            (r'template\b', Keyword.Reserved, ('#pop', 'template')),
            # Operators
            (r'(__objref|defined)(%s*)(\()' % _ws,
             bygroups(Operator.Word, using(this, state='whitespace'),
                      Operator), ('#pop', 'more/__objref', 'main')),
            (r'delegated\b', Operator.Word),
            # Compiler-defined macros and built-in properties
            (r'(__DATE__|__DEBUG|__LINE__|__FILE__|'
             r'__TADS_MACRO_FORMAT_VERSION|__TADS_SYS_\w*|__TADS_SYSTEM_NAME|'
             r'__TADS_VERSION_MAJOR|__TADS_VERSION_MINOR|__TADS3|__TIME__|'
             r'construct|finalize|grammarInfo|grammarTag|lexicalParent|'
             r'miscVocab|sourceTextGroup|sourceTextGroupName|'
             r'sourceTextGroupOrder|sourceTextOrder)\b', Name.Builtin, '#pop')
        ],
        'main': [
            include('main/basic'),
            (_name, Name, '#pop'),
            default('#pop')
        ],
        'more/basic': [
            (r'\(', Punctuation, ('more/list', 'main')),
            (r'\[', Punctuation, ('more', 'main')),
            (r'\.{3}', Punctuation),
            (r'->|\.\.', Punctuation, 'main'),
            (r'(?=;)|[:)\]]', Punctuation, '#pop'),
            include('whitespace'),
            (_operator, Operator, 'main'),
            (r'\?', Operator, ('main', 'more/conditional', 'main')),
            (r'(is|not)(%s+)(in\b)' % _ws,
             bygroups(Operator.Word, using(this, state='whitespace'),
                      Operator.Word)),
            (r'[^\s!"%-_a-z{-~]+', Error)  # Averts an infinite loop
        ],
        'more': [
            include('more/basic'),
            default('#pop')
        ],
        # Then expression (conditional operator)
        'more/conditional': [
            (r':(?!:)', Operator, '#pop'),
            include('more')
        ],
        # Embedded expressions
        'more/embed': [
            (r'>>', String.Interpol, '#pop:2'),
            include('more')
        ],
        # For/foreach loop initializer or short-form anonymous function
        'main/inner': [
            (r'\(', Punctuation, ('#pop', 'more/inner', 'main/inner')),
            (r'local\b', Keyword.Reserved, ('#pop', 'main/local')),
            include('main')
        ],
        'more/inner': [
            (r'\}', Punctuation, '#pop'),
            (r',', Punctuation, 'main/inner'),
            (r'(in|step)\b', Keyword, 'main/inner'),
            include('more')
        ],
        # Local
        'main/local': [
            (_name, Name.Variable, '#pop'),
            include('whitespace')
        ],
        'more/local': [
            (r',', Punctuation, 'main/local'),
            include('more')
        ],
        # List
        'more/list': [
            (r'[,:]', Punctuation, 'main'),
            include('more')
        ],
        # Parameter list
        'main/parameters': [
            (r'(%s)(%s*)(?=:)' % (_name, _ws),
             bygroups(Name.Variable, using(this, state='whitespace')), '#pop'),
            (r'(%s)(%s+)(%s)' % (_name, _ws, _name),
             bygroups(Name.Class, using(this, state='whitespace'),
                      Name.Variable), '#pop'),
            (r'\[+', Punctuation),
            include('main/basic'),
            (_name, Name.Variable, '#pop'),
            default('#pop')
        ],
        'more/parameters': [
            (r'(:)(%s*(?=[?=,:)]))' % _ws,
             bygroups(Punctuation, using(this, state='whitespace'))),
            (r'[?\]]+', Punctuation),
            (r'[:)]', Punctuation, ('#pop', 'multimethod?')),
            (r',', Punctuation, 'main/parameters'),
            (r'=', Punctuation, ('more/parameter', 'main')),
            include('more')
        ],
        'more/parameter': [
            (r'(?=[,)])', Text, '#pop'),
            include('more')
        ],
        'multimethod?': [
            (r'multimethod\b', Keyword, '#pop'),
            include('whitespace'),
            default('#pop')
        ],
        # Statements and expressions
        'more/__objref': [
            (r',', Punctuation, 'mode'),
            (r'\)', Operator, '#pop'),
            include('more')
        ],
        'mode': [
            (r'(error|warn)\b', Keyword, '#pop'),
            include('whitespace')
        ],
        'catch': [
            (r'\(+', Punctuation),
            (_name, Name.Exception, ('#pop', 'variables')),
            include('whitespace')
        ],
        'enum': [
            include('whitespace'),
            (r'token\b', Keyword, ('#pop', 'constants')),
            default(('#pop', 'constants'))
        ],
        'grammar': [
            (r'\)+', Punctuation),
            (r'\(', Punctuation, 'grammar-tag'),
            (r':', Punctuation, 'grammar-rules'),
            (_name, Name.Class),
            include('whitespace')
        ],
        'grammar-tag': [
            include('whitespace'),
            (r'"""([^\\"<]|""?(?!")|\\"+|\\.|<(?!<))+("{3,}|<<)|'
             r'R"""([^\\"]|""?(?!")|\\"+|\\.)+"{3,}|'
             r"'''([^\\'<]|''?(?!')|\\'+|\\.|<(?!<))+('{3,}|<<)|"
             r"R'''([^\\']|''?(?!')|\\'+|\\.)+'{3,}|"
             r'"([^\\"<]|\\.|<(?!<))+("|<<)|R"([^\\"]|\\.)+"|'
             r"'([^\\'<]|\\.|<(?!<))+('|<<)|R'([^\\']|\\.)+'|"
             r"([^)\s\\/]|/(?![/*]))+|\)", String.Other, '#pop')
        ],
        'grammar-rules': [
            include('string'),
            include('whitespace'),
            (r'(\[)(%s*)(badness)' % _ws,
             bygroups(Punctuation, using(this, state='whitespace'), Keyword),
             'main'),
            (r'->|%s|[()]' % _operator, Punctuation),
            (_name, Name.Constant),
            default('#pop:2')
        ],
        ':': [
            (r':', Punctuation, '#pop')
        ],
        'function-name': [
            (r'(<<([^>]|>>>|>(?!>))*>>)+', String.Interpol),
            (r'(?=%s?%s*[({])' % (_name, _ws), Text, '#pop'),
            (_name, Name.Function, '#pop'),
            include('whitespace')
        ],
        'inherited': [
            (r'<', Punctuation, ('#pop', 'classes', 'class')),
            include('whitespace'),
            (_name, Name.Class, '#pop'),
            default('#pop')
        ],
        'operator': [
            (r'negate\b', Operator.Word, '#pop'),
            include('whitespace'),
            (_operator, Operator),
            default('#pop')
        ],
        'propertyset': [
            (r'\(', Punctuation, ('more/parameters', 'main/parameters')),
            (r'\{', Punctuation, ('#pop', 'object-body')),
            include('whitespace')
        ],
        'template': [
            (r'(?=;)', Text, '#pop'),
            include('string'),
            (r'inherited\b', Keyword.Reserved),
            include('whitespace'),
            (r'->|\?|%s' % _operator, Punctuation),
            (_name, Name.Variable)
        ],
        # Identifiers
        'class': [
            (r'\*|\.{3}', Punctuation, '#pop'),
            (r'object\b', Keyword.Reserved, '#pop'),
            (r'transient\b', Keyword.Reserved),
            (_name, Name.Class, '#pop'),
            include('whitespace'),
            default('#pop')
        ],
        'classes': [
            (r'[:,]', Punctuation, 'class'),
            include('whitespace'),
            (r'>', Punctuation, '#pop'),
            default('#pop')
        ],
        'constants': [
            (r',+', Punctuation),
            (r';', Punctuation, '#pop'),
            (r'property\b', Keyword.Reserved),
            (_name, Name.Constant),
            include('whitespace')
        ],
        'label': [
            (_name, Name.Label, '#pop'),
            include('whitespace'),
            default('#pop')
        ],
        'variables': [
            (r',+', Punctuation),
            (r'\)', Punctuation, '#pop'),
            include('whitespace'),
            (_name, Name.Variable)
        ],
        # Whitespace and comments
        'whitespace': [
            (r'^%s*#(%s|[^\n]|(?<=\\)\n)*\n?' % (_ws_pp, _comment_multiline),
             Comment.Preproc),
            (_comment_single, Comment.Single),
            (_comment_multiline, Comment.Multiline),
            (r'\\+\n+%s*#?|\n+|([^\S\n]|\\)+' % _ws_pp, Text)
        ],
        # Strings
        'string': [
            (r'"""', String.Double, 'tdqs'),
            (r"'''", String.Single, 'tsqs'),
            (r'"', String.Double, 'dqs'),
            (r"'", String.Single, 'sqs')
        ],
        's/escape': [
            (r'\{\{|\}\}|%s' % _escape, String.Escape)
        ],
        's/verbatim': [
            (r'<<\s*(as\s+decreasingly\s+likely\s+outcomes|cycling|else|end|'
             r'first\s+time|one\s+of|only|or|otherwise|'
             r'(sticky|(then\s+)?(purely\s+)?at)\s+random|stopping|'
             r'(then\s+)?(half\s+)?shuffled|\|\|)\s*>>', String.Interpol),
            (r'<<(%%(_(%s|\\?.)|[\-+ ,#]|\[\d*\]?)*\d*\.?\d*(%s|\\?.)|'
             r'\s*((else|otherwise)\s+)?(if|unless)\b)?' % (_escape, _escape),
             String.Interpol, ('block/embed', 'more/embed', 'main'))
        ],
        's/entity': [
            (r'(?i)&(#(x[\da-f]+|\d+)|[a-z][\da-z]*);?', Name.Entity)
        ],
        # String states, one per quoting style (t = triple, d/s = quote).
        'tdqs': _make_string_state(True, True),
        'tsqs': _make_string_state(True, False),
        'dqs': _make_string_state(False, True),
        'sqs': _make_string_state(False, False),
        'tdqs/listing': _make_string_state(True, True, 'listing'),
        'tsqs/listing': _make_string_state(True, False, 'listing'),
        'dqs/listing': _make_string_state(False, True, 'listing'),
        'sqs/listing': _make_string_state(False, False, 'listing'),
        'tdqs/xmp': _make_string_state(True, True, 'xmp'),
        'tsqs/xmp': _make_string_state(True, False, 'xmp'),
        'dqs/xmp': _make_string_state(False, True, 'xmp'),
        'sqs/xmp': _make_string_state(False, False, 'xmp'),
        # Tags
        'tdqt': _make_tag_state(True, True),
        'tsqt': _make_tag_state(True, False),
        'dqt': _make_tag_state(False, True),
        'sqt': _make_tag_state(False, False),
        'dqs/tdqt': _make_attribute_value_state(r'"', True, True),
        'dqs/tsqt': _make_attribute_value_state(r'"', True, False),
        'dqs/dqt': _make_attribute_value_state(r'"', False, True),
        'dqs/sqt': _make_attribute_value_state(r'"', False, False),
        'sqs/tdqt': _make_attribute_value_state(r"'", True, True),
        'sqs/tsqt': _make_attribute_value_state(r"'", True, False),
        'sqs/dqt': _make_attribute_value_state(r"'", False, True),
        'sqs/sqt': _make_attribute_value_state(r"'", False, False),
        'uqs/tdqt': _make_attribute_value_state(_no_quote, True, True),
        'uqs/tsqt': _make_attribute_value_state(_no_quote, True, False),
        'uqs/dqt': _make_attribute_value_state(_no_quote, False, True),
        'uqs/sqt': _make_attribute_value_state(_no_quote, False, False),
        # Regular expressions
        'tdqr': [
            (r'[^\\"]+', String.Regex),
            (r'\\"*', String.Regex),
            (r'"{3,}', String.Regex, '#pop'),
            (r'"', String.Regex)
        ],
        'tsqr': [
            (r"[^\\']+", String.Regex),
            (r"\\'*", String.Regex),
            (r"'{3,}", String.Regex, '#pop'),
            (r"'", String.Regex)
        ],
        'dqr': [
            (r'[^\\"]+', String.Regex),
            (r'\\"?', String.Regex),
            (r'"', String.Regex, '#pop')
        ],
        'sqr': [
            (r"[^\\']+", String.Regex),
            (r"\\'?", String.Regex),
            (r"'", String.Regex, '#pop')
        ]
    }

    def get_tokens_unprocessed(self, text, **kwargs):
        """Yield tokens, demoting code inside a false ``#if`` to Comment.

        Tracks nesting of preprocessor conditionals so that everything
        between ``#if 0``/``#if nil`` and the matching ``#else``/``#elif``/
        ``#endif`` is re-tagged as a plain comment.
        """
        pp = r'^%s*#%s*' % (self._ws_pp, self._ws_pp)
        if_false_level = 0
        for index, token, value in (
            RegexLexer.get_tokens_unprocessed(self, text, **kwargs)):
            if if_false_level == 0:  # Not in a false #if
                if (token is Comment.Preproc and
                    re.match(r'%sif%s+(0|nil)%s*$\n?' %
                             (pp, self._ws_pp, self._ws_pp), value)):
                    if_false_level = 1
            else:  # In a false #if
                if token is Comment.Preproc:
                    if (if_false_level == 1 and
                        re.match(r'%sel(if|se)\b' % pp, value)):
                        if_false_level = 0
                    elif re.match(r'%sif' % pp, value):
                        if_false_level += 1
                    elif re.match(r'%sendif\b' % pp, value):
                        if_false_level -= 1
                else:
                    token = Comment
            yield index, token, value
| [
"jornadaciti@ug4c08.windows.cin.ufpe.br"
] | jornadaciti@ug4c08.windows.cin.ufpe.br |
1d831cb9cfb9b9f7db96f0499fe3f0d02ab6c4ee | 6302d46032f704aa2c8bb6e2810c19e3bb90c1c4 | /server/netflix_backend/movies_api/migrations/0002_auto_20210219_1954.py | 7ccbe630791c36378645ba0357d4a4f295324d1c | [] | no_license | raghavendra-musubi/netflix-django-rest-react-redux | 304d28f68e13e9962f31593441ae1b7b36743952 | fe78061ccc1c27ff78697cb5f21d92a313b8a7c0 | refs/heads/main | 2023-03-09T21:32:30.409919 | 2021-02-24T19:03:32 | 2021-02-24T19:03:32 | 340,214,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # Generated by Django 3.1.6 on 2021-02-19 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Constrain ``Movie.category_id`` to a fixed set of genre choices."""

    dependencies = [
        ('movies_api', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='movie',
            name='category_id',
            # (database value, human-readable label) pairs for each genre.
            field=models.PositiveSmallIntegerField(
                choices=[
                    (1, 'Adventure'),
                    (2, 'Action'),
                    (3, 'Thriller'),
                    (4, 'Horror'),
                    (5, 'Comedy'),
                    (6, 'Musical'),
                    (7, 'Romance'),
                    (8, 'Drama'),
                    (9, 'Fantasy'),
                ],
            ),
        ),
    ]
| [
"raghavendra@techis.io"
] | raghavendra@techis.io |
51454841f8ba4911c06ddc7c0242c0ab00f41e5d | 4eafc9dd445b1f292a0a4cb94dea06f813458520 | /myDemo/PythonCommonModule/mofaFun.py | b638d8ccb0f978c02394ec3260a1dfff759a9294 | [] | no_license | kele5215/PycharmProjects | 0fdbf584e55774ba643264b1700960862802f9af | 67bbfa6ffc240ddb838c4b56971a006ea0586cfa | refs/heads/master | 2023-01-05T11:24:32.405810 | 2019-10-21T08:26:49 | 2019-10-21T08:26:49 | 191,890,830 | 0 | 0 | null | 2023-01-04T23:08:54 | 2019-06-14T06:49:14 | Jupyter Notebook | UTF-8 | Python | false | false | 717 | py | # _*_ coding:UTF8 _*_
class Company(object):
    """A company whose employee names behave like a read-only sequence.

    ``__getitem__`` alone is enough for iteration and slicing via the
    legacy sequence protocol; ``__len__`` is added so ``len(company)``
    also works (the demo below had to comment it out because it used to
    raise ``TypeError``).
    """

    def __init__(self, employee_list):
        # Stored without copying: the demo code reads .employee directly.
        self.employee = employee_list

    # Magic method: makes instances indexable, sliceable and iterable.
    def __getitem__(self, item):
        return self.employee[item]

    def __len__(self):
        # New: enables len(company); backward compatible (previously an error).
        return len(self.employee)

    def __str__(self):
        # Human-readable form: comma-separated names.
        return ",".join(self.employee)

    def __repr__(self):
        # Debug form: star-separated names.
        return "*".join(self.employee)
# Demo: the Company is iterable directly, thanks to __getitem__.
company = Company(['11', '22', '33'])
# print(len(company))  # would raise TypeError: no __len__ is defined
for em in company:
    print(em)
# Equivalent iteration without the magic method:
# for em in company.employee:
#     print("bu yong mo fa : " + em)
# Slicing works too, and the resulting plain list supports len().
company1 = company[:2]
print(len(company1))
for em in company1:
    print("qie pian :" + em)
# print() uses __str__, so this shows "Derek,Tom,Jack".
company = Company(['Derek', 'Tom', 'Jack'])
print(company)
| [
"77523916@qq.com"
] | 77523916@qq.com |
1f30212294a603d8593be882ead4569e1ac74274 | 492b3a10ac12cbcce5c5a1968642df3f86cb3de5 | /rotatearr.py | d000c934830a9d77c511f5098626d3216f42a48d | [] | no_license | mxxu/leetcodeOJ | bcf6a9edc9db32e11443d9d510c036e72eba73d5 | 27589a20f9ef1edee9f7687fbffd82fab6224580 | refs/heads/master | 2021-01-17T14:47:52.576943 | 2017-03-11T12:57:26 | 2017-03-11T12:57:26 | 23,454,895 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | class Solution(object):
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
if not nums or k <= 0:
return
n = len(nums)
k = k % n
if k <= 0:
return
def reverse(l, i, j):
while i < j:
l[i], l[j] = l[j], l[i]
i += 1
j -= 1
k = n - k
reverse(nums, 0, k-1)
reverse(nums, k, n-1)
reverse(nums, 0, n-1)
# Quick manual check (Python 2 syntax: print statements, list-returning range).
s = Solution()
nums = range(1, 10)
print nums
s.rotate(nums, 3)  # expect [7, 8, 9, 1, 2, 3, 4, 5, 6]
print nums
| [
"xumx@tupo.com"
] | xumx@tupo.com |
ac66d7dc5fc559ad4bd16ab25d9a32bd733df94f | 27c071224dfb52254cff8e176ce09fe62d9fcb71 | /mobileperf/extlib/xlsxwriter/packager.py | 3d944b6a5d291de0391a64e5ee9c430ff16890c4 | [
"MIT"
] | permissive | wawau/mobileperf | 7d171bd813f98bdd68c4bdcf7e68b7cebe0b9a66 | 6a1313a8b354f58831a0b3d3023a1ac5b2b47c08 | refs/heads/master | 2022-06-11T15:41:20.160362 | 2020-05-07T03:03:30 | 2020-05-07T03:03:30 | 261,952,182 | 1 | 0 | MIT | 2020-05-07T04:49:02 | 2020-05-07T04:49:01 | null | UTF-8 | Python | false | false | 23,084 | py | ###############################################################################
#
# Packager - A class for writing the Excel XLSX Worksheet file.
#
# Copyright 2013-2018, John McNamara, jmcnamara@cpan.org
#
# Standard packages.
import os
import stat
import tempfile
from shutil import copy
from .compatibility import StringIO
from .compatibility import BytesIO
# Package imports.
from .app import App
from .contenttypes import ContentTypes
from .core import Core
from .custom import Custom
from .relationships import Relationships
from .sharedstrings import SharedStrings
from .styles import Styles
from .theme import Theme
from .vml import Vml
from .table import Table
from .comments import Comments
from .exceptions import EmptyChartSeries
class Packager(object):
    """
    A class for writing the Excel XLSX Packager file.

    This module is used in conjunction with XlsxWriter to create an
    Excel XLSX container file.

    From Wikipedia: The Open Packaging Conventions (OPC) is a
    container-file technology initially created by Microsoft to store
    a combination of XML and non-XML files that together form a single
    entity such as an Open XML Paper Specification (OpenXPS)
    document. http://en.wikipedia.org/wiki/Open_Packaging_Conventions.

    At its simplest an Excel XLSX file contains the following elements::

         ____ [Content_Types].xml
        |
        |____ docProps
        |     |____ app.xml
        |     |____ core.xml
        |
        |____ xl
        |     |____ workbook.xml
        |     |____ worksheets
        |     |     |____ sheet1.xml
        |     |
        |     |____ styles.xml
        |     |
        |     |____ theme
        |     |     |____ theme1.xml
        |     |
        |     |_____rels
        |           |____ workbook.xml.rels
        |
        |_____rels
              |____ .rels

    The Packager class coordinates the classes that represent the
    elements of the package and writes them into the XLSX file.
    """
    ###########################################################################
    #
    # Public API.
    #
    ###########################################################################
    def __init__(self):
        """
        Constructor.
        """
        super(Packager, self).__init__()
        self.tmpdir = ''
        self.in_memory = False
        self.workbook = None
        self.worksheet_count = 0
        self.chartsheet_count = 0
        self.chart_count = 0
        self.drawing_count = 0
        self.table_count = 0
        self.num_vml_files = 0
        self.num_comment_files = 0
        self.named_ranges = []
        # Each entry is a (os_filename_or_stream, name_in_xlsx, is_binary)
        # tuple describing one member of the final zip container.
        self.filenames = []
    ###########################################################################
    #
    # Private API.
    #
    ###########################################################################
    def _set_tmpdir(self, tmpdir):
        # Set an optional user defined temp directory.
        self.tmpdir = tmpdir
    def _set_in_memory(self, in_memory):
        # Set the optional 'in_memory' mode.
        self.in_memory = in_memory
    def _add_workbook(self, workbook):
        # Add the Excel::Writer::XLSX::Workbook object to the package.
        self.workbook = workbook
        self.chart_count = len(workbook.charts)
        self.drawing_count = len(workbook.drawings)
        self.num_vml_files = workbook.num_vml_files
        self.num_comment_files = workbook.num_comment_files
        self.named_ranges = workbook.named_ranges
        # Worksheets and chartsheets are counted separately; both counts
        # feed the docProps/app.xml heading pairs later on.
        for worksheet in self.workbook.worksheets():
            if worksheet.is_chartsheet:
                self.chartsheet_count += 1
            else:
                self.worksheet_count += 1
    def _create_package(self):
        # Write the xml files that make up the XLSX OPC package.
        # The call order matters: e.g. tables must be written before
        # [Content_Types].xml so self.table_count is up to date.
        self._write_worksheet_files()
        self._write_chartsheet_files()
        self._write_workbook_file()
        self._write_chart_files()
        self._write_drawing_files()
        self._write_vml_files()
        self._write_comment_files()
        self._write_table_files()
        self._write_shared_strings_file()
        self._write_app_file()
        self._write_core_file()
        self._write_custom_file()
        self._write_content_types_file()
        self._write_styles_file()
        self._write_theme_file()
        self._write_root_rels_file()
        self._write_workbook_rels_file()
        self._write_worksheet_rels_files()
        self._write_chartsheet_rels_files()
        self._write_drawing_rels_files()
        self._add_image_files()
        self._add_vba_project()
        return self.filenames
    def _filename(self, xml_filename):
        # Create a temp filename to write the XML data to and store the Excel
        # filename to use as the name in the Zip container.
        if self.in_memory:
            # In-memory mode: the "filename" is actually a writable stream.
            os_filename = StringIO()
        else:
            (fd, os_filename) = tempfile.mkstemp(dir=self.tmpdir)
            os.close(fd)
        # False marks the entry as XML (text), not a binary payload.
        self.filenames.append((os_filename, xml_filename, False))
        return os_filename
    def _write_workbook_file(self):
        # Write the workbook.xml file.
        workbook = self.workbook
        workbook._set_xml_writer(self._filename('xl/workbook.xml'))
        workbook._assemble_xml_file()
    def _write_worksheet_files(self):
        # Write the worksheet files.
        index = 1
        for worksheet in self.workbook.worksheets():
            if worksheet.is_chartsheet:
                continue
            if worksheet.constant_memory:
                # Flush any pending row before re-assembling the file.
                worksheet._opt_reopen()
                worksheet._write_single_row()
            worksheet._set_xml_writer(self._filename('xl/worksheets/sheet'
                                                     + str(index) + '.xml'))
            worksheet._assemble_xml_file()
            index += 1
    def _write_chartsheet_files(self):
        # Write the chartsheet files.
        index = 1
        for worksheet in self.workbook.worksheets():
            if not worksheet.is_chartsheet:
                continue
            worksheet._set_xml_writer(self._filename('xl/chartsheets/sheet'
                                                     + str(index) + '.xml'))
            worksheet._assemble_xml_file()
            index += 1
    def _write_chart_files(self):
        # Write the chart files.
        if not self.workbook.charts:
            return
        index = 1
        for chart in self.workbook.charts:
            # Check that the chart has at least one data series.
            if not chart.series:
                raise EmptyChartSeries("Chart%d must contain at least one "
                                       "data series. See chart.add_series()."
                                       % index)
            chart._set_xml_writer(self._filename('xl/charts/chart'
                                                 + str(index) + '.xml'))
            chart._assemble_xml_file()
            index += 1
    def _write_drawing_files(self):
        # Write the drawing files.
        if not self.drawing_count:
            return
        index = 1
        for drawing in self.workbook.drawings:
            drawing._set_xml_writer(self._filename('xl/drawings/drawing'
                                                   + str(index) + '.xml'))
            drawing._assemble_xml_file()
            index += 1
    def _write_vml_files(self):
        # Write the comment VML files.
        # A worksheet may produce up to two VML files: one for cell
        # comments/buttons and one for header/footer images.
        index = 1
        for worksheet in self.workbook.worksheets():
            if not worksheet.has_vml and not worksheet.has_header_vml:
                continue
            if worksheet.has_vml:
                vml = Vml()
                vml._set_xml_writer(self._filename('xl/drawings/vmlDrawing'
                                                   + str(index) + '.vml'))
                vml._assemble_xml_file(worksheet.vml_data_id,
                                       worksheet.vml_shape_id,
                                       worksheet.comments_list,
                                       worksheet.buttons_list)
                index += 1
            if worksheet.has_header_vml:
                vml = Vml()
                vml._set_xml_writer(self._filename('xl/drawings/vmlDrawing'
                                                   + str(index) + '.vml'))
                # Header VML shape ids are offset in blocks of 1024 per file.
                vml._assemble_xml_file(worksheet.vml_header_id,
                                       worksheet.vml_header_id * 1024,
                                       None,
                                       None,
                                       worksheet.header_images_list)
                self._write_vml_drawing_rels_file(worksheet, index)
                index += 1
    def _write_comment_files(self):
        # Write the comment files.
        index = 1
        for worksheet in self.workbook.worksheets():
            if not worksheet.has_comments:
                continue
            comment = Comments()
            comment._set_xml_writer(self._filename('xl/comments'
                                                   + str(index) + '.xml'))
            comment._assemble_xml_file(worksheet.comments_list)
            index += 1
    def _write_shared_strings_file(self):
        # Write the sharedStrings.xml file.
        sst = SharedStrings()
        sst.string_table = self.workbook.str_table
        # Skip the file entirely if the workbook contains no strings.
        if not self.workbook.str_table.count:
            return
        sst._set_xml_writer(self._filename('xl/sharedStrings.xml'))
        sst._assemble_xml_file()
    def _write_app_file(self):
        # Write the app.xml file.
        properties = self.workbook.doc_properties
        app = App()
        # Add the Worksheet heading pairs.
        app._add_heading_pair(['Worksheets', self.worksheet_count])
        # Add the Chartsheet heading pairs.
        app._add_heading_pair(['Charts', self.chartsheet_count])
        # Add the Worksheet parts.
        for worksheet in self.workbook.worksheets():
            if worksheet.is_chartsheet:
                continue
            app._add_part_name(worksheet.name)
        # Add the Chartsheet parts.
        for worksheet in self.workbook.worksheets():
            if not worksheet.is_chartsheet:
                continue
            app._add_part_name(worksheet.name)
        # Add the Named Range heading pairs.
        if self.named_ranges:
            app._add_heading_pair(['Named Ranges', len(self.named_ranges)])
        # Add the Named Ranges parts.
        for named_range in self.named_ranges:
            app._add_part_name(named_range)
        app._set_properties(properties)
        app._set_xml_writer(self._filename('docProps/app.xml'))
        app._assemble_xml_file()
    def _write_core_file(self):
        # Write the core.xml file.
        properties = self.workbook.doc_properties
        core = Core()
        core._set_properties(properties)
        core._set_xml_writer(self._filename('docProps/core.xml'))
        core._assemble_xml_file()
    def _write_custom_file(self):
        # Write the custom.xml file (user-defined document properties).
        properties = self.workbook.custom_properties
        custom = Custom()
        # Nothing to write if no custom properties were set.
        if not len(properties):
            return
        custom._set_properties(properties)
        custom._set_xml_writer(self._filename('docProps/custom.xml'))
        custom._assemble_xml_file()
    def _write_content_types_file(self):
        # Write the ContentTypes.xml file.
        content = ContentTypes()
        content._add_image_types(self.workbook.image_types)
        worksheet_index = 1
        chartsheet_index = 1
        for worksheet in self.workbook.worksheets():
            if worksheet.is_chartsheet:
                content._add_chartsheet_name('sheet' + str(chartsheet_index))
                chartsheet_index += 1
            else:
                content._add_worksheet_name('sheet' + str(worksheet_index))
                worksheet_index += 1
        for i in range(1, self.chart_count + 1):
            content._add_chart_name('chart' + str(i))
        for i in range(1, self.drawing_count + 1):
            content._add_drawing_name('drawing' + str(i))
        if self.num_vml_files:
            content._add_vml_name()
        for i in range(1, self.table_count + 1):
            content._add_table_name('table' + str(i))
        for i in range(1, self.num_comment_files + 1):
            content._add_comment_name('comments' + str(i))
        # Add the sharedString rel if there is string data in the workbook.
        if self.workbook.str_table.count:
            content._add_shared_strings()
        # Add vbaProject if present.
        if self.workbook.vba_project:
            content._add_vba_project()
        # Add the custom properties if present.
        if self.workbook.custom_properties:
            content._add_custom_properties()
        content._set_xml_writer(self._filename('[Content_Types].xml'))
        content._assemble_xml_file()
    def _write_styles_file(self):
        # Write the style xml file.
        xf_formats = self.workbook.xf_formats
        palette = self.workbook.palette
        font_count = self.workbook.font_count
        num_format_count = self.workbook.num_format_count
        border_count = self.workbook.border_count
        fill_count = self.workbook.fill_count
        custom_colors = self.workbook.custom_colors
        dxf_formats = self.workbook.dxf_formats
        styles = Styles()
        styles._set_style_properties([
            xf_formats,
            palette,
            font_count,
            num_format_count,
            border_count,
            fill_count,
            custom_colors,
            dxf_formats])
        styles._set_xml_writer(self._filename('xl/styles.xml'))
        styles._assemble_xml_file()
    def _write_theme_file(self):
        # Write the theme xml file.
        theme = Theme()
        theme._set_xml_writer(self._filename('xl/theme/theme1.xml'))
        theme._assemble_xml_file()
    def _write_table_files(self):
        # Write the table files.
        index = 1
        for worksheet in self.workbook.worksheets():
            table_props = worksheet.tables
            if not table_props:
                continue
            for table_props in table_props:
                table = Table()
                table._set_xml_writer(self._filename('xl/tables/table'
                                                     + str(index) + '.xml'))
                table._set_properties(table_props)
                table._assemble_xml_file()
                # Track the total for [Content_Types].xml, written later.
                self.table_count += 1
                index += 1
    def _write_root_rels_file(self):
        # Write the _rels/.rels xml file.
        rels = Relationships()
        rels._add_document_relationship('/officeDocument', 'xl/workbook.xml')
        rels._add_package_relationship('/metadata/core-properties',
                                       'docProps/core.xml')
        rels._add_document_relationship('/extended-properties',
                                        'docProps/app.xml')
        if self.workbook.custom_properties:
            rels._add_document_relationship('/custom-properties',
                                            'docProps/custom.xml')
        rels._set_xml_writer(self._filename('_rels/.rels'))
        rels._assemble_xml_file()
    def _write_workbook_rels_file(self):
        # Write the _rels/.rels xml file.
        rels = Relationships()
        worksheet_index = 1
        chartsheet_index = 1
        for worksheet in self.workbook.worksheets():
            if worksheet.is_chartsheet:
                rels._add_document_relationship('/chartsheet',
                                                'chartsheets/sheet'
                                                + str(chartsheet_index)
                                                + '.xml')
                chartsheet_index += 1
            else:
                rels._add_document_relationship('/worksheet',
                                                'worksheets/sheet'
                                                + str(worksheet_index)
                                                + '.xml')
                worksheet_index += 1
        rels._add_document_relationship('/theme', 'theme/theme1.xml')
        rels._add_document_relationship('/styles', 'styles.xml')
        # Add the sharedString rel if there is string data in the workbook.
        if self.workbook.str_table.count:
            rels._add_document_relationship('/sharedStrings',
                                            'sharedStrings.xml')
        # Add vbaProject if present.
        if self.workbook.vba_project:
            rels._add_ms_package_relationship('/vbaProject', 'vbaProject.bin')
        rels._set_xml_writer(self._filename('xl/_rels/workbook.xml.rels'))
        rels._assemble_xml_file()
    def _write_worksheet_rels_files(self):
        # Write data such as hyperlinks or drawings.
        index = 0
        for worksheet in self.workbook.worksheets():
            if worksheet.is_chartsheet:
                continue
            index += 1
            external_links = (worksheet.external_hyper_links +
                              worksheet.external_drawing_links +
                              worksheet.external_vml_links +
                              worksheet.external_table_links +
                              worksheet.external_comment_links)
            if not external_links:
                continue
            # Create the worksheet .rels dirs.
            rels = Relationships()
            for link_data in external_links:
                rels._add_worksheet_relationship(*link_data)
            # Create .rels file such as /xl/worksheets/_rels/sheet1.xml.rels.
            rels._set_xml_writer(self._filename('xl/worksheets/_rels/sheet'
                                                + str(index) + '.xml.rels'))
            rels._assemble_xml_file()
    def _write_chartsheet_rels_files(self):
        # Write the chartsheet .rels files for links to drawing files.
        index = 0
        for worksheet in self.workbook.worksheets():
            if not worksheet.is_chartsheet:
                continue
            index += 1
            external_links = worksheet.external_drawing_links
            if not external_links:
                continue
            # Create the chartsheet .rels xlsx_dir.
            rels = Relationships()
            for link_data in external_links:
                rels._add_worksheet_relationship(*link_data)
            # Create .rels file such as /xl/chartsheets/_rels/sheet1.xml.rels.
            rels._set_xml_writer(self._filename('xl/chartsheets/_rels/sheet'
                                                + str(index) + '.xml.rels'))
            rels._assemble_xml_file()
    def _write_drawing_rels_files(self):
        # Write the drawing .rels files for worksheets with charts or drawings.
        index = 0
        for worksheet in self.workbook.worksheets():
            if worksheet.drawing:
                index += 1
            if not worksheet.drawing_links:
                continue
            # Create the drawing .rels xlsx_dir.
            rels = Relationships()
            for drawing_data in worksheet.drawing_links:
                rels._add_document_relationship(*drawing_data)
            # Create .rels file such as /xl/drawings/_rels/sheet1.xml.rels.
            rels._set_xml_writer(self._filename('xl/drawings/_rels/drawing'
                                                + str(index) + '.xml.rels'))
            rels._assemble_xml_file()
    def _write_vml_drawing_rels_file(self, worksheet, index):
        # Write the vmlDdrawing .rels files for worksheets with images in
        # headers or footers.
        # Create the drawing .rels dir.
        rels = Relationships()
        for drawing_data in worksheet.vml_drawing_links:
            rels._add_document_relationship(*drawing_data)
        # Create .rels file such as /xl/drawings/_rels/vmlDrawing1.vml.rels.
        rels._set_xml_writer(self._filename('xl/drawings/_rels/vmlDrawing'
                                            + str(index)
                                            + '.vml.rels'))
        rels._assemble_xml_file()
    def _add_image_files(self):
        # Write the /xl/media/image?.xml files.
        workbook = self.workbook
        index = 1
        for image in workbook.images:
            filename = image[0]
            ext = '.' + image[1]
            image_data = image[2]
            xml_image_name = 'xl/media/image' + str(index) + ext
            if not self.in_memory:
                # In file mode we just write or copy the image file.
                os_filename = self._filename(xml_image_name)
                if image_data:
                    # The data is in a byte stream. Write it to the target.
                    os_file = open(os_filename, mode='wb')
                    os_file.write(image_data.getvalue())
                    os_file.close()
                else:
                    copy(filename, os_filename)
                    # Allow copies of Windows read-only images to be deleted.
                    try:
                        os.chmod(os_filename,
                                 os.stat(os_filename).st_mode | stat.S_IWRITE)
                    except OSError:
                        pass
            else:
                # For in-memory mode we read the image into a stream.
                if image_data:
                    # The data is already in a byte stream.
                    os_filename = image_data
                else:
                    image_file = open(filename, mode='rb')
                    image_data = image_file.read()
                    os_filename = BytesIO(image_data)
                    image_file.close()
                # True marks this zip entry as binary (no XML processing).
                self.filenames.append((os_filename, xml_image_name, True))
            index += 1
    def _add_vba_project(self):
        # Copy in a vbaProject.bin file.
        vba_project = self.workbook.vba_project
        vba_is_stream = self.workbook.vba_is_stream
        if not vba_project:
            return
        xml_vba_name = 'xl/vbaProject.bin'
        if not self.in_memory:
            # In file mode we just write or copy the VBA file.
            os_filename = self._filename(xml_vba_name)
            if vba_is_stream:
                # The data is in a byte stream. Write it to the target.
                os_file = open(os_filename, mode='wb')
                os_file.write(vba_project.getvalue())
                os_file.close()
            else:
                copy(vba_project, os_filename)
        else:
            # For in-memory mode we read the vba into a stream.
            if vba_is_stream:
                # The data is already in a byte stream.
                os_filename = vba_project
            else:
                vba_file = open(vba_project, mode='rb')
                vba_data = vba_file.read()
                os_filename = BytesIO(vba_data)
                vba_file.close()
            # True marks this zip entry as binary data.
            self.filenames.append((os_filename, xml_vba_name, True))
| [
"look.lk@alibaba-inc.com"
] | look.lk@alibaba-inc.com |
8ea50c401f6dda1b3e53316433b1ca8319e6b1ab | 4ac91c3be9e46f690eae7cfd047d0bc0370cde5c | /handcraft/app/models.py | 7bad06fffdfb6f56b58070d6aa230a1f72fee492 | [] | no_license | YoYoAdorkable/ngx_status | 9eeb172756e42736e5ff8a54e09ed2452a071adb | d0e17574c771a2ce1e767f15931df2ede29a6350 | refs/heads/master | 2021-01-13T15:55:20.531786 | 2016-12-26T04:03:37 | 2016-12-26T04:03:37 | 76,818,913 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | #coding: utf-8
import hashlib
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import db, login_manager
class User(UserMixin, db.Model):
    """Application account model.

    Passwords are write-only: assigning ``user.password = "..."`` stores a
    Werkzeug hash in ``password_hash``; reading ``user.password`` raises
    ``AttributeError``. Use :meth:`verify_password` to check credentials.
    """
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    phone = db.Column(db.String(11))
    password_hash = db.Column(db.String(128))
    # Single-character flags; semantics of `role` values not visible here.
    role = db.Column(db.String(1))
    # '1' means active (see insert_admin below).
    active = db.Column(db.String(1))
    token = db.Column(db.String(128))
    @staticmethod
    def insert_admin(email, username, password, phone, role):
        """Create and commit an account row with active='1'.

        NOTE(review): `password` is assigned to `password_hash` verbatim,
        so callers are expected to pass an already-hashed value -- confirm
        against call sites before changing this.
        """
        user = User(email=email, username=username, phone=phone,
                    password_hash=password, role=role, active='1')
        db.session.add(user)
        db.session.commit()
    @property
    def password(self):
        # Bug fix: the original also called flash() here, but `flash` is
        # never imported in this module, so reading .password raised a
        # NameError instead of the intended AttributeError.
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        # Store only a salted hash, never the plaintext password.
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)
class Group(db.Model):
    """A named user group; linked to users through the User_Group table."""
    __tablename__ = 'group'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True, index=True)
    # NOTE(review): `comment` is unique+indexed like `name` -- confirm two
    # groups really must not share the same description text.
    comment = db.Column(db.String(64), unique=True, index=True)
class User_Group(db.Model):
    """Association table implementing the User <-> Group many-to-many link."""
    __tablename__ = 'user_group'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    group_id = db.Column(db.Integer, db.ForeignKey('group.id'))
# Flask-Login callback: resolve a session-stored id back to a User row.
@login_manager.user_loader
def load_user(user_id):
    """Return the User for *user_id* (ids arrive from the session as strings)."""
    numeric_id = int(user_id)
    return User.query.get(numeric_id)
| [
"yu.hailong@kuyun.com"
] | yu.hailong@kuyun.com |
29ebf2327a5c2399759825f1e87178cfc4bafb9e | f45abc9a5ebf975bb41021dafa878bb828831b96 | /settings.py | 1176f7542c36f17313a3c45e199bc99408503eef | [] | no_license | DmitryAB/guestbook | 9cbbed1f15a94e6d5a0cfe644a6a2daf807e25f9 | b1bcb90deedfcb075064fc97fcc8a74e92975fb3 | refs/heads/master | 2020-05-01T12:12:47.281084 | 2012-07-31T13:00:39 | 2012-07-31T13:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,987 | py | # Django settings for guestbook project.
# NOTE(review): DEBUG must be False in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): SECRET_KEY is empty -- sessions/CSRF are insecure until a
# real random value is set (keep it out of version control).
SECRET_KEY = ''
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'guestbook.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| [
"dmitriy.generalov@inbox.ru"
] | dmitriy.generalov@inbox.ru |
7185202b785b88226c157a2bc01c1c802b222e00 | 8d2b1533b720fc18536742bb93946bb34a8ca226 | /findmeanAddtimeAddmean.py | 8e7b7dd0ee8b8a7898a0918564e430675a7f5c7f | [] | no_license | YCTMZJL/ARDroneSDK_preDataProcess | 7453430b8bbf6acc4b2c8f40564279bf09ea8891 | 93ef2f5e56ab787b99a779396274222a52ae6ed5 | refs/heads/master | 2021-01-22T23:44:07.130115 | 2017-03-21T05:42:40 | 2017-03-21T05:42:40 | 85,663,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,691 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 22 21:38:12 2016
@author: ml
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 17 15:20:09 2016
@author: root
"""
import numpy as np
import cPickle as cP
import matplotlib.pyplot as plt
#import math
from mpl_toolkits.mplot3d import Axes3D
# Persist an object to disk with pickle.
def saveObj(obj, filePath):
    """Serialize *obj* to *filePath* (binary mode).

    Fix: use a context manager so the file handle is closed even if
    ``cP.dump`` raises; the original leaked the handle on error.
    """
    with open(filePath, "wb") as file_obj:
        cP.dump(obj, file_obj)
# Load a previously persisted object from disk with pickle.
def loadObj(filePath):
    """Deserialize and return the object stored at *filePath*.

    Fix: the original opened the file and never closed it; the context
    manager guarantees the handle is released.
    """
    with open(filePath, "rb") as file_obj:
        return cP.load(file_obj)
##############################################
# Pipeline (Python 2 script): read an AR.Drone IMU log, compute a sliding
# time-weighted mean of the accelerometer over `groupnum` samples, subtract
# the gravity/bias mean, double-integrate acceleration -> velocity ->
# displacement (trapezoid rule), then plot the results with matplotlib.
# Columns are selected by header name; acc is in mg, gyro in deg/s
# (per the column labels); timestamps are day_h_m_ms, deltas in seconds.
giventype1 = ['AccX_phys_filt [mg]','AccY_phys_filt [mg]','AccZ_phys_filt [mg]']
giventype2 = ['GyroX_phys_filt [deg/s]','GyroY_phys_filt [deg/s]','GyroZ_phys_filt [deg/s]']
giventype = giventype1 + giventype2
#f1 = open('/home/ml/Software/ARDrone_SDK_2_0_1/Example/Linux/Build/Realse/mesures_20160715_164903.txt','rb')
#f1 = open('mesures_20160715_164903.txt','rb')
f1 = open('/home/ml/Software/ARDrone_SDK_2_0_1/Examples/Linux/Build/Release/mesures_stable_20160730_153349.txt','rb')
lines=f1.readlines();
f1.close()
count = 0
indexRecord = []
# Second line of the log holds the ';'-separated column headers.
Infoline = lines[1].split(';')
for j in range(len(Infoline)):
    for i in range(len(giventype)):
        if (giventype[i] == Infoline[j].strip(' ')):# strip leading/trailing spaces
            indexRecord.append(j)
            break;
timerecord=[]
deltatime = []
data = []#[giventype] #[]
groupnum = 10 # number of samples per sliding average window
sumdata = [0,0,0]
sumtime = [0]
meandata=[0,0,0]
for i in range(2,len(lines)):
    temp = []
    linedata = lines[i].split(';')
    # Timestamp format: day_h_m_ms
    timerecord.append(linedata[0].split('_'))
    ####################################
    if ( i == 2 ) :# day_h_m_ms
        # First sample has no predecessor, so its delta is 0.
        deltatime.append(float(0))
    elif(timerecord[i-2][0] == timerecord[i-3][0] and timerecord[i-2][1] == timerecord[i-3][1] and timerecord[i-2][2] == timerecord[i-3][2]):
        # Same day/hour/minute: delta from the ms field only (-> seconds).
        delta = (float(timerecord[i-2][3])- float(timerecord[i-3][3]))*0.001
        deltatime.append(delta)
       # if (delta*0.001>10 or delta*0.001==0): print i-2,'delta',delta
    elif(timerecord[i-2][0] == timerecord[i-3][0] and timerecord[i-2][1] == timerecord[i-3][1]):
        # Minute rollover: add 60 s per minute crossed.
        delta = (float(timerecord[i-2][3])-float(timerecord[i-3][3]))*0.001+(float(timerecord[i-2][2])-float(timerecord[i-3][2]))*60
        if (delta*0.001>10 or delta*0.001==0): print i-2,delta*0.001
        deltatime.append(delta)
    ######################################
    # Collect the selected sensor columns for this row.
    for index in indexRecord:
        temp.append(float(linedata[index]))
    data.append(temp)
    #######################################
    # Sliding time-weighted mean over the acc columns (first three).
    if ( i == 2 ):
       # sumdata = np.add(sumdata,np.array(data)[0:,0:3])
        #meandata = sumdata
        continue # first sample is left untouched
    elif ( i == 3 ):
    #elif ( i < 3 + groupnum ):
        sumdata = np.vstack(( sumdata, np.add(sumdata, np.array(data)[i-2,0:3]*(deltatime[i-2])) ))
        sumtime = np.vstack((sumtime,np.add(sumtime,np.array(deltatime)[i-2]) ))
        if sumtime[i-2] == 0 :
            meandata = np.vstack((meandata,np.array(data)[i-2,0:3]))
        else:
            meandata = np.vstack((meandata,(sumdata[i-2] / sumtime[i-2])))
    elif ( i < 3 + groupnum ):
        # Window still filling: accumulate only.
        sumdata = np.vstack((sumdata,np.add(sumdata[i-3],np.array(data)[i-2,0:3]*(deltatime[i-2]))))
        sumtime = np.vstack((sumtime,np.add(sumtime[i-3],np.array(deltatime)[i-2])))
        if sumtime[i-2] == 0 :
            meandata = np.vstack((meandata,np.array(data)[i-2,0:3]))
        else:
            meandata = np.vstack((meandata,(sumdata[i-2] / sumtime[i-2])))
    #elif ( i < 4+ groupnum ):
    else:
        # Full window: add the new sample, drop the one leaving the window.
        sumdata = np.vstack((sumdata,np.add(sumdata[i-3],np.array(data)[i-2,0:3]*(deltatime[i-2])) - np.array(data)[i-2-groupnum ,0:3]*(deltatime[i-2-groupnum]) ))
        sumtime = np.vstack((sumtime,np.add(sumtime[i-3],np.array(deltatime)[i-2]) - np.array(deltatime)[i-2-groupnum] ))
        meandata = np.vstack((meandata,(sumdata[i-2] / sumtime[i-2])))
print np.shape(data)
###########################################################################
#np.savetxt('XYZacc_160705__205110_right.txt',data,fmt='%s',delimiter=' ')
############################################################################
dataOri = np.array(data).copy()
data = np.array(meandata)#np.array(data)
datainUse = data.copy()#/1000
##########################################
# Estimate the constant bias/gravity component per axis and subtract it.
# NOTE(review): X uses only rows 1:10 while Y/Z use all rows -- confirm
# this asymmetry is intentional.
meanaccX = np.mean(data[1:10,0].T)#-35.854862327
print meanaccX
#-4.88260384226 #-26.6490947569 #- #12.2746653721
meanaccY =np.mean(data[1:,1].T)# 33.159057149
#67.7608625058#70.472140655##69.4756070888
meanaccZ =np.mean(data[1:,2].T)#-1031.950#-1028.0009666
#-1031.31441295#-#-1025.56600925# -1004.12261109
#-1003.13 #-1001.37242649
length = len(data)
meanaccX = np.tile(meanaccX,length)
meanaccY = np.tile(meanaccY,length)
meanaccZ = np.tile(meanaccZ,length)
meanacc = np.vstack((meanaccX,meanaccY, meanaccZ)).T
print np.shape(meanacc)
datainUse[:,0:3] = datainUse[:,0:3].copy() - meanacc
datainUse[0] = [0,0,0]# force the first sample to zero
##########################################
# Trapezoidal integration: acc -> vel -> displacement.
acc= datainUse[:,0:3].copy()
vel = datainUse[:,0:3].copy()
s = datainUse[:,0:3].copy()
time = 0.01
for i in range(1,len(datainUse)):
    #vel[i] = np.add( vel[i]* deltatime[i], vel[i-1] )
    #curV = acc[i]
    curV=np.add (acc[i], acc[i-1])/2
    vel[i] = np.add( curV* deltatime[i], vel[i-1] )
    ############################3
    #s[i] = np.add( vel[i]* deltatime[i], s[i-1] )
    #curS=vel[i]
    curS=np.add (vel[i], vel[i-1])/2
    s[i] = np.add( curS* deltatime[i], s[i-1] )
print 's_mean=',np.mean(s[:,2].T)
print 'accX=',np.mean(data[1:,0].T),' accY=',np.mean(data[1:,1].T),' accZ=',np.mean(data[1:,2].T)
print 'accX_minusMean=',np.mean(datainUse[1:,0].T),' accY_minusMean=',np.mean(datainUse[1:,1].T),' accZ_minusMean=',np.mean(datainUse[1:,2].T)
########################################################################
plt.figure('accXYZ_ori')
plt.plot(range(len(data[:,0].T)), dataOri[:,0].T,'b.-',label='accX') #[:,8:11]
plt.plot(range(len(data[:,1].T)), dataOri[:,1].T,'r.-',label='accY')
plt.plot(range(len(data[:,2].T)), dataOri[:,2].T,'g.-',label='accZ')
####################################################
####################################################
plt.figure('accXYZ_Mean')
plt.plot(range(len(data[:,0].T)), data[:,0].T,'b.-',label='accX') #[:,8:11]
plt.plot(range(len(data[:,1].T)), data[:,1].T,'r.-',label='accY')
plt.plot(range(len(data[:,2].T)), data[:,2].T,'g.-',label='accZ')
plt.plot(range(len(deltatime)), deltatime,'c.-',label='accZ')
####################################################
plt.figure('accXYZ_minusMean')
plt.plot(range(len(data[:,0].T)), datainUse[:,0].T,'b.-',label='accX') #[:,8:11]
plt.plot(range(len(data[:,1].T)), datainUse[:,1].T,'r.-',label='accY')
plt.plot(range(len(data[:,2].T)), datainUse[:,2].T,'g.-',label='accZ')
#############################################
plt.figure('sZ')
plt.plot(range(len(s[:,2].T)), s[:,2].T,'g.-',label='s')
#############################################
'''
##############################################################################
plt.figure('GyrosXYZ')
plt.plot(range(len(data[:,3].T)), data[:,3].T,'b.-',label='GyrosX') #[:,8:11]
plt.plot(range(len(data[:,4].T)), data[:,4].T,'r.-',label='GyrosY')
plt.plot(range(len(data[:,5].T)), data[:,5].T,'g.-',label='GyrosZ')
###############################################################################
plt.figure('Acc-GyrosX')
plt.plot(range(len(data[:,0].T)), data[:,0].T,'b.-',label='accX') #[:,8:11]
plt.plot(range(len(data[:,3].T)), data[:,3].T,'r.-',label='GyrosX')
#################################################################################
plt.figure('Acc-GyrosY')
plt.plot(range(len(data[:,1].T)), data[:,1].T,'b.-',label='accY') #[:,8:11]
plt.plot(range(len(data[:,4].T)), data[:,4].T,'r.-',label='GyrosY')
#################################################################################
plt.figure('Acc-GyrosZ')
plt.plot(range(len(data[:,2].T)), data[:,2].T,'c.-',label='accY') #[:,8:11]
plt.plot(range(len(data[:,5].T)), data[:,5].T,'m.-',label='GyrosY')
#'''
###############################################################################
fig = plt.figure('3D_s')# Axes3D usage depends on the matplotlib version
#ax=plt.subplot(111,projection='3d') # create a 3D plot
ax = Axes3D(fig)
#plt.plot_surface(s[:,0].T, s[:,1].T,s[:,1].T, rstride=1, cstride=1, cmap='rainbow')
ax.scatter(s[:,0],s[:,1],s[:,2],c='y') # scatter the displacement points
ax.set_zlabel('Z') # axis labels
ax.set_ylabel('Y')
ax.set_xlabel('X')
plt.show()
##############################################################
#'''
"yootu@qq.com"
] | yootu@qq.com |
25722449e337fcc1a439b9924f6d59afced074b5 | 72575d5be68406befde886019844f6b8fab3ae5b | /renders/stories-list.py | 26700bb704e76fa4ecc95c511dba920f417c4071 | [] | no_license | rlucca/icescrum-cl | 24a755f3d8e9a50898e0f7d853641b2e4769b8f9 | e801303b1b762fe3d78527d962d3631fa9d79997 | refs/heads/master | 2021-01-10T20:26:37.723903 | 2014-10-09T16:47:30 | 2014-10-09T16:51:04 | 22,731,672 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | #!/usr/bin/python
import json
from sys import stdin, argv
data = json.load(stdin)
story_state = { 1: "Suggested",
2: "Accepted",
3: "Estimated",
4: "Planned",
5: "In progress",
7: "Done" };
interested_states = []
for number in argv[1:]:
try:
interested_states.append(int(number))
except:
pass
EM_ANDAMENTO=5
if len(interested_states) == 0:
interested_states.append(EM_ANDAMENTO)
data_filtered = []
for x in data:
try:
if x['state'] in interested_states:
data_filtered.insert(0, "%d - %s - Tasks: %d" % (x['id'], x['name'], len(x['tasks'])))
#else:
# data_filtered.append("%d - %s - State: %s" % (x['id'], x['name'], story_state[x['state']]))
except TypeError:
continue
for line in data_filtered:
print line
#print len(data_filtered)
| [
"rlucca@gmail.com"
] | rlucca@gmail.com |
0907267e98b96a3bfb69062100eb901fb42b8d3d | f7463bd0ab18b41611d5ac725f65d3db3a3a7a1d | /Generation Python - A Beginner's Course/13_Functions/13.5(return_v2)/7.py | 05f6fc472054d62e8f0ac4d289c449cf867dab43 | [] | no_license | Sergey-Laznenko/Stepik | f81c5aeead3fbd20628129d60ccce92b34724b97 | 5e1a1a76c3f6ed487cf8fc847913c890c8eac840 | refs/heads/master | 2022-12-28T19:01:48.670540 | 2020-10-18T15:23:58 | 2020-10-18T15:23:58 | 279,022,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | def is_palindrome(a):
if str(a) == str(a)[::-1]:
return True
else:
return False
def is_prime(b):
if b in (0, 1):
return False
if b % 2 == 0:
return False
for i in range(3, round(b ** (1 / 2) + 1), 2):
if b % i == 0:
return False
return True
def is_even(c):
    """Return True if *c* is an even integer."""
    return c % 2 == 0
# Read a "palindrome:prime:even" triple and report whether all three checks
# agree.  The second and third fields must be converted to int: the original
# passed raw strings into is_prime/is_even, where ``'13' % 2`` is string
# formatting and raises TypeError.
pws = input().split(':')
a = pws[0]          # palindrome check works on the string form directly
b = int(pws[1])     # numeric primality check
c = int(pws[2])     # numeric parity check
if is_palindrome(a) == is_prime(b) == is_even(c):
    print('True')
else:
    print('False')
| [
"Laznenko.Sergey@gmail.com"
] | Laznenko.Sergey@gmail.com |
36281c66608fe8e93f8a23404e7d95bc69692daf | 868984471bfb35bd1879a76c2bbabcb5ecbd739c | /src/core/config.py | 1e678805db643ae1852a769d4369503c88d3c4d3 | [] | no_license | i65535/ADServer | d1f2ca43d77593f6469e480ba5240ff09412abf2 | e3b57fb8fc913ade0ddb38312f19465f506bcb8b | refs/heads/master | 2020-04-06T07:00:03.615446 | 2016-08-22T10:21:30 | 2016-08-22T10:21:30 | 65,715,618 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | # -*- coding: utf-8 -*-
# Copyright (c) 20016-2016 The i65535.
# See LICENSE for details.
from common.range import DigitRange, strRange
class SysConfig(object):
    """Range-validated system configuration defaults.

    Each attribute is a ``DigitRange(default, min, max)`` or
    ``strRange(default, min_len, max_len)`` value object from
    ``common.range``.
    """

    RequestOffset = DigitRange(30,5,300)  # max allowed request clock skew vs. server time
    RequestTimeout = DigitRange(30,30,60)  # timeout when communicating with a cloud node
    XMLRPCPort = DigitRange(8080,4000,65535)  # port of the server's XML-RPC service
    HeartInterval = DigitRange(60,30,120)  # heartbeat interval to extension nodes
    SessionTimeout = DigitRange(60,10,720)  # admin-console session timeout (minutes)
    CheckStockInterval = DigitRange(3,1,60)  # interval between restocking checks
    CloudBootSign = strRange("CloudBoot",1,60)  # marker string for the CloudBoot template

    # Maps persisted config-store keys to their range objects.
    # NOTE(review): HeartInterval has no entry here — confirm whether that is
    # intentional or an omission.
    store = {
        "time_offset":RequestOffset,
        "request_timeout":RequestTimeout,
        "server_port":XMLRPCPort,
        "session_timeout":SessionTimeout,
        "check_stock_interval":CheckStockInterval,
        "cloud_boot_sign":CloudBootSign
    }
"shaojun_d@126.com"
] | shaojun_d@126.com |
027433a6dadf7ef800139f0f080e7e4691a43441 | e4dad14988885980cad43c7a9c66b3c4a80db7b8 | /geopic/tests/test_picinfo.py | b5148cee09813f87ba7818db3c9823bf534fd1cc | [] | no_license | gizmux/geopic | 1d513e232811e7587ba8a420ceebf400f0e14d68 | f58810df96d7f539cb115c9605650baabec22925 | refs/heads/master | 2021-01-20T08:41:08.902472 | 2017-05-23T22:59:25 | 2017-05-23T22:59:25 | 90,178,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | from unittest import TestCase
from datetime import datetime
import geopic
class TestPicInfo(TestCase):
def test_bad_file(self):
pInfo = geopic.PicInfo("doesntexist.jpg")
self.assertFalse(pInfo.isValid())
def test_simple_construction(self):
pInfo = geopic.PicInfo("./geopic/tests/pic_crete.jpg")
self.assertTrue(pInfo.isValid())
def test_dateTime_extraction(self):
pInfo = geopic.PicInfo("./geopic/tests/pic_crete.jpg")
expectedDateTime = datetime(2016, 9, 7, 13, 55, 34)
self.assertTrue(expectedDateTime == pInfo.dateTime())
def test_coordinate_extraction(self):
pInfo = geopic.PicInfo("./geopic/tests/pic_crete.jpg")
expectedCoordinates = (24.545555555555556, 35.12)
self.assertTrue(expectedCoordinates == pInfo.coordinates())
def test_location_extraction(self):
pInfo = geopic.PicInfo("./geopic/tests/pic_crete.jpg")
expectedLocation = geopic.Location(24.545555555555556, 35.12)
self.assertTrue(expectedLocation == pInfo.location())
| [
"gizmux@gmail.com"
] | gizmux@gmail.com |
634d025a9b80404544a7aa474771e130e66cb7b9 | 8d93d7c18d7568bd8c9bb9a6745f212e73e722ae | /polybar/.config/polybar/weather.py | 07562caa92d84c9f8111cc30d8928185e7cd661d | [
"MIT"
] | permissive | thornmir/dotfiles | 7bdc3af65e93d29409bc442deb7c10a7ba87a0b4 | 7a5151e614a28703afb9cd24b1df588f03032141 | refs/heads/master | 2021-09-02T00:41:55.671616 | 2017-12-29T12:05:35 | 2017-12-29T12:05:35 | 104,068,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | #!/usr/bin/env python
import json
import urllib
import urllib.parse
import urllib.request
import os
def main():
    """Return a short weather summary string for a polybar widget.

    Reads the city name and the OpenWeatherMap API key from
    ``~/.config/polybar``, queries the current-weather endpoint, and returns
    "<Description>, <temp>°C" — or a short error string on any failure.
    """
    try:
        with open(os.path.join(
                os.path.expanduser("~"),
                '.config',
                'polybar',
                'city')) as f:
            city = f.readline().strip()
        with open(os.path.join(
                os.path.expanduser("~"),
                '.config',
                'polybar',
                'weather.json')) as f:
            api_key = json.load(f)['api_key']
    except (OSError, KeyError, ValueError):
        # Missing/unreadable config file, absent 'api_key', or malformed JSON.
        return ' Cannot read city'
    try:
        data = urllib.parse.urlencode({'q': city, 'appid': api_key})
        weather = json.loads(urllib.request.urlopen(
            'http://api.openweathermap.org/data/2.5/weather?' + data,
            timeout=10)  # don't hang the status bar forever on a dead network
            .read())
        desc = weather['weather'][0]['description'].capitalize()
        # The API returns Kelvin; 0 °C = 273.15 K (was 272.15 — one degree off).
        temp = int(float(weather['main']['temp']) - 273.15)
        return '{}, {}°C'.format(desc, temp)
    except Exception:
        # Network failure or unexpected payload — report instead of crashing.
        return ' Server unreachable'


if __name__ == "__main__":
    print(main())
| [
"chaosteil@gmail.com"
] | chaosteil@gmail.com |
71574601ac2b63d3341288b90ea931c5e3941b71 | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/scikits/statsmodels/tools/decorators.py | b67ab7f9182886af449a828b1f8d2348ab11ea16 | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 7,973 | py | from numpy.testing import *
import warnings
__all__ = ['resettable_cache','cache_readonly', 'cache_writable']
class CacheWriteWarning(UserWarning):
    """Warning issued when assigning to a read-only cached attribute."""
    pass
class ResettableCache(dict):
    """A dict whose entries may depend on one another.

    ``reset`` maps a key to the keys that depend on it.  Setting key A
    resets every dependent of A to None (recursively through their own
    dependents), and deleting A deletes every dependent of A.  The
    dependency map is kept in the private ``_resetdict`` attribute.

    Parameters
    ----------
    reset : dict, optional
        Maps each key to a sequence of dependent keys.
    **items :
        Initial contents of the cache.

    Examples
    --------
    >>> cache = resettable_cache(a=0, b=1, c=2,
    ...                          reset=dict(a=('b',), b=('c',)))
    >>> cache['a'] = 1          # resets 'b', which in turn resets 'c'
    >>> cache == dict(a=1, b=None, c=None)
    True
    >>> del cache['a']          # cascades: deletes 'b', then 'c'
    >>> cache == {}
    True
    """

    def __init__(self, reset=None, **items):
        self._resetdict = reset or {}
        dict.__init__(self, **items)

    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        # Invalidate everything that depends on this key (recursive through
        # the dependents' own __setitem__ calls).
        for dependent in self._resetdict.get(key, []):
            self[dependent] = None

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Remove everything that depends on this key, recursively.
        for dependent in self._resetdict.get(key, []):
            del self[dependent]


resettable_cache = ResettableCache
class CachedAttribute(object):
    """Read-only caching descriptor.

    On first access the wrapped function is evaluated and its result stored
    in the owner instance's cache dict (``_cache`` by default; created as a
    ResettableCache when absent).  Subsequent accesses return the cached
    value.  Assignment emits a CacheWriteWarning instead of overwriting.
    """

    def __init__(self, func, cachename=None, resetlist=None):
        self.fget = func
        self.name = func.__name__
        self.cachename = cachename or '_cache'
        self.resetlist = resetlist or ()

    def __get__(self, obj, type=None):
        if obj is None:
            # Class-level access: expose the raw function for introspection.
            return self.fget
        # Get the cache, installing a default one on the instance if needed.
        _cachename = self.cachename
        _cache = getattr(obj, _cachename, None)
        if _cache is None:
            setattr(obj, _cachename, resettable_cache())
            _cache = getattr(obj, _cachename)
        name = self.name
        _cachedval = _cache.get(name, None)
        if _cachedval is None:
            # First access (or reset): compute and store the value.
            _cachedval = self.fget(obj)
            try:
                _cache[name] = _cachedval
            except KeyError:
                setattr(_cache, name, _cachedval)
            # Register dependent entries so they get reset on change.
            # (Was ``if resetlist is not ()`` — an identity comparison with a
            # literal, which is implementation-defined and a SyntaxWarning on
            # modern CPython; a truthiness test is the intended semantics.)
            if self.resetlist:
                try:
                    _cache._resetdict[name] = self.resetlist
                except AttributeError:
                    # Plain dict caches have no _resetdict; skip registration.
                    pass
        return _cachedval

    def __set__(self, obj, value):
        errmsg = "The attribute '%s' cannot be overwritten" % self.name
        warnings.warn(errmsg, CacheWriteWarning)
class CachedWritableAttribute(CachedAttribute):
    """Variant of CachedAttribute whose cached value may be overwritten."""

    def __set__(self, obj, value):
        cache = getattr(obj, self.cachename)
        try:
            cache[self.name] = value
        except KeyError:
            setattr(cache, self.name, value)
class _cache_readonly(object):
"""
Decorator for CachedAttribute
"""
def __init__(self, cachename=None, resetlist=None):
self.func = None
self.cachename = cachename
self.resetlist = resetlist or None
def __call__(self, func):
return CachedAttribute(func,
cachename=self.cachename,
resetlist=self.resetlist)
cache_readonly = _cache_readonly()
class cache_writable(_cache_readonly):
    """Decorator factory producing writable CachedWritableAttribute descriptors."""

    def __call__(self, func):
        return CachedWritableAttribute(
            func, cachename=self.cachename, resetlist=self.resetlist)
#this has been copied from nitime a long time ago
#TODO: ceck whether class has change in nitime
class OneTimeProperty(object):
    """A descriptor to make special properties that become normal attributes.

    On first access the wrapped method is evaluated and the result is stored
    directly on the instance under the method's name; since this is a
    non-data descriptor, later accesses find the instance attribute and the
    method is never called again.

    This is meant to be used mostly by the auto_attr decorator in this module.

    Author: Fernando Perez, copied from nitime
    """

    def __init__(self, func):
        """Create a OneTimeProperty instance.

        Parameters
        ----------
        func : method
            The method that will be called the first time to compute a value.
            Afterwards, the method's name will be a standard attribute holding
            the value of this computation.
        """
        self.getter = func
        # ``func.func_name`` was Python-2-only; ``__name__`` exists on both
        # Python 2 and 3 and is the portable spelling.
        self.name = func.__name__

    def __get__(self, obj, type=None):
        """This will be called on attribute access on the class or instance. """
        if obj is None:
            # Being called on the class, return the original function. This
            # way, introspection works on the class.
            return self.getter
        val = self.getter(obj)
        # Shadow the descriptor with the computed value on the instance.
        setattr(obj, self.name, val)
        return val
# Smoke tests, executed only when the module is run as a script.
# NOTE: Python 2 print-statement syntax throughout.
if __name__ == "__main__":
    ### Tests resettable_cache ----------------------------------------------------
    reset = dict(a=('b',), b=('c',))
    cache = resettable_cache(a=0, b=1, c=2, reset=reset)
    assert_equal(cache, dict(a=0, b=1, c=2))
    #
    print "Try resetting a"
    cache['a'] = 1
    assert_equal(cache, dict(a=1, b=None, c=None))
    cache['c'] = 2
    assert_equal(cache, dict(a=1, b=None, c=2))
    cache['b'] = 0
    assert_equal(cache, dict(a=1, b=0, c=None))
    #
    print "Try deleting b"
    del(cache['a'])
    assert_equal(cache, {})
    ### ---------------------------------------------------------------------------

    # Exercise the descriptors through a small example class.
    class Example(object):
        #
        def __init__(self):
            self._cache = resettable_cache()
            self.a = 0
        #
        @cache_readonly
        def b(self):
            return 1
        @cache_writable(resetlist='d')
        def c(self):
            return 2
        @cache_writable(resetlist=('e', 'f'))
        def d(self):
            return self.c + 1
        #
        @cache_readonly
        def e(self):
            return 4
        @cache_readonly
        def f(self):
            return self.e + 1

    ex = Example()
    print "(attrs : %s)" % str(ex.__dict__)
    print "(cached : %s)" % str(ex._cache)
    print "Try a :", ex.a
    print "Try accessing/setting a readonly attribute"
    assert_equal(ex.__dict__, dict(a=0, _cache={}))
    print "Try b #1:", ex.b
    b = ex.b
    assert_equal(b, 1)
    assert_equal(ex.__dict__, dict(a=0, _cache=dict(b=1,)))
    # assert_equal(ex.__dict__, dict(a=0, b=1, _cache=dict(b=1)))
    # Writing a readonly attribute only warns; the cache keeps the old value.
    ex.b = -1
    print "Try dict", ex.__dict__
    assert_equal(ex._cache, dict(b=1,))
    #
    print "Try accessing/resetting a cachewritable attribute"
    c = ex.c
    assert_equal(c, 2)
    assert_equal(ex._cache, dict(b=1, c=2))
    d = ex.d
    assert_equal(d, 3)
    assert_equal(ex._cache, dict(b=1, c=2, d=3))
    # Writing 'c' resets its dependents ('d', then 'e' and 'f' through 'd').
    ex.c = 0
    assert_equal(ex._cache, dict(b=1, c=0, d=None, e=None, f=None))
    d = ex.d
    assert_equal(ex._cache, dict(b=1, c=0, d=1, e=None, f=None))
    ex.d = 5
    assert_equal(ex._cache, dict(b=1, c=0, d=5, e=None, f=None))
| [
"tbutler.github@internetalias.net"
] | tbutler.github@internetalias.net |
2ce54eb4cec1a31e0f060c295607f8016a614d45 | 6129da29826ac7ffc386ab176d6a26dfaf971199 | /models/prep_data.py | 35cf710e46b48dadb76b3598cdf5a658c8457d59 | [] | no_license | vikrant4k/AppDetection | 08b068b7f05758bf24a0e3ce3c7ff26e8d1333e4 | 8e9323545faada2b802d26bb148fb98f16ee5277 | refs/heads/master | 2020-03-18T20:32:00.573679 | 2018-06-22T09:43:14 | 2018-06-22T09:43:14 | 135,221,973 | 0 | 0 | null | 2018-06-12T12:04:04 | 2018-05-29T00:36:54 | Java | UTF-8 | Python | false | false | 3,589 | py | import glob
import pandas as pd
from datetime import datetime
import requests
import json
from collections import Counter
import constants
from api_key import places_API_key
from lat_lon_handler import get_lat_lon_distance
# Toggle for issuing live Google Places requests (kept off to save quota;
# get_location_type() returns "NO REQUEST" while this is False).
make_request = False
# Active user whose ./data/<user> directory is processed.
user = constants.user
def read_all_csv_in_dir(path):
    """Read every ``*.csv`` directly under *path* into one DataFrame.

    Each file's columns are renamed to the canonical ``constants.cols``
    schema before concatenation.  Returns an empty DataFrame (with the
    canonical columns) when the directory holds no CSV files — the original
    called ``pd.concat([])``, which raises ValueError.
    """
    all_files = glob.glob(path + "/*.csv")
    df_list = []
    for file in all_files:
        df = pd.read_csv(file, index_col=None, header=0)
        df.columns = constants.cols  # normalize headers to the shared schema
        df_list.append(df)
    if not df_list:
        return pd.DataFrame(columns=constants.cols)
    return pd.concat(df_list)
def discover_launcher(app_name):
    """Collapse any known launcher package name into the generic launcher label."""
    if app_name not in constants.launcher_types:
        return app_name
    return constants.launcher_string
def get_time(timestamp):
    """Format a millisecond epoch timestamp using the project's timestamp format."""
    seconds = timestamp / 1000
    return datetime.fromtimestamp(seconds).strftime(constants.timestamp_format)
def get_closest_loc_type(res_list, lat1, lon1):
    """Return the primary Places type of the result nearest to (lat1, lon1).

    Assumes ``res_list`` is non-empty (the caller handles IndexError for the
    empty case).
    """
    closest_idx = 0
    # Was hard-coded 10000: if every candidate were farther than that, the
    # first result would win silently.  Infinity makes the minimum exact.
    closest_dist = float('inf')
    for i, res in enumerate(res_list):
        loc = res['geometry']['location']
        lat2, lon2 = loc['lat'], loc['lng']
        dist = get_lat_lon_distance(lat1, lon1, lat2, lon2)
        if dist < closest_dist:
            closest_dist = dist
            closest_idx = i
    return res_list[closest_idx]['types'][0]
def get_location_type(lat, long, cached):
    """Resolve a (lat, long) pair to a Google Places location type.

    Results are memoized in the caller-supplied ``cached`` dict (mutated in
    place).  Returns "NA" for the (0, 0) null island / empty-result case and
    "NO REQUEST" when live requests are disabled via the module-level
    ``make_request`` flag.
    """
    if lat == 0 and long == 0:
        print("Lat long were both 0")
        return "NA"
    if (lat, long) in cached:
        return cached[(lat, long)]
    if not make_request:
        # Live querying disabled — see the module-level make_request flag.
        return "NO REQUEST"
    print("Making request")
    req = requests.get("https://maps.googleapis.com/maps/api/place/nearbysearch/json?parameters",\
        params = {
            'key' : places_API_key,
            'location' : "{},{}".format(lat, long),
            'radius' : "{}.0".format(constants.radius),
        })
    try:
        loc_type = get_closest_loc_type(req.json()['results'], lat, long)
    except IndexError:
        # No results returned for this location.
        loc_type = "NA"
    cached[(lat, long)] = loc_type
    return loc_type
def apply_most_common_loc_type(df):
    """Relabel the most frequently visited (lat, long) pair as "home".

    The DataFrame is modified in place: rows at the modal coordinate get
    location_type "home"; all other rows keep their existing value.
    """
    visit_counts = Counter((row.lat, row.long) for _, row in df.iterrows())
    home_coords = visit_counts.most_common(1)[0][0]
    print(home_coords)
    # For now we assume that the most frequent location is home.
    df['location_type'] = df.apply(
        lambda row: "home" if (row.lat, row.long) == home_coords
        else row.location_type,
        axis=1)
def apply_time_cluster(df):
    """Assign a session number to every row, splitting sessions on time gaps.

    Rows (assumed chronologically ordered) belong to the same session until
    the gap to the previous row exceeds ``constants.time_cluster_interval``
    minutes; then the session counter advances.  Adds a ``session_nr``
    column keyed on the row's timestamp string.
    """
    session_of = {}
    current_session = 0
    previous = datetime.strptime(df.iloc[0].timestamp, constants.timestamp_format)
    for _, row in df.iterrows():
        current = datetime.strptime(row.timestamp, constants.timestamp_format)
        gap_minutes = int(round((current - previous).total_seconds() / 60))
        if gap_minutes > constants.time_cluster_interval:
            current_session += 1
        session_of[row.timestamp] = current_session
        previous = current
    df['session_nr'] = df['timestamp'].apply(lambda stamp: session_of[stamp])
def main():
    """Load all raw CSV exports for ``user``, clean/derive columns, and write
    the combined dataset to ./data/<user>/prepared_data/full_data.csv.
    """
    df = read_all_csv_in_dir("./data/{}".format(user))
    print("Total number of features:", len(constants.cols))
    print("Total number of rows", df.shape[0])
    df.columns = constants.cols
    df = df.drop_duplicates()
    df = df.sort_values(by=['timestamp'])  # Make sure the data is chronological
    # (Removed the unused ``cached_loc_types`` local: nothing in main() calls
    # get_location_type, so the cache dict was dead code.)
    df['timestamp'] = df['timestamp'].apply(get_time)
    df['activity_type'] = df['activity_type'].apply(lambda n: constants.int2activity[n])
    df['brightness_level'] = df['brightness_level'].apply(lambda x: x / 40000)
    df['app_name'] = df['app_name'].apply(discover_launcher)
    apply_time_cluster(df)
    df.to_csv("./data/{}/prepared_data/full_data.csv".format(user), index=False)


main()
| [
"stianste@stud.ntnu.no"
] | stianste@stud.ntnu.no |
75dd47dd930ad1027d4c8585d2a8d261b808fcf5 | a25bdc53f3e847ea27a067830c02f73adc0189d1 | /DES/Permutation.py | 43b2f3065e75e9857ae02bada8726a4e8cee2e76 | [] | no_license | vnpnh/Cryptography | 6bb157c0f4bdcc7bbdaf1a46312ff3146fbf4853 | 325b42756e5b14ca3db2901d80d9d04923908f1e | refs/heads/master | 2023-04-16T19:28:03.142061 | 2021-04-29T19:16:31 | 2021-04-29T19:16:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | #this permutation for key
# DES permutation tables (bit indices are 1-based, per the DES specification).
# p56bit: PC-1 — selects and permutes 56 of the 64 key bits (drops parity bits).
p56bit = [57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34,
          26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3,
          60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7,
          62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37,
          29, 21, 13, 5, 28, 20, 12, 4]

# p48bit: PC-2 — compresses the shifted 56-bit key to a 48-bit round key.
p48bit = [14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19,
          12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37,
          47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34,
          53, 46, 42, 50, 36, 29, 32]

# p64bit: IP — the initial permutation of the 64-bit message block
# (applied to the plaintext, not the key, despite its position here).
p64bit = [58, 50, 42, 34, 26, 18, 10, 2,
          60, 52, 44, 36, 28, 20, 12, 4,
          62, 54, 46, 38, 30, 22, 14, 6,
          64, 56, 48, 40, 32, 24, 16, 8,
          57, 49, 41, 33, 25, 17, 9, 1,
          59, 51, 43, 35, 27, 19, 11, 3,
          61, 53, 45, 37, 29, 21, 13, 5,
          63, 55, 47, 39, 31, 23, 15, 7]

# Message-side round tables.
# E48: E expansion — expands a 32-bit half-block to 48 bits for key mixing.
E48 = [32, 1, 2, 3, 4, 5,
       4, 5, 6, 7, 8, 9,
       8, 9, 10, 11, 12, 13,
       12, 13, 14, 15, 16, 17,
       16, 17, 18, 19, 20, 21,
       20, 21, 22, 23, 24, 25,
       24, 25, 26, 27, 28, 29,
       28, 29, 30, 31, 32, 1]

# E32: despite the name, these indices match the DES "P" permutation
# applied to the 32-bit S-box output.
E32 = [16, 7, 20, 21,
       29, 12, 28, 17,
       1, 15, 23, 26,
       5, 18, 31, 10,
       2, 8, 24, 14,
       32, 27, 3, 9,
       19, 13, 30, 6,
       22, 11, 4, 25]

# Final: IP^-1 — the final permutation, inverse of the initial permutation.
Final = [40, 8, 48, 16, 56, 24, 64, 32,
         39, 7, 47, 15, 55, 23, 63, 31,
         38, 6, 46, 14, 54, 22, 62, 30,
         37, 5, 45, 13, 53, 21, 61, 29,
         36, 4, 44, 12, 52, 20, 60, 28,
         35, 3, 43, 11, 51, 19, 59, 27,
         34, 2, 42, 10, 50, 18, 58, 26,
         33, 1, 41, 9, 49, 17, 57, 25]
| [
"noreply@github.com"
] | noreply@github.com |
69773f30b585638497431d4484f4dc00e07ca523 | 8bd0497ba99c7a80ae761267bc9e371371d12a6e | /twitter/init_db.py | 4f3dbc6cec20ec7dec00fd5d4c0bf9bc4b544812 | [] | no_license | TongTianUM/python-miscellany | f3df94114af91e3577ea885c62295ce9903064d7 | 81aa86647cd37acf76645db9870061cf1eb3b0d2 | refs/heads/master | 2021-08-23T13:40:32.414794 | 2017-12-05T03:20:41 | 2017-12-05T03:20:41 | 90,571,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import sqlite3
# When True, drop and recreate all tables (destructive reset).
reset = True

conn = sqlite3.connect('tweets.db')
cur = conn.cursor()

if reset:
    cur.execute("DROP TABLE IF EXISTS Tweets")
    cur.execute("CREATE TABLE Tweets (tweet_id INTEGER, tweet_text TEXT, likes INTEGER)")
    cur.execute("DROP TABLE IF EXISTS Hashtags")
    cur.execute("CREATE TABLE Hashtags (hashtag_id INTEGER PRIMARY KEY AUTOINCREMENT, hashtag_text TEXT, "
                "num_occurrences INTEGER)")
    cur.execute("DROP TABLE IF EXISTS Tweetsdetail")
    cur.execute("CREATE TABLE Tweetsdetail (tweet_id INTEGER, hashtag_id INTEGER)")

# Commit explicitly before closing instead of relying on sqlite3's
# version-dependent implicit-commit behavior around DDL statements.
conn.commit()
conn.close()
| [
"noreply@github.com"
] | noreply@github.com |
b2675f662be96e49ab8d4e0c301e40732a490cef | 24d070c6410fdf7212c4c37c2fadc932cd4e8aec | /trac/wiki/test.py | b5941785692974b8d7b29d2432aee83ae302c289 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | clubturbo/Trac-1.4.2 | 4f111e8df9e8007a0e02080bec560361b25fc11c | 254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78 | refs/heads/master | 2023-01-20T16:20:44.724154 | 2020-12-03T08:57:08 | 2020-12-03T08:57:08 | 317,922,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,528 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import difflib
import io
import os
import re
import unittest
# Python 2.7 `assertMultiLineEqual` calls `safe_repr(..., short=True)`
# which breaks our custom failure display in WikiTestCase.
# Monkeypatch unittest so safe_repr never truncates its output.
try:
    from unittest.util import safe_repr
except ImportError:
    pass
else:
    unittest.case.safe_repr = lambda obj, short=False: safe_repr(obj, False)
from trac.test import EnvironmentStub, MockRequest
from trac.util.datefmt import datetime_now, to_utimestamp, utc
from trac.util.text import strip_line_ws, to_unicode
from trac.web.chrome import web_context
from trac.wiki.formatter import (HtmlFormatter, InlineHtmlFormatter,
OutlineFormatter)
class WikiTestCase(unittest.TestCase):
    """One wiki-rendering fixture: formats `input` and compares to `expected`.

    Subclasses override `formatter()` (and optionally `generate_opts`) to
    exercise the different wiki formatter flavors.
    """

    # Extra keyword arguments passed to the formatter's generate().
    generate_opts = {}

    def __init__(self, title, input, expected, file, line,
                 setup=None, teardown=None, context=None, default_data=False,
                 enable_components=None, disable_components=None,
                 env_path='', destroying=False):
        unittest.TestCase.__init__(self, 'test')
        self.title = title
        self.input = input
        self.expected = expected
        # Report the .py source location even when loaded from a .pyc.
        if file.endswith('.pyc'):
            file = file.replace('.pyc', '.py')
        self.file = file
        self.line = line
        self._setup = setup
        self._teardown = teardown
        self._context = context
        self.context = None
        self._env_kwargs = {'default_data': default_data,
                            'enable': enable_components,
                            'disable': disable_components,
                            'path': env_path, 'destroying': destroying}

    def _create_env(self):
        """Build the stub environment with intertrac and safe-scheme config."""
        env = EnvironmentStub(**self._env_kwargs)
        # -- intertrac support
        env.config.set('intertrac', 'genshi.title', "Genshi's Trac")
        env.config.set('intertrac', 'genshi.url', "https://genshi.edgewall.org")
        env.config.set('intertrac', 't', 'trac')
        env.config.set('intertrac', 'th.title', "Trac Hacks")
        env.config.set('intertrac', 'th.url', "http://trac-hacks.org")
        # -- safe schemes
        env.config.set('wiki', 'safe_schemes',
                       'data,file,ftp,http,https,svn,svn+ssh,'
                       'rfc-2396.compatible,rfc-2396+under_score')
        return env

    def setUp(self):
        self.env = self._create_env()
        self.req = MockRequest(self.env, script_name='/')
        context = self._context
        if context:
            # A tuple is (realm, id, [version]) args for web_context.
            if isinstance(self._context, tuple):
                context = web_context(self.req, *self._context)
        else:
            # Default rendering context: the WikiStart page.
            context = web_context(self.req, 'wiki', 'WikiStart')
        self.context = context
        # Remove the following lines in order to discover
        # all the places were we should use the req.href
        # instead of env.href
        self.env.href = self.req.href
        self.env.abs_href = self.req.abs_href
        # Seed the wiki with an entry page so links to WikiStart resolve.
        self.env.db_transaction(
            "INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s)",
            ('WikiStart', 1, to_utimestamp(datetime_now(utc)), 'joe',
             '--', 'Entry page', 0))
        if self._setup:
            self._setup(self)

    def tearDown(self):
        self.env.reset_db()
        if self._teardown:
            self._teardown(self)

    def test(self):
        """Testing WikiFormatter"""
        formatter = self.formatter()
        v = unicode(formatter.generate(**self.generate_opts))
        v = v.replace('\r', '').replace(u'\u200b', '')  # FIXME: keep ZWSP
        v = strip_line_ws(v, leading=False)
        try:
            self.assertEqual(self.expected, v)
        except AssertionError as e:
            # Re-raise with a rich expected/actual/wiki/diff report plus the
            # fixture's file:line location.
            msg = to_unicode(e)
            match = re.match(r"u?'(.*)' != u?'(.*)'", msg)
            if match:
                g1 = ["%s\n" % x for x in match.group(1).split(r'\n')]
                g2 = ["%s\n" % x for x in match.group(2).split(r'\n')]
                expected = ''.join(g1)
                actual = ''.join(g2)
                wiki = repr(self.input).replace(r'\n', '\n')
                diff = ''.join(list(difflib.unified_diff(g1, g2, 'expected',
                                                         'actual')))
                # Tip: sometimes, 'expected' and 'actual' differ only by
                # whitespace, so it can be useful to visualize them, e.g.
                # expected = expected.replace(' ', '.')
                # actual = actual.replace(' ', '.')
                def info(*args):
                    return '\n========== %s: ==========\n%s' % args
                msg = info('expected', expected)
                msg += info('actual', actual)
                msg += info('wiki', ''.join(wiki))
                msg += info('diff', diff)
            raise AssertionError(  # See below for details
                '%s\n\n%s:%s: "%s" (%s flavor)' \
                % (msg, self.file, self.line, self.title, formatter.flavor))

    def formatter(self):
        """Return the formatter under test; subclasses override this."""
        return HtmlFormatter(self.env, self.context, self.input)

    def shortDescription(self):
        return 'Test ' + self.title
class OneLinerTestCase(WikiTestCase):
    """Renders the fixture with the inline (one-liner) HTML formatter."""

    def formatter(self):
        return InlineHtmlFormatter(self.env, self.context, self.input)
class EscapeNewLinesTestCase(WikiTestCase):
    """Renders the fixture with escape_newlines=True (line breaks preserved)."""

    generate_opts = {'escape_newlines': True}

    def formatter(self):
        return HtmlFormatter(self.env, self.context, self.input)
class OutlineTestCase(WikiTestCase):
    """Renders the fixture's document outline via OutlineFormatter."""

    def formatter(self):
        # Adapter giving OutlineFormatter the generate() interface that
        # WikiTestCase.test() expects.
        class Outliner(object):
            flavor = 'outliner'

            def __init__(self, env, context, input):
                self.outliner = OutlineFormatter(env, context)
                self.input = input

            def generate(self):
                out = io.StringIO()
                self.outliner.format(self.input, out)
                return out.getvalue()

        return Outliner(self.env, self.context, self.input)
def wikisyntax_test_suite(data=None, setup=None, file=None, teardown=None,
                          context=None, default_data=False,
                          enable_components=None, disable_components=None,
                          env_path=None, destroying=False):
    """Build a TestSuite from wiki-rendering fixtures.

    Fixtures are separated by lines of 30+ '=' characters (the remainder of
    that line is the test title) and internally split by lines of 30 '-'
    into up to five blocks: wiki input, expected page HTML, expected
    one-liner HTML, expected escape-newlines HTML, and expected outline.
    Empty blocks produce no test case; titles containing 'SKIP' or
    'WONTFIX' are ignored.  ``data`` takes precedence over reading ``file``.
    """
    suite = unittest.TestSuite()

    def add_test_cases(data, filename):
        tests = re.compile('^(%s.*)$' % ('=' * 30), re.MULTILINE).split(data)
        next_line = 1
        line = 0
        # tests[1::2] are the '=' title lines, tests[2::2] the fixture bodies.
        for title, test in zip(tests[1::2], tests[2::2]):
            title = title.lstrip('=').strip()
            if line != next_line:
                line = next_line
            if not test or test == '\n':
                continue
            # Track the source line of each fixture for failure reports.
            next_line += len(test.split('\n')) - 1
            if 'SKIP' in title or 'WONTFIX' in title:
                continue
            blocks = test.split('-' * 30 + '\n')
            if len(blocks) < 5:
                blocks.extend([None] * (5 - len(blocks)))
            input, page, oneliner, page_escape_nl, outline = blocks[:5]
            for cls, expected in [
                    (WikiTestCase, page),
                    (OneLinerTestCase, oneliner and oneliner[:-1]),
                    (EscapeNewLinesTestCase, page_escape_nl),
                    (OutlineTestCase, outline)]:
                if expected:
                    tc = cls(title, input, expected, filename, line,
                             setup, teardown, context, default_data,
                             enable_components, disable_components,
                             env_path, destroying)
                    suite.addTest(tc)

    if data:
        add_test_cases(data, file)
    else:
        if os.path.exists(file):
            with open(file, 'r') as fobj:
                data = fobj.read().decode('utf-8')
            add_test_cases(data, file)
        else:
            print('no ' + file)
    return suite
| [
"jonn@mindhunterx"
] | jonn@mindhunterx |
5e0f8927ea9d8093f9e7e55430b10f131efd58eb | 3a8f870abd1481187758be33720a10526ac0daa0 | /shopeelaptopreview.py | d6403306debd0b5f410d5ed790414efcd0ef8dc4 | [] | no_license | arief130598/scrapper | b734d842664a8183da501059647f5ee99c809f3f | 4d813700279d46fd6a38097c16eac519cf6db418 | refs/heads/master | 2022-12-17T05:08:50.385903 | 2020-01-16T04:29:18 | 2020-01-16T04:29:18 | 226,952,112 | 0 | 0 | null | 2022-12-08T03:26:48 | 2019-12-09T19:37:30 | Python | UTF-8 | Python | false | false | 3,298 | py | import datetime
import json
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
import pyodbc
# Selenium/Chrome scraper: walks the first 100 pages of a Shopee category,
# extracts product names from the JSON-LD <script> tags, and collects them
# in ``dataakhir`` (Indonesian: "final data").
chrome_options = Options()
# chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
# Spoof a real desktop browser (headers + session cookie) to avoid blocking.
chrome_options.add_argument("user-agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
                            "Chrome/77.0.3865.90 Safari/537.36")
chrome_options.add_argument("Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,"
                            "*/*;q=0.8,application/signed-exchange;v=b3")
chrome_options.add_argument("Accept-Encoding: gzip, deflate, br")
chrome_options.add_argument("Accept-Language: en-US,en;q=0.9,id-ID;q=0.8,id;q=0.7")
chrome_options.add_argument('cookie: _gcl_au=1.1.1823793498.1570803319; _fbp=fb.2.1570803319229.963816320; SPC_IA=-1; '
                            'SPC_F=lzdTdFn9lDaLVnKWRnboUIDX4Fk7ojc1; REC_T_ID=9494fbe0-ec31-11e9-a888-b496914fea38; '
                            '_gcl_aw=GCL.1572880920.Cj0KCQiAtf_tBRDtARIsAIbAKe12O8hCcDKowKHsotaObvhR'
                            '-1pGcw_K1PaJqoWsyYytuXUVV-KtSjkaAposEALw_wcB; _ga=GA1.3.1049602029.1572880972; '
                            '_gac_UA-61904553-8=1.1572880996.Cj0KCQiAtf_tBRDtARIsAIbAKe12O8hCcDKowKHsotaObvhR'
                            '-1pGcw_K1PaJqoWsyYytuXUVV-KtSjkaAposEALw_wcB; '
                            'cto_lwid=a20d9c0d-fb1b-4071-a893-7816fb16b75c; _med=affiliates; SPC_EC=-; SPC_U=-; '
                            'SPC_SI=o2jprtgr5ie9g907h0wvof4gi277y7qh; _gid=GA1.3.1661687575.1575346405; '
                            'csrftoken=iz7UQvbgu60crCYlaSBVRphT357auSj7; AMP_TOKEN=%24NOT_FOUND; '
                            'REC_MD_20=1575438194; _dc_gtm_UA-61904553-8=1; SPC_T_IV="7SoYJr/leMwZQwVgx8Yw9Q=="; '
                            'SPC_T_ID="zIQZlbAQ/rPOVv5hOdZRkRiAyc/ivMhK8cdMTwuy4NCfkIZICbCMeA4qXe5kDtB4TlK5ncnZ+D6'
                            '+SB6LBkaIE31yD6tFulQEpUEnZeemjHE="')

# NOTE: hard-coded local chromedriver path — machine-specific.
# driver = webdriver.Chrome(executable_path='/home/ubuntu/chromedriver', chrome_options=chrome_options)
driver = webdriver.Chrome(executable_path='/root/PycharmProjects/scrapper/chromedriver', chrome_options=chrome_options)

dataakhir = []
for a in range(0, 100):
    url = 'https://shopee.co.id/Makanan-Minuman-cat.157?page=' + a.__str__() + '&sortBy=pop'
    driver.get(url)
    time.sleep(2)
    # Scroll to the footer so lazy-loaded content renders; retry the page
    # load until the footer element is present.
    status = 1
    while status == 1:
        try:
            bottomscroll = driver.find_element_by_class_name('shopee-footer-section')
            actions = ActionChains(driver)
            actions.move_to_element(bottomscroll).perform()
            status = 0
        except Exception as e:
            print(e)
            driver.get(url)
            time.sleep(2)
    home = BeautifulSoup(driver.page_source, "lxml")
    data = home.findAll('script', type="application/ld+json")
    listproduk = []
    # The first two JSON-LD blocks are page/site metadata, not products.
    for x, i in enumerate(data):
        if x == 0 or x == 1:
            print(i)
        else:
            listproduk.append(json.loads(i.text))
    for i in listproduk:
        nama = i.get('name')
        print(nama)
        dataakhir.append(nama)
driver.close()
| [
"ariefmaulana130598@gmail.com"
] | ariefmaulana130598@gmail.com |
70cb3d09402bd71b84303f0fe648479b8846a4b2 | e93d1931789c99922a6b5ff3cf7e3bfe1c8bce3d | /blog/urls.py | feda7193e8da66168c2c798b1763fd13b33d3f73 | [] | no_license | nhutphong/djangoblog | 2653fcc34285788e7b34048acc7a078c88536c5c | e4bf2a0d43727c248b2a2006910a68063f99f186 | refs/heads/master | 2023-03-16T10:59:51.700275 | 2022-10-18T03:40:39 | 2022-10-18T03:40:39 | 237,549,725 | 1 | 0 | null | 2022-03-12T01:04:49 | 2020-02-01T02:23:09 | Python | UTF-8 | Python | false | false | 1,089 | py | from django.urls import path
from django.contrib.auth.decorators import login_required
from .views import (
ArticleListView,
ArticleCreateView,
ArticleDetailView,
ArticleUpdateView,
ArticleDeleteView,
PaginationListView,
SearchResultsView,
)
from . import views_filter
# Namespace for reversing URLs, e.g. reverse('articles:article-list').
app_name = 'articles'

# All routes below are mounted under the /blog/ prefix.
urlpatterns = [
    #/blog/
    path('demo/', views_filter.demo, name='demo-list'),
    path('filter/', views_filter.filter_test, name='filter-list'),
    path('pagination/', PaginationListView.as_view(), name='pagination-list'),
    path('timkiem/', SearchResultsView.as_view(), name='search-results'),
    path('', ArticleListView.as_view(), name='article-list'),
    path('create/', ArticleCreateView.as_view(), name='article-create'),
    # Slug-based detail/update/delete routes; 'create/' must stay above the
    # slug route so it is not captured as a slug.
    path('<slug:slug>/', ArticleDetailView.as_view(), name='article-detail'),
    path(
        '<slug:slug>/update/',
        ArticleUpdateView.as_view(),
        name='article-update'
    ),
    path(
        '<slug:slug>/delete/',
        ArticleDeleteView.as_view(),
        name='article-delete'
    )
]
"nhutphong@outlook.com"
] | nhutphong@outlook.com |
c43d2cdc8e070993b75e62cbe7d8896cd9a340c1 | d45d55eedabb71f940573d80e7179deee1a117a4 | /router.py | ddc3f2660129ce6b8f5ed05996a068eb314de597 | [] | no_license | Untou4able/c2j | 25c6b1d915ef82862077d3049e9aeb1c1f58241a | ea89fecb6d3016017aa0496171d95ff025caddc3 | refs/heads/master | 2020-06-04T03:32:06.626665 | 2018-05-18T04:30:20 | 2018-05-18T04:30:20 | 191,857,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | import filereader
import filters
class Router(object):
    """Parses a router configuration file via FileReader with two filters:
    one collecting interface definitions, one collecting static routes.
    """

    def __init__(self, f):
        # f: an open file-like object containing the router config.
        self._fileReader = filereader.FileReader(f)
        self._interfaceFilter = filters.InterfaceFilterPort()
        self._staticFilter = filters.StaticFilter()
        self._fileReader.addFilter(self._interfaceFilter)
        self._fileReader.addFilter(self._staticFilter)
        # Parse immediately so matches are available right after construction.
        self._fileReader.read()

    def __getitem__(self, key):
        # Dict-style lookup of a single interface by name (None if absent).
        return self._interfaceFilter.matches.get(key)

    @property
    def interfaces(self):
        # All interface matches collected during parsing.
        return self._interfaceFilter.matches

    @property
    def statics(self):
        # All static-route matches collected during parsing.
        return self._staticFilter.matches
# Ad-hoc smoke test: parse a sample config and dump its static routes.
if __name__ == '__main__':
    from pprint import pprint
    r = Router(open('/tmp/PE-006-01.krasnet.ru.cfg'))
    pprint(r.statics)
| [
"gg.russia.crew@gmail.com"
] | gg.russia.crew@gmail.com |
368dcffda7f96405c7f7b0b4251a2e5f2ce4c54d | 516796a4a2eef3ebf7c599418363373bb1ffcb0d | /student_school_administration_tool.py | 1daa7333eec7e75d0a009e552af059ffdbabb8c1 | [] | no_license | sriraj33/school_admin_tool | 477b14e1fcc14738dd4d6cc7c4ea75aa6615915c | 54e639115f83a5b8a61d9092a712cc97b3401b1e | refs/heads/master | 2022-12-06T08:58:14.584607 | 2020-08-19T13:30:46 | 2020-08-19T13:30:46 | 288,739,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | import csv
def write_into_csv(info_list):
with open("student_info.csv", "a", newline="")as csv_file:
writer = csv.writer(csv_file)
if csv_file.tell() == 0:
writer.writerow(["Name", "Age", "Number", "Email ID"])
writer.writerow(info_list)
if __name__ == "__main__":
condition = True
student_num = 1
while(condition):
student_info = input("Enter student infomation for student #{} in the following format (Name Age Contact_number Email_ID): ".format(student_num))
student_info_list = student_info.split(" ")
print("\nThe entered information is -\nName: {}\nAge: {}\nNunber: {}\nEmail_ID: {}".format(student_info_list[0], student_info_list[1], student_info_list[2], student_info_list[3]))
choice_check = input("Is the enterd information correct (yes/no): ")
if choice_check == "yes":
write_into_csv(student_info_list)
condition_check = input("Enter (yes/no) if you want to enter information for another student: ")
if condition_check == "yes":
condition = True
student_num = student_num + 1
else:
condition = False
elif choice_check == "no":
print("\nPlease re-enter the values")
| [
"noreply@github.com"
] | noreply@github.com |
ef2644e1c6043bf598e5787f0d0496d14ec53188 | 686c268c0c29a8adb24a7cce0bb7f3dd26dabb5f | /scrape_mars.py | 46a08f61b426e03f574575f1f3ff14f6656d8f79 | [] | no_license | r33nava/Mission-to-Mars | 9b4e8931be04525ed772ce9b8e252590410e54ca | 297340d631e80ed85032959dc35d2e7671d276d9 | refs/heads/master | 2020-04-25T01:26:02.161245 | 2019-05-05T04:17:22 | 2019-05-05T04:17:22 | 172,408,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,749 | py |
# coding: utf-8
#Imports & Dependencies
from splinter import Browser
from bs4 import BeautifulSoup
#Site Navigation
executable_path = {"executable_path": "/Users/r33nava/Downloads/chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
# Defining scrape & dictionary
def scrape():
final_data = {}
output = marsNews()
final_data["mars_news"] = output[0]
final_data["mars_paragraph"] = output[1]
final_data["mars_image"] = marsImage()
final_data["mars_weather"] = marsWeather()
final_data["mars_facts"] = marsFacts()
final_data["mars_hemisphere"] = marsHem()
return final_data
# # NASA Mars News
def marsNews():
news_url = "https://mars.nasa.gov/news/"
browser.visit(news_url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
article = soup.find("div", class_='list_text')
news_title = article.find("div", class_="content_title").text
news_p = article.find("div", class_ ="article_teaser_body").text
output = [news_title, news_p]
return output
# # JPL Mars Space Images - Featured Image
def marsImage():
image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(image_url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
image = soup.find("img", class_="thumb")["src"]
featured_image_url = "https://www.jpl.nasa.gov" + image
return featured_image_url
# # Mars Weather
def marsWeather():
import tweepy
# Twitter API Keys
def get_file_contents(filename):
try:
with open(filename, 'r') as f:
return f.read().strip()
except FileNotFoundError:
print("'%s' file not found" % filename)
consumer_key = get_file_contents('consumer_key')
consumer_secret = get_file_contents('consumer_secret')
access_token = get_file_contents('access_token')
access_token_secret = get_file_contents('access_token_secret')
# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
target_user = "MarsWxReport"
tweet = api.user_timeline(target_user, count =1)
mars_weather = ((tweet)[0]['text'])
return mars_weather
# # Mars Facts
def marsFacts():
import pandas as pd
facts_url = "https://space-facts.com/mars/"
browser.visit(facts_url)
mars_data = pd.read_html(facts_url)
mars_data = pd.DataFrame(mars_data[0])
mars_data.columns = ["Description", "Value"]
mars_data = mars_data.set_index("Description")
mars_facts = mars_data.to_html(index = True, header =True)
return mars_facts
# # Mars Hemispheres
def marsHem():
import time
hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemispheres_url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
mars_hemisphere = []
products = soup.find("div", class_ = "result-list" )
hemispheres = products.find_all("div", class_="item")
for hemisphere in hemispheres:
title = hemisphere.find("h3").text
title = title.replace("Enhanced", "")
end_link = hemisphere.find("a")["href"]
image_link = "https://astrogeology.usgs.gov/" + end_link
browser.visit(image_link)
html = browser.html
soup=BeautifulSoup(html, "html.parser")
downloads = soup.find("div", class_="downloads")
image_url = downloads.find("a")["href"]
dictionary = {"title": title, "img_url": image_url}
mars_hemisphere.append(dictionary)
return mars_hemisphere
| [
"noreply@github.com"
] | noreply@github.com |
a6ca5478a1318faad2f8dd58a438cf120afd7ab0 | 9b08d0b422df377a7890820c01ea3b5e266abd70 | /html_downloader.py | 7332d8be05cdec5a9f1c5f27bad51f6932dfab3a | [] | no_license | jizhidianxue/wiki_spider | 238b9f2ea17b4c1f8067d611a85ed07af65eb5e3 | 45ec5f5ead1d38fb1d757f198816daa91685314f | refs/heads/master | 2020-04-01T04:01:03.863130 | 2018-10-13T07:18:36 | 2018-10-13T07:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import string
from urllib import request
from urllib.parse import quote
import requests
class HtmlDownloader(object):
def download(self, url):
if url is None:
return None
kv = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.18 Safari/537.36',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9'
}
response = requests.get(url, headers=kv)
response.encoding = response.apparent_encoding
if response.status_code != 200:
return None
return response.text
| [
"zyang7@zju.edu.cn"
] | zyang7@zju.edu.cn |
604462ed935d3a7cb285c795360481b9dce62d44 | f1c6178b5f0bb6cbd3d42d9326e9f9c41701e0a6 | /Day 11/d11.py | 29bb6a6108fd358dc789fd51e8abb12082c212e6 | [] | no_license | princemathew1997/random-python | 779d377fb43a39b37584b7f3a5702f0f29e98ad0 | 80b9065353525465b87636efcd7879d5f7a8ae76 | refs/heads/main | 2023-01-31T09:38:27.056368 | 2020-12-19T15:20:39 | 2020-12-19T15:20:39 | 319,009,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | #for loop
a=input("Enter a number")
rev=0
for i in a:
a=int(a)
b=a%10
rev=rev*10+b
a=a//10
print("Reverse of the number is",rev)
| [
"princepmd6@gmail.com"
] | princepmd6@gmail.com |
3580cc8016cea2fd44c3a59ab3750aa83cc6851d | 3e5f12dfc851a0f32b761d923e1e5b6ffd13d79c | /cleverbot.py | 2e5fd5dc7333520e826f273791f331991d6c39e9 | [] | no_license | paulbooth/Chaterr | 8590bd25fc47a54cf2c8b7a032fe20dd7c7b3ad6 | b39b30b88511cc70eaa350e249910ff0b311f51b | refs/heads/master | 2018-12-29T00:16:52.152256 | 2012-10-03T23:41:34 | 2012-10-03T23:41:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,343 | py | #!/usr/bin/python
"""
This library lets you open chat session with cleverbot (www.cleverbot.com)
Example of how to use the bindings:
>>> import cleverbot
>>> cb=cleverbot.Session()
>>> print cb.Ask("Hello there")
'Hello.'
"""
import urllib2
import hashlib
import re
import sys
class ServerFullError(Exception):
pass
ReplyFlagsRE = re.compile('<INPUT NAME=(.+?) TYPE=(.+?) VALUE="(.*?)">', re.IGNORECASE | re.MULTILINE)
class Session(object):
keylist=['stimulus','start','sessionid','vText8','vText7','vText6','vText5','vText4','vText3','vText2','icognoid','icognocheck','prevref','emotionaloutput','emotionalhistory','asbotname','ttsvoice','typing','lineref','fno','sub','islearning','cleanslate']
headers={}
headers['User-Agent']='Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0'
headers['Accept']='text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
headers['Accept-Language']='en-us;q=0.8,en;q=0.5'
headers['X-Moz']='prefetch'
headers['Accept-Charset']='ISO-8859-1,utf-8;q=0.7,*;q=0.7'
headers['Referer']='http://www.cleverbot.com'
headers['Cache-Control']='no-cache, no-cache'
headers['Pragma']='no-cache'
def __init__(self):
self.arglist=['','y','','','','','','','','','wsf','','','','','','','','','0','Say','1','false']
self.MsgList=[]
def Send(self):
data=encode(self.keylist,self.arglist)
digest_txt=data[9:29]
hash=hashlib.md5(digest_txt).hexdigest()
self.arglist[self.keylist.index('icognocheck')]=hash
data=encode(self.keylist,self.arglist)
req=urllib2.Request("http://www.cleverbot.com/webservicemin",data,self.headers)
f=urllib2.urlopen(req)
reply=f.read()
return reply
def Ask(self,q):
self.arglist[self.keylist.index('stimulus')]=q
if self.MsgList: self.arglist[self.keylist.index('lineref')]='!0'+str(len(self.MsgList)/2)
asw=self.Send()
self.MsgList.append(q)
answer = parseAnswers(asw)
for k,v in answer.iteritems():
try:
self.arglist[self.keylist.index(k)] = v
except ValueError:
pass
self.arglist[self.keylist.index('emotionaloutput')]=''
text = answer['ttsText']
self.MsgList.append(text)
return text
def parseAnswers(text):
d = {}
keys = ["text", "sessionid", "logurl", "vText8", "vText7", "vText6", "vText5", "vText4", "vText3",
"vText2", "prevref", "foo", "emotionalhistory", "ttsLocMP3", "ttsLocTXT",
"ttsLocTXT3", "ttsText", "lineRef", "lineURL", "linePOST", "lineChoices",
"lineChoicesAbbrev", "typingData", "divert"]
values = text.split("\r")
i = 0
for key in keys:
d[key] = values[i]
i += 1
return d
def encode(keylist,arglist):
text=''
for i in range(len(keylist)):
k=keylist[i]; v=quote(arglist[i])
text+='&'+k+'='+v
text=text[1:]
return text
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789' '_.-')
def quote(s, safe = '/'): #quote('abc def') -> 'abc%20def'
safe += always_safe
safe_map = {}
for i in range(256):
c = chr(i)
safe_map[c] = (c in safe) and c or ('%%%02X' % i)
res = map(safe_map.__getitem__, s)
return ''.join(res)
def main():
import sys
cb = Session()
q = ''
while q != 'bye':
try:
q = raw_input("> ")
except KeyboardInterrupt:
print
sys.exit()
print cb.Ask(q)
if __name__ == "__main__":
if len(sys.argv) > 1 :
cb = Session()
for arg in sys.argv[1:]:
print cb.Ask(arg)
else:
main() | [
"thephantompaulbooth@gmail.com"
] | thephantompaulbooth@gmail.com |
9cc1c460455651b0f889ad516ee9646df9796df9 | 885c920bb2770289053816d23e26fad25bc24096 | /BinarySearch.py | 49474f551d54470a29af6628814e7d9591fed2ad | [] | no_license | mtuv/BinSearch | 00fe2d4ac3537c815f928a526f413e1fc4f44364 | 657ec74aafb11ea11857fd33672525692421e805 | refs/heads/master | 2022-04-20T09:48:22.294758 | 2020-04-15T03:43:00 | 2020-04-15T03:43:00 | 255,797,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | def binary_search(array, target):
left = 0
right = len(array) - 1
while True:
if right < left:
return -1
midpoint = (left + right) // 2
if array[midpoint] < target:
left = midpoint + 1
elif array[midpoint] > target:
right = midpoint - 1
else:
return midpoint
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
result = binary_search(primes, 97)
if result != -1:
print ("Element is present at index % d" % result)
else:
print ("Element is not present in array")
| [
"63384676+mtuv@users.noreply.github.com"
] | 63384676+mtuv@users.noreply.github.com |
0b13920f4b7bf9af5f88456f7cab77b587438f66 | bfd75153048a243b763614cf01f29f5c43f7e8c9 | /1906101097-李波/0303text2.py | 141c3d75c1e836526e0ff73784811f0195b795ac | [] | no_license | gschen/sctu-ds-2020 | d2c75c78f620c9246d35df262529aa4258ef5787 | e1fd0226b856537ec653c468c0fbfc46f43980bf | refs/heads/master | 2021-01-01T11:06:06.170475 | 2020-07-16T03:12:13 | 2020-07-16T03:12:13 | 239,245,834 | 17 | 10 | null | 2020-04-18T13:46:24 | 2020-02-09T04:22:05 | Python | UTF-8 | Python | false | false | 102 | py | class Test:
def prt(self):
print(self)
print(self.__class__)
t = Test()
t.prt()
| [
"2974011252@qq.com"
] | 2974011252@qq.com |
9da746164e40ff74bb887fd59775557656eb228e | 21e87dc5abaf8c8dfe7adfb72c38648f415d038c | /16_developer_tools/11_compileall/example/subfolder2/c.py | 4713d0f8c91464a958dcfae43283a515af70bba3 | [] | no_license | ariesduanmu/python3_standard_library | f2badbb6047b6003ddeccb77ba2892074510f0ff | 905ae53d0970be442bcf3d2a9dc3eadbc58367e5 | refs/heads/master | 2022-04-23T21:05:52.862076 | 2020-04-23T16:44:14 | 2020-04-23T16:44:14 | 241,277,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | # -*- coding: utf-8 -*-
# @Author: Li Qin
# @Date: 2020-02-24 09:30:22
# @Last Modified by: Li Qin
# @Last Modified time: 2020-02-24 09:30:56
def minus(a, b):
return a-b | [
"aries.duanmu@gmail.com"
] | aries.duanmu@gmail.com |
d06b868fa88c5d499dd32895fd542a19fc18deb0 | eed7b5aa4861086d34e539e7bbfeff4286506692 | /src/Game/Effects/spend_power.py | 2286d1f6e407736c8ea6bf6088203090a386bc5c | [] | no_license | dfwarden/DeckBuilding | 0be2ccb68fc9a69c8eaa1d8acedeaa7cebef1a31 | 0b5a7573a3cf33430fe61e4ff8a8a7a0ae20b258 | refs/heads/master | 2021-01-18T09:52:51.880892 | 2015-02-03T03:21:17 | 2015-02-03T03:21:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py |
class SpendPower:
""" Represents an effect to spend power this turn """
def __init__(self, power):
""" Initialize the Effect with the power to spend """
self.power = power
def perform(self, context):
""" Perform the Game Effect """
context.owner.spendPower(self.power) | [
"cloew123@gmail.com"
] | cloew123@gmail.com |
ad8ea5912f2475677a294ad6b496f6e6354dab53 | f7fe9c722b8fa7ed6e66080053706a495fffb2d8 | /tensorflow/python/distribute/failure_handling/failure_handling.py | 0acb76e8a30174efd1c32a7cf020cdaea0d941c2 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | Poet-LiBai/tensorflow | 832d9d8ddb58b5560ba19119cf107bbe857208e7 | f354ef21ae067a73fbc2ab45a7a5ceda4b0a1ff4 | refs/heads/master | 2022-05-02T04:06:27.411162 | 2022-04-21T02:45:39 | 2022-04-21T02:49:33 | 155,213,121 | 0 | 0 | Apache-2.0 | 2018-10-29T13:04:55 | 2018-10-29T13:04:55 | null | UTF-8 | Python | false | false | 35,947 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for `WorkerPreemptionHandler`.
This is currently under development and the API is subject to change.
WorkerPreemptionHandler reduces loss of training progress caused by termination
(preemption or maintenance) of workers in multi-worker synchronous training and
avoid surfacing an error indistinguishable from application errors to the
job scheduler or users.
"""
import os
import signal
import sys
import threading
import time
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute.failure_handling import gce_util
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
# Coordination-service key under which a step count to checkpoint at is first
# published to all workers (also reused as the channel for the stop sentinel
# below).
_INITIAL_RUN_COUNT_KEY = 'RUN_TO_CHECKPOINT'
# Coordination-service key for the final step to checkpoint at before exiting
# near the end of an extended grace period.
_FINAL_RUN_COUNT_KEY = 'LAST_RUN_TO_CHECKPOINT'
# This key is used to guarantee that only one worker (and it's the earliest
# one that receives a preemption signal) sets _received_own_sigterm,
# leads the step resolution, and controls the grace period timeline.
_PREEMPTION_WORKER_KEY = 'TERMINATED_WORKER'
# NOTE(review): presumably an acknowledgement key for received signals; its
# usage is not in this part of the file — confirm before relying on it.
_ACKNOWLEDGE_KEY = 'RECEIVED_SIGNAL'
# Name of the checkpointed tf.Variable that records how many
# `WorkerPreemptionHandler.run` calls had completed at checkpoint time.
_ITERATION_VARIABLE = 'checkpointed_runs'
# Sentinel value written under _INITIAL_RUN_COUNT_KEY to tell the
# cluster-wise watcher thread to stop watching.
_STOP_WATCHING_CLUSTER_VALUE = 'STOP_WATCHER'
def _mwms_write_checkpoint_dir(checkpoint_dir, task_type, task_id,
                               cluster_spec):
  """Returns checkpoint_dir for chief and a temp dir for any other worker."""
  parent_dir = os.path.dirname(checkpoint_dir)
  checkpoint_base = os.path.basename(checkpoint_dir)
  if multi_worker_util.is_chief(
      cluster_spec=cluster_spec, task_type=task_type, task_id=task_id):
    # The chief writes to the real checkpoint directory.
    return os.path.join(parent_dir, checkpoint_base)
  # Every other worker writes to its own per-task scratch directory next to
  # the real one, created here if needed.
  worker_dir = os.path.join(parent_dir, 'workertemp_' + str(task_id))
  gfile.MakeDirs(worker_dir)
  return os.path.join(worker_dir, checkpoint_base)
# TODO(wxinyi): rename time_till_termination to grace_period.
class TerminationConfig(object):
  """Customization point for platforms other than Google's Borg or GCP.

  A `TerminationConfig` can be created and passed to a
  `WorkerPreemptionHandler` to adapt preemption handling to the hosting
  platform. It carries three pieces of information:

  * `termination_watcher_function`: how to decide whether termination is
    imminent. Because the notification mechanism differs across platforms,
    this is a user-defined function executed repeatedly; it should return
    True once a termination notification is available and False otherwise.
    It should be lightweight and non-blocking so resources can be cleaned up
    properly if no termination signal is ever raised before training
    finishes.

  * `exit_fn`: how to exit the program after the checkpoint has been saved.
    For MultiWorkerMirroredStrategy a restart is inevitable to reset the
    program's state, but `exit_fn` can make the restart seamless — e.g.
    `sys.exit(RESTART_CODE)`, where RESTART_CODE is a code your platform (or
    a coordinating launch script) recognizes as an auto-restart request.

  * `time_till_termination`: how long the platform allows between the
    termination notice and the actual termination. With a long grace period
    (say, an hour) training can keep going and a checkpoint is saved again
    right before the termination.

  Default behavior: on Google Borg or GCP the platform is detected and
  configured automatically. On an unrecognized platform, `signal.SIGTERM`
  is treated as the termination event, the program exits with an arbitrary
  code 42, and `time_till_termination` defaults to 0 — the current training
  step is wrapped up, a checkpoint is saved, and the program exits as soon
  as the termination signal is received.
  """

  def __init__(self,
               termination_watcher_function=None,
               exit_fn=None,
               time_till_termination=None):
    # A None field means "not customized"; platform-specific defaults are
    # filled in later by _complete_config_for_environement.
    self.termination_watcher_function = termination_watcher_function
    self.exit_fn = exit_fn
    self.time_till_termination = time_till_termination
# TODO(wxinyi): configure the exit function based on device type (GPU or TPU).
class GCPTerminationConfig(TerminationConfig):
  """TerminationConfig pre-filled with GCP GPU VM defaults."""

  def __init__(  # pylint: disable=super-init-not-called
      self,
      termination_watcher_function=None,
      exit_fn=None,
      time_till_termination=None):
    # Any field the caller left unset (falsy) falls back to the GCE-specific
    # default from gce_util.
    self.termination_watcher_function = (
        termination_watcher_function or
        gce_util.termination_watcher_function_gce)
    self.exit_fn = exit_fn or gce_util.gce_exit_fn
    self.time_till_termination = (time_till_termination or
                                  gce_util.GRACE_PERIOD_GCE)
class BorgTerminationConfig(TerminationConfig):
  """TerminationConfig pre-filled with Borg defaults."""

  def __init__(  # pylint: disable=super-init-not-called
      self,
      termination_watcher_function=None,
      exit_fn=None,
      time_till_termination=None):
    # Borg delivers SIGTERM directly, so no polling function is set by
    # default.
    self.termination_watcher_function = termination_watcher_function
    # Exit with an arbitrary code 42 unless the caller supplied an exit_fn
    # (see the TerminationConfig docstring for the rationale).
    self.exit_fn = exit_fn or (lambda: sys.exit(42))
    # No grace period by default: checkpoint and exit immediately.
    self.time_till_termination = time_till_termination or 0
def _complete_config_for_environement(platform_device, termination_config):
  """Complete un-filled fields of TerminationConfig based on platform."""
  if platform_device is gce_util.PlatformDevice.GCE_GPU:
    config_cls = GCPTerminationConfig
  else:
    # The defaults chosen for Borg are also the fallback for any
    # unrecognized platform, so both cases share one config class.
    config_cls = BorgTerminationConfig
  return config_cls(termination_config.termination_watcher_function,
                    termination_config.exit_fn,
                    termination_config.time_till_termination)
# Implementation:
# Each worker will create its own WorkerPreemptionHandler instance, and the
# instances communicate through coordination services. Each
# WorkerPreemptionHandler conducts three tasks in parallel:
# - Watches out for its own preemption signal. (_poll_termination_signal_thread)
# - Watches out for a step key from the coordination service made available
# by any member in the cluster (_cluster_wise_termination_watcher_thread)
# - The main thread for training.
#
# The life cycle of a WorkerPreemptionHandler is as below:
#
# It starts two threads as two watcher as described above. And it starts
# training. Each time before it starts a training step, it will check if any
# information has been made available by the two watchers: The
# _poll_termination_signal_thread will be in charge of the _received_own_sigterm
# event, the _cluster_wise_termination_watcher_thread will be in charge of the
# _received_checkpoint_step event.
#
# If at any point the local worker receives a preemption signal,
# _poll_termination_signal_thread will set _received_own_sigterm.
# Next time before it attempts to run a training step, it will deal with the
# event, by setting its current finished step + 1 as the step after which a
# checkpoint should be saved and make it available to all the workers through
# the coordination service. It will then continue training.
#
# This step key will be picked up by the other watcher,
# _cluster_wise_termination_watcher_thread, both on the worker to be preempted
# and other workers. And it will set the _received_checkpoint_step event.
# Now, if there is a long grace period before the training
# has to terminate (e.g., an hour), we would like to keep training and save a
# checkpoint again right before the termination. Thus this watcher thread will
# move on to watch out for a final step-to-save key. Otherwise,
# it has finished all the task to do.
#
# Back to the main training thread. Again, before the next training step, the
# WorkerPreemptionHandler found that _received_checkpoint_step is set. If the
# local worker has not finished the required step after which to save a
# checkpoint, it will not do anything. Continue training and it will revisit
# after another step. If the step is met, then it will save a checkpoint,
# which requires participation of all workers.
#
# After this checkpoint is saved, if there is NO long grace period, all workers
# will just exit. If there is, all workers will enter a grace period countdown
# phase (_final_checkpoint_countdown) and clear the _received_checkpoint_step
# event. They will then continue training.
#
# For the worker to be preempted, during this countdown period, it will check
# whether the grace period is almost ending before its every step. If not,
# nothing needs to be done. If so, it will again set a step-to-save key and made
# it available to all workers. This is still watched by
# _cluster_wise_termination_watcher_thread and gestured by
# _received_checkpoint_step. A similar process is repeated: all workers save
# a checkpoint at an agreed step. And after they finish saving, they recognize
# that they have finished a countdown period for an extended grace period, and
# they all exit.
#
# When the program restarts and WorkerPreemptionHandler object is created, it
# will restore the checkpoint.
class WorkerPreemptionHandler(object):
"""Preemption and error handler for synchronous training.
The API helps coordinate all workers to save a checkpoint upon receiving a
preemption signal and helps propagate accurate error messages during training.
When the program recovers from preemption, the checkpoint that is passed to
initialize a `WorkerPreemptionHandler` object will be loaded
automatically.
Right after the initialization, a thread starts to watch out for a termination
signal for any member in the cluster, but the signal will only be handled
(which includes aligning the step to save a checkpoint, saving a checkpoint,
and exiting with a platform recognized restart code) after entering a
`WorkerPreemptionHandler.run` call.
Example usage:
```python
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
dataset, model, optimizer = ...
fh_checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
worker_preemption_watcher = tf.distribute.WorkerPreemptionHandler(
cluster_resolver, fh_checkpoint, checkpoint_directory)
# `worker_preemption_watcher.total_runs` will be restored to its
# checkpointed value when training is restored after interruption.
for epoch in range(worker_preemption_watcher.total_runs //
STEPS_PER_EPOCH, num_epochs):
for step in range(worker_preemption_watcher.total_runs %
STEPS_PER_EPOCH, num_steps):
# distributed_train_step is a function wrapped by strategy.run
loss += worker_preemption_watcher.run(distributed_train_step,
args=(next(dataset),))
```
`WorkerPreemptionHandler` will create a CheckpointManager to manage the
checkpoint and only one CheckpointManager should be active in a particular
directory at a time. Thus, if the user would like to save a checkpoint for
purpose other than fault tolerance, e.g., for evaluation, they should save it
in a directory different from the one passed to a
`WorkerPreemptionHandler`.
This API targets multi-client distributed training, and right now only
`tf.distribute.MultiWorkerMirroredStrategy` is supported.
"""
def __init__(self,
cluster_resolver,
checkpoint,
checkpoint_dir,
termination_config=TerminationConfig()):
"""Creates the failure handler.
Args:
cluster_resolver: a `tf.distribute.cluster_resolver.ClusterResolver`. You
may also get it through the `cluster_resolver` attribute of the strategy
in use.
checkpoint: a `tf.train.Checkpoint` that will be saved upon preemption and
loaded upon restart by the `WorkerPreemptionHandler` API automatically.
checkpoint_dir: a directory for the `WorkerPreemptionHandler` to play with
checkpoints. `WorkerPreemptionHandler` will create a
`tf.train.CheckpointManager` to manage the passed-in `checkpoint`. Since
only one `tf.train.CheckpointManager` should be active in a particular
directory at a time, this `checkpoint_dir` arg should preferably be
separated from where the user saves their checkpoint for non-fault
tolerance purpose.
termination_config: a `TerminationConfig` object to configure for a
platform other than Google Borg or GCP.
"""
self._cluster_resolver = cluster_resolver
self._checkpoint = checkpoint
self._id_in_cluster = str(
multi_worker_util.id_in_cluster(
self._cluster_resolver.cluster_spec(),
self._cluster_resolver.task_type,
self._cluster_resolver.task_id))
# The number of calls to `WorkerPreemptionHandler.run` when the latest
# checkpoint was saved.
self._checkpointed_runs = variables.Variable(
initial_value=constant_op.constant(0, dtype=dtypes.int64),
trainable=False,
name=_ITERATION_VARIABLE)
if not hasattr(self._checkpoint,
_ITERATION_VARIABLE):
setattr(self._checkpoint, _ITERATION_VARIABLE,
self._checkpointed_runs)
# Make CheckpointManagers. MultiWorkerMirroredStrategy requires different
# setup on chief and on other workers.
self._read_checkpoint_manager = checkpoint_management.CheckpointManager(
checkpoint, directory=checkpoint_dir, max_to_keep=1)
if multi_worker_util.is_chief(
cluster_spec=cluster_resolver.cluster_spec(),
task_type=cluster_resolver.task_type,
task_id=cluster_resolver.task_id):
self._write_checkpoint_manager = self._read_checkpoint_manager
else:
self._write_checkpoint_manager = checkpoint_management.CheckpointManager(
checkpoint,
_mwms_write_checkpoint_dir(checkpoint_dir, cluster_resolver.task_type,
cluster_resolver.task_id,
cluster_resolver.cluster_spec()),
max_to_keep=1)
self._read_checkpoint_manager.restore_or_initialize()
# grace period countdown. Set to True for all workers once they finish
# timing saving a checkpoint. Once entering this phase, new
# preemption/maintenance notice will not be handled, since the whole cluster
# goes down as the worker who first initiates the grace period goes down.
self._final_checkpoint_countdown = False
self._estimated_run_time = 0
# An internal step counter that's restored to checkpointed_iterations when
# training is restored. It increments by one every time
# `WorkerPreemptionHandler.run` is called. Note that in this case, the
# user must pass a single-step training function to
# `WorkerPreemptionHandler.run` instead of a multiple-step one.
self._run_counter = self._checkpointed_runs.numpy()
# The worker itself has received preeption signal.
self._received_own_sigterm = threading.Event()
# Some member (could be oneself) has received preemption signal, and the
# step number to save a checkpoint has been aligned.
self._received_checkpoint_step = threading.Event()
self._platform_device = gce_util.detect_platform()
completed_termination_config = _complete_config_for_environement(
self._platform_device, termination_config)
self._termination_watcher_function = completed_termination_config.termination_watcher_function
self._exit_fn = completed_termination_config.exit_fn
self._grace_period = completed_termination_config.time_till_termination
# When training is interrupted, we explicitly call the cleanup methods for
# the thread watching for local worker's termination signal and the thread
# watching for clusterwise information before we save a checkpoint and exit.
# In the final chapter of the training where no interruption is encountered,
# we rely on __del__ to clean up. However, there is no guarantee when or
# whether __del__ is executed, thus we make the threads daemon to avoid it
# preventing program from exit.
self._cluster_wise_termination_watcher_thread = threading.Thread(
target=self._watch_step_to_save_key,
name='PeerTerminationWatcher-%s' % self._id_in_cluster,
daemon=True)
logging.info('Start watcher for peer\'s signal.')
self._cluster_wise_termination_watcher_thread.start()
self._poll_termination_signal_thread = None
if completed_termination_config.termination_watcher_function:
self._start_polling_for_termination_signal()
else:
self._start_watching_for_signal()
  def _start_watching_for_signal(self):
    """Installs a handler so SIGTERM serves as the termination notice."""
    signal.signal(signal.SIGTERM, self._sigterm_handler_fn)
def _start_polling_for_termination_signal(self):
self._poll_termination_signal_thread_should_stop = threading.Event()
self._poll_termination_signal_thread = threading.Thread(
target=self._poll_termination_signal,
name='WorkerTerminationSignalWatcher-%s' % self._id_in_cluster,
daemon=True)
logging.info('Start polling for termination signal.')
self._poll_termination_signal_thread.start()
def _poll_termination_signal(self):
"""Poll maintenance notice and notify peers if receiving one."""
while True:
if self._poll_termination_signal_thread_should_stop.is_set(
) or self._final_checkpoint_countdown:
return
if self._termination_watcher_function():
break
time.sleep(1)
self._maybe_set_received_own_sigterm()
def _maybe_set_received_own_sigterm(self):
"""Claim earliest preemption if no one else has done it before."""
try:
context.context().set_config_key_value(_PREEMPTION_WORKER_KEY,
self._id_in_cluster)
logging.info('Member %s has received termination notice.',
self._id_in_cluster)
self._received_own_sigterm_time = time.time()
self._received_own_sigterm.set()
# This is to handle the case that a worker has received termination
# notice but hasn't come to the next step to set the step key. Other
# workers might receive a termination notice too, and attempt to set the
# config key again, which causes this error. This can be safely ignored
# since checkpoint should be saved as early as the earliest call is made.
except errors.AlreadyExistsError:
logging.info('Member %s has received termination notice. But some other '
'worker has received it as well! Leaving'
' it to them to decide when to checkpoint. ',
self._id_in_cluster)
return
def _stop_poll_termination_signal_thread(self):
if self._poll_termination_signal_thread:
self._poll_termination_signal_thread_should_stop.set()
self._poll_termination_signal_thread.join()
self._poll_termination_signal_thread = None
logging.info('Shut down watcher for one\'s own termination signal')
def _stop_cluster_wise_termination_watcher_thread(self):
"""Stop the thread that is _watch_step_to_save_key."""
if self._cluster_wise_termination_watcher_thread:
try:
context.context().set_config_key_value(_INITIAL_RUN_COUNT_KEY,
_STOP_WATCHING_CLUSTER_VALUE)
except (errors.AlreadyExistsError, errors.UnavailableError):
# We'll ignore any error in the process of setting this key. There
# certainly will be a AlreadyExistError since all workers are trying to
# push this key. Or some worker might have exited already, leading to a
# errors.UnavailableError or errors.AbortedError.
pass
try:
context.context().set_config_key_value(_FINAL_RUN_COUNT_KEY,
_STOP_WATCHING_CLUSTER_VALUE)
except (errors.AlreadyExistsError, errors.UnavailableError):
pass
finally:
self._cluster_wise_termination_watcher_thread.join()
self._cluster_wise_termination_watcher_thread = None
logging.info('Shut down watcher for peer\'s termination signal.')
def __del__(self):
self._stop_cluster_wise_termination_watcher_thread()
self._stop_poll_termination_signal_thread()
@property
def total_runs(self):
"""Returns the number of times `WorkerPreemptionHandler.run` is called.
This value tracks the number of all calls to
`WorkerPreemptionHandler.run` including those before the program is
restarted and the training is restored. The user can compute their total
number of iterations by:
`worker_preemption_watcher.run * number_of_steps_in_train_function`,
while for tf.distribute.MultiWorkerMirroredStrategy users,
`number_of_steps_in_train_function` should be one.
"""
return self._run_counter
def run(self,
distributed_train_function,
*args,
**kwargs):
"""Runs a training function with error and preemption handling.
This function handles the preemption signal from any peer in the cluster by
saving the training progress and exiting gracefully. (Specifically, when
running on Borg, it exits with a special code so that the cluster
automatically restarts the training after the down worker is back.) It will
also propagate any program error encountered during execution of
`distributed_train_function` to all workers so that they can raise the same
error.
The `distributed_train_function` argument should be a distributed train
function (i.e., containing a call to `tf.distribute.Strategy.run`). For
`tf.distribute.MultiWorkerMirroredStrategy` users, we recommend passing in a
single-step `distributed_train_function` to
`WorkerPreemptionHandler.run` so that the checkpoint can be saved in
time in case a preemption signal or maintenance notice is sent.
Besides the preemption and error handling part,
`WorkerPreemptionHandler.run(distributed_train_function, *args,
**kwargs)` has the same effect and output as
`distributed_train_function(*args, **kwargs)`. `distributed_train_function`
can return either some or no result. The following is a shortened example:
```python
@tf.function
def distributed_train_step(iterator):
# A distributed single-step training function.
def step_fn(inputs):
# A per-replica single-step training function.
x, y = inputs
...
return loss
per_replica_losses = strategy.run(step_fn, args=(next(iterator),))
return strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
for epoch in range(worker_preemption_watcher.total_runs //
STEPS_PER_EPOCH, EPOCHS_TO_RUN):
iterator = iter(multi_worker_dataset)
total_loss = 0.0
num_batches = 0
for step in range(worker_preemption_watcher.total_runs %
STEPS_PER_EPOCH, STEPS_PER_EPOCH):
total_loss += worker_preemption_watcher.run(distributed_train_step)
num_batches += 1
train_loss = total_loss / num_batches
print('Epoch: %d, train_loss: %f.' %(epoch.numpy(), train_loss))
train_accuracy.reset_states()
```
Args:
distributed_train_function: A (single-step) distributed training function.
*args: args for `distributed_train_function`.
**kwargs: kwargs for `distributed_train_function`.
Raises:
Program error encountered by any member in the cluster encounters one
while executing the `distributed_train_function`, or any error from the
program error propagation process.
Returns:
Result of running the `distributed_train_function`.
"""
# TODO(wxinyi): after we support use with TPUStrategy, we should expand the
# API doc to state that `distributed_train_function` does not need to be a
# single-step training function, since a multi-step host-training loop is
# the dominant use case for TPU user. Besides, passing in a multi-step
# `distributed_train_function` will require the user to track their own
# training steps.
try:
self._checkpoint_if_preempted()
run_begin_time = time.time()
result = distributed_train_function(*args, **kwargs)
new_run_time = time.time() - run_begin_time
self._run_counter += 1
# Update the average run time with the new run.
self._estimated_run_time = self._estimated_run_time + (
new_run_time - self._estimated_run_time) / self._run_counter
except errors.OpError as e:
logging.info('Propagating error to cluster: %r: %s', e, e)
try:
context.context().report_error_to_cluster(e.error_code, e.message)
except Exception as ex: # pylint: disable=broad-except
logging.info('Ignoring error during error propagation: %r:%s', ex, ex)
raise
return result
def _save_checkpoint(self):
"""Saves the checkpoint and exit program."""
logging.info('WorkerPreemptionHandler: Starting saving a checkpoint.')
self._checkpointed_runs.assign(self.total_runs)
start_time = time.monotonic()
self._write_checkpoint_manager.save()
# All workers need to participate in saving a checkpoint to avoid
# deadlock. They need to write to different paths so that they would not
# override each other. We make temporary directories for non-chief
# workers to write to, and clean them up afterward.
if not multi_worker_util.is_chief(
cluster_spec=self._cluster_resolver.cluster_spec(),
task_type=self._cluster_resolver.task_type,
task_id=self._cluster_resolver.task_id):
gfile.DeleteRecursively(
os.path.dirname(self._write_checkpoint_manager.directory))
end_time = time.monotonic()
logging.info('Checkpoint finished at path %s',
self._write_checkpoint_manager.directory)
self._checkpoint_time = end_time - start_time
def _checkpoint_if_preempted(self):
"""Checkpoint if any worker has received a preemption signal.
This function handles preemption signal reported by any worker in the
cluster. The current implementation relies on the fact that all workers in a
MultiWorkerMirroredStrategy training cluster have a step number difference
maximum of 1.
- If the signal comes from the worker itself (i.e., where this failure
handler sits), the worker will notify all peers to checkpoint after they
finish CURRENT_STEP+1 steps, where CURRENT_STEP is the step this worker has
just finished. And the worker will wait for all peers to acknowledge that
they have received its preemption signal and the final-step number before
the worker proceeds on training the final step.
- If the signal comes from another member in the cluster but NO final-step
info is available, proceed on training, because it will be available after
finishing the next step.
- If the signal comes from some other member in the cluster, and final-step
info is available, if the worker has not finished these steps yet, keep
training; otherwise, checkpoint and exit with a cluster-recognized restart
code.
"""
if self._final_checkpoint_countdown:
run_count_config_key = _FINAL_RUN_COUNT_KEY
else:
run_count_config_key = _INITIAL_RUN_COUNT_KEY
if self._received_checkpoint_step.is_set():
run_count_key = context.context().get_config_key_value(
run_count_config_key)
if run_count_key == str(self._run_counter):
self._save_checkpoint()
if self._time_to_exit():
self._stop_poll_termination_signal_thread()
self._stop_cluster_wise_termination_watcher_thread()
logging.info('WorkerPreemptionHandler: checkpoint saved. Exiting.')
self._exit_fn()
else:
logging.info('Continue training for the grace period.')
self._final_checkpoint_countdown = True
self._received_checkpoint_step.clear()
elif self._received_own_sigterm.is_set():
# Only the worker who gets termination signal first among the cluster
# will enter this branch. The following will happen in chronological
# order:
# 1. The worker just receives a preemption signal and enters this branch
# for the first time. It will set a step-to-checkpoint and let the cluster
# know.
# 2. If there is a long grace period, it will also set
# _final_checkpoint_countdown, so that during this grace period, it will
# re-enter this branch to check if grace period is ending.
# 3. If it is, set a step-to-checkpoint key again.
if self._final_checkpoint_countdown:
if self._target_time_for_termination < time.time():
logging.info(
'Grace period almost ended. Final call to save a checkpoint!')
else:
return
step_to_save_at = str(self._run_counter + 1)
logging.info('Termination caught in main thread on preempted worker')
context.context().set_config_key_value(run_count_config_key,
step_to_save_at)
logging.info('%s set to %s', run_count_config_key, step_to_save_at)
n_workers = multi_worker_util.worker_count(
self._cluster_resolver.cluster_spec(),
self._cluster_resolver.task_type)
for i in range(n_workers):
context.context().get_config_key_value(
f'{_ACKNOWLEDGE_KEY}_{run_count_config_key}_{i}')
logging.info('Sigterm acknowledgement from replica %d received', i)
self._setup_countdown_if_has_grace_period_and_not_already_counting_down()
def _time_to_exit(self):
"""Return whether to exit: exit if no grace period or grace period ends."""
# we should directly exit in either of the two cases:
# 1. if no grace period is provided;
# 2. if there is a grace period, and we're in countdown period. This,
# together with the fact that _received_checkpoint_step is set (again),
# means it's time to exit: when there is a grace period, a worker
# receives preemption signal and sets the step key. Then all workers
# receive the step key and set their local _received_checkpoint_step
# event, enters this branch in _checkpoint_if_preempted, make a
# checkpoint. Then they set _final_checkpoint_countdown to True, clear
# _received_checkpoint_step, and continue training. New preemption
# signals anywhere in the cluster will not be handled, because
# _PREEMPTION_WORKER_KEY is occupied. The only chance that
# _received_checkpoint_step gets set again is when the worker who has
# received the preemption signal earlier decide it's time to do a final
# checkpoint (by checking if it already passes
# _target_time_for_termination). It will upload a final step key. All
# workers receive this key and again set _received_checkpoint_step. So,
# if we found out that _received_checkpoint_step is set, and also
# _final_checkpoint_countdown is true, it's checkpoint and exit time.
return (self._grace_period <= 0) or self._final_checkpoint_countdown
def _setup_countdown_if_has_grace_period_and_not_already_counting_down(self):
"""Set up at the beginning of a countdown period for long grace period."""
if self._grace_period > 0 and not self._final_checkpoint_countdown:
# A factor to provide more buffer / inaccuracy.
# TODO(wxinyi): update buffer_factor as needed. Maybe deduct a constant.
buffer_factor = 3
# Timing by 2 since while the preempted worker needs to do 1 extra step
# when time_till_final_call <=0, other workers might need to do x step
# where 0<x<2
self._target_time_for_termination = (
self._received_own_sigterm_time + self._grace_period -
buffer_factor * self._estimated_run_time * 2)
def _sigterm_handler_fn(self, signum, frame):
"""Upload the to-be-preempted worker's id to coordination service."""
del signum, frame
self._maybe_set_received_own_sigterm()
def _watch_step_to_save_key(self):
"""Watch out for step-to-save config key and acknowledge.
All workers, including the one to be preempted, execute this function to get
step-to-save.
"""
step_value = context.context().get_config_key_value(_INITIAL_RUN_COUNT_KEY)
# get_config_key_value does not return until it gets some result. Thus at
# the time to clean up, we upload a _STOP_WATCHING_CLUSTER_VALUE as the
# value so we can join the thread executing _watch_step_to_save_key.
if step_value != _STOP_WATCHING_CLUSTER_VALUE:
# This must be set before we set the ack key below, otherwise its value
# in _checkpoint_if_preempted may be outdated.
self._received_checkpoint_step.set()
ack_key = f'{_ACKNOWLEDGE_KEY}_{_INITIAL_RUN_COUNT_KEY}_{self._id_in_cluster}'
context.context().set_config_key_value(ack_key, '1')
logging.info(
'WorkerPreemptionHandler: %s set, '
'preemption awareness acknowledged', ack_key)
# If a positive grace_period is not configured, we get the
# _INITIAL_RUN_COUNT_KEY and then we're done. _checkpoint_if_preempted
# will save a checkpoint and then exit. Otherwise, we need to move on to
# wait for the _FINAL_RUN_COUNT_KEY, the one that the preempted worker
# will set after we utilize the extended grace period to train, so that
# a final checkpoint should be made right before the termination.
if self._grace_period > 0:
# Continue to wait until a final call is made.
final_step_value = context.context().get_config_key_value(
_FINAL_RUN_COUNT_KEY)
if final_step_value != _STOP_WATCHING_CLUSTER_VALUE:
ack_key = f'{_ACKNOWLEDGE_KEY}_{_FINAL_RUN_COUNT_KEY}_{self._id_in_cluster}'
context.context().set_config_key_value(ack_key, '1')
logging.info('WorkerPreemptionHandler: %s acknowledged, final '
'checkpoint timing received.', ack_key)
self._received_checkpoint_step.set()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
50c78cdca2d41ecc29bc6c3d7e3bad53b774a4ba | aed7373081b696b9bcfe9219f9c97272d1a04421 | /app/clue/add_questions.py | 888e894088767b25edb980c8017b794ca4eb56ce | [] | no_license | zeo210/jGame | fa20fbcfada2df87d89cd0224ca78a6d46dac44a | 2ff98ae3306d4427046d9c2c82183f097bf2ba28 | refs/heads/master | 2016-09-15T17:54:08.986834 | 2015-02-26T00:55:06 | 2015-02-26T00:55:06 | 28,906,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,469 | py | from app import db, basedir
from app.clue.models import Episode, Value, Category, Answer, Clue
import csv
import os
def replacing_normalize(string):
return string.decode('utf8').encode('ascii', errors='xmlcharrefreplace')
def add_questions(target):
with open(target) as open_csv_file:
csv_reader = csv.reader(open_csv_file)
next(csv_reader)
count = 0
for row in csv_reader:
print(row)
episode = Episode.query.filter_by(episode=int(row[0])).first()
if episode is None:
episode = Episode(episode=int(row[0]))
db.session.add(episode)
db.session.commit()
Episode.query.filter_by(episode=int(row[0])).first()
normalized_category = replacing_normalize(row[1])
category = Category.query.filter_by(category=normalized_category).first()
if category is None:
category = Category(category=normalized_category)
db.session.add(category)
db.session.commit()
category = Category.query.filter_by(category=normalized_category).first()
value = Value.query.filter_by(value=int(row[2])).first()
if value is None:
value = Value(value=int(row[2]))
db.session.add(value)
db.session.commit()
value = Value.query.filter_by(value=int(row[2])).first()
normalized_answer = replacing_normalize(row[4])
answer = Answer.query.filter_by(answer=normalized_answer).first()
if answer is None:
answer = Answer(answer=normalized_answer)
db.session.add(answer)
db.session.commit()
answer = Answer.query.filter_by(answer=normalized_answer).first()
normalized_clue = replacing_normalize(row[3])
clue = Clue.query.\
filter_by(clue=normalized_clue, category=category, answer=answer).\
first()
if clue is None:
clue = Clue(episode=episode,
category=category,
value=value,
answer=answer,
clue=normalized_clue)
db.session.add(clue)
db.session.commit()
if __name__ == '__main__':
add_questions(os.path.join(basedir, *['app','clue','clues.csv']))
| [
"ribo.2102@gmail.com"
] | ribo.2102@gmail.com |
fcfe661e79630bab10199d1b2d0a546070ee7f80 | 4a22680287ffa15e411da187aa4999ecff006e4c | /python/work/spider/get_movie_tagname.py | a732ee94795ea7d555ee6e55dac8bcdedfeee2d7 | [] | no_license | bopopescu/develop | cd5ab34c0d08fd173dce6efdb863fddfcd0323c6 | 3eee463da36b4da773b933d252038dfe48cd8fae | refs/heads/master | 2022-11-22T23:47:29.565352 | 2018-07-02T02:00:59 | 2018-07-02T02:00:59 | 281,995,209 | 0 | 0 | null | 2020-07-23T15:56:03 | 2020-07-23T15:56:02 | null | UTF-8 | Python | false | false | 5,872 | py | #-*- encoding:utf-8 -*-
'''
Created on 2017-04-21
@author: dedong.xu
@description: 爬取电影的标签
'''
#standard lib
import re
import json
import time
import urllib2
import logging
import random
#3rd lib
from lxml import etree
from poster.streaminghttp import register_openers
base_get_url = "http://10.10.3.196:8080/tool/vmdbid_relationid?count=1&start="
post_url = "http://10.10.3.196:8080/tool/vmdbidtag"
douban_id = "26260853"
base_url = "https://movie.douban.com/subject"
LOG_FILENAME = "log.txt"
time_list = [5, 10, 15, 20, 25, 30]
useragent_list = ["Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.2)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1) ",
"Mozilla/4.0 (compatible; MSIE 5.0; Windows NT) ",
"Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3",
"Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12",
]
def log(info):
""" 记录log日志 """
logging.basicConfig(filename = LOG_FILENAME, level = logging.NOTSET, filemode = "a", format = "%(asctime)s : %(message)s")
logging.info(info)
class PostAccessUrl(object):
""" HTTP POST请求 """
def access_url(self, url, params):
""" 访问url """
try:
register_openers()
params = json.dumps(params)
req = urllib2.Request(url, data = params, headers = {"Content-Type": "application/json"})
response = urllib2.urlopen(req)
return response.read()
except Exception, e:
return str(e)
class GetAccessUrl(object):
""" HTTP GET 请求"""
def access_url(self, url, useragent):
""" 访问url """
if url is None:
return
try:
print useragent
request = urllib2.Request(url)
request.add_header("User-Agent", useragent)
res = urllib2.urlopen(request)
if res.code != 200:
return
except Exception, e:
return
return res.read()
def access_url2(self, url):
""" 访问url """
if url is None:
return
try:
res = urllib2.urlopen(url)
if res.code != 200:
return
except Exception, e:
return
return res.read()
class UrlDownloader(GetAccessUrl):
""" url下载器 """
def __init__(self):
""" 将父类的方法改名 """
self.download = super(self.__class__, self).access_url
class HtmlParser_By_Re(object):
""" html解析器 """
def get_tags(self, html_doc):
""" 使用正则表达式解析 """
p1 = r'<div class="tags-body">\s*(<a href="/tag/[\d\D]+?" class="">[\d\D]+?</a>\s*)+\s*</div>'
res = re.search(p1, html_doc)
if res:
p2 = r'<a href="/tag/[\d\D]+?" class="">([\d\D]+?)</a>\s*'
return re.findall(p2, res.group())
return []
class HtmlParse_By_lxml(object):
""" html解析器 """
def parser(self, html_doc):
""" 解析html文档 """
if html_doc is None:
return
try:
page = etree.HTML(html_doc.decode('utf-8'))
except Exception as e:
print "parser error: ", str(e)
page = None
return page
def get_tags(self, page):
""" 获取标签名字 """
res = page.xpath(u'//div[@class="tags-body"]')
"""
#这种方法也可以
for record in res:
for i in record.getchildren():
print i.text,
return [i.text for record in res for i in record.getchildren()]
"""
return [i.text for record in res for i in record.findall("a")]
class SpiderMain(object):
""" 爬虫主程序 """
def __init__(self):
""" 初始化变量 """
self.ud = UrlDownloader()
self.hpbr = HtmlParser_By_Re()
self.hpbl = HtmlParse_By_lxml()
self.gau = GetAccessUrl()
self.pau = PostAccessUrl()
def crawl_test(self, url=None):
n = 1
while 1:
useragent= random.choice(useragent_list)
url = "https://movie.douban.com/subject/26260853"
html_doc = self.ud.download(url, useragent)
page = self.hpbl.parser(html_doc)
tag_list = self.hpbl.get_tags(page)
n += 1
def crawl(self, url, base_get_url, post_url):
""" 程序入口 """
n = 10
while 1:
get_url = base_get_url + str(n)
res = json.loads(self.gau.access_url2(get_url))
if res["subjects"]:
doubanid = res["subjects"][0]["doubanid"]
vmdbid = res["subjects"][0]["vmdbid"]
url = "%s/%s" % (base_url, doubanid)
useragent= random.choice(useragent_list)
html_doc = self.ud.download(url, useragent)
page = self.hpbl.parser(html_doc)
tag_list = self.hpbl.get_tags(page)
params = {"subjects": []}
di= {"vmdbid": vmdbid, "tags": tag_list}
params["subjects"].append(di)
res = self.pau.access_url(post_url, params)
sleep_time = random.choice(time_list)
time.sleep(sleep_time)
n += 1
else:
break
if __name__ == "__main__":
spider = SpiderMain()
spider.crawl(base_url, base_get_url, post_url)
| [
"buildbot@goland.cn"
] | buildbot@goland.cn |
f4b602b08c5ee4b186dcd6141f1093234617e993 | a1a1a1f2b3d026449c953d4a84a3a95c22d5ef11 | /experiment_results/performance_stats/analysis.py | e0586defb60e4478101d768eab1a176ae013e0d0 | [
"BSD-3-Clause"
] | permissive | molguin92/paramics_traci | 810ab8aac1ab73a8bbd62e0f71c0bde9c5f692f6 | adcc38785c165ec4b668e2b587f615cf5461e1b0 | refs/heads/master | 2021-01-22T04:01:56.332570 | 2017-06-16T21:47:55 | 2017-06-16T21:47:55 | 81,477,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,417 | py | import matplotlib
import pandas
from matplotlib import pyplot
from matplotlib2tikz import save as tikz_save
from psutil import virtual_memory
import numpy
matplotlib.style.use('ggplot')
pyplot.interactive(False)
def to_min_secs(x, pos):
x = int(x)
minutes = x // 60
seconds = x % 60
return '{:02d}:{:02d}'.format(minutes, seconds)
def system_performance():
total_RAM_mb = virtual_memory().total / (1024 * 1024)
c = ['timestamp', 'escrituras hdd', 'lecturas hdd',
'total I/o', '% freq procesador', '% rendimiento procesador',
'% de uso procesador', '% tiempo procesador', 'MB disponibles RAM']
df = pandas.read_csv('SystemStats.csv', encoding='ISO-8859-1')
df.columns = c
df['timestamp'] = pandas.to_datetime(df['timestamp'])
starttime = df['timestamp'].min()
df['delta_t'] = starttime
temp = (df['timestamp'] - df['delta_t']).map(lambda x: int(round(x.total_seconds())))
df['delta_t'] = temp
df['MB disponibles RAM'] = pandas.to_numeric(df['MB disponibles RAM'])
df['% uso RAM'] = df['MB disponibles RAM'].map(lambda free: ((total_RAM_mb - free) / total_RAM_mb) * 100)
df['% de uso procesador'] = pandas.to_numeric(df['% de uso procesador'], errors='coerce')
fig, ax = pyplot.subplots()
ax.plot(df['delta_t'], df['% de uso procesador'], label='% uso procesador')
ax.plot(df['delta_t'], df['% uso RAM'], label='% uso RAM')
ax.legend(loc='lower center')
pyplot.xlabel('Tiempo (MM:SS)')
pyplot.ylabel('Porcentaje')
pyplot.xlim([0, 1600])
pyplot.ylim([0, 60])
formatter = matplotlib.ticker.FuncFormatter(to_min_secs)
ax.xaxis.set_major_formatter(formatter)
ax.set_facecolor('white')
ax.grid(color='#a1a1a1', linestyle='-', alpha=0.1)
tikz_save('system_performance.tex',
figureheight='\\figureheight',
figurewidth='\\figurewidth')
# pyplot.savefig('system_performance.pgf')
#pyplot.show()
def system_io():
total_RAM_mb = virtual_memory().total / (1024 * 1024)
c = ['timestamp', 'escrituras hdd', 'lecturas hdd',
'total I/O', '% freq procesador', '% rendimiento procesador',
'% de uso procesador', '% tiempo procesador', 'MB disponibles RAM']
df = pandas.read_csv('SystemStats.csv', encoding='ISO-8859-1')
df.columns = c
df['timestamp'] = pandas.to_datetime(df['timestamp'])
starttime = df['timestamp'].min()
df['delta_t'] = starttime
temp = (df['timestamp'] - df['delta_t']).map(lambda x: int(round(x.total_seconds())))
df['delta_t'] = temp
df['MB disponibles RAM'] = pandas.to_numeric(df['MB disponibles RAM'])
df['% uso RAM'] = df['MB disponibles RAM'].map(lambda free: ((total_RAM_mb - free) / total_RAM_mb) * 100)
df['% de uso procesador'] = pandas.to_numeric(df['% de uso procesador'], errors='coerce')
df['total I/O'] = pandas.to_numeric(df['total I/O'], errors='coerce')
fig, ax = pyplot.subplots()
ax.plot(df['delta_t'], df['total I/O'], label='Operaciones I/O en disco por segundo')
ax.legend(loc='upper left')
pyplot.xlabel('Tiempo (MM:SS)')
pyplot.ylabel('Operaciones I/O por segundo')
pyplot.xlim([0, 1600])
formatter = matplotlib.ticker.FuncFormatter(to_min_secs)
ax.xaxis.set_major_formatter(formatter)
ax.set_facecolor('white')
ax.grid(color='#a1a1a1', linestyle='-', alpha=0.1)
tikz_save('system_io.tex',
figureheight='\\figureheight',
figurewidth='\\figurewidth')
# pyplot.savefig('system_io.pgf')
# pyplot.show()
def vehicles_vs_time_evolution():
cols = ['time', 'nvehicles']
df = pandas.read_csv('vehicles_vs_time/nvehicles.csv')
df.columns = cols
cols2 = ['time', 'realtime']
df2 = pandas.read_csv('vehicles_vs_time/realtime.csv')
df2.columns = cols2
df['realtime'] = df2['realtime']
df['time'] = df['time'].map(lambda x: x - 27000)
fig, ax = pyplot.subplots()
ax.set_facecolor('white')
ax.grid(color='#a1a1a1', linestyle='-', alpha=0.1)
ax.plot(df['realtime'], df['nvehicles'], '.-', label='Tiempo Real')
ax.plot(df['time'], df['nvehicles'], '.-', label='Tiempo Simulado')
formatter = matplotlib.ticker.FuncFormatter(to_min_secs)
ax.xaxis.set_major_formatter(formatter)
pyplot.ylabel('Número de Vehículos en Simulación')
pyplot.xlabel('Tiempo [MM:SS]')
pyplot.legend(loc='lower right')
tikz_save('timevsvehicles_evolution.tex',
figureheight='\\figureheight',
figurewidth='\\figurewidth')
# pyplot.savefig('timevsvehicles_evolution.pgf')
# pyplot.show()
def vehicles_vs_time():
cols = ['nvhcs', 't', 'demand', 'runID']
df = pandas.read_csv('NVeh_vs_T.csv', encoding='ISO-8859-1', sep=';')
df.columns = cols
df['nvhcs'] = pandas.to_numeric(df['nvhcs'])
df['t'] = pandas.to_numeric(df['t'])
df['runID'] = pandas.to_numeric(df['runID'])
df['demand'] = df['demand'].map(lambda x: float(x.strip('%')) / 100)
df100 = df.loc[df['demand'] == 1.00]
df75 = df.loc[df['demand'] == 0.75]
df50 = df.loc[df['demand'] == 0.50]
df25 = df.loc[df['demand'] == 0.25]
mean_df = pandas.DataFrame(columns=['demand', 'mean_vhcs', 'mean_time'])
mean_df.loc[0] = [1.00, df100['nvhcs'].mean(), df100['t'].mean()]
mean_df.loc[1] = [0.75, df75['nvhcs'].mean(), df75['t'].mean()]
mean_df.loc[2] = [0.50, df50['nvhcs'].mean(), df50['t'].mean()]
mean_df.loc[3] = [0.25, df25['nvhcs'].mean(), df25['t'].mean()]
# from this point onward, plot
fig, ax = pyplot.subplots()
ax.set_facecolor('white')
ax.grid(color='#a1a1a1', linestyle='-', alpha=0.1)
pyplot.xlim([df['nvhcs'].min() - 50, df['nvhcs'].max() + 50])
pyplot.ylim(0, df['t'].max() + 120)
yticks_mins = numpy.arange(0, df['t'].max() + 120, 120)
yticks_10secs = numpy.arange(0, df['t'].max() + 120, 60)
xticks = numpy.arange(200, 1500, 100)
xticks_minor = numpy.arange(150, 1500, 10)
ax.set_yticks(yticks_mins)
ax.set_yticks(yticks_10secs, minor=True)
ax.set_xticks(xticks)
ax.set_xticks(xticks_minor, minor=True)
# trendline
z = numpy.polyfit(df['nvhcs'], df['t'], 2)
p = numpy.poly1d(z)
nx = range(0, int(df['nvhcs'].max()) + 200)
ax.plot(nx, p(nx), '-.', alpha=0.3, label='Ajuste polinomial', color='#F06449')
# scatter
ax.plot(df100['nvhcs'], df100['t'], 'o', color='#17BEBB', label='Factor de demanda 100%')
ax.plot(df75['nvhcs'], df75['t'], 'o', color='#EF2D56', label='Factor de demanda 75%')
ax.plot(df50['nvhcs'], df50['t'], 'o', color='#8CD867', label='Factor de demanda 50%')
ax.plot(df25['nvhcs'], df25['t'], 'o', color='#2F243A', label='Factor de demanda 25%')
ax.legend(loc='upper left')
pyplot.ylabel('Tiempo (MM:SS)')
formatter = matplotlib.ticker.FuncFormatter(to_min_secs)
ax.yaxis.set_major_formatter(formatter)
pyplot.xlabel('Cantidad promedio vehículos en simulación')
# pyplot.title('Scatterplot: Cantidad promedio de vehículos vs duración en tiempo real de simulación')
# pyplot.savefig('n_vhcs_vs_time.pgf')
# pyplot.show()
tikz_save('n_vhcs_vs_time.tex',
figureheight='\\figureheight',
figurewidth='\\figurewidth')
if __name__ == '__main__':
system_performance()
vehicles_vs_time()
vehicles_vs_time_evolution()
system_io()
| [
"molguin@dcc.uchile.cl"
] | molguin@dcc.uchile.cl |
5f00557a89702a8281ec74a5b343517935a20879 | 7782f56bc9d9d2271263e29c3f747b1169c36881 | /las2dem.py | 9a9e19ff644c1976c1a14b0828df59954c9a1f26 | [
"MIT"
] | permissive | manolaz/map2mesh | 6b1bc500a4179c4ac9df3ea9da51e4790977e44e | 19fa22043af42a8a76187b289ac54c02527eede2 | refs/heads/master | 2022-12-30T05:15:07.145619 | 2020-09-19T16:25:20 | 2020-09-19T16:25:20 | 261,340,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | from WBT.whitebox_tools import WhiteboxTools
import click
wbt = WhiteboxTools()
wbt.wbt.set_working_dir("./")
@click.command()
@click.option('--sourcefile', prompt='LIDAR Poincloud LAS sourcefile relative path?')
def lidar2dem(sourcefile):
export_file_name = (sourcefile).strip('.las') + '.tif'
wbt.lidar_idw_interpolation(
i=sourcefile,
output=export_file_name,
parameter="elevation",
returns="last",
resolution=1.5,
weight=2.0,
radius=2.5
)
if __name__ == '__main__':
lidar2dem() | [
"trungupdate@gmail.com"
] | trungupdate@gmail.com |
fe7513f7710b742033b78053789207899a5d0c59 | 2d64c6aae362a7b447d19b53dc97a1ca9fdabcec | /insertPosition.py | b65ff003a591ac9816916803ad16898cc212d979 | [] | no_license | rahuljha12527/GeeksForGeeksPyhtomCourse | 867ec6a57115ba7ce949ab00ad31c12b48d0dbc1 | bd15b4e26f0824ddc5b3faa60e0a3d76cd4bf173 | refs/heads/master | 2023-08-16T04:59:09.570075 | 2021-10-09T17:58:44 | 2021-10-09T17:58:44 | 387,244,469 | 0 | 0 | null | 2021-10-09T17:58:45 | 2021-07-18T18:33:14 | Python | UTF-8 | Python | false | false | 686 | py | def insertAtPosition(head,pos,data):
#code here
temp=Node(data)
if pos==0:
#print(temp)
tempp=Node(data)
tempp.next=head
#print(temp)
return tempp
len=0
curr=head
while curr:
curr=curr.next
len=len+1
if pos>len:
return head
curr=head
for i in range(pos-1):
curr=curr.next
temp.next=curr.next
curr.next=temp
return head
def insertAtPosition(head, pos, data):
    """Insert a new Node with *data* after the *pos*-th node (1-based).

    If the list has fewer than *pos* nodes, nothing is inserted.
    Returns the head of the list, matching the sibling implementation above.
    (Fixed: the original walked with `head` itself, losing the head
    reference, and implicitly returned None.)
    """
    node = Node(data)
    curr = head
    i = 1
    while curr and i < pos:
        i += 1
        curr = curr.next
    if curr:
        node.next = curr.next
        curr.next = node
    return head
"rahuljha12527@bitbucket.org"
] | rahuljha12527@bitbucket.org |
0b7ceb9c5e76556898ae43c7eaaa195f004fd20e | 99a1753c8c7558d6a1c3e66173ad45e791e1ee96 | /Day4/list0104_1.py | 8e6a11d57df98fae7afbd5ca8cb57831277a0e6f | [] | no_license | YunjinJo/PythonProject1 | 16fb7ed2d512cb5c7b12394699f9606da1d33236 | f4cb6ecec410dc766b61214880d48875a83bd267 | refs/heads/master | 2023-06-12T09:07:10.679735 | 2021-07-09T02:24:58 | 2021-07-09T02:24:58 | 380,867,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | import tkinter
# Draw a 7x5 tile map on a tkinter canvas; each cell of map_data indexes
# into the img tile list.
root = tkinter.Tk()
root.title('맵 데이터')
canvas = tkinter.Canvas(width = 336, height = 240)
canvas.pack()
img = [
    tkinter.PhotoImage(file = 'Chapter1/chip0.png'), # grass
    tkinter.PhotoImage(file='Chapter1/chip1.png'), # flower
    tkinter.PhotoImage(file='Chapter1/chip2.png'), # tree
    tkinter.PhotoImage(file='Chapter1/chip3.png') # sea
]
map_data = [
    [0,1,0,2,2,2,2],
    [3,0,0,0,2,2,2],
    [3,0,0,1,0,0,0],
    [3,3,0,0,0,0,1],
    [3,3,3,3,0,0,0]
]
for y in range(5): # 5 rows: y = 0,1,2,3,4
    for x in range(7): # 7 columns: x = 0,1,...,6
        n = map_data[y][x]
        # Tiles are 48x48 px; create_image positions by the tile centre.
        canvas.create_image(x*48+24, y*48+24, image = img[n])
root.mainloop()
"black44jo@naver.com"
] | black44jo@naver.com |
class Song:
    """One entry in the karaoke song catalogue."""

    def __init__(self, code_num, title, artist, run_time, year):
        """Store the identifying details and metadata for a single song."""
        self.code_num, self.title = code_num, title
        self.artist, self.run_time = artist, run_time
        self.year = year
| [
"saerlaith.robyn@gmail.com"
] | saerlaith.robyn@gmail.com |
08d5b19395bcf10b03b2efdd7b18b440b39aacd4 | c9404fdfc44d44ad0308534759a173338c2462ee | /app/routes.py | 6cc3b69b3d28598f7abde033f0508db4f662dfcc | [] | no_license | danielkocot/kitchenmaid | 1c575e97abbe892c180f549f1c94b699c7c0f579 | 6df737cf7d0dae6a8877b23e044a3cb7beeadb0a | refs/heads/master | 2021-05-05T17:19:02.532339 | 2018-01-14T16:34:30 | 2018-01-14T16:34:30 | 117,447,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from flask import render_template, flash, redirect, url_for
from app import app, db
from app.forms import NewForm
from app.models import Grocery
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page (served at both '/' and '/index')."""
    return render_template('index.html')
@app.route('/list')
def list():
    """Render every Grocery row in the inventory table.

    NOTE(review): the view name shadows the builtin ``list``; renaming it
    would break the ``url_for('list')`` calls used elsewhere, so it stays.
    """
    groceries = Grocery.query.all()
    return render_template('list.html', groceries=groceries)
@app.route('/new', methods=['GET','POST'])
def new():
    """Show and process the form that adds a Grocery.

    On a valid POST the new row is committed and the user is redirected
    back to an empty form; on GET (or validation failure) the form is
    simply re-rendered.
    """
    form = NewForm()
    if form.validate_on_submit():
        grocery = Grocery(name=form.name.data, stock=form.stock.data, best_before=form.best_before_date.data)
        db.session.add(grocery)
        db.session.commit()
        flash('Congrats, the Grocery is added')
        return redirect(url_for('new'))
    return render_template('new.html', form=form)
@app.route('/edit/<id>', methods=['GET','POST'])
def edit(id):
    """Placeholder: editing a Grocery is not implemented yet."""
    pass
@app.route('/delete/<id>', methods=['GET','POST'])
def delete(id):
    """Placeholder: deleting a Grocery is not implemented yet.

    Fixed: the route captures ``<id>`` but the view function took no
    argument, so Flask raised TypeError on every request to /delete/<id>.
    """
    pass
@app.route('/add/<id>', methods=['GET','POST'])
def add(id):
    """Increment the stock of the Grocery with the given id, then redirect.

    NOTE(review): ``Grocery.query.get(id)`` returns None for an unknown id,
    which would raise AttributeError below — consider ``get_or_404``.
    """
    grocery = Grocery.query.get(id)
    grocery.stock += 1
    db.session.commit()
    return redirect(url_for('list'))
@app.route('/take/<id>', methods=['GET','POST'])
def take(id):
    """Decrement the stock of the Grocery with the given id, then redirect.

    NOTE(review): no lower bound — stock can go negative, and an unknown id
    makes ``get`` return None (AttributeError below). Confirm intended.
    """
    grocery = Grocery.query.get(id)
    grocery.stock -= 1
    db.session.commit()
    return redirect(url_for('list'))
"danielgrycman@icloud.com"
] | danielgrycman@icloud.com |
9235562e15c62f994a5e57b01b43bd583aa0de3d | 1f35d14c9fd229182d86c2c0a7a36b092acc78fd | /11/convert.py | c95fa07a888932cc8e582b1544c40203a6da95b1 | [] | no_license | campoloj/SoftwareDevelopment2016 | b160824b15634bfa2bab91da628cee2ab458862b | c04863a4f01e755988afc5592bfd1f65b4d39a0e | refs/heads/master | 2020-05-23T07:54:10.272046 | 2017-01-30T21:05:29 | 2017-01-30T21:05:29 | 80,461,277 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,215 | py | from dealer.globals import *
from dealer.dealer import Dealer
from dealer.player_state import PlayerState
from dealer.species import Species
from dealer.traitcard import TraitCard
from dealer.action4 import Action4
from dealer.action import *
class Convert(object):
    """
    Methods for converting between JSON input and Python objects
    """
    def __init__(self):
        # Stateless: every converter is a classmethod; instances carry nothing.
        pass
    @classmethod
    def json_to_dealer(cls, json_config):
        """
        Converts a JSON Configuration into a Dealer object
        :param json_config: A JSON Configuration as specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/8.html
        :return: a Dealer object
        """
        assert(len(json_config) == CONFIG_LENGTH)
        [json_lop, wh_food, json_deck] = json_config
        lop = [cls.json_to_player(json_player) for json_player in json_lop]
        deck = [cls.json_to_trait(trait_card) for trait_card in json_deck]
        dealer = Dealer(lop, wh_food, deck)
        dealer.validate_attributes()
        return dealer
    @classmethod
    def dealer_to_json(cls, dealer):
        """
        Converts a Dealer object into a JSON Configuration
        :param dealer: a Dealer object
        :return: a JSON Configuration as specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/8.html
        """
        dealer.validate_attributes()
        json_players = [cls.player_to_json(player) for player in dealer.list_of_players]
        json_deck = [cls.trait_to_json(trait_card) for trait_card in dealer.deck]
        return [json_players, dealer.watering_hole, json_deck]
    @classmethod
    def json_to_step4(cls, json_step4):
        """
        Converts a JSON step 4 into a List of Action4 Objects.
        :param json_step4: A JSON Step4 as specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/11.html
        :return: List of Action4
        """
        result = []
        for json_action4 in json_step4:
            result.append(cls.json_to_action4(json_action4))
        return result
    @classmethod
    def json_to_action4(cls, json_action4):
        """
        Converts a JSON action into a Action4 Object.
        :param json_action4: A JSON Action4 as specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/11.html
        :return: Action4
        """
        # Positional layout of an Action4: [food-card index, GPs, GBs, BTs, RTs].
        food_card_action = FoodCardAction(json_action4[0])
        actions = [food_card_action]
        list_of_gp = json_action4[1]
        list_of_gb = json_action4[2]
        list_of_bt = json_action4[3]
        list_of_rt = json_action4[4]
        actions += (cls.json_to_grow_action(list_of_gp) + cls.json_to_grow_action(list_of_gb) +
                    cls.json_to_species_action(list_of_bt) + cls.json_to_replace_trait_action(list_of_rt))
        return Action4(actions)
    @classmethod
    def json_to_grow_action(cls, list_of_json_grow):
        """
        Converts a List of JSON grow actions to a List of GrowActions
        :param list_of_json_grow: a List of GP or GB specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/11.html
        :return: List of GrowAction
        """
        result = []
        for json_grow in list_of_json_grow:
            result.append(GrowAction(json_grow[0], json_grow[1], json_grow[2]))
        return result
    @classmethod
    def json_to_species_action(cls, list_of_bt):
        """
        Converts a List of JSON bt to a List of AddSpeciesActions
        :param list_of_bt: a List of BT specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/11.html
        :return: List of AddSpeciesAction
        """
        result = []
        for bt in list_of_bt:
            # bt[0] is the payment card index; the rest are trait-card indices.
            traits = []
            for i in range(1, len(bt)):
                traits.append(bt[i])
            result.append(AddSpeciesAction(bt[0], traits))
        return result
    @classmethod
    def json_to_replace_trait_action(cls, list_of_rt):
        """
        Converts a List of JSON grow actions to a List of GrowActions
        :param list_of_rt:
        :return:
        """
        result = []
        for rt in list_of_rt:
            result.append(ReplaceTraitAction(rt[0], rt[1], rt[2]))
        return result
    @classmethod
    def json_to_feeding(cls, json_feeding):
        """
        Converts a JSON Feeding into a Python representation of a Feeding
        :param json_feeding: a Feeding as specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/6.html
        :return: [PlayerState, Natural+, [PlayerState,...]] representing the attacking PlayerState,
        the available watering hole food, and the PlayerStates of other players in the game
        """
        assert(len(json_feeding) == FEEDING_LENGTH)
        [json_player, wh_food, json_lop] = json_feeding
        assert(wh_food > MIN_WATERING_HOLE)
        player = cls.json_to_player(json_player)
        other_players = [cls.json_to_player(op) for op in json_lop]
        return [player, wh_food, other_players]
    @classmethod
    def json_to_player(cls, json_player):
        """
        Converts a JSON Player+ to a PlayerState
        :param json_player: a JSON Player+ as specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/8.html
        :return: a PlayerState object
        """
        # NOTE(review): the destructuring below writes each JSON field *label*
        # (e.g. "id", "species") into module globals via gdict, keyed by the
        # constants from dealer.globals. This is a side-effecting trick that
        # also skips validating that the labels match — confirm intended.
        gdict = globals()
        if len(json_player) == PLAYER_LENGTH:
            [[gdict[ID], player_id], [gdict[SPECIES], json_los], [gdict[BAG], food_bag]] = json_player
            cards = []
        else:
            [[gdict[ID], player_id], [gdict[SPECIES], json_los],
             [gdict[BAG], food_bag], [gdict[CARDS], cards]] = json_player
        player_species = [cls.json_to_species(json_species) for json_species in json_los]
        player_hand = [cls.json_to_trait(trait_card) for trait_card in cards]
        player_obj = PlayerState(name=player_id, hand=player_hand, food_bag=food_bag, species=player_species)
        player_obj.validate_attributes()
        return player_obj
    @classmethod
    def player_to_json(cls, player):
        """
        Converts a PlayerState to a JSON Player+. Does not render empty hands.
        :param player: a PlayerState object
        :return: a JSON Player+ as specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/8.html
        """
        player.validate_attributes()
        json_species = [cls.species_to_json(species_obj) for species_obj in player.species]
        json_hand = [cls.trait_to_json(trait_card) for trait_card in player.hand]
        json_player = [[ID, player.name], [SPECIES, json_species], [BAG, player.food_bag]]
        if json_hand:
            json_player.append([CARDS, json_hand])
        return json_player
    @classmethod
    def json_to_species(cls, json_species):
        """
        Converts a JSON Species+ into a Species.
        :param json_species: a JSON Species+ as specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/6.html
        :return: a Species object
        """
        # Same globals()-writing destructuring trick as in json_to_player;
        # the optional fifth field is the fat-food store.
        gdict = globals()
        if len(json_species) == SPECIES_LENGTH:
            [[gdict[FOOD], species_food], [gdict[BODY], species_body], [gdict[POPULATION], species_pop],
             [gdict[TRAITS], json_species_traits]] = json_species
            fat_food = False
        else:
            [[gdict[FOOD], species_food], [gdict[BODY], species_body], [gdict[POPULATION], species_pop],
             [gdict[TRAITS], json_species_traits], [gdict[FATFOOD], fat_food]] = json_species
        species_traits = [cls.json_to_trait(trait) for trait in json_species_traits]
        species_obj = Species(species_pop, species_food, species_body, species_traits, fat_food)
        species_obj.validate_attributes()
        return species_obj
    @classmethod
    def species_to_json(cls, species_obj):
        """
        Converts a Species object into a JSON Species+. Does not render empty fat-food.
        :param species_obj: a Species object
        :return: a JSON Species+ as specified by the data definition at
        http://www.ccs.neu.edu/home/matthias/4500-s16/6.html
        """
        species_obj.validate_attributes()
        json_traits = [cls.trait_to_json(trait) for trait in species_obj.traits]
        json_species = [[FOOD, species_obj.food], [BODY, species_obj.body],
                        [POPULATION, species_obj.population], [TRAITS, json_traits]]
        if species_obj.fat_storage:
            json_species.append([FATFOOD, species_obj.fat_storage])
        return json_species
    @classmethod
    def json_to_trait(cls, json_trait):
        """
        Converts a JSON Trait or SpeciesCard into a TraitCard
        :param json_trait: a JSON Trait or SpeciesCard as specified by the data definitions at
        http://www.ccs.neu.edu/home/matthias/4500-s16/5.html and
        http://www.ccs.neu.edu/home/matthias/4500-s16/8.html, respectively.
        :return: a TraitCard object
        """
        # NOTE: basestring exists only under Python 2 — this module targets Py2.
        if isinstance(json_trait, basestring):
            [food, trait] = [False, json_trait]
        else:
            [food, trait] = json_trait
        trait_card = TraitCard(trait, food)
        trait_card.validate_attributes()
        return trait_card
    @classmethod
    def trait_to_json(cls, trait_card):
        """
        Converts a TraitCard into a JSON Trait or SpeciesCard
        :param trait_card: a TraitCard object
        :return: a JSON Trait or SpeciesCard as specified by the data definitions at
        http://www.ccs.neu.edu/home/matthias/4500-s16/5.html and
        http://www.ccs.neu.edu/home/matthias/4500-s16/8.html, respectively.
        """
        trait_card.validate_attributes()
        # `is False` (not falsy test): 0 is a legitimate food-point value.
        return trait_card.trait if trait_card.food_points is False else [trait_card.food_points, trait_card.trait]
| [
"Sam@SLucyk.local"
] | Sam@SLucyk.local |
291114857f37d3d78566776fdf66822d72158cd2 | b4eb63fa4dea38b102e1f307ffef55741cb799e6 | /testcode.py | 29ef18d1562699267fac49c16e04ae3f5efff386 | [] | no_license | venkpras/unfinished-work | d0a7cabf5a91424a945eba77b84fbce870dd556d | fcb4497289fa08147f4aa862581c2be29e7fb82c | refs/heads/main | 2023-03-18T23:38:16.998868 | 2021-03-15T11:34:47 | 2021-03-15T11:34:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | import sys
# Make the current directory importable so the local Organization module resolves.
sys.path.append(".")
import json  # NOTE(review): unused here — confirm dump_schema needs it before removing
from Organization import Organization
# Smoke test: instantiate the model and dump its schema.
p = Organization()
p.dump_schema()
"venkpras92@gmail.com"
] | venkpras92@gmail.com |
ef6fcee73ceb037040e12f062adc54ae90566b7b | 7e9730526ffd4f4a8784ec43a329744a05ce0b9a | /english_dict.py | 10ec0a301780dc74972d599ba77b313bbc521c80 | [] | no_license | 960314scott/python200820 | 8e64672dbb0c43fd9c280c23defe81f74dec430e | 00ecc0b78ff72cccd7cc5c4486c841663f849985 | refs/heads/master | 2022-12-03T10:59:55.097393 | 2020-08-20T07:34:37 | 2020-08-20T07:34:37 | 288,938,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,339 | py | d = {}
# Interactive English<->Chinese vocabulary trainer operating on the dict `d`
# (English word -> Chinese meaning). Menu options: add, list, EN->ZH lookup,
# ZH->EN lookup, quiz, quit.
print('歡迎來到英文高手字典。')
while True:
    print('1.新增字典')
    print('2.列出字典')
    print('3.英翻中')
    print('4.中翻英')
    print('5.測驗')
    print('6.離開')
    option = input('請輸入選項:')
    if option == "6":
        break
    if option == "1":
        # Add new word pairs until the user enters 0.
        while True:
            voc = input('請輸入英文單字,退出請輸入0:')
            if voc == "0":
                break
            if voc not in d:
                voc_zh = input('請入中文意思')
                d[voc] = voc_zh
            else:
                print('已存在')
    elif option == "2":
        # List all entries in alphabetical order.
        for word in sorted(d):
            print(word, ':', d[word])
    elif option == '3':
        # English -> Chinese lookup.
        while True:
            voc = input("請輸入英文單字,輸入0退出")
            if voc == "0":
                break
            # BUG FIX: the found-flag was initialised once *outside* this loop,
            # so after the first hit later misses never reported "無此單字".
            if voc in d:
                print(voc, "的中文是", d[voc])
            else:
                print("無此單字")
    elif option == "4":
        # Chinese -> English lookup (several keys may share one meaning).
        while True:
            ch = input("請輸入中文,輸入0退出")
            if ch == "0":
                break
            found = False
            for k, v in d.items():
                if ch == v:
                    print(k, "的中文是", v)
                    found = True
            if not found:
                print("無此單字")
    elif option == "5":
        # Quiz mode; each correct answer is worth 100/len(d) points.
        lan = input("請選擇考中文(輸入1)考英文(輸入2)")
        score = 0
        # BUG FIX: input() returns a string, so the original `lan == 1` /
        # `lan == 2` int comparisons were never true and the quiz never ran.
        if lan == "1":
            for k, v in d.items():
                print(k)
                ans = input('中文?')
                # BUG FIX: the Chinese answer was compared against the English
                # key k; it must be checked against the Chinese meaning v.
                if ans == v:
                    print("正確")
                    score = score + (100 / len(d))
                else:
                    print("錯了請加油")
            print("你的分數是", score)
        elif lan == "2":
            for k, v in d.items():
                print(v)
                ans = input('英文?')
                # BUG FIX: the English answer was compared against the Chinese
                # value v; it must be checked against the English key k.
                if ans == k:
                    print("正確")
                    score = score + (100 / len(d))
                else:
                    print("錯了請加油")
            print("你的分數是", score)
print('感謝使用')
"noreply@github.com"
] | noreply@github.com |
4ad418861639bf2757ac0706d1208fdf8bc7e3df | a73e07ee85f159f18bebea0e755dab2d577d1c54 | /NLP/BERT/bert_naver_movie.py | 6f72644747369f352250ce3feeaa0667160ecaa5 | [] | no_license | bbiyongel/TIL | 469ae2ad30adfaa9f321c9b2f7bc513fed85e33c | 487ceaf7448461a745f532f9e75637e540b7eecc | refs/heads/master | 2021-08-17T04:28:57.050409 | 2021-07-24T20:55:04 | 2021-07-24T20:55:04 | 242,614,208 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,451 | py | # -*- coding: utf-8 -*-
"""bert_naver_movie.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1tIf0Ugdqg4qT7gcxia3tL7und64Rv1dP
# **네이버 영화리뷰 감정분석 with Hugging Face BERT**
BERT(Bidirectional Encoder Representations from Transformers)는 구글이 개발한 사전훈련(pre-training) 모델입니다. 위키피디아 같은 텍스트 코퍼스를 사용해서 미리 학습을 하면, 언어의 기본적인 패턴을 이해한 모델이 만들어집니다. 이를 기반으로 새로운 문제에 적용하는 전이학습(transfer learning)을 수행합니다. 좀 더 적은 데이터로 보다 빠르게 학습이 가능하다는 장점이 있습니다. 그래서 최근 자연어처리의 핵심 기법으로 떠오르고 있습니다.
이 예제에서는 한글 NLP의 Hello world라고 할 수 있는 네이버 영화리뷰 감정분석을 구현해보겠습니다. 가장 유명한 모델 중 하나인 Hugging Face의 PyTorch BERT를 사용하였습니다. 아래의 Chris McCormick의 블로그를 참조하여 한글에 맞게 수정하였음을 미리 알려드립니다.
< BERT Fine-Tuning Tutorial with PyTorch ><br>
-> https://mccormickml.com/2019/07/22/BERT-fine-tuning
<br>
<br>
<br>
BERT에 대해서 좀 더 자세한 설명은 박상길님과 Jay Alammar의 블로그를 참조하시기 바랍니다.
< BERT 톺아보기 ><br>
-> http://docs.likejazz.com/bert/
< The Illustrated BERT, ELMo, and co. (How NLP Cracked Transfer Learning) ><br>
-> http://jalammar.github.io/illustrated-bert/
<br>
<br>
<br>
<br>
# **준비 사항**
"""
# Hugging Face의 트랜스포머 모델을 설치
!pip install transformers
import tensorflow as tf
import torch
from transformers import BertTokenizer
from transformers import BertForSequenceClassification, AdamW, BertConfig
from transformers import get_linear_schedule_with_warmup
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import random
import time
import datetime
"""<br>
<br>
# **데이터 로드**
"""
# 네이버 영화리뷰 감정분석 데이터 다운로드
!git clone https://github.com/e9t/nsmc.git
"""박은정님의 네이버 영화리뷰 감정분석 데이터를 Github에서 다운로드 합니다. 아래와 같이 nsmc 디렉토리에 있는 ratings_train.txt와 ratings_test.txt를 사용하겠습니다.
<br>
<br>
<br>
"""
# 디렉토리의 파일 목록
!ls nsmc -la
# 판다스로 훈련셋과 테스트셋 데이터 로드
train = pd.read_csv("nsmc/ratings_train.txt", sep='\t')
test = pd.read_csv("nsmc/ratings_test.txt", sep='\t')
print(train.shape)
print(test.shape)
"""훈련셋 150,000개와 테스트셋 50,000개의 데이터가 존재합니다.
<br>
<br>
<br>
"""
# 훈련셋의 앞부분 출력
train.head(10)
"""id는 회원정보, document는 리뷰 문장입니다. label이 0이면 부정, 1이면 긍정으로 분류됩니다. id는 사용하지 않기 때문에 document와 label만 추출하겠습니다.
<br>
<br>
# **전처리 - 훈련셋**
"""
# 리뷰 문장 추출
sentences = train['document']
sentences[:10]
# BERT의 입력 형식에 맞게 변환
sentences = ["[CLS] " + str(sentence) + " [SEP]" for sentence in sentences]
sentences[:10]
"""
BERT의 입력은 위의 그림과 같은 형식입니다. Classification을 뜻하는 [CLS] 심볼이 제일 앞에 삽입됩니다. 파인튜닝시 출력에서 이 위치의 값을 사용하여 분류를 합니다. [SEP]은 Seperation을 가리키는데, 두 문장를 구분하는 역할을 합니다. 이 예제에서는 문장이 하나이므로 [SEP]도 하나만 넣습니다.
<br>
<br>
<br>
"""
# 라벨 추출
labels = train['label'].values
labels
# BERT의 토크나이저로 문장을 토큰으로 분리
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
print (sentences[0])
print (tokenized_texts[0])
"""BERT는 형태소분석으로 토큰을 분리하지 않습니다. WordPiece라는 통계적인 방식을 사용합니다. 한 단어내에서 자주 나오는 글자들을 붙여서 하나의 토큰으로 만듭니다. 이렇게 하면 언어에 상관없이 토큰을 생성할 수 있다는 장점이 있습니다. 또한 신조어 같이 사전에 없는 단어를 처리하기도 좋습니다.
위의 결과에서 ## 기호는 앞 토큰과 이어진다는 표시입니다. 토크나이저는 여러 언어의 데이터를 기반으로 만든 'bert-base-multilingual-cased'를 사용합니다. 그래서 한글도 처리가 가능합니다.
<br>
<br>
<br>
"""
# 입력 토큰의 최대 시퀀스 길이
MAX_LEN = 128
# 토큰을 숫자 인덱스로 변환
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
# 문장을 MAX_LEN 길이에 맞게 자르고, 모자란 부분을 패딩 0으로 채움
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
input_ids[0]
"""보통 딥러닝 모델에는 토큰 자체를 입력으로 넣을 수 없습니다. 임베딩 레이어에는 토큰을 숫자로 된 인덱스로 변환하여 사용합니다. BERT의 토크나이저는 {단어토큰:인덱스}로 구성된 단어사전을 가지고 있습니다. 이를 참조하여 토큰을 인덱스로 바꿔줍니다.
<br>
<br>
<br>
"""
# 어텐션 마스크 초기화
attention_masks = []
# 어텐션 마스크를 패딩이 아니면 1, 패딩이면 0으로 설정
# 패딩 부분은 BERT 모델에서 어텐션을 수행하지 않아 속도 향상
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
print(attention_masks[0])
# 훈련셋과 검증셋으로 분리
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids,
labels,
random_state=2018,
test_size=0.1)
# 어텐션 마스크를 훈련셋과 검증셋으로 분리
train_masks, validation_masks, _, _ = train_test_split(attention_masks,
input_ids,
random_state=2018,
test_size=0.1)
# 데이터를 파이토치의 텐서로 변환
train_inputs = torch.tensor(train_inputs)
train_labels = torch.tensor(train_labels)
train_masks = torch.tensor(train_masks)
validation_inputs = torch.tensor(validation_inputs)
validation_labels = torch.tensor(validation_labels)
validation_masks = torch.tensor(validation_masks)
print(train_inputs[0])
print(train_labels[0])
print(train_masks[0])
print(validation_inputs[0])
print(validation_labels[0])
print(validation_masks[0])
# 배치 사이즈
batch_size = 32
# 파이토치의 DataLoader로 입력, 마스크, 라벨을 묶어 데이터 설정
# 학습시 배치 사이즈 만큼 데이터를 가져옴
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
"""<br>
<br>
# **전처리 - 테스트셋**
"""
# 리뷰 문장 추출
sentences = test['document']
sentences[:10]
# BERT의 입력 형식에 맞게 변환
sentences = ["[CLS] " + str(sentence) + " [SEP]" for sentence in sentences]
sentences[:10]
# 라벨 추출
labels = test['label'].values
labels
# BERT의 토크나이저로 문장을 토큰으로 분리
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
print (sentences[0])
print (tokenized_texts[0])
# 입력 토큰의 최대 시퀀스 길이
MAX_LEN = 128
# 토큰을 숫자 인덱스로 변환
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
# 문장을 MAX_LEN 길이에 맞게 자르고, 모자란 부분을 패딩 0으로 채움
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
input_ids[0]
# 어텐션 마스크 초기화
attention_masks = []
# 어텐션 마스크를 패딩이 아니면 1, 패딩이면 0으로 설정
# 패딩 부분은 BERT 모델에서 어텐션을 수행하지 않아 속도 향상
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
print(attention_masks[0])
# 데이터를 파이토치의 텐서로 변환
test_inputs = torch.tensor(input_ids)
test_labels = torch.tensor(labels)
test_masks = torch.tensor(attention_masks)
print(test_inputs[0])
print(test_labels[0])
print(test_masks[0])
# 배치 사이즈
batch_size = 32
# 파이토치의 DataLoader로 입력, 마스크, 라벨을 묶어 데이터 설정
# 학습시 배치 사이즈 만큼 데이터를 가져옴
test_data = TensorDataset(test_inputs, test_masks, test_labels)
test_sampler = RandomSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
"""<br>
<br>
# **모델 생성**
"""
# GPU 디바이스 이름 구함
device_name = tf.test.gpu_device_name()
# GPU 디바이스 이름 검사
if device_name == '/device:GPU:0':
print('Found GPU at: {}'.format(device_name))
else:
raise SystemError('GPU device not found')
# 디바이스 설정
if torch.cuda.is_available():
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
device = torch.device("cpu")
print('No GPU available, using the CPU instead.')
# 분류를 위한 BERT 모델 생성
model = BertForSequenceClassification.from_pretrained("bert-base-multilingual-cased", num_labels=2)
model.cuda()
"""
사전훈련된 BERT는 다양한 문제로 전이학습이 가능합니다. 여기서는 위의 그림과 같이 한 문장을 분류하는 방법을 사용합니다. 영화리뷰 문장이 입력으로 들어가면, 긍정/부정으로 구분합니다. 모델의 출력에서 [CLS] 위치인 첫 번째 토큰에 새로운 레이어를 붙여서 파인튜닝을 합니다. Huggning Face는 BertForSequenceClassification() 함수를 제공하기 때문에 쉽게 구현할 수 있습니다.
<br>
<br>
<br>
"""
# 옵티마이저 설정
optimizer = AdamW(model.parameters(),
lr = 2e-5, # 학습률
eps = 1e-8 # 0으로 나누는 것을 방지하기 위한 epsilon 값
)
# 에폭수
epochs = 4
# 총 훈련 스텝 : 배치반복 횟수 * 에폭
total_steps = len(train_dataloader) * epochs
# 학습률을 조금씩 감소시키는 스케줄러 생성
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0,
num_training_steps = total_steps)
"""<br>
<br>
# **모델 학습**
"""
# Accuracy between argmax predictions and flat gold labels.
def flat_accuracy(preds, labels):
    """Return the fraction of rows in *preds* whose argmax matches *labels*."""
    predicted = np.argmax(preds, axis=1).flatten()
    gold = labels.flatten()
    correct = np.sum(predicted == gold)
    return correct / len(gold)
# Pretty-printer for elapsed wall-clock time.
def format_time(elapsed):
    """Return *elapsed* (seconds) as an 'h:mm:ss' string, rounded to whole seconds."""
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
# 재현을 위해 랜덤시드 고정
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
# 그래디언트 초기화
model.zero_grad()
# 에폭만큼 반복
for epoch_i in range(0, epochs):
# ========================================
# Training
# ========================================
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('Training...')
# 시작 시간 설정
t0 = time.time()
# 로스 초기화
total_loss = 0
# 훈련모드로 변경
model.train()
# 데이터로더에서 배치만큼 반복하여 가져옴
for step, batch in enumerate(train_dataloader):
# 경과 정보 표시
if step % 500 == 0 and not step == 0:
elapsed = format_time(time.time() - t0)
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
# 배치를 GPU에 넣음
batch = tuple(t.to(device) for t in batch)
# 배치에서 데이터 추출
b_input_ids, b_input_mask, b_labels = batch
# Forward 수행
outputs = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
# 로스 구함
loss = outputs[0]
# 총 로스 계산
total_loss += loss.item()
# Backward 수행으로 그래디언트 계산
loss.backward()
# 그래디언트 클리핑
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# 그래디언트를 통해 가중치 파라미터 업데이트
optimizer.step()
# 스케줄러로 학습률 감소
scheduler.step()
# 그래디언트 초기화
model.zero_grad()
# 평균 로스 계산
avg_train_loss = total_loss / len(train_dataloader)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(format_time(time.time() - t0)))
# ========================================
# Validation
# ========================================
print("")
print("Running Validation...")
#시작 시간 설정
t0 = time.time()
# 평가모드로 변경
model.eval()
# 변수 초기화
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
# 데이터로더에서 배치만큼 반복하여 가져옴
for batch in validation_dataloader:
# 배치를 GPU에 넣음
batch = tuple(t.to(device) for t in batch)
# 배치에서 데이터 추출
b_input_ids, b_input_mask, b_labels = batch
# 그래디언트 계산 안함
with torch.no_grad():
# Forward 수행
outputs = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask)
# 로스 구함
logits = outputs[0]
# CPU로 데이터 이동
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# 출력 로짓과 라벨을 비교하여 정확도 계산
tmp_eval_accuracy = flat_accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_steps += 1
print(" Accuracy: {0:.2f}".format(eval_accuracy/nb_eval_steps))
print(" Validation took: {:}".format(format_time(time.time() - t0)))
print("")
print("Training complete!")
"""에폭마다 훈련셋과 검증셋을 반복하여 학습을 수행합니다.
<br>
<br>
# **테스트셋 평가**
"""
#시작 시간 설정
t0 = time.time()
# 평가모드로 변경
model.eval()
# 변수 초기화
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
# 데이터로더에서 배치만큼 반복하여 가져옴
for step, batch in enumerate(test_dataloader):
# 경과 정보 표시
if step % 100 == 0 and not step == 0:
elapsed = format_time(time.time() - t0)
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(test_dataloader), elapsed))
# 배치를 GPU에 넣음
batch = tuple(t.to(device) for t in batch)
# 배치에서 데이터 추출
b_input_ids, b_input_mask, b_labels = batch
# 그래디언트 계산 안함
with torch.no_grad():
# Forward 수행
outputs = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask)
# 로스 구함
logits = outputs[0]
# CPU로 데이터 이동
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# 출력 로짓과 라벨을 비교하여 정확도 계산
tmp_eval_accuracy = flat_accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_steps += 1
print("")
print("Accuracy: {0:.2f}".format(eval_accuracy/nb_eval_steps))
print("Test took: {:}".format(format_time(time.time() - t0)))
"""테스트셋의 정확도가 87%입니다. <BERT 톺아보기> 블로그에서는 같은 데이터로 88.7%를 달성하였습니다. 거기서는 한글 코퍼스로 사전훈련을 하여 새로운 모델을 만들었습니다. 반면에 우리는 BERT의 기본 모델인 bert-base-multilingual-cased를 사용했기 때문에 더 성능이 낮은 것 같습니다.
<br>
<br>
# **새로운 문장 테스트**
"""
# Convert raw sentences into BERT model inputs.
def convert_input_data(sentences):
    """Tokenize *sentences* and return (input_ids, attention_masks) tensors.

    Relies on the module-level `tokenizer`; sentences are assumed to already
    carry the [CLS]/[SEP] markers added upstream.
    """
    # Split each sentence into WordPiece tokens with BERT's tokenizer.
    tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
    # Maximum input sequence length in tokens.
    MAX_LEN = 128
    # Map tokens to their vocabulary indices.
    input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
    # Truncate to MAX_LEN and right-pad shorter sequences with 0.
    input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
    # Build the attention masks.
    attention_masks = []
    # 1 for real tokens, 0 for padding; BERT skips attention on padding,
    # which also speeds things up.
    for seq in input_ids:
        seq_mask = [float(i>0) for i in seq]
        attention_masks.append(seq_mask)
    # Wrap everything as PyTorch tensors.
    inputs = torch.tensor(input_ids)
    masks = torch.tensor(attention_masks)
    return inputs, masks
# Run the fine-tuned classifier on arbitrary sentences.
def test_sentences(sentences):
    """Return the raw (pre-softmax) logits for *sentences* as a numpy array.

    Uses the module-level `model` and `device`; argmax over the result gives
    the predicted class (0 = negative, 1 = positive).
    """
    # Switch to evaluation mode (disables dropout etc.).
    model.eval()
    # Convert the sentences into model inputs.
    inputs, masks = convert_input_data(sentences)
    # Move the data onto the GPU (or whatever `device` is).
    b_input_ids = inputs.to(device)
    b_input_mask = masks.to(device)
    # No gradients needed for inference.
    with torch.no_grad():
        # Forward pass.
        outputs = model(b_input_ids,
                        token_type_ids=None,
                        attention_mask=b_input_mask)
    # First element of the output tuple holds the logits.
    logits = outputs[0]
    # Detach and move back to the CPU as a numpy array.
    logits = logits.detach().cpu().numpy()
    return logits
# Sanity-check the fine-tuned model on two unseen reviews;
# argmax over the logits gives the label (0 = negative, 1 = positive).
logits = test_sentences(['연기는 별로지만 재미 하나는 끝내줌!'])
print(logits)
print(np.argmax(logits))
logits = test_sentences(['주연배우가 아깝다. 총체적 난국...'])
print(logits)
print(np.argmax(logits))
"""학습한 모델을 가지고 실제 문장을 넣어봤습니다. 출력 로짓은 소프트맥스가 적용되지 않은 상태입니다. argmax로 더 높은 값의 위치를 라벨로 설정하면 됩니다. 0은 부정, 1은 긍정입니다. 위와 같이 새로운 문장에도 잘 분류를 하고 있습니다.
<br>
<br>
<br>
< 챗봇 개발자 모임 ><br>
- 페이스북 그룹에 가입하시면 챗봇에 대한 최신 정보를 쉽게 받으실 수 있습니다.
- https://www.facebook.com/groups/ChatbotDevKR/
""" | [
"bbiyongel@gmail.com"
] | bbiyongel@gmail.com |
a5a93d8c75a41abbea1d7fc918fc011fa840eb02 | 6a7e2dd291d8dcc5a7fcf146c558ae240b2afca4 | /Daily Flash/week 2/day 5/MySolutions/Python/Program2.py | 96010e780db3f7514f6d3a89290d3b141bb27787 | [] | no_license | sanvedj/MayuriC2WT | f35579ad56fa266bbedeea1c7e9c4c448f445c35 | 029eb05a95f9b5958d627c253583c40b80db2adc | refs/heads/master | 2021-01-03T12:14:13.804290 | 2020-02-12T18:05:19 | 2020-02-12T18:05:19 | 240,080,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py |
print("\nNumbers ranging between 1 to 100 that are divisible by 4 and 7 : \n")
for i in range(1,101):
if(i%4==0 and i%7==0):
print(i,end=" ")
print()
'''
Output :
(base) mayuri@mayuri-PC:~/C2WT/Daily Flash/week 2/day 5/MySolutions/Python$ python3 Program2.py
Numbers ranging between 1 to 100 that are divisible by 4 and 7 :
28 56 84
'''
| [
"sanvednjoshi007@gmail.com"
] | sanvednjoshi007@gmail.com |
cfd612d29ddd306d4cfbfe6a4835fb72965fa868 | 25afe2f2ea7d4d7a6582d33548164a49a11bcf4c | /start_gui_v2.py | 4297c1a30013dfa5d012ad096dbe99d2c2e6cf85 | [] | no_license | hibbertm71030/02_mystery_box | e0b91cc4f04a3f80b9ac9b21565deba1cc68dcef | 02190e94cd90fd3668f2845c66a7291c0322a364 | refs/heads/master | 2023-06-21T00:39:25.833648 | 2021-07-30T00:30:40 | 2021-07-30T00:30:40 | 377,346,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,971 | py | from tkinter import *
from functools import partial
import random
# Initial dialogue: asks the user for a starting balance and the stakes level.
class Start:
    """Start-up window: collects a starting balance ($5-$50) and enables the
    stakes buttons the balance can afford, then hands off to Game."""

    def __init__(self, parent):
        """Build the start-up GUI inside *parent* (a Tk root)."""
        # gui to get starting balance and stakes
        self.start_frame = Frame(padx=10, pady=10)
        self.start_frame.grid()
        # set initial value to 0
        self.starting_funds = IntVar()
        self.starting_funds.set(0)
        # mystery heading (row 0)
        self.mystery_box_label = Label(self.start_frame, text="Mystery Box Game",
                                       font="Arial 19 bold")
        self.mystery_box_label.grid(row=0)
        # initial instructions (row 1)
        self.mystery_instructions = Label(self.start_frame, font="arial 10 italic",
                                          text="please enter a dollar amount (between $5 and $50 in the box below)."
                                          "then choose the stakes. The higher the stakes the more you can win",
                                          wrap=275, justify=LEFT, padx=10, pady=10)
        self.mystery_instructions.grid(row=1)
        # entry box and error label (row 2)
        self.entry_error_frame = Frame(self.start_frame, width=200)
        self.entry_error_frame.grid(row=2)
        self.start_amount_entry = Entry(self.entry_error_frame, font="Arial 19 bold", width=10)
        self.start_amount_entry.grid(row=0, column=0)
        self.add_funds_button = Button(self.entry_error_frame, font="Arial 14 bold",
                                       text="Add Funds", command=self.check_funds)
        self.add_funds_button.grid(row=0, column=1)
        self.amount_error_label = Label(self.entry_error_frame, text="", fg="maroon",
                                        font="arial 10 bold", wrap=275, justify=LEFT)
        self.amount_error_label.grid(row=1, columnspan=2, pady=5)
        # button frame (row 3)
        self.stakes_frame = Frame(self.start_frame)
        self.stakes_frame.grid(row=3)
        # stakes buttons; the lambdas capture the stakes multiplier (1/2/3)
        button_font = "Arial 12 bold"
        # orange low stakes button
        self.low_stakes_button = Button(self.stakes_frame, text="Low ($5)",
                                        command=lambda: self.to_game(1),
                                        font=button_font, bg="#FF9933")
        self.low_stakes_button.grid(row=0, column=0, pady=10)
        # yellow medium stakes button
        self.medium_stakes_button = Button(self.stakes_frame, text="Medium ($10)",
                                           command=lambda:self.to_game(2),
                                           font=button_font, bg="#FFFF33")
        self.medium_stakes_button.grid(row=0, column=1, padx=5, pady=10)
        # green high stakes button
        self.high_stakes_button = Button(self.stakes_frame, text="High ($15)",
                                         command=lambda: self.to_game(3),
                                         font=button_font, bg="#09FF33")
        self.high_stakes_button.grid(row=0, column=2, pady=10)
        # all stakes buttons start disabled until funds are validated
        self.low_stakes_button.config(state=DISABLED)
        self.medium_stakes_button.config(state=DISABLED)
        self.high_stakes_button.config(state=DISABLED)
        # help button (no command wired up yet — placeholder)
        self.help_button = Button(self.start_frame, text="how to play", bg="#808080", fg="white",
                                  font=button_font)
        self.help_button.grid(row=4, pady=10)

    def check_funds(self):
        """Validate the entered balance and enable the affordable stakes buttons."""
        starting_balance = self.start_amount_entry.get()
        # set error background colour (and assume that there are no errors at start)
        error_back = "#ffafaf"
        has_errors = "no"
        # reset any previous error styling
        self.start_amount_entry.config(bg="white")
        self.amount_error_label.config(text="")
        # disable all stakes buttons in case the user changes their mind and
        # decreases the amount entered
        self.low_stakes_button.config(state=DISABLED)
        self.medium_stakes_button.config(state=DISABLED)
        self.high_stakes_button.config(state=DISABLED)
        try:
            starting_balance = int(starting_balance)
            if starting_balance < 5:
                has_errors = "yes"
                error_feedback = "sorry, the least you can play with is $5"
            elif starting_balance > 50:
                has_errors = "yes"
                error_feedback = "Too high, the most you play with is $50"
            elif starting_balance >= 15:
                # enable all buttons
                self.low_stakes_button.config(state=NORMAL)
                self.medium_stakes_button.config(state=NORMAL)
                self.high_stakes_button.config(state=NORMAL)
            elif starting_balance >= 10:
                # enable low and medium stake buttons
                self.low_stakes_button.config(state=NORMAL)
                self.medium_stakes_button.config(state=NORMAL)
            else:
                # $5-$9: only low stakes affordable
                self.low_stakes_button.config(state=NORMAL)
        except ValueError:
            has_errors = "yes"
            error_feedback = "please enter a $ amount (no text / decimals)"
        if has_errors == "yes":
            self.start_amount_entry.config(bg=error_back)
            self.amount_error_label.config(text=error_feedback)
        else:
            # set starting balance to amount entered by user
            self.starting_funds.set(starting_balance)

    def to_game(self, stakes):
        """Launch the Game window with the chosen *stakes* and hide this one."""
        # retrieve starting balance
        starting_balance = self.starting_funds.get()
        Game(self, stakes, starting_balance)
        # hide startup window
        root.withdraw()
class Game:
def __init__(self, partner, stakes, starting_balance):
print(stakes)
print(starting_balance)
# main routine
if __name__ == "__main__":
root = Tk()
root.title("Mystery Box")
something = Start(root)
root.mainloop() | [
"60630392+hibbertm71030@users.noreply.github.com"
] | 60630392+hibbertm71030@users.noreply.github.com |
2ef11f6cdbf8403c0d448a2d67022c40b83c6620 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.5_rd=0.8_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=60/sched.py | 48c7bde40359bfb09d24223c9a5ccb1161b938c2 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | -X FMLP -Q 0 -L 3 104 400
-X FMLP -Q 0 -L 3 78 300
-X FMLP -Q 0 -L 3 69 300
-X FMLP -Q 0 -L 3 64 300
-X FMLP -Q 1 -L 2 62 250
-X FMLP -Q 1 -L 2 54 175
-X FMLP -Q 1 -L 2 49 300
-X FMLP -Q 2 -L 2 49 150
-X FMLP -Q 2 -L 2 43 150
-X FMLP -Q 2 -L 2 37 250
-X FMLP -Q 3 -L 1 35 250
-X FMLP -Q 3 -L 1 34 100
-X FMLP -Q 3 -L 1 30 150
22 100
21 200
8 175
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
7090cad3d345836cdd617571aba4f8dc0ed2c5e2 | 4e3a302c492018b15c06bdbc582ccce4ade51d48 | /Hotal/wsgi.py | c4d169262d82cc90b0d00af7cf96774213fab162 | [] | no_license | RmdGroups/Hotal | 8e2055562e36bb911585d978dcd0b1e0c75dc7dc | 84f78e3a4310ae744d825475415667695d70b208 | refs/heads/master | 2022-07-02T21:54:55.780628 | 2020-05-10T10:36:15 | 2020-05-10T10:36:15 | 262,763,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for Hotal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Hotal.settings')
application = get_wsgi_application()
| [
"rmdgroup257@gmail.com"
] | rmdgroup257@gmail.com |
5309fa1188f170b8efbe4b43b64fe524a1b8e1e9 | db8ab70de135d8bddc2c6df865b98ed76c2b92ee | /model/toxic_comment_classifier.py | f0bf2a9509d6372736d134cd7b3551e2797e332d | [] | no_license | boyuan12/ToxicBlockPlus | 718af4970f27e9eba9c454268a75c53c007f7737 | f90a46b9748a8d4dcdfc9e8c19279cc6aeed46c5 | refs/heads/main | 2023-02-26T21:20:56.878995 | 2021-02-09T01:15:34 | 2021-02-09T01:15:34 | 335,865,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | from typing import List
from bentoml import api, artifacts, env, BentoService
from bentoml.frameworks.keras import KerasModelArtifact
from bentoml.service.artifacts.common import PickleArtifact
from bentoml.adapters import DataframeInput, JsonOutput
from keras.preprocessing import text, sequence
import numpy as np
import pandas as pd
list_of_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
max_text_length = 400
@env(pip_packages=['tensorflow==1.14.0', 'keras==2.3.1', 'pandas', 'numpy'])
@artifacts([PickleArtifact('x_tokenizer'), KerasModelArtifact('model')])
class ToxicCommentClassification(BentoService):
def tokenize_df(self, df):
comments = df['comment_text'].values
tokenized = self.artifacts.x_tokenizer.texts_to_sequences(comments)
input_data = sequence.pad_sequences(tokenized, maxlen=max_text_length)
return input_data
@api(input=DataframeInput(), output=JsonOutput(), batch=True)
def predict(self, df: pd.DataFrame) -> List[str]:
input_data = self.tokenize_df(df)
prediction = self.artifacts.model.predict(input_data)
result = []
for i in prediction:
result.append(list_of_classes[np.argmax(i)])
return result | [
"boyuanliu6@yahoo.com"
] | boyuanliu6@yahoo.com |
ec1b320c98c0e05566a88dde52772385140c407c | 3d09a087f2a41d24df4b5821fac1d38140bb984d | /hw4/radial_spline.py | f337224ab9d2e135eb96e92408d575d9ead188a0 | [] | no_license | andresoro/Numerical-Analysis | 5a31f7f4c7f0afa8ae584715bc6cb3d8595b07b4 | fdb68fbc4e65c4dbfd8b39def99f3f41dd0ad56b | refs/heads/master | 2021-01-25T09:52:32.992802 | 2018-05-01T15:11:03 | 2018-05-01T15:11:03 | 123,322,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | # Compare and contrast natural cubic spline vs radial basis function interpolation
from scipy.interpolate import Rbf as radial
from scipy.interpolate import CubicSpline as cubic
import numpy as np
from matplotlib import pyplot as plt
# The funciton compare plots the cubic and raidal interpolation methods
# for a given function f over a evenly spaced interval n
def compare(func, n, p):
domain = np.linspace(0, n, p)
domain2 = np.linspace(0, n, p*10)
mapping = [func(i) for i in domain]
e = (n/p)*(1/10)
r = radial(domain, mapping, function='gaussian', episolon=e)
c = cubic(domain, mapping, bc_type='natural')
plt.plot(domain2, func(domain2), 'g', label="Actual")
plt.plot(domain2, r(domain2), 'b', label="Radial")
#plt.plot(domain2, c(domain2), 'r', label="Cubic")
plt.legend(loc='lower left')
plt.show()
def f(x):
if isinstance(x, list):
for i in x:
return [2*np.sin(i) for i in x]
else:
return 2*np.sin(x)
compare(f, np.pi, 10) | [
"ao15@my.fsu.edu"
] | ao15@my.fsu.edu |
ff3bdb24aacb03d36b3c3e500bc0329ae28e5735 | 87f7de83e8d1a536a06b5c39b4b1aaa1e95f7b9e | /examples/full-example/app/models/auth.py | 7f3cf361301c7c19d36483bb72b1ff5a68ac72b2 | [
"MIT"
] | permissive | MushroomMaula/fastapi_login | 048a68ac02989e35b0ae880671a2589ca7047447 | eb50ed4f528c28d15816166634d7ba3024484f84 | refs/heads/master | 2023-08-31T09:40:06.603049 | 2023-07-27T11:36:00 | 2023-07-27T11:36:00 | 216,593,146 | 553 | 62 | MIT | 2023-08-24T11:37:53 | 2019-10-21T14:47:00 | Python | UTF-8 | Python | false | false | 110 | py | from pydantic import BaseModel
class Token(BaseModel):
access_token: str
token_type: str = "bearer"
| [
"maxrd79@gmail.com"
] | maxrd79@gmail.com |
7b56d48d91936f9449bf341f94482ffc72672cfe | bf1bc5bfba2d8b50b7663580dfddd71da7dcab61 | /codingame/medium/war.py | 1cf869425aaef6dbf590762c1e72d410a3316434 | [] | no_license | kpbochenek/algorithms | 459a4377def13d5d45e3e0becf489c9edd033a5e | f39d50fe6cd3798a8a77a14b33eecc45412927fa | refs/heads/master | 2021-05-02T09:35:54.077621 | 2016-12-27T18:45:13 | 2016-12-27T18:45:13 | 38,165,785 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | import sys, math
from collections import deque
def convert(x):
if x.startswith("10"): return "10"
if x[0] == 'J': return "11"
if x[0] == 'Q': return "12"
if x[0] == 'K': return "13"
if x[0] == 'A': return "14"
return x[0]
n = int(input()) # the number of cards for player 1
qp1 = deque([int(convert(input())) for i in range(n)])
m = int(input()) # the number of cards for player 2
qp2 = deque([int(convert(input())) for i in range(m)])
rounds = 0
left = []
right = []
try:
while qp1 and qp2:
p1 = qp1.popleft()
p2 = qp2.popleft()
left.append(p1)
right.append(p2)
print("FIGHT {} {}".format(p1, p2), file=sys.stderr)
if p1 < p2:
qp2.extend(left)
qp2.extend(right)
left, right = [], []
elif p1 > p2:
qp1.extend(left)
qp1.extend(right)
left, right = [], []
else:
for i in range(3):
left.append(qp1.popleft())
right.append(qp2.popleft())
rounds -= 1
rounds += 1
except IndexError:
print("PAT")
else:
if not qp1:
print("2 {}".format(rounds))
elif not qp2:
print("1 {}".format(rounds))
# To debug: print("Debug messages...", file=sys.stderr)
| [
"kpbochenek@gmail.com"
] | kpbochenek@gmail.com |
396bc614118dafa6269fb5a5634a91908fa560f2 | 9f3ab101aaec1e94f61666a1639b5dd4ee8a9368 | /backend/exams/migrations/0006_auto_20210416_1551.py | 062845f7e533c0162cfdab275d04898d902939a7 | [] | no_license | fmcruz01/HSaoQuadrado | 6d6044b2bdbeccf762c96da4859e47d62a8e7c77 | fe592fb97cb193769e98afc9f200cb9c9d033dcb | refs/heads/main | 2023-07-29T11:40:42.313243 | 2021-09-10T19:04:56 | 2021-09-10T19:04:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # Generated by Django 3.1.7 on 2021-04-16 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exams', '0005_auto_20210416_1549'),
]
operations = [
migrations.AlterField(
model_name='question',
name='wrongAnswer',
field=models.ManyToManyField(related_name='wrongAnswer', to='exams.Answer'),
),
]
| [
"noreply@github.com"
] | noreply@github.com |
434bfb4f4cc27692073954c84c66e1218f428b56 | af47e1dd1405ebd5267e7f8cf22f7b4429fcef00 | /scattertext/termscoring/g2.py | 7ba2d01a83690dfca816ad03c8a93d0365684bad | [
"MIT",
"CC-BY-NC-SA-4.0",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | JasonKessler/scattertext | 72ce3b35d71af595f7797de845ba93b4bb0091b4 | b41e3a875faf6dd886e49e524345202432db1b21 | refs/heads/master | 2023-05-11T06:42:51.108527 | 2023-05-06T19:23:59 | 2023-05-06T19:23:59 | 63,827,736 | 2,187 | 303 | Apache-2.0 | 2023-05-06T19:24:00 | 2016-07-21T01:47:12 | Python | UTF-8 | Python | false | false | 4,143 | py | import numpy as np
import pandas as pd
from scipy.stats import chi2
from statsmodels.stats.multitest import fdrcorrection
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
def g2_term(O, E):
res = O.astype(np.float64) * (np.log(O) - np.log(E))
res[O == 0] = 0
return res
def sign(a: np.array) -> np.array:
return np.nan_to_num(a / np.abs(a), 0)
def qchisq(alpha: np.array, df: int) -> np.array:
return chi2.ppf(1 - alpha, df=df) # qchisq(alpha, df=1, lower.tail=FALSE)
class G2(CorpusBasedTermScorer):
"""
G^2 (log likelihood ratio)s from (Rayson and Garside 2000)
A direct translation of the R function from (Evert 2023)
Stephanie Evert. 2023. Measuring Keyness. https://osf.io/x8z9n.
G2.term <- function (O, E) {
res <- O * log(O / E)
res[O == 0] <- 0
res
}
G2 <- function (f1, f2, N1, N2, alpha=NULL, correct=TRUE) {
stopifnot(length(f1) == length(f2))
## observed and expected contingency tables
N <- N1 + N2
R1 <- f1 + f2
O11 <- f1; E11 <- R1 * N1 / N
O12 <- f2; E12 <- R1 * N2 / N
O21 <- N1 - f1; E21 <- N1 - E11
O22 <- N2 - f2; E22 <- N2 - E12
## log-likelihood statistic (simplest formula)
G2 <- 2 * (G2.term(O11, E11) + G2.term(O12, E12) + G2.term(O21, E21) + G2.term(O22, E22))
res <- sign(O11 - E11) * G2 # set sign to distinguish positive vs. negative keywords
## weed out non-significant items if alpha is specified
if (!is.null(alpha)) {
if (correct) alpha <- alpha / length(f1)
theta <- qchisq(alpha, df=1, lower.tail=FALSE)
res[G2 < theta] <- 0 # set to 0 if not significant at level alpha
}
res
}
"""
def _set_scorer_args(self, **kwargs):
self.alpha_ = kwargs.get('alpha', None)
self.correct_ = kwargs.get('correct', True)
def get_score_df(self, label_append=''):
N1, N2, f1, f2 = self._get_ns_and_fs(())
gsquare, res = self._get_g2_and_res(N1, N2, f1, f2)
df = pd.DataFrame({
'G2': gsquare,
'Score': res,
'P': chi2.sf(gsquare, df=1),
})
return df.assign(
CorrectedP = lambda df: fdrcorrection(pvals=df.P.values, alpha=0.05, method='indep')[1]
)
def get_scores(self, *args) -> pd.Series:
N1, N2, f1, f2 = self._get_ns_and_fs(args)
gsquare, res = self._get_g2_and_res(N1, N2, f1, f2)
## weed out non-significant items if alpha is specified
if self.alpha_ is not None:
alpha = self.alpha_
if self.correct_:
alpha = alpha / len(f1)
theta = qchisq(alpha, df=1)
res[gsquare < theta] = 0 # set to 0 if not significant at level alpha
return pd.Series(res, index=self._get_terms())
def _get_g2_and_res(self, N1, N2, f1, f2):
N = N1 + N2
R1 = f1 + f2
E11, E12, E21, E22, O11, O12, O21, O22 = self.__get_contingency_table(N, N1, N2, R1, f1, f2)
## log-likelihood statistic (simplest formula)
gsquare = 2 * (g2_term(O11, E11) + g2_term(O12, E12) + g2_term(O21, E21) + g2_term(O22, E22))
res = sign(O11 - E11) * gsquare # set sign to distinguish positive vs. negative keywords
return gsquare, res
def __get_contingency_table(self, N, N1, N2, R1, f1, f2):
O11 = f1
E11 = R1 * N1 / N
O12 = f2
E12 = R1 * N2 / N
O21 = N1 - f1
E21 = N1 - E11
O22 = N2 - f2
E22 = N2 - E12
return E11, E12, E21, E22, O11, O12, O21, O22
def _get_ns_and_fs(self, args):
cat_X, ncat_X = self._get_cat_and_ncat(self._get_X())
N1 = self._get_cat_size()
N2 = self._get_ncat_size()
if len(args) == 0:
f1 = cat_X.sum(axis=0).A1
f2 = ncat_X.sum(axis=0).A1
else:
f1, f2 = self.__get_f1_f2_from_args(args)
f1 = np.array(f1).astype(np.float64)
f2 = np.array(f2).astype(np.float64)
return N1, N2, f1, f2
def get_name(self):
return 'G2'
| [
"JasonKessler@users.noreply.github.com"
] | JasonKessler@users.noreply.github.com |
2b0b60fa7dc054457ed41a697b42028cc176aa8f | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /ml_debiaser/randomized_threshold.py | 11e80f5069ecc7299769e3dd9478b15431da9356 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 10,142 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for debiasing ML models."""
import math
from typing import Optional
import numpy as np
class RandomizedThreshold:
"""Threshold optimizer (RTO) to debias models via postprocessing.
See: https://arxiv.org/abs/2106.12887.
This is a solver to the following optimiation problem:
minimize gamma/2 ||x||^2 - y^Tx
s.t. x satisfying DP constraints with tolerance eps and parameter rho.
There are no assumptions about y in this code but, in general, y should be the
predictions of the original classifier.
"""
def __init__(self, gamma: float = 1.0,
eps: float = 0.0,
rho: Optional[float] = None
) -> None:
"""Instantiate object.
Args:
gamma: The regularization parameter gamma (for randomization). Set this to
1 if the goal is to minmize l2 difference from the original scores.
eps: Tolerance parameter for bias between 0 and 1 inclusive.
rho: The rho parameter in the post-hoc rule. If None, rho = E[y].
"""
if eps < 0:
raise ValueError('eps must be non-negative.')
if gamma <= 0:
raise ValueError('gamma must be a strictly positive number.')
if rho is not None and rho <= 0:
raise ValueError('rho must be either None or a strictly positive number.')
self.num_groups = 1
self.gamma = gamma
self.eps = eps
self.rho = rho
self.avrg_y_score = 0
# model paramters (Lagrange dual variables)
self.lambdas = []
self.mus = []
def fit(self,
y_orig: np.ndarray,
group_feature: np.ndarray,
sgd_steps: int = 10_000,
full_gradient_epochs: int = 1_000,
verbose: bool = True,
batch_size: int = 256,
ignore_warnings: bool = False
) -> None:
"""Debias predictions w.r.t. the sensitive class in each demographic group.
IMPORTANT: If this is used for postprocessing a classifier,
the scores y_orig need to be rescaled linearly to [-1, +1].
Training proceeds in two rounds. First is SGD. Second is full gradient
descent. Full gradient descent is recommended when debiasing deep neural
nets because the scores are concentrated around the extremes
so high preciseion might be needed. Because the loss is smooth, lr
in full gradient method does not need tuning. It can be set to gamma / 2.0.
Args:
y_orig: A vector of the original probability scores. If this is used for
debiasing binary classifiers, y_orig = 2 * p(y=1) - 1.
group_feature: An array containing the group id of each instance starting
from group 0 to group K-1.
sgd_steps: Number of minibatch steps in SGD.
full_gradient_epochs: Number of epochs in full gradient descent phase.
verbose: Set to True to display progress.
batch_size: Size of minibatches in SGD.
ignore_warnings: Set to True to suppress warnings.
"""
if min(y_orig) >= 0: # use this to catch a common bug
self.yscale = 'positive'
else:
self.yscale = 'negative'
y_orig = np.array(y_orig)
num_groups = len(set(group_feature)) # number of demographic groups
# warnings against common bugs/errors
if (min(y_orig) < -1 or max(y_orig) > 1) and not ignore_warnings:
print('Warning: the scores y_orig are not in the range [-1, +1]. '
'To suppress this message, set ignore_warnings=True.')
if self.yscale == 'positive' and not ignore_warnings:
print('Warning: if this is for postprocessing a binary classifier, '
'the scores need to be rescaled to [-1, +1]. To suppress this '
'message, set ignore_warnings=True.')
# assert that group_feature is of the right form and no group is empty
if min(group_feature) != 0 or (max(group_feature) != num_groups - 1):
raise ValueError('group_feature should be in {0, 1, .. K-1} where '
'K is the nubmer of groups. Some groups are missing.')
self.num_groups = num_groups
eps0 = self.eps / 2.0
gamma = self.gamma
# Store group membership ids in a dictionary.
xk_groups = {k: [] for k in range(num_groups)}
for i in range(len(group_feature)):
xk_groups[group_feature[i]].append(i)
self.avrg_y_score = float(sum(y_orig))/len(y_orig)
if self.rho is None: # by default: self.rho = E[y] in [0, 1] not [-1, 1]
if self.yscale == 'positive':
self.rho = self.avrg_y_score
else:
self.rho = self.avrg_y_score / 2.0 + 0.5
# The parameters we optimize in the algorithm are lambdas and mus.
# lambdas_final and mus_final are running averages (final output).
lambdas = np.zeros((num_groups,))
mus = np.zeros((num_groups,))
lambdas_final = np.zeros_like(lambdas) # running averages
mus_final = np.zeros_like(mus) # running averages
# SGD is carried out in each group separately due to decomposition of the
# optimization problem.
num_samples_sgd = sgd_steps * batch_size
lr = gamma * math.sqrt(1.0 / num_samples_sgd)
# Begin the projected SGD phase.
if verbose:
print('SGD phase started:')
for k in range(num_groups):
if verbose:
print('Group %d.\t\t%02d%%'%(k, int(100*k/num_groups)), end='\r')
idx = np.array(list(xk_groups[k])) # instance IDs in group k
group_size = len(idx)
for _ in range(sgd_steps):
# random.randint is 10x faster than random.choice.
batch_ids = np.random.randint(0, group_size, batch_size)
batch_ids = idx[batch_ids]
# The code below is a faster implementation of:
# xi_arg = y_orig[batch_ids] - (lambdas[k] - mus[k])
# xi_gradient = xi_arg/gamma
# xi_gradient = np.maximum(xi_gradient, 0.)
# xi_gradient = np.minimum(xi_gradient, 1.)
lambda_minus_mu = lambdas[k] - mus[k]
xi_arg = np.maximum(y_orig[batch_ids], lambda_minus_mu)
xi_arg = np.minimum(xi_arg, gamma + lambda_minus_mu)
mean_xi = (np.mean(xi_arg) - lambda_minus_mu) / gamma
lambda_gradient = eps0 + self.rho - mean_xi
mu_gradient = eps0 - self.rho + mean_xi
# stochastic gradient descent
if eps0 > 1e-3:
lambdas[k] = max(0, lambdas[k] - lr * batch_size * lambda_gradient)
mus[k] = max(0, mus[k] - lr * batch_size * mu_gradient)
else:
# If self.eps=0, we can drop mus and optimize lambdas only but
# lambdas will not be constrained to be non-negative in this case.
lambdas[k] = lambdas[k] - lr * batch_size * lambda_gradient
# lambdas_final and mus_final are running averages.
lambdas_final[k] += lambdas[k] / sgd_steps
mus_final[k] += mus[k] / sgd_steps
# Now switch to full gradient descent.
# Because the objective is smooth, lr = gamma / 2 works.
if verbose and full_gradient_epochs:
print('\nFull gradient descent phase started:')
for k in range(num_groups):
if verbose:
print('Group {}.'.format(k))
idx = np.array(list(xk_groups[k]))
for _ in range(full_gradient_epochs):
lambda_minus_mu = lambdas_final[k] - mus_final[k]
xi_arg = np.maximum(y_orig[idx], lambda_minus_mu)
xi_arg = np.minimum(xi_arg, gamma + lambda_minus_mu)
mean_xi = (np.mean(xi_arg) - lambda_minus_mu) / gamma
full_grad_lambda = eps0 + self.rho - mean_xi
full_grad_mu = eps0 - self.rho + mean_xi
if eps0 > 1e-3:
lambdas_final[k] = max(0,
lambdas_final[k] - 0.5*gamma*full_grad_lambda)
mus_final[k] = max(0, mus_final[k] - 0.5*gamma*full_grad_mu)
else:
lambdas_final[k] = lambdas_final[k] - 0.5*gamma*full_grad_lambda
self.lambdas = lambdas_final
self.mus = mus_final
def predict(self,
y_orig: np.ndarray,
group_feature: np.ndarray,
ignore_warnings: bool = False
) -> np.ndarray:
"""Debiases the predictions.
Given the original scores y, post-process them such that the predictions
satisfy the desired fairness criteria.
Args:
y_orig: Original classifier scores. If this is for postprocessing binary
classifiers, y_orig = 2 * p(y = 1) - 1.
group_feature: An array containing the group id of each instance starting
from group 0 to group K-1.
ignore_warnings: Set to True to suppress warnings.
Returns:
y_new_prob: y_new_prob[i] is the probability of predicting the positive
class for instance i.
"""
if (((min(y_orig) >= 0 and self.yscale == 'negative') or
(min(y_orig) < 0 and self.yscale == 'positive')) and
not ignore_warnings):
print('Warning: the scores seem to have a different scale from the '
'training data. '
'If the data is scaled in [0, 1], e.g. for preprocessing, or '
'in [-1, +1], e.g. for postprocessing, make sure that test labels '
'are scaled similarly.')
num_examples = len(y_orig) # number of training examples
gamma = self.gamma
lambdas = self.lambdas
mus = self.mus
y_new_prob = np.zeros((num_examples,))
for i in range(num_examples):
k = group_feature[i]
if y_orig[i] < (lambdas[k] - mus[k]):
y_new_prob[i] = 0
elif y_orig[i] < (lambdas[k] - mus[k]) + gamma:
y_new_prob[i] = (1.0 / gamma) * (y_orig[i] - (lambdas[k] - mus[k]))
else:
y_new_prob[i] = 1.0
return y_new_prob
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
22cbea452b25b6ffe759506fa95cb4bcd5cc0172 | b0afba2b6da272d4aa75abd346dce499a2ecb387 | /tests/ontarget/test_mpfexp.py | c08d78e49b91abf6f3d7c90959d8645b929f4d84 | [
"MIT"
] | permissive | PMunch/mpfshell | 2d04d619043ce4e7def4363087805d70e42f8c50 | 14190f205f24aea195999e4ff1e77ee15cff507f | refs/heads/master | 2021-01-11T23:57:54.334874 | 2017-02-17T18:15:57 | 2017-02-17T18:15:57 | 78,651,540 | 8 | 3 | MIT | 2023-01-15T12:03:24 | 2017-01-11T15:23:09 | Python | UTF-8 | Python | false | false | 9,311 | py | ##
# The MIT License (MIT)
#
# Copyright (c) 2016 Stefan Wendler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import os
import pytest
from mp.mpfshell import RemoteIOError
@pytest.mark.usefixtures("mpsetup")
class TestMpfexp:
"""
Tests for the MpFileExplorer class.
"""
def __create_local_file(self, file, data =b""):
with open(file, "wb") as f:
f.write(data)
def test_directory_handling(self, mpfexp):
assert "/" == mpfexp.pwd()
mpfexp.md('dir1')
mpfexp.md('dir 1')
mpfexp.md('dir1/subdir1')
# no duplicate directory names
with pytest.raises(RemoteIOError):
mpfexp.md('dir1')
with pytest.raises(RemoteIOError):
mpfexp.md('dir1/subdir1')
# no subdir in non existing dir
with pytest.raises(RemoteIOError):
mpfexp.md('dir2/subdir1')
# relative directory creating
mpfexp.cd('dir1')
assert "/dir1" == mpfexp.pwd()
mpfexp.md('subdir2')
# created dirs visible for ls and marked as directory
mpfexp.cd('/')
assert "/" == mpfexp.pwd()
assert ('dir1', 'D') in mpfexp.ls(True, True, True)
assert ('dir 1', 'D') in mpfexp.ls(True, True, True)
# no dir with same name as existing file
with pytest.raises(RemoteIOError):
mpfexp.md('boot.py')
# subdirs are visible for ls
mpfexp.cd('dir1')
assert "/dir1" == mpfexp.pwd()
assert [('subdir1', 'D'), ('subdir2', 'D')] == mpfexp.ls(True, True, True)
mpfexp.cd('subdir1')
assert "/dir1/subdir1" == mpfexp.pwd()
assert [] == mpfexp.ls(True, True, True)
mpfexp.cd('..')
mpfexp.cd('subdir2')
assert "/dir1/subdir2" == mpfexp.pwd()
assert [] == mpfexp.ls(True, True, True)
# no duplicate directory names
with pytest.raises(RemoteIOError):
mpfexp.cd('subdir1')
#FIXME: not working as expected yet
#mpfexp.cd('../subdir1')
#assert "/dir1/subdir1" == mpfexp.pwd()
# allow whitespaces in dir names
mpfexp.cd('/dir 1')
assert "/dir 1" == mpfexp.pwd()
assert [] == mpfexp.ls(True, True, True)
def test_file_handling(self, mpfexp, tmpdir):
os.chdir(str(tmpdir))
data = b"\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99"
self.__create_local_file("file1", data)
# upload with same name
mpfexp.put("file1")
# upload with different name
mpfexp.put("file1", "file2")
assert ('file1', 'F') in mpfexp.ls(True, True, True)
assert ('file2', 'F') in mpfexp.ls(True, True, True)
os.remove("file1")
assert not os.path.isfile("file1")
# download and compare
mpfexp.get("file1")
mpfexp.get("file2")
mpfexp.get("file1", "file3")
for name in ["file1", "file2", "file3"]:
with open(name, "rb") as f:
assert data == f.read()
# overwrite existing file
data = b"\xaa\xbb\xcc\xdd\xee\xff"
self.__create_local_file("file1", data)
mpfexp.put("file1")
with open("file1", "rb") as f:
assert data == f.read()
# file with name of existing directory not allowed
self.__create_local_file("dir2")
mpfexp.md("dir2")
with pytest.raises(RemoteIOError):
mpfexp.put("file1", "dir2")
with pytest.raises(RemoteIOError):
mpfexp.put("dir2")
# put files to subdir
mpfexp.put("file1", "dir2/file1")
mpfexp.cd("dir2")
mpfexp.put("file2", "file2")
assert [('file1', 'F'), ('file2', 'F')] == mpfexp.ls(True, True, True)
mpfexp.cd("/")
# fail to put to non-existing directory
with pytest.raises(RemoteIOError):
mpfexp.put("file1", "dir3/file1")
# fail to get non-existing file
with pytest.raises(RemoteIOError):
mpfexp.get("file99")
with pytest.raises(RemoteIOError):
mpfexp.get("dir2")
with pytest.raises(RemoteIOError):
mpfexp.get("dir2/file99")
# fail to get to non-existing dir
with pytest.raises(IOError):
mpfexp.get("file1", "dir/file")
# fail to put non existing file
with pytest.raises(IOError):
mpfexp.put("file99")
# allow whitespaces in file-names
mpfexp.put("file1", "file 1")
mpfexp.get("file 1")
assert ('file 1', 'F') in mpfexp.ls(True, True, True)
def test_removal(self, mpfexp, tmpdir):
os.chdir(str(tmpdir))
mpfexp.md("dir3")
mpfexp.md("dir 3")
self.__create_local_file("file10")
mpfexp.put("file10")
mpfexp.put("file10", "dir3/file1")
mpfexp.put("file10", "dir3/file2")
# don't allow deletion of non empty dirs
with pytest.raises(RemoteIOError):
mpfexp.rm("dir3")
# delete files and empty dirs
mpfexp.rm("file10")
mpfexp.rm("dir3/file1")
mpfexp.cd("dir3")
mpfexp.rm("file2")
assert [] == mpfexp.ls(True, True, True)
mpfexp.cd("/")
mpfexp.rm("dir3")
mpfexp.rm("dir 3")
assert ('file10', 'F') not in mpfexp.ls(True, True, True)
assert ('dir3', 'D') not in mpfexp.ls(True, True, True)
assert ('dir 3', 'D') not in mpfexp.ls(True, True, True)
# fail to remove non-existing file or dir
with pytest.raises(RemoteIOError):
mpfexp.rm("file10")
with pytest.raises(RemoteIOError):
mpfexp.rm("dir3")
def test_mputget(self, mpfexp, tmpdir):
os.chdir(str(tmpdir))
self.__create_local_file("file20")
self.__create_local_file("file21")
self.__create_local_file("file22")
mpfexp.md("dir4")
mpfexp.cd("dir4")
mpfexp.mput(".", "file\.*")
assert [("file20", "F"), ("file21", "F"), ("file22", "F")] == sorted(mpfexp.ls(True, True, True))
os.mkdir("mget")
os.chdir(os.path.join(str(tmpdir), "mget"))
mpfexp.mget(".", "file\.*")
assert ["file20", "file21", "file22"] == sorted(os.listdir("."))
mpfexp.mget(".", "notmatching")
with pytest.raises(RemoteIOError):
mpfexp.mput(".", "*")
with pytest.raises(RemoteIOError):
mpfexp.mget(".", "*")
def test_putsgets(self, mpfexp):
mpfexp.md("dir5")
mpfexp.cd("dir5")
data = "Some random data"
mpfexp.puts("file1", data)
assert mpfexp.gets("file1").startswith(data)
mpfexp.cd("/")
with pytest.raises(RemoteIOError):
mpfexp.puts("invalid/file1", "don't care")
with pytest.raises(RemoteIOError):
mpfexp.puts("dir5", "don't care")
mpfexp.puts("dir5/file1", data)
with pytest.raises(RemoteIOError):
mpfexp.gets("dir5")
with pytest.raises(RemoteIOError):
mpfexp.gets("dir5/file99")
def test_bigfile(self, mpfexp, tmpdir):
os.chdir(str(tmpdir))
data = b"\xab" * (1024 * 40)
self.__create_local_file("file30", data)
mpfexp.md("dir6")
mpfexp.cd("dir6")
mpfexp.put("file30", "file1")
mpfexp.get("file1")
with open("file1", "rb") as f:
assert data == f.read()
def test_stress(self, mpfexp, tmpdir):
os.chdir(str(tmpdir))
mpfexp.md("dir7")
mpfexp.cd("dir7")
for i in range(20):
data = b"\xab" * (1024 * 1)
self.__create_local_file("file40", data)
mpfexp.put("file40", "file1")
assert [("file1", "F")] == mpfexp.ls(True, True, True)
mpfexp.put("file40", "file2")
assert [("file1", "F"), ("file2", "F")] == mpfexp.ls(True, True, True)
mpfexp.md("subdir1")
assert [("subdir1", "D"), ("file1", "F"), ("file2", "F")] == mpfexp.ls(True, True, True)
mpfexp.rm("file1")
mpfexp.rm("file2")
mpfexp.cd("subdir1")
mpfexp.cd("..")
mpfexp.rm("subdir1")
assert [] == mpfexp.ls(True, True, True)
| [
"sw@kaltpost.de"
] | sw@kaltpost.de |
5c547e31bbf908eaea567803e1bb8460a41d864b | acdeeec45bf16502fc3b2540dbb71478327f6be3 | /WeBrew.py | 641ab88008ff4f975fc6850b660a3e77ecc2992a | [] | no_license | jayjay300/pdp2017 | 4c8ff9664ed8c77766a8ebd811e0090fd3d40359 | 540d9d91be754e020c9cd6495c4b36dda1800c11 | refs/heads/master | 2021-10-09T21:46:27.276515 | 2019-01-03T19:19:38 | 2019-01-03T19:19:38 | 112,773,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,124 | py | #!/usr/bin/python
import os
import glob
import time
import blescan
import sys
import requests
import bluetooth._bluetooth as bluez
import pygame
import numpy as np
import tkinter as tk
import RPi.GPIO as GPIO
from PIL import ImageTk,Image
from firebase import firebase
from datetime import datetime
import matplotlib
import matplotlib.image as mpimg
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# Load the kernel modules for the 1-wire temperature sensor bus.
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir ='/sys/bus/w1/devices/'
# First 1-wire device whose folder name starts with '28' (crashes at import
# time if no such sensor is attached -- TODO confirm that is acceptable).
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
# NOTE: this rebinds the imported `firebase` module name to the app handle.
firebase = firebase.FirebaseApplication('https://webrew-d32dd.firebaseio.com', None)
coldcrush= False  # when True, temperaturecontrol() skips heating/cooling
#Assign uuid's of various colour tilt hydrometers. BLE devices like the tilt work primarily using advertisements.
#The first section of any advertisement is the universally unique identifier. Tilt uses a particular identifier based on the colour of the device
red = 'a495bb10c5b14b44b5121370f02d74de'
green = 'a495bb20c5b14b44b5121370f02d74de'
black = 'a495bb30c5b14b44b5121370f02d74de'
purple = 'a495bb40c5b14b44b5121370f02d74de'
orange = 'a495bb50c5b14b44b5121370f02d74de'
blue = 'a495bb60c5b14b44b5121370f02d74de'
yellow = 'a495bb70c5b14b44b5121370f02d74de'
pink = 'a495bb80c5b14b44b5121370f02d74de'
dev_id = 0  # bluetooth adapter index (presumably hci0)
GPIO.setmode(GPIO.BOARD)
# Relay output pins (physical board numbering) for the heater and cooler.
chan_heat = [13,18,29,36,37]
chan_cool = [11,15,16,22,31]
GPIO.setup(chan_heat, GPIO.OUT)
GPIO.setup(chan_cool, GPIO.OUT)
#ax = plt.gca()
#ax2 = ax.twinx() #set scale
def heat():
    """Switch to heating mode.

    The cooling relays are de-energised *before* the heating relays are
    driven high, so the two relay banks are never on at the same time
    (the original order briefly enabled both).
    """
    GPIO.output(chan_cool, GPIO.LOW)
    GPIO.output(chan_heat, GPIO.HIGH)
def cool():
    """Switch to cooling mode.

    Heating relays are de-energised *before* the cooling relays are driven
    high, so the two relay banks are never on at the same time (the
    original order briefly enabled both).
    """
    GPIO.output(chan_heat, GPIO.LOW)
    GPIO.output(chan_cool, GPIO.HIGH)
def shutoff():
    """De-energise every heating and cooling relay in one call
    (GPIO.output accepts a list of channels)."""
    GPIO.output(chan_heat + chan_cool, GPIO.LOW)
def read_Sg():
    """Scan BLE advertisements and return the specific gravity reported by
    the green Tilt hydrometer.

    Returns None when no green Tilt beacon was seen during the scan (the
    original left `tiltSG` unbound in that case and raised
    UnboundLocalError at the final return).
    """
    try:
        sock = bluez.hci_open_dev(dev_id)
    except Exception:
        print("error accessing bluetooth device...")
        sys.exit(1)
    blescan.hci_le_set_scan_parameters(sock)
    blescan.hci_enable_le_scan(sock)
    tiltSG = None  # stays None if no green Tilt advertised during the scan
    returnedList = blescan.parse_events(sock, 10)
    for beacon in returnedList:
        # Each beacon is a comma-separated string; field 1 is the UUID and
        # field 3 appears to carry SG * 1000 in hex -- TODO confirm.
        output = beacon.split(',')
        if output[1] == green:  # change this to the colour of your Tilt
            tiltSG = int(output[3], 16) / 1000
            print("testing")
            print(tiltSG)
            print("-----")
    blescan.hci_disable_le_scan(sock)
    return tiltSG
def read_temp_raw():
    """Return the raw lines of the 1-wire sensor file.

    Uses a context manager so the handle is closed even if the read fails
    (the original closed it manually, leaking on exceptions).
    """
    with open(device_file, 'r') as f:
        return f.readlines()
def read_time():
    """Return the current local time truncated to whole seconds."""
    return datetime.now().replace(microsecond=0)
def read_timegui():
    """Return the clock time formatted as 'H:MM' for the GUI title bar."""
    now = read_time()
    return "{}:{:02d}".format(now.hour, now.minute)
def read_temp():
    """Poll the 1-wire sensor until a valid (CRC 'YES') reading is
    available and return the temperature in degrees Celsius.

    Returns None if the data line contains no 't=' marker (same as the
    original's implicit fall-through).
    """
    # Local import: later in this module the global name `time` is rebound
    # to a tk.StringVar, which would make `time.sleep` crash at runtime.
    from time import sleep
    lines = read_temp_raw()
    while lines[0].strip()[-3:] != 'YES':
        sleep(0.2)
        lines = read_temp_raw()
    equals_pos = lines[1].find('t=')
    if equals_pos != -1:
        temp_string = lines[1][equals_pos + 2:]
        # Sensor reports millidegrees; unused Fahrenheit/string conversions
        # from the original were dropped.
        return float(temp_string) / 1000.0
def updatetherm():
    """Parse the temperature log into date/time/value lists.

    Currently has no visible effect: the plotting code that consumed the
    parsed lists belongs to the abandoned graph UI and is commented out.
    """
    with open("/home/pi/Desktop/Interface/temp.txt") as f:
        datat = f.read()
    datat = datat.split('\n')
    # Each log line is "<date> <time> <temp>" (see updategui's write).
    dt = [row.split(' ')[0] for row in datat]
    xt = [row.split(' ')[1] for row in datat]
    yt = [row.split(' ')[2] for row in datat]
    xt = [str(i) for i in xt]
    yt = [float(i) for i in yt]
    #x = list(map(int, x))
    #y = list(map(int, y))
    #d = plt.plot(xt,yt)
    #d[0].set_ydata(s)
    #ax2.plot(xt,yt,'#f35924')
    #fig.canvas.draw()
def updatesg():
    """Parse the specific-gravity log into date/time/value lists.

    Like updatetherm(), the results are currently unused because the
    plotting code below is disabled.
    """
    with open("/home/pi/Desktop/Interface/sg.txt") as f:
        datas = f.read()
    datas = datas.split('\n')
    # Each log line is "<date> <time> <sg>" (see updategui's write).
    ds = [row.split(' ')[0] for row in datas]
    xs = [row.split(' ')[1] for row in datas]
    ys = [row.split(' ')[2] for row in datas]
    #xs = [f(i) for i in x]
    ys = [float(i) for i in ys]
    #x = list(map(int, x))
    #y = list(map(int, y))
    #d = plt.plot(xs,ys)
    #d[0].set_ydata(s)
    #ax.plot(xs,ys,'black')
    #fig.canvas.draw()
def exitbutton():
    """Release the GPIO pins and close the main window (Exit button)."""
    GPIO.cleanup()
    root.destroy()
def temperaturecontrol(desired_temperature=21):
    """Bang-bang temperature control toward *desired_temperature* (in °C).

    Generalized: the set-point used to be hard-coded to 21 °C inside the
    body; it is now a parameter with the same default, so existing callers
    are unaffected.  Control is skipped entirely while `coldcrush` is set.
    """
    temperature = read_temp()
    if not coldcrush:
        if temperature < desired_temperature:
            heat()
        elif temperature > desired_temperature:
            cool()
        elif temperature == desired_temperature:
            shutoff()
def updategui():
    """Once-a-minute refresh: read the sensors, update the GUI widgets,
    append the readings to the log files and reschedule itself.

    Fixes over the original: the two log files were re-opened on every
    cycle and never closed (handle leak), and the sensors were polled
    several times per cycle; each is now read exactly once.
    """
    updatesg()
    updatetherm()
    # temperaturecontrol()  # TURN ON ONCE TEMP CONTROL TESTING IS NECESSARY
    tempread = str(read_temp())
    sgread = str(read_Sg())  # TURN ON ONCE HYDROMETER IS FUNCTIONAL
    timeread = str(read_time())
    temperature.set("Current Temperature " + tempread + "°C")
    with open('/home/pi/Desktop/Interface/temp.txt', 'a') as ftemp:
        ftemp.write('\n' + timeread + " " + tempread)
    sg.set("Current Specific Gravity: " + sgread)
    with open('/home/pi/Desktop/Interface/sg.txt', 'a') as fsg:
        fsg.write('\n' + timeread + " " + sgread)
    time.set("Current Time: " + timeread)  # `time` is the StringVar here
    root.update()
    root.after(60000, updategui)  # reschedule in one minute
    print(tempread)
    print(sgread)
    date_time = timeread.split()  # "YYYY-MM-DD HH:MM:SS" -> [date, clock]
    print(date_time[0])
    print(date_time[1])
    tk.Label(root, text=tempread + "°C", font=("NotoSans-Bold", 45),
             fg='#ff8d00', background='white').grid(row=10, column=1)
    tk.Label(root, text=sgread, font=("NotoSans-Bold", 45),
             fg='#ff8d00', background='white').grid(row=10, column=4)
    tk.Label(titleframe, text=str(read_timegui() + " "), font=("NotoSans-Bold", 14),
             fg='white', background='#ff8d00', compound="center").grid(row=0, column=6)
    # result = firebase.post('/users/1003', {'date': date_time[0], 'time': date_time[1], 'temp': tempread, 'desiredtemp': 20, 'hydro': sgread})
root = tk.Tk()
#fig = plt.figure(1) GRAPH GUI CODE
#plt.ion()
# Tk string variables backing the live-updating labels.
temperature = tk.StringVar()
sg = tk.StringVar()
# WARNING: this rebinds the name `time`, shadowing the imported time module
# for the rest of the file (anything relying on the global `time.sleep`
# after this point would break).
time = tk.StringVar()
temperature.set("null")
sg.set("null")
updatetherm()
updatesg()
#plt.plot(xt,yt) COMMENTED CODE BELOW IS GRAPH CODE-FUNCTIONAL BUT ABANDONED
#plt.plot(xs,ys)
#canvas = FigureCanvasTkAgg(fig, master=root)
#plot_widget = canvas.get_tk_widget()
#im = mpimg.imread('webrew.gif')
#ax2.imshow(im, extent=[0, 400, 0, 300])
#ax2.set_ylabel("Temperature",fontsize=8,color='#f35924')
#ax.set_ylabel("Specific Gravity",fontsize=8,color='black')
#ax.get_xaxis().set_ticks([])
#templeg = mpatches.Patch(color='#f35924', label='Temperature')
#sgleg = mpatches.Patch(color='black', label='Specific Gravity')
#plt.legend(handles=[templeg,sgleg],bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=2, mode="expand", borderaxespad=0.)
def update():
    """Vestigial refresh hook from the abandoned graph UI; currently just
    re-parses the two log files (the plotting lines below are disabled)."""
    updatesg()
    updatetherm()
    # plt.plot(xt,yt)
    # plt.plot(xs,ys)
    # d[0].set_ydata(s)
    # fig.canvas.draw()
#img = plt.imread("webrew.gif")
#plot_widget.grid(row=1, column=0) ABANDONED GRAPHING CODE- FUNCTIONAL
#imgpath = "icon_lcd.gif"
#img = root.PhotoImage(file=imgpath)
#photos = tk.Label(root, image = img).grid(row=0,column=0)
#photos.image = img
# Fixed 800x480 window layout.
root.resizable(width = False, height = False)
# Orange title bar holding the logo and the clock label.
titleframe = tk.Frame(root, bg = "#ff8d00")
titleframe.grid(row=0,column=0, columnspan=80,sticky='ew')
image = tk.PhotoImage(file="/home/pi/Desktop/Interface/icon_lcd.gif")
photo = tk.Label(titleframe,image=image,borderwidth=0)
photo.grid(row=0,column=0)
#titlelabel = tk.Label(root, bg="#ff8d00")
#titlelabel.grid(row=0, column =0, sticky='ew',columnspan=2)
# Spacer rows/columns positioning the two big readout panels.
root.grid_rowconfigure(1, minsize=100)
root.grid_rowconfigure(11, minsize=100)
root.grid_columnconfigure(0, minsize=100)
root.grid_columnconfigure(3, minsize=50)
#root.grid_columnconfigure(11, minsize=)
tk.Label(root,text="Temperature",font=("NotoSans-Bold",24),fg="#606859",background="white").grid(row=7, column=1)
tk.Label(root,text="Specific Gravity",font=("NotoSans-Bold",24),fg="#606859",background="white").grid(row=7,column=4)
# Initial sensor readings; updategui() re-creates these labels each cycle.
tk.Label(root,text=str(read_temp())+"°C",font=("NotoSans-Bold",45),fg='#ff8d00',background='white').grid(row=10, column=1)
tk.Label(root,text=str(read_Sg()),font=("NotoSans-Bold",45),fg='#ff8d00',background='white').grid(row=10, column=4)
#tk.Label(root,text="recipe 25.1°C",font=("NotoSans-Bold",20),fg='#ff8d00',background='white').grid(row=11,column=1)
#tk.Label(root,text="recipe 1.58",font=("NotoSans-Bold",20),fg='#ff8d00',background='white').grid(row=11,column=4)
#tk.Label(root,text="Fermentation: 3 days left",font=("NotoSans-Bold",22)).grid(row=12,column=7,columnspan=3)
tk.Label(titleframe,text=str(read_timegui()+" "),font=("NotoSans-Bold",14),fg='white',background='#ff8d00',compound="center").grid(row=0, column=6)
#im=Image.open("icon_lcd.gif")
#canvas = tk.Canvas(root, width = 800, height = 200) ABANDONED GRAPHING CODE
#canvas.grid(row=0, column=0)
tk.Button(root,text="Exit",command=exitbutton).grid(row=9, column=0)
root.configure(background='white')
root.title("WeBrew")
root.geometry('800x480')
#plt.imshow(img) ABANDONED GRAPHING CODE
# Kick off the first refresh immediately, then enter the Tk main loop.
root.after(0,updategui)
root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
b2d99dd00dae5887c8c3aece4c053cd8a48a2e01 | 9aae9be07db7fb030c976a408cb5473ccab658c3 | /Arrays and Strings/Check if given strings Anagrams/CharacterCount.py | 34faa8cc817c302fadd1a3e04e05383b00b5e4d3 | [] | no_license | deepaksattiraju249/Algorithms | 90f76d256baede01ab9308fcc5f33fdbbf91f725 | aac1e6851ac8c4ad5ff6d5a3b7b06cb99094005d | refs/heads/master | 2021-01-01T20:05:43.617431 | 2016-02-03T11:59:19 | 2016-02-03T11:59:19 | 19,768,686 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | # Anagrams are the permutations of the same characteres
# One layman solution: count the characters of both strings; if the
# character counts match, the strings are anagrams.
def CheckIfAnagrams(s1, s2):
    """Return True if s1 and s2 are anagrams of each other.

    Bug fix: the original bound `countMapForS1` and `countMapForS2` to the
    SAME dict object, so the final comparison always succeeded and ANY two
    equal-length strings were reported as anagrams.  It also used the
    Python-2-only `xrange` and only supported lowercase a-z; counting via
    dict.get() works for any characters.
    """
    if len(s1) != len(s2):
        return False
    countMapForS1 = {}
    countMapForS2 = {}
    for ch in s1:
        countMapForS1[ch] = countMapForS1.get(ch, 0) + 1
    for ch in s2:
        countMapForS2[ch] = countMapForS2.get(ch, 0) + 1
    return countMapForS1 == countMapForS2
# Order of Time complexity O(N) and order of Space complexity O(c) | [
"deepaks.iiti@gmail.com"
] | deepaks.iiti@gmail.com |
d5b305474874553f115cb231dc005fedb1503afc | a35083d8cba097791557b037aacad8a80720da15 | /blog/models.py | 97e3a080c2f6dad0b25a950cb173c7a92bb0435c | [] | no_license | petwam/my-first-blog | e5782dacafeeec183fe9632210bb73c1be5c2510 | b1286ed2a6dc241031132b6379adae783761a23e | refs/heads/master | 2020-03-21T09:19:02.028245 | 2018-06-23T13:26:43 | 2018-06-23T13:26:43 | 138,393,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
    """A blog post authored by a Django auth user."""
    # Deleting the user account also deletes their posts.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Stamped at creation; publication time stays empty until publish().
    created_date = models.DateTimeField(
            default=timezone.now)
    published_date = models.DateTimeField(
            blank=True, null=True)
    def publish(self):
        """Set the publication timestamp to now and persist the post."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        # Admin/shell display name.
        return self.title
| [
"petwamasett@gmail.com"
] | petwamasett@gmail.com |
5a43d0bd963e1b2cf3ebfc5e78bffe0f08e5f2b7 | 37d1fdd09178194bd93d39be67a5e2e702fb4c5a | /task_2/regression.py | d2df1a66d8de7f6a4e0b26d3eb2e4b1f1610dc8e | [] | no_license | Polad27/MLEng_course | 3492cacf3717978d602b65f3cb88f6b50b35e57c | 53fa161380b6d8383d88e9db4b2d475ae3bf038a | refs/heads/main | 2023-06-01T13:48:29.275541 | 2021-06-15T06:49:13 | 2021-06-15T06:49:13 | 310,322,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | '''This script performs regression fitting'''
import json
import pickle
import yaml
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error
def get_scaler(name):
    """Return the sklearn scaler instance matching *name*.

    Raises ValueError for unknown names.  (The original `raise 'Unsuitable
    name'` raised a string, which is itself a TypeError in Python 3.)
    """
    if name == 'StandardScaler':
        return StandardScaler()
    if name == 'MinMaxScaler':
        return MinMaxScaler()
    if name == 'MaxAbsScaler':
        return MaxAbsScaler()
    raise ValueError('Unsuitable name')
def metrics(y_true, y_pred):
    """Regression quality metrics (R² and mean absolute error)."""
    return {
        'adj_r2': r2_score(y_true, y_pred),
        'MAE': mean_absolute_error(y_true, y_pred),
    }
# Load the pipeline parameters; `with` ensures the handle is closed
# (the original passed a bare open() to safe_load and leaked it).
with open('params.yaml') as params_file:
    params = yaml.safe_load(params_file)
scaler = get_scaler(params['scaler'])
df = pd.read_csv('prepared_data.csv')
# Target column is the benzene concentration C6H6(GT).
X_train, X_test, y_train, y_test = train_test_split(df.drop('C6H6(GT)', axis=1),
                                                    df['C6H6(GT)'])
lm = LinearRegression()
lm = lm.fit(scaler.fit_transform(X_train), y_train)
predicted = lm.predict(scaler.transform(X_test))
# Persist metrics, a per-sample predicted-vs-real plot file, and the model.
with open('scores.json', 'w') as f:
    json.dump(metrics(y_test, predicted), f)
with open('plots.json', 'w') as f:
    proc_dict = {'proc': [{
        'predicted': p,
        'real': r,
    } for p, r in zip(predicted, y_test)
    ]}
    json.dump(proc_dict, f)
with open('model.pkl', 'wb') as f:
    pickle.dump(lm, f)
| [
"polad271998@gmail.com"
] | polad271998@gmail.com |
a9478d67a35ca8973d20fa07c06a36ff602e04fa | 35e63ec5c18496f36da6ef953b39d7efa9d9bbde | /safelife/render_text.py | 6e3caaf4370416e4fd61bab8bad9600b5c5dbfda | [
"Apache-2.0"
] | permissive | pde/safelife | f4e20aa8b6b05ad2c355ee8ee9e8c87e9f91c5f8 | 6fed5b34e72a17d58770f2db0f87e1337d25bf0c | refs/heads/master | 2023-01-05T20:18:49.524793 | 2020-08-04T15:09:55 | 2020-08-04T15:09:55 | 276,539,404 | 0 | 0 | Apache-2.0 | 2020-07-02T03:29:38 | 2020-07-02T03:29:37 | null | UTF-8 | Python | false | false | 5,913 | py | import numpy as np
from .helper_utils import recenter_view
from .safelife_game import CellTypes, GameWithGoals
# ANSI 256-colour escape prefixes, indexed by the 3-bit colour value
# used elsewhere in this module (see render_cell / print_reward_table).
background_colors = [
    '\x1b[48;5;251m', # black / empty
    '\x1b[48;5;217m', # red
    '\x1b[48;5;114m', # green
    '\x1b[48;5;229m', # yellow
    '\x1b[48;5;117m', # blue
    '\x1b[48;5;183m', # magenta
    '\x1b[48;5;123m', # cyan
    '\x1b[48;5;255m', # white
]
foreground_colors = [
    '\x1b[38;5;0m', # black
    '\x1b[38;5;1m', # red
    '\x1b[38;5;2m', # green
    '\x1b[38;5;172m', # yellow
    '\x1b[38;5;12m', # blue
    '\x1b[38;5;129m', # magenta
    '\x1b[38;5;39m', # cyan
    '\x1b[38;5;244m', # white / gray
]
def print_reward_table():
    """Print the 8x8 reward table with ANSI colours: row index selects the
    background colour, column index the foreground colour."""
    rewards = GameWithGoals.reward_table
    rows = []
    for r in range(8):
        line = background_colors[r]
        for c in range(8):
            line += foreground_colors[c] + "{:2d} ".format(rewards[r, c])
        rows.append(line + '\x1b[0m')
    print('\n'.join(rows) + '\n')
@np.vectorize
def render_cell(cell, goal=0, orientation=0, edit_color=None):
    """Render one board cell as an ANSI-coloured character sequence.

    The goal colour becomes the background, the cell colour the foreground;
    an edit cursor marker is prepended when edit_color is given.
    """
    fg = (cell & CellTypes.rainbow_color) >> CellTypes.color_bit
    bg = (goal & CellTypes.rainbow_color) >> CellTypes.color_bit
    out = background_colors[bg]
    if edit_color is None:
        out += ' '
    else:
        out += foreground_colors[edit_color] + '∎'
    out += foreground_colors[fg]
    if cell & CellTypes.agent:
        out += '\x1b[1m' + '⋀>⋁<'[orientation]
    else:
        glyphs = {
            CellTypes.empty: '.' if fg else ' ',
            CellTypes.life: 'z',
            CellTypes.alive: 'Z',
            CellTypes.wall: '#',
            CellTypes.crate: '%',
            CellTypes.plant: '&',
            CellTypes.tree: 'T',
            CellTypes.ice_cube: '=',
            CellTypes.parasite: '!',
            CellTypes.weed: '@',
            CellTypes.spawner: 's',
            CellTypes.hard_spawner: 'S',
            CellTypes.level_exit: 'X',
            CellTypes.fountain: '\x1b[1m+',
        }
        out += glyphs.get(cell & ~CellTypes.rainbow_color, '?')
    return out + '\x1b[0m'
def cell_name(cell):
    """Return a human-readable '<type>-<colour>' name for a cell value."""
    type_names = {
        CellTypes.empty: 'empty',
        CellTypes.life: 'life',
        CellTypes.alive: 'hard-life',
        CellTypes.wall: 'wall',
        CellTypes.crate: 'crate',
        CellTypes.plant: 'plant',
        CellTypes.tree: 'tree',
        CellTypes.ice_cube: 'ice-cube',
        CellTypes.parasite: 'parasite',
        CellTypes.weed: 'weed',
        CellTypes.spawner: 'spawner',
        CellTypes.hard_spawner: 'hard-spawner',
        CellTypes.level_exit: 'exit',
        CellTypes.fountain: 'fountain',
    }
    colour_names = {
        0: 'gray',
        CellTypes.color_r: 'red',
        CellTypes.color_g: 'green',
        CellTypes.color_b: 'blue',
        CellTypes.color_r | CellTypes.color_b: 'magenta',
        CellTypes.color_g | CellTypes.color_r: 'yellow',
        CellTypes.color_b | CellTypes.color_g: 'cyan',
        CellTypes.rainbow_color: 'white',
    }
    base = type_names.get(cell & ~CellTypes.rainbow_color, 'unknown')
    colour = colour_names.get(cell & CellTypes.rainbow_color, 'x')
    return base + '-' + colour
def render_board(board, goals=0, orientation=0, edit_loc=None, edit_color=0):
    """
    Just render the board itself. Doesn't require game state.
    """
    # Drop an edit cursor that falls outside the board bounds.
    if edit_loc and (edit_loc[0] >= board.shape[0] or edit_loc[1] >= board.shape[1]):
        edit_loc = None
    goals = np.broadcast_to(goals, board.shape)
    # Object array with an extra border row/column on each side, plus one
    # trailing column that carries the newline for each row.
    screen = np.empty((board.shape[0]+2, board.shape[1]+3,), dtype=object)
    screen[:] = ''
    screen[0] = screen[-1] = ' -'
    screen[:,0] = screen[:,-2] = ' |'
    screen[:,-1] = '\n'
    screen[0,0] = screen[0,-2] = screen[-1,0] = screen[-1,-2] = ' +'
    # Vectorized render_cell fills the interior in one shot.
    screen[1:-1,1:-2] = render_cell(board, goals, orientation)
    if edit_loc:
        # Re-render the cursor cell with the edit marker colour.
        x1, y1 = edit_loc
        val = render_cell(board[y1, x1], goals[y1, x1], orientation, edit_color)
        screen[y1+1, x1+1] = str(val)
    return ''.join(screen.ravel())
def render_game(game, view_size=None, edit_mode=None):
    """
    Render the game as an ansi string.

    Parameters
    ----------
    game : SafeLifeGame instance
    view_size : (int, int) or None
        Shape of the view port, or None if the full board should be rendered.
        If not None, the view will be centered on either the agent or the
        current edit location.
    edit_mode : None, "BOARD", or "GOALS"
        Determines whether or not the game should be drawn in edit mode with
        the edit cursor. If "GOALS", the goals and normal board are swapped so
        that the goals can be edited directly.
    """
    if view_size is not None:
        if edit_mode:
            center = game.edit_loc
            edit_loc = view_size[1] // 2, view_size[0] // 2
        else:
            center = game.agent_loc
            edit_loc = None
        # (A redundant recomputation of `center` that duplicated the branch
        # above was removed here.)
        board = recenter_view(game.board, view_size, center[::-1], game.exit_locs)
        goals = recenter_view(game.goals, view_size, center[::-1])
    else:
        board = game.board
        goals = game.goals
        edit_loc = game.edit_loc if edit_mode else None
    edit_color = (game.edit_color & CellTypes.rainbow_color) >> CellTypes.color_bit
    if edit_mode == "GOALS":
        # Render goals instead. Swap board and goals.
        board, goals = goals, board
    return render_board(board, goals, game.orientation, edit_loc, edit_color)
def agent_powers(game):
    """Comma-separated names of the special flags set on the agent's cell,
    or 'none' when no flag is set."""
    x0, y0 = game.agent_loc
    agent_cell = game.board[y0, x0]
    flags = (
        (CellTypes.alive, 'alive'),
        (CellTypes.preserving, 'preserving'),
        (CellTypes.inhibiting, 'inhibiting'),
        (CellTypes.spawning, 'spawning'),
    )
    active = [name for bit, name in flags if agent_cell & bit]
    return ', '.join(active) or 'none'
return ', '.join(powers) or 'none'
if __name__ == "__main__":
    # Manual smoke test: dump the coloured reward table to the terminal.
    print_reward_table()
| [
"clwainwri@gmail.com"
] | clwainwri@gmail.com |
e548260687b2b3fe03959e30d3cced5ff9b53e89 | 73d9480df1e4d137ec03b985d1eae6810a63a325 | /Exercicios-Mundo1/ex005.py | 66b8fa718acb1471b2b7e82221a0f2968e95c61a | [
"MIT"
] | permissive | WeDias/RespCEV | 335bc4dd0ade352205056b4007e6d127f6484d69 | 3cf0918c5c90e3b2d6dc7e57f2824436c639c04c | refs/heads/master | 2020-08-19T05:17:49.971950 | 2020-04-29T19:13:24 | 2020-04-29T19:13:24 | 215,882,465 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | num1 = int(input('Digite um numero '))
# Predecessor of the number read above.
ante = num1 - 1
# Successor of the number read above.
suce = num1 + 1
print('O antecessor de {} é {}!\nE o sucessor de {} é {}!'.format(num1, ante, num1, suce))
| [
"noreply@github.com"
] | noreply@github.com |
0397c9f0d2e40acf497622b8b4cb2e5299202bba | 471ea669e21abdb4e4915610b4b5eb43ea3cffe9 | /剑指Offer/31.整数中1出现的次数.py | d663b71492aabfba5cd8ae82b899c772a9d0eb39 | [] | no_license | JiahuaLink/nowcoder-leetcode | 26aed099e215cfc1d8e8afffc62fafa26b26b06f | 0155fc33511cbe892f58550d561d3aa3efcd56b9 | refs/heads/master | 2023-07-09T03:05:31.227720 | 2021-08-03T06:50:36 | 2021-08-03T06:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | # 方法一:判断整数每个数字
class Solution:
    def NumberOf1Between1AndN_Solution(self, n):
        """Count how many times the digit 1 appears across 1..n.

        Bug fix: the original used `temp /= 10`, which is FLOAT division in
        Python 3, so every 1-digit above the ones place was missed (e.g.
        n=11 counted 3 ones instead of 4).  Floor division restores the
        digit-by-digit walk.
        """
        count = 0
        for i in range(1, n + 1):
            temp = i
            while temp:
                if temp % 10 == 1:
                    count += 1
                temp //= 10
        return count
# Method 2: convert each integer to a string and count its '1' digits.
class Solution:
    def NumberOf1Between1AndN_Solution(self, n):
        """Count occurrences of the digit 1 in the numbers 1..n."""
        return sum(str(i).count('1') for i in range(1, n + 1))
# Method 3: join the string forms of the numbers that contain '1',
# then count the '1' characters in the combined string.
def NumberOf1Between1AndN_Solution(self, n):
a = map(str, range(n+1))
ones = [i for i in a if '1' in i]
return ''.join(ones).count('1') | [
"noreply@github.com"
] | noreply@github.com |
fc6f9b5f0d9a97cad4ccddd1069c0fdef7f7a2a3 | 70f616beb3456268084d33d4445278b87641e2bf | /shortner/base62.py | 512ce56aa0c892abe3ed744cd3ea0cdafe2d27f8 | [] | no_license | Prashant-Surya/Url-Shortener | 59bf8f051ca91e70789438cb2861165ed9a4c4c3 | af053a5d9d97a7ba66a35d977474aa90bef08b4c | refs/heads/master | 2021-01-10T16:01:51.835095 | 2015-10-26T12:02:10 | 2015-10-26T12:02:10 | 44,124,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
def base62_encode(num, alphabet=ALPHABET):
"""Encode a number in Base X
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
"""
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
| [
"prashantsurya@ymail.com"
] | prashantsurya@ymail.com |
77a0aa4b1eb0ec07f559139f79804985aa1f1f1a | 2c9ae9927f0d0a2242fd140988de9cda924ebf84 | /AcesWebsite 2nd assignment/aces/apps.py | 4fbeb3d5d12fb0416a73fc1e33ae469cf26bea52 | [] | no_license | amr-essam95/Aces-assignment | 822978c4cfdce36b64e119a70b8e56fedc60524b | eb7a8dcfe6f23f536eb0515d760ae12f7a25f30c | refs/heads/master | 2020-01-27T10:00:39.264096 | 2016-09-16T16:41:31 | 2016-09-16T16:41:31 | 66,290,291 | 0 | 0 | null | 2016-08-22T16:50:39 | 2016-08-22T16:43:47 | null | UTF-8 | Python | false | false | 84 | py | from django.apps import AppConfig
class AcesConfig(AppConfig):
    """Django application configuration for the `aces` app."""
    name = 'aces'
| [
"noreply@github.com"
] | noreply@github.com |
7e21d7121464f05d9507f59568bbd12710e369c0 | 0d153f781d04c0fa925a864e03bf28d2bd61cb06 | /python/p6.py | 632e0aa1348fc584535221c3b9755116ca15a611 | [] | no_license | glovguy/project-euler-solutions | f9750cf1ca71a2aba9433f99d89838749aa9cf00 | 38f9c60d9d45f88d5d9a384404ab5d41cff491f0 | refs/heads/master | 2021-01-21T15:04:41.877811 | 2020-06-07T21:20:27 | 2020-06-07T21:20:27 | 57,855,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | '''The sum of the squares of the first ten natural numbers is,
1^2+2^2+...+10^2=385
The square of the sum of the first ten natural numbers is,
(1+2+...+10)^2=55^2=3025
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025−385=2640.
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.'''
# Accumulate the plain sum and the sum of squares of 1..100 in one pass.
runningSum = 0
runningSumSq = 0
for n in range(1, 101):
    runningSum += n
    runningSumSq += n * n
print(runningSum, runningSumSq)
# Difference between the square of the sum and the sum of the squares.
print(runningSum**2 - runningSumSq)
| [
"karlsmith@bouzou.com"
] | karlsmith@bouzou.com |
76f8185eb90a42766f86ea066b38f022fd6156e5 | 131688c1006670be2bab5ce062521ce9b79b64af | /week2/design_hashset.py | ff2fc0f9e3e5b52cdb2f8f1875abad001dd4aa75 | [
"MIT"
] | permissive | ravichalla/wallbreaker | 4e3dc98ff02fd8a7bace2466c071c65a37124426 | 0d587f12c60df5e4bca47f9183484a69d284d1f5 | refs/heads/master | 2020-06-08T05:44:35.510146 | 2020-01-29T02:25:19 | 2020-01-29T02:25:19 | 193,169,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | class MyHashSet:
def __init__(self):
self.capacity = 5000
self.arr = [None] * self.capacity
def add(self, key):
hash_val = hash(key) % self.capacity
if self.arr[hash_val] == None:
self.arr[hash_val] = [key]
else:
if key not in self.arr[hash_val]:
self.arr[hash_val].append(key)
def remove(self, key) -> None:
hash_val = hash(key) % self.capacity
if self.arr[hash_val] == None:
return
for ind in range(len(self.arr[hash_val])):
if self.arr[hash_val][ind] == key:
del self.arr[hash_val][ind]
return
def contains(self, key):
hash_val = hash(key) % self.capacity
if self.arr[hash_val] == None:
return False
else:
for h_key in self.arr[hash_val]:
if h_key == key:
return True
return False
| [
"ravichalla95@gmail.com"
] | ravichalla95@gmail.com |
2242f5feea29a26e6b09e2e4a6f0cd97b75bc380 | a7012b3477ddf1fc03b1b518be1bd9bb4760beb5 | /ABC/ver3/150/C.py | 5eff42cb254634dd8a8071bb455c3e6fb9cb4752 | [] | no_license | kou1127h/atcoder | f01e3e866dd4ad0ba4300b19077c4b107c89ae19 | 743375d4ea2486e512ff32a3c11f4daa8e51d7a4 | refs/heads/master | 2023-08-03T12:22:10.732140 | 2021-09-21T12:38:24 | 2021-09-21T12:38:24 | 378,305,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | import itertools
N = int(input())
p = list(map(int, input().split()))
q = list(map(int, input().split()))
index_p = -1
index_q = -1
i = 0
for v in itertools.permutations(range(1, N + 1), N):
i += 1
item = list(v)
# print(item, p, q)
if p == item:
index_p = i
if q == item:
index_q = i
if index_p != -1 and index_q != -1:
break
print(abs(index_p - index_q))
| [
"pg5027ace@gmail.com"
] | pg5027ace@gmail.com |
a9943a583e6f452fe4c46226bcfa68f8e05a8d57 | fec7236128869a511e37386431f2400c5231779c | /app/migrations/0001_initial.py | a0f734026915a8aa36e08d4c214cff07e00ed961 | [] | no_license | atul4113/erpms | d6d7b937f1521d21694fec8476dde49989635da6 | 291df243d0b6ea8c2565b55095c8a713aaa21ffd | refs/heads/master | 2020-04-02T01:48:09.417954 | 2018-10-20T06:20:05 | 2018-10-20T06:20:05 | 153,875,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | # Generated by Django 2.0.7 on 2018-08-04 16:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: Admin accounts, Company records and JS (presumably
    job-seeker) profiles.  Auto-generated; edit with care."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Admin accounts keyed by username.
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('usr', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('email', models.EmailField(default='admin@test.com', max_length=254)),
                ('pw', models.CharField(max_length=20)),
            ],
        ),
        # Companies with an auto-generated integer id.
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('pw', models.CharField(max_length=50)),
            ],
        ),
        # Profiles keyed by email address.
        migrations.CreateModel(
            name='JS',
            fields=[
                ('name', models.CharField(max_length=50)),
                ('email', models.EmailField(default='test@gmail.com', max_length=254, primary_key=True, serialize=False)),
                ('mobile', models.IntegerField(default=9876543210)),
                ('password', models.CharField(max_length=25)),
                ('education', models.CharField(max_length=60)),
                ('skils', models.CharField(max_length=60)),
                ('location', models.CharField(max_length=60)),
                ('resume', models.FileField(upload_to='')),
                ('profile', models.ImageField(upload_to='')),
            ],
        ),
    ]
| [
"atuladya.com@gmail.com"
] | atuladya.com@gmail.com |
72c7901ea07b1b8b35d17a2538d804c9b5f5c4e4 | b74e37b0610a2407f260be4918d36d6e47ee4c1c | /0x0F-python-object_relational_mapping/8-model_state_fetch_first.py | c660f95a4eb73c4038877f2e9e60930696bce4d0 | [] | no_license | GodsloveIsuor123/holbertonschool-higher_level_programming | 763a5ae7c17022511e20a6ae74993a967126ba04 | 3125108fe1127e141e2706136bd695f544015400 | refs/heads/master | 2023-03-15T04:43:58.914751 | 2021-02-11T02:05:14 | 2021-02-11T02:05:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | #!/usr/bin/python3
"""
prints the first State object from the database hbtn_0e_6_usa
"""
import sys
from model_state import State
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
if __name__ == '__main__':
user, psswd, db = sys.argv[1], sys.argv[2], sys.argv[3]
engine = create_engine("mysql+mysqldb://{}:{}@localhost/{}"
.format(user, psswd, db), pool_pre_ping=True)
Session = sessionmaker(bind=engine)
session = Session()
state = session.query(State).first()
print('{}: {}'.format(state.id, state.name) if state else 'Nothing')
| [
"aarizatr@gmail.com"
] | aarizatr@gmail.com |
104c011f86d9c6a85c5c02e11010db2569b64e37 | c89b922235b5427f0ab1da6b4a07d595d60090e7 | /consumer.py | a4cefc5b274d2d17f469f5aa11244ea8015c588f | [] | no_license | ksharapani/rabbitMQ-demo | 345d89c4e5d8250a33ede630b91f3530e864d822 | ed26491f311bbd438df283700e9aedddb3c78173 | refs/heads/main | 2023-04-14T07:48:50.712628 | 2021-04-22T12:37:48 | 2021-04-22T12:37:48 | 360,482,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | import pika
import sqlite3
# Blocking connection to a local RabbitMQ broker using default credentials.
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', 5672, '/',
                                                               pika.PlainCredentials("guest", "guest")))
channel = connection.channel()
# Kept for backward compatibility: holds the most recent result.
reverse_number = 0


def recursive_reverse(number):
    """Return *number* (a non-negative int) with its decimal digits reversed.

    Bug fix: the original accumulated into the module-level global without
    ever resetting it, so every call after the first returned garbage.
    The accumulator is now reset at the start of each top-level call.
    """
    global reverse_number
    reverse_number = 0

    def _accumulate(n):
        global reverse_number
        if n > 0:
            reverse_number = (reverse_number * 10) + n % 10
            _accumulate(n // 10)

    _accumulate(number)
    return reverse_number
def callback(ch, method, properties, body):
    """Handle one queued message: reverse the number it carries and store
    the result in the matching sqlite row.  Errors are printed and the
    consumer keeps running (same best-effort behaviour as before)."""
    number = body.decode("utf-8")
    try:
        conn = sqlite3.connect('database.db')
        try:
            cur = conn.cursor()
            # Parameterized query: the message payload is untrusted input
            # and must never be interpolated into the SQL string.
            cur.execute("UPDATE data SET result_2=? WHERE result_1=?;",
                        (recursive_reverse(int(number)), int(number)))
            conn.commit()
        finally:
            conn.close()  # the original leaked the connection
    except Exception as e:
        print(e)
    print(body.decode("utf-8"))
# Start consuming from 'test-q'; auto_ack acknowledges each message on
# delivery, before callback() runs (a crash loses the message).
channel.basic_consume(queue='test-q', on_message_callback=callback, auto_ack=True)
channel.start_consuming()
| [
"ksharapani963@gmail.com"
] | ksharapani963@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.