index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
985,700 | bfb8de0ec59912dc52095dbb2ba49917eabe9d8e | """This module defines a class for running knoweng's gene prioritization jobs."""
### Summary of paths and docker mounts for AWS (TODO: generalize across hosts and pipelines) ###
# on the host:
#
# /mnt/storage/interface/ <-- mounted to pipeline's docker container
#
# /mnt/storage/interface/networks/ <-- contains network files in tree of subdirectories
#
# /mnt/storage/interface/userfiles/projectid/jobs/jobid/ <-- contains yml file
#
# /mnt/storage/interface/userfiles/projectid/jobs/jobid/results/ <-- contains outputs
#
# /mnt/storage/interface/userfiles/projectid/files/fileid <-- the user's features file
# /mnt/storage/interface/userfiles/projectid/files/file2id <-- the user's response file
#
#
# in the container:
#
# /home/run_dir/ can map to the host's /mnt/storage/interface/
#
#
# calling the script:
#
# -run_directory can be /home/run_dir/userfiles/projectid/jobs/jobid/
# -run_file can be the name of a yml file in that directory
#
#
# in the yml file:
#
# (working directory will be /home/run_dir/userfiles/projectid/jobid/)
# results_directory can be ./results/
# network file paths will look like ../../../networks/...
# spreadsheet files paths will look like ../fileid
import csv
import os
from zipfile import ZipFile, ZIP_DEFLATED
import yaml
from nest_py.knoweng.jobs.chronos_job import ChronosJob
from nest_py.knoweng.jobs.db_utils import create_gp_record, get_file_record
from nest_py.knoweng.jobs.networks import get_merged_network_info
from nest_py.knoweng.jobs.data_cleanup import \
DataCleanupJob, PipelineType, CorrelationMeasure, \
get_cleaned_spreadsheet_relative_path, get_gene_names_map_relative_path, \
get_dict_from_id_to_name
class GenePrioritizationJob(ChronosJob):
    """Subclass of ChronosJob that handles gene prioritization jobs."""

    def __init__(self, user_id, job_id, userfiles_dir, job_dir_relative_path,
                 timeout, cloud, species_id, features_file_relative_path,
                 gene_names_map_relative_path, response_file_relative_path,
                 correlation_method, use_network, network_name,
                 network_influence, num_response_correlated_genes,
                 use_bootstrapping, num_bootstraps, bootstrap_sample_percent,
                 prep_job):
        """Initializes self.

        Writes the pipeline's run.yml into the job directory and creates the
        results/ subdirectory the pipeline will populate.

        Args:
            user_id (NestId): The user id associated with the job.
            job_id (NestId): The unique identifier Eve/Mongo assigns to the job.
            userfiles_dir (str): The base directory containing all files for
                all users.
            job_dir_relative_path (str): The relative path from userfiles_dir to
                the directory containing the job's files, which must already
                exist.
            timeout (int): The maximum execution time in seconds.
            cloud (str): The cloud name, which must appear as a key in
                nest_py.knoweng.jobs.ChronosJob.cloud_path_dict.
            species_id (str): The species_id to disambiguate networks with
                identical edge_type_names.
            features_file_relative_path (str): The relative path from the
                userfiles_dir to the features file.
            gene_names_map_relative_path (str): The relative path from the
                userfiles_dir to the gene-name map.
            response_file_relative_path (str): The relative path from the
                userfiles_dir to the response file.
            correlation_method (str): One of ['pearson', 't_test'].
            use_network (bool): Whether to use the knowledge network.
            network_name (str): The network to use when use_network is True.
            network_influence (float): The amount of network influence to use.
            num_response_correlated_genes (int): The number of top genes from
                the correlation analysis to carry over to the network analysis.
            use_bootstrapping (bool): Whether to use bootstrapping.
            num_bootstraps (int): The number of bootstraps to run.
            bootstrap_sample_percent (float): The percentage of columns to use
                per bootstrap.
            prep_job (DataCleanupJob): The job that prepares the inputs.

        Returns:
            None: None.
        """
        self.features_file_relative_path = features_file_relative_path
        self.gene_names_map_relative_path = gene_names_map_relative_path
        self.response_file_relative_path = response_file_relative_path
        self.prep_job = prep_job
        self.job_dir_path = os.path.join(userfiles_dir, job_dir_relative_path)
        self.results_dir_path = os.path.join(self.job_dir_path, 'results')
        # the pipeline writes all of its outputs here; os.mkdir raises if the
        # directory already exists
        os.mkdir(self.results_dir_path)
        # create yaml file
        # paths are relative to the job directory inside the container (see
        # the mount layout documented at the top of this module)
        run_data = {
            'correlation_measure': correlation_method,
            'spreadsheet_name_full_path': '../../' + features_file_relative_path,
            'phenotype_name_full_path': '../../' + response_file_relative_path,
            'results_directory': './results',
            'drop_method': 'drop_NA'
        }
        if use_network:
            networks = get_merged_network_info('/networks/')
            # select the (assumed unique) network matching both the species
            # and the requested edge type; IndexError if none matches
            network_info = [net for net in networks if \
                net['species_id'] == species_id and \
                net['edge_type_name'] == network_name][0]
            run_data['gg_network_name_full_path'] = '../../../networks/' + \
                network_info['path_to_edge']
            self.gg_network_metadata_full_path = '../../../networks/' + \
                network_info['path_to_metadata']
            run_data['rwr_max_iterations'] = 100
            run_data['rwr_convergence_tolerence'] = 1.0e-4
            # higher network influence means a lower restart probability for
            # the random-walk-with-restart step
            run_data['rwr_restart_probability'] = \
                1 - float(network_influence)/100
            run_data['top_beta_of_sort'] = num_response_correlated_genes
        else:
            run_data['top_beta_of_sort'] = 100 # TODO Nahil says we need this
            self.gg_network_metadata_full_path = None
        if use_bootstrapping:
            run_data['number_of_bootstraps'] = num_bootstraps
            run_data['rows_sampling_fraction'] = 1.0
            run_data['cols_sampling_fraction'] = \
                float(bootstrap_sample_percent)/100
        # the pipeline method name is fully determined by the two flags
        if use_network and use_bootstrapping:
            run_data['method'] = 'bootstrap_net_correlation'
        elif use_network and not use_bootstrapping:
            run_data['method'] = 'net_correlation'
        elif not use_network and use_bootstrapping:
            run_data['method'] = 'bootstrap_correlation'
        else:
            run_data['method'] = 'correlation'
        self.yml_path = os.path.join(self.job_dir_path, 'run.yml')
        # NOTE(review): 'wb' with yaml.safe_dump implies Python 2 str
        # semantics; under Python 3 this would need mode 'w' — confirm
        # before porting.
        with open(self.yml_path, 'wb') as outfile:
            yaml.safe_dump(run_data, outfile, default_flow_style=False)
        job_name = 'nest_GP_' + correlation_method + '_' + job_id.to_slug()
        super(GenePrioritizationJob, self).__init__(\
            user_id, job_id, userfiles_dir, job_dir_relative_path, \
            job_name, timeout, cloud,
            'knowengdev/gene_prioritization_pipeline:07_26_2017', 8, 15000)

    def get_command(self):
        """Returns the docker command for gene_prioritization."""
        # cd into the job directory as seen inside the container, then run
        # the pipeline against the run.yml written by __init__
        return 'date && cd ' + \
            os.path.join(ChronosJob.cloud_path_dict[self.cloud], \
                'userfiles', self.job_dir_relative_path) + \
            ' && python3 /home/src/gene_prioritization.py ' + \
            ' -run_directory ./' + \
            ' -run_file run.yml' + \
            ' && date;'

    def is_ready(self):
        """Returns true iff preprocessing is done."""
        return self.prep_job.is_done()

    def is_done(self):
        """Returns true iff all of the files have been created."""
        # the top_genes_per_phenotype* file is the last artifact the
        # pipeline writes, so its presence signals completion
        return_val = False
        for name in os.listdir(self.results_dir_path):
            if name.startswith('top_genes_per_phenotype'):
                return_val = True
        return return_val

    def on_done(self):
        """Processes scores, loads data to database, prepares zip file, and
        deletes self from Chronos.

        Returns:
            None: None.
        """
        # scores maps response name -> {gene id -> visualization score}
        scores = {}
        # TODO KNOW-153
        complete_gene_id_to_name = get_dict_from_id_to_name(\
            os.path.join(self.userfiles_dir, self.gene_names_map_relative_path))
        # only the genes that actually appear in the results are kept in
        # the id->name map stored with the record
        seen_gene_id_to_name = {}
        for name in self.get_response_files():
            csvfile_path = os.path.join(self.results_dir_path, name)
            # NOTE(review): 'rb' with csv.DictReader implies Python 2;
            # Python 3 requires text mode with newline='' — confirm.
            with open(csvfile_path, 'rb') as csvfile:
                # TODO FIXME read more top scores:
                # 1. to support threshold beyond 100
                # 2. to display actual values when included by union
                count = 0
                for row in csv.DictReader(csvfile, delimiter='\t'):
                    resp = row['Response']
                    if resp not in scores:
                        scores[resp] = {}
                    gene_id = row['Gene_ENSEMBL_ID']
                    scores[resp][gene_id] = \
                        float(row['visualization_score'])
                    # fall back to the raw id when the map has no name
                    seen_gene_id_to_name[gene_id] = \
                        complete_gene_id_to_name.get(gene_id, gene_id)
                    count += 1
                    # cap at roughly the top 100 rows per response file
                    if count > 100:
                        break
        create_gp_record(self.user_id, self.job_id, scores, 0,\
            seen_gene_id_to_name)
        self.prepare_zip_file()
        self.delete_from_chronos()

    def get_response_files(self):
        # per-response visualization-score files are whatever remains in
        # results/ after excluding the ranked/top summary files
        return [f for f in os.listdir(self.results_dir_path) \
            if not f.startswith('ranked_genes_per_phenotype') and \
            not f.startswith('top_genes_per_phenotype') and \
            os.path.isfile(os.path.join(self.results_dir_path, f))]

    def prepare_zip_file(self):
        """Creates a zip file on disk for later download by the user.

        Args:
            None.

        Returns:
            None.
        """
        # need the following:
        # 1. readme
        # 2. cleaned features file
        # 3. gene map
        # 4. clean response file
        # 5. run.yml
        # 6. combined viz scores files
        # 7. all top_genes_per_phenotype* files
        # 8. network metadata
        zip_path = os.path.join(\
            self.job_dir_path, 'download.zip')
        with ZipFile(zip_path, 'w', ZIP_DEFLATED) as zipout:
            zipout.write(\
                '/zip_readmes/README-GP.txt', 'README-GP.txt')
            cleaned_features_path = os.path.join(\
                self.userfiles_dir, self.features_file_relative_path)
            zipout.write(\
                cleaned_features_path, 'clean_genomic_matrix.txt')
            gene_names_map_path = os.path.join(\
                self.userfiles_dir, self.gene_names_map_relative_path)
            zipout.write(\
                gene_names_map_path, 'gene_map.txt')
            cleaned_response_path = os.path.join(\
                self.userfiles_dir, self.response_file_relative_path)
            zipout.write(\
                cleaned_response_path, 'clean_phenotypic_matrix.txt')
            zipout.write(\
                self.yml_path, 'run_params.yml')
            # combine viz files, keeping the header row only once
            combined_viz_path = os.path.join(self.job_dir_path, \
                'combined_viz.tsv')
            with open(combined_viz_path, 'w') as combo:
                for fidx, fname in enumerate(sorted(self.get_response_files())):
                    fpath = os.path.join(self.results_dir_path, fname)
                    with open(fpath, 'r') as vizfile:
                        for lidx, line in enumerate(vizfile):
                            if lidx == 0 and fidx > 0:
                                # only print the column labels once
                                pass
                            else:
                                combo.write(line)
            zipout.write(combined_viz_path, 'genes_ranked_per_phenotype.txt')
            top_genes_files = [f for f in os.listdir(self.results_dir_path) \
                if f.startswith('top_genes_per_phenotype')]
            if len(top_genes_files) == 1:
                top_genes_file_path = os.path.join(\
                    self.results_dir_path, top_genes_files[0])
                zipout.write(\
                    top_genes_file_path, 'top_genes_per_phenotype_matrix.txt')
            if self.gg_network_metadata_full_path is not None:
                zipout.write(self.gg_network_metadata_full_path, \
                    'interaction_network.metadata')
def get_gene_prioritization_runners(\
        user_id, job_id, userfiles_dir, project_dir, timeout, cloud, species_id,\
        features_file_id, response_file_id, correlation_method, use_network,\
        network_name, network_influence, num_response_correlated_genes,\
        use_bootstrapping, num_bootstraps, bootstrap_sample_percent):
    """Returns a list of ChronosJob instances required to run a GP job.

    Args:
        user_id (NestId): The user id associated with the job.
        job_id (NestId): The unique identifier Eve/Mongo assigns to the job.
        userfiles_dir (str): The base directory containing all files for
            all users.
        project_dir (str): The name of the directory containing the files
            associated with the current project.
        timeout (int): The maximum execution time in seconds.
        cloud (str): The cloud name, which must appear as a key in
            nest_py.knoweng.jobs.ChronosJob.cloud_path_dict.
        species_id (str): The species_id to disambiguate networks with
            identical edge_type_names.
        features_file_id (NestId): The _id of the features file in the database.
        response_file_id (NestId): The _id of the response file in the database.
        correlation_method (str): One of ['pearson', 't_test'].
        use_network (bool): Whether to use the knowledge network.
        network_name (str): The network to use when use_network is True.
        network_influence (float): The amount of network influence to use.
        num_response_correlated_genes (int): The number of top genes from
            the correlation analysis to carry over to the network analysis.
        use_bootstrapping (bool): Whether to use bootstrapping.
        num_bootstraps (int): The number of bootstraps to run.
        bootstrap_sample_percent (float): The percentage of columns to use
            per bootstrap.

    Returns:
        list: A list of ChronosJob instances required to run a GP job.

    Raises:
        ValueError: If correlation_method is not 'pearson' or 't_test'.
    """
    job_name = "nest_GP_" + correlation_method + '_' + job_id.to_slug()
    job_dir_relative_path = os.path.join(project_dir, job_name)
    os.mkdir(os.path.join(userfiles_dir, job_dir_relative_path))
    features_file_dto = get_file_record(user_id, features_file_id)
    cleaned_features_file_relative_path = \
        get_cleaned_spreadsheet_relative_path(\
            job_dir_relative_path, features_file_dto)
    gene_names_map_relative_path = \
        get_gene_names_map_relative_path(\
            job_dir_relative_path, features_file_dto)
    response_file_dto = get_file_record(user_id, response_file_id)
    response_file_relative_path = \
        get_cleaned_spreadsheet_relative_path(\
            job_dir_relative_path, response_file_dto)
    # map the API's method string onto the data-cleanup enum; fail fast on
    # unknown values instead of silently passing dc_method=None downstream
    if correlation_method == 't_test':
        dc_method = CorrelationMeasure.T_TEST
    elif correlation_method == 'pearson':
        dc_method = CorrelationMeasure.PEARSON
    else:
        raise ValueError(
            "correlation_method must be one of ['pearson', 't_test'], "
            "got {0!r}".format(correlation_method))
    prep_job = DataCleanupJob(\
        user_id, job_id, userfiles_dir, timeout, cloud, species_id,
        features_file_dto, response_file_dto, None,
        job_dir_relative_path, PipelineType.GENE_PRIORITIZATION, dc_method)
    return [
        prep_job,
        GenePrioritizationJob(\
            user_id, job_id, userfiles_dir, job_dir_relative_path, timeout,
            cloud, species_id, cleaned_features_file_relative_path,
            gene_names_map_relative_path, response_file_relative_path,
            correlation_method, use_network, network_name, network_influence,
            num_response_correlated_genes, use_bootstrapping, num_bootstraps,
            bootstrap_sample_percent, prep_job)
    ]
|
985,701 | 613684f80b9985fa951a788ad21a8b27492d9eac | #user when you need to use key value pare
# name: uday kumar
# email: uday@gmail.com
# phone: 1234567890
# A dictionary stores key/value pairs, e.g.
#   name: uday kumar
#   email: uday@gmail.com
#   phone: 1234567890
customer = {
    "name": "udaykumar",
    "age": 30,
    "is_verified": True,
}

# Existing keys can be reassigned, and new keys added at any time.
customer["name"] = "Kranthi Kumar"
customer["birthdate"] = "jan 5th 2020"

# Indexing with [] raises KeyError on a missing key; .get() returns
# None instead, which is safer for optional keys.
print(customer["name"])
print(customer["age"])
print(customer.get("name"))
print(customer.get("age"))
print(customer.get("birthdate"))
#ex
# Read a phone number and spell out each digit as an English word.
phone = input("Phone")
digits_mapping = {
    "1": "one",
    "2": "two",
    "3": "three",
    "4": "four",  # BUGFIX: key was "4:", so '4' always hit the "!" fallback
}
output = ""
for ch in phone:
    # Unmapped characters (including 0 and 5-9) fall back to "!".
    output += digits_mapping.get(ch, "!") + " "
print(output)
985,702 | 704d1cbbad802373372b6c321fcaae44215162c2 | from itertools import zip_longest
# ASCII art shown to the left of the speech bubble.
CLIPPY = r"""/ \
 | |
 @ @
|| || <--
|| ||
|\_/|
\___/ """

# One empty row of the 72-character speech bubble.
BLANK = '|' + ' '*70 + '|'


def clippify(func):
    """Decorator that reframes *func*'s docstring as a Clippy speech bubble.

    The wrapped function behaves exactly like *func*; only its ``__doc__``
    is replaced by ASCII art built from the original docstring.
    """
    from functools import wraps

    # BUGFIX: without functools.wraps the decorated function reported
    # __name__ == 'wrapper' in tracebacks, help(), and introspection.
    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    if func.__doc__:
        # Pad each docstring line to the 70-character bubble interior.
        doc_lines = [f'|{line:70s}|' for line in func.__doc__.split('\n')]
    else:
        doc_lines = ['| Nothing to see here|']
    doc_lines = ['-'*70 + '\\', BLANK, f'| It looks like you are trying to find help on {func.__name__:24s}|', BLANK] + doc_lines
    doc_lines = doc_lines + [BLANK, '-'*70 + '/']
    doc_string_lines = []
    # Pair each row of the Clippy art with a row of the bubble, padding
    # whichever side runs out of rows first.
    for clip_line, doc_line in zip_longest(CLIPPY.split('\n'), doc_lines):
        if not clip_line:
            clip_line = ' ' * 10
        if not doc_line:
            doc_line = ' ' * 72
        doc_string_lines.append(f'{clip_line:10s} {doc_line:72s}')
    # wraps already copied func's metadata; overwrite just the docstring.
    wrapper.__doc__ = '\n'.join(doc_string_lines)
    return wrapper
|
985,703 | d9522aaca54295d33d5f43b4a2e92011acbf03b3 | class Solution:
def balancedStringSplit(self, s: str) -> int:
balance = 0
ans = 0
for char in s:
balance += 1 if char == 'L' else -1
if not balance: ans += 1
return ans
|
985,704 | e5c220724fe339111c1827aaefe8d890ec859e13 | import pyqrcode #pip install pyqrcode
#pip install pypng
import sys
# Log the raw argument vector to aid debugging.
print("--- Command Line:", sys.argv)
if len(sys.argv) < 2:
    # need at least one user argument (argv[0] is the script name)
    print(f"Usage: {sys.argv[0]} my-msg")
    exit(1)
msg = sys.argv[1]
# Encode the message as a QR code and render it as a PNG
# (PNG output requires the pypng package).
qrcode = pyqrcode.create(msg)
qrcode.png("QRmsg.png", scale=8)
print("--- QR Code is created in QRmsg.png")
985,705 | cd576621d18ce0d252ef848010421a749767e070 | def nthUglyno(n):
ugl=[0]*(n)
ugl[0]=1
i2=i3=i5=0
next_multile_of_2=ugl[i2]*2
next_multile_of_3=ugl[i3]*3
next_multile_of_5=ugl[i5]*5
for i in range(1,n):
next_ugl_no= min(next_multile_of_2,next_multile_of_3,next_multile_of_5)
ugl[i]=next_ugl_no
if(next_ugl_no==next_multile_of_2):
i2+=1
next_multile_of_2=ugl[i2]*2
if (next_ugl_no == next_multile_of_3):
i3 += 1
next_multile_of_3 = ugl[i3] * 3
if (next_ugl_no == next_multile_of_5):
i5+=1
next_multile_of_5 =ugl[i5]*5
return ugl[-1]
# CLI driver: read n from stdin and print the n-th ugly number.
n=int(input())
print(nthUglyno(n))
985,706 | 771857248a65452b3eb1830e06f6b4ede1a8ed42 | import math
from primecheck import IsPrime
def QuadPrimeFormula(n, a, b):
    """Evaluate Euler's prime-generating quadratic n^2 + a*n + b."""
    return n * n + a * n + b
def QuadPrimeSeqLenght(n, a, b):
    """Count how many consecutive n values (from *n* upward) make the
    quadratic n^2 + a*n + b prime.

    A ValueError raised by IsPrime resets the whole count to 0, matching
    the original error policy.
    """
    streak = 0
    while True:
        try:
            is_prime = IsPrime(QuadPrimeFormula(n + streak, a, b))
        except ValueError:
            return 0
        if not is_prime:
            return streak
        streak += 1
## Test ##
# Project Euler 27-style search: scan (a, b) over the given ranges for
# the quadratic producing the longest run of primes starting at n = 0.
max_len = 0
max_a = 0
max_b = 0
for b in range(-1000, 1001, 1):
    for a in range(-999, 1000, 1):
        temp_len = QuadPrimeSeqLenght(0, a, b)
        if temp_len > max_len:
            # new best run; remember its coefficients
            max_len = temp_len
            max_a, max_b = a, b
        else:
            continue
# Report the product of the winning coefficients.
print(f"{max_a*max_b}")
|
985,707 | 06dfc56aa2bf6047bb7a193ae3545bcc319abca3 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import (
BaseUserManager, AbstractUser
)
from django.utils import six, timezone
class EmployeeManager(BaseUserManager):
    """Manager for the custom Employee user model.

    Mirrors Django's stock UserManager: a private _create_user helper plus
    the public create_user / create_superuser entry points.
    """

    def _create_user(self, username, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        email = self.normalize_email(email)
        user = self.model(username=username, email=email,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser,
                          date_joined=now, **extra_fields)
        # hash the password rather than storing it in plain text
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, username, email=None, password=None, **extra_fields):
        # regular user: neither staff nor superuser
        return self._create_user(username, email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, username, email, password, **extra_fields):
        # superuser: both staff and superuser flags set
        return self._create_user(username, email, password, True, True,
                                 **extra_fields)
class Department(models.Model):
    """An organizational department that employees can belong to."""
    # department names must be unique across the organization
    name = models.CharField(_("Name"), max_length=128, unique=True)
    desc = models.TextField(_("Description"), blank=True)

    def __str__(self):
        return self.name
class Employee(AbstractUser):
    """Custom user model extending Django's AbstractUser with HR fields."""
    employee_num = models.CharField(_("Employee No"), max_length=32, unique=True)
    # an employee may belong to several departments
    departments = models.ManyToManyField(Department)
    telephone_num = models.CharField(_("Telephone Number"), max_length=32)
    # date the employee left; null/blank while still employed
    leave_data = models.DateField(_("Leave Data"), blank=True, null=True)
    desc = models.TextField(_("Description"), blank=True)

    objects = EmployeeManager()

    class Meta:
        verbose_name = _('employee')
        verbose_name_plural = _('employees')

    # NOTE(review): the commented-out overrides below follow Django's
    # custom-user documentation example (email login, permissive perms);
    # kept for reference only.
    # USERNAME_FIELD = 'email'
    # REQUIRED_FIELDS = ['date_of_birth']
    # def get_full_name(self):
    #     # The user is identified by their email address
    #     return self.email
    # def get_short_name(self):
    #     # The user is identified by their email address
    #     return self.email
    # def __str__(self):              # __unicode__ on Python 2
    #     return self.email
    # def has_perm(self, perm, obj=None):
    #     "Does the user have a specific permission?"
    #     # Simplest possible answer: Yes, always
    #     return True
    # def has_module_perms(self, app_label):
    #     "Does the user have permissions to view the app `app_label`?"
    #     # Simplest possible answer: Yes, always
    #     return True
    # @property
    # def is_staff(self):
    #     "Is the user a member of staff?"
    #     # Simplest possible answer: All admins are staff
    #     return self.is_admin
985,708 | de79f5b554b54cbf74edd3768956df838bd3c88a | import numpy as np
import random
from utils import *
# Fix both RNGs so repeated runs explore the same random swaps.
np.random.seed(1234567)
random.seed(1234567)
Shuffle = False
points = generate_data(100)
# points = generate_cluster_data(20, maxn=5, std=3, domain=(0, 100))
n = len(points)
print("We have gotten %i sites to travel" % n)
# those in the same cluster may be close to each other in the indices
# because the order of processing
# To check the initialization's effect
# you can change Shuffle's values
if Shuffle:
    # randomly permute the site order before building the initial path
    order = np.arange(n)
    np.random.shuffle(order)
    points = points[order]
distances = get_distances(points)
# mean, std = get_mean_std(distances)
plot_data(points)
# Method 1 randomly swapping
# a rough loop
initial_path = np.arange(n)
path = initial_path
# show the starting (identity-order) tour before optimization
plot_path(points ,path, show=False)
# path = soft_random_select_swap(n, distances, initial_path)
path = soft_random_select_swap(n, distances, initial_path, max_iters=int(1e5))
print(path)
res = get_path_length(distances, path)
print("the final result is %.3f" % res)
plot_path(points, path, show=True)
985,709 | 0f4450813c992f2e46a76438a3fc6c4ea2f0f44d | ## Ejercicios
#Ejercicios 1 :
'''
Ejercicio 1
Escribir un programa que pregunte al usuario su nombre, y luego lo salude.
'''
Nombre = input("Ingrese SU nombre : ")
SaludoCompleto = "Hola Buenas Noches, {}.".format(Nombre)
print(SaludoCompleto)
'''
Ejercicio 2
Calcular el perímetro y área de un rectángulo dada su base y su altura.
'''
Lado1 = int(input("Ingrese el primer Lado :"))
Lado2 = int(input("Ingrese el Segundo Lado :"))
Area = Lado1 * Lado2
Perimetro = 2* (Lado1 + Lado2)
print("El rectangulo tiene los siguientes lados, Lado 1 es igual a {}, el segundo lado es igual a {}, el área es igual a {} y el perimetro es {}.".format(Lado1, Lado2, Area, Perimetro))
'''
Ejercicio 3
Dados los catetos de un triángulo rectángulo, calcular su hipotenusa.
'''
LadoTriangulo1 = int(input("Ingrese primer cateto : "))
LadoTriangulo2 = int(input("Ingrese segundo cateto : "))
if LadoTriangulo1 == 0 or LadoTriangulo2 == 0:
Hipotenusa = "Uno de los catetos es cero y no se puede calcular la Hipotenusa."
print(Hipotenusa)
else :
Hipotenuesa = pow( pow(LadoTriangulo1,2) + pow(LadoTriangulo2,2), 0.5)
print("La hipotenusa de los lados es el siguiente : {}".format(Hipotenuesa))
'''
Ejercicio 4
Dados dos números, mostrar la suma, resta, división y multiplicación de ambos.
'''
Numero1 = int(input("Ingresar el Primer numero : "))
Numero2 = int(input("Ingresar el Segundo numero : "))
Suma = Numero1 + Numero2
Resta = Numero1 - Numero2
Multiplicacion = Numero1 * Numero2
if(Numero2 == 0):
Division = "No se puede realizar la Division entre Cero. "
else:
Division = Numero1 / Numero2
print("La Suma es : {}".format(Suma))
print("La Resta es : {}".format(Resta))
print("La Multiplicacion es : {}".format(Multiplicacion))
print("La Division es : {}".format(Division))
'''
Ejercicio 5
Escribir un programa que convierta un valor dado en grados Fahrenheit a grados Celsius.
Recordar que la fórmula para la conversión es:
1. C = (F-32)*5/9
'''
ValorFah = int(input("Ingresar el valor en Fahrenheit : "))
ValorCel = ( ( ValorFah - 32 ) * 5 ) / 9
print("El valor fahre es {} y su conversión en celsius es : {} .".format(ValorFah, ValorCel))
'''
Ejercicio 6
Calcular la media de tres números pedidos por teclado.
'''
'''
Ejercicio 7
Realiza un programa que reciba una cantidad de minutos y muestre por pantalla a cuantas horas y minutos corresponde.
Por ejemplo: 1000 minutos son 16 horas y 40 minutos.
'''
'''
Ejercicio 8
Un vendedor recibe un sueldo base mas un 10% extra por comisión de sus ventas, el vendedor desea saber cuanto dinero obtendrá por concepto de comisiones por las tres ventas que realiza en el mes y el total que recibirá en el mes tomando en cuenta su sueldo base y comisiones.
'''
'''
Ejercicio 9
Una tienda ofrece un descuento del 15% sobre el total de la compra y un cliente desea saber cuanto deberá pagar finalmente por su compra.
'''
'''
Ejercicio 10
Un alumno desea saber cual será su calificación final en la materia de Algoritmos. Dicha calificación se compone de los siguientes porcentajes:
• 55% del promedio de sus tres calificaciones parciales.
• 30% de la calificación del examen final.
• 15% de la calificación de un trabajo final.
'''
|
985,710 | ae9eb34563bfb5abac9c2528fd56c7a80dddabf9 |
# Earlier file-handling experiments, kept commented out for reference:
#with open("newdatafile.txt", "r") as f:
# content = f.read()
#print(content)
#with open("newdatafile.txt", "w") as f:
# f.write("Qausain is written by coding")
#
# Import the local strip_function module and print its greeting() result.
import strip_function as f
print(f.greeting())
|
985,711 | a4b38310457f60d50bd90514bacf63ea67e9481e | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-03 14:53
from __future__ import unicode_literals
from django.db import migrations, models
import grid.models
class Migration(migrations.Migration):
    """Auto-generated migration: make Article.image optional."""

    dependencies = [
        ('grid', '0020_auto_20160703_0952'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='image',
            # blank/null make the image optional; the upload path is
            # computed by grid.models.get_image_path at save time
            field=models.ImageField(blank=True, null=True, upload_to=grid.models.get_image_path),
        ),
    ]
|
985,712 | d64c612593d63a1827eebc7b3e31c27b180f0f80 | from .common import (
OnExecutionDirective,
OnBuildDirective,
OnIntrospectionDirective,
CommonDirective,
)
from .deprecated import Deprecated
from .directive import Directive
from .non_introspectable import NonIntrospectable
from .skip import Skip
from .include import Include
# Registry of the directives available by default, keyed by the name
# used in the schema (e.g. @deprecated, @skip).
BUILT_IN_DIRECTIVES = {
    "deprecated": Deprecated,
    "non_introspectable": NonIntrospectable,
    "skip": Skip,
    "include": Include,
}
|
985,713 | 05c0877bce2ccb8bfb468b016c668a92161cf7e1 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import db
class YoukuMovieCategoryPipeline(object):
    """Scrapy item pipeline that persists Youku movie-category items to MySQL."""

    def open_spider(self, spider):
        # self.file = open('ten.txt', 'w', encoding="utf-8")
        # print("-----open------")
        self.connect = db.mysqlConnect
        # use a cursor for all insert/update/select statements
        self.cursor = self.connect.cursor()

    def process_item(self, item, spider):
        # line = "{}\n".format(json.dumps(dict(item)))
        # self.file.write(line)
        # return item
        # NOTE(review): item['key'] is bound to the 'type' column below —
        # confirm the field-to-column mapping is intentional.
        sql = 'insert into youku_movie_category (name, url, type, key_val, label) value (%s, %s, %s, %s, %s )'
        self.cursor.execute(sql,  # parameterized insert via the MySQL driver
                            (item['name'],  # item fields map to table columns
                             item['url'],
                             item['key'],
                             item['key_val'],
                             item['label'],
                             ))
        # commit the insert
        self.connect.commit()
        return item  # pipelines must return the item for later stages

    def close_spider(self, spider):
        # self.file.close()
        # print("--------close---------")
        self.cursor.close()
        # close the database connection
        self.connect.close()
|
985,714 | 6d3944a8fcf9802b7c82f32b973f060f2693e727 | import asyncio
import json
from sys import stderr
from gtts import gTTS
from pathlib import Path
import httpx
URL = "https://v2.jokeapi.dev/joke/Any?type=single"
async def get_call() -> dict:
    """Fetch one random single-type joke from JokeAPI and return the
    parsed JSON payload.

    Exits the process with status 1 on a non-200 response or on
    malformed JSON.
    """
    deserialized_json = {}
    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(URL)
            # NOTE(review): assert is stripped under python -O; network
            # errors raised by httpx itself are not caught here.
            assert response.status_code == 200
            deserialized_json = json.loads(response.text)
    except AssertionError:
        print("Error calling {}. HTTP status code: {}".format(URL, response.status_code), file=stderr)
        exit(1)
    except json.decoder.JSONDecodeError as e:
        print("JSON decode error: {}".format(e), file=stderr)
        exit(1)
    return deserialized_json
def joke_to_mp3(joke_str, joke_id) -> None:
    """Synthesize *joke_str* to mp3s/joke-<joke_id>.mp3 via Google TTS.

    Skips synthesis if a file for this joke id already exists; exits the
    process with status 1 on any TTS or filesystem error.
    Assumes the mp3s/ directory already exists — TODO confirm.
    """
    path_file = "mp3s/joke-{}.mp3".format(joke_id)
    mp3_file_path = Path(path_file)
    if mp3_file_path.is_file():
        print("This joke ID already exists")
        return
    try:
        tts = gTTS(joke_str, lang='en')
        with open(path_file, "wb") as mp3:
            tts.write_to_fp(mp3)
    except Exception as e:
        # broad catch: any gTTS/IO failure aborts the program
        print(e, file=stderr)
        exit(1)
async def main() -> None:
    """Fetch one joke and persist it as an MP3."""
    joke = await get_call()
    # JokeAPI single-type jokes carry the text in "joke" and an "id" field
    joke_str = joke["joke"]
    joke_id = joke["id"]
    joke_to_mp3(joke_str, joke_id)


if __name__ == "__main__":
    asyncio.run(main())
|
985,715 | 8465c96a959c36e01924d0efcab39b26e3471c03 | # How to change tcp keepalive timer using python script?
# NOTE(review): fragment, not runnable as-is — it needs `import socket`,
# a socket instance, and arguments, e.g.
#   sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# plus the TCP_KEEPIDLE/KEEPINTVL/KEEPCNT options to tune the keepalive timer.
socket.setsockopt
|
985,716 | 8e5c629de3e6b01293a7aa620feddd7f0b06eeef | import time
from selenium import webdriver
# IE driver path (original comment said "Chromedriver", but this script
# launches Internet Explorer via IEDriverServer)
ie_driver_path = 'C:\z-BrowserDrivers\IEDriverServer.exe'
# Create a new IE session
driver = webdriver.Ie(ie_driver_path)
# wait up to 30s for elements to appear before failing lookups
driver.implicitly_wait(30)
driver.maximize_window()
# Navigate to the application home page
driver.get("http://www.google.com/")
time.sleep(10)
# Close browser
driver.close()
985,717 | fa2dc6bc7fbcb81307aaca0af8dc30f3e4b64821 | from django.urls import path
from . import views
# (url_patterns, app_namespace) tuple for django's include(); routes are
# reversed as 'producto:<name>', e.g. reverse('producto:create').
producto_patterns = ([
    path('producto/gestionar', views.ProductManage.as_view(), name='manage'),
    path('producto/create/', views.ProductCreate.as_view(), name='create'),
    path('producto/update/<int:pk>/', views.ProductUpdate.as_view(), name='update'),
    path('producto/delete/<int:pk>/', views.ProductoDelete.as_view(), name='delete'),
    path('categoria/gestionar', views.CategoriaList.as_view(), name='categoria_index'),
    path('categoria/create/', views.CategoriaCreate.as_view(), name='categoria_create'),
    path('categoria/update/<int:pk>/', views.CategoriaUpdate.as_view(), name='categoria_update'),
    path('categoria/delete/<int:pk>/', views.CategoriaDelete.as_view(), name='categoria_delete'),
], 'producto')
|
985,718 | 3fd80393790dedc789e540ad89665395139f0fa5 | import re
# Precompile the patterns once so they can be reused for many searches
regexes = [ re.compile(p) for p in ['this', 'that'] ]
text = 'Does this text match the pattern?'
print('Text: %r\n' % text)
for regex in regexes:
    print('Seeking "%s" ->' % regex.pattern)
    # search() scans the whole string for the first occurrence
    if regex.search(text):
        print('match!')
    else:
        print('no match')
# findall returns all substrings of the input that match the pattern
# without overlapping
text = 'abbaaabbbbaaaa'
pattern = 'ab'
for match in re.findall(pattern, text):
    print('Found "%s"' % match)
# finditer returns an iterator that produces Match instances instead of the
# strings returned by findall(), giving access to start/end positions
for match in re.finditer(pattern, text):
    s = match.start()
    e = match.end()
    print('Found "%s" at %d:%d' % (text[s:e], s, e))
985,719 | c8dec279ee447d8632c506ae28318c724979e8ea | from datetime import datetime
import pytest
import time
from app import db, create_app
from app.controllers.create_db_data import create_db_data
from .utils import login
from app.models import Schedule
@pytest.fixture
def client():
    """Flask test client backed by a fresh, seeded database per test."""
    app = create_app(environment="testing")
    app.config["TESTING"] = True
    with app.test_client() as client:
        # push an app context so db operations work outside a request
        app_ctx = app.app_context()
        app_ctx.push()
        db.drop_all()
        db.create_all()
        create_db_data()
        yield client
        # teardown: discard the session and schema created above
        db.session.remove()
        db.drop_all()
        app_ctx.pop()
def call_client_func():
    # NOTE(review): unused helper that just sleeps 10 seconds — looks like
    # leftover scaffolding for a timing test; consider removing.
    time.sleep(10)
def test_launch_post(client):
    """POST the launch form twice (manual submit, then regular submit)
    and check that a Schedule row exists after each POST."""
    login(client)
    DELTA_HOURS = 5
    now = datetime.now()
    # NOTE(review): DELTA_HOURS and frontend_date are currently unused
    # in this test — confirm whether they were meant for an assertion.
    frontend_date = datetime(now.year, now.month, now.day, now.hour, now.minute)
    data = dict(
        launch_time=now.strftime("%H:%M"),
        value1=56,
        value2=56,
        value3=56,
        value4=56,
        value5=56,
        value6=56,
        value7=56,
        value8=56,
        value9=56,
        value10=56,
        checkbox1=False,
        checkbox2=False,
        checkbox3=False,
        checkbox4=False,
        checkbox5=False,
        checkbox6=False,
        checkbox7=False,
        checkbox8=False,
        checkbox9=False,
        checkbox10=False,
        submit_manual=True,
    )
    response = client.post(
        "/",
        data=data,
    )
    assert response.status_code == 200
    s: Schedule = Schedule.query.first()
    assert s
    # a manual submit should flag the schedule to run immediately
    assert s.is_run_now
    # re-submit the same form through the regular (scheduled) submit button
    del data["submit_manual"]
    data["submit"] = True
    response = client.post(
        "/",
        data=data,
    )
    s: Schedule = Schedule.query.first()
    assert s
|
985,720 | ebbcb0b09b0835956131cbf4000c5355e5bc21c3 | import os
import shutil
from baseline.utils import unzip_model, read_config_file, write_json, str_file
import json
import yaml
from xpclient import Configuration, ApiClient
from xpclient.api import XpctlApi
from xpclient.models import Result, Experiment
def read_logs(file_name):
    """Parse a JSON-lines log file into a list of event dicts."""
    with open(file_name) as log_file:
        return [json.loads(line) for line in log_file]
def convert_to_result(event):
    """Turn one training-log event dict into a list of xpclient Results.

    Every key except the bookkeeping fields (tick_type, tick, phase) is
    treated as a metric and becomes its own Result.
    """
    bookkeeping = ['tick_type', 'tick', 'phase']
    return [
        Result(
            metric=metric,
            value=event[metric],
            tick_type=event['tick_type'],
            tick=event['tick'],
            phase=event['phase'],
        )
        for metric in event.keys() - bookkeeping
    ]
def flatten(_list):
    """Concatenate a list of lists into a single flat list."""
    flat = []
    for sublist in _list:
        flat.extend(sublist)
    return flat
def to_experiment(task, config, log, **kwargs):
    """Build an xpclient Experiment from a config and a training log.

    Args:
        task: task name stored on the experiment.
        config: a config object, or a path to a config file.
        log: a list of event dicts, or a path to a JSON-lines log file.
        **kwargs: additional Experiment fields (e.g. username, label).

    Returns:
        xpclient.models.Experiment with train/valid/test events populated.
    """
    # accept either an in-memory log object or a path to a log file
    # (isinstance rather than `type(...) is str` also handles str subclasses)
    if isinstance(log, str):
        events_obj = read_logs(log)
    else:
        events_obj = log

    def _phase_events(phase):
        # one flat list of Results covering every event in the given phase
        return flatten(
            [convert_to_result(event)
             for event in events_obj if event['phase'] == phase])

    train_events = _phase_events('Train')
    valid_events = _phase_events('Valid')
    test_events = _phase_events('Test')
    # accept either an in-memory config object or a path to a config file
    if isinstance(config, str):
        config = json.dumps(read_config_file(config))
    else:
        config = json.dumps(config)
    d = kwargs
    d.update({'task': task,
              'config': config,
              'train_events': train_events,
              'valid_events': valid_events,
              'test_events': test_events
              })
    return Experiment(**d)
def store_model(checkpoint_base, config_sha1, checkpoint_store, print_fn=print):
    """Copy a model's checkpoint files into the store and zip them.

    Files land under ``<checkpoint_store>/<config_sha1>/<n>.zip`` where ``n``
    increments with every stored version of the same config.

    :param checkpoint_base: base path of the checkpoint (possibly zipped)
    :param config_sha1: hash of the config; used as the store subdirectory
    :param checkpoint_store: root directory of the model store
    :param print_fn: logging callback (defaults to ``print``)
    :return: path of the written zip file, or None on failure
    """
    checkpoint_base = unzip_model(checkpoint_base)
    mdir, mbase = os.path.split(checkpoint_base)
    mdir = mdir if mdir else "."
    if not os.path.exists(mdir):
        print_fn("no directory found for the model location: [{}], aborting command".format(mdir))
        return None
    # Checkpoint files are named "<base>-..." or "<base>.<ext>".
    mfiles = ["{}/{}".format(mdir, x) for x in os.listdir(mdir) if x.startswith(mbase + "-") or
              x.startswith(mbase + ".")]
    if not mfiles:
        print_fn("no model files found with base [{}] at location [{}], aborting command".format(mbase, mdir))
        return None
    model_loc_base = "{}/{}".format(checkpoint_store, config_sha1)
    if not os.path.exists(model_loc_base):
        os.makedirs(model_loc_base)
    # Existing versions are stored as "<n>.zip"; the next one is max(n) + 1.
    dirs = [int(x[:-4]) for x in os.listdir(model_loc_base) if x.endswith(".zip") and x[:-4].isdigit()]
    # we expect dirs in numbers.
    new_dir = "1" if not dirs else str(max(dirs) + 1)
    model_loc = "{}/{}".format(model_loc_base, new_dir)
    os.makedirs(model_loc)
    for mfile in mfiles:
        shutil.copy(mfile, model_loc)
        print_fn("writing model file: [{}] to store: [{}]".format(mfile, model_loc))
    print_fn("zipping model files")
    shutil.make_archive(base_name=model_loc,
                        format='zip',
                        root_dir=model_loc_base,
                        base_dir=new_dir)
    # The staging directory is deleted once its contents are in the archive.
    shutil.rmtree(model_loc)
    print_fn("model files zipped and written")
    return model_loc + ".zip"
def write_config_file(content, filepath):
    """Write a config object to disk as YAML or JSON.

    YAML is chosen when the file extension is ``.yml``/``.yaml``; everything
    else is written as JSON. This method optionally supports YAML, if the
    dependency was already installed. O.W. JSON plz.

    :param content: config object (a JSON/YAML-serializable structure)
    :param filepath: (``str``) destination path
    :return: whatever the underlying writer returns
    """
    # str.endswith accepts a tuple of suffixes -- one call instead of two.
    if filepath.endswith(('.yml', '.yaml')):
        return write_yaml(content, filepath)
    return write_json(content, filepath)
@str_file(filepath="w")
def write_yaml(content, filepath):
    """Serialize *content* to *filepath* as block-style (non-flow) YAML."""
    yaml.dump(content, stream=filepath, default_flow_style=False)
def xpctl_client(host):
    """Create an XpctlApi client bound to the given xpctl server host."""
    return XpctlApi(ApiClient(Configuration(host)))
|
985,721 | 20ac86c620efa556f1c8e0902988fa3022d65158 | import os
import csv
"""Parse a csv file and create list of Car-objects"""
#Base class for all cars
class CarBase:
    """Common attributes shared by every vehicle row parsed from the CSV."""

    def __init__(self, car_type, photo_file_name, brand, carrying):
        self.car_type = car_type
        self.photo_file_name = photo_file_name
        self.brand = brand
        self.carrying = carrying

    def get_photo_file_ext(self):
        # NOTE(review): os.path.splitext returns a (root, ext) tuple; despite
        # the name this returns the whole tuple, not just the extension --
        # confirm whether callers expect splitext(...)[1] instead.
        return os.path.splitext(self.photo_file_name)
#Passenger car class
class Car(CarBase):
    """Passenger car: adds the number of passenger seats to the base fields."""

    def __init__(self, car_type, photo_file_name, brand, carrying, passenger_seats_count):
        super().__init__(car_type, photo_file_name, brand, carrying)
        self.passenger_seats_count = passenger_seats_count
#Truck car class
class Truck(CarBase):
    """Truck: parses the 'WxHxL' body-dimension string into three floats."""

    def __init__(self, car_type, photo_file_name, brand, carrying, body_whl):
        super().__init__(car_type, photo_file_name, brand, carrying)
        self.body_width, self.body_height, self.body_length = self._parser_whl(body_whl)

    def _parser_whl(self, body_whl):
        """Split 'WxHxL' into [width, height, length] floats.

        Any malformed input (non-string, wrong field count, non-numeric or
        negative values) yields [0, 0, 0] instead of raising.
        """
        zero_whl = [0, 0, 0]
        # Robustness fix: the original raised AttributeError when body_whl
        # was not a string (e.g. a missing CSV field).
        if not isinstance(body_whl, str):
            return zero_whl
        parts = body_whl.split('x')
        if len(parts) != 3:
            return zero_whl
        try:
            dims = [float(part) for part in parts]
        except ValueError:
            return zero_whl
        if any(dim < 0 for dim in dims):
            return zero_whl
        return dims

    def get_body_volume(self):
        """Return width * height * length of the truck body."""
        return self.body_width * self.body_height * self.body_length
#Special car class
class SpecMachine(CarBase):
    """Special machine: adds a free-form 'extra' description field."""

    def __init__(self, car_type, photo_file_name, brand, carrying, extra):
        super().__init__(car_type, photo_file_name, brand, carrying)
        self.extra = extra
#getter cars list
def get_car_list(csv_filename):
    """Parse the semicolon-delimited CSV and return a list of car objects.

    Each row is dispatched on its 'car_type' column to Car, Truck or
    SpecMachine (as the module docstring states); malformed rows are skipped.

    The original body printed each row dict and then crashed with IndexError
    on `'{}'.format()` before ever building an object; this implements the
    documented behavior instead.
    """
    car_list = []
    with open(csv_filename) as csv_fd:
        reader = csv.reader(csv_fd, delimiter=';')
        list_title = next(reader)  # header row: column name -> position
        for row in reader:
            # Skip rows whose field count does not match the header.
            if len(row) != len(list_title):
                continue
            car_attr = dict(zip(list_title, row))
            try:
                car_type = car_attr['car_type']
                if car_type == 'car':
                    car_list.append(Car(car_type, car_attr['photo_file_name'],
                                        car_attr['brand'], car_attr['carrying'],
                                        car_attr['passenger_seats_count']))
                elif car_type == 'truck':
                    car_list.append(Truck(car_type, car_attr['photo_file_name'],
                                          car_attr['brand'], car_attr['carrying'],
                                          car_attr['body_whl']))
                elif car_type == 'spec_machine':
                    car_list.append(SpecMachine(car_type, car_attr['photo_file_name'],
                                                car_attr['brand'], car_attr['carrying'],
                                                car_attr['extra']))
            except KeyError:
                # Row is missing a required column -- skip it.
                continue
    return car_list
985,722 | eddb17c5bc900573fcbd18090dabf88eca993d08 | import scrapy
# Listing URLs for the 1point3acres forum sections this spider can crawl.
# NOTE(review): several query parameters appear twice in these URLs
# (typeid/orderby/sortid) -- likely copy-paste residue; the site appears to
# tolerate it, so the strings are left untouched.
NEW_POSITIONS ="http://www.1point3acres.com/bbs/forum.php?mod=forumdisplay&fid=198&typeid=653&orderby=dateline&sortid=192&typeid=653&orderby=dateline&sortid=192&filter=typeid"
INTERVIEW_REPORTS="http://www.1point3acres.com/bbs/forum.php?mod=forumdisplay&fid=145&orderby=dateline&sortid=311&orderby=dateline&sortid=311&filter=author&page=1"
SCHOOL_APPLYS="http://www.1point3acres.com/bbs/forum-27-1.html"
TOFEL_EXAM="http://www.1point3acres.com/bbs/forum.php?mod=forumdisplay&fid=125&filter=typeid&typeid=472&sortid=313"
# The section actually crawled (first listing page).
PAGE_1 = TOFEL_EXAM
# Selector notes gathered while exploring the site's markup:
# response.css("span[title^='共']::text").extract_first()
# ' / 28 页'
# response.css("em[id^='authorposton']::text").extract_first()
# rsp.css("a[class='s xst']").extract()
# detail page url
# rsp.css("a[class='s xst']::attr(href)").extract_first()
# get url
class PostTime(scrapy.Spider):
    """Spider that collects the post date/time of every thread in a forum
    section, walking all listing pages and then each thread page."""
    name = 'post-time'
    def start_requests(self):
        # Start with the first listing page to discover the total page count.
        yield scrapy.Request(url = PAGE_1, callback=self.parse_total_page)
    def parse_total_page(self, rsp):
        # The pagination widget text looks like ' / 28 页'; the slice [3:-1]
        # strips the surrounding characters to leave the page number.
        # NOTE(review): these offsets depend on the exact site markup -- confirm.
        total_page_text = rsp.css("span[title^='共']::text").extract_first()
        if(total_page_text):
            total_page = int(total_page_text[3:-1])
            for p in range(1, total_page + 1):
                next_url = PAGE_1 + '&page=' + str(p)
                #next_url = PAGE_1[:-6] + str(p) + ".html"
                #print('**************')
                #print(next_url)
                yield scrapy.Request(url = next_url, callback=self.parse_list)
    def parse_list(self, rsp):
        # Every thread title on a listing page links to its detail page.
        post_urls = rsp.css("a[class='s xst']::attr(href)").extract()
        for u in post_urls:
            yield scrapy.Request(url = u, callback = self.parse_date)
    def parse_date(self, rsp):
        # The author's post timestamp; the leading 3 characters are a label
        # prefix stripped by [3:]. NOTE(review): confirm against live markup.
        post_on = rsp.css("em[id^='authorposton']::text").extract_first()[3:]
        date, time= post_on.split()
        yield {'date': date, 'time':time}
|
985,723 | a0c9d88e863589d89b1d29164f4d9e4fae4d5690 | from bagatelle.ngs import openBam
def mhsmidcount(bamfile, chromosome, start, end, maxinsert=80, mininsert=1, paired=False):
    """Count fragment middle sites within [start, end] of a chromosome.

    :param bamfile: path to the BAM file
    :param chromosome: reference name (converted to str for fetch)
    :param start: region start coordinate
    :param end: region end coordinate
    :param maxinsert: maximum accepted fragment/alignment length
    :param mininsert: minimum accepted fragment/alignment length
    :param paired: use proper pairs (insert size) instead of single reads
    :return: dict mapping middle-site position -> count
    """
    samfile = openBam.openBam(bamfile)
    counts = dict()

    def _tally(site):
        # Record a middle site only when it lies inside the region.
        if start <= site <= end:
            counts[site] = counts.get(site, 0) + 1

    if paired:
        for read in samfile.fetch(reference=str(chromosome), start=start, end=end):
            # Only forward reads of proper pairs with an acceptable insert size.
            if not read.is_proper_pair or read.is_reverse:
                continue
            if not (mininsert <= read.isize <= maxinsert):
                continue
            pair_start = read.pos
            if read.isize % 2 == 0:
                # Even fragment length: the two central positions.
                first = pair_start + read.isize / 2
                second = first - 1
            else:
                first = pair_start + int(read.isize / 2)
                second = first
            _tally(int(first))
            _tally(int(second))
    else:
        for read in samfile.fetch(reference=str(chromosome), start=start, end=end):
            if not (mininsert <= read.alen <= maxinsert):
                continue
            if read.alen % 2 == 0:
                first = read.pos + read.alen / 2
                second = first + 1
            else:
                first = read.pos + int(read.alen / 2)
                second = first
            _tally(int(first))
            _tally(int(second))
    return counts
def coveragecount(bamfile, chromosome, start, end, maxinsert=80, mininsert=1, paired=False):
    """Count per-base coverage by fragments/alignments overlapping a region.

    :param bamfile: path to the BAM file
    :param chromosome: reference name (converted to str for fetch)
    :param start: region start coordinate
    :param end: region end coordinate
    :param maxinsert: maximum accepted fragment/alignment length
    :param mininsert: minimum accepted fragment/alignment length
    :param paired: use proper pairs (insert size) instead of single reads
    :return: dict mapping genomic position -> coverage count
    """
    samfile = openBam.openBam(bamfile)
    counts = dict()
    if paired:
        for read in samfile.fetch(reference=str(chromosome), start=start, end=end):
            # Only forward reads of proper pairs within the size window;
            # every base spanned by the fragment is incremented.
            if read.is_proper_pair and not read.is_reverse and \
                    mininsert <= read.isize <= maxinsert:
                for site in range(read.pos, read.pos + read.isize):
                    counts[site] = counts.get(site, 0) + 1
    else:
        for read in samfile.fetch(reference=str(chromosome), start=start, end=end):
            if mininsert <= read.alen <= maxinsert:
                for site in range(read.pos, read.pos + read.alen):
                    counts[site] = counts.get(site, 0) + 1
    return counts
|
985,724 | a35bae92a245e5b30fab6333fe1358209da74c59 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for general.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import torch
from robust_loss_pytorch import general
class TestGeneral(parameterized.TestCase):
    """Parameterized tests for general.lossfun across dtypes and devices.

    Each test runs in four variants (float32/float64 x cpu/cuda); the
    'cuda' variants require a CUDA-capable device to pass.
    """
    def setUp(self):
        super(TestGeneral, self).setUp()
        # Fixed seed so the randomized property tests are reproducible.
        np.random.seed(0)
    def _assert_all_close_according_to_type(self, a, b):
        """AssertAllClose() with tighter thresholds for float64 than float32."""
        if a.dtype == np.float32:
            np.testing.assert_allclose(a, b, rtol=1e-6, atol=1e-6)
        elif a.dtype == np.float64:
            np.testing.assert_allclose(a, b, rtol=1e-15, atol=1e-15)
        else:
            assert False
    def _precompute_lossfun_inputs(self, float_dtype, device):
        """Precompute a loss and its derivatives for random inputs and parameters.
        Generates a large number of random inputs to the loss, and random
        shape/scale parameters for the loss function at each sample, and
        computes the loss and its derivative with respect to all inputs and
        parameters, returning everything to be used to assert various properties
        in our unit tests.
        Args:
          float_dtype: The float precision to be used (np.float32 or np.float64).
          device: The device to run on.
        Returns:
          A tuple containing:
           (the number (int) of samples, and the length of all following arrays,
            A np.array (float_dtype) of losses for each sample,
            A np.array (float_dtype) of residuals of each sample (the loss inputs),
            A np array (float_dtype) of shape parameters of each loss,
            A np.array (float_dtype) of scale parameters of each loss,
            A np.array (float_dtype) of derivatives of each loss wrt each x,
            A np.array (float_dtype) of derivatives of each loss wrt each alpha,
            A np.array (float_dtype) of derivatives of each loss wrt each scale)
        Typical usage example:
          (num_samples, loss, x, alpha, scale, d_x, d_alpha, d_scale)
              = self._precompute_lossfun_inputs(np.float32, 'cpu')
        """
        num_samples = 100000
        # Normally distributed inputs.
        x = float_dtype(np.random.normal(size=num_samples))
        # Uniformly distributed values in (-16, 3), quantized to the nearest 0.1
        # to ensure that we hit the special cases at 0, 2.
        alpha = float_dtype(
            np.round(np.random.uniform(-16, 3, num_samples) * 10) / 10.0
        )
        # Push the sampled alphas at the extents of the range to +/- infinity, so
        # that we probe those cases too.
        alpha[alpha == 3.0] = float_dtype(float("inf"))
        alpha[alpha == -16.0] = -float_dtype(float("inf"))
        # Random log-normally distributed values in approx (1e-5, 100000):
        scale = float_dtype(np.exp(np.random.normal(size=num_samples) * 4.0) + 1e-5)
        # Compute the loss and its derivative with respect to all three inputs.
        var_x = torch.autograd.Variable(
            torch.tensor(x, device=device), requires_grad=True
        )
        var_alpha = torch.autograd.Variable(
            torch.tensor(alpha, device=device), requires_grad=True
        )
        var_scale = torch.autograd.Variable(
            torch.tensor(scale, device=device), requires_grad=True
        )
        loss = general.lossfun(var_x, var_alpha, var_scale)
        sum_loss = torch.sum(loss)
        sum_loss.backward()
        d_x = var_x.grad.cpu().detach().numpy()
        d_alpha = var_alpha.grad.cpu().detach().numpy()
        d_scale = var_scale.grad.cpu().detach().numpy()
        loss = loss.cpu().detach().numpy()
        return (num_samples, loss, x, alpha, scale, d_x, d_alpha, d_scale)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testLossfunPreservesDtype(self, float_dtype, device):
        """Check the loss's output has the same precision as its input."""
        n = 16
        x = torch.tensor(float_dtype(np.random.normal(size=n)), device=device)
        alpha = torch.tensor(float_dtype(np.random.normal(size=n)), device=device)
        scale = torch.tensor(
            float_dtype(np.exp(np.random.normal(size=n))), device=device
        )
        y = general.lossfun(x, alpha, scale)
        np.testing.assert_equal(y.cpu().detach().numpy().dtype, float_dtype)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testLossfunPreservesDevice(self, float_dtype, device):
        """Check the loss's output has the same precision as its input."""
        n = 16
        x = torch.tensor(float_dtype(np.random.normal(size=n)), device=device)
        alpha = torch.tensor(float_dtype(np.random.normal(size=n)), device=device)
        scale = torch.tensor(
            float_dtype(np.exp(np.random.normal(size=n))), device=device
        )
        y = general.lossfun(x, alpha, scale)
        np.testing.assert_equal(y.device.type, device)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testDerivativeIsMonotonicWrtX(self, float_dtype, device):
        # Check that the loss increases monotonically with |x|.
        _, _, x, alpha, _, d_x, _, _ = self._precompute_lossfun_inputs(
            float_dtype, device
        )
        d_x[~np.isfinite(d_x)] = 0  # This is just to suppress a warning below.
        mask = np.isfinite(alpha) & (np.abs(d_x) > (100.0 * np.finfo(float_dtype).eps))
        np.testing.assert_equal(np.sign(d_x[mask]), np.sign(x[mask]))
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testLossIsNearZeroAtOrigin(self, float_dtype, device):
        # Check that the loss is near-zero when x is near-zero.
        _, loss, x, _, _, _, _, _ = self._precompute_lossfun_inputs(float_dtype, device)
        np.testing.assert_(np.all(np.abs(loss[np.abs(x) < 1e-5]) < 1e-5))
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testLossIsQuadraticNearOrigin(self, float_dtype, device):
        # Check that the loss is well-approximated by a quadratic when |x| < scale
        _, loss, x, _, scale, _, _, _ = self._precompute_lossfun_inputs(
            float_dtype, device
        )
        mask = np.abs(x) < (0.5 * scale)
        loss_quad = 0.5 * np.square(x / scale)
        np.testing.assert_allclose(loss_quad[mask], loss[mask], rtol=1e-5, atol=1e-2)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testLossIsBoundedWhenAlphaIsNegative(self, float_dtype, device):
        # Assert that loss < (alpha - 2)/alpha when alpha < 0.
        _, loss, _, alpha, _, _, _, _ = self._precompute_lossfun_inputs(
            float_dtype, device
        )
        mask = alpha < 0.0
        min_val = np.finfo(float_dtype).min
        alpha_clipped = np.maximum(min_val, alpha[mask])
        np.testing.assert_(
            np.all(loss[mask] <= ((alpha_clipped - 2.0) / alpha_clipped))
        )
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testDerivativeIsBoundedWhenAlphaIsBelow2(self, float_dtype, device):
        # Assert that |d_x| < |x|/scale^2 when alpha <= 2.
        _, _, x, alpha, scale, d_x, _, _ = self._precompute_lossfun_inputs(
            float_dtype, device
        )
        mask = np.isfinite(alpha) & (alpha <= 2)
        np.testing.assert_(
            np.all(
                (
                    np.abs(d_x[mask])
                    <= (
                        (np.abs(x[mask]) + (100.0 * np.finfo(float_dtype).eps))
                        / scale[mask] ** 2
                    )
                )
            )
        )
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testDerivativeIsBoundedWhenAlphaIsBelow1(self, float_dtype, device):
        # Assert that |d_x| < 1/scale when alpha <= 1.
        _, _, _, alpha, scale, d_x, _, _ = self._precompute_lossfun_inputs(
            float_dtype, device
        )
        mask = np.isfinite(alpha) & (alpha <= 1)
        np.testing.assert_(
            np.all(
                (
                    np.abs(d_x[mask])
                    <= ((1.0 + (100.0 * np.finfo(float_dtype).eps)) / scale[mask])
                )
            )
        )
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testAlphaDerivativeIsPositive(self, float_dtype, device):
        # Assert that d_loss / d_alpha > 0.
        _, _, _, alpha, _, _, d_alpha, _ = self._precompute_lossfun_inputs(
            float_dtype, device
        )
        mask = np.isfinite(alpha)
        np.testing.assert_(np.all(d_alpha[mask] > (-300.0 * np.finfo(float_dtype).eps)))
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testScaleDerivativeIsNegative(self, float_dtype, device):
        # Assert that d_loss / d_scale < 0.
        _, _, _, alpha, _, _, _, d_scale = self._precompute_lossfun_inputs(
            float_dtype, device
        )
        mask = np.isfinite(alpha)
        np.testing.assert_(np.all(d_scale[mask] < (100.0 * np.finfo(float_dtype).eps)))
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testLossIsScaleInvariant(self, float_dtype, device):
        # Check that loss(mult * x, alpha, mult * scale) == loss(x, alpha, scale)
        (num_samples, loss, x, alpha, scale, _, _, _) = self._precompute_lossfun_inputs(
            float_dtype, device
        )
        # Random log-normally distributed scalings in ~(0.2, 20)
        mult = float_dtype(np.maximum(0.2, np.exp(np.random.normal(size=num_samples))))
        x = torch.tensor(np.array(mult * x, dtype=float_dtype), device=device)
        alpha = torch.tensor(np.array(alpha, dtype=float_dtype), device=device)
        scale = torch.tensor(np.array(mult * scale, dtype=float_dtype), device=device)
        # Compute the scaled loss.
        loss_scaled = general.lossfun(x, alpha, scale).cpu().detach()
        np.testing.assert_allclose(loss, loss_scaled, atol=1e-4, rtol=1e-4)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testAlphaEqualsNegativeInfinity(self, float_dtype, device):
        # Check that alpha == -Infinity reproduces Welsch aka Leclerc loss.
        x = float_dtype(np.arange(-20, 20, 0.1))
        alpha = float_dtype(np.array([-float("inf")]))
        scale = float_dtype(np.array([2.0]))
        # Our loss.
        x_t = torch.tensor(x, device=device)
        alpha_t = torch.tensor(alpha).to(x_t)
        scale_t = torch.tensor(scale).to(x_t)
        loss = general.lossfun(x_t, alpha_t, scale_t).cpu().detach().numpy()
        # Welsch/Leclerc loss.
        loss_true = 1.0 - np.exp(-0.5 * (x / scale) ** 2)
        self._assert_all_close_according_to_type(loss, loss_true)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testAlphaEqualsNegativeTwo(self, float_dtype, device):
        # Check that alpha == -2 reproduces Geman-McClure loss.
        x = float_dtype(np.arange(-20, 20, 0.1))
        alpha = float_dtype(np.array(-2.0))
        scale = float_dtype(np.array(2.0))
        # Our loss.
        x_t = torch.tensor(x, device=device)
        alpha_t = torch.tensor(alpha).to(x_t)
        scale_t = torch.tensor(scale).to(x_t)
        loss = general.lossfun(x_t, alpha_t, scale_t).cpu().detach().numpy()
        # Geman-McClure loss.
        loss_true = 2.0 * (x / scale) ** 2 / ((x / scale) ** 2 + 4.0)
        self._assert_all_close_according_to_type(loss, loss_true)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testAlphaEqualsZero(self, float_dtype, device):
        # Check that alpha == 0 reproduces Cauchy aka Lorentzian loss.
        x = np.arange(-20, 20, 0.1, float_dtype)
        alpha = float_dtype(0.0)
        scale = float_dtype(2.0)
        # Our loss.
        x_t = torch.tensor(x, device=device)
        alpha_t = torch.tensor(alpha).to(x_t)
        scale_t = torch.tensor(scale).to(x_t)
        loss = general.lossfun(x_t, alpha_t, scale_t).cpu().detach().numpy()
        # Cauchy/Lorentzian loss.
        loss_true = np.log(0.5 * (x / scale) ** 2 + 1.0)
        self._assert_all_close_according_to_type(loss, loss_true)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testAlphaEqualsOne(self, float_dtype, device):
        # Check that alpha == 1 reproduces Charbonnier aka pseudo-Huber loss.
        x = np.arange(-20, 20, 0.1, float_dtype)
        alpha = float_dtype(1.0)
        scale = float_dtype(2.0)
        # Our loss.
        x_t = torch.tensor(x, device=device)
        alpha_t = torch.tensor(alpha).to(x_t)
        scale_t = torch.tensor(scale).to(x_t)
        loss = general.lossfun(x_t, alpha_t, scale_t).cpu().detach().numpy()
        # Charbonnier loss.
        loss_true = np.sqrt((x / scale) ** 2 + 1.0) - 1.0
        self._assert_all_close_according_to_type(loss, loss_true)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testAlphaEqualsTwo(self, float_dtype, device):
        # Check that alpha == 2 reproduces L2 loss.
        x = np.arange(-20, 20, 0.1, float_dtype)
        alpha = float_dtype(2.0)
        scale = float_dtype(2.0)
        # Our loss.
        x_t = torch.tensor(x, device=device)
        alpha_t = torch.tensor(alpha).to(x_t)
        scale_t = torch.tensor(scale).to(x_t)
        loss = general.lossfun(x_t, alpha_t, scale_t).cpu().detach().numpy()
        # L2 Loss.
        loss_true = 0.5 * (x / scale) ** 2
        self._assert_all_close_according_to_type(loss, loss_true)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testAlphaEqualsFour(self, float_dtype, device):
        # Check that alpha == 4 reproduces a quartic.
        x = np.arange(-20, 20, 0.1, float_dtype)
        alpha = float_dtype(4.0)
        scale = float_dtype(2.0)
        # Our loss.
        x_t = torch.tensor(x, device=device)
        alpha_t = torch.tensor(alpha).to(x_t)
        scale_t = torch.tensor(scale).to(x_t)
        loss = general.lossfun(x_t, alpha_t, scale_t).cpu().detach().numpy()
        # The true loss.
        loss_true = np.square(np.square(x / scale)) / 8.0 + np.square(x / scale) / 2.0
        self._assert_all_close_according_to_type(loss, loss_true)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testAlphaEqualsInfinity(self, float_dtype, device):
        # Check that alpha == Infinity takes the correct form.
        x = np.arange(-20, 20, 0.1, float_dtype)
        alpha = float_dtype(float("inf"))
        scale = float_dtype(2.0)
        # Our loss.
        x_t = torch.tensor(x, device=device)
        alpha_t = torch.tensor(alpha).to(x_t)
        scale_t = torch.tensor(scale).to(x_t)
        loss = general.lossfun(x_t, alpha_t, scale_t).cpu().detach().numpy()
        # The true loss.
        loss_true = np.exp(0.5 * np.square(x / scale)) - 1.0
        self._assert_all_close_according_to_type(loss, loss_true)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testApproximateLossIsAccurate(self, float_dtype, device):
        # Check that the approximate loss (lossfun() with epsilon=1e-6) reasonably
        # approximates the true loss (lossfun() with epsilon=0.) for a range of
        # values of alpha (skipping alpha=0, where the approximation is poor).
        x = np.arange(-10, 10, 0.1, float_dtype)
        scale = float_dtype(1.7)
        for alpha in [-4, -2, -0.2, -0.01, 0.01, 0.2, 1, 1.99, 2, 2.01, 4]:
            alpha = float_dtype(alpha)
            x_t = torch.tensor(x, device=device)
            alpha_t = torch.tensor(alpha).to(x_t)
            scale_t = torch.tensor(scale).to(x_t)
            loss = general.lossfun(x_t, alpha_t, scale_t).cpu().detach().numpy()
            loss_approx = (
                general.lossfun(x_t, alpha_t, scale_t, approximate=True)
                .cpu()
                .detach()
                .numpy()
            )
            np.testing.assert_allclose(loss, loss_approx, rtol=1e-5, atol=1e-4)
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testLossAndGradientsAreFinite(self, float_dtype, device):
        # Test that the loss and its approximation both give finite losses and
        # derivatives everywhere that they should for a wide range of values.
        for approximate in [False, True]:
            num_samples = 100000
            # Normally distributed inputs.
            x = float_dtype(np.random.normal(size=num_samples))
            # Uniformly distributed values in (-16, 3), quantized to the nearest
            # 0.1 to ensure that we hit the special cases at 0, 2.
            alpha = float_dtype(
                np.round(np.random.uniform(-16, 3, num_samples) * 10) / 10.0
            )
            # Random log-normally distributed values in approx (1e-5, 100000):
            scale = float_dtype(np.exp(np.random.normal(size=num_samples) * 4.0) + 1e-5)
            # Compute the loss and its derivative with respect to all three inputs.
            var_x = torch.autograd.Variable(
                torch.tensor(x, device=device), requires_grad=True
            )
            var_alpha = torch.autograd.Variable(
                torch.tensor(alpha, device=device), requires_grad=True
            )
            var_scale = torch.autograd.Variable(
                torch.tensor(scale, device=device), requires_grad=True
            )
            loss = general.lossfun(var_x, var_alpha, var_scale, approximate=approximate)
            sum_loss = torch.sum(loss)
            sum_loss.backward()
            d_x = var_x.grad.cpu().detach().numpy()
            d_alpha = var_alpha.grad.cpu().detach().numpy()
            d_scale = var_scale.grad.cpu().detach().numpy()
            loss = loss.cpu().detach().numpy()
            for v in [loss, d_x, d_alpha, d_scale]:
                np.testing.assert_(np.all(np.isfinite(v)))
    @parameterized.named_parameters(
        ("SingleCPU", np.float32, "cpu"),
        ("DoubleCPU", np.float64, "cpu"),
        ("SingleGPU", np.float32, "cuda"),
        ("DoubleGPU", np.float64, "cuda"),
    )
    def testGradientMatchesFiniteDifferences(self, float_dtype, device):
        # Test that the loss and its approximation both return gradients that are
        # close to the numerical gradient from finite differences, with forward
        # differencing. Returning correct gradients is Torch's job, so this is
        # just an aggressive sanity check in case some implementation detail causes
        # gradients to incorrectly go to zero due to quantization or stop_gradients
        # in some op that is used by the loss.
        for approximate in [False, True]:
            num_samples = 100000
            # Normally distributed inputs.
            x = float_dtype(np.random.normal(size=num_samples))
            # Uniformly distributed values in (-16, 3), quantized to the nearest
            # 0.1 and then shifted by 0.05 so that we avoid the special cases at
            # 0 and 2 where the analytical gradient wont match finite differences.
            alpha = float_dtype(
                np.round(np.random.uniform(-16, 3, num_samples) * 10) / 10.0
            )
            # Random uniformy distributed values in [0.5, 1.5]
            scale = float_dtype(np.random.uniform(0.5, 1.5, num_samples))
            # Compute the loss and its derivative with respect to all three inputs.
            var_x = torch.autograd.Variable(
                torch.tensor(x, device=device), requires_grad=True
            )
            var_alpha = torch.autograd.Variable(
                torch.tensor(alpha, device=device), requires_grad=True
            )
            var_scale = torch.autograd.Variable(
                torch.tensor(scale, device=device), requires_grad=True
            )
            loss = general.lossfun(var_x, var_alpha, var_scale, approximate=approximate)
            sum_loss = torch.sum(loss)
            sum_loss.backward()
            d_x = var_x.grad.cpu().detach().numpy()
            d_alpha = var_alpha.grad.cpu().detach().numpy()
            d_scale = var_scale.grad.cpu().detach().numpy()
            loss = loss.cpu().detach().numpy()
            step_size = float_dtype(1e-3)
            # Assert that the 95th percentile of errors is <= 1e-2.
            def assert_percentile_close(v1, v2):
                np.testing.assert_(np.percentile(np.abs(v1 - v2), 95) <= 1e-2)
            def loss_helper(x, a, c):
                x = torch.tensor(x, device=device)
                a = torch.tensor(a).to(x)
                c = torch.tensor(c).to(x)
                return general.lossfun(x, a, c).cpu().detach().numpy()
            n_x = (loss_helper(x + step_size, alpha, scale) - loss) / step_size
            assert_percentile_close(n_x, d_x)
            n_alpha = (loss_helper(x, alpha + step_size, scale) - loss) / step_size
            assert_percentile_close(n_alpha, d_alpha)
            n_scale = (loss_helper(x, alpha, scale + step_size) - loss) / step_size
            assert_percentile_close(n_scale, d_scale)
if __name__ == "__main__":
    # NOTE(review): np.testing.run_module_suite is deprecated/removed in
    # modern NumPy -- confirm the pinned NumPy version still provides it.
    np.testing.run_module_suite()
|
985,725 | 69b49fb2c961a3fec5c3311dcdcec9c5bbc5c1d0 | from django.db.models import Q
from django.shortcuts import render, Http404, get_object_or_404
from django.contrib import messages
from .models import Curso
def index(request):
    """Render the home page listing visible courses, newest first."""
    cursos_visiveis = Curso.objects.filter(mostrar=True).order_by('-id')
    return render(request, 'home/index.html', {'dados': cursos_visiveis})
def mostrar(request, idbusca):
    """Render the detail page for one course, or 404 when it does not exist."""
    curso = get_object_or_404(Curso, id=idbusca)
    return render(request, 'home/detcurso.html', {'dados': curso})
def buscar(request):
    """Search courses by title or description (case-insensitive contains).

    When the 'buscar' query parameter is missing or empty, an info message
    is flashed and the index is re-rendered without results.
    """
    # .get() avoids a MultiValueDictKeyError (HTTP 500) when the parameter
    # is absent; the original request.GET['buscar'] crashed in that case.
    termo = request.GET.get('buscar')
    if not termo:
        messages.add_message(request, messages.INFO, 'Digite um valor válido')
        return render(request, 'home/index.html')
    dados = Curso.objects.order_by('titulo').filter(
        Q(titulo__icontains=termo) | Q(descricao__icontains=termo)
    )
    return render(request, 'home/index.html', {'dados': dados})
985,726 | 1e20543e0f3c194c0fe63f66580153677dfa8066 | """
* Licensed to DSecure.me under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. DSecure.me licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
"""
import hashlib
from django.db import models
from django.core.exceptions import ValidationError
from vmc.common.models import ConfigBaseModel, BaseModel
from vmc.elasticsearch.models import Tenant
class Config(ConfigBaseModel):
    """Per-tenant scanner configuration.

    Tracks when scans were last pulled; the timestamp is reset whenever the
    config is re-assigned to a different tenant. At most one Config may
    exist per tenant (enforced in clean()).
    """
    # Timestamp of the most recent scan pull; None means "never pulled".
    last_scans_pull = models.DateTimeField(default=None, null=True, blank=True)
    tenant = models.OneToOneField(Tenant, null=True, blank=True, on_delete=models.CASCADE)
    scanner = models.CharField(max_length=128)
    filter = models.CharField(max_length=256, null=True, blank=True)
    # Snapshot of the tenant as loaded; save() compares against it to detect
    # a re-assignment.
    __original_tenant = None
    class Meta:
        db_table = 'scanners'
    def __init__(self, *args, **kwargs):
        super(Config, self).__init__(*args, **kwargs)
        self.__original_tenant = self.tenant
    def clean(self):
        super(Config, self).clean()
        # Uniqueness is only checked on create (no pk yet).
        if not self.pk and Config.objects.filter(tenant=self.tenant).exists():
            raise ValidationError('Only one type of Scanner can be assigned to one Tenant')
    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        # Changing the tenant invalidates the pull history.
        if self.tenant != self.__original_tenant:
            self.last_scans_pull = None
        super(Config, self).save(force_insert=force_insert, force_update=force_update,
                                 using=using, update_fields=update_fields)
        self.__original_tenant = self.tenant
        return self
class Scan(BaseModel):
    """A raw scan file, identified by the SHA-256 of its contents."""
    config = models.ForeignKey(Config, on_delete=models.SET_NULL, null=True)
    file = models.TextField()
    # Hex SHA-256 digest of `file`, recomputed on every save.
    file_id = models.CharField(max_length=64)
    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        # hexdigest() already yields lowercase hex; .lower() is a harmless no-op.
        self.file_id = hashlib.sha256(self.file.encode('utf-8')).hexdigest().lower()
        return super(Scan, self).save(force_insert=force_insert, force_update=force_update,
                                      using=using, update_fields=update_fields)
|
985,727 | df849da29a429e4cc0aa8de8a7e024aef632ca40 | from libs.catch_game import Labyrinth, PlayerMove
import random
def test_print(lab):
    """Print the labyrinth's current map and every score entry.

    :param lab: a Labyrinth instance exposing get_current_map()/get_scores().
    """
    print(lab.get_current_map())
    # Fetch the scoreboard once; the original called get_scores() three
    # times (and had a dead `pc = lab.get_scores()` assignment).
    scores = lab.get_scores()
    print(scores)
    for entry in scores:
        print(entry)
def test_loading():
    """Load the default 'map.data' board, print its state, return it."""
    print('Test loading')
    labyrinth = Labyrinth()
    labyrinth.load_from_file('map.data')
    test_print(labyrinth)
    print('Test completed')
    return labyrinth
def test_spawn(lab):
    """Spawn every missing creature on the board and print the result."""
    print('Test spawn')
    lab.spawn_missing()
    test_print(lab)
    print('Test completed')
def test_turn_ai_and_players(lab):
    """Advance the game one turn for all creatures and report the counter."""
    print('Test turn')
    lab.update_game()
    test_print(lab)
    print("Turn: {}".format(lab.turn_count))
    print('Test completed')
def test_spawn_player(lab):
    """Spawn the test player 'Bob the Tester' and return the player object."""
    print('Test spawn player')
    player = lab.spawn_player('🦸', 'Bob the Tester', 0)
    test_print(lab)
    print('Test completed')
    return player
def all():
    """
    Verify that all parts work as expected.

    Interactive smoke test: spawns the board and a player, then loops —
    each turn the player stores a random move and the game advances;
    entering '0' on stdin stops the loop.
    """
    # NOTE(review): this function shadows the builtin all(); renaming it
    # would break external callers, so the name is kept.
    lab = test_loading()
    # The original wrapped the body in `for _ in range(1)`, which executed
    # exactly once; the dead loop was removed without changing behavior.
    print('🦅🐀🐙')
    test_spawn(lab)
    pc = test_spawn_player(lab)
    while True:
        pc.store_move(PlayerMove(random.choice(['walk left', 'walk up', 'walk down', 'walk right'])))
        test_turn_ai_and_players(lab)
        if input() == '0':
            break
985,728 | 5cc67062913e4f37bf655e3cd93ab39ccf85e50b | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for add_context_to_examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import datetime
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import add_context_to_examples # pylint:disable=g-import-not-at-top
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
  """Writes *entries* to a throwaway TFRecord file and yields its path.

  The temporary file is removed when the context exits, even on error.
  """
  tmp = tempfile.NamedTemporaryFile(delete=False)
  path = tmp.name
  try:
    with tf.io.TFRecordWriter(path) as writer:
      for record in entries:
        writer.write(record)
    yield path
  finally:
    os.unlink(path)
def BytesFeature(value):
  """Wraps a single bytes value in a tf.train.Feature."""
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def BytesListFeature(value):
  """Wraps a list of bytes values in a tf.train.Feature."""
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))


def Int64Feature(value):
  """Wraps a single int in a tf.train.Feature."""
  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def Int64ListFeature(value):
  """Wraps a list of ints in a tf.train.Feature."""
  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def FloatListFeature(value):
  """Wraps a list of floats in a tf.train.Feature."""
  return tf.train.Feature(float_list=tf.train.FloatList(value=value))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateContextDataTest(tf.test.TestCase):
def _create_first_tf_example(self):
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(six.ensure_binary('image_id_1')),
'image/height': Int64Feature(4),
'image/width': Int64Feature(4),
'image/object/class/label': Int64ListFeature([5, 5]),
'image/object/class/text': BytesListFeature([six.ensure_binary('hyena'),
six.ensure_binary('hyena')
]),
'image/object/bbox/xmin': FloatListFeature([0.0, 0.1]),
'image/object/bbox/xmax': FloatListFeature([0.2, 0.3]),
'image/object/bbox/ymin': FloatListFeature([0.4, 0.5]),
'image/object/bbox/ymax': FloatListFeature([0.6, 0.7]),
'image/seq_id': BytesFeature(six.ensure_binary('01')),
'image/seq_num_frames': Int64Feature(2),
'image/seq_frame_num': Int64Feature(0),
'image/date_captured': BytesFeature(
six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 0, 0)))),
'image/embedding': FloatListFeature([0.1, 0.2, 0.3]),
'image/embedding_score': FloatListFeature([0.9]),
'image/embedding_length': Int64Feature(3),
'image/embedding_count': Int64Feature(1)
}))
return example.SerializeToString()
def _create_second_tf_example(self):
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(six.ensure_binary('image_id_2')),
'image/height': Int64Feature(4),
'image/width': Int64Feature(4),
'image/object/class/label': Int64ListFeature([5]),
'image/object/class/text': BytesListFeature([six.ensure_binary('hyena')
]),
'image/object/bbox/xmin': FloatListFeature([0.0]),
'image/object/bbox/xmax': FloatListFeature([0.1]),
'image/object/bbox/ymin': FloatListFeature([0.2]),
'image/object/bbox/ymax': FloatListFeature([0.3]),
'image/seq_id': BytesFeature(six.ensure_binary('01')),
'image/seq_num_frames': Int64Feature(2),
'image/seq_frame_num': Int64Feature(1),
'image/date_captured': BytesFeature(
six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 1, 0)))),
'image/embedding': FloatListFeature([0.4, 0.5, 0.6]),
'image/embedding_score': FloatListFeature([0.9]),
'image/embedding_length': Int64Feature(3),
'image/embedding_count': Int64Feature(1)
}))
return example.SerializeToString()
def assert_expected_examples(self, tf_example_list):
self.assertAllEqual(
{tf_example.features.feature['image/source_id'].bytes_list.value[0]
for tf_example in tf_example_list},
{six.ensure_binary('image_id_1'), six.ensure_binary('image_id_2')})
self.assertAllClose(
tf_example_list[0].features.feature[
'image/context_features'].float_list.value,
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
self.assertAllClose(
tf_example_list[1].features.feature[
'image/context_features'].float_list.value,
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
def assert_expected_sequence_example(self, tf_sequence_example_list):
tf_sequence_example = tf_sequence_example_list[0]
num_frames = 2
self.assertAllEqual(
tf_sequence_example.context.feature[
'clip/media_id'].bytes_list.value[0], six.ensure_binary(
'01_0'))
self.assertAllClose(
tf_sequence_example.context.feature[
'image/context_features'].float_list.value,
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
seq_feature_dict = tf_sequence_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
actual_timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
timestamps = [0, 1]
self.assertAllEqual(timestamps, actual_timestamps)
# First image.
self.assertAllClose(
[0.4, 0.5],
seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.0, 0.1],
seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.6, 0.7],
seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0.2, 0.3],
seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
[six.ensure_binary('hyena'), six.ensure_binary('hyena')],
seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:])
# Second example.
self.assertAllClose(
[0.2],
seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.0],
seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.3],
seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[0.1],
seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[six.ensure_binary('hyena')],
seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:])
def assert_expected_key(self, key):
self.assertAllEqual(key, b'01')
def assert_sorted(self, example_collection):
example_list = list(example_collection)
counter = 0
for example in example_list:
frame_num = example.features.feature[
'image/seq_frame_num'].int64_list.value[0]
self.assertGreaterEqual(frame_num, counter)
counter = frame_num
def assert_context(self, example_collection):
example_list = list(example_collection)
for example in example_list:
context = example.features.feature[
'image/context_features'].float_list.value
self.assertAllClose([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], context)
def assert_resized(self, example):
width = example.features.feature['image/width'].int64_list.value[0]
self.assertAllEqual(width, 2)
height = example.features.feature['image/height'].int64_list.value[0]
self.assertAllEqual(height, 2)
def assert_size(self, example):
width = example.features.feature['image/width'].int64_list.value[0]
self.assertAllEqual(width, 4)
height = example.features.feature['image/height'].int64_list.value[0]
self.assertAllEqual(height, 4)
def test_sliding_window(self):
example_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
max_clip_length = 3
stride_length = 3
out_list = [list(i) for i in add_context_to_examples.get_sliding_window(
example_list, max_clip_length, stride_length)]
self.assertAllEqual(out_list, [['a', 'b', 'c'],
['d', 'e', 'f'],
['g']])
def test_rekey_data_fn(self):
sequence_key = 'image/seq_id'
time_horizon = None
reduce_image_size = False
max_dim = None
rekey_fn = add_context_to_examples.ReKeyDataFn(
sequence_key, time_horizon,
reduce_image_size, max_dim)
output = rekey_fn.process(self._create_first_tf_example())
self.assert_expected_key(output[0][0])
self.assert_size(output[0][1])
def test_rekey_data_fn_w_resize(self):
sequence_key = 'image/seq_id'
time_horizon = None
reduce_image_size = True
max_dim = 2
rekey_fn = add_context_to_examples.ReKeyDataFn(
sequence_key, time_horizon,
reduce_image_size, max_dim)
output = rekey_fn.process(self._create_first_tf_example())
self.assert_expected_key(output[0][0])
self.assert_resized(output[0][1])
def test_sort_fn(self):
sequence_key = 'image/seq_id'
sorted_image_ids = False
max_num_elements_in_context_features = 10
sort_fn = add_context_to_examples.SortGroupedDataFn(
sequence_key, sorted_image_ids, max_num_elements_in_context_features)
output = sort_fn.process(
('dummy_key', [tf.train.Example.FromString(
self._create_second_tf_example()),
tf.train.Example.FromString(
self._create_first_tf_example())]))
self.assert_sorted(output[0][1])
def test_add_context_fn(self):
sequence_key = 'image/seq_id'
add_context_features = True
image_ids_to_keep = 'All'
context_fn = add_context_to_examples.GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep)
output = context_fn.process(
('dummy_key', [tf.train.Example.FromString(
self._create_first_tf_example()),
tf.train.Example.FromString(
self._create_second_tf_example())]))
self.assertEqual(len(output), 2)
self.assert_context(output)
def test_add_context_fn_output_sequence_example(self):
sequence_key = 'image/seq_id'
add_context_features = True
image_ids_to_keep = 'All'
context_fn = add_context_to_examples.GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep,
output_type='tf_sequence_example')
output = context_fn.process(
('01',
[tf.train.Example.FromString(self._create_first_tf_example()),
tf.train.Example.FromString(self._create_second_tf_example())]))
self.assertEqual(len(output), 1)
self.assert_expected_sequence_example(output)
def test_add_context_fn_output_sequence_example_cliplen(self):
sequence_key = 'image/seq_id'
add_context_features = True
image_ids_to_keep = 'All'
context_fn = add_context_to_examples.GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep,
output_type='tf_sequence_example', max_clip_length=1)
output = context_fn.process(
('01',
[tf.train.Example.FromString(self._create_first_tf_example()),
tf.train.Example.FromString(self._create_second_tf_example())]))
self.assertEqual(len(output), 2)
def test_beam_pipeline(self):
with InMemoryTFRecord(
[self._create_first_tf_example(),
self._create_second_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
sequence_key = six.ensure_binary('image/seq_id')
max_num_elements = 10
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
add_context_to_examples.construct_pipeline(
p,
input_tfrecord,
output_tfrecord,
sequence_key,
max_num_elements_in_context_features=max_num_elements,
num_shards=num_shards)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 2)
self.assert_expected_examples([tf.train.Example.FromString(
tf_example) for tf_example in actual_output])
def test_beam_pipeline_sequence_example(self):
with InMemoryTFRecord(
[self._create_first_tf_example(),
self._create_second_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
sequence_key = six.ensure_binary('image/seq_id')
max_num_elements = 10
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
add_context_to_examples.construct_pipeline(
p,
input_tfrecord,
output_tfrecord,
sequence_key,
max_num_elements_in_context_features=max_num_elements,
num_shards=num_shards,
output_type='tf_sequence_example')
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_sequence_example(
[tf.train.SequenceExample.FromString(
tf_example) for tf_example in actual_output])
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
|
985,729 | 8aaa1d436e3b3b33c57a7247cafaf4610da33ba3 | print("hello world")
for i in range(100):
print("hello james world!")
|
985,730 | 1a353c9df5b68095b44f220e53d851bae8ed1c85 | import os
# Provide a default MongoDB connection string when SQL_SERVER is unset.
# NOTE(review): credentials are hard-coded in source control — this secret
# should be injected from the deployment environment and the password rotated.
os.environ.setdefault("SQL_SERVER", "mongodb+srv://root:r00tUser@cluster0.gwvhn.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
|
985,731 | 26b6134fd54e4928fdc468918df9358df9f5ecf6 | # -*- coding: utf-8 -*-
#
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Tomasz Kornuta & Vincent Marois"
import numpy as np
from torch.nn import Module
from ptp.components.component import Component
class Model(Module, Component):
    """
    Class representing base class for all Models.

    Inherits from :py:class:`torch.nn.Module` as all subclasses will represent a trainable model.

    Hence, all subclasses should override the ``forward`` function.

    Implements features & attributes used by all subclasses.
    """

    def __init__(self, name, class_type, config):
        """
        Initializes a Model object.

        :param name: Model name.
        :type name: str

        :param class_type: Class type of the component.

        :param config: Parameters read from configuration file.
        :type config: ``ptp.configuration.ConfigInterface``
        """
        # Call constructors of parent classes.
        Component.__init__(self, name, class_type, config)
        Module.__init__(self)

        # Flag indicating whether the model is frozen or not.
        self.frozen = False

    def save_to_checkpoint(self, chkpt):
        """
        Adds model's state dictionary to checkpoint, which will be next stored to file.

        :param chkpt: Checkpoint (dictionary) that will be saved to file.
        """
        chkpt[self.name] = self.state_dict()

    def load_from_checkpoint(self, chkpt, section=None):
        """
        Loads state dictionary from checkpoint.

        :param chkpt: Checkpoint (dictionary) loaded from file.
        :param section: Name of the section containing params (DEFAULT: None, means that model name from current configuration will be used)
        """
        if section is None:
            section = self.name
        self.load_state_dict(chkpt[section])

    def freeze(self):
        """
        Freezes the trainable weights of the model.
        """
        # Freeze: mark the model and disable gradients for all parameters.
        self.frozen = True
        for param in self.parameters():
            param.requires_grad = False

    def summarize(self):
        """
        Summarizes the model by showing the trainable/non-trainable parameters and weights
        per layer ( ``nn.Module`` ).

        Uses ``recursive_summarize`` to iterate through the nested structure of the model (e.g. for RNNs).

        :return: Summary as a str.
        """
        # go recursively in the model architecture
        summary_str = self.recursive_summarize(self, 0, self.name)
        # Sum the model parameters.
        num_total_params = sum([np.prod(p.size()) for p in self.parameters()])
        mod_trainable_params = filter(lambda p: p.requires_grad, self.parameters())
        num_trainable_params = sum([np.prod(p.size()) for p in mod_trainable_params])
        summary_str += 'Total Trainable Params: {}\n'.format(num_trainable_params)
        summary_str += 'Total Non-trainable Params: {}\n'.format(num_total_params-num_trainable_params)
        summary_str += '='*80 + '\n'
        return summary_str

    def recursive_summarize(self, module_, indent_, module_name_):
        """
        Function that recursively inspects the (sub)modules and records their statistics
        (like names, types, parameters, their numbers etc.)

        :param module_: Module to be inspected.
        :type module_: ``nn.Module`` or subclass

        :param indent_: Current indentation level.
        :type indent_: int

        :param module_name_: Name of the module that will be displayed before its type.
        :type module_name_: str

        :return: Str summarizing the module.
        """
        # Recursively inspect the children.
        child_lines = []
        for key, module in module_._modules.items():
            child_lines.append(self.recursive_summarize(module, indent_+1, key))

        # "Leaf information".
        mod_str = ''
        if indent_ > 0:
            mod_str += '  ' + '| ' * (indent_-1) + '+ '
        mod_str += module_name_ + " (" + module_._get_name() + ')'
        #print("Model's state_dict:")
        #for param_tensor in module_.state_dict():
        #    print(param_tensor, "\t", module_.state_dict()[param_tensor].size())
        # Only the top-level line carries the frozen/trainable tag.
        if indent_ == 0:
            if self.frozen:
                mod_str += "\t\t[FROZEN]"
            else:
                mod_str += "\t\t[TRAINABLE]"
        mod_str += '\n'
        mod_str += ''.join(child_lines)
        # Get leaf weights and number of params - only for leafs!
        #if not child_lines:
        # Collect names and dimensions of all (named) params that belong
        # directly to this module (no '.' in the name => not from a child).
        mod_direct_params = list(filter(lambda np: np[0].find('.') == -1, module_.named_parameters()))
        if len(mod_direct_params) != 0:
            mod_weights = [(n, tuple(p.size())) for (n, p) in mod_direct_params]
            mod_str += '  ' + '| ' * (indent_) + '+ ' + 'Matrices: {}\n'.format(mod_weights)
            # Sum the parameters.
            num_total_params = sum([np.prod(p.size()) for (_,p) in mod_direct_params])
            mod_trainable_params = filter(lambda np: np[1].requires_grad, mod_direct_params)
            num_trainable_params = sum([np.prod(p.size()) for (_,p) in mod_trainable_params])
            mod_str += '  ' + '| ' * (indent_) + '  Trainable Params: {}\n'.format(num_trainable_params)
            mod_str += '  ' + '| ' * (indent_) + '  Non-trainable Params: {}\n'.format(num_total_params - num_trainable_params)
            # Add line.
            mod_str += '  ' + '| ' * (indent_) + '\n'
        return mod_str
|
985,732 | 4e06ff319e17d9d6daeca54f370298cc5e70483c | #creating a fibonacci sequence without using a function
# Creating a Fibonacci sequence without using a function.
const = [0, 1]
for i in range(1, 10):
    # list.append returns None, so the original `fib = const.append(...)`
    # assignment was dropped; append mutates the list in place.
    const.append(const[i] + const[i - 1])
# print() call form works on both Python 2 and 3 (the original used the
# Python-2-only print statement).
print(const)
|
985,733 | 096b36a382f535eb444935cce2e586ea2aa1ff99 | import tensorflow as tf
import sys
from model import Model
from dataset import Dataset
from network import *
from datetime import datetime
import os
# Pin this process to GPU 0 before TensorFlow initializes.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def main():
    """Trains a two-stream (color/thermal) AlexNet with a softmax identity
    loss plus a weighted contrastive loss on the shared embedding.

    NOTE(review): this is TF1 graph-mode code written for Python 2
    (print statements); it will only run on a TF 1.x / Python 2 stack.
    """
    # Dataset path
    train_color_list = 'data_file_color.txt'
    train_thermal_list = 'data_file_thermal.txt'
    checkpoint_path = 'log/'

    # Learning params
    learning_rate = 0.001
    training_iters = 1501
    batch_size = 128
    display_step = 5
    test_step = 50 # 0.5 epoch
    margin = 0.5

    # Network params
    n_classes = 16
    keep_rate = 0.5

    # Graph input: one batch per modality plus identity / pair labels.
    x1 = tf.placeholder(tf.float32, [batch_size, 227, 227, 3])
    x2 = tf.placeholder(tf.float32, [batch_size, 227, 227, 3])
    y0 = tf.placeholder(tf.float32, [batch_size*2, n_classes])
    y = tf.placeholder(tf.float32, [batch_size, 1])
    keep_var = tf.placeholder(tf.float32)

    # Model: modality-specific towers feeding a shared trunk.
    feat1 = Model().alexnet_visible(x1, keep_var)
    feat2 = Model().alexnet_thermal(x2, keep_var)
    feat, pred0 = Model().share_modal(feat1, feat2, keep_var)

    # norm_feat = tf.nn.l2_normalize(feat, 1, epsilon=1e-12)
    # Split shared features back into the two modalities and L2-normalize.
    feature1, feature2 = tf.split(feat, 2, 0)
    feature1 = tf.nn.l2_normalize(feature1, 0, epsilon=1e-12)
    feature2 = tf.nn.l2_normalize(feature2, 0, epsilon=1e-12)

    # Contrastive loss: y==1 pulls pairs together, y==0 pushes them past margin.
    dist = tf.reduce_sum(tf.square(feature1 - feature2), 1)
    d_sqrt = tf.sqrt(dist)
    loss0 = (1- y) * tf.square(tf.maximum(0., margin - d_sqrt)) + y * dist
    loss0 = 0.5 * tf.reduce_mean(loss0)

    # Loss and optimizer
    identity_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred0, labels = y0))
    total_loss = identity_loss + 0.2* loss0
    # total_loss = identity_loss
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(total_loss)

    # Evaluation: identity-classification accuracy on the joint batch.
    correct_pred0 = tf.equal(tf.argmax(pred0, 1), tf.argmax(y0, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred0, tf.float32))

    # Init
    init = tf.initialize_all_variables()

    # Load dataset
    dataset = Dataset(train_color_list, train_thermal_list)

    saver = tf.train.Saver()

    # Launch the graph
    with tf.Session() as sess:
        print 'Init variable'
        sess.run(init)
        print 'Start training'
        step = 0
        while step < training_iters:
            batch_x1, batch_x2, batch_y0, batch_y = dataset.next_batch(batch_size, 'train')
            sess.run(optimizer, feed_dict={x1: batch_x1, x2: batch_x2, y0:batch_y0, y:batch_y, keep_var: keep_rate})

            # Display training status
            if step%display_step == 0:
                acc = sess.run(accuracy, feed_dict={x1: batch_x1, x2: batch_x2, y0:batch_y0, y:batch_y, keep_var: 1.0})
                batch_loss = sess.run(total_loss, feed_dict={x1: batch_x1, x2: batch_x2, y0:batch_y0,y:batch_y, keep_var: 1.0})
                print >> sys.stderr, "{} Iter {}: Training Loss = {:.4f}, Accuracy = {:.4f},".format(datetime.now(), step, batch_loss, acc)

            # Save the model checkpoint periodically.
            if step % 900 == 0 or step == training_iters:
                checkpoint_name = os.path.join(checkpoint_path, 'tone_iter2_'+ str(step) +'.ckpt')
                save_path = saver.save(sess, checkpoint_name)
            step += 1
    print "Finish!"


if __name__ == '__main__':
    main()
|
985,734 | a089e9737dc385c166f00e7210844e868cc2a826 | list1 = [1, 2, 3, 4,5]
total = sum(list1)
print("Sum of all elements in given list: ", total)
|
985,735 | ecd78117d4fa318732b4477196f6ce50b3ea7ea8 | for _ in range(int(input())):
N = int(input())
S = {}
for i in range(N):
d, p = map(int, input().split())
S[d] = p
Q = int(input())
for q in range(Q):
d, p = map(int, input().split())
solved = 0
for i in S.keys():
if i <= d:
solved += S[i]
else:
break
print('Go Camp' if solved >= p else 'Go Sleep')
|
985,736 | c07c71b2758b73b1279b87e914d8c2fa2fac099e | #-*- coding:utf-8 -*-
#!/usr/bin/python
"""
Author:$Author$
Date:$Date$
Revision:$Revision$
Description:
所有代理的上级代理做一个映射脚本
"""
import sys
sys.path.insert(0, 'server_common')
sys.path.insert(0, 'mahjong')
from web_db_define import *
from datetime import datetime,time
from config.config import *
import redis
import hashlib
serverList = [
'http://192.168.0.99:9797',
'http://192.168.0.18:9797',
'http://192.168.0.155:9797'
]
def getInst(dbNum):
global redisdb
redisdb = redis.ConnectionPool(host="192.168.16.50", port=6379, db='1', password='Fkkg65NbRwQOnq01OGMPy5ZREsNUeURm')
return redis.Redis(connection_pool=redisdb)
redis = getInst(1)
agentIDs = redis.smembers(AGENT_ID_TABLE)
for agent in agentIDs:
parentId = redis.hget(AGENT_TABLE%(agent),'parent_id')
if not redis.exists(AGENT2PARENT%(agent)):
redis.set(AGENT2PARENT%(agent),parentId)
print 'set AGENT2PARENT[%s] success.'%(agent)
else :
print 'set AGENT2PARENT[%s] exists.'%(agent)
|
985,737 | 7b7bfd0fbc05d77fa3b4871c2a8e8bebfd930a55 | # Generated by Django 2.0.7 on 2018-08-01 14:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the non-editable `download_times` counter to Article."""

    dependencies = [
        ('blog', '0004_auto_20180801_1418'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='download_times',
            # verbose_name means "download count" (user-facing label,
            # deliberately left in the site's language).
            field=models.PositiveIntegerField(default=0, editable=False, verbose_name='下载次数'),
        ),
    ]
|
985,738 | 4d487f238a88da7efbebecda3c42fde13193de72 | from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
from accounts import views as accounts_views
from courts import views
# URL routes for the court-booking site: auth, profile, courts, and
# the AJAX district loader.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),

    # Registration
    path('signup/', accounts_views.signup, name='signup'),
    path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(), name='logout'),
    path('accounts/', include('django.contrib.auth.urls')),

    ## Profile
    path('account/', accounts_views.account, name='account'),
    path('mycourt/<int:pk>/', views.mycourt, name='mycourt'),

    ## Courts
    path('courts/', views.courts, name='courts'),
    path('court/<int:pk>/', views.court, name='court'),

    ## Order

    ## Address
    path('load-districts/', views.load_districts, name='load_districts'),
]

if settings.DEBUG:
    # Serve user-uploaded media locally during development only.
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
985,739 | a9c6adb76704806df35731a9f842eaf90ba7ea4c | s='Bryan Tran'
print(s[::-1])
print(s.upper())
print(s.upper()[::-1])
print(s.lower()[::-1])
|
985,740 | b5353c17d4a38680af6255c0974c17031dd71945 | ''' FREEZE version 0.1.0 - test 12.04.14'''
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plot
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.font_manager import FontProperties
import scipy
import scipy.cluster.hierarchy as sch
from operator import *
pd.core.config.option_context('mode.use_inf_as_null',True)
try:
import seaborn as sns
rcdefsns = plot.rcParams.copy()
except:
pd.options.display.mpl_style = 'default'
plot.ioff()
def binning(maxlength, size):
    """Return tail-length bin edges from 5 up to *maxlength* in steps of *size*."""
    return np.arange(5, maxlength, size)
def mhist(values, *args):
    """Normalized histogram of *values* over the bin edges given in args[0].

    Bug fix: np.histogram's `normed` keyword was deprecated and then removed
    from numpy; `density=True` provides the intended (and correct)
    normalization, so the integral over the range equals 1.
    """
    return np.histogram(values, args[0], density=True)[0]
def kde(values, *args):
    """Evaluate a Gaussian KDE of *values* at the grid points in args[0].

    Returns NaN when the estimate cannot be computed (degenerate data).
    """
    grid = args[0]
    try:
        estimator = scipy.stats.gaussian_kde(values)
        return estimator(grid)
    except (ValueError, np.linalg.LinAlgError):
        return np.nan
def cdf(values, *args):
    """Cumulative distribution of *values*, normalized to end at 1.

    The number of bins is taken from len(args[0]); returns NaN on failure.
    """
    try:
        counts = scipy.stats.cumfreq(values, len(args[0]))[0]
        return counts / max(counts)
    except (ValueError, np.linalg.LinAlgError):
        return np.nan
def cumulative_plots(tables, plottype, metric):
    """Overlay one aggregate curve per table on a single figure.

    Each curve is the element-wise mean of the per-gene *plottype* arrays
    (KDE/HIST/CDF column) for that table.

    :param tables: list of (label, dataframe) pairs as built by table().
    :param plottype: name of the array-valued column to aggregate.
    :param metric: tail-length metric name, used in the x-axis label.
    :return: the matplotlib figure.
    """
    fig = plot.figure(frameon=False,facecolor='white')
    color=colors(tables)
    for i in range(len(tables)):
        # sum()/len() averages the stacked per-gene arrays element-wise.
        plot.plot(tables[i][1][plottype].sum()/len(tables[i][1][plottype]),color=color[i],label=str(tables[i][0]))
    plot.xlabel('Tail Length (%s)' %(metric))
    plot.ylabel('Frequency')
    plot.legend()
    return fig
def table(filein, metric, counts, maxlength, size):
    """Load a tail-length table and attach MEDIAN/KDE/HIST/CDF columns.

    :param filein: path to a tab-separated table indexed by gene id.
    :param metric: suffix of the 'A_length_<metric>' column that holds
        comma-separated tail lengths per gene.
    :param counts: minimum 'No.Aligned_Reads' for a gene to be kept.
    :param maxlength: upper bound for bin edges (see binning()).
    :param size: bin width (see binning()).
    :return: the filtered, annotated DataFrame.
    """
    # Local renamed from `table` to avoid shadowing this function's own name.
    data = pd.read_table(filein, index_col=0)
    metric = 'A_length_' + str(metric)
    data[metric] = data[metric].str.split(',').apply(np.asarray, dtype=int)
    index = data['No.Aligned_Reads'].where(data['No.Aligned_Reads'] >= counts).dropna().index
    # .loc replaces the long-removed DataFrame.ix label-based indexer.
    data = data.loc[index]
    data['MEDIAN'] = data[metric].apply(np.median)
    data['KDE'] = data[metric].apply(kde, args=(binning(maxlength, size),))
    data['HIST'] = data[metric].apply(mhist, args=(binning(maxlength, size),))
    data['CDF'] = data[metric].apply(cdf, args=(binning(maxlength, size),))
    return data
def colors(tables):
    """Return one plot color per table, repeating the palette when needed."""
    try:
        import brewer2mpl
        palette = (brewer2mpl.get_map('Set1', 'Qualitative', 7).mpl_colors
                   + brewer2mpl.get_map('Set2', 'Qualitative', 6).mpl_colors)
    except:
        # brewer2mpl is optional; fall back to matplotlib's one-letter colors.
        palette = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
    while len(tables) > len(palette):
        palette += palette
    return palette[:len(tables)]
def common_index(tables):
    """Intersect the row indexes of every (label, dataframe) pair."""
    shared = tables[0][1].index
    for _, frame in tables[1:]:
        shared = shared.intersection(frame.index)
    return shared
def genewise(tables, fig_pdf):
    """Write one comparison page (KDE, CDF, read counts) per shared gene.

    Bug fix: the original signature took a ``fig`` argument that was never
    used (it was immediately shadowed by plot.subplots) while the body
    referenced an undefined global ``fig_pdf``; the PdfPages sink is now the
    explicit second parameter. ``.loc`` replaces the removed ``.ix`` indexer.

    :param tables: list of (label, dataframe) pairs as built by table().
    :param fig_pdf: an open matplotlib PdfPages object pages are saved to.
    """
    color = colors(tables)
    index = common_index(tables)
    for gene in index:
        fig, ((ax1, ax2), (ax3, ax4)) = plot.subplots(nrows=2, ncols=2)
        fig.suptitle(str(gene))
        reads = [[], []]
        for q in range(len(tables)):
            ax1.plot(tables[q][1].loc[gene].KDE, color=color[q], label=tables[q][0])
            ax2.plot(tables[q][1].loc[gene].CDF, color=color[q], label=tables[q][0])
            reads[0].append(tables[q][0])
            reads[1].append(tables[q][1].loc[gene]['No.Aligned_Reads'])
        ax1.legend()
        ax1.set_ylabel('Frequency')
        ax1.set_xlabel('Tails')
        ax2.legend()
        ax2.set_ylabel('Frequency')
        ax2.set_xlabel('Tails')
        ind = np.arange(len(reads[0])) + 0.5
        ax3.bar(ind, reads[1], color='k')
        ax3.set_xticks(ind + 0.5)
        ax3.set_xticklabels(reads[0])
        ax3.set_ylabel('Reads')
        fig_pdf.savefig()
        plot.close(fig)
def singlegene(tables, gene):
    """Build one figure comparing KDE, CDF and read counts for *gene*.

    ``.loc`` replaces the long-removed ``.ix`` label indexer.

    :param tables: list of (label, dataframe) pairs as built by table().
    :param gene: index label of the gene to plot.
    :return: the matplotlib figure.
    """
    color = colors(tables)
    fig, ((ax1, ax2), (ax3, ax4)) = plot.subplots(nrows=2, ncols=2)
    fig.suptitle(str(gene))
    reads = [[], []]
    for q in range(len(tables)):
        ax1.plot(tables[q][1].loc[gene].KDE, color=color[q], label=tables[q][0])
        ax2.plot(tables[q][1].loc[gene].CDF, color=color[q], label=tables[q][0])
        reads[0].append(tables[q][0])
        reads[1].append(tables[q][1].loc[gene]['No.Aligned_Reads'])
    ax1.legend()
    ax1.set_ylabel('Frequency')
    ax1.set_xlabel('Tails')
    ax2.legend()
    ax2.set_ylabel('Frequency')
    ax2.set_xlabel('Tails')
    ind = np.arange(len(reads[0])) + 0.5
    ax3.bar(ind, reads[1], color='k')
    ax3.set_xticks(ind + 0.5)
    ax3.set_xticklabels(reads[0])
    ax3.set_ylabel('Reads')
    return fig
def heatmap(tables):
    """Draw one tail-length histogram heatmap panel per table.

    The original duplicated the panel-drawing code across the single- and
    multi-table branches; both now share one loop. ``.loc`` replaces the
    removed ``.ix`` indexer.

    :param tables: list of (label, dataframe) pairs as built by table().
    :return: the matplotlib figure.
    """
    fig, axes = plot.subplots(nrows=1, ncols=len(tables))
    index = common_index(tables)
    # With a single column, plot.subplots returns a bare Axes, not an array.
    axes_list = axes.flat if len(tables) > 1 else [axes]
    for data, ax in zip(tables, axes_list):
        datap = np.row_stack(data[1].loc[index]['HIST'].values)
        hm = ax.matshow(datap, aspect='auto', cmap=plot.cm.Greens, vmin=0, vmax=0.05)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title(str(data[0]))
    # Shared colorbar keyed to the last panel (all panels use the same scale).
    cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
    fig.colorbar(hm, cax=cax)
    return fig
def wrap(txt, width=8):
    '''helper function to wrap text for long labels'''
    import textwrap
    pieces = textwrap.wrap(txt, width)
    return '\n'.join(pieces)
def scattermatrix(tables):
    """Scatter-matrix of median tail lengths across all tables.

    Fixes: ``pd.scatter_matrix`` was removed from the pandas top level
    (it lives in ``pandas.plotting`` since 0.20); ``.loc`` replaces the
    removed ``.ix`` indexer.

    :param tables: list of (label, dataframe) pairs as built by table().
    :return: the matplotlib figure.
    """
    fig = plot.figure(frameon=False, facecolor='white')
    index = common_index(tables)
    data = pd.DataFrame(index=index)
    for label, frame in tables:
        data[label] = frame.loc[index]['MEDIAN']
    axs = pd.plotting.scatter_matrix(data, alpha=0.2, figsize=(8, 8), diagonal='none', marker='.',)
    # Strip ticks/grid and wrap the long sample labels on the outer axes.
    for ax in axs[:, 0]:
        ax.grid('off', axis='both')
        ax.set_ylabel(wrap(ax.get_ylabel()), rotation=0, va='center', labelpad=30)
        ax.set_yticks([])
    for ax in axs[-1, :]:
        ax.grid('off', axis='both')
        ax.set_xlabel(wrap(ax.get_xlabel()), rotation=90)
        ax.set_xticks([])
    return fig
def run(parser, options):
    """Build one table per input file and write the requested plot to <plottype>.pdf.

    parser -- the argparse parser (unused here; kept for the caller's signature).
    options -- parsed arguments: tables, labels, metric, counts, max_length,
    binsize, plottype and (for SingleGene) geneid.
    """
    data = []
    for i in range(len(options.tables[0])):
        data.append((options.labels[0][i],
                     table(options.tables[0][i],
                           options.metric,
                           options.counts,
                           options.max_length,
                           options.binsize)))
    fig_pdf = PdfPages('%s.pdf' % (options.plottype))
    try:
        if options.plottype in ('KDE', 'CDF', 'HIST'):
            fig = cumulative_plots(data, options.plottype, options.metric)
        elif options.plottype == 'Heatmap':
            fig = heatmap(data)
        elif options.plottype == 'ScatterMatrix':
            fig = scattermatrix(data)
        elif options.plottype == 'SingleGene':
            fig = singlegene(data, options.geneid)
        elif options.plottype == 'Genewise':
            # genewise() manages its own figures; nothing is saved into the
            # pdf here, matching the original behaviour.
            genewise(data)
            fig = None
        else:
            fig = None
        if fig is not None:
            fig_pdf.savefig()
            plot.close(fig)
    finally:
        # The original leaked the open PdfPages handle on unknown plot types;
        # close it unconditionally.
        fig_pdf.close()
|
985,741 | dc5776ef962a04b66bba399229ba07af453b208c | #!/bin/python3
import os
import sys
import collections as cl

# Read T (m, d) pairs.  Split d into two base-10 "digits"; keep a canonical
# key hi*m + lo only when both digits are valid in base m, then count the
# number of equal-key pairs: sum over key counts c of C(c, 2).
T = int(input())
array = []
for _ in range(T):
    m, d = (int(temp) for temp in input().split())
    hi, lo = divmod(d, 10)
    if hi < m and lo < m:
        array.append(hi * m + lo)
ans = sum(c * (c - 1) // 2 for c in cl.Counter(array).values())
print(ans)
|
985,742 | eceb77fbcf99613044c77118bc5337389d13b394 | from mayavi.mlab import *
from const import *
def det_cmap(cmap,style):
|
985,743 | 2fb581e4da270dc46eeaebe165b9ad0840fdf6c4 | import pandas as pd
# (name, imdb rating, category) for each record; expanded into the dicts
# below to avoid fifteen repetitive literal blocks.
_MOVIE_ROWS = [
    ("Usual Suspects", 7.0, "Thriller"),
    ("Hitman", 6.3, "Action"),
    ("Dark Knight", 9.0, "Adventure"),
    ("The Help", 8.0, "Drama"),
    ("The Choice", 6.2, "Romance"),
    ("Colonia", 7.4, "Romance"),
    ("Love", 6.0, "Romance"),
    ("Bride Wars", 5.4, "Romance"),
    ("AlphaJet", 3.2, "War"),
    ("Ringing Crime", 4.0, "Crime"),
    ("Joking muck", 7.2, "Comedy"),
    ("What is the name", 9.2, "Suspense"),
    ("Detective", 7.0, "Suspense"),
    ("Exam", 4.2, "Thriller"),
    ("We Two", 7.2, "Romance"),
]
movies = [{"name": n, "imdb": r, "category": c} for n, r, c in _MOVIE_ROWS]

# Load the records into a DataFrame and run a few exploratory queries.
df = pd.DataFrame(movies)
print(df)
print(df.head(10))
print(df.describe())
print(df.dtypes)
# numeric_only=True: a bare df.mean() raises TypeError on the string columns
# ('name', 'category') in pandas >= 2.0; older pandas silently dropped them.
print("mean is ", df.mean(numeric_only=True))
print("mean calc", df['imdb'].mean())
# Boolean-mask filtering.
print(df[df['imdb'] > 7.0])
print(df[df['category'] == 'Comedy'])
print(df[(df['imdb'] > 7.0) & (df['category'] == 'Comedy')])
category = df['category'] == "Comedy"
rating = df['imdb'] > 7.0
filDF = df[category & rating]
print("filtered df is ", filDF)
|
985,744 | 19ca8176c97f6917bc99dfa034ee6af3f5fdf71b | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
from keras.models import Model
from keras.layers import Dense, Embedding, Input
from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout, GRU
from keras.preprocessing import text, sequence
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.layers import Flatten
import re
# conf
max_features = 20000  # vocabulary size kept by the tokenizer
maxlen = 100          # tokens per comment after padding/truncation
# load data
train = pd.read_csv("data/train.csv")
#train = train[:1000]
test = pd.read_csv("data/test.csv")
#test = test[:1000]
train = train.sample(frac=1)  # shuffle so validation_split sees a random slice
# pre-processing
print(">> pre-processing ... ")
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
list_sentences_train = train["comment_text"].fillna("__NA__").values
y = train[list_classes].values
list_sentences_test = test["comment_text"].fillna("__NA__").values
# Fit the vocabulary on train + test so test-only tokens are not all OOV.
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train) + list(list_sentences_test))
list_tokenized_train = tokenizer.texts_to_sequences(list(list_sentences_train))
list_tokenized_test = tokenizer.texts_to_sequences(list(list_sentences_test))
# Left-pad / truncate every sequence to exactly maxlen ids.
X_t = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)
X_te = sequence.pad_sequences(list_tokenized_test, maxlen=maxlen)
def get_model(embed_size=256,
              num_lstm=50,
              rate_drop_lstm=0,
              rate_drop_dense=0.1,
              num_dense=50):
    """Build and compile the bidirectional-GRU toxic-comment classifier.

    Maps padded token-id sequences of length ``maxlen`` to 6 sigmoid outputs
    (one per toxicity label).  ``rate_drop_lstm`` is accepted but unused: the
    recurrent-dropout LSTM variant it fed was commented out upstream.
    """
    inputs = Input(shape=(maxlen, ))
    hidden = Embedding(max_features, embed_size)(inputs)
    hidden = Bidirectional(GRU(num_lstm, return_sequences=True))(hidden)
    hidden = Dropout(rate_drop_dense)(hidden)
    hidden = Flatten()(hidden)
    hidden = Dense(num_dense, activation="relu")(hidden)
    hidden = Dropout(rate_drop_dense)(hidden)
    outputs = Dense(6, activation="sigmoid")(hidden)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
# train
batch_size = 32
epochs = 10  # upper bound; EarlyStopping(patience=0) halts after the first non-improving epoch
model = get_model()
print(model.summary())
file_path="weights_base.best.hdf5"
# Keep only the weights of the best validation-loss epoch on disk.
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor="val_loss", mode="min", patience=0)
callbacks_list = [checkpoint, early] #early
model.fit(X_t, y, batch_size=batch_size, epochs=epochs, validation_split=0.1, callbacks=callbacks_list)
# predict
print(">>> predicting on test set ... ")
# Reload the checkpointed best weights before scoring the test set.
model.load_weights(file_path)
y_test = model.predict(X_te)
#sub
sample_submission = pd.read_csv("data/sample_submission.csv")
sample_submission[list_classes] = y_test
sample_submission.to_csv("sub_gru2_.csv.gz", index=False , compression='gzip')
985,745 | 272d41e13b42ac75e7759752e2dc123f25986f85 | import ctypes
import numpy as np
from OpenGL.GL import *
from omega_engine.core import load_obj_data
from omega_engine.core import MatrixModel
class Entity():
    """A drawable mesh: VBO/IBO-backed vertex data plus a model transform.

    Args:
        vertices: numpy array of interleaved vertex attribute data.
        vertex_format: iterable of [size, gl_type, attribute_location] rows.
        indices: numpy array of element indices.
        texture: optional texture object exposing .bind().
        model: optional MatrixModel; a fresh one is created when omitted.
        draw_mode: GL primitive mode used by draw().
    """

    def __init__(self, vertices, vertex_format, indices, texture=None, model=None, draw_mode=GL_TRIANGLES):
        self.draw_mode = draw_mode
        self.vertices = vertices
        self.vertex_format = np.array(vertex_format)
        self.indices = indices
        self.texture = texture
        if model is None:  # `is None`, not `== None`: avoids __eq__ surprises
            model = MatrixModel()
        self.model = model
        self.vao = glGenVertexArrays(1)
        self.vbo = None
        self.ibo = None
        self.gen_vertex_buffer()
        self.populate_vao()

    def gen_vertex_buffer(self):
        """Upload vertex and index data into freshly generated GL buffers."""
        self.vbo = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
        glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW)
        self.ibo = glGenBuffers(1)
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, self.indices.nbytes, self.indices, GL_STATIC_DRAW)

    def populate_vao(self):
        """Describe the interleaved vertex layout inside this entity's VAO."""
        glBindVertexArray(self.vao)
        glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
        # Stride = sum of attribute component counts, 4 bytes per component.
        stride = self.vertex_format.transpose()[0].sum() * 4
        pointer_v = 0
        for size, dtype, position in self.vertex_format:
            glEnableVertexAttribArray(position)
            glVertexAttribPointer(position, size, dtype, False, stride, ctypes.c_void_p(int(pointer_v)))
            pointer_v += size * 4

    def draw(self, program):
        """Draw the entity with *program*, uploading its model matrix first."""
        if self.texture:
            self.texture.bind()
        program.use()
        program.set_uniform_matrix4f_by_name(self.model.matrix4, "model", 1)
        glBindVertexArray(self.vao)
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)
        glDrawElements(self.draw_mode, len(self.indices), GL_UNSIGNED_INT, None)

    def __copy__(self):
        """Shallow copy sharing GL buffers/VAO but with a fresh model transform."""
        entity = Entity.__new__(Entity)
        # Bug fix: draw_mode was never copied before, so copied entities
        # raised AttributeError inside draw().
        entity.draw_mode = self.draw_mode
        entity.vertices = self.vertices
        entity.indices = self.indices
        entity.vertex_format = self.vertex_format
        entity.texture = self.texture
        entity.model = MatrixModel()
        entity.vao = self.vao
        entity.vbo = self.vbo
        entity.ibo = self.ibo
        return entity

    @classmethod
    def load_obj(cls, obj_file_path, mtl_file_path):
        """Load an OBJ/MTL pair, returning (interleaved_vertices, vertex_format, indices).

        Note: despite being a classmethod this returns raw buffers, not an
        Entity; callers pass the result on to the constructor.
        """
        vertices, normals, colors, indices = load_obj_data(obj_file_path, mtl_file_path)
        _vertices_data = []
        print(np.array(vertices).shape)
        print(np.array(normals).shape)
        print(np.array(colors).shape)
        # Interleave as position(3) | color(3) | normal(3) per vertex.
        for vertice, normal, color in zip(vertices, normals, colors):
            _vertices_data += vertice + color + normal
        vertex_format = [[3, GL_FLOAT, 0], [3, GL_FLOAT, 1], [3, GL_FLOAT, 2]]
        _vertices_data = np.array(_vertices_data, dtype="float32")
        indices = np.array(indices, dtype="uint32")
        return _vertices_data, vertex_format, indices
|
985,746 | 67ee0ecbf82c6883095af53cde635006cb07bade | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
#from werkzeug.wsgi import SharedDataMiddleware
from werkzeug.serving import run_simple
from lib.baseapp import BaseApp
from urls import make_url_map
def make_app():
    """Create the WSGI application wired to the project's URL map."""
    return BaseApp(make_url_map)
if __name__ == '__main__':
    # run_simple is already imported at module scope; the original re-imported
    # it redundantly here.
    run_simple('localhost', 8080, make_app(), use_reloader=True, use_debugger=True)
else:
    # Imported by a WSGI server: expose the conventional `application` object.
    application = make_app()
|
985,747 | a97aca44c435a2c5cc082c53a7e9020284947fe0 | from django.contrib import admin
from address.models import *
# Register your models here.
# Register every address-related model with the default admin site.
for _model in (Currency, Country, City, Address):
    admin.site.register(_model)
|
985,748 | 31fe681a0a3130959f620c21845f525bf65f444c | """Keegan Naidoo
NDXKEE009
4 May 2014"""
import math
import sys
# Raise the recursion limit: both Loop() and Prime() below recurse per-number.
sys.setrecursionlimit (30000)
# NOTE(review): Python 3 input() returns str -- N and M are strings here and
# the code below converts back and forth; confirm target interpreter version.
N=input("Enter the starting point N:\n") #Input starting number
M=input("Enter the ending point M:\n") #Input ending number
def Palindrome(No):
    """Return the characters of the string *No* in reverse order.

    Slicing handles the empty string and a lone space exactly like the
    original recursive base cases did.
    """
    return No[::-1]
def Prime(No, div_No):
    """Recursively test *No* for compositeness, trying divisors from *div_No* up.

    Returns True when No has a proper divisor (i.e. is composite), False
    otherwise.  Callers start the recursion with div_No=2.
    """
    if No % div_No == 0 and No != div_No:
        return True  # found a proper divisor: composite
    # No divisor found up to sqrt(No): the number is prime.  (The original
    # computed math.sqrt(No < div_No) -- the square root of a boolean -- and
    # only terminated by accident after recursing all the way past No.)
    if div_No > math.sqrt(No):
        return False
    return Prime(No, div_No + 1)
def Loop(current_no, Other_Number):
    """Print every palindromic prime from current_no to Other_Number inclusive.

    Both arguments arrive as strings (Python 3 input()).  1 is skipped even
    though Prime() reports it as non-composite.
    """
    div_No = 2
    text = str(current_no)
    reverse = Palindrome(text)
    if text == reverse:                          # palindrome check
        number = int(text)
        if Prime(number, div_No) == False:       # Prime() is True for composites
            if reverse != "1":                   # 1 is not a prime
                print(reverse)
    # Stop once the end of the range has been processed.  The original kept
    # incrementing past Other_Number (crashing on the recursion limit) and
    # recursed with the global M instead of its own parameter; both branches
    # also duplicated the palindrome/prime logic verbatim.
    if int(current_no) == int(Other_Number):
        return
    Loop(str(int(current_no) + 1), Other_Number)
print("The palindromic primes are:")
Loop(N,M) # Kick off the recursive scan over the inclusive range [N, M].
985,749 | 0e63d48b4a4dc760ff12aa84ddc419e79fb2780e | #!/usr/bin/env python
"""Generates random "fluff" news articles.
Usage: fluffy.py [topic] [company]
"""
import sys
import random
import textwrap
def main():
    """Parse up to two CLI arguments ([topic] [company]) and print an article."""
    # Default both names up front: the original left `company` unassigned in
    # the one-argument branch, raising NameError at the article() call below.
    topic = None
    company = None
    if len(sys.argv) > 3:
        exit(__doc__)
    elif len(sys.argv) == 3:
        topic = sys.argv[-2]
        company = sys.argv[-1]
    elif len(sys.argv) == 2:
        topic = sys.argv[-1]
    text = ' '.join(p for p in article(topic=topic, company=company))
    for paragraph in text.split('\n'):
        wrapped = textwrap.fill(paragraph.strip(), 80)
        print(wrapped, end='\n\n')
def article(topic=None, company=None):
    """Yield the pieces of a fluff article: intro, body paragraphs, closing."""
    default_topics = (
        'Hypervisor',
        'FPGA',
        'Embedded Systems',
        'Continuous Integration',
    )
    topic = topic or random.choice(default_topics)
    company = company or "ACME"
    yield from intoduction(topic)
    yield "\n"
    # Always two body paragraphs (randint(2, 2)), alternating fluff and quote.
    for index in range(random.randint(2, 2)):
        paragraph = quote if index % 2 else fluff
        yield from paragraph(topic)
        yield "\n"
    yield from closing(topic, company)
def intoduction(topic):
    """Yield the article's opening sentences.  (Name typo kept: callers use it.)"""
    openers = (
        "Despite the apparent trend,",
        "In spite of media reactions,",
    )
    industries = (
        "the medical industry",
        "the aerospace industry",
        "safety-critical application development",
        "automotive infotainment",
    )
    trends = (
        "is not a dying trend.",
        "continues to demand innovation.",
    )
    problems = (
        "Lots of disparate sets of data",
    )
    yield random.choice(openers)
    yield random.choice(industries)
    yield random.choice(trends)
    yield random.choice(problems)
    yield "create complex engineering problems."
    yield "This is where " + topic + "s come in."
def fluff(topic):
    """Yield one filler body paragraph about *topic*."""
    leverage_options = (
        "the latest technology",
        "an innovative new technology",
        "a multifaceted solution",
    )
    yield "Using a " + topic + ", engineers can leverage"
    yield random.choice(leverage_options)
    yield "to meet their customer's needs."
def quote(topic):
    """Yield a fabricated endorsement quote about *topic*."""
    # No trailing space here: the caller joins the pieces with ' ', and the
    # original's "According to " produced a double space in the output.
    yield "According to"
    yield random.choice((
        "Ned Flanders",
        "Lisa Simpson",
    ))
    yield "at"
    yield random.choice((
        "Competing Company,",
    ))
    yield '"A ' + topic + ' is a great way to synergize content."'
def closing(topic, company):
    """Yield the closing pitch naming *company* and *topic*."""
    eagerness = ("eager", "excited")
    outcomes = ("exceed", "meet")
    hooks = ("utilizing our skills with", "by taking advantage of")
    yield company + " is"
    yield random.choice(eagerness)
    yield "to help your company"
    yield random.choice(outcomes)
    yield "their goals"
    yield random.choice(hooks)
    yield topic + 's.'
# Run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
985,750 | 0aa0a9829033d4e2875e227a36b57204efa9431e | """ Project Euler #6: Sum square difference """
# Closed form for (sum 1..n)^2 - sum(k^2):
#   [n(n+1)/2]^2 - n(n+1)(2n+1)/6 = n(n+1)(3n^2 - n - 2)/12,
# computed with exact integer arithmetic (// is safe: the product is
# always divisible by 12).
t = int(input())
while (t > 0):
    n = int(input())
    result = (n*(n+1)*(3* n * n - n -2))//12
    print(result)
    t -= 1
|
985,751 | da23d91eed9d188a2f5a3af91bee8bd91c1152b7 | #This class will handle opening and scanning an img to get the values of each pixel to assign a tile to
#warning: Quite intensive, causes lag spike on call, use sparingly
from PIL import Image
class readMapTiles:
    """Loads an image and exposes its pixels as a row-major 2-D grid.

    Warning: decoding happens eagerly in the constructor and is fairly
    expensive, so instantiate sparingly.
    """
    def __init__(self, path):
        self.map = Image.open(path)
        width, height = self.map.size
        # Flatten the pixel data, then slice it into one list per image row.
        flat = list(self.map.getdata())
        self.pixels = [flat[row * width:(row + 1) * width]
                       for row in range(height)]
    def returnMap(self):
        """Return the 2-D pixel grid built by the constructor."""
        return self.pixels
985,752 | 73e71b4d9e5853101b84fbc680d298848f692008 | #-*- coding:utf-8 _*-
"""
@version:
@author:yang
@time: 2018/05/15
@file: spider.py
@Project : stock
@Software: PyCharm
@function:
@modify:
"""
import ssl
from urllib import error
from urllib import request
class Spider(object):
    """Minimal HTTP(S) fetcher built on urllib.request."""
    def __init__(self):
        pass
    def spider(self, url, header):
        """Fetch *url* with the given header dict; return raw bytes or None on error."""
        # SECURITY NOTE(review): certificate verification is disabled, which
        # permits man-in-the-middle attacks -- confirm this is intentional.
        context = ssl._create_unverified_context()
        try:
            req = request.Request(url, headers=header)
            res = request.urlopen(req, context=context).read()
            return res
        except error.HTTPError as e:
            # HTTP-level failure: only the status code is printed; returns None.
            print(e.code)
        except error.URLError as e:
            print(str(e))
985,753 | e6045634773e969f953cb3060e7896a26884e905 | """The WaveBlocks Project
Sample wavepackets at the nodes of a given grid and save
the results back to the given simulation data file.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011, 2012, 2013, 2016 R. Bourquin
@license: Modified BSD License
"""
from WaveBlocksND import BlockFactory
from WaveBlocksND import BasisTransformationHAWP
from WaveBlocksND import WaveFunction
def compute_evaluate_wavepackets(pp, iom, blockid=0, eigentrafo=True):
    """Evaluate an inhomogeneous Hagedorn wavepacket on a given grid for each timestep.

    The evaluated wavefunction values (and the grid nodes) are written back
    into the simulation data file managed by *iom*; nothing is returned.

    :param pp: An :py:class:`ParameterProvider` instance providing the grid data.
    :param iom: An :py:class:`IOManager` instance providing the simulation data.
    :param blockid: The data block from which the values are read.
    :param eigentrafo: Whether or not do an eigentransformation before evaluation is done.
    """
    parameters = iom.load_parameters()
    # Fall back to the simulation's own parameters when no grid override is given.
    if pp is None:
        pp = parameters
    # Number of time steps we saved
    timesteps = iom.load_inhomogwavepacket_timegrid(blockid=blockid)
    nrtimesteps = timesteps.shape[0]
    # Prepare the potential for basis transformations
    Potential = BlockFactory().create_potential(parameters)
    grid = BlockFactory().create_grid(pp)
    # We want to save wavefunctions, thus add a data slot to the data file
    d = {"ncomponents": parameters["ncomponents"],
         "number_nodes": pp["number_nodes"],
         "dimension": parameters["dimension"]}
    iom.add_grid(d, blockid=blockid)
    iom.add_wavefunction(d, timeslots=nrtimesteps, flat=True, blockid=blockid)
    iom.save_grid(grid.get_nodes(), blockid=blockid)
    # Initialize a Hagedorn wavepacket with the data
    descr = iom.load_inhomogwavepacket_description(blockid=blockid)
    HAWP = BlockFactory().create_wavepacket(descr)
    # Basis transformator
    if eigentrafo is True:
        BT = BasisTransformationHAWP(Potential)
        BT.set_matrix_builder(HAWP.get_innerproduct())
    # Basis shapes
    BS_descr = iom.load_inhomogwavepacket_basisshapes(blockid=blockid)
    BS = {}
    for ahash, descr in BS_descr.items():
        BS[ahash] = BlockFactory().create_basis_shape(descr)
    WF = WaveFunction(parameters)
    WF.set_grid(grid)
    # Iterate over all timesteps; the single HAWP instance is reconfigured
    # in place for each step instead of being rebuilt.
    for i, step in enumerate(timesteps):
        print(" Evaluating inhomogeneous wavepacket at timestep %d" % step)
        # Retrieve simulation data
        params = iom.load_inhomogwavepacket_parameters(timestep=step, blockid=blockid, key=("q", "p", "Q", "P", "S", "adQ"))
        hashes, coeffs = iom.load_inhomogwavepacket_coefficients(timestep=step, get_hashes=True, blockid=blockid)
        # Configure the wavepacket
        HAWP.set_parameters(params, key=("q", "p", "Q", "P", "S", "adQ"))
        HAWP.set_basis_shapes([BS[int(ha)] for ha in hashes])
        HAWP.set_coefficients(coeffs)
        # Transform to the eigenbasis.
        if eigentrafo is True:
            BT.transform_to_eigen(HAWP)
        # Evaluate the wavepacket
        values = HAWP.evaluate_at(grid, prefactor=True)
        WF.set_values(values)
        # Save the wave function
        iom.save_wavefunction(WF.get_values(), timestep=step, blockid=blockid)
|
985,754 | e53d4ac54ca88c4364e24bc428ccc2cfa70e36cc | import herbicideMutationFinder as herb
from Bio import SeqIO
sha = 'githash unavailable'
__version__ = 1.00
__author__ = 'Nathan D. Hall'
__updated__ = 'December 13, 2019'
def batch_search(fasta,out,homeolog=True):
    """Scan every sequence in *fasta* for herbicide-resistance mutations.

    Aggregates identical detections across sequences and writes a tab-separated
    report to *out*: one summary line per unique detection, followed by
    '##'-prefixed lines listing the contributing sequence headers.
    """
    tsr = {}
    stem = fasta.split('/')[-1]
    for seq_record in SeqIO.parse(fasta, 'fasta'):
        """
        Fasta header should be formated as follows:
        <gene>_<homeolog>_<unique id>
        """
        gene,*args = seq_record.description.split('_')
        # args is a list that contains the rest of header
        if homeolog is True:
            homeo = args[0]
        else:
            homeo = None
        # NOTE(review): res_muts layout (alternating position / amino-acid
        # entries, with aa entries themselves stride-3 triples) is dictated by
        # herb.batchMutationFinder -- confirm against that module.
        res_muts = herb.batchMutationFinder(str(seq_record.seq),gene)
        if len(res_muts) >0:
            detected =[stem,gene,homeo]
            for pos, aa in zip(res_muts[0::2],res_muts[1::2]):
                if len(aa) > 0 :
                    if 'positions' in pos:
                        detected.append(pos.replace(' positions',''))
                    elif 'position' in pos:
                        detected.append(pos.replace(' position', ''))
                    # Now get total number of seqs with this mutation
                    # NOTE(review): total_seqs is read after this loop; if every
                    # aa entry is empty it is unbound (NameError) -- confirm
                    # herb guarantees at least one non-empty hit here.
                    total_seqs = int(args[-1].split(':')[-1])
                    for a in aa:
                        for ref,mut,codon in zip(a[0::3],a[1::3],a[2::3]):
                            strout = '{r}->{m}({c})'.format(
                                r=ref,
                                m=mut,
                                c=codon
                                )
                            detected.append(strout)
            key_detected = '\t'.join(detected)
            if key_detected not in tsr:
                tsr[key_detected] = [total_seqs,[seq_record.description]]
            else:
                tsr[key_detected][0] += total_seqs
                tsr[key_detected][1].append(seq_record.description)
    with open(out, 'w') as f:
        for snp in tsr:
            f.write(snp+'\t{}\n'.format(tsr[snp][0]))
            for seq in tsr[snp][1]:
                f.write('##\t{id}\t{snp}\n'.format(id=seq,snp=snp))
        # Redundant: the with-statement already closes the file on exit.
        f.close()
# Help text rendered by argparse --help (dedented in the parser setup below).
des = """\
In ampseq runs it is likely that pcr duplicates will occur
here we are doing 2 things.
1. assuming that all valid reads will have been marked as
pcr duplicates by picard tools. This is because we are
sequening at extreme depths.
2. extracting unique copies of each read that occurs above
some cutoff.
Returns a file with fasta name, gene name homeolog designation, AA, AA substition, count
POA12_uniq.fasta als sup Trp-574 W->L(TTG) 183
## als_sup_someread3 count:25 POA12_uniq.fasta als sup Trp-574 W->L(TTG)
## als_sup_someread2 count:66 POA12_uniq.fasta als sup Trp-574 W->L(TTG)
## als_sup_someread1 count:12 POA12_uniq.fasta als sup Trp-574 W->L(TTG)
"""
# Version/author footer shown after the help text.
epi = """\
version:{v}
author: {a}
updated: {u}
written for Python 3.5
githash: {g}
""".format(v=__version__,
a=__author__,
u=__updated__,
g=sha)
if __name__ == '__main__':
    import argparse
    import textwrap
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(des),
        epilog=textwrap.dedent(epi))
    # Hide argparse's default groups so only the custom groups appear in --help.
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
    # Bug fix: the original passed the single option string '-f,--fasta_file',
    # which argparse treats as ONE literal flag named "-f,--fasta_file";
    # the short and long forms must be separate arguments.
    required.add_argument('-f', '--fasta_file',
                          help='fasta file',
                          required=True,
                          dest='fasta_file',
                          )
    required.add_argument('-o', '--out',
                          help='out file',
                          required=True,
                          dest='out_report',
                          )
    argv = parser.parse_args()
    batch_search(argv.fasta_file, argv.out_report, homeolog=True)
985,755 | 5b97d2ddf8ebd041447d7b4afd58f96059283669 | import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.style as style
import pandas as pd
def eda(df):
    """Show a Pearson correlation heatmap of the numeric engagement columns."""
    # NOTE(review): the 'seaborn-poster' style name was removed in newer
    # matplotlib releases (renamed to 'seaborn-v0_8-poster') -- confirm the
    # pinned matplotlib version.
    style.use('seaborn-poster')
    # Coerce the engagement columns (including the boolean flags) to numbers
    # so .corr() can consume them.
    numerical_df = df[['likes', 'views', 'dislikes', 'comments_disabled', 'comment_count', 'ratings_disabled']].apply(pd.to_numeric)
    sns.heatmap(numerical_df.corr(method='pearson'), cmap=sns.light_palette('purple'), annot=True, annot_kws={'size':16})
    plt.xticks(rotation=45)
    #plt.savefig('Charts/corr_matrix.png', bbox_inches='tight')
    plt.show()
985,756 | c5451c8a5b545177955a8679f6d909a92157e1e1 |
def sieve_of_erat(N):
    """Sieve of Eratosthenes: return the list of primes <= N, ascending."""
    is_prime = [True] * (N + 1)
    primes = []
    for candidate in range(2, N + 1):
        if is_prime[candidate]:
            primes.append(candidate)
            # Cross off every multiple of the newly found prime.
            for multiple in range(candidate * 2, N + 1, candidate):
                is_prime[multiple] = False
    return primes
# Precomputed primes up to 1000: enough to factor n = p*q whenever the
# smaller prime factor is below 1000 (the challenge's constraint).
PRIMES = sieve_of_erat(1000)
def factor_n(n):
    """Split n = p*q into (p, n//p) using the precomputed prime table.

    Returns None implicitly when no precomputed prime divides n, mirroring
    the original fall-through behaviour.
    """
    return next(((p, n // p) for p in PRIMES if n % p == 0), None)
def extended_euclidean(totient, e):
    """Return the modular inverse of e modulo totient (gcd assumed to be 1)."""
    modulus = totient
    x, y = 0, 1
    u, v = 1, 0
    while totient != 0:
        quotient, remainder = divmod(e, totient)
        # Standard extended-Euclid coefficient update.
        x, y, u, v = u, v, x - u * quotient, y - v * quotient
        e, totient = totient, remainder
    # Normalise into the range (0, modulus].
    return y if y > 0 else y + modulus
def find_d(n, e):
    """Recover the RSA private exponent d from the public key (n, e)."""
    p, q = factor_n(n)
    phi = (p - 1) * (q - 1)
    return extended_euclidean(phi, e)
def main():
    """Read T lines of 'n e' pairs from stdin and print each private exponent."""
    for _ in range(int(input())):
        n, e = map(int, input().split())
        print(find_d(n, e))
# Run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
985,757 | 1a226796f2b990db34bfb496bb47cf62de88d977 | # Author: Dylan Tong, AWS
import json
from time import sleep
from urllib.parse import urlparse
import pandas as pd
import boto3
from sagemaker import clarify
from sagemaker import Session
# Module-level clients: created once per Lambda container and reused across
# invocations.
sm = boto3.client("sagemaker")
s3 = boto3.client("s3")
class TaskTimedOut(Exception):
    """Raised when the Lambda nears its own timeout while a job is still running."""
def get_first_matching_s3_key(bucket, prefix=''):
    """Return the key of the first S3 object under *prefix* (MaxKeys=1 listing).

    NOTE(review): when nothing matches, 'Contents' is absent from the
    ListObjectsV2 response and this raises KeyError -- confirm callers
    expect that failure mode rather than a None return.
    """
    kwargs = {'Bucket': bucket, 'Prefix': prefix, 'MaxKeys': 1}
    resp = s3.list_objects_v2(**kwargs)
    for obj in resp['Contents']:
        return obj['Key']
def get_samples(data_uri, num_samples):
    """Read the first *num_samples* CSV rows from the first object under *data_uri*."""
    parsed = urlparse(data_uri, allow_fragments=False)
    prefix = parsed.path.lstrip('/')
    if parsed.query:
        prefix = prefix + '?' + parsed.query
    first_key = get_first_matching_s3_key(parsed.netloc, prefix)
    return pd.read_csv(f"s3://{parsed.netloc}/{first_key}", nrows=num_samples)
def create_clarify_xai_job(event) :
    """Launch a SageMaker Clarify SHAP explainability job without waiting on it.

    All configuration is read from event["Input"]["Payload"]; the job name
    comes from xai-config, so reruns with the same payload target the same job.
    """
    role = event["Input"]["Payload"]["security-config"]["iam_role"]
    # NOTE(review): data_params is currently unused below -- confirm whether
    # data-config was meant to feed DataConfig instead of automl-config.
    data_params = event["Input"]["Payload"]["data-config"]
    ws_params = event["Input"]["Payload"]["workspace-config"]
    model_params = event["Input"]["Payload"]["model-config"]
    automl_params = event["Input"]["Payload"]["automl-config"]
    xai_params = event["Input"]["Payload"]["xai-config"]
    session = Session()
    clarify_processor = clarify.SageMakerClarifyProcessor(role=role,
                                                          instance_count=xai_params["instance_count"],
                                                          instance_type=xai_params["instance_type"],
                                                          sagemaker_session=session)
    output_uri = "s3://{}/{}/{}".format(ws_params["s3_bucket"],
                                        ws_params["s3_prefix"],
                                        xai_params["output_prefix"])
    shap_params = xai_params["shap-config"]
    num_samples = shap_params["num_samples"]
    # Sample rows (minus the trailing label column) serve as the SHAP baseline.
    df = get_samples(automl_params["data_uri"],shap_params["num_samples"])
    samples = df.iloc[:,:-1].values.tolist()
    columns = df.columns.to_list()
    shap_config = clarify.SHAPConfig(baseline=samples,
                                     num_samples=num_samples,
                                     agg_method=shap_params["agg_method"])
    data_config = clarify.DataConfig(s3_data_input_path=automl_params["data_uri"],
                                     s3_output_path=output_uri,
                                     label=automl_params["target_name"],
                                     headers=columns,
                                     dataset_type='text/csv')
    model_config = clarify.ModelConfig(model_name=model_params["model_name"],
                                       instance_type=model_params["instance_type"],
                                       instance_count=model_params["instance_count"],
                                       content_type='text/csv')
    # Fire-and-forget: monitor_job_status() polls for completion separately.
    clarify_processor.run_explainability(job_name=xai_params["job_name"],
                                         data_config=data_config,
                                         model_config=model_config,
                                         explainability_config=shap_config,
                                         wait=False,
                                         logs=False)
def monitor_job_status(job_name, context):
    """Poll the processing job until it reaches a terminal state.

    Sleeps between polls while enough Lambda time remains; raises
    TaskTimedOut (so the caller/Step Functions can retry) when the
    invocation would otherwise expire mid-poll.
    Returns {"status": <terminal status>}.
    """
    sleep_time = 60
    while True:
        status = sm.describe_processing_job(ProcessingJobName=job_name)["ProcessingJobStatus"]
        # All terminal states exit the same way; the original duplicated
        # this in two branches (with stray semicolons).
        if status in ('Completed', 'Failed', 'Stopped'):
            return {"status": status}
        # Only sleep when we can still afford another full poll afterwards.
        if context.get_remaining_time_in_millis() <= 2000 * sleep_time:
            raise TaskTimedOut("Task timed out.")
        sleep(sleep_time)
def lambda_handler(event, context):
    """Step Functions task: ensure the Clarify job exists, then wait for it.

    Idempotent across retries -- when the processing job already exists the
    creation step is skipped and execution goes straight to polling.
    """
    xai_params = event["Input"]["Payload"]["xai-config"]
    job_name = xai_params["job_name"]
    try :
        sm.describe_processing_job(ProcessingJobName = job_name)
    except Exception as e:
        # Broad on purpose: any describe failure is treated as "job absent",
        # triggering (re)creation.
        create_clarify_xai_job(event)
    results = monitor_job_status(job_name, context)
    event["Input"]["Payload"]["xai-config"]["job-results"] = results
    return event["Input"]["Payload"]
985,758 | 79442af38f0edbedc0931b2802d450dff77de2cd | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 10 18:14:48 2020
@author: norman marlier
"""
import tensorflow as tf
import tensorflow_probability as tfp
import json
import numpy as np
import sys
sys.path.append("../..")
from DexterousManipulation.model.object_model import ObjectDataset
class ObjectPrior():
    """Prior distribution for object, p(O).
    Discrete uniform distribution for object.
    An object O is defined as a dict with field:
        -"collision_mesh": the vhacd collision mesh file name
        -"visual_mesh": the visual mesh file name
        -"scaling": the size of the object (vec3)
        -"z": the height of the object
        -"base_pos": the initial position
        -"base_ori": the initial orientation
        -"volume": the volume of the collision mesh
        -"density": the density of the collision mesh
        -"mass": the "true" mass of the object
    """
    def __init__(self, json_config):
        """Constructor: load the dataset and build a uniform categorical prior."""
        self.object_dataset = ObjectDataset(json_config)
        # Equal probability for every object in the dataset.
        self.probs = [1/self.object_dataset.get_nb_obj()]\
            *self.object_dataset.get_nb_obj()
        # Categorical distribution for geometry
        self.geometry_distribution = tfp.distributions.Categorical(probs=self.probs)
    def get_obj(self, idx):
        """Get object dictionnary from idx."""
        if idx >= self.object_dataset.get_nb_obj():
            raise ValueError("idx is greater than the number of objects")
        return self.object_dataset.get_obj(idx)
    def get_idx(self, obj):
        """Return the dataset index of the given object dict."""
        return self.object_dataset.get_idx(obj)
    def sample(self, noise=True):
        """Draw one object uniformly; optionally perturb its scale and density.

        With noise, scale is jittered uniformly by +/-10% (isotropically) and
        density by additive Normal(0, 1) noise.
        """
        idx = self.geometry_distribution.sample(1).numpy()[0]
        # Copy so the perturbations below never mutate the dataset's entry.
        obj = self.object_dataset.get_obj(idx).copy()
        if noise:
            # Scaling factor
            scaling_factor = tfp.distributions.Uniform(0.9, 1.1).sample().numpy()*np.ones((3))
            #scaling_factor = np.ones((3))
            obj["scale"] = scaling_factor*obj["scale"][0]
            self.object_dataset.set_geometrical_attributs(obj)
            # Change density
            obj["density"] = tfp.distributions.Normal(obj["density"], 1.).sample().numpy()
        return obj
if __name__ == "__main__":
    # Smoke test: draw one perturbed object and echo its dataset index.
    p_obj = ObjectPrior("./egad.json")
    sample = p_obj.sample()
    print("Sample", sample)
    print("Idx", p_obj.get_idx(sample))
    #print(p_obj.get_obj(14))
|
985,759 | e77e8c759907274f7c9727e23faadb6ada128e7d | """Prints the content of a packet list."""
from fragscapy.modifications.mod import Mod
class Print(Mod):
    """Prints the content of a packet list.

    Args:
        *args: The arguments of the mods.

    Raises:
        ValueError: Unrecognized or incorrect number of parameters.

    Examples:
        >>> Print()
    """

    name = "Print"
    # Help text surfaced by the mod framework's usage output.
    doc = ("Prints the content of the packet list.\n"
           "print")
    # This mod accepts no parameters.
    _nb_args = 0

    def apply(self, pkt_list):
        """Prints the content of each packet and returns the list unchanged.
        See `Mod.apply` for more details."""
        pkt_list.display()
        return pkt_list
|
985,760 | 40094665448ac6f21464887292dc7a32d7521506 | print("Careers:")
# Demonstrate in-place list mutation: += extends at the end, insert() splices.
careers = ["engineer", "hacker", "doctor", "chemist"]
print(careers)
careers += ["physicist"]
print(careers)
careers.insert(2, "astronaut")
print(careers)
985,761 | 53483d8b621600c6c9b3e146ca4fe83fc04fec2d | import unittest
import socket
import json
import base64
import zlib
import gzip
import bs4
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import WebRequest
from . import testing_server
class TestPlainCreation(unittest.TestCase):
	"""WebGetRobust constructs successfully under each supported option set."""

	def test_plain_instantiation_1(self):
		fetcher = WebRequest.WebGetRobust()
		self.assertTrue(fetcher is not None)

	def test_plain_instantiation_2(self):
		fetcher = WebRequest.WebGetRobust(cloudflare=True)
		self.assertTrue(fetcher is not None)

	def test_plain_instantiation_3(self):
		fetcher = WebRequest.WebGetRobust(use_socks=True)
		self.assertTrue(fetcher is not None)
class TestSimpleFetch(unittest.TestCase):
	def setUp(self):
		# Fresh fetcher instance for every test.
		self.wg = WebRequest.WebGetRobust()
		# Configure mock server.
		self.mock_server_port, self.mock_server, self.mock_server_thread = testing_server.start_server(self, self.wg)
	def tearDown(self):
		# Stop the mock server and join its thread before dropping state.
		self.mock_server.shutdown()
		self.mock_server_thread.join()
		self.wg = None
def test_fetch_1(self):
page = self.wg.getpage("http://localhost:{}".format(self.mock_server_port))
self.assertEqual(page, 'Root OK?')
def test_fetch_decode_1(self):
# text/html content should be decoded automatically.
page = self.wg.getpage("http://localhost:{}/html-decode".format(self.mock_server_port))
self.assertEqual(page, 'Root OK?')
def test_fetch_soup_1(self):
# text/html content should be decoded automatically.
page = self.wg.getSoup("http://localhost:{}/html/real".format(self.mock_server_port))
self.assertEqual(page, bs4.BeautifulSoup('<html><body>Root OK?</body></html>', 'lxml'))
def test_fetch_soup_2(self):
page = self.wg.getSoup("http://localhost:{}/html-decode".format(self.mock_server_port))
self.assertEqual(page, bs4.BeautifulSoup('<html><body><p>Root OK?</p></body></html>', 'lxml'))
def test_fetch_soup_3(self):
# getSoup fails to fetch content that's not of content-type text/html
with self.assertRaises(WebRequest.ContentTypeError):
self.wg.getSoup("http://localhost:{}/binary_ctnt".format(self.mock_server_port))
def test_fetch_decode_json(self):
# text/html content should be decoded automatically.
page = self.wg.getJson("http://localhost:{}/json/valid".format(self.mock_server_port))
self.assertEqual(page, {'oh': 'hai'})
page = self.wg.getJson("http://localhost:{}/json/no-coding".format(self.mock_server_port))
self.assertEqual(page, {'oh': 'hai'})
with self.assertRaises(json.decoder.JSONDecodeError):
page = self.wg.getJson("http://localhost:{}/json/invalid".format(self.mock_server_port))
def test_fetch_compressed(self):
page = self.wg.getpage("http://localhost:{}/compressed/gzip".format(self.mock_server_port))
self.assertEqual(page, 'Root OK?')
page = self.wg.getpage("http://localhost:{}/compressed/deflate".format(self.mock_server_port))
self.assertEqual(page, 'Root OK?')
    # getFileAndName() returns (content_bytes, filename); the filename comes
    # either from the URL path or from a Content-Disposition header.
    def test_file_and_name_1(self):
        # Filename taken from the URL path when no Content-Disposition is sent.
        page, fn = self.wg.getFileAndName("http://localhost:{}/filename/path-only.txt".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'path-only.txt')

    def test_file_and_name_2(self):
        # Content-Disposition filename overrides the URL path.
        page, fn = self.wg.getFileAndName("http://localhost:{}/filename/content-disposition".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'lolercoaster.txt')

    def test_file_and_name_3(self):
        # Quoted Content-Disposition filename (variant 1).
        page, fn = self.wg.getFileAndName("http://localhost:{}/filename_mime/content-disposition-quotes-1".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'lolercoaster.html')

    def test_file_and_name_4(self):
        # Quoted Content-Disposition filename (variant 2).
        page, fn = self.wg.getFileAndName("http://localhost:{}/filename_mime/content-disposition-quotes-2".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'lolercoaster.html')

    def test_file_and_name_5(self):
        # Quoted filename containing spaces is preserved intact.
        page, fn = self.wg.getFileAndName("http://localhost:{}/filename_mime/content-disposition-quotes-spaces-1".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'loler coaster.html')

    def test_file_and_name_6(self):
        page, fn = self.wg.getFileAndName("http://localhost:{}/filename_mime/content-disposition-quotes-spaces-2".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'loler coaster.html')

    def test_file_and_name_7(self):
        # Same fetch, but passing the URL via the requestedUrl keyword.
        page, fn = self.wg.getFileAndName(requestedUrl="http://localhost:{}/filename_mime/content-disposition-quotes-spaces-2".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'loler coaster.html')

    def test_file_and_name_8(self):
        # Keyword URL plus additional request headers.
        page, fn = self.wg.getFileAndName(requestedUrl="http://localhost:{}/filename_mime/content-disposition-quotes-spaces-2".format(self.mock_server_port), addlHeaders={"Referer" : 'http://www.example.org'})
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'loler coaster.html')

    def test_file_and_name_9(self):
        # Positional URL plus additional request headers.
        page, fn = self.wg.getFileAndName("http://localhost:{}/filename_mime/content-disposition-quotes-spaces-2".format(self.mock_server_port), addlHeaders={"Referer" : 'http://www.example.org'})
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'loler coaster.html')

    def test_file_and_name_10(self):
        # A trailing-slash URL with no disposition header yields an empty filename.
        page, fn = self.wg.getFileAndName("http://localhost:{}/filename/path-only-trailing-slash/".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, '')
    # getFileNameMime() returns (content, filename, mimetype).
    def test_file_name_mime_1(self):
        # Filename from URL path; mimetype from the response headers.
        page, fn, mimet = self.wg.getFileNameMime(
            "http://localhost:{}/filename_mime/path-only.txt".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'path-only.txt')
        self.assertEqual(mimet, 'text/plain')

    def test_file_name_mime_2(self):
        # Content-Disposition filename wins over the URL path.
        page, fn, mimet = self.wg.getFileNameMime(
            "http://localhost:{}/filename_mime/content-disposition".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'lolercoaster.txt')
        self.assertEqual(mimet, 'text/plain')

    def test_file_name_mime_3(self):
        # The disposition filename's .html suffix does not change the
        # reported mimetype, which stays text/plain from the headers.
        page, fn, mimet = self.wg.getFileNameMime(
            "http://localhost:{}/filename_mime/content-disposition-html-suffix".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, 'lolercoaster.html')
        self.assertEqual(mimet, 'text/plain')

    def test_file_name_mime_5(self):
        # Trailing-slash URL: empty filename, mimetype still reported.
        page, fn, mimet = self.wg.getFileNameMime(
            "http://localhost:{}/filename/path-only-trailing-slash/".format(self.mock_server_port))
        self.assertEqual(page, b'LOLWAT?')
        self.assertEqual(fn, '')
        self.assertEqual(mimet, 'text/plain')

    def test_file_name_mime_4(self):
        # An explicit text/html mimetype causes the body to be decoded to
        # str (contrast the bytes results above).
        page, fn, mimet = self.wg.getFileNameMime(
            "http://localhost:{}/filename_mime/explicit-html-mime".format(self.mock_server_port))
        self.assertEqual(page, 'LOLWAT?')
        self.assertEqual(fn, 'lolercoaster.html')
        self.assertEqual(mimet, 'text/html')
    # getHead() resolves a URL to its final location; without redirects the
    # input URL is returned unchanged.
    def test_get_head_1(self):
        inurl_1 = "http://localhost:{}".format(self.mock_server_port)
        nurl_1 = self.wg.getHead(inurl_1)
        self.assertEqual(inurl_1, nurl_1)

    def test_get_head_2(self):
        inurl_2 = "http://localhost:{}/filename_mime/content-disposition".format(self.mock_server_port)
        nurl_2 = self.wg.getHead(inurl_2)
        self.assertEqual(inurl_2, nurl_2)
    # Redirect behaviour: getpage() follows redirects and returns the final
    # body; getHead() follows them and returns the final URL; malformed or
    # looping redirects raise FetchFailureError.
    def test_redirect_handling_1(self):
        inurl_1 = "http://localhost:{}/redirect/from-1".format(self.mock_server_port)
        ctnt_1 = self.wg.getpage(inurl_1)
        self.assertEqual(ctnt_1, b"Redirect-To-1")

    def test_redirect_handling_2(self):
        inurl_2 = "http://localhost:{}/redirect/from-2".format(self.mock_server_port)
        ctnt_2 = self.wg.getpage(inurl_2)
        self.assertEqual(ctnt_2, b"Redirect-To-2")

    def test_redirect_handling_3(self):
        inurl_3 = "http://localhost:{}/redirect/from-1".format(self.mock_server_port)
        outurl_3 = "http://localhost:{}/redirect/to-1".format(self.mock_server_port)
        nurl_3 = self.wg.getHead(inurl_3)
        self.assertEqual(outurl_3, nurl_3)

    def test_redirect_handling_4(self):
        inurl_4 = "http://localhost:{}/redirect/from-2".format(self.mock_server_port)
        outurl_4 = "http://localhost:{}/redirect/to-2".format(self.mock_server_port)
        nurl_4 = self.wg.getHead(inurl_4)
        self.assertEqual(outurl_4, nurl_4)

    def test_redirect_handling_5(self):
        # This is a redirect without the actual redirect
        with self.assertRaises(WebRequest.FetchFailureError):
            inurl_5 = "http://localhost:{}/redirect/bad-1".format(self.mock_server_port)
            self.wg.getHead(inurl_5)

    def test_redirect_handling_6(self):
        # This is a infinitely recursive redirect.
        with self.assertRaises(WebRequest.FetchFailureError):
            inurl_6 = "http://localhost:{}/redirect/bad-2".format(self.mock_server_port)
            self.wg.getHead(inurl_6)

    def test_redirect_handling_7(self):
        # This is a infinitely recursive redirect.
        with self.assertRaises(WebRequest.FetchFailureError):
            inurl_6 = "http://localhost:{}/redirect/bad-3".format(self.mock_server_port)
            self.wg.getHead(inurl_6)

    def test_redirect_handling_8(self):
        inurl_7 = "http://localhost:{}/redirect/from-3".format(self.mock_server_port)
        # Assumes localhost seems to resolve to the listening address (here it's 0.0.0.0). Is this ever not true? IPv6?
        outurl_7 = "http://0.0.0.0:{}/".format(self.mock_server_port)
        nurl_7 = self.wg.getHead(inurl_7)
        self.assertEqual(outurl_7, nurl_7)
    # For the auth tests, we have to restart the test-server with the wg that's configured for password management
    def test_http_auth_1(self):
        # Correct credentials for this host:port -> authenticated body.
        self.mock_server.shutdown()
        self.mock_server_thread.join()
        self.wg = None
        new_port_1 = testing_server.get_free_port()
        wg_1 = WebRequest.WebGetRobust(creds=[("localhost:{}".format(new_port_1), "lol", "wat")])
        # Configure mock server.
        new_port_1, self.mock_server, self.mock_server_thread = testing_server.start_server(self, wg_1, port_override=new_port_1)
        page = wg_1.getpage("http://localhost:{}/password/expect".format(new_port_1))
        self.assertEqual(page, b'Password Ok?')

    def test_http_auth_2(self):
        # Wrong password -> the server's rejection body, no exception raised.
        self.mock_server.shutdown()
        self.mock_server_thread.join()
        self.wg = None
        new_port_2 = testing_server.get_free_port()
        wg_2 = WebRequest.WebGetRobust(creds=[("localhost:{}".format(new_port_2), "lol", "nope")])
        # Configure mock server.
        new_port_2, self.mock_server, self.mock_server_thread = testing_server.start_server(self, wg_2, port_override=new_port_2)
        page = wg_2.getpage("http://localhost:{}/password/expect".format(new_port_2))
        self.assertEqual(page, b'Password Bad!')
    # getItem() returns (content, filename, mimetype); content is decoded to
    # str only when an explicit text mimetype is present.
    def test_get_item_1(self):
        inurl_1 = "http://localhost:{}".format(self.mock_server_port)
        content_1, fileN_1, mType_1 = self.wg.getItem(inurl_1)
        self.assertEqual(content_1, 'Root OK?')
        self.assertEqual(fileN_1, '')
        self.assertEqual(mType_1, "text/html")

    def test_get_item_2(self):
        inurl_2 = "http://localhost:{}/filename_mime/content-disposition".format(self.mock_server_port)
        content_2, fileN_2, mType_2 = self.wg.getItem(inurl_2)
        # Lack of an explicit mimetype makes this not get decoded
        self.assertEqual(content_2, b'LOLWAT?')
        self.assertEqual(fileN_2, 'lolercoaster.txt')
        self.assertEqual(mType_2, None)

    def test_get_item_3(self):
        inurl_3 = "http://localhost:{}/filename/path-only.txt".format(self.mock_server_port)
        content_3, fileN_3, mType_3 = self.wg.getItem(inurl_3)
        self.assertEqual(content_3, b'LOLWAT?')
        self.assertEqual(fileN_3, 'path-only.txt')
        self.assertEqual(mType_3, None)
    def test_get_cookies_1(self):
        # End-to-end cookie round-trip: start with no cookies, visit a page
        # that sets one, then confirm it is forwarded on subsequent requests.
        inurl_1 = "http://localhost:{}/cookie_test".format(self.mock_server_port)
        inurl_2 = "http://localhost:{}/cookie_require".format(self.mock_server_port)
        self.wg.clearCookies()
        cookies = self.wg.getCookies()
        self.assertEqual(list(cookies), [])
        # Without the cookie the server reports it as missing.
        page_resp_nocook = self.wg.getpage(inurl_2)
        self.assertEqual(page_resp_nocook, '<html><body>Cookie is missing</body></html>')
        # Fetching /cookie_test sets the cookie in the jar.
        _ = self.wg.getpage(inurl_1)
        cookies = self.wg.getCookies()
        print(cookies)
        # The stored cookie must now be sent automatically.
        page_resp_cook = self.wg.getpage(inurl_2)
        self.assertEqual(page_resp_cook, '<html><body>Cookie forwarded properly!</body></html>')
|
985,762 | 2f6c6692bbd3f47f98b9dbf38c8fc9cabc2506f4 | import pymysql
class roleAssign:
    """Synchronise MySQL users and default roles with the SAP AGR_USERS table.

    database name: USER_MANAGER
    SAP table name: AGR_USERS with 2 attributes UNAME(user), AGR_NAME(role)
    """

    def conn(self):
        """Open and return a MySQL connection.

        NOTE(review): credentials are hard-coded (root / empty password);
        move these to configuration before any production use.
        """
        connection = pymysql.connect(host='localhost', user='root',
                                     password='', charset="utf8mb4")
        return connection

    @staticmethod
    def _ident(name):
        """Sanitise a user/role name for embedding in DDL statements.

        MySQL cannot bind identifiers as query parameters in CREATE USER /
        GRANT / REVOKE / SET DEFAULT ROLE, so quote and escape characters
        are stripped to block SQL injection through names originating from
        the SAP table.
        """
        return str(name).replace("'", "").replace("`", "").replace("\\", "")

    def roleMaping(self, c):
        """Apply SAP role assignments to MySQL over connection *c*.

        Three synchronisation passes:
        1. create users present in SAP but missing from MySQL;
        2. update default roles that changed in SAP;
        3. drop users (and revoke their role) that were removed from SAP.
        """
        cursor = c.cursor()
        # --- pass 1: create new users (in SAP, absent from MySQL) ---
        select_sap = "SELECT uname,agr_name FROM user_manager.agr_users as l LEFT JOIN mysql.default_roles as r on(l.uname=r.user) WHERE uname not in(SELECT user FROM mysql.default_roles)"
        cursor.execute(select_sap)
        rows_insert = cursor.fetchall()
        rowcount = cursor.rowcount
        if rowcount > 0:
            for row in rows_insert:
                user = self._ident(row[0])
                role = self._ident(row[1])
                cursor.execute("CREATE USER '" + user + "'@'localhost'")
                cursor.execute("GRANT '" + role + "' TO '" + user + "'@'localhost'")
                cursor.execute("SET DEFAULT ROLE '" + role + "' TO '" + user + "'@'localhost' ")
                # one commit per user (the original committed after every
                # statement, including read-only SELECTs)
                c.commit()
            print("\n\n"+str(rowcount)+" New User Added successfully\n")
        # --- pass 2: update role of user if role changed in SAP ---
        selectUpdate = "SELECT uname,agr_name FROM user_manager.agr_users AS l LEFT JOIN mysql.default_roles AS r ON(l.uname=r.user) WHERE l.uname = r.user AND l.agr_name != r.default_role_user"
        cursor.execute(selectUpdate)
        rows_update = cursor.fetchall()
        rowcount2 = cursor.rowcount
        if rowcount2 > 0:
            for row in rows_update:
                user = self._ident(row[0])
                role = self._ident(row[1])
                # Data values CAN be parameterised, so do it here.
                cursor.execute(
                    "SELECT default_role_user FROM mysql.default_roles WHERE user = %s",
                    (row[0],))
                current_role = cursor.fetchone()
                cursor.execute("REVOKE '" + self._ident(current_role[0]) + "' FROM '" + user + "'@'localhost'")
                cursor.execute("GRANT '" + role + "' TO '" + user + "'@'localhost'")
                cursor.execute("SET DEFAULT ROLE '" + role + "' TO '" + user + "'@'localhost'")
                c.commit()
            print(str(rowcount2)+" New User Updated successfully\n")
        # --- pass 3: delete user and assigned role if user is not in SAP ---
        selectD = "SELECT user,default_role_user FROM mysql.default_roles WHERE user NOT IN(SELECT uname FROM user_manager.agr_users)"
        cursor.execute(selectD)
        rows_delete = cursor.fetchall()
        rowcount3 = cursor.rowcount
        if rowcount3 > 0:
            for row in rows_delete:
                user = self._ident(row[0])
                role = self._ident(row[1])
                cursor.execute("REVOKE '" + role + "' From '" + user + "'@'localhost'")
                cursor.execute("DROP user '" + user + "'@'localhost'")
                c.commit()
            print(str(rowcount3)+" User Deleted successfully\n")
        print("\nUSER Role mapping successfull\n\n")
if __name__ == "__main__":
    # Guarded entry point: run the SAP -> MySQL synchronisation only when
    # executed as a script, so importing this module has no side effects.
    userRoles = roleAssign()
    c = userRoles.conn()
    userRoles.roleMaping(c)
|
985,763 | 964a292b89f425450605f7a93807d54008e3effb | """Gulp installer class."""
import subprocess
from grow.sdk.installers import base_installer
class GulpInstaller(base_installer.BaseInstaller):
    """Gulp installer."""

    KIND = 'gulp'

    @property
    def should_run(self):
        """Should the installer run?"""
        return self.pod.file_exists('/gulpfile.js')

    def check_prerequisites(self):
        """Check if required prerequisites are installed or available."""
        status_command = 'gulp --version > /dev/null 2>&1'
        # Exit code 127 is the shell's "command not found".
        exit_code = subprocess.call(
            status_command, **self.subprocess_args(shell=True))
        if exit_code != 127:
            return
        install_commands = [
            'Either add gulp to package.json or install globally using:'
            ' `sudo npm install -g gulp`']
        raise base_installer.MissingPrerequisiteError(
            'The `gulp` command was not found.', install_commands=install_commands)

    def install(self):
        """Install dependencies."""
        pass
|
985,764 | 8488b57db819eced67e0e0a2bafbbcbac2bc6749 | from causal_world.task_generators.base_task import BaseTask
import numpy as np
import copy
class Stacking2TaskGenerator(BaseTask):
    """Task generator for stacking two blocks into a two-block tower."""

    # NOTE(review): ``reference`` uses a mutable dict as its default value;
    # instances would share it if anything mutates it in place — confirm.
    def __init__(self, reference={"mass":[], "size":[]},
                 mode=0,
                 variables_space='space_a_b',
                 fractional_reward_weight=1,
                 dense_reward_weights=np.array([750, 250,
                                                250, 125,
                                                0.005]),
                 activate_sparse_reward=False,
                 tool_block_mass=0.02,
                 tool_block_shape = "cube",
                 tool_block_size=0.065,
                 joint_positions=None,
                 tool_block_1_position=np.array([0, 0, 0.0325]),
                 tool_block_1_orientation=np.array([0, 0, 0, 1]),
                 tool_block_2_position=np.array([0.01, 0.08, 0.0325]),
                 tool_block_2_orientation=np.array([0, 0, 0, 1]),
                 goal_position=np.array([-0.06, -0.06, 0.0325]),
                 goal_orientation=np.array([0, 0, 0, 1])):
        """
        This task generates a task for stacking 2 blocks above each other.

        Note: it belongs to the same shape family of towers, we only provide a
        specific task generator for it to be able to do reward engineering
        and to reproduce the baselines for it in an easy way.

        :param variables_space: which intervention space to sample from.
        :param fractional_reward_weight: weight of the fractional reward term.
        :param dense_reward_weights: weights of the five dense reward terms
                                     computed in ``_calculate_dense_rewards``.
        :param activate_sparse_reward: whether the sparse reward is used.
        :param tool_block_mass: mass of each tool block.
        :param joint_positions: initial robot joint positions.
        :param tool_block_1_position: initial position of the first block.
        :param tool_block_1_orientation: initial quaternion of the first block.
        :param tool_block_2_position: initial position of the second block.
        :param tool_block_2_orientation: initial quaternion of the second block.
        """
        super().__init__(task_name="stacking2",
                         variables_space=variables_space,
                         fractional_reward_weight=fractional_reward_weight,
                         dense_reward_weights=dense_reward_weights,
                         activate_sparse_reward=activate_sparse_reward,
                         reference=reference,
                         mode=mode)
        # Robot observations exposed for this task.
        self._task_robot_observation_keys = ["time_left_for_task",
                                             "joint_positions",
                                             "joint_velocities",
                                             "end_effector_positions"]
        self._task_params["tool_block_mass"] = tool_block_mass
        self._task_params["tool_block_shape"] = tool_block_shape
        self._task_params["tool_block_size"] = tool_block_size
        self._task_params["joint_positions"] = joint_positions
        self._task_params["tool_block_1_position"] = tool_block_1_position
        self._task_params["tool_block_1_orientation"] = tool_block_1_orientation
        self._task_params["tool_block_2_position"] = tool_block_2_position
        self._task_params["tool_block_2_orientation"] = tool_block_2_orientation
        self._task_params["goal_position"] = goal_position
        self._task_params["goal_orientation"] = goal_orientation
        # NOTE(review): duplicate of the tool_block_size assignment above.
        self._task_params["tool_block_size"] = tool_block_size
        # Previous-step state cached for the delta-based dense rewards.
        self.previous_tool_block_1_position = None
        self.previous_tool_block_2_position = None
        self.previous_end_effector_positions = None
        self.previous_joint_velocities = None

    def get_description(self):
        """
        :return: (str) a human-readable description of the task.
        """
        return "Task where the goal shape is a tower of two blocks"

    def _set_up_stage_arena(self):
        """
        Create the two rigid tool blocks and the two silhouette goal blocks
        (goal block 2 sits one block-size above goal block 1), and register
        the stage observation keys.

        :return:
        """
        creation_dict = {
            'name': "tool_block_1",
            'shape': "cube",
            'initial_position': self._task_params["tool_block_1_position"],
            'initial_orientation': self._task_params["tool_block_1_orientation"],
            'mass': self._task_params["tool_block_mass"],
            'size': np.array([self._task_params["tool_block_size"], self._task_params["tool_block_size"], self._task_params["tool_block_size"]]),
        }
        self._stage.add_rigid_general_object(**creation_dict)
        creation_dict = {
            'name': "tool_block_2",
            'shape': "cube",
            'initial_position': self._task_params["tool_block_2_position"],
            'initial_orientation': self._task_params["tool_block_2_orientation"],
            'mass': self._task_params["tool_block_mass"],
            'size': np.array([self._task_params["tool_block_size"], self._task_params["tool_block_size"], self._task_params["tool_block_size"]]),
        }
        self._stage.add_rigid_general_object(**creation_dict)
        creation_dict = {
            'name': "goal_block_1",
            'shape': "cube",
            'position': self._task_params["goal_position"],
            'orientation': self._task_params["goal_orientation"]
        }
        self._stage.add_silhoutte_general_object(**creation_dict)
        # Goal block 2 is stacked directly on top of goal block 1.
        goal_block_2_position = copy.deepcopy(np.array(self._task_params["goal_position"]))
        goal_block_2_position[2] += self._task_params["tool_block_size"]
        creation_dict = {
            'name': "goal_block_2",
            'shape': "cube",
            'position': goal_block_2_position,
            'orientation': self._task_params["goal_orientation"]
        }
        self._stage.add_silhoutte_general_object(**creation_dict)
        self._task_stage_observation_keys = [
            "tool_block_1_type", "tool_block_1_size",
            "tool_block_1_cartesian_position", "tool_block_1_orientation",
            "tool_block_1_linear_velocity", "tool_block_1_angular_velocity",
            "tool_block_2_type", "tool_block_2_size",
            "tool_block_2_cartesian_position", "tool_block_2_orientation",
            "tool_block_2_linear_velocity", "tool_block_2_angular_velocity",
            "goal_block_1_type", "goal_block_1_size",
            "goal_block_1_cartesian_position", "goal_block_1_orientation",
            "goal_block_2_type", "goal_block_2_size",
            "goal_block_2_cartesian_position", "goal_block_2_orientation"
        ]
        return

    def _calculate_dense_rewards(self, desired_goal, achieved_goal):
        """
        Compute the five dense reward deltas for one step.

        Reward slots (weighted by ``dense_reward_weights``):
        0: end effector moving closer to the active block (block 1 until it
           is within 2 cm of its goal, block 2 afterwards);
        1: block 1 moving closer to goal block 1;
        2: block 2 gaining height toward goal block 2 (zeroed once block 2
           is horizontally aligned above block 1);
        3: horizontal alignment of block 2 over block 1, active only once
           block 2 is above the goal height;
        4: penalty on the change in joint velocities.

        :param desired_goal:
        :param achieved_goal:
        :return: (rewards, update_task_info) where update_task_info carries
                 the current state cached for the next step's deltas.
        """
        rewards = [0.0] * 5
        block_position_1 = self._stage.get_object_state('tool_block_1',
                                                        'cartesian_position')
        block_position_2 = self._stage.get_object_state('tool_block_2',
                                                        'cartesian_position')
        goal_block_1_position = self._stage.get_object_state('goal_block_1',
                                                             'cartesian_position')
        goal_block_2_position = self._stage.get_object_state('goal_block_2',
                                                             'cartesian_position')
        joint_velocities = self._robot.get_latest_full_state()['velocities']
        end_effector_positions = \
            self._robot.get_latest_full_state()['end_effector_positions']
        end_effector_positions = end_effector_positions.reshape(-1, 3)
        # Block 1 counts as placed once it is within 2 cm of its goal.
        lower_block_positioned = False
        if np.linalg.norm(block_position_1 - goal_block_1_position) < 0.02:
            lower_block_positioned = True
        if not lower_block_positioned:
            # Phase 1: approach block 1 and push it toward goal 1.
            current_distance_from_block = np.linalg.norm(end_effector_positions -
                                                         block_position_1)
            previous_distance_from_block = np.linalg.norm(
                self.previous_end_effector_positions -
                self.previous_tool_block_1_position)
            rewards[0] = previous_distance_from_block - current_distance_from_block
            previous_dist_to_goal = np.linalg.norm(goal_block_1_position -
                                                   self.previous_tool_block_1_position)
            current_dist_to_goal = np.linalg.norm(goal_block_1_position - block_position_1)
            rewards[1] = previous_dist_to_goal - current_dist_to_goal
        else:
            # Phase 2: block 1 placed; approach block 2 instead.
            current_distance_from_block = np.linalg.norm(end_effector_positions -
                                                         block_position_2)
            previous_distance_from_block = np.linalg.norm(
                self.previous_end_effector_positions -
                self.previous_tool_block_2_position)
            rewards[0] = previous_distance_from_block - current_distance_from_block
        # Block 2 counts as aligned when within 5 mm horizontally of block 1.
        block_2_above_block_1 = False
        if np.linalg.norm(block_position_1[:2] - block_position_2[:2]) < 0.005:
            block_2_above_block_1 = True
        previous_block_to_goal_height = abs(self.previous_tool_block_2_position[2] -
                                            goal_block_2_position[2])
        current_block_to_goal_height = abs(block_position_2[2] - goal_block_2_position[2])
        if not block_2_above_block_1:
            rewards[2] = (previous_block_to_goal_height -
                          current_block_to_goal_height)
        else:
            rewards[2] = 0.0
        if block_position_2[2] > goal_block_2_position[2]:
            # if block 2 high enough activate horizontal reward
            previous_block_1_to_block_2 = np.linalg.norm(
                self.previous_tool_block_1_position[:2] -
                self.previous_tool_block_2_position[:2])
            current_block_1_to_block_2 = np.linalg.norm(
                block_position_1[:2] -
                block_position_2[:2])
            rewards[3] = previous_block_1_to_block_2 - current_block_1_to_block_2
        else:
            rewards[3] = 0.0
        # Penalise jerky motion via the joint-velocity change.
        rewards[4] = -np.linalg.norm(joint_velocities -
                                     self.previous_joint_velocities)
        update_task_info = {
            'current_end_effector_positions': end_effector_positions,
            'current_tool_block_1_position': block_position_1,
            'current_tool_block_2_position': block_position_2,
            'current_velocity': joint_velocities
        }
        return rewards, update_task_info

    def _update_task_state(self, update_task_info):
        """
        Cache the current step's state as "previous" for the next step.

        :param update_task_info: dict produced by ``_calculate_dense_rewards``.
        :return:
        """
        self.previous_end_effector_positions = \
            update_task_info['current_end_effector_positions']
        self.previous_tool_block_1_position = \
            update_task_info['current_tool_block_1_position']
        self.previous_tool_block_2_position = \
            update_task_info['current_tool_block_2_position']
        self.previous_joint_velocities = \
            update_task_info['current_velocity']
        return

    def _set_task_state(self):
        """
        Initialise the "previous" state caches from the current robot and
        stage state (e.g. at episode start).

        :return:
        """
        self.previous_end_effector_positions = \
            self._robot.get_latest_full_state()['end_effector_positions']
        self.previous_end_effector_positions = \
            self.previous_end_effector_positions.reshape(-1, 3)
        self.previous_tool_block_1_position = \
            self._stage.get_object_state('tool_block_1', 'cartesian_position')
        self.previous_tool_block_2_position = \
            self._stage.get_object_state('tool_block_2', 'cartesian_position')
        self.previous_joint_velocities = \
            self._robot.get_latest_full_state()['velocities']
        return

    def _set_intervention_space_a(self):
        """
        Extend intervention space A with a combined 'goal_tower' variable and
        remove the per-object variables the tower supersedes.

        :return:
        """
        super(Stacking2TaskGenerator, self)._set_intervention_space_a()
        self._intervention_space_a['goal_tower'] = dict()
        self._intervention_space_a['goal_tower']['cylindrical_position'] = \
            copy.deepcopy(self._intervention_space_a['goal_block_1']
                          ['cylindrical_position'])
        # Tower height bound: twice the single-block goal height.
        self._intervention_space_a['goal_tower']['cylindrical_position'][0][-1] = \
            self._task_params["goal_position"][-1] * 2.0
        self._intervention_space_a['goal_tower']['cylindrical_position'][1][
            -1] = \
            self._task_params["goal_position"][-1] * 2.0
        self._intervention_space_a['goal_tower']['euler_orientation'] = \
            copy.deepcopy(self._intervention_space_a['goal_block_1']
                          ['euler_orientation'])
        # Individual goal blocks are driven through 'goal_tower' instead.
        for visual_object in self._stage.get_visual_objects():
            del self._intervention_space_a[visual_object]['size']
            del self._intervention_space_a[visual_object]['euler_orientation']
            del self._intervention_space_a[visual_object]['cylindrical_position']
        for rigid_object in self._stage.get_rigid_objects():
            del self._intervention_space_a[rigid_object]['size']
        return

    def _set_intervention_space_b(self):
        """
        Same adjustments as ``_set_intervention_space_a`` for space B.

        :return:
        """
        super(Stacking2TaskGenerator, self)._set_intervention_space_b()
        self._intervention_space_b['goal_tower'] = dict()
        self._intervention_space_b['goal_tower']['cylindrical_position'] = \
            copy.deepcopy(self._intervention_space_b['goal_block_1']
                          ['cylindrical_position'])
        self._intervention_space_b['goal_tower']['cylindrical_position'][0][-1] = \
            self._task_params["goal_position"][-1] * 2.0
        self._intervention_space_b['goal_tower']['cylindrical_position'][1][-1] = \
            self._task_params["goal_position"][-1] * 2.0
        self._intervention_space_b['goal_tower']['euler_orientation'] = \
            copy.deepcopy(self._intervention_space_b['goal_block_1']
                          ['euler_orientation'])
        for visual_object in self._stage.get_visual_objects():
            del self._intervention_space_b[visual_object]['size']
            del self._intervention_space_b[visual_object]['euler_orientation']
            del self._intervention_space_b[visual_object][
                'cylindrical_position']
        for rigid_object in self._stage.get_rigid_objects():
            del self._intervention_space_b[rigid_object]['size']
        return

    def get_task_generator_variables_values(self):
        """
        :return: dict with the current 'goal_tower' pose, read from the
                 lower goal block.
        """
        return {
            'goal_tower': {'cylindrical_position':
                               self._stage.get_object_state('goal_block_1',
                                                            'cylindrical_position'),
                           'euler_orientation':
                               self._stage.get_object_state('goal_block_1',
                                                            'euler_orientation')
                           }
        }

    def apply_task_generator_interventions(self, interventions_dict):
        """
        Translate a 'goal_tower' intervention into interventions on the two
        goal blocks: block 1 at half the tower height, block 2 at 3/2 of it.

        :param interventions_dict:
        :return: (success, reset_observation_space)
        """
        if len(interventions_dict) == 0:
            return True, False
        reset_observation_space = False
        if 'goal_tower' in interventions_dict:
            new_interventions_dict = dict()
            new_interventions_dict['goal_block_1'] = dict()
            new_interventions_dict['goal_block_2'] = dict()
            if 'cylindrical_position' in interventions_dict['goal_tower']:
                new_interventions_dict['goal_block_1']['cylindrical_position'] = \
                    copy.deepcopy(interventions_dict['goal_tower']['cylindrical_position'])
                new_interventions_dict['goal_block_2'][
                    'cylindrical_position'] = \
                    copy.deepcopy(interventions_dict['goal_tower'][
                                      'cylindrical_position'])
                new_interventions_dict['goal_block_1']['cylindrical_position'][-1] \
                    = interventions_dict['goal_tower']['cylindrical_position'][-1] / 2.0
                new_interventions_dict['goal_block_2']['cylindrical_position'][
                    -1] \
                    = interventions_dict['goal_tower'][
                          'cylindrical_position'][-1] * (3 / 2.0)
            elif 'euler_orientation' in interventions_dict['goal_tower']:
                # Orientation applies identically to both goal blocks.
                new_interventions_dict['goal_block_1']['euler_orientation'] = \
                    copy.deepcopy(
                        interventions_dict['goal_tower']['euler_orientation'])
                new_interventions_dict['goal_block_2'][
                    'euler_orientation'] = \
                    copy.deepcopy(interventions_dict['goal_tower'][
                                      'euler_orientation'])
            else:
                raise Exception("this task generator variable "
                                "is not yet defined")
            self._stage.apply_interventions(new_interventions_dict)
        else:
            raise Exception("this task generator variable "
                            "is not yet defined")
        return True, reset_observation_space

    def sample_new_goal(self, level=None):
        """
        Sample a new 'goal_tower' pose uniformly from the active
        intervention space.

        :param level: unused.
        :return: intervention dict suitable for
                 ``apply_task_generator_interventions``.
        """
        intervention_space = self.get_variable_space_used()
        intervention_dict = dict()
        intervention_dict['goal_tower'] = dict()
        intervention_dict['goal_tower']['cylindrical_position'] = \
            np.random.uniform(
                intervention_space['goal_tower']['cylindrical_position'][0],
                intervention_space['goal_tower']['cylindrical_position'][1])
        intervention_dict['goal_tower']['euler_orientation'] = \
            np.random.uniform(
                intervention_space['goal_tower']['euler_orientation'][0],
                intervention_space['goal_tower']['euler_orientation'][1])
        return intervention_dict
|
985,765 | ba3241ffb66d3fc1dc942ba509e8ef60158849a2 | # Download all Media from Wordpress Website
# 0.0.1 - Tested to support images.
# Written by Quinston Pimenta
# This script exists purely for educational reasons.
# The writer of this script 'Quinston Pimenta' is not liable for
# the misuse of this script.
import os
import time
import re
import urllib.parse
import requests
from bs4 import BeautifulSoup
# Target WordPress site to mirror media from.
website_url = 'http://3dsignindia.com'
time_between_download_requests = 1 # second - use this to not wreck havoc on the website.
# Local destination; downloads mirror the path after 'wp-content/'.
output_directory = 'C:/Users/Quinston/Documents/wordpress-media-download-python-script'
# Matches WordPress-generated resized variants like '-300x200.jpg'.
ignore_sizes_regex = r'-\d+x\d+.[a-z]+'
def traverse_url_recursive(url, sleepTime=1):
    """Recursively crawl *url*, downloading every non-HTML resource found.

    HTML responses are parsed for <a> links which are traversed in turn;
    any other content type is written under ``output_directory``,
    mirroring the path after 'wp-content/'. Sleeps *sleepTime* seconds
    before each download to avoid hammering the site. Errors on a single
    URL are reported and skipped (best-effort crawl).
    """
    # ignore the images that were resized by wordpress during upload
    if re.search(ignore_sizes_regex, str(url)) is not None:
        return
    try:
        r = requests.head(url)
    except Exception:
        print('The URL could not be reached')
        return
    if 'html' in str(r.headers['Content-Type']):
        try:
            r = requests.get(url)
        except Exception:
            print('The URL could not be reached')
            return
        try:
            html_parsed = BeautifulSoup(r.text, 'html.parser')
        except Exception:
            print('Invalid HTML')
            return
        for links in html_parsed.find_all('a'):
            # Skip the Apache directory-index header links.
            if links.get_text() not in ('Name', 'Last modified', 'Size',
                                        'Description', 'Parent Directory'):
                try:
                    traverse_url_recursive(urllib.parse.urljoin(url, links['href']))
                except Exception:
                    print('The link does not contain href.')
    else:
        time.sleep(sleepTime)
        file_path = os.path.join(output_directory, url.split('wp-content/')[1])
        if not os.path.exists(os.path.dirname(file_path)):
            try:
                os.makedirs(os.path.dirname(file_path))
            except Exception:
                print('The directory could not be created.')
        try:
            try:
                r = requests.get(url)
            except Exception:
                print('The URL could not be reached')
                return
            # BUG FIX: the original used open(...).write(...) and never
            # closed the handle; 'with' guarantees it is closed.
            with open(file_path, 'wb') as out_file:
                out_file.write(r.content)
            print('File downloaded: ', file_path)
        except Exception:
            print('There was an error opening the file.')
traverse_url_recursive(urllib.parse.urljoin(website_url, 'wp-content/uploads/'), time_between_download_requests)
|
985,766 | a0cbbfe7753d0d2c4a96027a23625c5169844c07 | from osgeo import gdal
import sys
# Python 2 script: open a GeoTIFF and warp it to add an alpha band,
# treating 0 as the source nodata value.
# NOTE(review): the name ``input`` shadows the builtin.
input = 'NPP_21616_151230010535.tif'
#output =
ds = gdal.Open(input)
if ds is None:
    print 'Unable to open INPUT.tif'
    sys.exit(1)
# The commented-out section below extracted a single band and picked the
# nodata value (0 or 255) from the band minimum; currently 0 is assumed.
#num = ds.RasterCount
#print 'Number of bands:', num
#if num > 1:
#	print 'Leave one band'
#	ds = gdal.Translate('output.tif', ds, bandList = [1])
#	input = 'output.tif'
#print input
#ds = gdal.Open('output.tif')
#srcband = ds.GetRasterBand(1)
#print srcband
#(min,max) = srcband.ComputeRasterMinMax()
#print 'MIN', min, 'MAX', max
#if min == 0:
print '0'
dstDS = gdal.Warp('outputWARP.tif', ds, dstAlpha=True, srcNodata=0)
#else:
#	print '255'
#	dstDS = gdal.Warp('outputWARP.tif', ds, dstAlpha=True, srcNodata=255)
|
985,767 | 1c1ba5004cbc976de873880944891a3c068722c9 | n, m = map(int, input().split())
# n (nodes) and m (edges) are read from the first input line above.
As = list(map(int, input().split()))   # current amount at each node
Bs = list(map(int, input().split()))   # desired amount at each node
# Capacity matrix over nodes 1..n plus source 0 and sink n+1.
adjMat = [[0 for j in range(n + 2)] for i in range(n + 2)]
Cs = [As[i] - Bs[i] for i in range(n)]  # per-node surplus (+) / deficit (-)
movs = [[0 for j in range(n + 2)] for i in range(n + 2)]
for i in range(1, n + 1):
    # NOTE(review): adjMat is all zeros at this point, so movs stays zero;
    # this loop looks vestigial — confirm before removing.
    movs[i][i] = adjMat[i][i]
limit = 0
for i, a in enumerate(Cs):
    if a < 0:
        # deficit node: edge from the source with the shortfall as capacity
        adjMat[0][i + 1] = -a
    elif a > 0:
        # surplus node: edge to the sink; limit accumulates the total
        # surplus the max flow must move for a feasible answer
        adjMat[i + 1][n + 1] = a
        limit += a
for i in range(m):
    a, b = map(int, input().split())
    # Undirected pipe a<->b; capacity in each direction is the current
    # amount at the sending endpoint.
    adjMat[a][b] = As[a - 1]
    adjMat[b][a] = As[b - 1]
# Pristine copy of the capacities; used after the max flow to recover the
# actual amounts moved (graph - residual).
graph = [[adjMat[i][j] for j in range(n + 2)] for i in range(n + 2)]
def BFS(root, dest):
    """Run bfs() from *root* and return the resulting parent array.

    parents[v] is v's predecessor on a BFS path from *root* through
    positive-capacity edges, or -1 if v was not reached.
    """
    size = n + 2
    parent_of = [-1] * size
    visited = [False] * size
    bfs(root, visited, parent_of, dest)
    return parent_of
def bfs(root, marked, parents, dest):
    """Breadth-first search over positive-capacity edges of adjMat.

    Fills *marked* and *parents* in place; returns whether *dest* was
    reached. NOTE(review): nodes are marked on dequeue, so a node can be
    enqueued (and its parent overwritten) multiple times, and list.pop(0)
    is O(n) — a deque with mark-on-enqueue would be faster; the result is
    still a valid search tree.
    """
    queue = [root]
    parents[root] = -1
    while len(queue) > 0:
        u = queue.pop(0)
        marked[u] = True
        for v in range(n + 2):
            if not marked[v] and adjMat[u][v] > 0:
                queue.append(v)
                parents[v] = u
    return marked[dest]
# Edmonds-Karp max flow from source 0 to sink n+1, then reconstruct the
# per-edge amounts actually moved.
maxFlow = 0
while True:
    parents = BFS(0, n + 1)
    if parents[n + 1] < 0:
        break  # sink unreachable: no augmenting path remains
    # Bottleneck capacity along the augmenting path.
    # BUG FIX: the original seeded this with the magic constant 200, which
    # silently caps each augmentation when edge capacities exceed 200.
    pathFlow = float('inf')
    v = n + 1
    while v != 0:
        u = parents[v]
        pathFlow = min(pathFlow, adjMat[u][v])
        v = u
    # Apply the augmentation along the path (residual graph update).
    v = n + 1
    while v != 0:
        u = parents[v]
        adjMat[u][v] -= pathFlow
        adjMat[v][u] += pathFlow
        v = u
    maxFlow += pathFlow
# print(adjMat)
if maxFlow >= limit:
    print("YES")
    # Amount moved on each edge = original capacity minus residual.
    outMat = [[max(0, graph[i][j] - adjMat[i][j]) for j in range(n + 2)] for i in range(n + 1)]
    for i in range(1, n + 1):
        temp = 0
        for j in range(1, n + 1):
            if j != i:
                temp += outMat[i][j]
        # Diagonal: amount that stays at node i.
        outMat[i][i] = As[i - 1] - temp
    for i in range(1, n + 1):
        print(*outMat[i][1:n + 1])
else:
    print("NO")
|
985,768 | 5f770e00611a01d7879b7d247610ac6c79839cf7 | #
# This file is part of LiteEth.
#
# Copyright (c) 2015-2021 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2015-2017 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2018 whitequark <whitequark@whitequark.org>
# SPDX-License-Identifier: BSD-2-Clause
import math
from liteeth.common import *
# MAC Gap ------------------------------------------------------------------------------------------
class LiteEthMACGap(Module):
def __init__(self, dw):
self.sink = sink = stream.Endpoint(eth_phy_description(dw))
self.source = source = stream.Endpoint(eth_phy_description(dw))
# # #
gap = math.ceil(eth_interpacket_gap/(dw//8))
counter = Signal(max=gap, reset_less=True)
self.submodules.fsm = fsm = FSM(reset_state="COPY")
fsm.act("COPY",
NextValue(counter, 0),
sink.connect(source),
If(sink.valid & sink.last & sink.ready,
NextState("GAP")
)
)
fsm.act("GAP",
NextValue(counter, counter + 1),
If(counter == (gap-1),
NextState("COPY")
)
)
|
985,769 | c6be5fffa8d0b9bea020e3d21084c42c4407b0b6 | import refit_hbond_polynomials as rhp
import numpy
import blargs
from cvxopt import lapack, matrix
def drange( start, stop, step ) :
    """Generator of values from *start* (inclusive) up to *stop*
    (exclusive) in increments of *step* — range() for non-integer steps."""
    value = start
    while True :
        if value >= stop :
            return
        yield value
        value += step
def read_HBpoly_csv_file( fname ) :
    """Parse an HBPoly CSV file into a list of (name, coeffs, min, max).

    Column layout (0-based): col 1 = polynomial name, cols 4/5 = x-range
    min/max, col 10 = degree, cols 11..11+degree = coefficients.
    NOTE(review): Python 2 code (xrange); the file handle from open() is
    never closed and ``min``/``max`` shadow the builtins.
    """
    polys = []
    flines = open( fname ).readlines()
    for line in flines :
        cols = line.split(",")
        deg = int( cols[10] )
        name = cols[1]
        min = float( cols[4] )
        max = float( cols[5] )
        coeffs = []
        for col in xrange(11,11+deg) :
            coeffs.append( float(cols[col]) )
        polys.append( (name,coeffs,min,max) )
    return polys
def closest_to_target( vals, target ) :
    """Return the index of the element of *vals* closest to *target*.

    Ties break in favour of the earliest index (matching the original
    strict-< scan). Returns -1 for an empty sequence, as before.
    """
    if len(vals) == 0 :
        return -1
    # min() returns the first index attaining the smallest distance, which
    # reproduces the original loop's tie-breaking; works on Py2 and Py3.
    return min(range(len(vals)), key=lambda i: abs(vals[i] - target))
def shift_polynomial( name, coeffs, min, max, shift, count ) :
    """Refit *coeffs* over an x-range shifted by *shift* and print the
    result in HBPoly CSV format.

    Constraints for the refit: the (shifted) minimum of the original
    polynomial nearest x=1 is pinned to y=-0.5, and both shifted range
    endpoints are pinned to y=1.0.
    NOTE(review): Python 2 print statements; ``min``/``max`` shadow the
    builtins.
    """
    xs = [ x for x in drange( min, max, 0.1 ) ]
    newxs = matrix([ x + shift for x in xs ])
    ys = matrix( numpy.polyval( coeffs, xs ) )
    # Real roots of the derivative = candidate extrema of the polynomial.
    deriv_coeffs = numpy.polyder( coeffs )
    minimum = rhp.find_real_polynomial_roots_near_target( deriv_coeffs )
    print "minimum: ",minimum[ closest_to_target(minimum,1) ],minimum
    x0 = [ minimum[ closest_to_target(minimum,1) ] + shift ]
    y0 = [ -0.5 ]
    print "xmin:", min, "xmax:", max
    x1 = [ min+shift, max+shift ]
    y1 = [ 1.0, 1.0 ]
    newpoly = rhp.fit_polynomial_multiple_constraints( newxs, ys, len(coeffs), x0, y0, x1, y1 )
    rhp.print_polynomial_in_HBPoly_csv_format( name, newpoly, count )
if __name__ == "__main__" :
    # Command-line options: -p <HBPoly CSV file>, -s <x shift amount>.
    # blargs injects the parsed values into locals().
    with blargs.Parser(locals()) as p :
        p.str("polynomial_file").shorthand("p")
        p.float("shift_value").shorthand("s")
    polys = read_HBpoly_csv_file( polynomial_file )
    count = 0
    # Refit and print each polynomial, numbering them from 1.
    for name,coeffs,min,max in polys :
        count += 1
        shift_polynomial( name,coeffs, min, max, shift_value, count )
|
985,770 | 01b0b34a3249df0d777100037c647640642d2b8e | from flaskapp.recommend.final_freelancers.jobSimilarity import Similarity
from flaskapp.recommend.freelancers_for_jobs import recommend_freelancers as freelancers
import pandas as pd
import string
import os, sys
# Resolve the project root (three directory levels above this file) so the
# jobs CSV and the log directory can be found regardless of the CWD.
fileDir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
#fileDir = os.path.join(fileDir, '..')
filePath = os.path.abspath(os.path.join(fileDir, 'data/jobs.csv'))
logPath = os.path.abspath(os.path.join(fileDir, 'logs'))
# NOTE(review): sys.path entries are normally directories; inserting the
# CSV *file* path here looks unintentional -- confirm these are needed.
sys.path.insert(0, filePath)
sys.path.insert(0, logPath)
# Loaded once at import time; all job lookups below index into this frame.
jobs_df = pd.read_csv(filePath)
def unamesFromLinks(links):
    """Extract freelancer usernames from profile-link text.

    `links` may be a single whitespace-separated string or a list of link
    strings.  Each token is assumed to hold the username at a fixed
    character offset: 35 when the token contains '[' (first element of a
    stringified list), 34 otherwise; the trailing two characters are
    dropped.

    Returns the list of extracted usernames.

    Fixes: removed dead code (a punctuation-stripped copy built via
    str.translate was never used) and merged the ']' branch, whose body
    was identical to the default branch.
    """
    # Normalize list input to one space-separated string.
    text = ' '.join(links) if isinstance(links, list) else links
    # Tokenize; single-character tokens (stray punctuation) are ignored.
    tokens = [tok for tok in text.lower().split() if len(tok) > 1]
    usernames = []
    for tok in tokens:
        # Only a token containing '[' shifts the slice start by one.
        start = 35 if '[' in tok else 34
        usernames.append(tok[start:-2])
    return usernames
def getInvitedFls(jobId):
    """Return usernames of freelancers invited to job `jobId`.

    `jobId` is 1-based: row jobId-1 of the module-level jobs_df is read
    from the 'Link_of_invited_freelancers' column and parsed.
    """
    #print('outer-called')
    links = list(jobs_df['Link_of_invited_freelancers'].values)[jobId-1]
    #print(links)
    return unamesFromLinks(links)
def getHiredFls(jobId):
    """Return usernames of freelancers hired for job `jobId`.

    `jobId` is 1-based: row jobId-1 of the module-level jobs_df is read
    from the 'Link_of_hired_freelancers' column and parsed.
    """
    #print('outer-called')
    links = list(jobs_df['Link_of_hired_freelancers'].values)[jobId-1]
    #print(links)
    return unamesFromLinks(links)
def getResults(jobId, n):
    """Build three freelancer-recommendation buckets for job `jobId`.

    Buckets 1 and 2: freelancers hired / invited on jobs similar to
    `jobId` (job-to-job similarity).  Bucket 3: the top-`n` freelancers
    from direct content-based matching on the job itself.

    Returns (fls_hired, fls_invited, fls_content, similar_jobs).
    """
    #running job to job similarity
    similarity = Similarity()
    # NOTE(review): the literal arguments (1, 1, 1, 10) look like similarity
    # weights plus a result count -- confirm against Similarity.get_similar_jobs.
    similar_jobs = similarity.get_similar_jobs(jobId, 1, 1, 1, 10)
    #print('Similar_Jobs: ',similar_jobs)
    fls_invited = []
    fls_hired = []
    #generate bucket 1 and 2 of freelancers based on job-job similarity
    for job in similar_jobs:
        fls_hired += getHiredFls(job)
        fls_hired = [x for x in fls_hired if x]  # drop empty usernames
        fls_invited += getInvitedFls(job)
        fls_invited = [x for x in fls_invited if x]  # drop empty usernames
    #generate bucket 3 from cotent-based matching
    similarities_on = ['Skills']
    weights = [30, 20, 50]
    freelancer_recommendations = freelancers.Implementation(jobId, similarities_on, weights)
    fls_content = freelancer_recommendations.pick_top_n(n)
    return fls_hired, fls_invited, fls_content, similar_jobs
|
985,771 | 4d43d9b3d8068a73d9172bbb19a415cb10e56990 | # Generated by Django 3.1 on 2020-09-07 08:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1): adds two optional text
    fields, 'proto' and 'relate_dart', to the article model; both default
    to a single space."""
    dependencies = [
        ('article', '0002_auto_20200905_2008'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='proto',
            field=models.TextField(blank=True, default=' ', help_text='해당 프로토 아티클'),
        ),
        migrations.AddField(
            model_name='article',
            name='relate_dart',
            field=models.TextField(blank=True, default=' ', help_text='관련자료 id. , 로 구분'),
        ),
    ]
|
985,772 | 3ecb0241b566e14959588e5ad2d5e0c39d3f95f5 | from flask_wtf import FlaskForm
from wtforms import StringField, TextField
from wtforms.validators import DataRequired
class RecipeForm(FlaskForm):
    """Form for creating/editing a recipe; all fields are required.

    Fix: TextField was a deprecated alias of StringField and was removed
    in WTForms 3.0, so `instructions` now uses StringField directly --
    the rendered field and validation behavior are unchanged.
    """
    recipeName = StringField('recipeName', validators=[DataRequired()])
    instructions = StringField('instructions', validators=[DataRequired()])
    estimatedTime = StringField('estimatedTime', validators=[DataRequired()])
985,773 | 4f9774c2b396e21239e15ca07598f462cf11cc03 | name = "JamesBond"
# Print each character of `name` framed by five asterisks on each side.
for character in name:
    print("*****%s*****" % character)
985,774 | 08ac2dad24d6dcc7a399b209988a335b1982e4df | from django.contrib import admin
from .models import Expert
admin.site.register(Expert)
|
985,775 | 6a409e95bd8b2e67951b2fb5ba7d71ff6be6d221 | """ API views list
QuestionViews
QuestionRetriveView
"""
from rest_framework import generics
from django_filters import rest_framework
from django.views.generic import ListView
from django.contrib.auth.models import User
from . import serializers, filters, authentication, models
# Create your views here.
class QuestionViews(generics.ListAPIView):
    """
    View the list of questions records

    Tenant-authenticated, filterable list of all non-private questions.
    """
    authentication_classes = (authentication.TenantAuthentication,)
    serializer_class = serializers.QuestionSerializer
    # Model is derived from the serializer so the two cannot drift apart.
    model_class = serializer_class.Meta.model
    queryset = model_class.objects.all().exclude(private=True)
    filter_backends = (rest_framework.DjangoFilterBackend,)
    filter_class = filters.QuestionFilter
class QuestionRetriveView(generics.RetrieveAPIView):
    """
    View to retrive individual question record

    Tenant-authenticated detail view; private questions are excluded from
    the queryset, so requesting one yields a 404.
    """
    authentication_classes = (authentication.TenantAuthentication,)
    serializer_class = serializers.QuestionRetriveSerializer
    # Model is derived from the serializer so the two cannot drift apart.
    model_class = serializer_class.Meta.model
    queryset = model_class.objects.all().exclude(private=True)
    filter_backends = (rest_framework.DjangoFilterBackend,)
class IndexView(ListView):
    """
    Dashboard views to show list of tenant and count of questions, answers
    """
    model = models.Tenant
    template_name = 'index.html'
    def get_context_data(self, **kwargs):
        """Related details to show on template

        Adds site-wide totals (questions, answers, users) on top of the
        default tenant-list context.
        """
        context = super(IndexView, self).get_context_data(**kwargs)
        context['questions'] = models.Question.objects.count()
        context['answers'] = models.Answer.objects.count()
        context['users'] = User.objects.count()
        return context
|
985,776 | 90582d89e433f9c0b69f9b50439de245cc9a4c17 | # 문제 3
# 조건1 문자열을 입력받고 정수를 입력받아서 문자열의 맨 뒤부터 정수만큼 출력하는 프로그램을 작성하시오
# 조건 2 입력받은 정수가 문자열의 길이보다 크다면 맨 뒤부터 맨 처음까지 모두 출력한다
word = input("문자열을 입력하시오 : ")
num = int(input("정수를 입력하세요 : "))
word2 = word[::-1]
if num > len(word) :
print(word2)
else :
print(word2[0:num])
|
985,777 | 00e3d369e5e02f80f4779b3ed65e0acd03317690 | from airflow import DAG
import datetime as dt
import json
import arrow
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.slack_operator import SlackAPIOperator
from airflow.models import Variable
from airflow.hooks.http_hook import HttpHook
from airflow.hooks.postgres_hook import PostgresHook
from common import destinations
# Default task arguments shared by every task in the DAG.
default_args = {
    'owner': 'airflow',
    'start_date': dt.datetime(2018, 11, 28, 00, 00, 00),
    'concurrency': 3,
    'retries': 0
}
# Storing in a table like this:
# create table availability (
#     oid serial primary key,
#     vendor text,
#     device_id text,
#     timestamp timestamp,
#     extra json
# )
# select addgeometrycolumn('public', 'availability', 'geom', 4326, 'POINT', 2)
# Postgres hook for the warehouse (connection id configured in Airflow).
pg = PostgresHook(
    postgres_conn_id='etl_postgres'
)
# Daily (11:00) pipeline: dump the latest scooter-availability snapshot,
# joined to council districts, as GeoJSON, then push it to ArcGIS Online.
with DAG('scooter_7a',
         default_args=default_args,
         schedule_interval="0 11 * * *") as dag:
    # ogr2ogr exports only the most recent timestamp's rows, tagging each
    # point with the council district that contains it.
    opr_dump_geojson = BashOperator(
        task_id = 'dump_geojson',
        bash_command = """ogr2ogr -f GeoJSON /tmp/scooter_7a.json -sql "SELECT a.*, cd.districts as district FROM scooters.availability a inner join base.council_districts cd on st_contains(st_transform(cd.wkb_geometry, 4326), a.geom) where timestamp = (select max(timestamp) from scooters.availability)" pg:dbname=etl scooters.availability"""
    )
    # Upload the dump to the ArcGIS Online item identified by `id`.
    opr_upload_to_ago = PythonOperator(
        task_id='upload_to_ago',
        provide_context=True,
        python_callable=destinations.upload_to_ago,
        op_kwargs={
            "id": "424a9858887c4eadafbbe07b31cfeac3",
            "filepath": "/tmp/scooter_7a.json"
        }
    )
    # Dump first, then upload.
    opr_dump_geojson >> opr_upload_to_ago
985,778 | c5e0ef619ec20bdfa9bc086747a0aa7d68a14b16 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from unittest import mock
from senlin.db import api as db_api
from senlin.objects import base
from senlin.objects import health_registry as hro
class TestHealthRegistry(testtools.TestCase):
    """Unit tests for the HealthRegistry versioned object.

    Each test stubs the db_api layer (and SenlinObject._from_db_object
    where rows are converted) and asserts both the returned value and the
    exact arguments forwarded to the database API.  Note mock.patch
    decorators apply bottom-up, so the lowest decorator supplies the
    first mock parameter.
    """
    def setUp(self):
        super(TestHealthRegistry, self).setUp()
        # A plain mock context is enough; it is only passed through.
        self.ctx = mock.Mock()
    @mock.patch.object(base.SenlinObject, '_from_db_object')
    @mock.patch.object(db_api, 'registry_create')
    def test_create(self, mock_create, mock_from):
        """create() stores a row (enabled by default) and returns the
        converted object."""
        x_registry = mock.Mock()
        mock_create.return_value = x_registry
        x_obj = mock.Mock()
        mock_from.return_value = x_obj
        result = hro.HealthRegistry.create(
            self.ctx, "FAKE_ID", "FAKE_TYPE", 123, {'foo': 'bar'},
            'FAKE_ENGINE')
        self.assertEqual(x_obj, result)
        mock_create.assert_called_once_with(
            self.ctx, "FAKE_ID", "FAKE_TYPE", 123, {'foo': 'bar'},
            "FAKE_ENGINE", enabled=True)
        mock_from.assert_called_once_with(self.ctx, mock.ANY, x_registry)
    @mock.patch.object(db_api, 'registry_update')
    def test_update(self, mock_update):
        """update() forwards id and values straight to the db layer."""
        hro.HealthRegistry.update(self.ctx, "FAKE_ID", {"foo": "bar"})
        mock_update.assert_called_once_with(
            self.ctx, "FAKE_ID", {"foo": "bar"})
    @mock.patch.object(base.SenlinObject, '_from_db_object')
    @mock.patch.object(db_api, 'registry_claim')
    def test_claim(self, mock_claim, mock_from):
        """claim() converts every claimed row and returns them as a list."""
        x_registry = mock.Mock()
        mock_claim.return_value = [x_registry]
        x_obj = mock.Mock()
        mock_from.side_effect = [x_obj]
        result = hro.HealthRegistry.claim(self.ctx, "FAKE_ENGINE")
        self.assertEqual([x_obj], result)
        mock_claim.assert_called_once_with(self.ctx, "FAKE_ENGINE")
        mock_from.assert_called_once_with(self.ctx, mock.ANY, x_registry)
    @mock.patch.object(db_api, 'registry_delete')
    def test_delete(self, mock_delete):
        """delete() forwards the cluster id to the db layer."""
        hro.HealthRegistry.delete(self.ctx, "FAKE_ID")
        mock_delete.assert_called_once_with(self.ctx, "FAKE_ID")
    @mock.patch.object(base.SenlinObject, '_from_db_object')
    @mock.patch.object(db_api, 'registry_get')
    def test_get(self, mock_get, mock_from):
        """get() fetches by cluster id and returns the converted object."""
        x_registry = mock.Mock()
        x_registry.cluster_id = 'FAKE'
        mock_get.return_value = x_registry
        x_obj = mock.Mock()
        mock_from.return_value = x_obj
        result = hro.HealthRegistry.get(self.ctx, 'FAKE')
        self.assertEqual(x_obj, result)
        mock_get.assert_called_once_with(self.ctx, 'FAKE')
        mock_from.assert_called_once_with(self.ctx, mock.ANY, x_registry)
    @mock.patch.object(base.SenlinObject, '_from_db_object')
    @mock.patch.object(db_api, 'registry_get_by_param')
    def test_get_by_engine(self, mock_get, mock_from):
        """get_by_engine() queries by the (cluster_id, engine_id) pair."""
        x_registry = mock.Mock()
        x_registry.cluster_id = 'FAKE'
        x_registry.engine_id = 'FAKE_ENGINE'
        mock_get.return_value = x_registry
        x_obj = mock.Mock()
        mock_from.return_value = x_obj
        result = hro.HealthRegistry.get_by_engine(
            self.ctx, 'FAKE_ENGINE', 'FAKE')
        self.assertEqual(x_obj, result)
        mock_get.assert_called_once_with(
            self.ctx, {"cluster_id": "FAKE", "engine_id": "FAKE_ENGINE"})
        mock_from.assert_called_once_with(self.ctx, mock.ANY, x_registry)
    @mock.patch.object(hro.HealthRegistry, 'update')
    def test_disable(self, mock_update):
        """disable_registry() is an update() setting enabled=False."""
        hro.HealthRegistry.disable_registry(
            self.ctx, "FAKE_ID")
        mock_update.assert_called_once_with(
            self.ctx, "FAKE_ID", {"enabled": False})
    @mock.patch.object(hro.HealthRegistry, 'update')
    def test_enable(self, mock_update):
        """enable_registry() is an update() setting enabled=True."""
        hro.HealthRegistry.enable_registry(
            self.ctx, "FAKE_ID")
        mock_update.assert_called_once_with(
            self.ctx, "FAKE_ID", {"enabled": True})
|
985,779 | 2f2dfd516e7c8b8b89f00ba526026b67f1304b4c | import logging
from os import path, makedirs
class Logger(object):
    """Configure the root logger with console and file output.

    Console output goes through logging.basicConfig at INFO level; a file
    handler (append mode, DEBUG level) is added for `input_log_path`,
    creating the parent directory first if needed.  Kafka's own logger is
    capped at WARNING to cut noise.
    """
    def __init__(self, input_log_path):
        fmt = "%(asctime)s - %(levelname)s - [%(filename)s: %(lineno)d] - %(message)s"
        # Console/basic configuration on the root logger.
        logging.basicConfig(level=logging.INFO, format=fmt)
        root = logging.getLogger()
        root.setLevel(logging.INFO)
        logging.getLogger("kafka").setLevel(logging.WARNING)
        # Make sure the log file's directory exists before opening it.
        log_dir = path.dirname(input_log_path)
        if log_dir and not path.exists(log_dir):
            makedirs(log_dir)
        # File handler: append mode, accepts everything down to DEBUG.
        file_handler = logging.FileHandler(input_log_path, mode='a+')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(fmt))
        root.addHandler(file_handler)
        self._logger = root
    def logger(self):
        """Return the configured root logger."""
        return self._logger
# def debug(self, msg):
# self._logger.debug(msg)
# def info(self, msg):
# self._logger.info(msg)
# def warning(self, msg):
# self._logger.warning(msg)
# def error(self, msg):
# self._logger.error(msg)
# def fatal(self, msg):
# self.fatal(msg)
# def critical(self, msg):
# self._logger.critical(msg)
# self._logger.handlers[0].flush()
# logger = Logger(cfg_log.LOG_DIR).logger() |
985,780 | 5b69e1ed0ff6a2133e67fdfdbeaa396148e79e9a | print('Faça um programa para uma loja de tintas. O programa deverá pedir o tamanho em metros quadrados da área a ser '
'pintada. Considere que a cobertura da tinta é de 1 litro para cada 3 metros quadrados e que a tinta é vendida em latas de'
'18 litros, que custam R$ 80,00. Informe ao usuário a quantidades de latas de tinta a serem compradas e o preço total.')
# Area to paint, in square meters.
area = float(input('\nInforme a medida em m² do local a ser pintado: '))
# Coverage: 1 liter of paint covers 3 m².
litros = (area / 3)
# Whole 18-liter cans, rounded up whenever there is any remainder.
latas = int(litros / 18)
if (litros % 18 != 0):
    latas += 1
# Each 18-liter can costs R$ 80.00.
preco = latas * 80.00
# NOTE(review): `latas` is an int but is rendered with {:.2f}, so the can
# count prints with two decimals (e.g. "2.00") -- confirm that is intended.
print('\n\nPara pintar {:.2f} m²'
      '\nSerão necessárias {:.2f} latas de tinta'
      '\n\nCom cada lata de 18 litros custando R$ 80,00,'
      '\nPreço total R$ {:.2f}' .format(area, latas, preco))
|
985,781 | 5bc8d9f4da1e00afe48f8e5271715d6387877556 | from InitiationFiles.Query_execution_summary_File_creation import Query_Execution_Summary
from InitiationFiles.Task_execution_summary_File_creation import Task_Execution_Summary
from InitiationFiles.Detailed_metrics_per_task_File_creation import Detailed_Metrics_per_task
import logging
class ParseInputFile:
    """Splits a Tez/Hive execution log into per-section summary files."""
    def parse_file(self, filepath):
        """Scan `filepath` line by line and dispatch each recognized
        section header to its dedicated summary writer, which consumes
        the section's remaining lines from the same file handle.
        """
        with open(filepath) as log_file:
            # iter(readline, '') stops at EOF, where readline returns ''.
            for header in iter(log_file.readline, ''):
                if 'OPERATION' in header:
                    # 'Query Execution Summary' section
                    Query_Execution_Summary().create_Query_Execution_Summary_text_file(log_file)
                elif 'VERTICES' in header:
                    # 'Task Execution Summary' section
                    Task_Execution_Summary().create_Task_Execution_Summary_text_file(header, log_file)
                elif 'org.apache.tez.common.counters.DAGCounter' in header:
                    # Detailed per-task metrics section
                    Detailed_Metrics_per_task().create_Detailed_Metrics_per_task_text_file(header, log_file)
        logging.info("Input file parsing completed succesfully")
|
985,782 | 2fa48fdc9e4cd276afa4a248b247536278ee92c2 | import numpy as np
import generador
import random
# Shared stream of U(0,1) pseudo-random numbers: a linear congruential
# generator with seed 1234, multiplier 134775813, increment 1, and
# modulus 2^32, producing 10000 samples.
numerosGCL = generador.generadorgcl(1234,134775813, 1, pow(2, 32),10000)
# Module-level accumulators: each distribucion_* function appends its
# samples to one of these (and returns the same list), so repeated calls
# accumulate results.
uniforme = []
exponencial = []
normal = []
gamma = []
pascal = []
binomial = []
hipergeometrica = []
poisson = []
empirica = []
def distribucion_uniforme(a,b):
    """Map every LCG sample r to U(a, b) via x = a + (b - a) * r.
    Results are appended to the module-level `uniforme` list (returned)."""
    uniforme.extend(a + (b - a) * r for r in numerosGCL)
    return uniforme
def distribucion_exponencial(ex):
    """Inverse-transform sampling of an exponential with mean `ex`:
    x = -ex * ln(r).  Results are appended to the module-level
    `exponencial` list (returned)."""
    exponencial.extend(-ex * np.log(r) for r in numerosGCL)
    return exponencial
def distribucion_gamma(k,alpha):
    """Gamma(k, alpha) via the product-of-uniforms method: multiply k
    uniforms from the LCG stream and take -ln(product)/alpha.
    Appends 10000 samples to the module-level `gamma` list (returned)."""
    for _ in range(10000):
        producto = 1.0
        for _ in range(k):
            producto *= random.choice(numerosGCL)
        gamma.append(-np.log(producto) / alpha)
    return gamma
def distribucion_normal(mu,sigma):
    """N(mu, sigma) via the central-limit method: the sum of 12 uniforms
    minus 6 has mean 0 and variance 1, then it is scaled and shifted.
    Appends 10000 samples to the module-level `normal` list (returned)."""
    for _ in range(10000):
        total = 0.0
        for _ in range(12):
            total += random.choice(numerosGCL)
        normal.append(sigma * (total - 6.0) + mu)
    return normal
def distribucion_pascal(k,q):
    """Pascal (negative binomial) sampling: multiply k uniforms and
    divide ln(product) by ln(q).  Appends 10000 samples to the
    module-level `pascal` list (returned)."""
    log_q = np.log(q)  # invariant, hoisted out of the sampling loop
    for _ in range(10000):
        producto = 1.0
        for _ in range(k):
            producto *= random.choice(numerosGCL)
        pascal.append(np.log(producto) / log_q)
    return pascal
def distribucion_binomial(n,p):
    """Binomial(n, p) by direct simulation: count how many of n uniform
    draws fall below p.  Appends 10000 samples to the module-level
    `binomial` list (returned)."""
    for _ in range(10000):
        exitos = 0.0
        for _ in range(n):
            if random.choice(numerosGCL) - p < 0:
                exitos += 1
        binomial.append(exitos)
    return binomial
def distribucion_hipergeometrica(tn,ns,p):
    # Hypergeometric sampling without replacement: tn = population size,
    # ns = number of draws, p = initial success proportion.  Appends
    # 10000 samples (success counts) to the module-level
    # `hipergeometrica` list.
    # NOTE(review): `tn` and `p` are mutated across draws but never reset
    # between the 10000 outer iterations -- confirm that is intended.
    for i in range(10000):
        x = 0.0
        for i in range(ns):
            r = random.choice(numerosGCL)
            if (r-p) > 0:
                s = 0.0
            else:
                s = 1.0
                x = x + 1.0
            # Update the success proportion for the shrunken population.
            p = (tn*p-s) / (tn-1.0)
            tn = tn - 1.0
            # Stop once the population is exhausted.
            if(tn<2):break
        hipergeometrica.append(x)
    return hipergeometrica
def distribucion_poisson(p):
    """Poisson(p) via the product-of-uniforms method: multiply uniforms
    until the running product drops below e^-p; the count of factors that
    kept it above the threshold is the sample.  Appends 10000 samples to
    the module-level `poisson` list (returned)."""
    umbral = np.exp(-p)  # invariant, hoisted out of the sampling loop
    for _ in range(10000):
        producto = 1.0
        cuenta = 0.0
        while producto - umbral >= 0:
            producto *= random.choice(numerosGCL)
            if producto - umbral >= 0:
                cuenta += 1.0
        poisson.append(cuenta)
    return poisson
def distribucion_empirica():
    """Sample from a fixed empirical distribution over categories 1..10
    by inverting the CDF: walk the cumulative probabilities until the
    uniform draw is covered.  Appends 10000 samples to the module-level
    `empirica` list (returned)."""
    probabilidades = [0.273,0.037,0.195,0.009,0.124,0.058,0.062,0.151,0.047,0.044]
    for _ in range(10000):
        draw = random.choice(numerosGCL)
        acumulada = 0
        categoria = 1
        for prob in probabilidades:
            acumulada += prob
            if draw <= acumulada:
                break
            categoria += 1
        empirica.append(categoria)
    return empirica
|
985,783 | 70826b5326572537ea65730ed7481b4cac4c7a10 | import imutils
imutils.find_function("contour") |
985,784 | fe46ad7776361a6476373fa89db1c3633996a245 | # -*- coding: utf-8 -*-
import math
class Solution:
    """LeetCode 202 'Happy Number': repeatedly replace n by the sum of
    the squares of its decimal digits; n is happy iff this reaches 1."""
    def isHappy(self, n: int) -> bool:
        """Return True when the digit-square-sum iteration reaches 1,
        False once a value repeats (the sequence has entered a cycle)."""
        seen = set()
        while True:
            n = self.cal(n)
            if n == 1:
                return True
            if n in seen:
                return False
            seen.add(n)
    def cal(self, n):
        """Return the sum of the squares of the decimal digits of n."""
        total = 0
        while n > 0:
            n, digit = divmod(n, 10)
            total += digit * digit
        return total
# Smoke checks: 1, 7 and 19 are known happy numbers; 2 is not.
checker = Solution()
assert checker.isHappy(19) is True
assert checker.isHappy(1) is True
assert checker.isHappy(2) is False
assert checker.isHappy(7) is True
|
985,785 | b18adb4e4cc3b51b2e072190608d3e5608066ea9 | import cv2
import numpy as np
import imutils
from PIL import Image
#import pyfirmata
import serial
# Serial port name placeholder for the Bluetooth link; unused while the
# serial lines below stay commented out.
port='port name'
#bluetooth=serial.Serial(port, 9600)
#bluetooth.flushInput()
def perspective_bird():
    """Warp the module-global `frame` to a 999x666 top-down (bird's-eye)
    view, mapping the hard-coded road trapezoid to the full output."""
    src_corners = np.float32([[430,320],[590,320],[120,450],[850,450]])
    dst_corners = np.float32([[0,0],[999,0],[0,666],[999,666]])
    transform = cv2.getPerspectiveTransform(src_corners, dst_corners)
    return cv2.warpPerspective(frame, transform, (999, 666))
def houghc():
    """Detect circles on a color-masked copy of the global `frame`.

    Builds an inRange mask in BGR space, dilates it, runs HoughCircles on
    the mask, draws any detections onto `frame`, and returns
    (frame, flag).  `flag` is currently always 1 (the reset-to-0 logic is
    commented out).  Side effect: updates the module-global `circles`.
    """
    global circles
    flag = 1
    # BGR bounds for the mask (not HSV -- inRange is applied to raw frame).
    l_b = np.array([30,50,30])
    u_b = np.array([190,200,190])
    mask = cv2.inRange(frame, l_b, u_b)
    kernal=np.ones((1,1),np.uint8)
    # NOTE(review): dilating with a 1x1 kernel is a no-op -- confirm size.
    mask=cv2.dilate(mask,kernal,iterations=1)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('mask', mask)
    #cv2.imshow('result',res)
    circles = cv2.HoughCircles(mask, cv2.HOUGH_GRADIENT, 1, 200, param1=100, param2=5, minRadius=56, maxRadius=65)
    # Draw detected circles
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            # Draw outer circle
            cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 2)
            # Draw inner circle
            cv2.circle(frame, (i[0], i[1]), 2, (0, 0, 255), 3)
    #if len(circles[0,:])==0:
    #    flag = 0
    return frame,flag
def hought():
    """Blur the global `frame`, detect circles on its grayscale version,
    and draw them back onto `frame`.

    Side effects: overwrites the module-globals `frame` (median-blurred)
    and `circles` (rounded uint16 Hough detections).
    """
    global frame
    #global i
    frame = cv2.medianBlur(frame,5)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #cv2.imshow("check",frame)
    #cv2.waitKey(0)
    global circles
    ###
    #HughCircles Detection TEST
    circles = cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,1,50,
        param1=50,param2=30,minRadius=54,maxRadius=70)
    # NOTE(review): unlike houghc(), there is no None check before
    # np.around -- a frame with no circles raises here.
    circles = np.uint16(np.around(circles))
    ret,thresh = cv2.threshold(gray,127,255,0)
    # calculate moments of binary image
    M = cv2.moments(thresh)
    # calculate x,y coordinate of center
    if(M["m00"]!=0):
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
    # NOTE(review): `circles.all` is a bound method, so this comparison is
    # always True -- probably meant `circles is not None`.
    if circles.all != None:
        for i in circles[0,:]:
            # draw the outer circle
            cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),2)
            # draw the center of the circle
            cv2.circle(frame,(i[0],i[1]),2,(0,0,255),3)
            #print(i[0])
def line_angle(thresh,frame):
    """Estimate a steering hint from detected circle positions.

    For each circle in the module-global `circles`, computes the angle
    between the frame's bottom-center and the circle, overlays a
    'Turn Left' / 'Turn Right' label, and finally draws a line to the
    lowest (largest-y) circle.  Side effects: fills the module-globals
    `ar` (sorted circle y-coordinates) and `theta2`.
    """
    global ar
    ar=[]
    #frame = cv2.medianBlur(frame,5)
    global theta2
    #print(frame.shape)
    # x becomes the horizontal center; y is the frame height (bottom edge).
    y,x,_=frame.shape
    x=int(x/2)
    theta2=0
    #M = cv2.moments(thresh)
    # calculate x,y coordinate of center
    # NOTE(review): `circles.all` is a bound method, so this comparison is
    # always True -- probably meant `circles is not None`.
    if circles.all != None:
        for i in circles[0,:]:
            # draw the outer circle
            #cv2.line(frame,(i[0],i[1]),(cX,cY),(0,0,0),2)
            ar.append(i[1])
            #ar[0].sort()
            ar.sort()
            l=str(len(ar))
            # Angle (degrees) from the bottom-center to this circle.
            theta1=np.arctan((i[0]-x)/(i[1]-y))*180/np.pi
            #cv2.putText(frame,l,(50,50),cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),4)
            cv2.line(frame,(x,y),(x,int(y/2)),(0,0,0),2)
            #if ar[0]==i[1]:
            #cv2.line(frame,(i[0],i[1]),(x,y),(0,255,0),2)
            #theta1 = np.arctan((thiselem[1]-cY)/(thiselem[0]-cX))
            #theta1*=180/np.pi
            #print(theta1)
            #frame=cv2.putText(frame,str(theta1),(thiselem[0],thiselem[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,0),2)
            if theta1>0:
                frame=cv2.putText(frame,'Turn Left',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,50,255),2)
            else:
                frame=cv2.putText(frame,'Turn Right',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,50,255),2)
    print(ar)
    # Highlight the circle with the largest y (the last one in sorted ar).
    # NOTE(review): if `ar` is empty this indexes ar[-1] and raises.
    for i in circles[0,:]:
        if i[1]==ar[len(ar)-1]:
            cv2.line(frame,(i[0],i[1]),(x,y),(0,255,0),2)
    #if len(circles)>1:
    #    nextelem = circles[circles.index(i)-len(li)+1]
    #    theta2=np.arctan((cY-nextelem[1])/(cX-nextelem[0]))
    #    theta2=theta2*180/np.pi
    #    print(theta2)
    #    frame=cv2.putText(frame,str(theta2),(nextelem[0],nextelem[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,0))
def barcode():
    """Decode a simple bar pattern from 'opencv/barcode/bcode.jpg'.

    Detects bar edges with probabilistic Hough, measures the gap between
    every other pair of sorted edge x-positions, thresholds each gap
    against the mean gap to classify it as 0 (narrow) or 1 (wide), and
    overlays the resulting bit string on the module-global `frame`.

    Fix: the original used the undefined name `cv` (the module imports
    `cv2`), so calling this function raised NameError; the dead
    `a = 0` local and commented-out `cv` experiments were removed.
    """
    img = cv2.imread("opencv/barcode/bcode.jpg")
    height, width, _ = img.shape
    im = Image.open('opencv/barcode/bcode.jpg')
    ppi = im.info['dpi']
    print(ppi[0])
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edge_xs = []
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    minLineLength = 100
    maxLineGap = 70
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength, maxLineGap)
    for x1, y1, x2, y2 in lines[:, 0]:
        edge_xs.append(x1)
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    edge_xs.sort()
    # Bar widths: the gap between every other pair of consecutive edges.
    widths = [edge_xs[i + 1] - edge_xs[i] for i in range(0, len(edge_xs) - 1, 2)]
    print(widths)
    # NOTE(review): the original divides by a fixed 4 rather than
    # len(widths); kept for behavioral compatibility -- confirm intent.
    mean_width = sum(widths) / 4
    print(mean_width)
    # Classify each width against the mean: narrow -> 0, wide -> 1.
    bits = ""
    for i in range(len(widths)):
        widths[i] = 0 if widths[i] <= mean_width else 1
        bits += str(widths[i])
    print(bits)
    cv2.putText(frame, bits, (int(width/2), int(height/2)), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 2)
def shape():
    """Classify HSV-thresholded contours of the global `frame` as
    triangle / square / circle and label them on the frame.

    Creates a 'Trackbars' window whose six sliders set the lower/upper
    HSV bounds for the mask; the mask is eroded, contours are extracted,
    and each contour is labeled by its approximated vertex count.
    """
    def nothing(x):
        # Trackbar callback stub: OpenCV requires a callable.
        pass
    cv2.namedWindow("Trackbars")
    cv2.createTrackbar("L-H","Trackbars",57,180,nothing)
    cv2.createTrackbar("L-S","Trackbars",0,255,nothing)
    cv2.createTrackbar("L-V","Trackbars",136,255,nothing)
    cv2.createTrackbar("U-H","Trackbars",156,180,nothing)
    cv2.createTrackbar("U-S","Trackbars",111,255,nothing)
    cv2.createTrackbar("U-V","Trackbars",255,255,nothing)
    hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
    lh=cv2.getTrackbarPos("L-H","Trackbars")
    ls=cv2.getTrackbarPos("L-S","Trackbars")
    lv=cv2.getTrackbarPos("L-V","Trackbars")
    uh=cv2.getTrackbarPos("U-H","Trackbars")
    us=cv2.getTrackbarPos("U-S","Trackbars")
    uv=cv2.getTrackbarPos("U-V","Trackbars")
    lower_black=np.array([lh,ls,lv])
    upper_black=np.array([uh,us,uv])
    mask=cv2.inRange(hsv,lower_black,upper_black)
    kernel=np.ones((5,5),np.uint8)
    mask=cv2.erode(mask,kernel)
    #contour detection
    contours, _ = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area=cv2.contourArea(cnt)
        # Polygon approximation; vertex count decides the label below.
        approx=cv2.approxPolyDP(cnt,0.05*cv2.arcLength(cnt,True),True)
        x=approx.ravel()[0]
        y=approx.ravel()[1]
        if area>0:
            cv2.drawContours(frame,[approx],0,(0,255,0))
            if len(approx) == 3:
                cv2.putText(frame, "Triangle", (4,4), cv2.FONT_HERSHEY_COMPLEX, 3, (0, 255, 0))
            elif len(approx) == 4:
                cv2.putText(frame, "square", (x, y), cv2.FONT_HERSHEY_COMPLEX, 3, (0, 0, 255))
            else : cv2.putText(frame, "Circle", (200,200), cv2.FONT_HERSHEY_COMPLEX, 3, (0, 0, 255))
    cv2.imshow("mask",mask)
    cv2.imshow("Kernel",kernel)
def main():
    """Main video loop: read the track video, rotate each frame 90
    degrees (transpose + flip), run shape/circle detection and the
    steering-hint overlay, and display until ESC (key 27) is pressed.

    Side effect: publishes each frame via the module-global `frame`,
    which the detection helpers read and mutate.
    """
    #cap=cv2.VideoCapture("opencv/visionX/r1_toptrack.mp4")
    cap=cv2.VideoCapture("git_workspace/opencv/visionX/visionX/r1_toptrack.mp4")
    global frame
    #frame=cv2.imread("opencv/visionX/r1_track_p1.png")
    #ret=True
    while(True):
        ret,frame=cap.read()
        # Rotate the frame 90 degrees (transpose then horizontal flip).
        # NOTE(review): these run before the ret check, so the final
        # (failed) read passes None to cv2.transpose and raises.
        frame=cv2.transpose(frame)
        frame=cv2.flip(frame,+1)
        if ret==True:
            #frame=perspective_bird()
            #frame,flag=houghc()
            shape()
            hought()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            ret,thresh = cv2.threshold(gray,127,255,0)
            line_angle(thresh,frame)
            cv2.imshow("Video",frame)
        key=cv2.waitKey(1)
        if key== 27:
            break
    cap.release()
    cv2.destroyAllWindows()
# Run the pipeline on import/execution.
main()
def PredictTheWinner(nums):
    """
    :type nums: List[int]
    :rtype: bool

    Two players alternately take a number from either end of `nums`,
    both playing optimally; return True when player 1 can finish with at
    least as many points as player 2.

    Even-length (and single-element) games are always winnable by player
    1, so they are short-circuited.  Otherwise a bottom-up interval DP is
    run where each cell holds, for the sub-game nums[i..j], the pair
    (best score available to the player to move, score left over for the
    opponent).
    """
    count = len(nums)
    if count % 2 == 0 or count == 1:
        return True
    # table[i][j] = [score of the player to move, score left for the
    # other player] on the sub-game nums[i..j].
    table = [[[0, 0] for _ in range(count)] for _ in range(count)]
    # One-element games: the mover takes the lone number, nothing remains.
    for i in range(count):
        table[i][i] = [nums[i], 0]
    # Grow the interval.  Taking nums[i] hands the opponent the best of
    # game [i+1..j] and keeps that game's leftover; symmetrically for
    # nums[j].  The mover picks whichever end yields more.
    for span in range(2, count + 1):
        for i in range(count - span + 1):
            j = i + span - 1
            take_left = table[i + 1][j][1] + nums[i]
            take_right = table[i][j - 1][1] + nums[j]
            if take_left > take_right:
                table[i][j] = [take_left, table[i + 1][j][0]]
            else:
                table[i][j] = [take_right, table[i][j - 1][0]]
    return table[0][-1][0] >= table[0][-1][1]
# Ad-hoc manual check; the return value is computed but discarded.
nums = [1, 5, 233, 7, 8]
PredictTheWinner(nums)
985,787 | 78287c946b99d365000cdd7d2b0534353309d9d5 | #
# @lc app=leetcode.cn id=108 lang=python3
#
# [108] 将有序数组转换为二叉搜索树
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # NOTE: `List` and `TreeNode` are injected by the LeetCode runtime;
    # standalone use needs `from typing import List` and a TreeNode class.
    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
        """Build a height-balanced BST from a sorted array by recursively
        rooting each subtree at the middle element (the upper middle for
        even lengths)."""
        n = len(nums)
        if n == 0:
            return None
        if n == 1:
            return TreeNode(nums[0])
        if n == 2:
            # Two elements: larger value as root, smaller as left child.
            # (The general branch below would handle this identically.)
            node = TreeNode(nums[1])
            node.left = self.sortedArrayToBST([nums[0]])
        else:
            num = n//2
            node = TreeNode(nums[num])
            # Elements left of the midpoint form the left subtree,
            # elements right of it the right subtree.
            node.left = self.sortedArrayToBST(nums[:num:])
            node.right = self.sortedArrayToBST(nums[num+1::])
        return node
# @lc code=end
|
985,788 | 5d3146b16df8ccd0bf1dfe6e870f365b410d21df | """
print("b mi계산기 입니다.")
weight = float(input("몸무게를 입력해주세요"))
height = float(input("키를 입력해주세요"))
print("측정도 : weight / %f*%f = %f"%(weight,height,height,))
"""
# Read an integer score and print "수" (the top grade) for scores of 90+.
score = int(input("점수를 입력하세요"))
if score>=90:
    print("수")
|
985,789 | d673b1992d4ba1fde3768339114737d05818648b | # -*- coding: utf-8 -*-
from flask import g
from flask_restful import reqparse, Resource, fields,marshal
from Apps.apis.common.amap_about.amap_about_utils import amapAboutHandle
from Apps.apis.common.user.user_utils import login_required
parser = reqparse.RequestParser()
# flask-restful marshalling templates for the nested payload:
# region -> plots / groups -> devices -> data points.
dp_fields = {'dataTotal':fields.Integer,
             'desc':fields.String,
             'dpName':fields.String,
             'position':fields.List(fields.Float),
             'type':fields.Integer}
dev_fields = {'dataPoints':fields.List(fields.Nested(dp_fields)),
              'dataTotal':fields.Integer,
              'desc':fields.String,
              'devName':fields.String,
              'dpTotal':fields.Integer,
              'position':fields.List(fields.Float),
              }
gro_fields = {'desc':fields.String,
              'devs':fields.List(fields.Nested(dev_fields)),
              'devsTotal':fields.Integer,
              'gName':fields.String,
              'position':fields.List(fields.Float)}
plot_fields = {'bounds':fields.List(fields.Float),
               'desc':fields.String,
               'exterior':fields.List(fields.List(fields.Float)),
               'layerUrl':fields.String,
               'plotsRange':fields.Integer,
               'plotName':fields.String}
reg_fields = {'desc':fields.String,
              'groups':fields.List(fields.Nested(gro_fields)),
              'location':fields.String,
              'plots':fields.List(fields.Nested(plot_fields)),
              'position':fields.List(fields.Float),
              'regName':fields.String,
              'regRange':fields.Integer}
# Top-level envelope: {"code": <int>, "data": [<region>, ...]}.
data_fields = {'code':fields.Integer,'data':fields.List(fields.Nested(reg_fields))}
class amapAboutData(Resource,amapAboutHandle):
    """REST resource exposing the region/device map data for AMap display."""
    @login_required
    def get(self):
        """Return the marshalled dataset for authenticated users with a rank.

        NOTE(review): when the user's rank is falsy this implicitly
        returns None (an empty 200 body) -- confirm that is intended
        rather than an explicit 403.
        """
        rank = g.user.rank
        if rank:
            data = marshal(self.getdata(),data_fields)
            return data
    def post(self):
        # POST is unsupported; the Chinese message means "wrong request method".
        return "请求方法错误"
|
985,790 | 7292b449206b49cd0db70e1eaa41f1f2ff3c75a3 |
class Triangle:
    """A triangle described by its base and height.

    Fixes: the class-level defaults were empty strings, which made
    `display()` raise a confusing TypeError when called before
    `set_value()`; numeric zero defaults keep the arithmetic valid.
    `display()` now also returns the computed area (previously None),
    which is backward compatible and makes the class testable.
    """
    height = 0
    base = 0
    def set_value(self, height, base):
        """Store the triangle's height and base."""
        self.height = height
        self.base = base
    def display(self):
        """Print and return the area (0.5 * base * height)."""
        result = 0.5 * self.base * self.height
        print(result)
        return result
# Demo: area of a triangle with height 20 and base 10 (prints 100.0).
tin=Triangle()
tin.set_value(20,10)
tin.display()
|
985,791 | a1edd64df6e2dbe3c4065fe1eac882f5023fe917 | # model.py ---
#
# Filename: model.py
# Description:
# Author: Kwang Moo Yi
# Maintainer:
# Created: Thu Jan 24 17:28:40 2019 (-0800)
# Version:
#
# Commentary:
#
#
#
#
# Change Log:
#
#
#
# Copyright (C), Visual Computing Group @ University of Victoria.
# Code:
import numpy as np
import torch
from torch import nn
class ConvBlock(nn.Module):
    """Residual bottleneck block: 1x1 -> kxk -> 1x1 convolutions, each
    preceded by batch-norm and followed by the activation, with the input
    added back at the end (no activation after the sum).

    Restricted to indim == outdim, stride 1 and an odd ksize, so neither
    the spatial size nor the channel count ever changes.
    """
    def __init__(self, indim, outdim, ksize=3, stride=1, activation=nn.ReLU):
        super(ConvBlock, self).__init__()
        # The simplifying restrictions this block relies on.
        assert (ksize % 2) == 1
        assert stride == 1
        assert indim == outdim
        self.activ = activation
        # "Same" padding for an odd kernel keeps H and W unchanged.
        pad = ksize // 2
        # Bottleneck stack; submodule names are kept stable so checkpoints
        # trained against this block keep loading.
        self.layers = nn.Sequential()
        self.layers.add_module("conv_1", self._conv(indim, indim, 1, 1, 0))
        self.layers.add_module("conv_2", self._conv(indim, indim, ksize, 1, pad))
        self.layers.add_module("conv_3", self._conv(indim, outdim, 1, 1, 0))
    def _conv(self, indim, outdim, ksize, stride, padding):
        """One bn -> conv -> activation unit, packaged as an nn.Sequential."""
        return nn.Sequential(
            nn.BatchNorm2d(indim),
            nn.Conv2d(indim, outdim, ksize, stride, padding),
            self.activ(),
        )
    def forward(self, x):
        """Residual forward pass on a BCHW tensor: layers(x) + x."""
        assert(len(x.shape) == 4)
        return self.layers(x) + x
class MyNetwork(nn.Module):
    """Configurable CNN: per-stage 1x1 channel-expansion conv, a run of
    inner conv (torch or custom residual) layers, 2x2 pooling, then a
    global-average-pooled linear classifier.

    Config fields consumed: conv2d ("torch"|"custom"), activation,
    pool2d, num_conv_outer, num_conv_inner, nchannel_base, ksize,
    num_class.
    """
    def __init__(self, config, input_shp):
        """Initialization of the model.
        Parameters
        ----------
        config:
            Configuration object that holds the command line arguments.
        input_shp: tuple or list
            Shape of each input data sample (channels first).
        """
        # Run initialization for super class
        super(MyNetwork, self).__init__()
        # Store configuration
        self.config = config
        # Placeholder for layers
        self.layers = {}
        indim = input_shp[0]
        # Retrieve Conv, Act, Pool functions from configurations. We'll use
        # these for our code below.
        if config.conv2d == "torch":
            self.Conv2d = nn.Conv2d
        elif config.conv2d == "custom":
            self.Conv2d = ConvBlock
        self.Activation = getattr(nn, config.activation)
        self.Pool2d = getattr(nn, config.pool2d)
        self.Linear = nn.Linear
        # Resnet Blocks, similar to slide 73 of lecture 21. However, for
        # simplicity, we'll make is slightly different. Note that we used
        # nn.Sequential this time.
        self.convs = nn.Sequential()
        # NOTE(review): cur_h/cur_w track spatial size but are never read
        # after the loop, and the decrement below ignores the padding of 1
        # used in the torch branch -- presumably leftover bookkeeping.
        cur_h, cur_w = input_shp[-2:]
        for _i in range(config.num_conv_outer):
            #
            # NOTE THE NEW LAYER ON THESE LINES!
            #
            # We have a dedicated 1x1 layer to get more channels. Note also
            # that this is a pure linear convolution layer.
            outdim = config.nchannel_base * 2 ** _i
            self.convs.add_module(
                "conv_{}_base".format(_i), nn.Conv2d(indim, outdim, 1, 1, 0))
            indim = outdim
            for _j in range(config.num_conv_inner):
                # We now use our selected convolution layer. Note that our
                # resnet implementation will have a different call style to
                # vanilla conv2d of torch, so we'll just do an ugly if-else
                # here.
                if config.conv2d == "torch":
                    self.convs.add_module(
                        "conv_{}_{}".format(_i, _j),
                        self.Conv2d(indim, outdim, config.ksize, 1, 1))
                    self.convs.add_module(
                        "act_{}_{}".format(_i, _j),
                        self.Activation())
                    cur_h = cur_h - (config.ksize - 1)
                    cur_w = cur_w - (config.ksize - 1)
                elif config.conv2d == "custom":
                    self.convs.add_module(
                        "conv_{}_{}".format(_i, _j),
                        self.Conv2d(indim, outdim, config.ksize, 1, self.Activation))
            self.convs.add_module(
                "conv_{}_pool".format(_i), self.Pool2d(2, 2))
            cur_h = cur_h // 2
            cur_w = cur_w // 2
        # Final output layer. We'll assume that conv layer outputs are global
        # average pooled
        self.output = nn.Linear(indim, config.num_class)
        print(self)
    def forward(self, x):
        """Forward pass for the model
        Parameters
        ----------
        x: torch.Tensor
            Input data for the model to be applied. Note that this data is
            typically in the shape of BCHW or BC, where B is the number of
            elements in the batch, and C is the number of dimension of our
            feature. H, W is when we use raw images. In the current assignment,
            it wil l be of shape BC.
        Returns
        -------
        x: torch.Tensor
            We will reuse the variable name, because often the case it's more
            convenient to do so. We will first normalize the input, and then
            feed it to our linear layer by simply calling the layer as a
            function with normalized x as argument.
        """
        # 2.2 BUG: Did Bob do anything wrong in the forward method?
        # HINT: Usually a CNN would expect correctly normalized data.
        # Roughly make input to be within -1 to 1 range
        # (assumes raw uint8-style pixel values in [0, 255]).
        x = (x - 127.5) / 127.5
        # Apply conv layers
        x = self.convs(x)
        # Global average pooling
        x = x.mean(-1).mean(-1)
        # Output layer
        x = self.output(x)
        return x
#
# model.py ends here
|
985,792 | 3b39e5c5e1e8019182b0eeb7a35cd97ae9fba185 | my_student_List =['Cristian' ,29, 'Juan',31, 'Luis',29, 'Juan',22]
"""
print(my_student_List)
print(my_student_List[0])
print(type(my_student_List))
"""
#clean code - Robert C Martin
for student in my_student_List:
print(student) |
985,793 | 346b08b62cbfb1df9592d34ecf370f57169f92c2 | import vampytest
from ..preinstanced import SortOrder
def test__SortOrder__name():
    """
    Asserts that every registered ``SortOrder`` instance exposes a ``str`` name.
    """
    sort_orders = SortOrder.INSTANCES.values()
    for sort_order in sort_orders:
        vampytest.assert_instance(sort_order.name, str)
def test__SortOrder__value():
    """
    Asserts that every registered ``SortOrder`` instance's value has the
    type declared by ``SortOrder.VALUE_TYPE``.
    """
    sort_orders = SortOrder.INSTANCES.values()
    for sort_order in sort_orders:
        vampytest.assert_instance(sort_order.value, SortOrder.VALUE_TYPE)
|
985,794 | 7493f49ac2783ef4a13faca5a4eaa8897998418f | #!/usr/bin/env python
# Code for converting wind sensor data to world frame
import rospy
from geographic_msgs.msg import Vector3
from geographic_msgs.msg import GeoPose
class Converter:
    """Node wiring for converting wind sensor data to the world frame
    (per the module header comment). The conversion itself is not yet
    implemented -- both callbacks below only log receipt."""
    def __init__(self):
        # Raw wind vector from the sensor (frame unspecified here --
        # presumably the vehicle frame; TODO confirm against publisher).
        self.wind_sub = rospy.Subscriber('/crw_wind_pub', Vector3, self.wind_callback)
        # Vehicle pose, intended for rotating the wind into the world frame.
        self.pose_sub = rospy.Subscriber('/crw_geopose_pub', GeoPose, self.pose_callback)
        # Output topic for the converted wind vector.
        self.wind_pub = rospy.Publisher('/wind_converted', Vector3, queue_size=100)
        # Latest pose; stays default-constructed until updated (note:
        # pose_callback does not yet store msg here).
        self.current_pose = GeoPose()
    def wind_callback(self, msg):
        # Stub: logs receipt only; no conversion or publish yet (Python 2 print).
        print "Wind callback"
    def pose_callback(self, msg):
        # Stub: logs receipt only; does not update self.current_pose yet.
        print "Pose callback"
985,795 | e966165bb73b58574b3be3f6d713daa0510bce6e | x = 1
# Find the index of the first Fibonacci-style term with at least 1000
# decimal digits (x is seeded to 1 just above; this looks like Project
# Euler #25 -- unconfirmed).
y = 1
z = 0
# k counts terms generated so far: x and y are terms 1 and 2.
k = 2
# Loop until the newest term z reaches 1000 decimal digits.
while len(str(z)) < 1000:
    z = x + y
    x = y
    y = z
    k += 1
# Python 2 print statement: emit the 1-based index of that term.
print k
|
985,796 | 66d446f6f592a7c206c81693a72b244ca6ea1e1e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Han"
__email__ = "liuhan132@foxmail.com"
import torch
import random
from collections import namedtuple, deque
from utils.functions import to_long_tensor, to_float_tensor
# One time-step transition in a dialog: (s, a, r, s', terminal flag).
Transition = namedtuple('Transition', ('state', 'action', 'reward', 'next_state', 'term'))


class ExperiencePool:
    """
    Experience replay buffer with a fixed maximum capacity.

    The oldest transitions are evicted automatically once the underlying
    deque reaches ``experience_pool_size`` entries.
    """

    def __init__(self, experience_pool_size):
        """
        :param experience_pool_size: maximum number of transitions retained
        """
        self.size = experience_pool_size  # maximum capacity (not the current fill)
        self.buffer = deque(maxlen=experience_pool_size)

    def __len__(self):
        # BUG FIX: this previously returned self.size (the fixed capacity),
        # over-reporting the pool before it filled up. Report the number of
        # transitions actually stored instead.
        return len(self.buffer)

    def add(self, state, action, reward, next_state, terminal):
        """Append one transition, evicting the oldest entry when at capacity."""
        self.buffer.append(Transition(state, action, reward, next_state, terminal))

    def _encode_sample(self, idx):
        """
        Collate the transitions at positions ``idx`` into a single batched
        ``Transition`` of tensors.

        :param idx: iterable of integer indices into ``self.buffer``
        :return: Transition of (stacked states, long actions, float rewards,
                 stacked next states, long terminal flags)
        """
        batch_states = []
        batch_actions = []
        batch_rewards = []
        batch_next_states = []
        batch_terminals = []
        for i in idx:
            data = self.buffer[i]
            batch_states.append(data.state)
            batch_actions.append(data.action)
            batch_rewards.append(data.reward)
            batch_next_states.append(data.next_state)
            batch_terminals.append(data.term)
        return Transition(self._statck_state(batch_states),
                          to_long_tensor(batch_actions),
                          to_float_tensor(batch_rewards),
                          self._statck_state(batch_next_states),
                          to_long_tensor(batch_terminals))

    def sample(self, batch_size):
        """Uniformly sample ``batch_size`` stored transitions WITH replacement."""
        idxes = [random.randint(0, len(self.buffer) - 1) for _ in range(batch_size)]
        return self._encode_sample(idxes)

    def _statck_state(self, list_states):
        """
        Stack a list of per-step states into one batched state.

        (The "statck" typo in the name is kept deliberately so any external
        callers are unaffected.)

        :param list_states: states exposing ``last_docs_prob`` and
            ``last_turn`` tensors with a leading batch dim of 1 -- presumed
            from the cat(dim=0) calls below; verify against DialogState.
        :return: DialogState of the batched tensors
        """
        batch_last_docs_prob = []
        batch_last_turn = []
        for ele in list_states:
            batch_last_docs_prob.append(ele.last_docs_prob)
            batch_last_turn.append(ele.last_turn)
        # (batch, num_docs)
        batch_last_docs_prob = torch.cat(batch_last_docs_prob, dim=0)
        # (batch, turn_len)
        batch_last_turn = torch.cat(batch_last_turn, dim=0)
        # NOTE(review): DialogState is not among this module's visible
        # imports -- confirm it is provided elsewhere, else this raises
        # NameError at runtime.
        return DialogState(batch_last_docs_prob, batch_last_turn)
|
985,797 | 47db6ffd90ebfe6dacffa2bf62aa18f5f9445488 | """
Example:
Enter the number of rows: 5
0
1 0 1
2 1 0 1 2
3 2 1 0 1 2 3
4 3 2 1 0 1 2 3 4
"""
n = int(input("Enter the number of rows: "))
for i in range(1, n + 1):
print(" "*(n - i), end=" ")
for j in range(1, i):
print(i - j, end=" ")
for k in range(0, i):
print(k, end=" ")
print()
|
985,798 | dc3c9ced609d3c37c457911ec11939a70a58a534 | import re
import urllib
import string
import os
from urlparse import *
from HtmlParserEngine import *
from html_cut import *
import time
# Result-formatting modes used by search_1.print_data below.
TYPE_URL = 1
TYPE_PAGE = 2
# Print a value unless it is the empty string (Python 2 print statement).
# NOTE(review): the parameter shadows the builtin ``str``; also, a None
# argument compares unequal to '' and would be printed as "None".
def echo_str(str):
    if str != '':
        print str
    return
class search_1:
    """Python 2 screen-scraper: learns an extraction rule from a template
    page (via HtmlParserComplier) and applies it to a target page (via
    HtmlParserMatch), then prints the matched items."""
    def __init__(self):
        # Extraction rule produced by compiler(); empty until compiled.
        self.rule = ''
        # Output mode: TYPE_URL / TYPE_PAGE (0 = unset).
        self.type = 0
        return
    def set_type(self, type):
        # Select the print mode; ``type`` shadows the builtin of that name.
        self.type = type;
        return
    def init_item(self):
        # Fresh result record handed to the matcher for each hit.
        return {'#url':'', '#title':'', '#replycount':'', '#time':'', '#readcount':'', '#cut':''}
    def compiler(self, rule, code):
        """Fetch the template page at ``rule`` (decoding with ``code`` when
        non-empty) and derive the extraction rule from it."""
        if '' != code:
            compiler_file = urllib.urlopen(rule).read().decode(code)
        else:
            compiler_file = urllib.urlopen(rule).read()
        compiler = HtmlParserComplier()
        compiler.feed(compiler_file)
        self.rule = compiler.rule
        return
    def parser(self, url, code):
        """Fetch ``url`` (decoded with ``code``) and return the list of
        item dicts extracted with the previously compiled rule."""
        page = urllib.urlopen(url).read().decode(code)
        urlmatcher = HtmlParserMatch(self.rule)
        urlmatcher.set_initfunc(self.init_item)
        urlmatcher.feed(page)
        return urlmatcher.data
    def search(self, rule, code1, code2, url):
        """Convenience driver: compile the rule from the template page,
        parse the target ``url``, and print the results."""
        self.compiler(rule, code1)
        data = self.parser(url, code2)
        self.print_data(data, self.type)
        return
    def print_data(self, data, type):
        """Print items: page mode shows the '#cut' excerpt; otherwise show
        thread metadata, skipping threads with fewer than 50 replies
        (hard-coded threshold)."""
        for t in data:
            if type == TYPE_PAGE:
                echo_str(t['#cut'])
                print
            else:
                replycount = 0
                if t['#replycount'] != '':
                    replycount = string.atoi(t['#replycount'])
                # Filter out low-activity threads.
                if replycount < 50:
                    continue
                echo_str(t['#title'])
                echo_str(t['#url'])
                echo_str(t['#replycount'])
                echo_str(t['#readcount'])
                echo_str(t['#time'])
                print
        return
|
985,799 | 5126f1606db22ffa950e884f4730c119da23a542 | def number_loop(limit, increment=1):
i = 0
numbers = []
while i < limit:
print "At the top i is %d" % i
numbers.append(i)
i = i + increment
print "Numbers now: ", numbers
print "At the bottom i is %d" % i
return numbers
# Demo: number_loop(5, 2) collects 0, 2, 4 (i starts at 0 and steps by the
# increment while below the limit), then each collected value is printed
# (Python 2 print statements).
result = number_loop(5, 2)
print "The numbers: "
for num in result:
    print num
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.