# GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
setup(name='sqlmap-rapid7-plugin',
version='1.1.1',
description='The SQLMap plugin allows you to scan targets and analyze the results',
author='rapid7',
author_email='',
url='',
packages=find_packages(),
install_requires=['komand'], # Add third-party dependencies to requirements.txt, not here!
scripts=['bin/komand_sqlmap']
)
|
# CircuitPython NeoPixel Color Picker Example
import board
import neopixel
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.nordic import UARTService
from adafruit_bluefruit_connect.packet import Packet
from adafruit_bluefruit_connect.color_packet import ColorPacket
ble = BLERadio()
uart_service = UARTService()
advertisement = ProvideServicesAdvertisement(uart_service)
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=0.1)
while True:
# Advertise when not connected.
ble.start_advertising(advertisement)
while not ble.connected:
pass
while ble.connected:
packet = Packet.from_stream(uart_service)
if isinstance(packet, ColorPacket):
print(packet.color)
pixels.fill(packet.color)
|
'''resequencing class'''
from copy import copy
#sbaas lims
from SBaaS_LIMS.lims_biologicalMaterial_query import lims_biologicalMaterial_query
#SBaaS models
from SBaaS_models.models_COBRA_query import models_COBRA_query
from SBaaS_models.models_BioCyc_execute import models_BioCyc_execute
#sbaas
from .stage01_resequencing_mutations_io import stage01_resequencing_mutations_io
from .stage01_resequencing_gd_query import stage01_resequencing_gd_query
from .stage01_resequencing_omniExpressExome_query import stage01_resequencing_omniExpressExome_query
#sbaas models
from .stage01_resequencing_mutations_postgresql_models import *
#resources
from sequencing_analysis.genome_diff import genome_diff
from sequencing_analysis.genome_annotations import genome_annotations
from python_statistics.calculate_interface import calculate_interface
class stage01_resequencing_mutations_execute(stage01_resequencing_mutations_io,
lims_biologicalMaterial_query,
models_COBRA_query):
#TODO:
#1. add in query_object_I
#2. add in query_func_I
#3. add in query_object_annotation_I
#4. add in query_object_annotation_func_I
#5. split into queryData, transformData, storeData functions
#6.
def execute_annotateFilteredMutations_v01(self,
experiment_id,sample_names_I=[],
annotation_I='data/U00096.2.gb',
annotation_ref_I = 'genbank',
biologicalmaterial_id_I='MG1655',
query_object_I = 'stage01_resequencing_gd_query',
query_func_I = 'get_mutations_experimentID_dataStage01ResequencingMutationsFiltered',
):
'''Annotate filtered mutations using a reference annotation
INPUT:
experiment_id = string
sample_names_I = [] of strings
annotation_I = string, reference file for the sequencing annotation
annotation_ref_I = string, reference file database source
biologicalmaterial_id_I = string
'''
#read in the annotations file
genomeannotation = genome_annotations(
annotation_I=annotation_I,
annotation_ref_I=annotation_ref_I
);
# instantiate the query object:
query_objects = {'stage01_resequencing_gd_query':stage01_resequencing_gd_query,
'stage01_resequencing_omniExpressExome_query':stage01_resequencing_omniExpressExome_query,
};
if query_object_I in query_objects.keys():
query_object = query_objects[query_object_I];
query_instance = query_object(self.session,self.engine,self.settings);
query_instance.initialize_supportedTables();
print('Executing annotation of filtered mutations...')
## query sample names
#if sample_names_I:
# sample_names = sample_names_I;
#else:
# sample_names = [];
# sample_names = self.get_sampleNames_experimentID_dataStage01ResequencingMutationsFiltered(experiment_id);
#for sn in sample_names:
# print('analyzing sample_name ' + sn);
# # query mutation data:
# mutations = [];
# mutations = self.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutationsFiltered(experiment_id,sn);
#query the data:
data_listDict = [];
if hasattr(query_instance, query_func_I):
query_func = getattr(query_instance, query_func_I);
try:
data_listDict = query_func(
#analysis_id_I,
experiment_id_I=experiment_id,
sample_names_I=sample_names_I,
);
except AssertionError as e:
print(e);
else:
print('query instance does not have the required method.');
mutation_data_O = [];
for end_cnt,mutation in enumerate(data_listDict):
print('analyzing mutations')
data_tmp = {};
# annotate each mutation based on the position
annotation = {};
annotation = genomeannotation._find_genesFromMutationPosition(mutation['mutation_data']['position']);
data_tmp['mutation_genes'] = annotation['gene']
data_tmp['mutation_locations'] = annotation['location']
data_tmp['mutation_annotations'] = annotation['product']
# generate a link to ecogene for the genes
data_tmp['mutation_links'] = [];
if biologicalmaterial_id_I and not biologicalmaterial_id_I is None:
for bnumber in annotation['locus_tag']:
if bnumber:
ecogenes = [];
ecogenes = self.get_ecogeneAccessionNumber_biologicalmaterialIDAndOrderedLocusName_biologicalMaterialGeneReferences(biologicalmaterial_id_I,bnumber);
if ecogenes:
ecogene = ecogenes[0];
ecogene_link = genomeannotation._generate_httplink2gene_ecogene(ecogene['ecogene_accession_number']);
data_tmp['mutation_links'].append(ecogene_link)
else: print('no ecogene_accession_number found for ordered_locus_location ' + bnumber);
data_tmp['experiment_id'] = mutation['experiment_id'];
data_tmp['sample_name'] = mutation['sample_name'];
frequency = 1.0;
if 'frequency' in mutation['mutation_data']:
frequency = mutation['mutation_data']['frequency'];
data_tmp['mutation_frequency'] = frequency
data_tmp['mutation_position'] = mutation['mutation_data']['position']
data_tmp['mutation_type'] = mutation['mutation_data']['type']
data_tmp['mutation_data'] = mutation['mutation_data'];
data_tmp['mutation_chromosome'] = 1;
mutation_data_O.append(data_tmp);
if mutation_data_O:
self.add_rows_table('data_stage01_resequencing_mutationsAnnotated',mutation_data_O);
def execute_annotateFilteredMutations(self,
experiment_id,sample_names_I=[],
annotation_dir_I='data/',
annotation_files_I=['U00096.2.gb'],
annotation_chromosome2File_I = {'1':'U00096.2.gb'},
annotation_ref_I = 'genbank',
biologicalmaterial_id_I='MG1655',
query_object_I = 'stage01_resequencing_gd_query',
query_func_I = 'get_mutations_experimentID_dataStage01ResequencingMutationsFiltered',
):
'''Annotate filtered mutations using one or more reference annotations
INPUT:
experiment_id = string
sample_names_I = [] of strings
annotation_dir_I = string, directory containing the annotation files
annotation_files_I = [] of strings, reference files for the sequencing annotation
annotation_chromosome2File_I = {} of chromosome id: annotation file name
annotation_ref_I = string, reference file database source
biologicalmaterial_id_I = string
'''
#read in the annotations files
genomeannotation_dict = {}; #{annotation_file: genome_annotations object}
for annotation_file in annotation_files_I:
annotation_filename = annotation_dir_I + annotation_file;
genomeannotation = genome_annotations(
annotation_I=annotation_filename,
annotation_ref_I=annotation_ref_I
);
genomeannotation_dict[annotation_file] = genomeannotation;
# instantiate the query object:
query_objects = {'stage01_resequencing_gd_query':stage01_resequencing_gd_query,
'stage01_resequencing_omniExpressExome_query':stage01_resequencing_omniExpressExome_query,
};
if query_object_I in query_objects.keys():
query_object = query_objects[query_object_I];
query_instance = query_object(self.session,self.engine,self.settings);
query_instance.initialize_supportedTables();
print('Executing annotation of filtered mutations...')
#list out the number of chromosomes
chromosomes = None;
if annotation_chromosome2File_I:
chromosomes = list(annotation_chromosome2File_I.keys());
#TODO: break into separate function
#query the data:
data_listDict = [];
if hasattr(query_instance, query_func_I):
query_func = getattr(query_instance, query_func_I);
try:
data_listDict = query_func(
#analysis_id_I,
experiment_id_I=experiment_id,
sample_names_I=sample_names_I,
chromosomes_I=chromosomes,
);
except AssertionError as e:
print(e);
else:
print('query instance does not have the required method.');
##Testing only
##read in the data:
#from io_utilities.base_importData import base_importData
#filename_I = self.settings['workspace_data']+\
# '/tmp/test.csv'
#iobase = base_importData();
#iobase.read_csv(filename_I);
#data_listDict = iobase.data;
mutation_data_O = [];
for end_cnt,mutation in enumerate(data_listDict):
#print('analyzing mutations')
data_tmp = {};
# annotate each mutation based on the position
annotation = {};
if 'chromosome' in mutation['mutation_data'].keys():
chromosome = mutation['mutation_data']['chromosome']
else:
chromosome = '1';
if not chromosome in annotation_chromosome2File_I.keys(): continue;
if chromosome in annotation_chromosome2File_I.keys() and \
not annotation_chromosome2File_I[chromosome] in genomeannotation_dict.keys(): continue;
annotation = genomeannotation_dict[annotation_chromosome2File_I[chromosome]]._find_genesFromMutationPosition(
mutation['mutation_data']['position']);
# generate a link to ecogene for the genes
data_tmp['mutation_links'] = [];
if biologicalmaterial_id_I and not biologicalmaterial_id_I is None:
for bnumber in annotation['locus_tag']:
if bnumber:
ecogenes = [];
ecogenes = self.get_ecogeneAccessionNumber_biologicalmaterialIDAndOrderedLocusName_biologicalMaterialGeneReferences(biologicalmaterial_id_I,bnumber);
if ecogenes:
ecogene = ecogenes[0];
ecogene_link = genomeannotation_dict[annotation_chromosome2File_I[chromosome]]._generate_httplink2gene_ecogene(ecogene['ecogene_accession_number']);
data_tmp['mutation_links'].append(ecogene_link)
else: print('no ecogene_accession_number found for ordered_locus_location ' + bnumber);
elif annotation['locus_tag'] and not annotation['locus_tag'][0] is None:
data_tmp['mutation_links'] = annotation['locus_tag'];
# record the data
data_tmp['mutation_genes'] = annotation['gene']
data_tmp['mutation_locations'] = annotation['location']
data_tmp['mutation_annotations'] = annotation['product']
data_tmp['experiment_id'] = mutation['experiment_id'];
data_tmp['sample_name'] = mutation['sample_name'];
frequency = 1.0;
if 'frequency' in mutation['mutation_data']:
frequency = mutation['mutation_data']['frequency'];
data_tmp['mutation_frequency'] = frequency
data_tmp['mutation_position'] = mutation['mutation_data']['position']
data_tmp['mutation_type'] = mutation['mutation_data']['type']
data_tmp['mutation_data'] = mutation['mutation_data'];
data_tmp['mutation_chromosome'] = chromosome;
mutation_data_O.append(data_tmp);
if mutation_data_O:
self.add_rows_table('data_stage01_resequencing_mutationsAnnotated',mutation_data_O);
def execute_mutateFilteredMutations(self,experiment_id,sample_names_I=[],
annotation_I='data/U00096.2.gb',
annotation_ref_I = 'genbank',
sequence_I='data/U00096.2.fas',
sequence_ref_I = 'fasta',
codonUsageTable_I='data/ecoli_codonUsageTable.csv',
IS_sequences_I='data/ecoli_IS_sequences.fasta',
IS_sequences_ref_I = 'fasta',
translation_table_I='Bacterial',
):
'''Mutate filtered mutations to determine the change in dna, rna, and peptide sequences
INPUT:
experiment_id = string
sample_names_I = [] of strings
annotation_I = string, reference file for the sequencing annotation
annotation_ref_I = string, reference file data base source
sequence_I = string, reference file for the sequence
sequence_ref_I = string, reference file format
codonUsageTable_I = string, reference file for the codon usage table
IS_sequences_I = string, reference file for the insertion element sequences
IS_sequences_ref_I = string, reference file format
translation_table_I = string, translation table to use when converting from rna to peptide sequence
'''
genomeannotation = genome_annotations(annotation_I=annotation_I,annotation_ref_I=annotation_ref_I,
sequence_I=sequence_I,sequence_ref_I=sequence_ref_I,
IS_sequences_I=IS_sequences_I,IS_sequences_ref_I=IS_sequences_ref_I,
codonUsageTable_I=codonUsageTable_I);
print('Executing annotation of filtered mutations...')
data_O = [];
# query sample names
if sample_names_I:
sample_names = sample_names_I;
else:
sample_names = [];
sample_names = self.get_sampleNames_experimentID_dataStage01ResequencingMutationsFiltered(experiment_id);
for sn in sample_names:
print('analyzing sample_name ' + sn);
data_O = [];
data_codon_O=[];
# query mutation data:
mutations = [];
mutations = self.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutationsFiltered(experiment_id,sn);
for end_cnt,mutation in enumerate(mutations):
print('analyzing mutations')
data_tmp = {};
# annotate each mutation based on the position
annotation = {};
annotation = genomeannotation._mutate_peptideFromMutationData(mutation['mutation_data'],translation_table_I=translation_table_I);
if not annotation['gene']: continue;
data_tmp['mutation_genes'] = annotation['gene']
data_tmp['mutation_locations'] = annotation['location']
data_tmp['mutation_data'] = annotation['mutation_data']
data_tmp['dna_sequence_ref'] = annotation['dna_sequence_ref'];
data_tmp['dna_sequence_new'] = annotation['dna_sequence_new'];
data_tmp['rna_sequence_ref'] = annotation['rna_sequence_ref'];
data_tmp['rna_sequence_new'] = annotation['rna_sequence_new'];
data_tmp['peptide_sequence_ref'] = annotation['peptide_sequence_ref'];
data_tmp['peptide_sequence_new'] = annotation['peptide_sequence_new'];
data_tmp['mutation_class'] = annotation['mutation_class'];
data_tmp['dna_feature_position'] = annotation['dna_feature_position']
data_tmp['dna_feature_ref'] = annotation['dna_feature_ref']
data_tmp['dna_feature_new'] = annotation['dna_feature_new']
data_tmp['rna_feature_position'] = annotation['rna_feature_position']
data_tmp['rna_feature_ref'] = annotation['rna_feature_ref']
data_tmp['rna_feature_new'] = annotation['rna_feature_new']
data_tmp['peptide_feature_position'] = annotation['peptide_feature_position']
data_tmp['peptide_feature_ref'] = annotation['peptide_feature_ref']
data_tmp['peptide_feature_new'] = annotation['peptide_feature_new']
data_tmp['experiment_id'] = mutation['experiment_id'];
data_tmp['sample_name'] = mutation['sample_name'];
data_tmp['dna_features_region'] = None;
data_tmp['rna_features_region'] = None;
data_tmp['peptide_features_region'] = None;
frequency = 1.0;
if 'frequency' in mutation['mutation_data']:
frequency = mutation['mutation_data']['frequency'];
data_tmp['mutation_frequency'] = frequency
data_tmp['mutation_position'] = mutation['mutation_data']['position']
data_tmp['mutation_type'] = mutation['mutation_data']['type']
#data_tmp['mutation_data'] = mutation['mutation_data'];
data_tmp['used_'] = True;
data_tmp['comment_'] = None;
#split into different tables depending on whether the peptide sequence changed
if mutation['mutation_data']['type']=='SNP' and 'synonymous' in annotation['mutation_class']:
data_tmp['codon_triplet_ref'] = annotation['codon_triplet_ref'];
data_tmp['codon_triplet_new'] = annotation['codon_triplet_new'];
data_tmp['codon_triplet_position'] = annotation['codon_triplet_position']
data_tmp['codon_fraction_ref'] = annotation['codon_fraction_ref']
data_tmp['codon_fraction_new'] = annotation['codon_fraction_new']
data_tmp['codon_frequency_ref'] = annotation['codon_frequency_ref']
data_tmp['codon_frequency_new'] = annotation['codon_frequency_new']
data_tmp['codon_frequency_units'] = annotation['codon_frequency_units']
data_codon_O.append(data_tmp);
else:
data_O.append(data_tmp);
#upload the data to the database (each sample)
if data_O:
self.add_dataStage01ResequencingMutationsSeqChanges(data_O);
if data_codon_O:
self.add_rows_table('data_stage01_resequencing_mutationsCodonChanges',data_codon_O);
def map_geneName2ModelReaction(self,
biologicalmaterial_id_I,gene_name_I,
model_id_I,):
"""return the model reaction rows whose enzymes are produced by a given gene
INPUT:
biologicalmaterial_id_I = string, e.g. MG1655
gene_name_I = string, e.g. pgi
model_id_I = string, e.g. iJO1366
OUTPUT:
rows_O = rows from data_stage02_physiology_modelReactions
"""
rows_O = [];
orderedLocusNames = [];
orderedLocusNames = self.get_orderedLocusName_biologicalmaterialIDAndGeneName_biologicalMaterialGeneReferences(biologicalmaterial_id_I,gene_name_I);
for oln in orderedLocusNames:
rows_tmp = [];
rows_tmp = self.get_rows_modelIDAndOrderedLocusName_dataStage02PhysiologyModelReactions(model_id_I,oln['ordered_locus_name']);
for r in rows_tmp:
r['mutation_gene'] = gene_name_I;
rows_O.append(r);
return rows_O;
def execute_mapGeneName2ModelReaction_mutationsAnnotated(self,
experiment_id,filename_O,
biologicalmaterial_id_I,
model_id_I,
sample_names_I=[],
gene_names_I=[]):
"""return the model reaction rows whose enzymes are produced by a given gene
INPUT:
biologicalmaterial_id_I = string, e.g. MG1655
gene_name_I = string, e.g. pgi
model_id_I = string, e.g. iJO1366
OUTPUT:
filename_O = name of output file
rows from data_stage02_physiology_modelReactions in a .csv file
"""
data_O = [];
# query sample names from the experiment
if sample_names_I:
sample_names = sample_names_I;
else:
sample_names = [];
sample_names = self.get_sampleNames_experimentID_dataStage01ResequencingMetadata(experiment_id);
for sn in sample_names:
#query the mutations from the experiment
mutations = [];
mutations = self.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutationsAnnotated(experiment_id,sn);
for mutation in mutations:
for gene_name in mutation['mutation_genes']:
if gene_names_I:
if not gene_name in gene_names_I:
continue
rows = [];
rows = self.map_geneName2ModelReaction(
biologicalmaterial_id_I,gene_name,
model_id_I);
if rows:
for r in rows:
r['experiment_id']=mutation['experiment_id'];
r['sample_name']=mutation['sample_name'];
r['mutation_frequency']=mutation['mutation_frequency'];
r['mutation_type']=mutation['mutation_type'];
r['mutation_position']=mutation['mutation_position'];
r['mutation_data']=mutation['mutation_data'];
r['mutation_annotations']=mutation['mutation_annotations'];
r['mutation_genes']=mutation['mutation_genes'];
r['mutation_locations']=mutation['mutation_locations'];
r['mutation_links']=mutation['mutation_links'];
r['used_']=mutation['used_'];
r['comment_']=mutation['comment_'];
r['biologicalmaterial_id'] = biologicalmaterial_id_I;
r['model_id'] = model_id_I;
data_O.append(r);
#export the data to .csv
self.export_mapGeneName2ModelReaction_csv(data_O,filename_O);
def calculate_distributionOfMutationsInBioCycParentClasses(
self,
experiment_ids_I,
sample_names_I,
parent_classes_I=['Transcription related'],
database_I='ECOLI',
names_I=[],
unique_I=True,
):
'''calculate the percentages of mutations in each BioCyc parent_class
INPUT:
experiment_ids_I = [] of strings
sample_names_I = [] of strings
parent_classes_I = [] of strings, BioCyc parent classes to include
database_I = string, BioCyc database identifier (e.g. ECOLI)
names_I = [] of strings
unique_I = boolean, count each mutated gene only once if True
'''
#BioCyc dependencies
biocyc01 = models_BioCyc_execute(self.session,self.engine,self.settings);
biocyc01.initialize_supportedTables()
biocyc01.initialize_tables()
from SBaaS_models.models_BioCyc_dependencies import models_BioCyc_dependencies
biocyc01_dep = models_BioCyc_dependencies();
if parent_classes_I:
parent_classes = parent_classes_I;
else:
parent_classes = [];
parent_classes = biocyc01.getParsed_parentClasses_modelsBioCycPolymerSegments(
database_I=database_I
);
pc2Genes = {};
for pc in parent_classes:
#join list of genes with alternative identifiers
biocyc_genes = biocyc01.getParsed_genesAndAccessionsAndSynonyms_namesAndParentClassesAndDatabase_modelsBioCycPolymerSegments(
names_I=names_I,
database_I=database_I,
parent_classes_I='%s"%s"%s'%('%',pc,'%'),
query_I={},
output_O='listDict',
dictColumn_I=None);
if biocyc_genes:
gene_ids = list(set([g['gene'] for g in biocyc_genes if g['gene']] +\
[g['common_name'] for g in biocyc_genes if g['common_name']] +\
[g['synonym'] for g in biocyc_genes if g['synonym']]));
pc2Genes[pc] = gene_ids;
#query all of the resequencing data
mutations_rows = self.get_mutations_experimentIDsAndSampleNames_dataStage01ResequencingMutationsAnnotated(
experiment_ids_I = experiment_ids_I,
sample_names_I = sample_names_I);
mutated_genes = [];
for row in mutations_rows:
if row['mutation_genes']: #exclude non-annotated regions
mutated_genes.extend(row['mutation_genes']);
if unique_I:
mutations_genes_cnt = len(list(set(mutated_genes)))
else :
mutations_genes_cnt = len(mutated_genes)
#calculate the distributions for each parent_class
data_O = [];
for parent_class,gene_ids in pc2Genes.items():
if unique_I:
pc_genes_cnt = len(list(set([d for d in mutated_genes if d in gene_ids])))
else:
pc_genes_cnt = len([d for d in mutated_genes if d in gene_ids])
genes_ratio = pc_genes_cnt/mutations_genes_cnt;
tmp = {
'analysis_id':None,
'feature_id':'parent_class',
'feature_units':None,
'element_id':parent_class,
'frequency':pc_genes_cnt,
'fraction':genes_ratio,
'used_':True,
'comment_':None}
#tmp = {'parent_class':parent_class,
# 'mutation_genes_count':mutations_genes_cnt,
# 'genes_count':pc_genes_cnt,
# 'genes_fraction':genes_ratio};
data_O.append(tmp);
return data_O;
def calculate_fractionOfMutationLocations(
self,
experiment_id_I,
sample_names_I,
mutation_locations_I = []
):
'''calculate the percentages of mutations in each mutation_location
INPUT:
experiment_id_I = string
sample_names_I = [] of strings
mutation_locations_I = [] of strings, mutation locations to include
EXAMPLE:
sample_names = '140807_11_OxicEvo04Evo01EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04Evo02EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04gndEvo01EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04gndEvo02EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04gndEvo03EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04pgiEvo01EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04pgiEvo02EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04pgiEvo03EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04pgiEvo04EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04pgiEvo05EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04pgiEvo06EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04pgiEvo07EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04pgiEvo08EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04ptsHIcrrEvo01EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04ptsHIcrrEvo02EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04ptsHIcrrEvo03EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04ptsHIcrrEvo04EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04sdhCBEvo01EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04sdhCBEvo02EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04sdhCBEvo03EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04tpiAEvo01EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04tpiAEvo02EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04tpiAEvo03EPEcoliGlcM9_Broth-1,\
140807_11_OxicEvo04tpiAEvo04EPEcoliGlcM9_Broth-1';
mutation_locations_fractions = mut01.calculate_fractionOfMutationLocations(
experiment_id_I = 'ALEsKOs01',
sample_names_I = sample_names,);
#export the data to disk
from io_utilities.base_exportData import base_exportData
iobase = base_exportData(mutation_locations_fractions);
iobase.write_dict2json(
pg_settings.datadir_settings['workspace_data']+\
'/_output/ALEsKOs01_0_11_parent_classes_fractions.json');
iobase.write_dict2csv(
pg_settings.datadir_settings['workspace_data']+\
'/_output/ALEsKOs01_0_11_parent_classes_fractions.csv');
'''
#query all of the resequencing data
mutations_rows = self.get_mutations_experimentIDAndSampleNames_dataStage01ResequencingMutationsAnnotated(
experiment_id_I = experiment_id_I,
sample_names_I = sample_names_I);
mutation_locations = {};
mutated_genes = [];
for row in mutations_rows:
if row['mutation_genes'] : #exclude non-annotated regions
mutated_genes.extend(row['mutation_genes']);
if not row['mutation_location'] in mutation_locations:
mutation_locations[row['mutation_location']]= [];
if mutation_locations_I and not row['mutation_location'] in mutation_locations_I:
continue;
mutation_locations[row['mutation_location']].extend(row['mutation_genes'])
mutations_genes_cnt = len(list(set(mutated_genes)))
#calculate the distributions for each parent_class
data_O = [];
for parent_class,gene_ids in mutation_locations.items():
pc_genes_cnt = len(list(set([d for d in mutated_genes if d in gene_ids])))
genes_ratio = pc_genes_cnt/mutations_genes_cnt;
tmp = {'parent_class':parent_class,
'mutation_genes_count':mutations_genes_cnt,
'genes_count':pc_genes_cnt,
'genes_fraction':genes_ratio};
data_O.append(tmp);
return data_O;
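# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how this class is typically driven, assuming a configured
# SBaaS session, engine, and settings object are already available; the names
# `session`, `engine`, and `settings` below are hypothetical placeholders, and the
# experiment/annotation identifiers are taken from the docstring examples above.
#
# mut01 = stage01_resequencing_mutations_execute(session, engine, settings)
# mut01.initialize_supportedTables()
# mut01.execute_annotateFilteredMutations(
#     experiment_id='ALEsKOs01',
#     annotation_dir_I='data/',
#     annotation_files_I=['U00096.2.gb'],
#     annotation_chromosome2File_I={'1': 'U00096.2.gb'},
#     annotation_ref_I='genbank',
#     biologicalmaterial_id_I='MG1655')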
|
#
# Base submodel class
#
import pybamm
class BaseSubModel(pybamm.BaseModel):
"""
The base class for all submodels. All submodels inherit from this class and must
only provide public methods which overwrite those in this base class. Any methods
added to a submodel that do not overwrite those in this base class are made
private with the prefix '_', providing a consistent public interface for all
submodels.
Parameters
----------
param: parameter class
The model parameter symbols
domain : str
The domain of the model either 'Negative' or 'Positive'
name: str
A string giving the name of the submodel
external: bool, optional
Whether the variables defined by the submodel will be provided externally
by the users. Default is 'False'.
options: dict
A dictionary of options to be passed to the model.
See :class:`pybamm.BaseBatteryModel`
Attributes
----------
param: parameter class
The model parameter symbols
rhs: dict
A dictionary that maps expressions (variables) to expressions that represent
the rhs
algebraic: dict
A dictionary that maps expressions (variables) to expressions that represent
the algebraic equations. The algebraic expressions are assumed to equate
to zero. Note that all the variables in the model must exist in the keys of
`rhs` or `algebraic`.
initial_conditions: dict
A dictionary that maps expressions (variables) to expressions that represent
the initial conditions for the state variables y. The initial conditions for
algebraic variables are provided as initial guesses to a root finding algorithm
that calculates consistent initial conditions.
boundary_conditions: dict
A dictionary that maps expressions (variables) to expressions that represent
the boundary conditions
variables: dict
A dictionary that maps strings to expressions that represent
the useful variables
events: list
A list of events. Each event can either cause the solver to terminate
(e.g. concentration goes negative), or be used to inform the solver of the
existence of a discontinuity (e.g. a discontinuity in the input current)
"""
def __init__(
self, param, domain=None, name="Unnamed submodel", external=False, options=None
):
super().__init__(name)
self.param = param
self.domain = domain
self.set_domain_for_broadcast()
self.name = name
self.external = external
self.options = pybamm.BatteryModelOptions(options or {})
# Save whether the submodel is a half-cell submodel
we = self.options["working electrode"]
self.half_cell = we != "both"
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, domain):
ok_domain_list = [
"Negative",
"Separator",
"Positive",
"Negative electrode",
"Negative electrolyte",
"Separator electrolyte",
"Positive electrode",
"Positive electrolyte",
]
if domain in ok_domain_list:
self._domain = domain
elif domain is None:
pass
else:
raise pybamm.DomainError(
"Domain '{}' not recognised (must be one of {})".format(
domain, ok_domain_list
)
)
def set_domain_for_broadcast(self):
if hasattr(self, "_domain"):
if self.domain in ["Negative", "Positive"]:
self.domain_for_broadcast = self.domain.lower() + " electrode"
elif self.domain == "Separator":
self.domain_for_broadcast = "separator"
def get_fundamental_variables(self):
"""
A public method that creates and returns the variables in a submodel which can
be created independent of other submodels. For example, the electrolyte
concentration variables can be created independent of whether any other
variables have been defined in the model. As a rule, if a variable can be
created without variables from other submodels, then it should be placed in
this method.
Returns
-------
dict :
The variables created by the submodel which are independent of variables in
other submodels.
"""
return {}
def get_external_variables(self):
"""
A public method that returns the variables in a submodel which are
supplied by an external source.
Returns
-------
list :
A list of the external variables in the model.
"""
external_variables = []
list_of_vars = []
if self.external is True:
# look through all the variables in the submodel and get the
# variables which are state vectors
submodel_variables = self.get_fundamental_variables()
for var in submodel_variables.values():
if isinstance(var, pybamm.Variable):
list_of_vars += [var]
elif isinstance(var, pybamm.Concatenation):
if all(
isinstance(child, pybamm.Variable) for child in var.children
):
list_of_vars += [var]
# first add only unique concatenations
unique_ids = []
for var in list_of_vars:
if var.id not in unique_ids and isinstance(var, pybamm.Concatenation):
external_variables += [var]
unique_ids += [var.id]
# also add the ids of the children to unique ids
for child in var.children:
unique_ids += [child.id]
# now add any unique variables that are not part of a concatenation
for var in list_of_vars:
if var.id not in unique_ids:
external_variables += [var]
unique_ids += [var.id]
return external_variables
def get_coupled_variables(self, variables):
"""
A public method that creates and returns the variables in a submodel which
require variables in other submodels to be set first. For example, the
exchange current density requires the concentration in the electrolyte to
be created before it can be created. If a variable can be created independent
of other submodels then it should be created in 'get_fundamental_variables'
instead of this method.
Parameters
----------
variables: dict
The variables in the whole model.
Returns
-------
dict :
The variables created in this submodel which depend on variables in
other submodels.
"""
return {}
def set_rhs(self, variables):
"""
A method to set the right hand side of the differential equations which contain
a time derivative. Note: this method modifies the state of self.rhs. Unless
overwritten by a submodel, the default behaviour of 'pass' is used as
implemented in :class:`pybamm.BaseSubModel`.
Parameters
----------
variables: dict
The variables in the whole model.
"""
pass
def set_algebraic(self, variables):
"""
A method to set the differential equations which do not contain a time
derivative. Note: this method modifies the state of self.algebraic. Unless
overwritten by a submodel, the default behaviour of 'pass' is used as
implemented in :class:`pybamm.BaseSubModel`.
Parameters
----------
variables: dict
The variables in the whole model.
"""
pass
def set_boundary_conditions(self, variables):
"""
A method to set the boundary conditions for the submodel. Note: this method
modifies the state of self.boundary_conditions. Unless overwritten by a
submodel, the default behaviour of 'pass' is used as implemented in
:class:`pybamm.BaseSubModel`.
Parameters
----------
variables: dict
The variables in the whole model.
"""
pass
def set_initial_conditions(self, variables):
"""
A method to set the initial conditions for the submodel. Note: this method
modifies the state of self.initial_conditions. Unless overwritten by a
submodel, the default behaviour of 'pass' is used as implemented in
:class:`pybamm.BaseSubModel`.
Parameters
----------
variables: dict
The variables in the whole model.
"""
pass
def set_events(self, variables):
"""
A method to set events related to the state of submodel variables. Note: this
method modifies the state of self.events. Unless overwritten by a submodel, the
default behaviour of 'pass' is used as implemented in
:class:`pybamm.BaseSubModel`.
Parameters
----------
variables: dict
The variables in the whole model.
"""
pass
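# --- Illustrative sketch (not part of pybamm) ---
# A minimal submodel subclass showing how the public interface described in the
# docstring above is typically filled in. The class name and variable name are
# assumptions for demonstration only.
class ConstantConcentrationExample(BaseSubModel):
    def __init__(self, param, domain=None, options=None):
        super().__init__(param, domain, name="constant concentration example", options=options)

    def get_fundamental_variables(self):
        # Variables that can be created independently of other submodels.
        c_e = pybamm.Variable("Electrolyte concentration example")
        return {"Electrolyte concentration example": c_e}

    def set_rhs(self, variables):
        # d(c_e)/dt = 0, i.e. the concentration stays at its initial value.
        c_e = variables["Electrolyte concentration example"]
        self.rhs = {c_e: pybamm.Scalar(0)}

    def set_initial_conditions(self, variables):
        c_e = variables["Electrolyte concentration example"]
        self.initial_conditions = {c_e: pybamm.Scalar(1)}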
|
class SameCardsInOneDeckError(Exception):
pass
|
from setuptools import setup
setup(
name='zipfpy',
version='0.1',
author='Greg Wilson',
packages=['zipfpy']
)
|
import sys
import time
File__ = None
FileSize__ = None
FileName__ = None
Secs__ = None
def InitFile(File, FileName = ""):
global Secs__, File__, FileSize__, FileName__
File__ = File
FileName__ = FileName
Secs__ = None
Pos = File.tell()
File.seek(0, 2)
FileSize__ = File.tell()
File.seek(Pos)
def FileDone(Msg = ""):
global Secs__, File__, FileSize__, FileName__
Str = "%s 100.0%% %s \n" % (FileName__, Msg)
sys.stderr.write(Str)
def File(Msg = ""):
global Secs__, File__, FileSize__, FileName__
Secs = time.perf_counter()  # time.clock() was removed in Python 3.8
if Secs__ != None and Secs - Secs__ < 1:
return
Secs__ = Secs
Pos = File__.tell()
Pct = (100.0*Pos)/FileSize__
Str = "%s %5.1f%% %s \r" % (FileName__, Pct, Msg)
sys.stderr.write(Str)
def File2(Msg = ""):
global Secs__, File__, FileSize__, FileName__
Secs = time.perf_counter()
if Secs__ != None and Secs - Secs__ < 1:
return
Secs__ = Secs
Pos = File__.tell()
Pct = (100.0*Pos)/FileSize__
Str = " %s %s \r" % (FileName__, Msg)
sys.stderr.write(Str)
def FileStep(Msg = ""):
File(Msg)
def Step(Msg, i, N):
global Secs__, File__, FileSize__, FileName__
Secs = time.perf_counter()
if Secs__ != None and Secs - Secs__ < 1:
return
Secs__ = Secs
Pct = (100.0*i)/N
if i == N-1:
sys.stderr.write("%5.1f%% %s \n" % (Pct, Msg))
else:
sys.stderr.write("%5.1f%% %s \r" % (Pct, Msg))
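# Illustrative usage (not part of the original module): report throttled
# progress while reading a file in chunks. "example.dat" is a hypothetical
# file name used for demonstration only.
if __name__ == "__main__":
    with open("example.dat", "rb") as f:
        InitFile(f, "example.dat")
        while f.read(1 << 20):
            File("reading")
        FileDone("done")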
|
"""A framework for bulk data processing."""
|
# O(n) time | O(n) space
def branchSums(root):
sums = []
preorderTraversal(root, 0, sums)
return sums
# Recursive
def preorderTraversal(root, runningSum, sums):
if root:
if root.left or root.right:
preorderTraversal(root.left, runningSum + root.value, sums)
preorderTraversal(root.right, runningSum + root.value, sums)
else:
sums.append(runningSum + root.value)
# O(n) time | O(1) space
# Iterative
def branchSums(root):
stack = []
sums = []
root.currSum = 0
stack.append(root)
while stack:
root = stack.pop()
if root.left or root.right:
if root.right :
root.right.currSum = root.currSum + root.value
stack.append(root.right)
if root.left:
root.left.currSum = root.currSum + root.value
stack.append(root.left)
else:
sums.append(root.currSum + root.value)
return sums
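# Minimal demo (not part of the original solutions): a small node class so the
# branchSums functions above can be exercised directly.
class BinaryTree:
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

if __name__ == "__main__":
    #        1
    #      /   \
    #     2     3
    #    / \
    #   4   5
    tree = BinaryTree(1, BinaryTree(2, BinaryTree(4), BinaryTree(5)), BinaryTree(3))
    print(branchSums(tree))  # [7, 8, 4] -> one running sum per root-to-leaf branch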
|
import os
import shutil
from django.test import override_settings, TestCase
from drftest import doc_generator
@override_settings(DRF_TEST_DOCS_DIR='drftest/tests/test_docs')
class DocGeneratorTest(TestCase):
def setUp(self):
super().setUp()
doc_generator.store = [{
'method': 'post',
'data': {'foo': 'bar'},
'url': '/api',
'url_kwargs': {'pk': 2},
'format': 'json',
'headers': {'Authorization': 'Token abcde'},
'success': True,
'meta': {
'docs': 'Method docstring',
'method_name': 'test_sth',
'class_name': 'SthTest',
'app_name': 'some_app'
},
'response': {
'data': {'foo': 'barium'},
'status': 200,
}
}]
doc_generator.class_docs = {'SthTest': 'Class docstring'}
def to_absolute_path(self, *paths):
return os.path.join(os.path.dirname(__file__), *paths)
def tearDown(self):
super().tearDown()
dirpath = self.to_absolute_path('test_docs')
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
def test_yml_file(self):
doc_generator.write_docs()
yml_path = self.to_absolute_path('test_docs', 'mkdocs.yml')
self.assertTrue(os.path.exists(yml_path))
with open(yml_path) as f:
lines = f.readlines()
self.assertIn('site_name: DRF Tests', lines[0])
self.assertIn('theme: readthedocs', lines[1])
def assertStrListContainsSubstring(self, str_list, substring):
joined = ' '.join(str_list)
self.assertIn(substring, joined)
def test_index_page(self):
doc_generator.write_docs()
md_path = self.to_absolute_path('test_docs', 'docs', 'index.md')
self.assertTrue(os.path.exists(md_path))
def test_app_page(self):
doc_generator.write_docs()
md_path = self.to_absolute_path('test_docs', 'docs', 'some_app.md')
self.assertTrue(os.path.exists(md_path))
with open(md_path) as f:
lines = f.readlines()
self.assertStrListContainsSubstring(lines[:3], '# some_app')
self.assertStrListContainsSubstring(lines[:15], 'Class docstring')
self.assertStrListContainsSubstring(lines[:15], '## SthTest')
self.assertStrListContainsSubstring(lines[:25], '**test_sth**')
self.assertStrListContainsSubstring(lines[:25], 'Method docstring')
self.assertStrListContainsSubstring(lines[15:30], '* **URL:** `/api`')
self.assertStrListContainsSubstring(lines[15:30], '* **Method:** `post`')
self.assertStrListContainsSubstring(lines[15:30], '* **Format:** `json`')
self.assertStrListContainsSubstring(lines, '"Authorization": "Token abcde"')
self.assertStrListContainsSubstring(lines, '* **Response data:** ')
self.assertStrListContainsSubstring(lines, '* **Response status code**: 200')
self.assertStrListContainsSubstring(lines, '* **Request data:**')
|
import logging
import sys
try:
from enum import Enum
except ImportError:
from ..enum import Enum
from PySide2 import QtWidgets, QtCore, QtGui
from . import utils
# py 2.7
if sys.version_info[0] >= 3:
unicode = str
class AttributeTableView(QtWidgets.QTableView):
def __init__(self, parent=None):
super(AttributeTableView, self).__init__(parent)
self._header_state = {}
self.init_ui()
def init_ui(self):
self.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
self.setAlternatingRowColors(True)
self.setShowGrid(False)
self.setSortingEnabled(True)
self.horizontalHeader().setSortIndicatorShown(True)
self.horizontalHeader().setSectionsMovable(True)
self.horizontalHeader().setStretchLastSection(True)
self.verticalHeader().hide()
header = self.horizontalHeader()
header.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
@property
def _model(self):
model = super(AttributeTableView, self).model()
if model and isinstance(model, QtCore.QAbstractProxyModel):
model = model.sourceModel()
return model
@property
def selected_attribute_items(self):
attribute_items = []
for selected_index in self.selectionModel().selectedRows():
index = self.model().mapToSource(selected_index)
attribute_item = self._model.attribute_item_from_index(index)
if attribute_item:
attribute_items.append(attribute_item)
return attribute_items
def update_requested(self):
header_state = self.header_state
if header_state:
self._header_state = header_state
def update(self):
self.update_delegates()
# rebuild header_state from cache
self.header_state = self._header_state
def update_delegates(self):
node = self._model.data(self._model.index(0, 0), QtCore.Qt.UserRole + 1)
if node:
for i in range(self._model.columnCount()):
attribute = self._model.headerData(i, QtCore.Qt.Horizontal, QtCore.Qt.UserRole + 1)
value = getattr(node, attribute)
delegate = self.delegate_from_value(value, parent=self)
self.setItemDelegateForColumn(i, delegate)
def update_header(self):
header = self.horizontalHeader()
states = []
for i, action in enumerate(header.actions()):
state = action.isChecked()
states.append(state)
if state and not header.sectionSize(i):
header.resizeSection(i, 100)
header.setSectionHidden(i, not state)
# prevent all columns from being hidden
if not any(states):
header.actions()[0].setChecked(True)
header.setSectionHidden(0, False)
def contextMenuEvent(self, event):
index = self.indexAt(event.pos())
if not self.model():
return
model_index = self.model().mapToSource(index)
if not self._model.itemFromIndex(model_index).isEditable():
return
menu = QtWidgets.QMenu(self)
action = menu.addAction('Edit')
action.triggered.connect(lambda: self.edit(index))
menu.popup(event.globalPos())
@property
def header_state(self):
header = self.horizontalHeader()
headers = {}
for i in range(header.count()):
attribute = self._model.horizontalHeaderItem(i).data()
visibility = not header.isSectionHidden(i)
width = header.sectionSize(i)
visual_index = header.visualIndex(i)
headers[attribute] = {
'width': width,
'visibility': visibility,
'visual_index': visual_index
}
return headers
@header_state.setter
def header_state(self, headers):
if headers:
# cache state
self._header_state = headers
else:
# set state from cache
headers = self._header_state
header = self.horizontalHeader()
for i in range(header.count()):
attribute = self._model.horizontalHeaderItem(i).data()
values = headers.get(attribute)
if values:
visibility = values.get('visibility', True)
width = values.get('width', 100)
if width == 0:
visibility = False
header.setSectionHidden(i, not visibility)
header.resizeSection(i, width)
header.moveSection(header.visualIndex(i), values.get('visual_index', i))
self.update_header_actions()
def update_header_actions(self):
header = self.horizontalHeader()
for action in header.actions():
header.removeAction(action)
for i in range(header.count()):
item = self._model.horizontalHeaderItem(i)
action = QtWidgets.QAction(item.text(), self)
action.setCheckable(True)
action.setChecked(not header.isSectionHidden(i))
action.triggered.connect(self.update_header)
header.addAction(action)
@staticmethod
def delegate_from_value(value, parent=None):
if isinstance(value, Enum):
delegate = EnumDelegate(enum=value.__class__, parent=parent)
elif isinstance(value, utils.FileSize):
delegate = FileSizeDelegate(parent)
elif isinstance(value, QtGui.QColor):
delegate = ColorDelegate(parent)
elif isinstance(value, bool):
delegate = BoolDelegate(parent)
elif isinstance(value, list):
delegate = ListDelegate(parent)
else:
delegate = Delegate(parent)
return delegate
class AttributeItemModel(QtGui.QStandardItemModel):
update_requested = QtCore.Signal()
updated = QtCore.Signal()
def __init__(self, parent=None):
super(AttributeItemModel, self).__init__(parent)
self.attributes = []
def data(self, index, role=QtCore.Qt.DisplayRole):
# override to enable deferred loading of items
data = super(AttributeItemModel, self).data(index, role)
if data is None and role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
attribute_item = self.attribute_item_from_index(index)
attribute = self.attribute_from_index(index)
data = getattr(attribute_item, attribute)
super(AttributeItemModel, self).setData(index, data, role)
return data
def setData(self, index, value, role):
result = super(AttributeItemModel, self).setData(index, value, role)
if role == QtCore.Qt.EditRole:
attribute_item = self.attribute_item_from_index(index)
attribute = self.attribute_from_index(index)
try:
setattr(attribute_item, attribute, value)
except AttributeError:
return False
return True and result
def attribute_item_from_index(self, index):
item = self.item(index.row(), 0)
if item:
return item.data()
def attribute_from_index(self, index):
header_item = self.horizontalHeaderItem(index.column())
if header_item:
return header_item.data()
def set_items(self, attribute_items):
self.update_requested.emit()
self.clear()
for attribute_item in attribute_items:
items = []
for i, attribute in enumerate(attribute_item.attributes):
item = QtGui.QStandardItem()
if attribute in attribute_item.locked_attributes:
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsEditable)
if i == 0:
item.setData(attribute_item)
items.append(item)
if items:
self.appendRow(items)
self.set_headers(attribute_items)
self.updated.emit()
def update(self):
for row in range(self.rowCount()):
self.update_index(self.index(row, 0))
def update_items(self, attribute_items):
for row in range(self.rowCount()):
attribute_item = self.item(row, 0).data()
if attribute_item in attribute_items:
self.update_index(self.index(row, 0))
attribute_items.remove(attribute_item)
if not attribute_items:
break
def update_index(self, index):
for column in range(self.columnCount()):
item = self.item(index.row(), column)
item.setData(None, QtCore.Qt.DisplayRole)
def set_headers(self, attribute_items):
if attribute_items:
self.attributes = attribute_items[0].attributes
for i, attribute in enumerate(self.attributes):
item = QtGui.QStandardItem()
item.setText(utils.title(attribute))
item.setData(attribute)
self.setHorizontalHeaderItem(i, item)
class AttributeSortModel(QtCore.QSortFilterProxyModel):
filters = {}
def __init__(self, parent=None):
super(AttributeSortModel, self).__init__(parent)
self.setFilterRegExp('')
def value(self, value):
# py 2.7
if isinstance(value, str) or isinstance(value, unicode):
if self.sortCaseSensitivity() == QtCore.Qt.CaseInsensitive:
return value.lower()
elif isinstance(value, QtGui.QColor):
return str(value)
elif isinstance(value, Enum):
return value.value
return value
def lessThan(self, left, right):
# load requested item / override data
left_value = self.value(self.sourceModel().data(left))
right_value = self.value(self.sourceModel().data(right))
return left_value < right_value
def filterAcceptsRow(self, source_row, source_parent):
model = self.sourceModel()
for attribute, filter_value in self.filters.items():
try:
column = model.attributes.index(attribute)
except ValueError:  # list.index raises ValueError when the attribute is missing
continue
index = model.index(source_row, column, source_parent)
item_value = self.value(model.data(index))
filter_value = self.value(filter_value)
# support for filtering string lists
if isinstance(item_value, list):
item_value = ''.join(item_value)
# py 2.7
if isinstance(filter_value, str) or isinstance(filter_value, unicode):
# todo: add support for expressions?
if filter_value not in item_value:
break
elif filter_value != item_value:
break
else:
return True
return False
def update_filters(self, filters):
self.filters = filters
self.setFilterFixedString('')
class Delegate(QtWidgets.QStyledItemDelegate):
def setModelData(self, editor, model, index, value=None):
# Set model data on all selected rows;
# sometimes the right click happens on a row that is not selected.
indexes = [index]
if self.parent() and self.parent().selectionModel():
indexes.extend(self.parent().selectionModel().selectedRows(index.column()))
indexes = list(set(indexes))
for item_index in indexes:
if value is not None:
model.setData(item_index, value, QtCore.Qt.EditRole)
else:
super(Delegate, self).setModelData(editor, model, item_index)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
class FileSizeDelegate(Delegate):
def displayText(self, value, locale):
return str(value)
def initStyleOption(self, option, index):
super(FileSizeDelegate, self).initStyleOption(option, index)
option.displayAlignment = QtCore.Qt.AlignRight
class ListDelegate(Delegate):
def displayText(self, value, locale):
return ', '.join(value)
class BoolDelegate(Delegate):
def displayText(self, value, locale):
return 'Enabled' if value else 'Disabled'
def createEditor(self, parent, option, index):
editor = QtWidgets.QComboBox(parent)
editor.addItems(('Disabled', 'Enabled'))
editor.currentIndexChanged.connect(lambda: self.commitData.emit(editor))
editor.currentIndexChanged.connect(lambda: self.closeEditor.emit(editor, self.NoHint))
return editor
def setEditorData(self, editor, index):
value = index.model().data(index, QtCore.Qt.EditRole)
editor.setCurrentIndex(value)
def setModelData(self, editor, model, index):
value = bool(editor.currentIndex())
super(BoolDelegate, self).setModelData(editor, model, index, value)
class EnumDelegate(Delegate):
def __init__(self, enum, parent=None):
super(EnumDelegate, self).__init__(parent)
self.enum = enum
def displayText(self, value, locale):
return value.name
def createEditor(self, parent, option, index):
editor = QtWidgets.QComboBox(parent)
for member in self.enum:
editor.addItem(member.name, member.value)
editor.currentIndexChanged.connect(lambda: self.commitData.emit(editor))
editor.currentIndexChanged.connect(lambda: self.closeEditor.emit(editor, self.NoHint))
return editor
def setEditorData(self, editor, index):
member = index.model().data(index, QtCore.Qt.EditRole)
index = editor.findData(member.value)
editor.setCurrentIndex(index)
# editor.showPopup()
def setModelData(self, editor, model, index):
value = self.enum(editor.currentData())
super(EnumDelegate, self).setModelData(editor, model, index, value)
class ColorDelegate(Delegate):
def displayText(self, value, locale):
return
def createEditor(self, parent, option, index):
editor = QtWidgets.QWidget(parent)
editor.dialog = QtWidgets.QColorDialog(editor)
editor.color = None
editor.dialog.colorSelected.connect(lambda color: setattr(editor, 'color', color))
editor.dialog.colorSelected.connect(lambda: self.commitData.emit(editor))
editor.dialog.colorSelected.connect(lambda: self.closeEditor.emit(editor, self.NoHint))
return editor
def setEditorData(self, editor, index):
value = index.model().data(index, QtCore.Qt.EditRole)
editor.dialog.setCurrentColor(value)
editor.dialog.open()
def setModelData(self, editor, model, index):
value = editor.color
if value and value.isValid():
super(ColorDelegate, self).setModelData(editor, model, index, value)
def paint(self, painter, option, index):
value = index.model().data(index, QtCore.Qt.EditRole)
if value:
option.rect.adjust(5, 5, -5, -5)
painter.setBrush(value)
painter.drawRect(option.rect)
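# --- Illustrative wiring (not part of the original module) ---
# A minimal sketch of how the model, sort proxy and view are composed. The
# DemoItem class below is a hypothetical stand-in for the application's own
# attribute items (anything exposing `attributes` and `locked_attributes`).
if __name__ == "__main__":
    class DemoItem(object):
        attributes = ['name', 'enabled']
        locked_attributes = []

        def __init__(self, name, enabled):
            self.name = name
            self.enabled = enabled

    app = QtWidgets.QApplication(sys.argv)
    model = AttributeItemModel()
    proxy = AttributeSortModel()
    proxy.setSourceModel(model)
    view = AttributeTableView()
    view.setModel(proxy)
    model.set_items([DemoItem('foo', True), DemoItem('bar', False)])
    view.show()
    sys.exit(app.exec_())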
|
def reverso(n):
    # reverse the digits of n (e.g. 123 -> 321)
    numeroInvertido = int(str(n)[::-1])
    print(numeroInvertido)
n = int(input("Enter the number: "))
reverso(n)
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sonet.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
"""
Move Sprite With Keyboard
Simple program to show moving a sprite with the keyboard.
The sprite_move_keyboard_better.py example is slightly better
in how it works, but also slightly more complex.
Artwork from http://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_move_keyboard
"""
import arcade
import os
SPRITE_SCALING = 0.5
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Move Sprite with Keyboard Example"
MOVEMENT_SPEED = 5
class Player(arcade.Sprite):
def update(self):
self.center_x += self.change_x
self.center_y += self.change_y
if self.left < 0:
self.left = 0
elif self.right > SCREEN_WIDTH - 1:
self.right = SCREEN_WIDTH - 1
if self.bottom < 0:
self.bottom = 0
elif self.top > SCREEN_HEIGHT - 1:
self.top = SCREEN_HEIGHT - 1
class MyGame(arcade.Window):
"""
Main application class.
"""
def __init__(self, width, height, title):
"""
Initializer
"""
# Call the parent class initializer
super().__init__(width, height, title)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Variables that will hold sprite lists
self.player_list = None
# Set up the player info
self.player_sprite = None
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
def setup(self):
""" Set up the game and initialize the variables. """
# Sprite lists
self.player_list = arcade.SpriteList()
# Set up the player
self.player_sprite = Player("images/character.png", SPRITE_SCALING)
self.player_sprite.center_x = 50
self.player_sprite.center_y = 50
self.player_list.append(self.player_sprite)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.player_list.draw()
def update(self, delta_time):
""" Movement and game logic """
# Call update on all sprites (The sprites don't do much in this
# example though.)
self.player_list.update()
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.UP:
self.player_sprite.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player_sprite.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player_sprite.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player_sprite.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player_sprite.change_x = 0
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import sys
import andrena
def got_key(client):
# Setup callback for when diffie hellman key is negotiated
infile = open('README.md', 'r')
data = infile.read()
infile.close()
stream = andrena.FileTransfer(None, client)
stream.meta = "newreadme" # save file remotely as newreadme
stream.buffer = data
client.send(stream)
# Setup communicator object using default serial port
comm = andrena.Communicator(device='/dev/ttyUSB1')
# Setup a remote client (the handler) ctx
handler = andrena.ClientCTX(comm, addr="\x00\x01", psk="A"*16, dh_callback=got_key)
# Create a message processor for the comm object
dispatch = andrena.Dispatcher(comm=comm, clients=[handler])
dispatch.start()
# Initiate DiffieHellman key exchange
handler.agent_hello()
try:
from time import sleep
while True:
sleep(5)
except KeyboardInterrupt:
comm.close()
sys.exit(0)
|
"""Implementation of :class:`Domain` class. """
from typing import Any, Optional, Type
from sympy.core import Basic, sympify
from sympy.core.sorting import default_sort_key, ordered
from sympy.external.gmpy import HAS_GMPY
from sympy.polys.domains.domainelement import DomainElement
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import UnificationFailed, CoercionFailed, DomainError
from sympy.polys.polyutils import _unify_gens, _not_a_coeff
from sympy.utilities import public
from sympy.utilities.iterables import is_sequence
@public
class Domain:
"""Superclass for all domains in the polys domains system.
See :ref:`polys-domainsintro` for an introductory explanation of the
domains system.
The :py:class:`~.Domain` class is an abstract base class for all of the
concrete domain types. There are many different :py:class:`~.Domain`
subclasses each of which has an associated ``dtype`` which is a class
representing the elements of the domain. The coefficients of a
:py:class:`~.Poly` are elements of a domain which must be a subclass of
:py:class:`~.Domain`.
Examples
========
The most common example domains are the integers :ref:`ZZ` and the
rationals :ref:`QQ`.
>>> from sympy import Poly, symbols, Domain
>>> x, y = symbols('x, y')
>>> p = Poly(x**2 + y)
>>> p
Poly(x**2 + y, x, y, domain='ZZ')
>>> p.domain
ZZ
>>> isinstance(p.domain, Domain)
True
>>> Poly(x**2 + y/2)
Poly(x**2 + 1/2*y, x, y, domain='QQ')
The domains can be used directly in which case the domain object e.g.
(:ref:`ZZ` or :ref:`QQ`) can be used as a constructor for elements of
``dtype``.
>>> from sympy import ZZ, QQ
>>> ZZ(2)
2
>>> ZZ.dtype # doctest: +SKIP
<class 'int'>
>>> type(ZZ(2)) # doctest: +SKIP
<class 'int'>
>>> QQ(1, 2)
1/2
>>> type(QQ(1, 2)) # doctest: +SKIP
<class 'sympy.polys.domains.pythonrational.PythonRational'>
The corresponding domain elements can be used with the arithmetic
operations ``+,-,*,**`` and depending on the domain some combination of
``/,//,%`` might be usable. For example in :ref:`ZZ` both ``//`` (floor
division) and ``%`` (modulo division) can be used but ``/`` (true
division) cannot. Since :ref:`QQ` is a :py:class:`~.Field` its elements
can be used with ``/`` but ``//`` and ``%`` should not be used. Some
domains have a :py:meth:`~.Domain.gcd` method.
>>> ZZ(2) + ZZ(3)
5
>>> ZZ(5) // ZZ(2)
2
>>> ZZ(5) % ZZ(2)
1
>>> QQ(1, 2) / QQ(2, 3)
3/4
>>> ZZ.gcd(ZZ(4), ZZ(2))
2
>>> QQ.gcd(QQ(2,7), QQ(5,3))
1/21
>>> ZZ.is_Field
False
>>> QQ.is_Field
True
There are also many other domains including:
1. :ref:`GF(p)` for finite fields of prime order.
2. :ref:`RR` for real (floating point) numbers.
3. :ref:`CC` for complex (floating point) numbers.
4. :ref:`QQ(a)` for algebraic number fields.
5. :ref:`K[x]` for polynomial rings.
6. :ref:`K(x)` for rational function fields.
7. :ref:`EX` for arbitrary expressions.
Each domain is represented by a domain object and also an implementation
class (``dtype``) for the elements of the domain. For example the
:ref:`K[x]` domains are represented by a domain object which is an
instance of :py:class:`~.PolynomialRing` and the elements are always
instances of :py:class:`~.PolyElement`. The implementation class
represents particular types of mathematical expressions in a way that is
more efficient than a normal SymPy expression which is of type
:py:class:`~.Expr`. The domain methods :py:meth:`~.Domain.from_sympy` and
:py:meth:`~.Domain.to_sympy` are used to convert from :py:class:`~.Expr`
to a domain element and vice versa.
>>> from sympy import Symbol, ZZ, Expr
>>> x = Symbol('x')
>>> K = ZZ[x] # polynomial ring domain
>>> K
ZZ[x]
>>> type(K) # class of the domain
<class 'sympy.polys.domains.polynomialring.PolynomialRing'>
>>> K.dtype # class of the elements
<class 'sympy.polys.rings.PolyElement'>
>>> p_expr = x**2 + 1 # Expr
>>> p_expr
x**2 + 1
>>> type(p_expr)
<class 'sympy.core.add.Add'>
>>> isinstance(p_expr, Expr)
True
>>> p_domain = K.from_sympy(p_expr)
>>> p_domain # domain element
x**2 + 1
>>> type(p_domain)
<class 'sympy.polys.rings.PolyElement'>
>>> K.to_sympy(p_domain) == p_expr
True
The :py:meth:`~.Domain.convert_from` method is used to convert domain
elements from one domain to another.
>>> from sympy import ZZ, QQ
>>> ez = ZZ(2)
>>> eq = QQ.convert_from(ez, ZZ)
>>> type(ez) # doctest: +SKIP
<class 'int'>
>>> type(eq) # doctest: +SKIP
<class 'sympy.polys.domains.pythonrational.PythonRational'>
Elements from different domains should not be mixed in arithmetic or other
operations: they should be converted to a common domain first. The domain
method :py:meth:`~.Domain.unify` is used to find a domain that can
represent all the elements of two given domains.
>>> from sympy import ZZ, QQ, symbols
>>> x, y = symbols('x, y')
>>> ZZ.unify(QQ)
QQ
>>> ZZ[x].unify(QQ)
QQ[x]
>>> ZZ[x].unify(QQ[y])
QQ[x,y]
If a domain is a :py:class:`~.Ring` then it might have an associated
:py:class:`~.Field` and vice versa. The :py:meth:`~.Domain.get_field` and
:py:meth:`~.Domain.get_ring` methods will find or create the associated
domain.
>>> from sympy import ZZ, QQ, Symbol
>>> x = Symbol('x')
>>> ZZ.has_assoc_Field
True
>>> ZZ.get_field()
QQ
>>> QQ.has_assoc_Ring
True
>>> QQ.get_ring()
ZZ
>>> K = QQ[x]
>>> K
QQ[x]
>>> K.get_field()
QQ(x)
See also
========
DomainElement: abstract base class for domain elements
construct_domain: construct a minimal domain for some expressions
"""
dtype = None # type: Optional[Type]
"""The type (class) of the elements of this :py:class:`~.Domain`:
>>> from sympy import ZZ, QQ, Symbol
>>> ZZ.dtype
<class 'int'>
>>> z = ZZ(2)
>>> z
2
>>> type(z)
<class 'int'>
>>> type(z) == ZZ.dtype
True
Every domain has an associated **dtype** ("datatype") which is the
class of the associated domain elements.
See also
========
of_type
"""
zero = None # type: Optional[Any]
"""The zero element of the :py:class:`~.Domain`:
>>> from sympy import QQ
>>> QQ.zero
0
>>> QQ.of_type(QQ.zero)
True
See also
========
of_type
one
"""
one = None # type: Optional[Any]
"""The one element of the :py:class:`~.Domain`:
>>> from sympy import QQ
>>> QQ.one
1
>>> QQ.of_type(QQ.one)
True
See also
========
of_type
zero
"""
is_Ring = False
"""Boolean flag indicating if the domain is a :py:class:`~.Ring`.
>>> from sympy import ZZ
>>> ZZ.is_Ring
True
Basically every :py:class:`~.Domain` represents a ring so this flag is
not that useful.
See also
========
is_PID
is_Field
get_ring
has_assoc_Ring
"""
is_Field = False
"""Boolean flag indicating if the domain is a :py:class:`~.Field`.
>>> from sympy import ZZ, QQ
>>> ZZ.is_Field
False
>>> QQ.is_Field
True
See also
========
is_PID
is_Ring
get_field
has_assoc_Field
"""
has_assoc_Ring = False
"""Boolean flag indicating if the domain has an associated
:py:class:`~.Ring`.
>>> from sympy import QQ
>>> QQ.has_assoc_Ring
True
>>> QQ.get_ring()
ZZ
See also
========
is_Field
get_ring
"""
has_assoc_Field = False
"""Boolean flag indicating if the domain has an associated
:py:class:`~.Field`.
>>> from sympy import ZZ
>>> ZZ.has_assoc_Field
True
>>> ZZ.get_field()
QQ
See also
========
is_Field
get_field
"""
is_FiniteField = is_FF = False
is_IntegerRing = is_ZZ = False
is_RationalField = is_QQ = False
is_GaussianRing = is_ZZ_I = False
is_GaussianField = is_QQ_I = False
is_RealField = is_RR = False
is_ComplexField = is_CC = False
is_AlgebraicField = is_Algebraic = False
is_PolynomialRing = is_Poly = False
is_FractionField = is_Frac = False
is_SymbolicDomain = is_EX = False
is_SymbolicRawDomain = is_EXRAW = False
is_FiniteExtension = False
is_Exact = True
is_Numerical = False
is_Simple = False
is_Composite = False
is_PID = False
"""Boolean flag indicating if the domain is a `principal ideal domain`_.
>>> from sympy import ZZ
>>> ZZ.is_PID
True
.. _principal ideal domain: https://en.wikipedia.org/wiki/Principal_ideal_domain
See also
========
is_Field
get_field
"""
has_CharacteristicZero = False
rep = None # type: Optional[str]
alias = None # type: Optional[str]
def __init__(self):
raise NotImplementedError
def __str__(self):
return self.rep
def __repr__(self):
return str(self)
def __hash__(self):
return hash((self.__class__.__name__, self.dtype))
def new(self, *args):
return self.dtype(*args)
@property
def tp(self):
"""Alias for :py:attr:`~.Domain.dtype`"""
return self.dtype
def __call__(self, *args):
"""Construct an element of ``self`` domain from ``args``. """
return self.new(*args)
def normal(self, *args):
return self.dtype(*args)
def convert_from(self, element, base):
"""Convert ``element`` to ``self.dtype`` given the base domain. """
if base.alias is not None:
method = "from_" + base.alias
else:
method = "from_" + base.__class__.__name__
_convert = getattr(self, method)
if _convert is not None:
result = _convert(element, base)
if result is not None:
return result
raise CoercionFailed("Cannot convert %s of type %s from %s to %s" % (element, type(element), base, self))
def convert(self, element, base=None):
"""Convert ``element`` to ``self.dtype``. """
if base is not None:
if _not_a_coeff(element):
raise CoercionFailed('%s is not in any domain' % element)
return self.convert_from(element, base)
if self.of_type(element):
return element
if _not_a_coeff(element):
raise CoercionFailed('%s is not in any domain' % element)
from sympy.polys.domains import ZZ, QQ, RealField, ComplexField
if ZZ.of_type(element):
return self.convert_from(element, ZZ)
if isinstance(element, int):
return self.convert_from(ZZ(element), ZZ)
if HAS_GMPY:
integers = ZZ
if isinstance(element, integers.tp):
return self.convert_from(element, integers)
rationals = QQ
if isinstance(element, rationals.tp):
return self.convert_from(element, rationals)
if isinstance(element, float):
parent = RealField(tol=False)
return self.convert_from(parent(element), parent)
if isinstance(element, complex):
parent = ComplexField(tol=False)
return self.convert_from(parent(element), parent)
if isinstance(element, DomainElement):
return self.convert_from(element, element.parent())
# TODO: implement this in from_ methods
if self.is_Numerical and getattr(element, 'is_ground', False):
return self.convert(element.LC())
if isinstance(element, Basic):
try:
return self.from_sympy(element)
except (TypeError, ValueError):
pass
else: # TODO: remove this branch
if not is_sequence(element):
try:
element = sympify(element, strict=True)
if isinstance(element, Basic):
return self.from_sympy(element)
except (TypeError, ValueError):
pass
raise CoercionFailed("Cannot convert %s of type %s to %s" % (element, type(element), self))
def of_type(self, element):
"""Check if ``a`` is of type ``dtype``. """
return isinstance(element, self.tp) # XXX: this isn't correct, e.g. PolyElement
def __contains__(self, a):
"""Check if ``a`` belongs to this domain. """
try:
if _not_a_coeff(a):
raise CoercionFailed
self.convert(a) # this might raise, too
except CoercionFailed:
return False
return True
def to_sympy(self, a):
"""Convert domain element *a* to a SymPy expression (Expr).
Explanation
===========
Convert a :py:class:`~.Domain` element *a* to :py:class:`~.Expr`. Most
public SymPy functions work with objects of type :py:class:`~.Expr`.
The elements of a :py:class:`~.Domain` have a different internal
representation. It is not possible to mix domain elements with
:py:class:`~.Expr` so each domain has :py:meth:`~.Domain.to_sympy` and
:py:meth:`~.Domain.from_sympy` methods to convert its domain elements
to and from :py:class:`~.Expr`.
Parameters
==========
a: domain element
An element of this :py:class:`~.Domain`.
Returns
=======
expr: Expr
A normal SymPy expression of type :py:class:`~.Expr`.
Examples
========
Construct an element of the :ref:`QQ` domain and then convert it to
:py:class:`~.Expr`.
>>> from sympy import QQ, Expr
>>> q_domain = QQ(2)
>>> q_domain
2
>>> q_expr = QQ.to_sympy(q_domain)
>>> q_expr
2
Although the printed forms look similar these objects are not of the
same type.
>>> isinstance(q_domain, Expr)
False
>>> isinstance(q_expr, Expr)
True
Construct an element of :ref:`K[x]` and convert to
:py:class:`~.Expr`.
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> K = QQ[x]
>>> x_domain = K.gens[0] # generator x as a domain element
>>> p_domain = x_domain**2/3 + 1
>>> p_domain
1/3*x**2 + 1
>>> p_expr = K.to_sympy(p_domain)
>>> p_expr
x**2/3 + 1
The :py:meth:`~.Domain.from_sympy` method is used for the opposite
conversion from a normal SymPy expression to a domain element.
>>> p_domain == p_expr
False
>>> K.from_sympy(p_expr) == p_domain
True
>>> K.to_sympy(p_domain) == p_expr
True
>>> K.from_sympy(K.to_sympy(p_domain)) == p_domain
True
>>> K.to_sympy(K.from_sympy(p_expr)) == p_expr
True
The :py:meth:`~.Domain.from_sympy` method makes it easier to construct
domain elements interactively.
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> K = QQ[x]
>>> K.from_sympy(x**2/3 + 1)
1/3*x**2 + 1
See also
========
from_sympy
convert_from
"""
raise NotImplementedError
def from_sympy(self, a):
"""Convert a SymPy expression to an element of this domain.
Explanation
===========
See :py:meth:`~.Domain.to_sympy` for explanation and examples.
Parameters
==========
expr: Expr
A normal SymPy expression of type :py:class:`~.Expr`.
Returns
=======
a: domain element
An element of this :py:class:`~.Domain`.
See also
========
to_sympy
convert_from
"""
raise NotImplementedError
def sum(self, args):
return sum(args)
def from_FF(K1, a, K0):
"""Convert ``ModularInteger(int)`` to ``dtype``. """
return None
def from_FF_python(K1, a, K0):
"""Convert ``ModularInteger(int)`` to ``dtype``. """
return None
def from_ZZ_python(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return None
def from_QQ_python(K1, a, K0):
"""Convert a Python ``Fraction`` object to ``dtype``. """
return None
def from_FF_gmpy(K1, a, K0):
"""Convert ``ModularInteger(mpz)`` to ``dtype``. """
return None
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpz`` object to ``dtype``. """
return None
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpq`` object to ``dtype``. """
return None
def from_RealField(K1, a, K0):
"""Convert a real element object to ``dtype``. """
return None
def from_ComplexField(K1, a, K0):
"""Convert a complex element to ``dtype``. """
return None
def from_AlgebraicField(K1, a, K0):
"""Convert an algebraic number to ``dtype``. """
return None
def from_PolynomialRing(K1, a, K0):
"""Convert a polynomial to ``dtype``. """
if a.is_ground:
return K1.convert(a.LC, K0.dom)
def from_FractionField(K1, a, K0):
"""Convert a rational function to ``dtype``. """
return None
def from_MonogenicFiniteExtension(K1, a, K0):
"""Convert an ``ExtensionElement`` to ``dtype``. """
return K1.convert_from(a.rep, K0.ring)
def from_ExpressionDomain(K1, a, K0):
"""Convert a ``EX`` object to ``dtype``. """
return K1.from_sympy(a.ex)
def from_ExpressionRawDomain(K1, a, K0):
"""Convert a ``EX`` object to ``dtype``. """
return K1.from_sympy(a)
def from_GlobalPolynomialRing(K1, a, K0):
"""Convert a polynomial to ``dtype``. """
if a.degree() <= 0:
return K1.convert(a.LC(), K0.dom)
def from_GeneralizedPolynomialRing(K1, a, K0):
return K1.from_FractionField(a, K0)
def unify_with_symbols(K0, K1, symbols):
if (K0.is_Composite and (set(K0.symbols) & set(symbols))) or (K1.is_Composite and (set(K1.symbols) & set(symbols))):
raise UnificationFailed("Cannot unify %s with %s, given %s generators" % (K0, K1, tuple(symbols)))
return K0.unify(K1)
def unify(K0, K1, symbols=None):
"""
Construct a minimal domain that contains elements of ``K0`` and ``K1``.
Known domains (from smallest to largest):
- ``GF(p)``
- ``ZZ``
- ``QQ``
- ``RR(prec, tol)``
- ``CC(prec, tol)``
- ``ALG(a, b, c)``
- ``K[x, y, z]``
- ``K(x, y, z)``
- ``EX``
"""
if symbols is not None:
return K0.unify_with_symbols(K1, symbols)
if K0 == K1:
return K0
if K0.is_EXRAW:
return K0
if K1.is_EXRAW:
return K1
if K0.is_EX:
return K0
if K1.is_EX:
return K1
if K0.is_FiniteExtension or K1.is_FiniteExtension:
if K1.is_FiniteExtension:
K0, K1 = K1, K0
if K1.is_FiniteExtension:
# Unifying two extensions.
# Try to ensure that K0.unify(K1) == K1.unify(K0)
if list(ordered([K0.modulus, K1.modulus]))[1] == K0.modulus:
K0, K1 = K1, K0
return K1.set_domain(K0)
else:
# Drop the generator from other and unify with the base domain
K1 = K1.drop(K0.symbol)
K1 = K0.domain.unify(K1)
return K0.set_domain(K1)
if K0.is_Composite or K1.is_Composite:
K0_ground = K0.dom if K0.is_Composite else K0
K1_ground = K1.dom if K1.is_Composite else K1
K0_symbols = K0.symbols if K0.is_Composite else ()
K1_symbols = K1.symbols if K1.is_Composite else ()
domain = K0_ground.unify(K1_ground)
symbols = _unify_gens(K0_symbols, K1_symbols)
order = K0.order if K0.is_Composite else K1.order
if ((K0.is_FractionField and K1.is_PolynomialRing or
K1.is_FractionField and K0.is_PolynomialRing) and
(not K0_ground.is_Field or not K1_ground.is_Field) and domain.is_Field
and domain.has_assoc_Ring):
domain = domain.get_ring()
if K0.is_Composite and (not K1.is_Composite or K0.is_FractionField or K1.is_PolynomialRing):
cls = K0.__class__
else:
cls = K1.__class__
from sympy.polys.domains.old_polynomialring import GlobalPolynomialRing
if cls == GlobalPolynomialRing:
return cls(domain, symbols)
return cls(domain, symbols, order)
def mkinexact(cls, K0, K1):
prec = max(K0.precision, K1.precision)
tol = max(K0.tolerance, K1.tolerance)
return cls(prec=prec, tol=tol)
if K1.is_ComplexField:
K0, K1 = K1, K0
if K0.is_ComplexField:
if K1.is_ComplexField or K1.is_RealField:
return mkinexact(K0.__class__, K0, K1)
else:
return K0
if K1.is_RealField:
K0, K1 = K1, K0
if K0.is_RealField:
if K1.is_RealField:
return mkinexact(K0.__class__, K0, K1)
elif K1.is_GaussianRing or K1.is_GaussianField:
from sympy.polys.domains.complexfield import ComplexField
return ComplexField(prec=K0.precision, tol=K0.tolerance)
else:
return K0
if K1.is_AlgebraicField:
K0, K1 = K1, K0
if K0.is_AlgebraicField:
if K1.is_GaussianRing:
K1 = K1.get_field()
if K1.is_GaussianField:
K1 = K1.as_AlgebraicField()
if K1.is_AlgebraicField:
return K0.__class__(K0.dom.unify(K1.dom), *_unify_gens(K0.orig_ext, K1.orig_ext))
else:
return K0
if K0.is_GaussianField:
return K0
if K1.is_GaussianField:
return K1
if K0.is_GaussianRing:
if K1.is_RationalField:
K0 = K0.get_field()
return K0
if K1.is_GaussianRing:
if K0.is_RationalField:
K1 = K1.get_field()
return K1
if K0.is_RationalField:
return K0
if K1.is_RationalField:
return K1
if K0.is_IntegerRing:
return K0
if K1.is_IntegerRing:
return K1
if K0.is_FiniteField and K1.is_FiniteField:
return K0.__class__(max(K0.mod, K1.mod, key=default_sort_key))
from sympy.polys.domains import EX
return EX
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent. """
return isinstance(other, Domain) and self.dtype == other.dtype
def __ne__(self, other):
"""Returns ``False`` if two domains are equivalent. """
return not self == other
def map(self, seq):
"""Rersively apply ``self`` to all elements of ``seq``. """
result = []
for elt in seq:
if isinstance(elt, list):
result.append(self.map(elt))
else:
result.append(self(elt))
return result
def get_ring(self):
"""Returns a ring associated with ``self``. """
raise DomainError('there is no ring associated with %s' % self)
def get_field(self):
"""Returns a field associated with ``self``. """
raise DomainError('there is no field associated with %s' % self)
def get_exact(self):
"""Returns an exact domain associated with ``self``. """
return self
def __getitem__(self, symbols):
"""The mathematical way to make a polynomial ring. """
if hasattr(symbols, '__iter__'):
return self.poly_ring(*symbols)
else:
return self.poly_ring(symbols)
def poly_ring(self, *symbols, order=lex):
"""Returns a polynomial ring, i.e. `K[X]`. """
from sympy.polys.domains.polynomialring import PolynomialRing
return PolynomialRing(self, symbols, order)
def frac_field(self, *symbols, order=lex):
"""Returns a fraction field, i.e. `K(X)`. """
from sympy.polys.domains.fractionfield import FractionField
return FractionField(self, symbols, order)
def old_poly_ring(self, *symbols, **kwargs):
"""Returns a polynomial ring, i.e. `K[X]`. """
from sympy.polys.domains.old_polynomialring import PolynomialRing
return PolynomialRing(self, *symbols, **kwargs)
def old_frac_field(self, *symbols, **kwargs):
"""Returns a fraction field, i.e. `K(X)`. """
from sympy.polys.domains.old_fractionfield import FractionField
return FractionField(self, *symbols, **kwargs)
def algebraic_field(self, *extension):
r"""Returns an algebraic field, i.e. `K(\alpha, \ldots)`. """
raise DomainError("Cannot create algebraic field over %s" % self)
def inject(self, *symbols):
"""Inject generators into this domain. """
raise NotImplementedError
def drop(self, *symbols):
"""Drop generators from this domain. """
if self.is_Simple:
return self
raise NotImplementedError # pragma: no cover
def is_zero(self, a):
"""Returns True if ``a`` is zero. """
return not a
def is_one(self, a):
"""Returns True if ``a`` is one. """
return a == self.one
def is_positive(self, a):
"""Returns True if ``a`` is positive. """
return a > 0
def is_negative(self, a):
"""Returns True if ``a`` is negative. """
return a < 0
def is_nonpositive(self, a):
"""Returns True if ``a`` is non-positive. """
return a <= 0
def is_nonnegative(self, a):
"""Returns True if ``a`` is non-negative. """
return a >= 0
def canonical_unit(self, a):
if self.is_negative(a):
return -self.one
else:
return self.one
def abs(self, a):
"""Absolute value of ``a``, implies ``__abs__``. """
return abs(a)
def neg(self, a):
"""Returns ``a`` negated, implies ``__neg__``. """
return -a
def pos(self, a):
"""Returns ``a`` positive, implies ``__pos__``. """
return +a
def add(self, a, b):
"""Sum of ``a`` and ``b``, implies ``__add__``. """
return a + b
def sub(self, a, b):
"""Difference of ``a`` and ``b``, implies ``__sub__``. """
return a - b
def mul(self, a, b):
"""Product of ``a`` and ``b``, implies ``__mul__``. """
return a * b
def pow(self, a, b):
"""Raise ``a`` to power ``b``, implies ``__pow__``. """
return a ** b
def exquo(self, a, b):
"""Exact quotient of *a* and *b*. Analogue of ``a / b``.
Explanation
===========
This is essentially the same as ``a / b`` except that an error will be
raised if the division is inexact (if there is any remainder) and the
result will always be a domain element. When working in a
:py:class:`~.Domain` that is not a :py:class:`~.Field` (e.g. :ref:`ZZ`
or :ref:`K[x]`) ``exquo`` should be used instead of ``/``.
The key invariant is that if ``q = K.exquo(a, b)`` (and ``exquo`` does
not raise an exception) then ``a == b*q``.
Examples
========
We can use ``K.exquo`` instead of ``/`` for exact division.
>>> from sympy import ZZ
>>> ZZ.exquo(ZZ(4), ZZ(2))
2
>>> ZZ.exquo(ZZ(5), ZZ(2))
Traceback (most recent call last):
...
ExactQuotientFailed: 2 does not divide 5 in ZZ
Over a :py:class:`~.Field` such as :ref:`QQ`, division (with nonzero
divisor) is always exact so in that case ``/`` can be used instead of
:py:meth:`~.Domain.exquo`.
>>> from sympy import QQ
>>> QQ.exquo(QQ(5), QQ(2))
5/2
>>> QQ(5) / QQ(2)
5/2
Parameters
==========
a: domain element
The dividend
b: domain element
The divisor
Returns
=======
q: domain element
The exact quotient
Raises
======
ExactQuotientFailed: if exact division is not possible.
ZeroDivisionError: when the divisor is zero.
See also
========
quo: Analogue of ``a // b``
rem: Analogue of ``a % b``
div: Analogue of ``divmod(a, b)``
Notes
=====
Since the default :py:attr:`~.Domain.dtype` for :ref:`ZZ` is ``int``
(or ``mpz``), division as ``a / b`` should not be used as it would give
a ``float``.
>>> ZZ(4) / ZZ(2)
2.0
>>> ZZ(5) / ZZ(2)
2.5
Using ``/`` with :ref:`ZZ` will lead to incorrect results so
:py:meth:`~.Domain.exquo` should be used instead.
"""
raise NotImplementedError
def quo(self, a, b):
"""Quotient of *a* and *b*. Analogue of ``a // b``.
``K.quo(a, b)`` is equivalent to ``K.div(a, b)[0]``. See
:py:meth:`~.Domain.div` for more explanation.
See also
========
rem: Analogue of ``a % b``
div: Analogue of ``divmod(a, b)``
exquo: Analogue of ``a / b``
"""
raise NotImplementedError
def rem(self, a, b):
"""Modulo division of *a* and *b*. Analogue of ``a % b``.
``K.rem(a, b)`` is equivalent to ``K.div(a, b)[1]``. See
:py:meth:`~.Domain.div` for more explanation.
See also
========
quo: Analogue of ``a // b``
div: Analogue of ``divmod(a, b)``
exquo: Analogue of ``a / b``
"""
raise NotImplementedError
def div(self, a, b):
"""Quotient and remainder for *a* and *b*. Analogue of ``divmod(a, b)``
Explanation
===========
This is essentially the same as ``divmod(a, b)`` except that it is more
consistent when working over some :py:class:`~.Field` domains such as
:ref:`QQ`. When working over an arbitrary :py:class:`~.Domain` the
:py:meth:`~.Domain.div` method should be used instead of ``divmod``.
The key invariant is that if ``q, r = K.div(a, b)`` then
``a == b*q + r``.
The result of ``K.div(a, b)`` is the same as the tuple
``(K.quo(a, b), K.rem(a, b))`` except that if both quotient and
remainder are needed then it is more efficient to use
:py:meth:`~.Domain.div`.
Examples
========
We can use ``K.div`` instead of ``divmod`` for floor division and
remainder.
>>> from sympy import ZZ, QQ
>>> ZZ.div(ZZ(5), ZZ(2))
(2, 1)
If ``K`` is a :py:class:`~.Field` then the division is always exact
with a remainder of :py:attr:`~.Domain.zero`.
>>> QQ.div(QQ(5), QQ(2))
(5/2, 0)
Parameters
==========
a: domain element
The dividend
b: domain element
The divisor
Returns
=======
(q, r): tuple of domain elements
The quotient and remainder
Raises
======
ZeroDivisionError: when the divisor is zero.
See also
========
quo: Analogue of ``a // b``
rem: Analogue of ``a % b``
exquo: Analogue of ``a / b``
Notes
=====
If ``gmpy`` is installed then the ``gmpy.mpq`` type will be used as
the :py:attr:`~.Domain.dtype` for :ref:`QQ`. The ``gmpy.mpq`` type
defines ``divmod`` in a way that is undesirable so
:py:meth:`~.Domain.div` should be used instead of ``divmod``.
>>> a = QQ(1)
>>> b = QQ(3, 2)
>>> a # doctest: +SKIP
mpq(1,1)
>>> b # doctest: +SKIP
mpq(3,2)
>>> divmod(a, b) # doctest: +SKIP
(mpz(0), mpq(1,1))
>>> QQ.div(a, b) # doctest: +SKIP
(mpq(2,3), mpq(0,1))
Using ``//`` or ``%`` with :ref:`QQ` will lead to incorrect results so
:py:meth:`~.Domain.div` should be used instead.
"""
raise NotImplementedError
def invert(self, a, b):
"""Returns inversion of ``a mod b``, implies something. """
raise NotImplementedError
def revert(self, a):
"""Returns ``a**(-1)`` if possible. """
raise NotImplementedError
def numer(self, a):
"""Returns numerator of ``a``. """
raise NotImplementedError
def denom(self, a):
"""Returns denominator of ``a``. """
raise NotImplementedError
def half_gcdex(self, a, b):
"""Half extended GCD of ``a`` and ``b``. """
s, t, h = self.gcdex(a, b)
return s, h
def gcdex(self, a, b):
"""Extended GCD of ``a`` and ``b``. """
raise NotImplementedError
def cofactors(self, a, b):
"""Returns GCD and cofactors of ``a`` and ``b``. """
gcd = self.gcd(a, b)
cfa = self.quo(a, gcd)
cfb = self.quo(b, gcd)
return gcd, cfa, cfb
def gcd(self, a, b):
"""Returns GCD of ``a`` and ``b``. """
raise NotImplementedError
def lcm(self, a, b):
"""Returns LCM of ``a`` and ``b``. """
raise NotImplementedError
def log(self, a, b):
"""Returns b-base logarithm of ``a``. """
raise NotImplementedError
def sqrt(self, a):
"""Returns square root of ``a``. """
raise NotImplementedError
def evalf(self, a, prec=None, **options):
"""Returns numerical approximation of ``a``. """
return self.to_sympy(a).evalf(prec, **options)
n = evalf
def real(self, a):
return a
def imag(self, a):
return self.zero
def almosteq(self, a, b, tolerance=None):
"""Check if ``a`` and ``b`` are almost equal. """
return a == b
def characteristic(self):
"""Return the characteristic of this domain. """
raise NotImplementedError('characteristic()')
__all__ = ['Domain']
|
from django.contrib import admin
from imagekit.admin import AdminThumbnail
import models
class PanelInline(admin.TabularInline):
prepopulated_fields = {"slug": ("title",)}
list_display = ['title', 'slug', 'visible']
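# Note: list_display (and list_editable in SlideInline below) are ModelAdmin
# options; Django inline classes ignore them, so they have no effect here.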
model = models.Panel
extra = 3
class PageAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
list_display = ['title', 'order', 'slug', 'visible']
search_fields = ['title', 'description_300']
list_filter = [ 'visible',]
list_editable = ['order']
inlines = [PanelInline]
admin.site.register(models.Page, PageAdmin)
class PanelAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
list_display = ['title', 'order', 'slug', 'visible', 'slide_show']
search_fields = ['title', 'content']
list_filter = [ 'visible', 'page']
list_editable = ['order']
admin.site.register(models.Panel, PanelAdmin)
class SlideInline(admin.TabularInline):
#prepopulated_fields = {"slug": ("title",)}
fields = ['order', 'title', 'visible', 'image']
list_editable = ['order']
model = models.Slide
extra = 10
class SlideShowAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
list_display = ['title', 'slug']
search_fields = ['title', ]
inlines = [SlideInline]
save_as = True
admin.site.register(models.SlideShow, SlideShowAdmin)
class SlideAdmin(admin.ModelAdmin):
#prepopulated_fields = {"slug": ("title",)}
list_display = ['id', 'title', 'order', 'visible', 'image_tag']# 'image_to_use']
#admin_image_to_use = AdminThumbnail(image_field='image_to_use')
search_fields = ['title', 'content']
list_filter = [ 'visible', 'slide_show']
list_editable = ['order', 'visible',]
admin.site.register(models.Slide, SlideAdmin)
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-doc-return-or-yield,line-too-long
"""TEAMS experiments."""
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.data import question_answering_dataloader
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import question_answering
from official.nlp.tasks import sentence_prediction
from official.projects.teams import teams
from official.projects.teams import teams_task
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
@dataclasses.dataclass
class TeamsOptimizationConfig(optimization.OptimizationConfig):
"""TEAMS optimization config."""
optimizer: optimization.OptimizerConfig = optimization.OptimizerConfig(
type="adamw",
adamw=AdamWeightDecay(
weight_decay_rate=0.01,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
epsilon=1e-6))
learning_rate: optimization.LrConfig = optimization.LrConfig(
type="polynomial",
polynomial=PolynomialLr(
initial_learning_rate=1e-4,
decay_steps=1000000,
end_learning_rate=0.0))
warmup: optimization.WarmupConfig = optimization.WarmupConfig(
type="polynomial", polynomial=PolynomialWarmupConfig(warmup_steps=10000))
@exp_factory.register_config_factory("teams/pretraining")
def teams_pretrain() -> cfg.ExperimentConfig:
"""TEAMS pretraining."""
config = cfg.ExperimentConfig(
task=teams_task.TeamsPretrainTaskConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
is_training=False)),
trainer=cfg.TrainerConfig(
optimizer_config=TeamsOptimizationConfig(), train_steps=1000000),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
@exp_factory.register_config_factory("teams/sentence_prediction")
def teams_sentence_prediction() -> cfg.ExperimentConfig:
r"""Teams GLUE."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type="any", any=teams.TeamsEncoderConfig(num_layers=1))),
train_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(optimizer_config=TeamsOptimizationConfig()),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
@exp_factory.register_config_factory("teams/squad")
def teams_squad() -> cfg.ExperimentConfig:
"""Teams Squad V1/V2."""
config = cfg.ExperimentConfig(
task=question_answering.QuestionAnsweringConfig(
model=question_answering.ModelConfig(
encoder=encoders.EncoderConfig(
type="any", any=teams.TeamsEncoderConfig(num_layers=1))),
train_data=question_answering_dataloader.QADataConfig(),
validation_data=question_answering_dataloader.QADataConfig()),
trainer=cfg.TrainerConfig(optimizer_config=TeamsOptimizationConfig()),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
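# Illustrative usage (a sketch): once this module has been imported, the
# registered experiments can be looked up through the factory, e.g.
#   from official.core import exp_factory
#   config = exp_factory.get_exp_config("teams/pretraining")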
|
from sqlalchemy.orm import Session
from sqlalchemy import desc
from typing import List, Dict, Union, Any, Tuple
from datetime import datetime, timedelta
from fastapi import Form, Header
import uuid
import os
import enum
import jwt
from app.database import models, schemas
from app.database.base import get_db
from app import exceptions
from passlib.context import CryptContext
pwd_context = CryptContext(["bcrypt"], deprecated="auto")
def hash_token(token):
return pwd_context.hash(token)
def verify_token(token, hashed_token):
return pwd_context.verify(token, hashed_token)
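# Illustrative: tokens are stored only as bcrypt hashes, e.g.
#   hashed = hash_token("secret")
#   verify_token("secret", hashed)  # -> True
#   verify_token("other", hashed)   # -> False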
class TokenStatus(enum.IntEnum):
VALID = enum.auto()
INVALID = enum.auto()
EXPIRED = enum.auto()
def get_user(db: Session, user_id: uuid.UUID) -> schemas.User:
return db.query(models.User).filter(models.User.id == user_id).first()
def get_user_by_social_id(db: Session, social_id: int) -> schemas.User:
return db.query(models.User).join(models.Token).filter(models.Token.social_id == social_id).first()
def get_users(db: Session, skip: int = 0, limit: int = 10) -> List[schemas.User]:
return db.query(models.User).offset(skip).limit(limit).all()
def create_post(db: Session, user: schemas.User, post: schemas.PostCreate):
db_post = models.Post(title=post.title, description=post.description,
main=post.main, owner_id=user.id, stdout=post.stdout,
stderr=post.stderr, exitcode=post.exitcode)
db.add(db_post)
db.flush()
db.commit()
db.refresh(db_post)
for img in post.posted_images:
db_image = models.PostedImage(url=img, post_id=db_post.id)
db.add(db_image)
db.flush()
db.commit()
db.refresh(db_image)
for img in post.generated_images:
db_image = models.GeneratedImage(url=img, post_id=db_post.id)
db.add(db_image)
db.flush()
db.commit()
db.refresh(db_image)
return db_post
def create_user(db: Session, user: schemas.UserCreate) -> schemas.User: # pragma: no cover
db_user = models.User(username=user.username, avater_url=user.avater_url)
db.add(db_user)
db.flush()
db.commit()
db.refresh(db_user)
return db_user
def get_all_posts(db: Session, skip: int = 0, limit: int = 10) -> List[schemas.Post]:
return db.query(models.Post).join(models.User).order_by(desc(models.Post.post_at)).offset(skip).limit(limit).all()
def fetch_new_posts(db: Session, post_at: str, skip: int = 0, limit: int = 1000) -> List[schemas.Post]:
return db.query(models.Post).join(models.User).filter(models.Post.post_at > datetime.strptime(post_at, "%Y-%m-%dT%H:%M:%S.%f")).order_by(desc(models.Post.post_at)).offset(skip).limit(limit).all()
def fetch_old_posts(db: Session, post_at: str, skip: int = 0, limit: int = 10) -> List[schemas.Post]:
return db.query(models.Post).filter(models.Post.post_at < datetime.strptime(post_at, "%Y-%m-%dT%H:%M:%S.%f")).join(models.User).order_by(desc(models.Post.post_at)).offset(skip).limit(limit).all()
def get_user_posts(db: Session, user_id: uuid.UUID, skip: int = 0, limit: int = 10) -> List[schemas.Post]:
return db.query(models.Post).join(models.User).filter(models.User.id == user_id).order_by(desc(models.Post.post_at)).offset(skip).limit(limit).all()
def get_user_post(db: Session, user_id: uuid.UUID, post_id: uuid.UUID) -> schemas.Post:
return db.query(models.Post).join(models.User).filter(models.User.id == user_id, models.Post.id == post_id).first()
def create_token(db: Session, token: schemas.TokenCreate, owner_id: uuid.UUID) -> schemas.Token: # pragma: no cover
db_token = models.Token(
social_id=token.social_id,
refresh_token=hash_token(token.refresh_token),
access_token_expire_at=datetime.utcnow() + timedelta(minutes=int(
os.environ['ACCESS_TOKEN_EXPIRE_MINUTES'])),
refresh_token_expire_at=datetime.utcnow() + timedelta(minutes=int(
os.environ['REFRESH_TOKEN_EXPIRE_MINUTES'])),
owner_id=owner_id
)
db.add(db_token)
db.flush()
db.commit()
db.refresh(db_token)
return db_token
def update_token(db: Session, db_user: models.User, refresh_token) -> schemas.Token:
db_token = db.query(models.Token).join(models.User).filter(
models.User.id == db_user.id).first()
db_token.refresh_token = hash_token(refresh_token)
db_token.access_token_expire_at = datetime.utcnow() + timedelta(minutes=int(
os.environ['ACCESS_TOKEN_EXPIRE_MINUTES']))
db_token.refresh_token_expire_at = datetime.utcnow() + timedelta(minutes=int(
os.environ['REFRESH_TOKEN_EXPIRE_MINUTES']))
db.flush()
db.commit()
db_token = db.query(models.Token).join(models.User).filter(
models.User.id == db_user.id).first()
return db_token
def verify_access_token(db: Session, access_token) -> Tuple[TokenStatus, Union[Dict[str, Any], None]]:
try:
data = jwt.decode(
access_token, key=os.environ['SECRET_KEY'], algorithms=[
os.environ['AUTH_ALGORITHM']]
)
except jwt.ExpiredSignatureError: # pragma: no cover
data = jwt.decode(
access_token, key=os.environ['SECRET_KEY'], algorithms=[
os.environ['AUTH_ALGORITHM']], verify=False
)
user_id = data['sub']
token: models.Token = db.query(models.Token).filter(
models.Token.social_id == user_id).first()
return TokenStatus.EXPIRED, None
except:
return TokenStatus.INVALID, None
return TokenStatus.VALID, data
def verify_refresh_token(db: Session, access_token, refresh_token) -> Tuple[TokenStatus, Union[Dict[str, Any], None]]:
try:
data = jwt.decode(
access_token, key=os.environ['SECRET_KEY'], algorithms=[
os.environ['AUTH_ALGORITHM']]
)
except jwt.ExpiredSignatureError: # pragma: no cover
data = jwt.decode(
access_token, key=os.environ['SECRET_KEY'], algorithms=[
os.environ['AUTH_ALGORITHM']], verify=False
)
except:
return TokenStatus.INVALID, None
user_id = data['sub']
token: models.Token = db.query(models.Token).filter(
models.Token.social_id == user_id).first()
if not verify_token(refresh_token, token.refresh_token):
return TokenStatus.INVALID, None
now = datetime.utcnow()
if token.refresh_token_expire_at < now: # pragma: no cover
return TokenStatus.EXPIRED, None
return TokenStatus.VALID, data
def current_user(access_token: str = Header(...)) -> models.User:
db = next(get_db())
status, data = verify_access_token(db, access_token)
if status == TokenStatus.VALID and data:
return get_user_by_social_id(db, data['sub'])
elif status == TokenStatus.INVALID:
raise exceptions.InvalidTokenException()
elif status == TokenStatus.EXPIRED: # pragma: no cover
raise exceptions.ExpiredTokenException()
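# Illustrative usage as a FastAPI dependency (the router below is hypothetical,
# not part of this module):
#   from fastapi import APIRouter, Depends
#   router = APIRouter()
#   @router.get("/me")
#   def read_me(user: models.User = Depends(current_user)):
#       return {"id": str(user.id), "username": user.username}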
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import with_metaclass
from collections import OrderedDict
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from shop.models.fields import JSONField
from shop.modifiers.pool import cart_modifiers_pool
from shop.money import Money
from .product import BaseProduct
from shop import deferred
from shop.models.customer import CustomerModel
class CartItemManager(models.Manager):
"""
Customized model manager for our CartItem model.
"""
def get_or_create(self, **kwargs):
"""
Create a unique cart item. If the same product already exists in the given cart,
increase the quantity of the existing item instead of creating a new one.
"""
cart = kwargs.pop('cart')
product = kwargs.pop('product')
quantity = int(kwargs.pop('quantity', 1))
# add a new item to the cart, or reuse an existing one, increasing the quantity
watched = not quantity
cart_item = product.is_in_cart(cart, watched=watched, **kwargs)
if cart_item:
if not watched:
cart_item.quantity += quantity
created = False
else:
cart_item = self.model(cart=cart, product=product, quantity=quantity, **kwargs)
created = True
cart_item.save()
return cart_item, created
def filter_cart_items(self, cart, request):
"""
Use this method to fetch the shopping items from the cart. It rearranges the result set
according to the registered cart modifiers.
"""
cart_items = self.filter(cart=cart, quantity__gt=0).order_by('id')
for modifier in cart_modifiers_pool.get_all_modifiers():
cart_items = modifier.arrange_cart_items(cart_items, request)
return cart_items
def filter_watch_items(self, cart, request):
"""
Use this method to fetch items from the watch list. It rearranges the result set
according to the defined modifiers.
"""
watch_items = self.filter(cart=cart, quantity=0)
for modifier in cart_modifiers_pool.get_all_modifiers():
watch_items = modifier.arrange_watch_items(watch_items, request)
return watch_items
class BaseCartItem(with_metaclass(deferred.ForeignKeyBuilder, models.Model)):
"""
This is a holder for the quantity of items in the cart and a pointer to the
actual Product being purchased.
"""
cart = deferred.ForeignKey('BaseCart', related_name='items')
product = deferred.ForeignKey(BaseProduct)
product_code = models.CharField(_("Product code"), max_length=255, null=True, blank=True,
help_text=_("Product code of added item."))
extra = JSONField(verbose_name=_("Arbitrary information for this cart item"))
objects = CartItemManager()
class Meta:
abstract = True
verbose_name = _("Cart item")
verbose_name_plural = _("Cart items")
@classmethod
def perform_model_checks(cls):
try:
allowed_types = ('IntegerField', 'DecimalField', 'FloatField')
field = [f for f in cls._meta.fields if f.attname == 'quantity'][0]
if not field.get_internal_type() in allowed_types:
msg = "Field `{}.quantity` must be of one of the types: {}."
raise ImproperlyConfigured(msg.format(cls.__name__, allowed_types))
except IndexError:
msg = "Class `{}` must implement a field named `quantity`."
raise ImproperlyConfigured(msg.format(cls.__name__))
def __init__(self, *args, **kwargs):
# reduce the given fields to what the model actually can consume
all_field_names = [field.name for field in self._meta.get_fields(include_parents=True)]
model_kwargs = {k: v for k, v in kwargs.items() if k in all_field_names}
super(BaseCartItem, self).__init__(*args, **model_kwargs)
self.extra_rows = OrderedDict()
self._dirty = True
def save(self, *args, **kwargs):
super(BaseCartItem, self).save(*args, **kwargs)
self._dirty = True
self.cart._dirty = True
def update(self, request):
"""
Loop over all registered cart modifier, change the price per cart item and optionally add
some extra rows.
"""
if not self._dirty:
return
self.extra_rows = OrderedDict() # reset the dictionary
for modifier in cart_modifiers_pool.get_all_modifiers():
modifier.process_cart_item(self, request)
self._dirty = False
CartItemModel = deferred.MaterializedModel(BaseCartItem)
class CartManager(models.Manager):
"""
The Model Manager for any Cart inheriting from BaseCart.
"""
def get_from_request(self, request):
"""
Return the cart for current customer.
"""
if request.customer.is_visitor():
raise self.model.DoesNotExist("Cart for visiting customer does not exist.")
cart, created = self.get_or_create(customer=request.customer)
return cart
def get_or_create_from_request(self, request):
if request.customer.is_visitor():
request.customer = CustomerModel.objects.get_or_create_from_request(request)
cart, created = self.get_or_create(customer=request.customer)
return cart
class BaseCart(with_metaclass(deferred.ForeignKeyBuilder, models.Model)):
"""
The fundamental part of a shopping cart.
"""
customer = deferred.OneToOneField('BaseCustomer', verbose_name=_("Customer"), related_name='cart')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_("Created at"))
updated_at = models.DateTimeField(auto_now=True, verbose_name=_("Updated at"))
extra = JSONField(verbose_name=_("Arbitrary information for this cart"))
# our CartManager determines the cart object from the request.
objects = CartManager()
class Meta:
abstract = True
verbose_name = _("Shopping Cart")
verbose_name_plural = _("Shopping Carts")
def __init__(self, *args, **kwargs):
super(BaseCart, self).__init__(*args, **kwargs)
# That will hold things like tax totals or total discount
self.extra_rows = OrderedDict()
self._cached_cart_items = None
self._dirty = True
def save(self, force_update=False, *args, **kwargs):
if self.pk or force_update is False:
super(BaseCart, self).save(force_update=force_update, *args, **kwargs)
self._dirty = True
def update(self, request):
"""
This should be called after a cart item changed quantity, has been added or removed.
It will loop on all line items in the cart, and call all the cart modifiers for each item.
After doing this, it will compute and update the order's total and subtotal fields, along
with any supplement added along the way by modifiers.
Note that these added fields are not stored - we actually want to
reflect rebate and tax changes on the *cart* items, but we don't want
that for the order items (since they are legally binding after the
"purchase" button was pressed)
"""
if not self._dirty:
return
if self._cached_cart_items:
items = self._cached_cart_items
else:
items = CartItemModel.objects.filter_cart_items(self, request)
# This calls all the pre_process_cart methods and the pre_process_cart_item for each item,
# before processing the cart. This allows to prepare and collect data on the cart.
for modifier in cart_modifiers_pool.get_all_modifiers():
modifier.pre_process_cart(self, request)
for item in items:
modifier.pre_process_cart_item(self, item, request)
self.extra_rows = OrderedDict() # reset the dictionary
self.subtotal = 0 # reset the subtotal
for item in items:
# item.update iterates over all cart modifiers and invokes method `process_cart_item`
item.update(request)
self.subtotal += item.line_total
# Iterate over the registered modifiers, to process the cart's summary
for modifier in cart_modifiers_pool.get_all_modifiers():
for item in items:
modifier.post_process_cart_item(self, item, request)
modifier.process_cart(self, request)
# This calls the post_process_cart method from cart modifiers, if any.
# It allows for a last bit of processing on the "finished" cart, before
# it is displayed
for modifier in reversed(cart_modifiers_pool.get_all_modifiers()):
modifier.post_process_cart(self, request)
# Cache updated cart items
self._cached_cart_items = items
self._dirty = False
def empty(self):
"""
Remove the cart with all its items.
"""
if self.pk:
self.items.all().delete()
self.delete()
def merge_with(self, other_cart):
"""
Merge the contents of the other cart into this one, afterwards delete it.
This is done item by item, so that duplicate items increase the quantity.
"""
# iterate over the cart and add quantities for items from other cart considered as equal
for item in self.items.all():
other_item = item.product.is_in_cart(other_cart, extra=item.extra)
if other_item:
item.quantity += other_item.quantity
item.save()
other_item.delete()
# the remaining items from the other cart are merged into this one
other_cart.items.update(cart=self)
other_cart.delete()
def __str__(self):
return "{}".format(self.pk) if self.pk else '(unsaved)'
@property
def num_items(self):
"""
Returns the number of items in the cart.
"""
return self.items.filter(quantity__gt=0).count()
@property
def total_quantity(self):
"""
Returns the total quantity of all items in the cart.
"""
aggr = self.items.aggregate(quantity=models.Sum('quantity'))
return aggr['quantity'] or 0
# if we would know, that self.items is already evaluated, then this might be faster:
# return sum([ci.quantity for ci in self.items.all()])
@property
def is_empty(self):
return self.total_quantity == 0
def get_caption_data(self):
return {'num_items': self.num_items, 'total_quantity': self.total_quantity,
'subtotal': self.subtotal, 'total': self.total}
@classmethod
def get_default_caption_data(cls):
return {'num_items': 0, 'total_quantity': 0, 'subtotal': Money(), 'total': Money()}
CartModel = deferred.MaterializedModel(BaseCart)
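# Illustrative (a sketch): a project materializes these abstract models by
# subclassing them in its own app, e.g.
#   class Cart(BaseCart):
#       pass
#   class CartItem(BaseCartItem):
#       quantity = models.IntegerField(default=1)
# where the ``quantity`` field satisfies the check in
# BaseCartItem.perform_model_checks().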
|
import numpy as np
from liquepy import functions
import eqsig
class ShearTest(object):
_stress = None
_strain = None
_pp = None
_esig_v0 = None
_i_liq = None
_i_liq_strain = None
_i_liq_pp = None
_n_points = 0
_n_cycles = None
_ru_limit = None
_da_strain = None
def __init__(self, stress, strain, esig_v0=1, sl=None, pp=None, n_cycles=None):
self._strain = np.array(strain)
self._stress = np.array(stress)
self.sl = sl
self._pp = pp
if esig_v0 is not None:
self._esig_v0 = esig_v0
self._n_points = len(stress)
self._n_cycles = n_cycles
@property
def pp(self):
return self._pp
@property
def stress(self):
return self._stress
@property
def strain(self):
return self._strain
@property
def esig_v0(self):
return self._esig_v0
@property
def i_liq(self):
return self._i_liq
@property
def i_liq_strain(self):
return self._i_liq_strain
@property
def i_liq_pp(self):
return self._i_liq_pp
@property
def n_points(self):
return self._n_points
@property
def n_cycles(self):
return self._n_cycles
@esig_v0.setter
def esig_v0(self, value):
self._esig_v0 = value
@property
def csr(self):
try:
return self.stress / self.esig_v0
except ValueError:
return None
@property
def epp(self):
try:
return self.pp - self.pp[0]
except ValueError:
return None
@property
def ru(self):
try:
return self.epp / self.esig_v0
except ValueError:
return None
@n_cycles.setter
def n_cycles(self, values):
self._n_cycles = values
def set_pp_via_ru(self, ru, hydrostatic):
epp = np.array(ru) * self.esig_v0
self._pp = epp + hydrostatic
def set_i_liq(self, ru_limit=None, esig_v_limit=None, strain_limit=None, da_strain_limit=None, or_none=True):
if ru_limit is not None:
self._ru_limit = ru_limit
self._i_liq_pp = functions.determine_t_liq_index(self.ru, ru_limit, return_none=or_none)
elif esig_v_limit is not None:
ru_limit = 1 - esig_v_limit / self.esig_v0
self._ru_limit = ru_limit
self._i_liq_pp = functions.determine_t_liq_index(self.ru, ru_limit, return_none=or_none)
elif strain_limit is None:
pass
# print("No limit set for set_i_liq")
if strain_limit is not None:
self._i_liq_strain = functions.determine_t_liq_index(abs(self.strain), strain_limit, return_none=or_none)
elif da_strain_limit is not None:
pinds = eqsig.get_switched_peak_array_indices(self.strain)
da_strains = self.get_da_strain_series()
dind = np.where(da_strains > da_strain_limit)
if len(dind[0]):
self._i_liq_strain = dind[0][0]
if self._i_liq_pp is None:
self._i_liq = self._i_liq_strain
elif self._i_liq_strain is None:
self._i_liq = self._i_liq_pp
else:
self._i_liq = min(self._i_liq_pp, self._i_liq_strain)
@property
def ru_limit(self):
return self._ru_limit
@property
def av_stress(self):
average_stress = (self.stress[1:] + self.stress[:-1]) / 2
average_stress = np.insert(average_stress, 0, self.stress[0]) # Include first value
return average_stress
@property
def delta_strain(self): # TODO: cache this parameter
delta_strain = np.diff(self.strain)
delta_strain = np.insert(delta_strain, 0, 0)
return delta_strain
def get_da_strain_series(self):
if self._da_strain is not None:
return np.array(self._da_strain)
pinds = eqsig.get_switched_peak_array_indices(self.strain)
if pinds[-1] != len(self.strain) - 1:
pinds = np.insert(pinds, len(pinds), len(self.strain) - 1)
da_strains = [0]
for j in range(len(pinds) - 1):
curr_p_strain = self.strain[pinds[j]]
sgn = np.sign(curr_p_strain)
if sgn == 0 and j == 0:
sgn = -1 * np.sign(self.strain[pinds[j + 1]])
da_strains += list(sgn * -1 * (self.strain[pinds[j] + 1: pinds[j + 1] + 1] - curr_p_strain))
self._da_strain = np.array(da_strains)
return np.array(self._da_strain)
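# Illustrative usage (synthetic data, not from a real element test):
#   n = 2000
#   strain = 0.03 * np.sin(np.linspace(0, 20 * np.pi, n))
#   stress = 40.0 * np.sin(np.linspace(0, 20 * np.pi, n))
#   test = ShearTest(stress, strain, esig_v0=100.0)
#   test.set_pp_via_ru(ru=np.linspace(0.0, 0.95, n), hydrostatic=0.0)
#   test.set_i_liq(ru_limit=0.95)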
|
import pytest
import numpy as np
import torch
from doctr.models.preprocessor import PreProcessor
@pytest.mark.parametrize(
"batch_size, output_size, input_tensor, expected_batches, expected_value",
[
[2, (128, 128), np.full((3, 256, 128, 3), 255, dtype=np.uint8), 1, .5], # numpy uint8
[2, (128, 128), np.ones((3, 256, 128, 3), dtype=np.float32), 1, .5], # numpy fp32
[2, (128, 128), np.ones((3, 256, 128, 3), dtype=np.float16), 1, .5], # numpy fp16
[2, (128, 128), torch.full((3, 3, 256, 128), 255, dtype=torch.uint8), 1, .5], # torch uint8
[2, (128, 128), torch.ones((3, 3, 256, 128), dtype=torch.float32), 1, .5], # torch fp32
[2, (128, 128), torch.ones((3, 3, 256, 128), dtype=torch.float16), 1, .5], # torch fp16
[2, (128, 128), [np.full((256, 128, 3), 255, dtype=np.uint8)] * 3, 2, .5], # list of numpy uint8
[2, (128, 128), [np.ones((256, 128, 3), dtype=np.float32)] * 3, 2, .5], # list of numpy fp32
[2, (128, 128), [np.ones((256, 128, 3), dtype=np.float16)] * 3, 2, .5], # list of numpy fp16
[2, (128, 128), [torch.full((3, 256, 128), 255, dtype=torch.uint8)] * 3, 2, .5], # list of torch uint8
[2, (128, 128), [torch.ones((3, 256, 128), dtype=torch.float32)] * 3, 2, .5], # list of torch fp32
[2, (128, 128), [torch.ones((3, 256, 128), dtype=torch.float16)] * 3, 2, .5], # list of torch fp32
],
)
def test_preprocessor(batch_size, output_size, input_tensor, expected_batches, expected_value):
processor = PreProcessor(output_size, batch_size)
# Invalid input type
with pytest.raises(TypeError):
processor(42)
# 4D check
with pytest.raises(AssertionError):
processor(np.full((256, 128, 3), 255, dtype=np.uint8))
with pytest.raises(TypeError):
processor(np.full((1, 256, 128, 3), 255, dtype=np.int32))
# 3D check
with pytest.raises(AssertionError):
processor([np.full((3, 256, 128, 3), 255, dtype=np.uint8)])
with pytest.raises(TypeError):
processor([np.full((256, 128, 3), 255, dtype=np.int32)])
with torch.no_grad():
out = processor(input_tensor)
assert isinstance(out, list) and len(out) == expected_batches
assert all(isinstance(b, torch.Tensor) for b in out)
assert all(b.dtype == torch.float32 for b in out)
assert all(b.shape[-2:] == output_size for b in out)
assert all(torch.all(b == expected_value) for b in out)
assert len(repr(processor).split('\n')) == 4
# Check FP16
processor = PreProcessor(output_size, batch_size, fp16=True)
with torch.no_grad():
out = processor(input_tensor)
assert all(b.dtype == torch.float16 for b in out)
|
#!/usr/bin/env python3
import serial
import subprocess
dev = serial.Serial("/dev/ttyUSB0", 115200)
results = []
for i in range(1, 250):
binary = f"benchmark-schoolbook_{i}.bin"
print(f">>> making {binary}")
subprocess.run(["make", binary])
print("done")
print(f">>> flashing {binary}")
subprocess.run(["st-flash", "--reset", "write", binary, "0x8000000"])
print("done")
state = 'waiting'
marker = b''
while True:
x = dev.read()
if state == 'waiting':
if x == b'=':
marker += x
continue
# If we saw more than 5 equals signs, assume the output has started
elif marker.count(b'=') > 5:
state = 'beginning'
vector = []
print(" .. found output marker..")
if state == 'beginning':
if x == b'=':
continue
else:
state = 'reading'
elif state == 'reading':
if x == b'#':
break
else:
vector.append(x)
vector = b''.join(vector).decode('utf-8').split("\n")
n = 0
cycles = 0
for j in range(len(vector)):
if vector[j] == "n: ":
n = int(vector[j+1])
if vector[j] == "cycles: ":
cycles = int(vector[j+1])
print("## found results")
if vector[i] == "ERROR!":
print("## FOUND AN ERROR!")
results.append([n, cycles])
print("| N | cycles ")
for result in results:
print(f"| {result[0]} | {result[1]}")
print("cleaning up...")
subprocess.run(["make", "clean"])
print("done...")
print("| N | cycles ")
for result in results:
print(f"| {result[0]} | {result[1]}")
|
# -*- coding: utf-8 -*-
# /*##########################################################################
#
# Copyright (c) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Qt-based plot widget.
This widget provides plot functionalities.
This is PyMca's plot API.
"""
__authors__ = ["V.A. Sole", "T. Vincent"]
__license__ = "MIT"
__date__ = "11/02/2016"
from collections import OrderedDict
import logging
from PyQt4 import QtCore, QtGui
from .plot import Plot
from .backend_mpl import BackendMPL
from . import items
logging.basicConfig()
logger = logging.getLogger(__name__)
# API changes from PyMca:
# - Remove **kw in methods
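# Illustrative usage (a sketch; assumes a running Qt event loop):
#   app = QtGui.QApplication([])
#   w = PlotWidget()
#   w.addCurve([0, 1, 2], [1, 3, 2], legend='demo')
#   w.show()
#   app.exec_()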
class PlotWidget(QtGui.QMainWindow):
def __init__(self, parent=None, windowFlags=QtCore.Qt.Widget, backend=None):
self._invertYAxis = False
self._curves = OrderedDict()
super(PlotWidget, self).__init__(parent, windowFlags)
self._plot = Plot()
self._backend = BackendMPL(self._plot)
self.setCentralWidget(self._backend)
self._plot.axes.left.visible = True
self._plot.axes.right.visible = False
################
# Plot content #
################
# Add
# - addImage add interpolation
def addCurve(
self,
x,
y,
legend=None,
info=None,
replace=False,
replot=True,
color=None,
symbol=None,
linewidth=None,
linestyle=None,
xlabel=None,
ylabel=None,
yaxis=None,
xerror=None,
yerror=None,
z=1,
selectable=True,
**kw
):
if kw:
logger.warning("addCurve extra arguments deprecated")
if replace:
self.remove(kind="curve")
if color is None:
color = (0.0, 0.0, 0.0) # TODO
assert yaxis in ("left", "right", None)
yaxis = "left" if yaxis in (None, "left") else "right"
# if a curve with the same name exists, remove it
if legend in self._curves:
oldCurve = self._curves.pop(legend)
self._plot.removeItem(oldCurve)
curve = items.Curve(
x,
y,
xerror=xerror,
yerror=yerror,
color=color,
copy=True,
marker=symbol,
linewidth=linewidth,
linestyle=linestyle,
z=z,
selectable=selectable,
)
curve.info = info
curve.legend = legend
curve.xlabel = xlabel
curve.ylabel = ylabel
curve.yaxis = yaxis
if yaxis == "left":
self._plot.addItem(curve)
else:
self._plot.axes.right.addItem(curve)
# TODO active curve handling
if replot:
self.resetZoom()
return curve
def addImage(
self,
data,
legend=None,
info=None,
replace=True,
replot=True,
xScale=(0.0, 1.0),
yScale=(0.0, 1.0),
z=0,
selectable=False,
draggable=False,
colormap=None,
**kw
):
if kw:
logger.warning("addImage extra arguments deprecated")
if replace:
self.remove(kind="image")
image = items.Image(
data,
copy=True,
colormap=colormap,
origin=(xScale[0], yScale[0]),
scale=(xScale[1], yScale[1]),
z=z,
selectable=selectable,
draggable=draggable,
)
image.legend = legend
image.info = info
self._plot.addItem(image)
# TODO active image handling
if replot:
self.resetZoom()
return image
def addItem(
self,
xList,
yList,
legend=None,
info=None,
replace=False,
replot=True,
shape="polygon",
fill=True,
**kw
):
if kw:
logger.warning("addItem extra arguments deprecated")
if replace:
self.remove(kind="item")
# TODO
if replot:
self.replot()
return None
def addMarker(
self,
x,
y,
legend=None,
text=None,
color="k",
selectable=False,
draggable=False,
replot=True,
symbol=None,
constraint=None,
):
# TODO
return None
# Remove
def clear(self):
pass
def remove(self, legend=None, kind=None):
pass
# Get
# TODO: getImages? getMarkers?
def getAllCurves(self, just_legend=False):
pass
def getCurve(self, legend):
pass
def getImage(self, legend):
pass
def getMonotonicCurves(self):
pass
# Show/hide
# TODO: usage? replace by OO.visible
def hideCurve(self, legend, replot=True):
if replot:
self.replot()
def hideImage(self, legend, replot=True):
if replot:
self.replot()
def isCurveHidden(self, legend):
pass
def isImageHidden(self, legend):
pass
def showCurve(self, legend, replot=True):
if replot:
self.replot()
def showImage(self, legend, replot=True):
if replot:
self.replot()
##############
# Plot setup #
##############
# Labels
def getGraphTitle(self):
return self._plot.title
def setGraphTitle(self, title=""):
self._plot.title = title
def getGraphXLabel(self):
return self._plot.axes.left.xlabel
def setGraphXLabel(self, label="X"):
self._plot.xlabel = label
def getGraphYLabel(self):
return self._plot.axes.left.ylabel
def setGraphYLabel(self, label="Y"):
self._plot.ylabel = label
# Axes
def isYAxisInverted(self):
return self._invertYAxis
def invertYAxis(self, flag=True):
self._invertYAxis = bool(flag)
for axes in self._plot.axes:
begin, end = axes.ylimits
axes.ylimits = end, begin
def isXAxisLogarithmic(self):
pass
def setXAxisLogarithmic(self, flag=True):
pass
def isYAxisLogarithmic(self):
pass
def setYAxisLogarithmic(self, flag):
pass
def isDefaultBaseVectors(self):
pass
def getBaseVectors(self):
pass
def setBaseVectors(self, x=(1.0, 0.0), y=(0.0, 1.0)):
pass
# Limits
# TODO check interaction with autoscale and aspect ratio
def getGraphXLimits(self):
start, end = self._plot.xlimits
return min(start, end), max(start, end)
def setGraphXLimits(self, xmin, xmax):
self._plot.xlimits = xmin, xmax
def getGraphYLimits(self, axis="left"):
assert axis in ("left", "right")
if axis == "left":
axes = self._plot.axes.left
else:
axes = self._plot.axes.right
start, end = axes.ylimits
return min(start, end), max(start, end)
def setGraphYLimits(self, ymin, ymax, axis="left"):
assert axis in ("left", "right")
if axis == "left":
axes = self._plot.axes.left
else:
axes = self._plot.axes.right
axes.ylimits = (ymax, ymin) if self._invertYAxis else (ymin, ymax)
def setLimits(self, xmin, xmax, ymin, ymax):
self.setGraphXLimits(xmin, xmax)
self.setGraphYLimits(ymin, ymax)
def isXAxisAutoScale(self):
pass
def setXAxisAutoScale(self, flag=True):
pass
def isYAxisAutoScale(self):
pass
def setYAxisAutoScale(self, flag=True):
pass
def resetZoom(self, dataMargins=None):
pass
def replot(self):
pass
# plot
def isKeepDataAspectRatio(self):
pass
def keepDataAspectRatio(self, flag=True):
pass
def showGrid(self, flag=True):
pass
def getDataMargins(self):
pass
def setDataMargins(
self, xMinMargin=0.0, xMaxMargin=0.0, yMinMargin=0.0, yMaxMargin=0.0
):
pass
############
# Defaults #
############
# TODO: add getDefaultPlotPoints, getDefaultPlotLines
# def setDefaults(self, colormap, symbol, linestyle, linewidth, colors)
# def getDefaults(self) -> dict
def getSupportedColormaps(self):
pass
def getDefaultColormap(self):
pass
def setDefaultColormap(self, colormap=None):
pass
def setDefaultPlotPoints(self, flag):
pass
def setDefaultPlotLines(self, flag):
pass
#########
# Utils #
#########
def dataToPixel(self, x=None, y=None, axis="left"):
pass
def pixelToData(self, x=None, y=None, axis="left"):
pass
def getWidgetHandle(self):
return self
#############
# Selection #
#############
# TODO:
# - rename enableActiveCurveHandling -> setActiveCurveHandling
# - missing activeImage handling + combine with active curve
def isActiveCurveHandlingEnabled(self):
pass
def enableActiveCurveHandling(self, flag=True):
pass
def setActiveCurveColor(self, color="#000000"):
pass
def getActiveCurve(self, just_legend=False):
pass
def setActiveCurve(self, legend, replot=True):
if replot:
self.replot()
def getActiveImage(self, just_legend=False):
pass
def setActiveImage(self, legend, replot=True):
if replot:
self.replot()
###############
# Interaction #
###############
# TODO:
# - getDrawMode + isDrawModeEnabled
# - Issue: setDrawMode/setZoomMode have interactions
def getGraphCursor(self):
pass
def setGraphCursor(self, flag=None, color=None, linewidth=None, linestyle=None):
pass
def isDrawModeEnabled(self):
pass
def getDrawMode(self):
pass
def setDrawModeEnabled(
self, flag=True, shape="polygon", label=None, color=None, **kw
):
pass
def isZoomModeEnabled(self):
pass
def setZoomModeEnabled(self, flag=True, color=None):
pass
def isPanWithArrowKeys(self):
pass
def setPanWithArrowKeys(self, pan=False):
pass
#########
# Misc. #
#########
def saveGraph(self, fileName, fileFormat="svg", dpi=None, **kw):
pass
def printGraph(
self,
width=None,
height=None,
xOffset=0.0,
yOffset=0.0,
units="inches",
dpi=None,
printer=None,
dialog=True,
keepAspectRatio=True,
**kw
):
pass
def setCallback(self, callbackFunction):
pass
##############
def insertMarker(
self,
x,
y,
legend=None,
text=None,
color="k",
selectable=False,
draggable=False,
replot=True,
symbol=None,
constraint=None,
**kw
):
logger.warning("insertMarker deprecated, use addMarker instead")
return self.addMarker(
x, y, legend, text, color, selectable, draggable, replot, symbol, constraint
)
def insertXMarker(
self,
x,
legend=None,
text=None,
color="k",
selectable=False,
draggable=False,
replot=True,
**kw
):
logger.warning("insertXMarker deprecated, use addMarker with y=None instead")
return self.addMarker(
x, None, legend, text, color, selectable, draggable, replot, None, None
)
def insertYMarker(
self,
y,
legend=None,
text=None,
color="k",
selectable=False,
draggable=False,
replot=True,
**kw
):
logger.warning("insertYMarker deprecated, use addMarker with x=None instead")
return self.addMarker(
None, y, legend, text, color, selectable, draggable, replot, None, None
)
def clearCurves(self):
logger.warning("clearCurves deprecated, use remove(kind='curve') instead")
self.remove(kind="curve")
def clearImages(self):
        logger.warning("clearImages deprecated, use remove(kind='image') instead")
self.remove(kind="image")
def clearMarkers(self):
        logger.warning("clearMarkers deprecated, use remove(kind='marker') instead")
        self.remove(kind="marker")
def removeCurve(self, legend, replot=True):
logger.warning("removeCurve deprecated, use remove instead")
self.remove(legend, kind="curve")
if replot:
self.replot()
def removeImage(self, legend, replot=True):
logger.warning("removeImage deprecated, use remove instead")
self.remove(legend, kind="image")
if replot:
self.replot()
def removeItem(self, legend, replot=True):
logger.warning("removeItem deprecated, use remove instead")
self.remove(legend, kind="item")
if replot:
self.replot()
def removeMarker(self, legend, replot=True):
logger.warning("removeMarker deprecated, use remove instead")
self.remove(legend, kind="marker")
if replot:
self.replot()
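# Usage sketch (a minimal, non-authoritative example: Plot, BackendMPL and the Qt
# application setup are assumed to be provided elsewhere in this package, and the
# data values below are illustrative only):
#
#   widget = PlotWidget()
#   widget.addCurve([0, 1, 2, 3], [0, 1, 4, 9], legend="squares", color=(1.0, 0.0, 0.0))
#   widget.setGraphTitle("Demo")
#   widget.setGraphXLimits(0, 3)
#   widget.show()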
|
import requests
from sys import argv
"""
quickly check api responses.
setup venv:
python3 -m venv merlinapi_venv
source merlinapi_venv/bin/activate
pip3 install -r requirements.txt
run:
python3 api_tests.py
"""
TFJS_URL = "http://127.0.0.1:3300/api/v1/classify/27758741/d0601ea6-8a3d-4899-9a24-473a7186f8c7"
TOKEN_URL = "http://127.0.0.1:3300/api/token"
EXPORT_URL = "http://127.0.0.1:3300/api/export"
FALSE_URL = "http://127.0.0.1:3300/api/null"
urls_to_test = [TOKEN_URL, TFJS_URL]
def hit(url):
print("hitting endpoint: " + url)
_res = requests.get(url, verify=False)
_status = " -- status is: "
if _res.status_code != 200:
results = "failed" + _status + _res.status_code.__str__()
return results, False, _res.elapsed.total_seconds()
else:
results = "passed" + _status + _res.status_code.__str__()
try:
contents = _res.json()
        except ValueError:
contents = _res.text
return results, contents, _res.elapsed.total_seconds()
def test(url_list):
for u in url_list:
_result, _content, _time = hit(u)
print('test: ' + _result)
print('contents: ' + str(_content))
print('elapsed: ' + str(_time))
if __name__ == '__main__':
test(urls_to_test)
|
from dataclasses import dataclass
from typing import List, Tuple
from tqdm import tqdm
from src.core.common.accents_dict import AccentsDict
from src.core.common.language import Language
from src.core.common.symbol_id_dict import SymbolIdDict
from src.core.common.symbols_dict import SymbolsDict
from src.core.common.text import deserialize_list, serialize_list
from src.core.common.utils import GenericList, get_counter
from src.core.pre.ds import DsData, DsDataList
from src.core.pre.text.utils import symbols_convert_to_ipa, symbols_normalize
@dataclass()
class TextData:
entry_id: int
text: str
serialized_symbol_ids: str
serialized_accent_ids: str
lang: Language
def load_init(self):
self.lang = Language(self.lang)
class TextDataList(GenericList[TextData]):
def load_init(self):
for item in self.items():
item.load_init()
def convert_to_ipa(data: TextDataList, symbol_converter: SymbolIdDict, ignore_tones: bool, ignore_arcs: bool) -> Tuple[TextDataList, SymbolIdDict, SymbolsDict]:
processed_data: List[Tuple[int, List[str], List[int], Language]] = []
values: TextData
for values in tqdm(data.items()):
new_symbols, new_accent_ids = symbols_convert_to_ipa(
symbols=symbol_converter.get_symbols(values.serialized_symbol_ids),
lang=values.lang,
accent_ids=deserialize_list(values.serialized_accent_ids),
ignore_arcs=ignore_arcs,
ignore_tones=ignore_tones
)
processed_data.append((values.entry_id, new_symbols, new_accent_ids, Language.IPA))
return _prepare_data(processed_data)
def normalize(data: TextDataList, symbol_converter: SymbolIdDict) -> Tuple[TextDataList, SymbolIdDict, SymbolsDict]:
processed_data: List[Tuple[int, List[str], List[int], Language]] = []
values: TextData
for values in tqdm(data):
new_symbols, new_accent_ids = symbols_normalize(
symbols=symbol_converter.get_symbols(values.serialized_symbol_ids),
lang=values.lang,
accent_ids=deserialize_list(values.serialized_accent_ids),
)
processed_data.append((values.entry_id, new_symbols, new_accent_ids, values.lang))
return _prepare_data(processed_data)
def preprocess(data: DsDataList, symbol_ids: SymbolIdDict) -> Tuple[TextDataList, SymbolIdDict, SymbolsDict]:
processed_data: List[Tuple[int, List[str], List[int], Language]] = []
values: DsData
for values in tqdm(data):
symbols: List[str] = symbol_ids.get_symbols(deserialize_list(values.serialized_symbols))
accents: List[int] = deserialize_list(values.serialized_accents)
processed_data.append((values.entry_id, symbols, accents, values.lang))
return _prepare_data(processed_data)
def _prepare_data(processed_data: List[Tuple[int, List[str], List[int], Language]]) -> Tuple[TextDataList, SymbolIdDict, AccentsDict, SymbolsDict]:
result = TextDataList()
symbol_counter = get_counter([x[1] for x in processed_data])
symbols_dict = SymbolsDict.fromcounter(symbol_counter)
conv = SymbolIdDict.init_from_symbols(set(symbols_dict.keys()))
for entry_id, symbols, accent_ids, lang in processed_data:
assert len(accent_ids) == len(symbols)
text = SymbolIdDict.symbols_to_text(symbols)
serialized_symbol_ids = conv.get_serialized_ids(symbols)
serialized_accent_ids = serialize_list(accent_ids)
data = TextData(entry_id, text, serialized_symbol_ids, serialized_accent_ids, lang)
result.append(data)
return result, conv, symbols_dict
|
from discord.ext import commands
import discord
import os
TOKEN = os.environ['DISCORD_BOT_TOKEN']
client = discord.Client()
@client.event
async def on_ready():
channel = client.get_channel(701731353783304225)
await channel.send('投稿削除サーバー起動')
return
@client.event
async def on_message(message):
channel = client.get_channel(701731353783304225)
    if message.content == "delete":
await channel.send("削除を開始します")
if message.author.bot:
return
if message.channel.name == 'bot_control':
return
if message.channel.name == '一般':
return
if message.channel.name == 'ルール':
return
if message.channel.name == '対戦a実況':
return
if message.channel.name == '対戦b実況':
return
await message.channel.purge()
await channel.send("完了しました")
return
client.run(TOKEN)
|
import random
import re
import requests
from urllib3.exceptions import InsecureRequestWarning
from login.Utils import Utils
from login.casLogin import casLogin
from login.iapLogin import iapLogin
from login.RSALogin import RSALogin
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class TodayLoginService:
    # Initialize the local login class
def __init__(self, userInfo):
if None == userInfo['username'] or '' == userInfo['username'] or None == userInfo['password'] or '' == \
userInfo['password'] or None == userInfo['schoolName'] or '' == userInfo['schoolName']:
raise Exception('初始化类失败,请键入完整的参数(用户名,密码,学校名称)')
self.username = userInfo['username']
self.password = userInfo['password']
self.schoolName = userInfo['schoolName']
self.session = requests.session()
headers = {'User-Agent': random.choice(Utils.getUserAgents())}
        # Disable keep-alive to avoid holding extra connections
self.session.keep_alive = False
        # Increase the retry count
self.session.adapters.DEFAULT_RETRIES = 5
self.session.headers = headers
        # If a proxy is configured for this user, route the user's requests through it
if 'proxy' in userInfo and userInfo['proxy'] is not None and userInfo['proxy'] != '':
print(f'{Utils.getAsiaTime()} 检测到代理ip配置,正在使用代理')
self.session.proxies = {'http': userInfo['proxy'], 'https': userInfo['proxy']}
else:
print(f'{Utils.getAsiaTime()} 未检测到代理ip,已忽略')
        # Add a response hook to detect whether the request was blocked with a 418
self.session.hooks['response'].append(Utils.checkStatus)
self.login_url = ''
self.host = ''
self.login_host = ''
self.loginEntity = None
    # Use the API to look up the school's login URL from its name
def getLoginUrlBySchoolName(self):
schools = self.session.get('https://mobile.campushoy.com/v6/config/guest/tenant/list', verify=False,
hooks=dict(response=[Utils.checkStatus])).json()[
'data']
flag = True
for item in schools:
if item['name'] == self.schoolName:
if item['joinType'] == 'NONE':
raise Exception(self.schoolName + '未加入今日校园,请检查...')
flag = False
params = {
'ids': item['id']
}
data = self.session.get('https://mobile.campushoy.com/v6/config/guest/tenant/info', params=params,
verify=False, hooks=dict(response=[Utils.checkStatus])).json()['data'][0]
joinType = data['joinType']
idsUrl = data['idsUrl']
ampUrl = data['ampUrl']
if 'campusphere' in ampUrl or 'cpdaily' in ampUrl:
                    self.host = re.findall(r'\w{4,5}\:\/\/.*?\/', ampUrl)[0]
status_code = 0
while status_code != 200:
newAmpUrl = self.session.get(ampUrl, allow_redirects=False, verify=False)
status_code = newAmpUrl.status_code
if 'Location' in newAmpUrl.headers:
ampUrl = newAmpUrl.headers['Location']
self.login_url = ampUrl
                    self.login_host = re.findall(r'\w{4,5}\:\/\/.*?\/', self.login_url)[0]
ampUrl2 = data['ampUrl2']
if 'campusphere' in ampUrl2 or 'cpdaily' in ampUrl2:
                    self.host = re.findall(r'\w{4,5}\:\/\/.*?\/', ampUrl2)[0]
ampUrl2 = self.session.get(ampUrl2, verify=False).url
self.login_url = ampUrl2
self.login_host = re.findall(r'\w{4,5}\:\/\/.*?\/', self.login_url)[0]
break
    # Decide which login method to use based on the login URL
def checkLogin(self):
if self.login_url.find('/iap') != -1:
self.loginEntity = iapLogin(self.username, self.password, self.login_url, self.login_host, self.session)
elif self.login_url.find('kmu.edu.cn') != -1 or self.login_url.find('hytc.edu.cn') != -1:
self.loginEntity = RSALogin(self.username, self.password, self.login_url, self.login_host, self.session)
else:
self.loginEntity = casLogin(self.username, self.password, self.login_url, self.login_host, self.session)
        # Unified login flow
self.session.cookies = self.loginEntity.login()
    # Perform the local login
def login(self):
        # Get the school's login URL
self.getLoginUrlBySchoolName()
self.checkLogin()
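# Usage sketch (the credential values below are placeholders, not real accounts):
#
#   userInfo = {'username': 'student_id', 'password': 'password', 'schoolName': 'Example University'}
#   service = TodayLoginService(userInfo)
#   service.login()
#   # service.session now holds the authenticated cookies for subsequent requests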
|
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common.exceptions import StackStormPluginException
class PluginLoadError(StackStormPluginException):
pass
class IncompatiblePluginException(StackStormPluginException):
pass
|
__title__ = 'cleanfreak'
__author__ = 'Dan Bradham'
__email__ = 'danielbradham@gmail.com'
__url__ = 'http://github.com/danbradham/cleanfreak'
__version__ = '0.1.8'
__license__ = 'MIT'
__description__ = 'Sanity checks and grades for CG production.'
import os
from functools import partial
# Package relative path joining
package_path = partial(os.path.join, os.path.dirname(__file__))
os.environ.setdefault('CLEANFREAK_CFG', os.path.expanduser('~/cleanfreak'))
from .app import CleanFreak
from .checker import Checker
|
import numpy as np
def skew(x):
    """Returns the skew-symmetric matrix M such that cross(x, v) = M @ v."""
    x = np.asarray(x).ravel()
    return np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
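# A minimal sanity check of the property documented above (illustrative vectors only):
#   >>> x = np.array([1.0, 2.0, 3.0]); v = np.array([4.0, 5.0, 6.0])
#   >>> np.allclose(skew(x) @ v, np.cross(x, v))
#   True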
# !> Computes directional cosine matrix DirCos
# !! Transforms from element to global coordinates: xg = DC.xe, Kg = DC.Ke.DC^t
# !! Assumes that the element main direction is along ze.
# !! NOTE that this is the transpose of what is normally considered the Direction Cosine Matrix
# SUBROUTINE GetDirCos(P1, P2, DirCos, L_out, ErrStat, ErrMsg)
# REAL(ReKi) , INTENT(IN ) :: P1(3), P2(3) ! (x,y,z) global positions of two nodes making up an element
# REAL(FEKi) , INTENT( OUT) :: DirCos(3, 3) ! calculated direction cosine matrix
# REAL(ReKi) , INTENT( OUT) :: L_out ! length of element
# INTEGER(IntKi), INTENT( OUT) :: ErrStat ! Error status of the operation
# CHARACTER(*), INTENT( OUT) :: ErrMsg ! Error message if ErrStat /= ErrID_None
# REAL(FEKi) :: Dx, Dy, Dz, Dxy,L! distances between nodes
# ErrMsg = ""
# ErrStat = ErrID_None
#
# Dx=P2(1)-P1(1)
# Dy=P2(2)-P1(2)
# Dz=P2(3)-P1(3)
# Dxy = sqrt( Dx**2 + Dy**2 )
# L = sqrt( Dx**2 + Dy**2 + Dz**2)
#
# IF ( EqualRealNos(L, 0.0_FEKi) ) THEN
# ErrMsg = ' Same starting and ending location in the element.'
# ErrStat = ErrID_Fatal
# RETURN
# ENDIF
#
# IF ( EqualRealNos(Dxy, 0.0_FEKi) ) THEN
# DirCos=0.0_FEKi ! whole matrix set to 0
# IF ( Dz < 0) THEN !x is kept along global x
# DirCos(1, 1) = 1.0_FEKi
# DirCos(2, 2) = -1.0_FEKi
# DirCos(3, 3) = -1.0_FEKi
# ELSE
# DirCos(1, 1) = 1.0_ReKi
# DirCos(2, 2) = 1.0_ReKi
# DirCos(3, 3) = 1.0_ReKi
# ENDIF
# ELSE
# DirCos(1, 1) = Dy/Dxy
# DirCos(1, 2) = +Dx*Dz/(L*Dxy)
# DirCos(1, 3) = Dx/L
#
# DirCos(2, 1) = -Dx/Dxy
# DirCos(2, 2) = +Dz*Dy/(L*Dxy)
# DirCos(2, 3) = Dy/L
#
# DirCos(3, 1) = 0.0_FEKi
# DirCos(3, 2) = -Dxy/L
# DirCos(3, 3) = +Dz/L
# ENDIF
# L_out= real(L, ReKi)
#
# END SUBROUTINE GetDirCos
# !------------------------------------------------------------------------------------------------------
# !> Rigid transformation matrix between DOFs of node j and k where node j is the leader node.
# SUBROUTINE GetRigidTransformation(Pj, Pk, TRigid, ErrStat, ErrMsg)
# REAL(ReKi), INTENT(IN ) :: Pj(3) ! (x,y,z) positions of leader node
# REAL(ReKi), INTENT(IN ) :: Pk(3) ! (x,y,z) positions of follower node
# REAL(ReKi), INTENT( OUT) :: TRigid(6,6) ! Transformation matrix such that xk = T.xj
# INTEGER(IntKi), INTENT( OUT) :: ErrStat ! Error status of the operation
# CHARACTER(*), INTENT( OUT) :: ErrMsg ! Error message if ErrStat /= ErrID_None
# ! Local
# !REAL(ReKi) :: L ! length of element
# !REAL(ReKi) :: DirCos(3, 3) ! direction cosine matrix
# !REAL(ReKi) :: R0(3,3)
# integer(IntKi) :: I
# ErrStat = ErrID_None
# ErrMsg = ""
#
# ! --- Formulation using Delta of Global coordinates
# Trigid=0; do I = 1,6; Trigid(I,I) = 1; enddo
# Trigid ( 1, 5 ) = (Pk(3) - Pj(3))
# Trigid ( 1, 6 ) = -(Pk(2) - Pj(2))
# Trigid ( 2, 4 ) = -(Pk(3) - Pj(3))
# Trigid ( 2, 6 ) = (Pk(1) - Pj(1))
# Trigid ( 3, 4 ) = (Pk(2) - Pj(2))
# Trigid ( 3, 5 ) = -(Pk(1) - Pj(1))
#
# ! --- Formulation by transforming the "local" matrix into a global one
# !call GetDirCos(Pj, Pk, R0, L, ErrStat, ErrMsg)
# !TRigid = 0 ; do I = 1,6; TRigid(I,I) = 1; enddo
# !TRigid (1, 5) = L
# !TRigid (2, 4) = -L
# !TRigid(1:3,4:6) = matmul( R0 , matmul(TRigid(1:3,4:6), transpose(R0)) )
#
# ! --- Formulation using L and Rotation matrix
# !TRigid = 0; do I = 1,6; TRigid(I,I) = 1; enddo
# !TRigid ( 1, 5 ) = L*R0(3,3)
# !TRigid ( 1, 6 ) = -L*R0(2,3)
# !TRigid ( 2, 4 ) = -L*R0(3,3)
# !TRigid ( 2, 6 ) = L*R0(1,3)
# !TRigid ( 3, 4 ) = L*R0(2,3)
# !TRigid ( 3, 5 ) = -L*R0(1,3)
# END SUBROUTINE GetRigidTransformation
# SUBROUTINE RigidTransformationLine(dx,dy,dz,iLine,Line)
# real(ReKi), INTENT(IN) :: dx,dy,dz
# integer(IntKi) , INTENT(IN) :: iLine
# Real(ReKi), dimension(6), INTENT(OUT) :: Line
# SELECT CASE (iLine)
# CASE (1); Line = (/1.0_ReKi, 0.0_ReKi, 0.0_ReKi, 0.0_ReKi, dz, -dy/)
# CASE (2); Line = (/0.0_ReKi, 1.0_ReKi, 0.0_ReKi, -dz, 0.0_ReKi, dx/)
# CASE (3); Line = (/0.0_ReKi, 0.0_ReKi, 1.0_ReKi, dy, -dx, 0.0_ReKi/)
# CASE (4); Line = (/0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 1.0_ReKi, 0.0_ReKi, 0.0_ReKi/)
# CASE (5); Line = (/0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 1.0_ReKi, 0.0_ReKi/)
# CASE (6); Line = (/0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 1.0_ReKi/)
# CASE DEFAULT
# Line=-99999999_ReKi
# print*,'Error in RigidTransformationLine'
# STOP
# ! ErrStat = ErrID_Fatal
# ! ErrMsg = 'Error calculating transformation matrix TI '
# ! return
# END SELECT
# END SUBROUTINE
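# Python sketch of the commented Fortran GetRigidTransformation above; this is an
# illustrative transcription (not part of the original Fortran source) and reuses
# the skew() helper defined earlier in this module.
def get_rigid_transformation(Pj, Pk):
    """Rigid transformation matrix T (6x6) such that xk = T.xj, node j being the leader."""
    Pj = np.asarray(Pj, dtype=float)
    Pk = np.asarray(Pk, dtype=float)
    T = np.eye(6)
    # Translation/rotation coupling block, matching Trigid(1,5)=dz, Trigid(1,6)=-dy, etc.
    T[0:3, 3:6] = skew(Pj - Pk)
    return T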
|
import os
import sys
testsPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, testsPath + '/../')
from suds.client import Client
import datetime
import pytest
import pytz
from nrewebservices.ldbws import NextDeparturesBoardWithDetails, Session
from helpers import mock_ldbws_response_from_file, ldbws_client_helper
@pytest.fixture(scope="module")
def board():
ldbws_client = ldbws_client_helper()
response = mock_ldbws_response_from_file(
ldbws_client,
"LDBServiceSoap",
"GetFastestDeparturesWithDetails",
"basic-fastest-departures-board-with-details.xml",
)
return NextDeparturesBoardWithDetails(response)
class TestFastestDeparturesBoardWithDetails(object):
def test_fastest_departures_board_with_details_basics(self, board):
# Basic NextDeparturesBoard properties.
assert board.generated_at.astimezone(pytz.utc) == \
datetime.datetime(2016, 8, 6, 21, 12, 33, 677886, tzinfo=pytz.utc)
assert board.location_name == 'East Croydon'
assert board.crs == 'ECR'
assert board.filter_location_name is None
assert board.filter_crs is None
assert board.filter_type is None
assert board.platforms_available is True
assert board.services_available is True
# NRCC messages list.
assert board.nrcc_messages == ['<P>Amended weekday Southern and Gatwick Express services. More information in <A href="http://nationalrail.co.uk/service_disruptions/143147.aspx">Latest Travel News</A>.</P>']
# Next Departures items.
assert len(board.next_departures) == 3
def test_fastest_departures_board_with_details_next_departures_basics(self, board):
dep = board.next_departures[1]
assert dep.crs == "GTW"
def test_fastest_departures_board_with_details_service_basics(self, board):
service = board.next_departures[1].service
assert len(service.origins) == 1
assert len(service.destinations) == 1
assert len(service.current_origins) == 0
assert len(service.current_destinations) == 0
assert service.sta == "22:05"
assert service.eta == "22:09"
assert service.std == "22:06"
assert service.etd == "22:11"
assert service.platform == "2"
assert service.operator == "Southern"
assert service.operator_code == "SN"
assert service.circular_route is False
assert service.cancelled is False
assert service.filter_location_cancelled is False
assert service.service_type == "train"
assert service.length is None
assert service.detach_front is False
assert service.reverse_formation is False
assert service.cancel_reason is None
assert service.delay_reason is None
assert service.service_id == "1hHszdehfteYvCy6NbiPKw=="
        assert service.adhoc_alerts is None
assert service.rsid == "SN078700"
assert service.origin == "London Victoria"
assert service.destination == "Brighton"
def test_fastest_departures_board_with_details_service_calling_points(self, board):
service = board.next_departures[1].service
# Check the quantity of each type of calling point list.
assert len(service.previous_calling_points) == 0
assert len(service.subsequent_calling_points) == 1
# Check the subsequent calling points list properties.
scp = service.subsequent_calling_points[0]
assert len(scp) == 3
        assert scp.service_type is None
        assert scp.change_required is False
        assert scp.association_is_cancelled is False
# Check the actual calling point properties.
cp = scp[0]
cp.location_name
assert cp.location_name == "Gatwick Airport"
assert cp.crs == "GTW"
assert cp.st == "22:21"
assert cp.et == "22:25"
assert cp.at is None
assert cp.cancelled is False
assert cp.length is None
assert cp.detach_front is False
assert cp.adhoc_alerts is None
|
from django.utils import timezone
from calendar import HTMLCalendar
import logging
logger = logging.getLogger(__name__)
class Calendar(HTMLCalendar):
def __init__(self, year=None, month=None, dark=False):
self.year = year
self.month = month
self.events = None
self.dark = dark
super(Calendar, self).__init__()
self.setfirstweekday(6)
# formats a day as a td
# filter events by day
def formatday(self, day):
events_per_day = self.events.filter(class_date__day=day)
data = ''
for event in events_per_day:
            logger.debug(event.class_type)
btn_color = 'btn-primary'
if event.class_type == 'combined':
btn_color = 'btn-info'
elif event.class_type == 'returnee':
btn_color = 'btn-secondary'
cd = timezone.localtime(event.class_date)
data += f'<li><button class="btn {btn_color} bc-btn m-1" type="button" bc_id="{event.id}">'
data += f'{event.class_type.capitalize()} {cd.strftime("%I:%M %p")}</button></li>'
if day != 0:
return f"<td><span class='date'>{day}</span><ul> {data} </ul></td>"
return '<td></td>'
# formats a week as a tr
def formatweek(self, theweek):
week = ''
for d, weekday in theweek:
week += self.formatday(d)
return f'<tr> {week} </tr>'
# formats a month as a table
# filter events by year and month
def formatmonth(self, withyear=True):
if self.dark:
bg = 'table-dark'
else:
bg = ''
cal = f'<table border="0" cellpadding="0" cellspacing="0" class="calendar table table-bordered {bg}">\n'
cal += f'{self.formatmonthname(self.year, self.month, withyear=withyear)}\n'
cal += f'{self.formatweekheader()}\n'
for week in self.monthdays2calendar(self.year, self.month):
cal += f'{self.formatweek(week)}\n'
return cal
def set_event(self, queryset):
self.events = queryset
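# Usage sketch (assumes an event-like model with class_date/class_type/id fields;
# the model name and filter below are illustrative only):
#
#   cal = Calendar(year=2020, month=1, dark=True)
#   cal.set_event(Event.objects.filter(class_date__year=2020, class_date__month=1))
#   html = cal.formatmonth(withyear=True)  # render in the template with the |safe filter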
|
from loguru import logger
def info(info_msg):
logger.info(info_msg)
def error(error_msg):
logger.error(error_msg)
def debug(debug_msg):
logger.debug(debug_msg)
def warning(warn_msg):
logger.warning(warn_msg)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to set the version number wherever it's needed before a release."""
from __future__ import unicode_literals, print_function
import io
import os
import re
import sys
import glob
import subprocess
def sed_like_thing(pattern, repl, path):
"""Like re.sub but applies to a file instead of a string."""
with io.open(path, 'r', encoding='utf8') as inf:
data = inf.read()
data = re.sub(pattern, repl, data)
with io.open(path, 'w+', encoding='utf8') as outf:
outf.write(data)
if __name__ == "__main__":
inpf = raw_input if sys.version_info[0] == 2 else input
while True:
version = inpf("New version number (in format X.Y.Z): ").strip()
if version.startswith('v'):
print("ERROR: the version number must not start with v.")
else:
break
for doc in glob.glob(os.path.join("docs/*.txt")):
sed_like_thing(":Version: .*", ":Version: {0}".format(version), doc)
sed_like_thing("version='.+'", "version='{0}'".format(version), 'setup.py')
sed_like_thing("version = .*", "version = '{0}'".format(version), os.path.join('docs', 'sphinx', 'conf.py'))
sed_like_thing("release = .*", "release = '{0}'".format(version), os.path.join('docs', 'sphinx', 'conf.py'))
sed_like_thing('__version__ = ".*"', '__version__ = "{0}"'.format(version), os.path.join('nikola', '__init__.py'))
sed_like_thing('New in master', 'New in v{0}'.format(version), 'CHANGES.txt')
sed_like_thing(':Version: .*', ':Version: Nikola v{0}'.format(version), os.path.join('docs', 'man', 'nikola.rst'))
man = subprocess.check_output(["rst2man.py", os.path.join('docs', 'man', 'nikola.rst')])
with io.open(os.path.join('docs', 'man', 'nikola.1'), 'w', encoding='utf-8') as fh:
try:
man = man.decode('utf-8')
except AttributeError:
pass
fh.write(man)
subprocess.call(["gzip", "-f", os.path.join('docs', 'man', 'nikola.1')])
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fashion-MNIST dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import numpy as np
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.datasets.fashion_mnist.load_data')
def load_data():
"""Loads the Fashion-MNIST dataset.
This is a dataset of 60,000 28x28 grayscale images of 10 fashion categories,
along with a test set of 10,000 images. This dataset can be used as
a drop-in replacement for MNIST. The class labels are:
| Label | Description |
|:-----:|-------------|
| 0 | T-shirt/top |
| 1 | Trouser |
| 2 | Pullover |
| 3 | Dress |
| 4 | Coat |
| 5 | Sandal |
| 6 | Shirt |
| 7 | Sneaker |
| 8 | Bag |
| 9 | Ankle boot |
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
x_train, x_test: uint8 arrays of grayscale image data with shape
(num_samples, 28, 28).
y_train, y_test: uint8 arrays of labels (integers in range 0-9)
with shape (num_samples,).
License:
The copyright for Fashion-MNIST is held by Zalando SE.
Fashion-MNIST is licensed under the [MIT license](
https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
"""
dirname = os.path.join('datasets', 'fashion-mnist')
base = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
files = [
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
]
paths = []
for fname in files:
paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
return (x_train, y_train), (x_test, y_test)
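# Usage sketch (shapes follow the docstring above: 60,000 training and 10,000 test images):
#   (x_train, y_train), (x_test, y_test) = load_data()
#   assert x_train.shape == (60000, 28, 28) and x_test.shape == (10000, 28, 28)
#   assert y_train.dtype == np.uint8 and y_test.dtype == np.uint8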
|
from .graph_node import Node
from .graph import Graph
from .exceptions import GraphError
|
# Generated by Django 3.0.2 on 2020-01-20 12:15
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('api', '0012_auto_20200120_0659'),
]
operations = [
migrations.AddField(
model_name='vehiclechangehistory',
name='at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
# coding: utf-8
"""
MIT License
Copyright (c) 2019 Claude SIMON (https://q37.info/s/rmnmqd49)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from workshop.fr.ab import *
def go():
main()
|
#!/usr/bin/env python
"""Tests for grr.lib.flows.general.filetypes."""
import os
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.aff4_objects import filetypes as aff4_filetypes
from grr.lib.flows.general import filetypes
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import plist as rdf_plist
class TestPlistFlows(test_lib.FlowTestsBaseclass):
"""Tests the PlistValueFilter flow."""
def _RunFlow(self, flow, context=None, query=None, output=None):
client_mock = action_mocks.ActionMock("PlistQuery")
request = rdf_plist.PlistRequest(context=context, query=query)
request.pathspec.path = os.path.join(self.base_path, "test.plist")
request.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS
for _ in test_lib.TestFlowHelper(flow,
client_mock,
client_id=self.client_id,
token=self.token,
request=request,
output=output):
pass
def _CheckOutputAFF4Type(self, output):
# Check the output file is created
output_path = self.client_id.Add(output)
aff4.FACTORY.Open(output_path,
aff4_type=aff4_filetypes.AFF4PlistQuery,
token=self.token)
def testPlistValueFilter(self):
output = "analysis/plistvaluefilter_test"
self._RunFlow(filetypes.PlistValueFilter.__name__,
context="",
query="",
output=output)
self._CheckOutputAFF4Type(output)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
"""
Functions to help with download and basic processing of GPS data
"""
from datetime import datetime, timedelta
import io
import json
import logging
import multiprocessing
import os
import re
from typing import cast, Dict, Iterable, Optional, Sequence, Tuple
import zipfile
import numpy
import requests
import georinex
import xarray
from laika import AstroDog
from laika.dgps import get_station_position
from laika.downloader import download_cors_station, download_and_cache_file
from laika.gps_time import GPSTime
from laika.rinex_file import DownloadError
from tid import config, tec, types, util
LOG = logging.getLogger(__name__)
DENSE_TYPE = [
("tick", "i4"), # tick number the observation was made
("C1C", "f8"), # GNSS measurements, if available
("C2C", "f8"),
("L1C", "f8"),
("L2C", "f8"),
("sat_pos", "3f8"), # satellite position XYZ ECEF in meters
]
DOWNLOAD_WORKERS = 20 # how many processes to spawn for downloading files
# ecef locations for stations, so we can know what is nearby
with open(
os.path.dirname(__file__) + "/lookup_tables/station_locations.json", "rb"
) as f:
STATION_LOCATIONS = json.load(f)
# which network stations belong to, if we know, to speed up downloading
with open(
os.path.dirname(__file__) + "/lookup_tables/station_networks.json", "rb"
) as f:
STATION_NETWORKS = json.load(f)
conf = config.Configuration()
def get_nearby_stations(
dog: AstroDog, point: Sequence, dist: int = 400000
) -> Sequence[str]:
"""
Find all known/downloadable station names within a given distance from
the target point.
Args:
dog: laika AstroDog object
point: tuple of ECEF xyz location, in meters
dist: allowable distance from the target point, in meters
Returns:
a list of strings representing station names close to the target point
"""
cache_dir = dog.cache_dir
cors_pos_path = cache_dir + "cors_coord/cors_station_positions"
with open(cors_pos_path, "rb") as cors_pos:
# pylint:disable=unexpected-keyword-arg
# (confused about numpy, I guess)
cors_pos_dict = numpy.load(cors_pos, allow_pickle=True).item()
station_names = []
station_pos = []
for name, (_, pos, _) in cors_pos_dict.items():
station_names.append(name)
station_pos.append(pos)
for name, pos in STATION_LOCATIONS.items():
station_names.append(name)
station_pos.append(pos)
np_station_names = numpy.array(station_names)
np_station_pos = numpy.array(station_pos)
dists = numpy.sqrt(((np_station_pos - numpy.array(point)) ** 2).sum(1))
return list(np_station_names[numpy.where(dists < dist)[0]])
def _download_misc_igs_station(
dog: AstroDog, time: GPSTime, station_name: str
) -> Optional[str]:
"""
Downloader for non-CORS stations. Attempts to download rinex observables
for the given station and time
Should only be used internally by data_for_station
Args:
dog: laika AstroDog object
time: laika GPSTime object
        station_name: string representation of a station name
Returns:
string representing a path to the downloaded file
or None, if the file was not able to be downloaded
"""
cache_subdir = dog.cache_dir + "misc_igs_obs/"
t = time.as_datetime()
# different path formats...
folder_path = t.strftime("%Y/%j/")
filename = station_name + t.strftime("%j0.%yo")
url_bases = (
"ftp://garner.ucsd.edu/archive/garner/rinex/",
"ftp://data-out.unavco.org/pub/rinex/obs/",
)
try:
filepath = download_and_cache_file(
url_bases, folder_path, cache_subdir, filename, compression=".Z"
)
return filepath
except IOError:
url_bases = (
"ftp://igs.gnsswhu.cn/pub/gps/data/daily/",
"ftp://cddis.nasa.gov/gnss/data/daily/",
)
folder_path += t.strftime("%yo/")
try:
filepath = download_and_cache_file(
url_bases, folder_path, cache_subdir, filename, compression=".Z"
)
return filepath
except IOError:
return None
def _download_korean_station(
dog: AstroDog, time: GPSTime, station_name: str
) -> Optional[str]:
"""
Downloader for Korean stations. Attempts to download rinex observables
for the given station and time.
Should only be used internally by data_for_station
TODO: we can download from multiple stations at once and save some time here....
Args:
dog: laika AstroDog object
time: laika GPSTime object
        station_name: string representation of a station name
Returns:
string representing a path to the downloaded file
or None, if the file was not able to be downloaded
"""
json_url = "http://gnssdata.or.kr/download/createToZip.json"
zip_url = "http://gnssdata.or.kr/download/getZip.do?key=%d"
cache_subdir = dog.cache_dir + "korean_obs/"
t = time.as_datetime()
# different path formats...
folder_path = cache_subdir + t.strftime("%Y/%j/")
filename = folder_path + station_name + t.strftime("%j0.%yo")
if os.path.isfile(filename):
return filename
if not os.path.exists(folder_path):
os.makedirs(folder_path, exist_ok=True)
start_day = t.strftime("%Y%m%d")
postdata = {
"corsId": station_name.upper(),
"obsStDay": start_day,
"obsEdDay": start_day,
"dataTyp": util.DATA_RATE,
}
res = requests.post(json_url, data=postdata).text
if not res:
raise DownloadError
res_dat = json.loads(res)
if not res_dat.get("result", None):
raise DownloadError
key = res_dat["key"]
zipstream = requests.get(zip_url % key, stream=True)
with zipfile.ZipFile(io.BytesIO(zipstream.content)) as zipdat:
for zipf in zipdat.filelist:
with zipfile.ZipFile(io.BytesIO(zipdat.read(zipf))) as station:
for rinex in station.filelist:
if rinex.filename.endswith("o"):
with open(filename, "wb") as rinex_out:
rinex_out.write(station.read(rinex))
return filename
def _download_japanese_station(
dog: AstroDog, time: GPSTime, station_name: str
) -> Optional[str]:
"""
Downloader for Japanese stations. Attempts to download rinex observables
for the given station and time.
Should only be used internally by data_for_station
Args:
dog: laika AstroDog object
time: laika GPSTime object
        station_name: string representation of a station name
Returns:
string representing a path to the downloaded file
or None, if the file was not able to be downloaded
"""
cache_subdir = dog.cache_dir + "japanese_obs/"
t = time.as_datetime()
# different path formats...
folder_path = t.strftime("%Y/%j/")
filename = station_name + t.strftime("%j0.%yo")
url_bases = ("http://copyfighter.org:6670/japan/data/GR_2.11/",)
try:
filepath = download_and_cache_file(
url_bases, folder_path, cache_subdir, filename, compression=".gz"
)
return filepath
except IOError:
return None
def cors_get_station_lists_for_day(date: datetime) -> Iterable[str]:
"""
Given a date, returns the stations that the US CORS network
reports as available
"""
url = "https://geodesy.noaa.gov/corsdata/rinex/"
resp = requests.get(url + date.strftime("%Y/%j/"))
pat = '<a href="..../">([a-z0-9]{4})/</a>'
return re.findall(pat, resp.text)
def fetch_rinex_for_station(
dog: Optional[AstroDog], time: GPSTime, station_name: str
) -> Optional[str]:
"""
Given a particular time and station, get the rinex obs file that
corresponds to it
Args:
dog: laika AstroDog object or None
time: laika GPSTime object for the time in question
station_name: string of the station in question
station names are CORS names or similar (eg: 'slac')
Returns:
the string containing the file path, or None
"""
if dog is None:
dog = AstroDog(cache_dir=conf.cache_dir)
# handlers for specific networks
handlers = {
"Korea": _download_korean_station,
"Japan": _download_japanese_station,
}
network = STATION_NETWORKS.get(station_name, None)
# no special network, so try using whatever
if network is None:
# step 1: get the station rinex data
try:
rinex_obs_file = download_cors_station(
time, station_name, cache_dir=dog.cache_dir
)
except (KeyError, DownloadError):
# station position not in CORS map, try another thing
if station_name in STATION_LOCATIONS:
rinex_obs_file = _download_misc_igs_station(dog, time, station_name)
else:
return None
else:
rinex_obs_file = handlers[network](dog, time, station_name)
return rinex_obs_file
def location_for_station(
dog: AstroDog, time: GPSTime, station_name: str
) -> types.ECEF_XYZ:
"""
Get location for a particular station at a particular time.
Time is needed so we can look at RINEX files and sanity check
the location data.
Args:
dog: laika AstroDog object
time: laika GPSTime object for the time in question
station_name: string of the station in question
station names are CORS names or similar (eg: 'slac')
Returns:
        approximate x,y,z location in ECEF meters
Raises:
DownloadError if the RINEX could not be fetched
"""
rinex_obs_file = fetch_rinex_for_station(dog, time, station_name)
if rinex_obs_file is None:
raise DownloadError
# start with most accurate positions (from known databases)
approx_position = util.station_location_from_rinex(rinex_obs_file)
try:
station_pos = get_station_position(station_name, cache_dir=dog.cache_dir)
except KeyError:
station_pos = numpy.array(
STATION_LOCATIONS.get(station_name) or approx_position
)
    # while databases are more accurate, there are some cases of name collisions
# (eg Korea and US CORS may pick same 4 letter name). To resolve this, favor
# positions reported from RINEX files if there is a big (>100m) divergence
if station_pos is not None and approx_position is not None:
if numpy.linalg.norm(station_pos - approx_position) > 100:
LOG.warning(
"for station %s, we have large differences in position reports",
station_name,
)
station_pos = approx_position
return station_pos
def from_xarray_sat(rinex: xarray.Dataset, start_date: GPSTime) -> types.Observations:
"""
Convert the georinex xarray for a satellite to Observations
Args:
xarray: the georinex xarray thing
start_date: time at which tick 0 occurred
Returns:
Observations for the satellite
"""
# truncate to observations with data
rinex = rinex.dropna("time", how="all", subset=["C1"])
outp = numpy.zeros(rinex.dims["time"], dtype=DENSE_TYPE)
obs_map = {"C1C": "C1", "C2C": "C2", "C2P": "P2", "L1C": "L1", "L2C": "L2"}
for obs in ["C1C", "C2C", "L1C", "L2C"]:
# if the channel doesn't exist, set to NaN
if obs_map[obs] not in rinex:
outp[obs][:] = numpy.nan
else:
outp[obs][:] = rinex[obs_map[obs]]
# if the C2C channel is empty/crap, replace it with C2P
if numpy.all(numpy.isnan(outp["C2C"])):
outp["C2C"][:] = rinex[obs_map["C2P"]]
timedeltas = rinex["time"].astype(numpy.datetime64).to_numpy() - numpy.datetime64(
start_date.as_datetime()
)
outp["tick"] = (timedeltas / numpy.timedelta64(util.DATA_RATE, "s")).astype(int)
return cast(types.Observations, outp)
def from_xarray(rinex: xarray.Dataset, start_date: GPSTime) -> types.DenseMeasurements:
"""
Convert georinex's xarray format into our sparser format
Args:
rinex: the georinex xarray file
start_date: when tick 0 occurred
Returns:
dense raw gps data
"""
sv_dict_out = cast(types.DenseMeasurements, {})
for svid in rinex.sv.to_numpy():
sv_dict_out[svid] = from_xarray_sat(rinex.sel(sv=svid), start_date)
return sv_dict_out
def data_for_station(
dog: AstroDog,
time: GPSTime,
station_name: str,
start_date: GPSTime,
) -> types.DenseMeasurements:
"""
Get data from a particular station and time. Wrapper for data_for_station
inside of get_data
Args:
dog: laika AstroDog object
time: laika GPSTime object for the time in question
station_name: the station for which we want data
start_date: when index 0 occurred
Returns:
dense raw gps data
Raises:
DownloadError if the data could not be fetched
TODO: caching of the results on disk? or should that happen later?
"""
rinex_obs_file = fetch_rinex_for_station(dog, time, station_name)
if rinex_obs_file is None:
raise DownloadError
rinex = georinex.load(rinex_obs_file, interval=30)
return from_xarray(rinex, start_date)
def populate_sat_info(
dog: AstroDog,
start_time: GPSTime,
duration: timedelta,
station_dict: types.StationPrnMap[types.Observations],
) -> None:
"""
Populate the satellite locations for our measurements
Args:
dog: laika AstroDog to use
start_time: when the 0th tick occurs
duration: how long until the last tick
station_dict: mapping to the Observations that need correcting
TODO: can numba (or something) help us parallelize the lower loops?
"""
satellites = {sat: idx for idx, sat in enumerate(dog.get_all_sat_info(start_time))}
tick_count = int(duration.total_seconds() / util.DATA_RATE)
# get an accurate view of the satellites at 30 second intervals
sat_info = numpy.zeros(
(len(satellites), tick_count + 1), dtype=[("pos", "3f8"), ("vel", "3f8")]
)
for tick in range(tick_count + 1):
tick_info = dog.get_all_sat_info(start_time + util.DATA_RATE * tick)
for svid, info in tick_info.items():
sat_info[satellites[svid]][tick] = (info[0], info[1])
bad_datas = set()
for station in station_dict:
for sat in station_dict[station]:
if sat not in satellites:
# no info for this satellite, probably not orbiting, remove it
bad_datas.add((station, sat))
continue
ticks = station_dict[station][sat]["tick"]
time_delays = station_dict[station][sat]["C1C"] / tec.C
delta_pos = (
sat_info[satellites[sat]]["vel"][ticks] * time_delays[:, numpy.newaxis]
)
corrected_pos = sat_info[satellites[sat]]["pos"][ticks] - delta_pos
station_dict[station][sat]["sat_pos"][:] = corrected_pos
for station, sat in bad_datas:
del station_dict[station][sat]
def merge_data(
data1: types.DenseMeasurements, data2: types.DenseMeasurements
) -> types.DenseMeasurements:
"""
Merges two sets of dense measurements together
Args:
data1: the first (chronologically) set of data
data2: the second (chronologically) set of data
Returns:
the combined data
"""
combined = data1.copy()
for prn in data2:
# prn only has data in the second dataset
if prn not in data1:
combined[prn] = data2[prn]
# otherwise we need an actual merge
else:
combined[prn] = numpy.append(data1[prn], data2[prn])
return cast(types.DenseMeasurements, combined)
def populate_data(
stations: Iterable[str],
start_date: GPSTime,
duration: timedelta,
dog: AstroDog,
) -> Tuple[Dict[str, types.ECEF_XYZ], types.StationPrnMap[types.Observations]]:
"""
Download/populate the station data and station location info
Args:
stations: list of station names
        start_date: when the 0th tick occurs
        duration: how long after start_date to keep fetching data
dog: astro dog to use
Returns:
dictionary of station names to their locations,
dictionary of station names to sat names to their dense data
TODO: is this a good place to be caching results?
"""
# dict of station names -> XYZ ECEF locations in meters
station_locs: Dict[str, types.ECEF_XYZ] = {}
# dict of station names -> dict of prn -> numpy observation data
station_data = cast(types.StationPrnMap[types.Observations], {})
for station in stations:
gps_date = start_date
        while gps_date < start_date + duration.total_seconds():
try:
latest_data = data_for_station(
dog, gps_date, station, start_date=start_date
)
if station not in station_locs:
station_locs[station] = location_for_station(dog, gps_date, station)
except DownloadError:
continue
except IndexError:
print("index error: ", station)
continue
finally:
gps_date += (1 * util.DAYS).total_seconds()
if station not in station_data:
station_data[station] = latest_data
else:
# we've already got some data, so merge it together
# give mypy a hint here about our type aliases
station_data[station] = merge_data(
cast(types.DenseMeasurements, station_data[station]),
latest_data,
)
# didn't download data, ignore it
if station not in station_data:
continue
populate_sat_info(dog, start_date, duration, station_data)
return station_locs, station_data
def download_and_process(
argtuple: Tuple[GPSTime, str]
) -> Tuple[GPSTime, str, Optional[str]]:
"""
Fetch the data for a station at a date, return a path to the NetCDF4 version of it
Args:
argtuple: the date and station for which we want the data
Returns:
date requested, station requested, and the path to the nc file, or
None if it can't be retrieved
"""
date, station = argtuple
# first search for already processed NetCDF4 files
path_name = date.as_datetime().strftime(f"%Y/%j/{station}%j0.%yo.nc")
for cache_folder in ["misc_igs_obs", "japanese_obs", "korean_obs", "cors_obs"]:
fname = f"{conf.cache_dir}/{cache_folder}/{path_name}"
if os.path.exists(fname):
return date, station, fname
rinex_obs_file = fetch_rinex_for_station(None, date, station)
if rinex_obs_file is not None:
if os.path.exists(rinex_obs_file + ".nc"):
return date, station, rinex_obs_file + ".nc"
rinex = georinex.load(rinex_obs_file, interval=30)
rinex["time"] = rinex.time.astype(numpy.datetime64)
rinex.to_netcdf(rinex_obs_file + ".nc")
return date, station, rinex_obs_file + ".nc"
return date, station, None
def parallel_populate_data(
stations: Iterable[str],
start_date: GPSTime,
duration: timedelta,
dog: AstroDog,
) -> Tuple[Dict[str, types.ECEF_XYZ], types.StationPrnMap[types.Observations]]:
"""
Download/populate the station data and station location info
Args:
stations: list of station names
        start_date: when the 0th tick occurs
        duration: how long after start_date to keep fetching data
dog: astro dog to use
Returns:
dictionary of station names to their locations,
dictionary of station names to sat names to their dense data
TODO: is this a good place to be caching results?
"""
# dict of station names -> XYZ ECEF locations in meters
station_locs: Dict[str, types.ECEF_XYZ] = {}
# dict of station names -> dict of prn -> numpy observation data
station_data = cast(types.StationPrnMap[types.Observations], {})
to_download = []
for station in stations:
gps_date = start_date
while gps_date < start_date + duration.total_seconds():
to_download.append((gps_date, station))
gps_date += (1 * util.DAYS).total_seconds()
with multiprocessing.Pool(DOWNLOAD_WORKERS) as pool:
download_res = pool.map(download_and_process, to_download)
downloaded_map = {
# break it up like this to deal with GPSTime not being hashable
(start_date.week, start_date.tow, station): result
for start_date, station, result in download_res
}
for station in stations:
gps_date = start_date
while gps_date < start_date + duration.total_seconds():
result = downloaded_map.get((gps_date.week, gps_date.tow, station))
gps_date += (1 * util.DAYS).total_seconds()
if result is None:
continue
latest_data = xarray.load_dataset(result)
if station not in station_locs:
station_locs[station] = latest_data.position
dense_data = from_xarray(latest_data, start_date)
if station not in station_data:
station_data[station] = dense_data
else:
# we've already got some data, so merge it together
# give mypy a hint here about our type aliases
station_data[station] = merge_data(
cast(types.DenseMeasurements, station_data[station]),
dense_data,
)
# didn't download data, ignore it
if station not in station_data:
continue
populate_sat_info(dog, start_date, duration, station_data)
return station_locs, station_data
|
# SPDX-License-Identifier: BSD-3-Clause
"""
Utility to create a SoftFab results file from PyLint's JSON output.
For SoftFab, 'error' means the test results are incomplete, while 'warning'
means the results are complete but the content has problems. So if PyLint
ran successfully but finds errors in the code it examined, that means it did
its job correctly and the SoftFab result will be 'warning'.
"""
from collections import Counter
import json
def results_from_exit_code(exit_code):
"""
Return a results dictionary based on PyLint's exit code.
https://pylint.readthedocs.io/en/latest/user_guide/run.html#exit-codes
"""
# Incomplete results.
if exit_code & 32:
return dict(result="error", summary="PyLint did not complete run")
if exit_code & 1:
return dict(result="error", summary="PyLint encountered fatal error")
if exit_code & ~63:
return dict(result="error", summary=f"Unknown PyLint exit code: {exit_code:d}")
# Content problems, from more to less urgent.
# I'm putting convention messages before refactor messages because
# the former can typically be fixed quicker.
if exit_code & 2:
return dict(result="warning", summary="PyLint found errors")
if exit_code & 4:
return dict(result="warning", summary="PyLint found warnings")
if exit_code & 16:
return dict(result="warning", summary="PyLint found broken conventions")
if exit_code & 8:
return dict(result="warning", summary="PyLint found refactor candidates")
return dict(result="ok", summary="PyLint found no issues")
def results_from_json(json_path):
"""Return a results dictionary based on a PyLint JSON output file."""
# Read and parse JSON file.
try:
with open(str(json_path), encoding="utf-8") as inp:
data = json.load(inp)
except OSError as ex:
return dict(result="error", summary=f"Error reading JSON: {ex}")
except ValueError as ex:
return dict(result="error", summary=f"Error parsing JSON: {ex}")
# Count number of issues of each type.
counts = Counter()
try:
if isinstance(data, list):
# PyLint's native JSON format.
messages = data
elif isinstance(data, dict):
# Extended JSON format from pylint_json2html.
messages = data["messages"]
else:
raise TypeError(f"Bad top-level type: {type(data).__name__}")
for message in messages:
counts[message["type"]] += 1
except Exception as ex: # pylint: disable=broad-except
return dict(result="error", summary=f"Error processing JSON: {ex}")
# In case of a fatal problem, the results may be incomplete, so stop
# here to avoid reporting incorrect information.
if counts["fatal"]:
return dict(result="error", summary="PyLint encountered fatal error")
# Prepare summary and gather mid-level data.
results = {}
issues = []
for msg_type in ("error", "warning", "convention", "refactor"):
count = counts[msg_type]
results[f"data.{msg_type}"] = str(count)
if count:
issues.append(f"{count} {msg_type}{'' if count == 1 else 's'}")
# Gather more mid-level data when using extended JSON format.
if isinstance(data, dict):
try:
stats = data["stats"]
for key in (
"module",
"class",
"method",
"function",
"statement",
"undocumented_module",
"undocumented_class",
"undocumented_method",
"undocumented_function",
):
results[f"data.{key}"] = str(stats[key])
except Exception as ex: # pylint: disable=broad-except
return dict(result="error", summary=f"Error processing extended JSON: {ex}")
# Summarize the findings.
if issues:
results["result"] = "warning"
results["summary"] = f"PyLint found {', '.join(issues)}"
else:
results["result"] = "ok"
results["summary"] = "PyLint found no issues"
return results
def gather_results(json_path, exit_code=0):
"""
Return a results dictionary based on PyLint's exit code and
a PyLint JSON output file.
"""
results = results_from_exit_code(exit_code)
if results["result"] != "error":
results = results_from_json(json_path)
return results
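# Usage sketch (the pylint invocation and file name below are illustrative, not part
# of this module; only gather_results() is defined here):
#
#   import subprocess, sys
#   with open("pylint.json", "w", encoding="utf-8") as out:
#       proc = subprocess.run(
#           [sys.executable, "-m", "pylint", "--output-format=json", "mypackage"],
#           stdout=out,
#       )
#   results = gather_results("pylint.json", proc.returncode)
#   print(results["result"], "-", results["summary"])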
|
# -*- coding:utf-8 -*-
import pytest
cands = [
['>=1.2.3', '2.0.0-pre', False, False],
]
@pytest.mark.parametrize("range_, version, loose, expected", cands)
def test_it(range_, version, loose, expected):
from semver import make_semver, satisfies
# assert expected == make_semver(range_, loose=loose).test(version)
assert expected == satisfies(version, range_, loose=loose)
|
"""
Core functions used by unumpy and some of its submodules.
(c) 2010-2013 by Eric O. LEBIGOT (EOL).
"""
# The functions found in this module cannot be defined in unumpy or
# its submodule: this creates import loops, when unumpy explicitly
# imports one of the submodules in order to make it available to the
# user.
from __future__ import division
# Standard modules:
import sys
# 3rd-party modules:
import numpy
from numpy.core import numeric
# Local modules:
import uncertainties
from uncertainties import umath
from uncertainties import __author__
__all__ = [
# Factory functions:
'uarray', 'umatrix',
# Utilities:
'nominal_values', 'std_devs',
# Classes:
'matrix'
]
###############################################################################
# Utilities:
# nominal_values() and std_devs() are defined as functions (instead of
# as additional methods of the unumpy.matrix class) because the user
# might well directly build arrays of numbers with uncertainties
# without going through the factory functions found in this module
# (uarray() and umatrix()). Thus,
# numpy.array([uncertainties.ufloat((1, 0.1))]) would not
# have a nominal_values() method. Adding such a method to, say,
# unumpy.matrix, would break the symmetry between NumPy arrays and
# matrices (no nominal_values() method), and objects defined in this
# module.
# ! Warning: the __doc__ is set, but help(nominal_values) does not
# display it, but instead displays the documentation for the type of
# nominal_values (i.e. the documentation of its class):
to_nominal_values = numpy.vectorize(
uncertainties.nominal_value,
otypes=[float], # Because vectorize() has side effects (dtype setting)
doc=("Applies uncertainties.nominal_value to the elements of"
" a NumPy (or unumpy) array (this includes matrices)."))
to_std_devs = numpy.vectorize(
uncertainties.std_dev,
otypes=[float], # Because vectorize() has side effects (dtype setting)
doc=("Returns the standard deviation of the numbers with uncertainties"
" contained in a NumPy array, or zero for other objects."))
def unumpy_to_numpy_matrix(arr):
"""
    If arr is a unumpy.matrix, it is converted to a numpy.matrix.
Otherwise, it is returned unchanged.
"""
return arr.view(numpy.matrix) if isinstance(arr, matrix) else arr
def nominal_values(arr):
"""
Returns the nominal values of the numbers in NumPy array arr.
Elements that are not uncertainties.AffineScalarFunc are passed
through untouched (because a numpy.array can contain numbers with
uncertainties and pure floats simultaneously).
If arr is of type unumpy.matrix, the returned array is a
numpy.matrix, because the resulting matrix does not contain
numbers with uncertainties.
"""
return unumpy_to_numpy_matrix(to_nominal_values(arr))
def std_devs(arr):
"""
Returns the standard deviations of the numbers in NumPy array arr.
Elements that are not uncertainties.AffineScalarFunc are given a
    zero uncertainty (because a numpy.array can contain numbers with
    uncertainties and pure floats simultaneously).
If arr is of type unumpy.matrix, the returned array is a
numpy.matrix, because the resulting matrix does not contain
numbers with uncertainties.
"""
return unumpy_to_numpy_matrix(to_std_devs(arr))
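# Usage sketch for the two helpers above, assuming this (Python 2 era)
# version of the API in which uarray() takes a single (values, std_devs)
# tuple; the numbers are purely illustrative:
#     arr = uarray(([1.0, 2.0], [0.1, 0.2]))
#     nominal_values(arr)   # -> array([ 1.,  2.])
#     std_devs(arr)         # -> array([ 0.1,  0.2])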
###############################################################################
def derivative(u, var):
"""
Returns the derivative of u along var, if u is an
uncertainties.AffineScalarFunc instance, and if var is one of the
variables on which it depends. Otherwise, return 0.
"""
if isinstance(u, uncertainties.AffineScalarFunc):
try:
return u.derivatives[var]
except KeyError:
return 0.
else:
return 0.
def wrap_array_func(func):
"""
Returns a version of the function func() that works even when
func() is given a NumPy array that contains numbers with
uncertainties.
func() is supposed to return a NumPy array.
This wrapper is similar to uncertainties.wrap(), except that it
handles an array argument instead of float arguments.
func -- version that takes and returns a single NumPy array.
"""
@uncertainties.set_doc("""\
Version of %s(...) that works even when its first argument is a NumPy
array that contains numbers with uncertainties.
Warning: elements of the first argument array that are not
AffineScalarFunc objects must not depend on uncertainties.Variable
objects in any way. Otherwise, the dependence of the result in
uncertainties.Variable objects will be incorrect.
Original documentation:
%s""" % (func.__name__, func.__doc__))
def wrapped_func(arr, *args):
# Nominal value:
arr_nominal_value = nominal_values(arr)
func_nominal_value = func(arr_nominal_value, *args)
# The algorithm consists in numerically calculating the derivatives
# of func:
# Variables on which the array depends are collected:
variables = set()
for element in arr.flat:
# floats, etc. might be present
if isinstance(element, uncertainties.AffineScalarFunc):
variables |= set(element.derivatives.iterkeys())
# If the matrix has no variables, then the function value can be
# directly returned:
if not variables:
return func_nominal_value
# Calculation of the derivatives of each element with respect
# to the variables. Each element must be independent of the
# others. The derivatives have the same shape as the output
# array (which might differ from the shape of the input array,
# in the case of the pseudo-inverse).
derivatives = numpy.vectorize(lambda _: {})(func_nominal_value)
for var in variables:
# A basic assumption of this package is that the user
# guarantees that uncertainties cover a zone where
# evaluated functions are linear enough. Thus, numerical
# estimates of the derivative should be good over the
# standard deviation interval. This is true for the
# common case of a non-zero standard deviation of var. If
# the standard deviation of var is zero, then var has no
# impact on the uncertainty of the function func being
# calculated: an incorrect derivative has no impact. One
# scenario can give incorrect results, however, but it
# should be extremely uncommon: the user defines a
# variable x with 0 standard deviation, sets y = func(x)
# through this routine, changes the standard deviation of
# x, and prints y; in this case, the uncertainty on y
# might be incorrect, because this program had no idea of
# the scale on which func() is linear, when it calculated
# the numerical derivative.
# The standard deviation might be numerically too small
# for the evaluation of the derivative, though: we set the
# minimum variable shift.
shift_var = max(var._std_dev/1e5, 1e-8*abs(var._nominal_value))
# An exceptional case is that of var being exactly zero.
# In this case, an arbitrary shift is used for the
# numerical calculation of the derivative. The resulting
# derivative value might be quite incorrect, but this does
# not matter as long as the uncertainty of var remains 0,
# since it is, in this case, a constant.
if not shift_var:
shift_var = 1e-8
# Shift of all the elements of arr when var changes by shift_var:
shift_arr = array_derivative(arr, var)*shift_var
# Origin value of array arr when var is shifted by shift_var:
shifted_arr_values = arr_nominal_value + shift_arr
func_shifted = func(shifted_arr_values, *args)
numerical_deriv = (func_shifted-func_nominal_value)/shift_var
# Update of the list of variables and associated
# derivatives, for each element:
for (derivative_dict, derivative_value) in (
zip(derivatives.flat, numerical_deriv.flat)):
if derivative_value:
derivative_dict[var] = derivative_value
        # Numbers with uncertainties are built from the result:
return numpy.vectorize(uncertainties.AffineScalarFunc)(
func_nominal_value, derivatives)
# It is easier to work with wrapped_func, which represents a
# wrapped version of 'func', when it bears the same name as
# 'func' (the name is used by repr(wrapped_func)).
wrapped_func.__name__ = func.__name__
return wrapped_func
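# Usage sketch for wrap_array_func(), assuming the uarray() factory defined
# below; numpy.cumsum is only an illustrative choice of array function:
#     cumsum_u = wrap_array_func(numpy.cumsum)
#     arr = uarray(([1.0, 2.0, 3.0], [0.1, 0.2, 0.3]))
#     result = cumsum_u(arr)  # ndarray of AffineScalarFunc objects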
###############################################################################
# Arrays
# Vectorized creation of an array of variables:
# ! Looking up uncertainties.Variable beforehand through '_Variable =
# uncertainties.Variable' does not result in a significant speed up:
_uarray = numpy.vectorize(lambda v, s: uncertainties.Variable(v, s),
otypes=[object])
def uarray((values, std_devs)):
"""
Returns a NumPy array of numbers with uncertainties
initialized with the given nominal values and standard
deviations.
values, std_devs -- valid arguments for numpy.array, with
identical shapes (list of numbers, list of lists, numpy.ndarray,
etc.).
"""
return _uarray(values, std_devs)
###############################################################################
def array_derivative(array_like, var):
"""
Returns the derivative of the given array with respect to the
given variable.
The returned derivative is a Numpy ndarray of the same shape as
array_like, that contains floats.
array_like -- array-like object (list, etc.) that contains
scalars or numbers with uncertainties.
var -- Variable object.
"""
return numpy.vectorize(lambda u: derivative(u, var),
# The type is set because an
# integer derivative should not
# set the output type of the
# array:
otypes=[float])(array_like)
def func_with_deriv_to_uncert_func(func_with_derivatives):
"""
Returns a function that can be applied to array-like objects that
contain numbers with uncertainties (lists, lists of lists, Numpy
arrays, etc.).
func_with_derivatives -- defines a function that takes array-like
objects containing scalars and returns an array. Both the value
and the derivatives of this function with respect to multiple
scalar parameters are calculated by func_with_derivatives().
func_with_derivatives(arr, input_type, derivatives, *args) returns
an iterator. The first element is the value of the function at
point 'arr' (with the correct type). The following elements are
arrays that represent the derivative of the function for each
derivative array from the iterator 'derivatives'.
func_with_derivatives takes the following arguments:
arr -- Numpy ndarray of scalars where the function must be
evaluated.
input_type -- type of the input array-like object. This type is
used for determining the type that the function should return.
derivatives -- iterator that returns the derivatives of the
argument of the function with respect to multiple scalar
variables. func_with_derivatives() returns the derivatives of
the defined function with respect to these variables.
args -- additional arguments that define the result (example:
for the pseudo-inverse numpy.linalg.pinv: numerical cutoff).
Examples of func_with_derivatives: inv_with_derivatives().
"""
def wrapped_func(array_like, *args):
"""
array_like -- array-like object that contains numbers with
uncertainties (list, Numpy ndarray or matrix, etc.).
args -- additional arguments that are passed directly to
func_with_derivatives.
"""
# So that .flat works even if array_like is a list. Later
# useful for faster code:
array_version = numpy.asarray(array_like)
# Variables on which the array depends are collected:
variables = set()
for element in array_version.flat:
# floats, etc. might be present
if isinstance(element, uncertainties.AffineScalarFunc):
variables |= set(element.derivatives.iterkeys())
array_nominal = nominal_values(array_version)
# Function value, and derivatives at array_nominal (the
# derivatives are with respect to the variables contained in
# array_like):
func_and_derivs = func_with_derivatives(
array_nominal,
type(array_like),
(array_derivative(array_version, var) for var in variables),
*args)
func_nominal_value = func_and_derivs.next()
if not variables:
return func_nominal_value
# The result is built progressively, with the contribution of
# each variable added in turn:
# Calculation of the derivatives of the result with respect to
# the variables.
derivatives = numpy.array(
[{} for _ in xrange(func_nominal_value.size)], dtype=object)
derivatives.resize(func_nominal_value.shape)
# Memory-efficient approach. A memory-hungry approach would
        # be to calculate the matrix derivatives with respect to all
# variables and then combine them into a matrix of
# AffineScalarFunc objects. The approach followed here is to
# progressively build the matrix of derivatives, by
# progressively adding the derivatives with respect to
# successive variables.
for (var, deriv_wrt_var) in zip(variables, func_and_derivs):
# Update of the list of variables and associated
# derivatives, for each element:
for (derivative_dict, derivative_value) in zip(
derivatives.flat, deriv_wrt_var.flat):
if derivative_value:
derivative_dict[var] = derivative_value
        # An array of numbers with uncertainties is built from the
# result:
result = numpy.vectorize(uncertainties.AffineScalarFunc)(
func_nominal_value, derivatives)
# Numpy matrices that contain numbers with uncertainties are
# better as unumpy matrices:
if isinstance(result, numpy.matrix):
result = result.view(matrix)
return result
return wrapped_func
########## Matrix inverse
def inv_with_derivatives(arr, input_type, derivatives):
"""
Defines the matrix inverse and its derivatives.
See the definition of func_with_deriv_to_uncert_func() for its
detailed semantics.
"""
inverse = numpy.linalg.inv(arr)
# The inverse of a numpy.matrix is a numpy.matrix. It is assumed
# that numpy.linalg.inv is such that other types yield
# numpy.ndarrays:
if issubclass(input_type, numpy.matrix):
inverse = inverse.view(numpy.matrix)
yield inverse
# It is mathematically convenient to work with matrices:
inverse_mat = numpy.asmatrix(inverse)
# Successive derivatives of the inverse:
for derivative in derivatives:
derivative_mat = numpy.asmatrix(derivative)
yield -inverse_mat * derivative_mat * inverse_mat
_inv = func_with_deriv_to_uncert_func(inv_with_derivatives)
_inv.__doc__ = """\
Version of numpy.linalg.inv that works with array-like objects
that contain numbers with uncertainties.
    The result is a unumpy.matrix if numpy.linalg.inv would return a
matrix for the array of nominal values.
Analytical formulas are used.
Original documentation:
%s
""" % numpy.linalg.inv.__doc__
########## Matrix pseudo-inverse
def pinv_with_derivatives(arr, input_type, derivatives, rcond):
"""
Defines the matrix pseudo-inverse and its derivatives.
Works with real or complex matrices.
See the definition of func_with_deriv_to_uncert_func() for its
detailed semantics.
"""
inverse = numpy.linalg.pinv(arr, rcond)
# The pseudo-inverse of a numpy.matrix is a numpy.matrix. It is
# assumed that numpy.linalg.pinv is such that other types yield
# numpy.ndarrays:
if issubclass(input_type, numpy.matrix):
inverse = inverse.view(numpy.matrix)
yield inverse
# It is mathematically convenient to work with matrices:
inverse_mat = numpy.asmatrix(inverse)
# Formula (4.12) from The Differentiation of Pseudo-Inverses and
# Nonlinear Least Squares Problems Whose Variables
# Separate. Author(s): G. H. Golub and V. Pereyra. Source: SIAM
# Journal on Numerical Analysis, Vol. 10, No. 2 (Apr., 1973),
# pp. 413-432
# See also
# http://mathoverflow.net/questions/25778/analytical-formula-for-numerical-derivative-of-the-matrix-pseudo-inverse
# Shortcuts. All the following factors should be numpy.matrix objects:
PA = arr*inverse_mat
AP = inverse_mat*arr
factor21 = inverse_mat*inverse_mat.H
factor22 = numpy.eye(arr.shape[0])-PA
factor31 = numpy.eye(arr.shape[1])-AP
factor32 = inverse_mat.H*inverse_mat
# Successive derivatives of the inverse:
for derivative in derivatives:
derivative_mat = numpy.asmatrix(derivative)
term1 = -inverse_mat*derivative_mat*inverse_mat
derivative_mat_H = derivative_mat.H
term2 = factor21*derivative_mat_H*factor22
term3 = factor31*derivative_mat_H*factor32
yield term1+term2+term3
# Default rcond argument for the generalization of numpy.linalg.pinv:
try:
# Python 2.6+:
_pinv_default = numpy.linalg.pinv.__defaults__[0]
except AttributeError:
_pinv_default = 1e-15
_pinv_with_uncert = func_with_deriv_to_uncert_func(pinv_with_derivatives)
@uncertainties.set_doc("""
Version of numpy.linalg.pinv that works with array-like objects
that contain numbers with uncertainties.
The result is a unumpy.matrix if numpy.linalg.pinv would return a
matrix for the array of nominal values.
Analytical formulas are used.
Original documentation:
%s
""" % numpy.linalg.pinv.__doc__)
def _pinv(array_like, rcond=_pinv_default):
return _pinv_with_uncert(array_like, rcond)
########## Matrix class
class matrix(numpy.matrix):
# The name of this class is the same as NumPy's, which is why it
# does not follow PEP 8.
"""
Class equivalent to numpy.matrix, but that behaves better when the
matrix contains numbers with uncertainties.
"""
def __rmul__(self, other):
        # ! NumPy's matrix __rmul__ apparently uses a restrictive
# dot() function that cannot handle the multiplication of a
# scalar and of a matrix containing objects (when the
# arguments are given in this order). We go around this
# limitation:
if numeric.isscalar(other):
return numeric.dot(self, other)
else:
return numeric.dot(other, self) # The order is important
# The NumPy doc for getI is empty:
# @uncertainties.set_doc(numpy.matrix.getI.__doc__)
def getI(self):
"Matrix inverse of pseudo-inverse"
# numpy.matrix.getI is OK too, but the rest of the code assumes that
# numpy.matrix.I is a property object anyway:
M, N = self.shape
if M == N:
func = _inv
else:
func = _pinv
return func(self)
# ! In Python >= 2.6, this could be simplified as:
# I = numpy.matrix.I.getter(__matrix_inverse)
I = property(getI, numpy.matrix.I.fset, numpy.matrix.I.fdel,
numpy.matrix.I.__doc__)
@property
def nominal_values(self):
"""
Nominal value of all the elements of the matrix.
"""
return nominal_values(self)
std_devs = std_devs
def umatrix(*args):
"""
Constructs a matrix that contains numbers with uncertainties.
The input data is the same as for uarray(...): a tuple with the
nominal values, and the standard deviations.
The returned matrix can be inverted, thanks to the fact that it is
a unumpy.matrix object instead of a numpy.matrix one.
"""
return uarray(*args).view(matrix)
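# Usage sketch for umatrix() and the matrix class above; the values are
# illustrative. The .I property dispatches to the uncertainty-aware inverse
# (or pseudo-inverse, for non-square matrices) defined in this module:
#     m = umatrix(([[1.0, 2.0], [3.0, 4.0]], [[0.1, 0.1], [0.1, 0.1]]))
#     m_inv = m.I                # unumpy.matrix of numbers with uncertainties
#     nominal_values(m_inv)      # plain numpy.matrix of the nominal inverse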
###############################################################################
def define_vectorized_funcs():
"""
Defines vectorized versions of functions from uncertainties.umath.
Some functions have their name translated, so as to follow NumPy's
convention (example: math.acos -> numpy.arccos).
"""
this_module = sys.modules[__name__]
# NumPy does not always use the same function names as the math
# module:
func_name_translations = dict(
(f_name, 'arc'+f_name[1:])
for f_name in ['acos', 'acosh', 'asin', 'atan', 'atan2', 'atanh'])
new_func_names = [func_name_translations.get(function_name, function_name)
for function_name in umath.many_scalars_to_scalar_funcs]
for (function_name, unumpy_name) in zip(
umath.many_scalars_to_scalar_funcs, new_func_names):
# ! The newly defined functions (uncertainties.unumpy.cos, etc.)
# do not behave exactly like their NumPy equivalent (numpy.cos,
# etc.): cos(0) gives an array() and not a
# numpy.float... (equality tests succeed, though).
func = getattr(umath, function_name)
setattr(
this_module, unumpy_name,
numpy.vectorize(func,
# If by any chance a function returns,
# in a particular case, an integer,
# side-effects in vectorize() would
# fix the resulting dtype to integer,
# which is not what is wanted:
otypes=[object],
doc="""\
Vectorized version of umath.%s.
Original documentation:
%s""" % (function_name, func.__doc__)))
__all__.append(unumpy_name)
define_vectorized_funcs()
|
import os
def get_cmap():
"""Gets the colormap (default: ``viridis``)
The colormap can be set by the environment variable ``TTSLEARN_CMAP``
for convenience.
Returns:
str: The name of the current colormap.
Examples:
.. ipython::
In [1]: from ttslearn.notebook import get_cmap
In [2]: get_cmap()
Out[2]: 'viridis'
"""
return os.environ.get("TTSLEARN_CMAP", "viridis")
def init_plot_style():
"""Initializes the plotting style."""
import matplotlib.pyplot as plt
if get_cmap() == "gray":
plt.style.use("grayscale")
def savefig(name, dpi=350, *args, **kwargs):
"""Saves the figure to a file.
    By default, a figure is saved as a .png file. The extension of the file can be set by
    the environment variable ``TTSLEARN_FIG_EXT`` for convenience.
    If the environment variable ``TTSLEARN_NO_SAVEFIG`` is set, figures will not be saved
    and this function does nothing.
Args:
name (str): The name of the file.
dpi (int): The resolution of the image.
args: Additional arguments for plt.savefig.
kwargs: Additional keyword arguments for plt.savefig.
"""
import matplotlib.pyplot as plt
if os.environ.get("TTSLEARN_NO_SAVEFIG", 0):
return # no op
fig_ext = os.environ.get("TTSLEARN_FIG_EXT", ".png")
    fig_dir = os.path.dirname(name)
    if fig_dir and not os.path.exists(fig_dir):
        os.makedirs(fig_dir)
plt.savefig(name + fig_ext, *args, dpi=dpi, bbox_inches="tight", **kwargs)
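# A minimal usage sketch, assuming matplotlib is installed; the file name
# "figures/example" is a hypothetical choice (the extension is appended by
# savefig according to TTSLEARN_FIG_EXT).
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    init_plot_style()
    plt.plot([0, 1, 2], [0, 1, 4])
    savefig("figures/example")  # writes figures/example.png by default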
|
# Sentinel-2 package
import ee
import math
import datetime
import os, sys
from utils import *
import sun_angles
import view_angles
import time
class env(object):
def __init__(self):
"""Initialize the environment."""
# Initialize the Earth Engine object, using the authentication credentials.
ee.Initialize()
self.dem = ee.Image("JAXA/ALOS/AW3D30_V1_1").select(["AVE"])
self.epsg = "EPSG:4326"
##########################################
# variable for the landsat data request #
##########################################
self.metadataCloudCoverMax = 30;
##########################################
# Export variables #
##########################################
#self.assetId ="projects/Sacha/PreprocessedData/L8_Biweekly_V4/"
self.assetId ="projects/Sacha/PreprocessedData/TOA_composites"
self.name = "LSS2_ECUADOR_ANNUAL_MEDIAN_2018_000365"
self.exportScale = 20
##########################################
# variable for the shadowMask algorithm #
##########################################
# zScoreThresh: Threshold for cloud shadow masking- lower number masks out
# less. Between -0.8 and -1.2 generally works well
self.zScoreThresh = -0.9
# shadowSumThresh: Sum of IR bands to include as shadows within TDOM and the
# shadow shift method (lower number masks out less)
self.shadowSumThresh = 0.30;
# contractPixels: The radius of the number of pixels to contract (negative buffer) clouds and cloud shadows by. Intended to eliminate smaller cloud
# patches that are likely errors (1.5 results in a -1 pixel buffer)(0.5 results in a -0 pixel buffer)
# (1.5 or 2.5 generally is sufficient)
self.contractPixels = 1.5;
# dilatePixels: The radius of the number of pixels to dilate (buffer) clouds
# and cloud shadows by. Intended to include edges of clouds/cloud shadows
# that are often missed (1.5 results in a 1 pixel buffer)(0.5 results in a 0 pixel buffer)
# (2.5 or 3.5 generally is sufficient)
self.dilatePixels = 3.25;
##########################################
# variable for cloudScore algorithm #
##########################################
# 9. Cloud and cloud shadow masking parameters.
# If cloudScoreTDOM is chosen
# cloudScoreThresh: If using the cloudScoreTDOMShift method-Threshold for cloud
# masking (lower number masks more clouds. Between 10 and 30 generally works best)
self.cloudScoreThresh = 30;
# Percentile of cloud score to pull from time series to represent a minimum for
# the cloud score over time for a given pixel. Reduces commission errors over
# cool bright surfaces. Generally between 5 and 10 works well. 0 generally is a bit noisy
self.cloudScorePctl = 8
##########################################
# variable for terrain algorithm #
##########################################
self.terrainScale = 600
##########################################
# variable band selection #
##########################################
self.divideBandsLandsat = ee.List(['blue','green','red','nir','swir1','swir2'])
self.bandNamesLandsat = ee.List(['blue','green','red','nir','swir1','thermal','swir2','qa'])
self.sensorBandDictLandsatSR = ee.Dictionary({'L8' : ee.List([1,2,3,4,5,9,6,10]),\
'L7' : ee.List([0,1,2,3,4,5,8,9])})
self.s2BandsIn = ee.List(['QA60','B1','B2','B3','B4','B5','B6','B7','B8','B8A','B9','B10','B11','B12'])
self.s2BandsOut = ee.List(['QA60','cb','blue','green','red','re1','re2','re3','nir','nir2','waterVapor','cirrus','swir1','swir2'])
self.divideBands = ee.List(['blue','green','red','re1','re2','re3','nir','nir2','cb','cirrus','swir1','swir2','waterVapor'])
self.medianIncludeBands = ee.List(['blue','green','red','re1','re2','re3','nir','nir2','cb','cirrus','swir1','swir2','waterVapor'])
##########################################
# enable / disable modules #
##########################################
self.cloudMask = True
self.QAcloudMask = True
self.shadowMask = True
self.brdfCorrect = False
self.terrainCorrection = False
class Landsat():
def __init__(self):
"""Initialize the Surfrace Reflectance app."""
# get the environment
self.env = env()
def main(self,studyArea,startDate,endDate):
self.env.startDate = startDate
self.env.endDate = endDate
self.studyArea = studyArea
landsat8 = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA').filterDate(self.env.startDate,self.env.endDate).filterBounds(studyArea)
landsat8 = landsat8.filterMetadata('CLOUD_COVER','less_than',self.env.metadataCloudCoverMax)
landsat8 = landsat8.select(self.env.sensorBandDictLandsatSR.get('L8'),self.env.bandNamesLandsat)
landsat7 = ee.ImageCollection('LANDSAT/LE07/C01/T1_TOA').filterDate(self.env.startDate,self.env.endDate).filterBounds(studyArea)
landsat7 = landsat7.filterMetadata('CLOUD_COVER','less_than',self.env.metadataCloudCoverMax)
landsat7 = landsat7.select(self.env.sensorBandDictLandsatSR.get('L7'),self.env.bandNamesLandsat)
landsat7 = landsat7.map(self.HarmonizeLandsat7)
landsat8 = landsat8.map(self.HarmonizeLandsat8)
#landsat = landsat7.merge(landsat8)
landsat = landsat8
if landsat.size().getInfo() > 0:
# mask clouds using the QA band
#if self.env.maskSR == True:
#print "removing clouds"
#landsat = landsat.map(self.CloudMaskSRL8)
# mask clouds using cloud mask function
if self.env.shadowMask == True:
#print "shadow masking"
self.fullCollection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA').filterBounds(studyArea).filter(ee.Filter.lt("CLOUD_COVER",30))\
.select(self.env.sensorBandDictLandsatSR.get('L8'),self.env.bandNamesLandsat)
l7 = ee.ImageCollection('LANDSAT/LE07/C01/T1_TOA').filterBounds(studyArea).filter(ee.Filter.lt("CLOUD_COVER",30))\
.select(self.env.sensorBandDictLandsatSR.get('L7'),self.env.bandNamesLandsat)
self.fullCollection = self.fullCollection.merge(l7)
landsat = self.maskShadows(landsat)
#landsat = landsat.map(self.scaleLandsat).map(self.addDateYear)
landsat = landsat.map(self.addDateYear)
# mask clouds using cloud mask function
if self.env.cloudMask == True:
#print "removing some more clouds"
landsat = landsat.map(self.maskClouds)
if self.env.brdfCorrect == True:
landsat = landsat.map(self.brdf)
if self.env.terrainCorrection == True:
landsat = ee.ImageCollection(landsat.map(self.terrain))
return landsat
def addDateYear(self,img):
#add a date and year band
date = ee.Date(img.get("system:time_start"))
day = date.getRelative('day','year').add(1);
yr = date.get('year');
mk = img.mask().reduce(ee.Reducer.min());
img = img.addBands(ee.Image.constant(day).mask(mk).uint16().rename(['date']));
img = img.addBands(ee.Image.constant(yr).mask(mk).uint16().rename(['year']));
return img;
def CloudMaskSRL8(self,img):
"""apply cf-mask Landsat"""
QA = img.select("qa")
cloud = QA.bitwiseAnd(4).neq(0);
return img.updateMask(cloud.Not()).copyProperties(img)
def scaleLandsat(self,img):
"""Landast is scaled by factor 0.0001 """
thermal = img.select(ee.List(['thermal'])).multiply(0.1)
scaled = ee.Image(img).select(self.env.divideBandsLandsat).multiply(ee.Number(0.0001))
return img.select(['TDOMMask']).addBands(scaled).addBands(thermal)
def reScaleLandsat(self,img):
"""Landast is scaled by factor 0.0001 """
noScaleBands = ee.List(['date','year','TDOMMask','cloudMask','count'])
noScale = ee.Image(img).select(noScaleBands)
thermalBand = ee.List(['thermal'])
thermal = ee.Image(img).select(thermalBand).multiply(10)
otherBands = ee.Image(img).bandNames().removeAll(thermalBand).removeAll(noScaleBands)
scaled = ee.Image(img).select(otherBands).divide(0.0001)
image = ee.Image(scaled.addBands([thermal,noScale])).int16()
return image.copyProperties(img)
def maskClouds(self,img):
"""
        Computes spectral indices of cloudiness and takes the minimum of them.
        Each spectral index is fairly lenient because the group minimum
        is a somewhat stringent comparison policy. Side note -> this seems like a job for machine learning :)
        Originally written by Matt Hancher for Landsat imagery, adapted to Sentinel by Chris Hewig and Ian Housman
"""
score = ee.Image(1.0);
# Clouds are reasonably bright in the blue band.
blue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(ee.Number(0.3).subtract(ee.Number(0.1)))
score = score.min(blue_rescale);
# Clouds are reasonably bright in all visible bands.
visible = img.select('red').add(img.select('green')).add(img.select('blue'))
visible_rescale = visible.subtract(ee.Number(0.2)).divide(ee.Number(0.8).subtract(ee.Number(0.2)))
score = score.min(visible_rescale);
# Clouds are reasonably bright in all infrared bands.
infrared = img.select('nir').add(img.select('swir1')).add(img.select('swir2'))
infrared_rescale = infrared.subtract(ee.Number(0.3)).divide(ee.Number(0.8).subtract(ee.Number(0.3)))
score = score.min(infrared_rescale);
# Clouds are reasonably cool in temperature.
temp_rescale = img.select('thermal').subtract(ee.Number(300)).divide(ee.Number(290).subtract(ee.Number(300)))
score = score.min(temp_rescale);
# However, clouds are not snow.
ndsi = img.normalizedDifference(['green', 'swir1']);
ndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(ee.Number(0.6).subtract(ee.Number(0.8)))
score = score.min(ndsi_rescale).multiply(100).byte();
mask = score.lt(self.env.cloudScoreThresh).rename(['cloudMask']);
img = img.updateMask(mask).addBands([mask]);
return img;
def maskShadows(self,collection):
def TDOM(image):
zScore = image.select(shadowSumBands).subtract(irMean).divide(irStdDev)
irSum = image.select(shadowSumBands).reduce(ee.Reducer.sum())
TDOMMask = zScore.lt(self.env.zScoreThresh).reduce(ee.Reducer.sum()).eq(2)\
.And(irSum.lt(self.env.shadowSumThresh)).Not()
TDOMMask = TDOMMask.focal_min(self.env.contractPixels).focal_max(self.env.dilatePixels).rename(['TDOMMask'])
image = image.addBands([TDOMMask])
return image.updateMask(TDOMMask)
shadowSumBands = ['nir','swir1']
# Get some pixel-wise stats for the time series
irStdDev = self.fullCollection.select(shadowSumBands).reduce(ee.Reducer.stdDev())
irMean = self.fullCollection.select(shadowSumBands).reduce(ee.Reducer.mean())
        # Mask out dark outliers
collection_tdom = collection.map(TDOM)
return collection_tdom
def terrain(self,img):
degree2radian = 0.01745;
otherBands = img.select(['thermal','date','year','TDOMMask','cloudMask'])
def topoCorr_IC(img):
dem = ee.Image("USGS/SRTMGL1_003")
# Extract image metadata about solar position
SZ_rad = ee.Image.constant(ee.Number(img.get('SOLAR_ZENITH_ANGLE'))).multiply(degree2radian).clip(img.geometry().buffer(10000));
SA_rad = ee.Image.constant(ee.Number(img.get('SOLAR_AZIMUTH_ANGLE'))).multiply(degree2radian).clip(img.geometry().buffer(10000));
            # Create terrain layers
slp = ee.Terrain.slope(dem).clip(img.geometry().buffer(10000));
slp_rad = ee.Terrain.slope(dem).multiply(degree2radian).clip(img.geometry().buffer(10000));
asp_rad = ee.Terrain.aspect(dem).multiply(degree2radian).clip(img.geometry().buffer(10000));
# Calculate the Illumination Condition (IC)
# slope part of the illumination condition
cosZ = SZ_rad.cos();
cosS = slp_rad.cos();
slope_illumination = cosS.expression("cosZ * cosS", \
{'cosZ': cosZ, 'cosS': cosS.select('slope')});
# aspect part of the illumination condition
sinZ = SZ_rad.sin();
sinS = slp_rad.sin();
cosAziDiff = (SA_rad.subtract(asp_rad)).cos();
aspect_illumination = sinZ.expression("sinZ * sinS * cosAziDiff", \
{'sinZ': sinZ, \
'sinS': sinS, \
'cosAziDiff': cosAziDiff});
# full illumination condition (IC)
ic = slope_illumination.add(aspect_illumination);
# Add IC to original image
img_plus_ic = ee.Image(img.addBands(ic.rename(['IC'])).addBands(cosZ.rename(['cosZ'])).addBands(cosS.rename(['cosS'])).addBands(slp.rename(['slope'])));
return ee.Image(img_plus_ic);
def topoCorr_SCSc(img):
img_plus_ic = img;
mask1 = img_plus_ic.select('nir').gt(-0.1);
mask2 = img_plus_ic.select('slope').gte(5) \
.And(img_plus_ic.select('IC').gte(0)) \
.And(img_plus_ic.select('nir').gt(-0.1));
img_plus_ic_mask2 = ee.Image(img_plus_ic.updateMask(mask2));
bandList = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2']; # Specify Bands to topographically correct
def applyBands(image):
blue = apply_SCSccorr('blue').select(['blue'])
green = apply_SCSccorr('green').select(['green'])
red = apply_SCSccorr('red').select(['red'])
nir = apply_SCSccorr('nir').select(['nir'])
swir1 = apply_SCSccorr('swir1').select(['swir1'])
swir2 = apply_SCSccorr('swir2').select(['swir2'])
return replace_bands(image, [blue, green, red, nir, swir1, swir2])
def apply_SCSccorr(band):
method = 'SCSc';
out = ee.Image(1).addBands(img_plus_ic_mask2.select('IC', band)).reduceRegion(reducer= ee.Reducer.linearRegression(2,1), \
geometry= ee.Geometry(img.geometry().buffer(-5000)), \
scale= self.env.terrainScale, \
bestEffort =True,
maxPixels=1e10)
#out_a = ee.Number(out.get('scale'));
#out_b = ee.Number(out.get('offset'));
#out_c = ee.Number(out.get('offset')).divide(ee.Number(out.get('scale')));
fit = out.combine({"coefficients": ee.Array([[1],[1]])}, False);
#Get the coefficients as a nested list,
#cast it to an array, and get just the selected column
out_a = (ee.Array(fit.get('coefficients')).get([0,0]));
out_b = (ee.Array(fit.get('coefficients')).get([1,0]));
out_c = out_a.divide(out_b)
# apply the SCSc correction
SCSc_output = img_plus_ic_mask2.expression("((image * (cosB * cosZ + cvalue)) / (ic + cvalue))", {
'image': img_plus_ic_mask2.select([band]),
'ic': img_plus_ic_mask2.select('IC'),
'cosB': img_plus_ic_mask2.select('cosS'),
'cosZ': img_plus_ic_mask2.select('cosZ'),
'cvalue': out_c });
return ee.Image(SCSc_output);
#img_SCSccorr = ee.Image([apply_SCSccorr(band) for band in bandList]).addBands(img_plus_ic.select('IC'));
img_SCSccorr = applyBands(img).select(bandList).addBands(img_plus_ic.select('IC'))
bandList_IC = ee.List([bandList, 'IC']).flatten();
img_SCSccorr = img_SCSccorr.unmask(img_plus_ic.select(bandList_IC)).select(bandList);
return img_SCSccorr.unmask(img_plus_ic.select(bandList))
img = topoCorr_IC(img)
img = topoCorr_SCSc(img)
return img.addBands(otherBands)
def defringe(self,img):
# threshold for defringing landsat5 and 7
fringeCountThreshold = 279
k = ee.Kernel.fixed(41, 41,
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]);
m = ee.Image(img).mask().reduce(ee.Reducer.min())
sum = m.reduceNeighborhood(ee.Reducer.sum(), k, 'kernel')
mask = sum.gte(fringeCountThreshold)
return img.updateMask(mask)
def brdf(self,img):
import sun_angles
import view_angles
def _apply(image, kvol, kvol0):
blue = _correct_band(image, 'blue', kvol, kvol0, f_iso=0.0774, f_geo=0.0079, f_vol=0.0372)
green = _correct_band(image, 'green', kvol, kvol0, f_iso=0.1306, f_geo=0.0178, f_vol=0.0580)
red = _correct_band(image, 'red', kvol, kvol0, f_iso=0.1690, f_geo=0.0227, f_vol=0.0574)
nir = _correct_band(image, 'nir', kvol, kvol0, f_iso=0.3093, f_geo=0.0330, f_vol=0.1535)
swir1 = _correct_band(image, 'swir1', kvol, kvol0, f_iso=0.3430, f_geo=0.0453, f_vol=0.1154)
swir2 = _correct_band(image, 'swir2', kvol, kvol0, f_iso=0.2658, f_geo=0.0387, f_vol=0.0639)
return replace_bands(image, [blue, green, red, nir, swir1, swir2])
def _correct_band(image, band_name, kvol, kvol0, f_iso, f_geo, f_vol):
"""fiso + fvol * kvol + fgeo * kgeo"""
iso = ee.Image(f_iso)
geo = ee.Image(f_geo)
vol = ee.Image(f_vol)
pred = vol.multiply(kvol).add(geo.multiply(kvol)).add(iso).rename(['pred'])
pred0 = vol.multiply(kvol0).add(geo.multiply(kvol0)).add(iso).rename(['pred0'])
cfac = pred0.divide(pred).rename(['cfac'])
corr = image.select(band_name).multiply(cfac).rename([band_name])
return corr
def _kvol(sunAz, sunZen, viewAz, viewZen):
"""Calculate kvol kernel.
From Lucht et al. 2000
Phase angle = cos(solar zenith) cos(view zenith) + sin(solar zenith) sin(view zenith) cos(relative azimuth)"""
relative_azimuth = sunAz.subtract(viewAz).rename(['relAz'])
pa1 = viewZen.cos() \
.multiply(sunZen.cos())
pa2 = viewZen.sin() \
.multiply(sunZen.sin()) \
.multiply(relative_azimuth.cos())
phase_angle1 = pa1.add(pa2)
phase_angle = phase_angle1.acos()
p1 = ee.Image(PI().divide(2)).subtract(phase_angle)
p2 = p1.multiply(phase_angle1)
p3 = p2.add(phase_angle.sin())
p4 = sunZen.cos().add(viewZen.cos())
p5 = ee.Image(PI().divide(4))
kvol = p3.divide(p4).subtract(p5).rename(['kvol'])
viewZen0 = ee.Image(0)
pa10 = viewZen0.cos() \
.multiply(sunZen.cos())
pa20 = viewZen0.sin() \
.multiply(sunZen.sin()) \
.multiply(relative_azimuth.cos())
phase_angle10 = pa10.add(pa20)
phase_angle0 = phase_angle10.acos()
p10 = ee.Image(PI().divide(2)).subtract(phase_angle0)
p20 = p10.multiply(phase_angle10)
p30 = p20.add(phase_angle0.sin())
p40 = sunZen.cos().add(viewZen0.cos())
p50 = ee.Image(PI().divide(4))
kvol0 = p30.divide(p40).subtract(p50).rename(['kvol0'])
return (kvol, kvol0)
date = img.date()
footprint = determine_footprint(img)
(sunAz, sunZen) = sun_angles.create(date, footprint)
(viewAz, viewZen) = view_angles.create(footprint)
(kvol, kvol0) = _kvol(sunAz, sunZen, viewAz, viewZen)
return _apply(img, kvol.multiply(PI()), kvol0.multiply(PI()))
def medoidMosaic(self,collection):
""" medoid composite with equal weight among indices """
nImages = ee.ImageCollection(collection).select([0]).count().rename('count')
bandNames = ee.Image(collection.first()).bandNames()
otherBands = bandNames.removeAll(self.env.divideBands)
others = collection.select(otherBands).reduce(ee.Reducer.mean()).rename(otherBands);
collection = collection.select(self.env.divideBands)
bandNumbers = ee.List.sequence(1,self.env.divideBands.length());
median = ee.ImageCollection(collection).median()
def subtractmedian(img):
diff = ee.Image(img).subtract(median).pow(ee.Image.constant(2));
return diff.reduce('sum').addBands(img);
medoid = collection.map(subtractmedian)
medoid = ee.ImageCollection(medoid).reduce(ee.Reducer.min(self.env.divideBands.length().add(1))).select(bandNumbers,self.env.divideBands);
return medoid.addBands(others).addBands(nImages);
    def medianMosaic(self,collection):
        """ median composite """
        bandNames = ee.Image(collection.first()).bandNames()
        otherBands = bandNames.removeAll(self.env.medianIncludeBands)
        median = collection.select(self.env.medianIncludeBands).median();
        others = collection.select(otherBands).mean();
        return median.addBands(others)
def HarmonizeLandsat7(self,img):
t = img.get("system:time_start")
bandNames = ee.List(['blue','green','red','nir','swir1','swir2']);
slopes = [1.10601,0.99091,1.05681,1.0045,1.03611,1.04011]
intercepts = [-0.0139,0.00411,-0.0024,-0.0076,0.00411,0.00861]
otherBands = ee.Image(img).bandNames().removeAll(bandNames)
others = img.select(otherBands)
img = ee.Image(img).select(bandNames).multiply(slopes).add(intercepts).float();
return img.addBands(others).copyProperties(img).set("system:time_start",t)
def HarmonizeLandsat8(self,img):
t = img.get("system:time_start")
bandNames = ee.List(['blue','green','red','nir','swir1','swir2']);
slopes = [1.0946,1.0043,1.0524,0.8954,1.0049,1.0002];
intercepts = [-0.0107,0.0026,-0.0015,0.0033,0.0065,0.0046];
otherBands = ee.Image(img).bandNames().removeAll(bandNames)
others = img.select(otherBands)
img = ee.Image(img).select(bandNames).multiply(slopes).add(intercepts).float();
return img.addBands(others).copyProperties(img).set("system:time_start",t)
class sentinel2():
def __init__(self):
"""Initialize the Surfrace Reflectance app."""
# get the environment
self.env = env()
def main(self,studyArea,startDate,endDate):
s2 = self.getSentinel2(startDate,endDate,studyArea);
if s2.size().getInfo() > 0:
s2 = s2.map(self.scaleS2)
# masking the shadows
if self.env.shadowMask == True:
s2 = self.maskShadows(s2,studyArea)
self.collectionMeta = s2.getInfo()['features']
s2 = s2.select(self.env.s2BandsIn,self.env.s2BandsOut).map(self.addDateYear)
if self.env.QAcloudMask == True:
s2 = s2.map(self.QAMaskCloud)
if self.env.cloudMask == True:
s2 = s2.map(self.sentinelCloudScore)
s2 = self.cloudMasking(s2)
if self.env.brdfCorrect == True:
s2 = s2.map(self.brdf)
if self.env.terrainCorrection == True:
s2 = s2.map(self.getTopo)
corrected = s2.filter(ee.Filter.gt("slope",20))
notCorrected = s2.filter(ee.Filter.lt("slope",20))
s2 = corrected.map(self.terrain).merge(notCorrected)
return s2
def getSentinel2(self,start,end,studyArea):
s2s = ee.ImageCollection('COPERNICUS/S2').filterDate(start,end) \
.filterBounds(studyArea) \
.filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE',self.env.metadataCloudCoverMax)) \
                .filter(ee.Filter.lt('CLOUD_COVERAGE_ASSESSMENT',self.env.metadataCloudCoverMax))
return s2s
def addDateYear(self,img):
#add a date and year band
date = ee.Date(img.get("system:time_start"))
day = date.getRelative('day','year').add(1);
yr = date.get('year');
mk = img.mask().reduce(ee.Reducer.min());
img = img.addBands(ee.Image.constant(day).mask(mk).uint16().rename('date'));
img = img.addBands(ee.Image.constant(yr).mask(mk).uint16().rename('year'));
return img;
def maskShadows(self,collection,studyArea):
def TDOM(image):
zScore = image.select(shadowSumBands).subtract(irMean).divide(irStdDev)
irSum = image.select(shadowSumBands).reduce(ee.Reducer.sum())
TDOMMask = zScore.lt(self.env.zScoreThresh).reduce(ee.Reducer.sum()).eq(2)\
.And(irSum.lt(self.env.shadowSumThresh)).Not()
TDOMMask = TDOMMask.focal_min(self.env.dilatePixels)
return image.addBands(TDOMMask.rename(['TDOMMask']))
def mask(image):
outimg = image.updateMask(image.select(['TDOMMask']))
return outimg
shadowSumBands = ['B8','B11']
allCollection = ee.ImageCollection('COPERNICUS/S2').filterBounds(studyArea).filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE",30))
# Get some pixel-wise stats for the time series
irStdDev = allCollection.select(shadowSumBands).reduce(ee.Reducer.stdDev())
irMean = allCollection.select(shadowSumBands).reduce(ee.Reducer.mean())
        # Mask out dark outliers
collection_tdom = collection.map(TDOM)
return collection_tdom.map(mask)
def getTopo(self,img):
        ''' function to filter for areas with terrain and areas without '''
dem = self.env.dem.unmask(0)
geom = ee.Geometry(img.get('system:footprint')).bounds()
slp_rad = ee.Terrain.slope(dem).clip(geom);
slope = slp_rad.reduceRegion(reducer= ee.Reducer.percentile([80]), \
geometry= geom,\
scale= 100 ,\
maxPixels=10000000)
return img.set('slope',slope.get('slope'))
def scaleS2(self,img):
divideBands = ['B1','B2','B3','B4','B5','B6','B7','B8','B8A','B9','B10','B11','B12']
bandNames = img.bandNames()
otherBands = bandNames.removeAll(divideBands)
others = img.select(otherBands)
out = img.select(divideBands).divide(10000)
return out.addBands(others).copyProperties(img,['system:time_start','system:footprint','MEAN_SOLAR_ZENITH_ANGLE','MEAN_SOLAR_AZIMUTH_ANGLE']).set("centroid",img.geometry().centroid());
def reScaleS2(self,img):
bandNames = img.bandNames()
otherBands = bandNames.removeAll(self.env.divideBands)
others = img.select(otherBands)
t = img.select(self.env.divideBands);
t = t.multiply(10000)
out = ee.Image(t.copyProperties(img).copyProperties(img,['system:time_start'])).addBands(others).int16()
return out;
def pixelArea(self,img):
geom = ee.Geometry(img.get('system:footprint')).bounds()
area = img.select(['red']).gt(0).reduceRegion(reducer= ee.Reducer.sum(),\
geometry= geom,\
scale= 100,\
maxPixels=10000000)
return img.set("pixelArea",area.get("red"))
# Function to mask clouds using the Sentinel-2 QA band.
def QAMaskCloud(self,img):
bandNames = img.bandNames()
otherBands = bandNames.removeAll(self.env.divideBands)
others = img.select(otherBands)
qa = img.select('QA60').int16();
img = img.select(self.env.divideBands)
# Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = int(math.pow(2, 10));
cirrusBitMask = int(math.pow(2, 11));
# Both flags should be set to zero, indicating clear conditions.
mask = qa.bitwiseAnd(cloudBitMask).eq(0).And(qa.bitwiseAnd(cirrusBitMask).eq(0));
img = img.updateMask(mask).addBands(others)
# Return the masked and scaled data.
return img
def sentinelCloudScore(self,img):
"""
        Computes spectral indices of cloudiness and takes the minimum of them.
        Each spectral index is fairly lenient because the group minimum
        is a somewhat stringent comparison policy. Side note -> this seems like a job for machine learning :)
originally written by Matt Hancher for Landsat imagery
adapted to Sentinel by Chris Hewig and Ian Housman
"""
def rescale(img, thresholds):
"""
Linear stretch of image between two threshold values.
"""
return img.subtract(thresholds[0]).divide(thresholds[1] - thresholds[0])
# cloud until proven otherwise
score = ee.Image(1)
blueCirrusScore = ee.Image(0)
# clouds are reasonably bright
blueCirrusScore = blueCirrusScore.max(rescale(img.select(['blue']), [0.1, 0.5]))
blueCirrusScore = blueCirrusScore.max(rescale(img.select(['cb']), [0.1, 0.5]))
blueCirrusScore = blueCirrusScore.max(rescale(img.select(['cirrus']), [0.1, 0.3]))
score = score.min(blueCirrusScore)
score = score.min(rescale(img.select(['red']).add(img.select(['green'])).add(img.select('blue')), [0.2, 0.8]))
score = score.min(rescale(img.select(['nir']).add(img.select(['swir1'])).add(img.select('swir2')), [0.3, 0.8]))
        # however, clouds are not snow
ndsi = img.normalizedDifference(['green','swir1'])
score=score.min(rescale(ndsi, [0.8, 0.6]))
score = score.multiply(100).byte();
score = score.clamp(0,100);
return img.addBands(score.rename(['cloudScore']))
def cloudMasking(self,collection):
def maskClouds(img):
cloudMask = img.select(['cloudScore']).lt(self.env.cloudScoreThresh)\
.focal_min(self.env.dilatePixels) \
.focal_max(self.env.contractPixels) \
.rename(['cloudMask'])
bandNames = img.bandNames()
otherBands = bandNames.removeAll(self.env.divideBands)
others = img.select(otherBands)
img = img.select(self.env.divideBands).updateMask(cloudMask)
return img.addBands(cloudMask).addBands(others);
        # Find low cloud score pctl for each pixel to avoid commission errors
minCloudScore = collection.select(['cloudScore']).reduce(ee.Reducer.percentile([self.env.cloudScorePctl]));
collection = collection.map(maskClouds)
return collection
def brdf(self,img):
def _apply(image, kvol, kvol0):
blue = _correct_band(image, 'blue', kvol, kvol0, f_iso=0.0774, f_geo=0.0079, f_vol=0.0372)
green = _correct_band(image, 'green', kvol, kvol0, f_iso=0.1306, f_geo=0.0178, f_vol=0.0580)
red = _correct_band(image, 'red', kvol, kvol0, f_iso=0.1690, f_geo=0.0227, f_vol=0.0574)
re1 = _correct_band(image, 're1', kvol, kvol0, f_iso=0.2085, f_geo=0.0256, f_vol=0.0845)
re2 = _correct_band(image, 're2', kvol, kvol0, f_iso=0.2316, f_geo=0.0273, f_vol=0.1003)
re3 = _correct_band(image, 're3', kvol, kvol0, f_iso=0.2599, f_geo=0.0294, f_vol=0.1197)
nir = _correct_band(image, 'nir', kvol, kvol0, f_iso=0.3093, f_geo=0.0330, f_vol=0.1535)
re4 = _correct_band(image, 're4', kvol, kvol0, f_iso=0.2907, f_geo=0.0410, f_vol=0.1611)
swir1 = _correct_band(image, 'swir1', kvol, kvol0, f_iso=0.3430, f_geo=0.0453, f_vol=0.1154)
swir2 = _correct_band(image, 'swir2', kvol, kvol0, f_iso=0.2658, f_geo=0.0387, f_vol=0.0639)
return replace_bands(image, [blue, green, red,re1,re2,re3, nir,re4, swir1, swir2])
def _correct_band(image, band_name, kvol, kvol0, f_iso, f_geo, f_vol):
"""fiso + fvol * kvol + fgeo * kgeo"""
iso = ee.Image(f_iso)
geo = ee.Image(f_geo)
vol = ee.Image(f_vol)
pred = vol.multiply(kvol).add(geo.multiply(kvol)).add(iso).rename(['pred'])
pred0 = vol.multiply(kvol0).add(geo.multiply(kvol0)).add(iso).rename(['pred0'])
cfac = pred0.divide(pred).rename(['cfac'])
corr = image.select(band_name).multiply(cfac).rename([band_name])
return corr
def _kvol(sunAz, sunZen, viewAz, viewZen):
"""Calculate kvol kernel.
From Lucht et al. 2000
Phase angle = cos(solar zenith) cos(view zenith) + sin(solar zenith) sin(view zenith) cos(relative azimuth)"""
relative_azimuth = sunAz.subtract(viewAz).rename(['relAz'])
pa1 = viewZen.cos() \
.multiply(sunZen.cos())
pa2 = viewZen.sin() \
.multiply(sunZen.sin()) \
.multiply(relative_azimuth.cos())
phase_angle1 = pa1.add(pa2)
phase_angle = phase_angle1.acos()
p1 = ee.Image(PI().divide(2)).subtract(phase_angle)
p2 = p1.multiply(phase_angle1)
p3 = p2.add(phase_angle.sin())
p4 = sunZen.cos().add(viewZen.cos())
p5 = ee.Image(PI().divide(4))
kvol = p3.divide(p4).subtract(p5).rename(['kvol'])
viewZen0 = ee.Image(0)
pa10 = viewZen0.cos() \
.multiply(sunZen.cos())
pa20 = viewZen0.sin() \
.multiply(sunZen.sin()) \
.multiply(relative_azimuth.cos())
phase_angle10 = pa10.add(pa20)
phase_angle0 = phase_angle10.acos()
p10 = ee.Image(PI().divide(2)).subtract(phase_angle0)
p20 = p10.multiply(phase_angle10)
p30 = p20.add(phase_angle0.sin())
p40 = sunZen.cos().add(viewZen0.cos())
p50 = ee.Image(PI().divide(4))
kvol0 = p30.divide(p40).subtract(p50).rename(['kvol0'])
return (kvol, kvol0)
date = img.date()
footprint = ee.List(img.geometry().bounds().bounds().coordinates().get(0));
(sunAz, sunZen) = sun_angles.create(date, footprint)
(viewAz, viewZen) = view_angles.create(footprint)
(kvol, kvol0) = _kvol(sunAz, sunZen, viewAz, viewZen)
bandNames = img.bandNames()
otherBands = bandNames.removeAll(self.env.divideBands)
others = img.select(otherBands)
img = ee.Image(_apply(img, kvol.multiply(PI()), kvol0.multiply(PI())))
return img
def terrain(self,img):
degree2radian = 0.01745;
geom = ee.Geometry(img.get('system:footprint')).bounds().buffer(10000)
dem = self.env.dem
bandNames = img.bandNames()
otherBands = bandNames.removeAll(self.env.divideBands)
others = img.select(otherBands)
        bandList = ['blue','green','red','re1','re2','re3','nir','nir2','cb','cirrus','swir1','swir2','waterVapor']
def topoCorr_IC(img):
# Extract image metadata about solar position
SZ_rad = ee.Image.constant(ee.Number(img.get('MEAN_SOLAR_ZENITH_ANGLE'))).multiply(degree2radian).clip(geom);
SA_rad = ee.Image.constant(ee.Number(img.get('MEAN_SOLAR_AZIMUTH_ANGLE'))).multiply(degree2radian).clip(geom);
            # Create terrain layers
slp = ee.Terrain.slope(dem).clip(geom);
slp_rad = ee.Terrain.slope(dem).multiply(degree2radian).clip(geom);
asp_rad = ee.Terrain.aspect(dem).multiply(degree2radian).clip(geom);
# Calculate the Illumination Condition (IC)
# slope part of the illumination condition
cosZ = SZ_rad.cos();
cosS = slp_rad.cos();
slope_illumination = cosS.expression("cosZ * cosS", \
{'cosZ': cosZ, 'cosS': cosS.select('slope')});
# aspect part of the illumination condition
sinZ = SZ_rad.sin();
sinS = slp_rad.sin();
cosAziDiff = (SA_rad.subtract(asp_rad)).cos();
aspect_illumination = sinZ.expression("sinZ * sinS * cosAziDiff", \
{'sinZ': sinZ, \
'sinS': sinS, \
'cosAziDiff': cosAziDiff});
# full illumination condition (IC)
ic = slope_illumination.add(aspect_illumination);
# Add IC to original image
img_plus_ic = ee.Image(img.addBands(ic.rename(['IC'])).addBands(cosZ.rename(['cosZ'])).addBands(cosS.rename(['cosS'])).addBands(slp.rename(['slope'])));
return ee.Image(img_plus_ic);
def topoCorr_SCSc(img):
img_plus_ic = img;
mask1 = img_plus_ic.select('nir').gt(-0.1);
mask2 = img_plus_ic.select('slope').gte(5) \
.And(img_plus_ic.select('IC').gte(0)) \
.And(img_plus_ic.select('nir').gt(-0.1));
img_plus_ic_mask2 = ee.Image(img_plus_ic.updateMask(mask2));
def apply_SCSccorr(band):
method = 'SCSc';
out = ee.Image(1).addBands(img_plus_ic_mask2.select('IC', band)).reduceRegion(reducer= ee.Reducer.linearRegression(2,1), \
geometry= ee.Geometry(img.geometry().buffer(-5000)), \
scale= 300, \
bestEffort =True,
maxPixels=1e10)
fit = out.combine({"coefficients": ee.Array([[1],[1]])}, False);
#Get the coefficients as a nested list,
#cast it to an array, and get just the selected column
out_a = (ee.Array(fit.get('coefficients')).get([0,0]));
out_b = (ee.Array(fit.get('coefficients')).get([1,0]));
out_c = out_a.divide(out_b)
# apply the SCSc correction
SCSc_output = img_plus_ic_mask2.expression("((image * (cosB * cosZ + cvalue)) / (ic + cvalue))", {
'image': img_plus_ic_mask2.select([band]),
'ic': img_plus_ic_mask2.select('IC'),
'cosB': img_plus_ic_mask2.select('cosS'),
'cosZ': img_plus_ic_mask2.select('cosZ'),
'cvalue': out_c });
return ee.Image(SCSc_output);
img_SCSccorr = ee.Image([apply_SCSccorr(band) for band in bandList]).addBands(img_plus_ic.select('IC'));
bandList_IC = ee.List([bandList, 'IC']).flatten();
img_SCSccorr = img_SCSccorr.unmask(img_plus_ic.select(bandList_IC)).select(bandList);
return img_SCSccorr.unmask(img_plus_ic.select(bandList))
img = topoCorr_IC(img)
img = topoCorr_SCSc(img).addBands(others )
return img
class Harmonize():
def __init__(self):
"""Initialize the Surfrace Reflectance app."""
# get the environment
self.env = env()
def harmonizeData(self,l8,s2):
bands = ee.List(['blue','green','red','nir','swir1','swir2'])
s2 = s2.select(bands)
l8 = l8.select(bands)
return ee.ImageCollection(l8.merge(s2))
def medoidMosaic(self,collection):
""" medoid composite with equal weight among indices """
bands = ee.List(['blue','green','red','nir','swir1','swir2'])
collection = collection.select(bands)
median = ee.ImageCollection(collection).median()
bandNumbers = ee.List.sequence(1,bands.length());
def subtractmedian(img):
diff = ee.Image(img).subtract(median).pow(ee.Image.constant(2));
return diff.reduce('sum').addBands(img);
medoid = collection.map(subtractmedian)
medoid = ee.ImageCollection(medoid).reduce(ee.Reducer.min(bands.length().add(1))).select(bandNumbers,bands)
return medoid;
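    # Note on the reducer above: subtractmedian() puts the summed squared
    # difference to the per-band median as the first band of each image;
    # ee.Reducer.min(bands.length().add(1)) then keeps, per pixel, the band
    # values of the image with the smallest such difference, i.e. the medoid
    # observation for that pixel.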
def exportMap(self,img,studyArea,week):
geom = studyArea.bounds().getInfo();
task_ordered= ee.batch.Export.image.toAsset(image=img.clip(studyArea.buffer(10000)),
description = self.env.name + str(week),
assetId= self.env.assetId + self.env.name ,
region=geom['coordinates'],
maxPixels=1e13,
crs=self.env.epsg,
scale=self.env.exportScale)
task_ordered.start()
def composite(aoi,year):
startDate = ee.Date.fromYMD(year,1,1)
endDate = ee.Date.fromYMD(year,12,31)
landsat = Landsat().main(aoi,startDate,endDate)
s2 = sentinel2().main(aoi,startDate,endDate)
collection = Harmonize().harmonizeData(landsat,s2)
medoid = collection.median().set("system:time_start",startDate.millis())
percentiles = collection.reduce(ee.Reducer.percentile([25,75]));
inbands = ['blue_p25', 'blue_p75', 'green_p25', 'green_p75', 'red_p25', 'red_p75', 'nir_p25', 'nir_p75', 'swir1_p25', 'swir1_p75', 'swir2_p25', 'swir2_p75']
outbands = ['p25_blue', 'p75_blue', 'p25_green', 'p75_green', 'p25_red', 'p75_red', 'p25_nir', 'p75_nir', 'p25_swir1', 'p75_swir1', 'p25_swir2', 'p75_swir2']
percentiles = percentiles.select(inbands,outbands)
nImages = ee.ImageCollection(collection).select([0]).count().rename('count')
medoid = medoid.addBands(percentiles)
medoid = medoid.multiply(10000).int16()
medoid = medoid.addBands(nImages)
return ee.Image(medoid)
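# --- Hedged usage sketch (illustrative only, not part of the original script) ---
# Assumes an authenticated Earth Engine session and that the env/Landsat/
# sentinel2 helpers referenced by composite() are defined in this module.
# The study area and year below are hypothetical placeholders.
if __name__ == "__main__":
    import ee
    ee.Initialize()
    aoi = ee.Geometry.Rectangle([104.0, 11.0, 105.0, 12.0])
    annual = composite(aoi, 2019)
    print(annual.bandNames().getInfo())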
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .vfe_template import VFETemplate
class PFNLayer(nn.Module):
def __init__(self,
in_channels,
out_channels,
use_norm=True,
last_layer=False):
super().__init__()
self.last_vfe = last_layer
self.use_norm = use_norm
if not self.last_vfe:
out_channels = out_channels // 2
if self.use_norm:
self.linear = nn.Linear(in_channels, out_channels, bias=False)
self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
else:
self.linear = nn.Linear(in_channels, out_channels, bias=True)
self.part = 50000
def forward(self, inputs):
if inputs.shape[0] > self.part:
# nn.Linear can misbehave when the batch dimension is very large, so process the input in chunks of self.part rows
num_parts = inputs.shape[0] // self.part
part_linear_out = [self.linear(inputs[num_part*self.part:(num_part+1)*self.part])
for num_part in range(num_parts+1)]
x = torch.cat(part_linear_out, dim=0)
else:
x = self.linear(inputs)
torch.backends.cudnn.enabled = False
x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1) if self.use_norm else x
torch.backends.cudnn.enabled = True
x = F.relu(x)
x_max = torch.max(x, dim=1, keepdim=True)[0]
if self.last_vfe:
return x_max
else:
x_repeat = x_max.repeat(1, inputs.shape[1], 1)
x_concatenated = torch.cat([x, x_repeat], dim=2)
return x_concatenated
class PillarVFE(VFETemplate):
def __init__(self, model_cfg, num_point_features, voxel_size, point_cloud_range):
super().__init__(model_cfg=model_cfg)
self.use_norm = self.model_cfg.USE_NORM
self.with_distance = self.model_cfg.WITH_DISTANCE
self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ
num_point_features += 6 if self.use_absolute_xyz else 3
if self.with_distance:
num_point_features += 1
self.num_filters = self.model_cfg.NUM_FILTERS
assert len(self.num_filters) > 0
num_filters = [num_point_features] + list(self.num_filters)
pfn_layers = []
for i in range(len(num_filters) - 1):
in_filters = num_filters[i]
out_filters = num_filters[i + 1]
pfn_layers.append(
PFNLayer(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2))
)
self.pfn_layers = nn.ModuleList(pfn_layers)
self.voxel_x = voxel_size[0]
self.voxel_y = voxel_size[1]
self.voxel_z = voxel_size[2]
self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
def get_output_feature_dim(self):
return self.num_filters[-1]
def get_paddings_indicator(self, actual_num, max_num, axis=0):
actual_num = torch.unsqueeze(actual_num, axis + 1)
max_num_shape = [1] * len(actual_num.shape)
max_num_shape[axis + 1] = -1
max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
paddings_indicator = actual_num.int() > max_num
return paddings_indicator
def forward(self, batch_dict, **kwargs):
points_ori = batch_dict['points']
batch_size = batch_dict['batch_size']
xyz = points_ori[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
batch_idx = points_ori[:, 0]
for k in range(batch_size):
xyz_batch_cnt[k] = (batch_idx == k).sum()
points_ori_features = points_ori[:, 4].view(-1, 1)
voxel_features, voxel_num_points, coords = batch_dict['voxels'], batch_dict['voxel_num_points'], batch_dict['voxel_coords']
points_mean = voxel_features[:, :, :3].sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1)
f_cluster = voxel_features[:, :, :3] - points_mean
f_center = torch.zeros_like(voxel_features[:, :, :3])
f_center[:, :, 0] = voxel_features[:, :, 0] - (coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x + self.x_offset)
f_center[:, :, 1] = voxel_features[:, :, 1] - (coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y + self.y_offset)
f_center[:, :, 2] = voxel_features[:, :, 2] - (coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z + self.z_offset)
if self.use_absolute_xyz:
features = [voxel_features, f_cluster, f_center]
else:
features = [voxel_features[..., 3:], f_cluster, f_center]
if self.with_distance:
points_dist = torch.norm(voxel_features[:, :, :3], 2, 2, keepdim=True)
features.append(points_dist)
features = torch.cat(features, dim=-1)
voxel_count = features.shape[1]
mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
mask = torch.unsqueeze(mask, -1).type_as(voxel_features)
features *= mask
for pfn in self.pfn_layers:
features = pfn(features)
features = features.squeeze()
batch_dict['pillar_features'] = features
batch_dict['sa_points'] = xyz
batch_dict['sa_batch_cnt'] = xyz_batch_cnt
batch_dict['sa_features'] = points_ori_features
return batch_dict
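# --- Hedged usage sketch (illustrative only, not part of the original model code) ---
# Demonstrates the padding-indicator logic used in forward(): for a pillar that
# actually holds `actual_num` points out of a capacity of `max_num`, the mask is
# True for the real points and False for the zero-padded slots.
if __name__ == "__main__":
    actual_num = torch.tensor([2, 4, 0])               # points per pillar (example values)
    max_num = 4                                         # assumed pillar capacity
    idx = torch.arange(max_num, dtype=torch.int).view(1, -1)
    mask = actual_num.unsqueeze(1) > idx                # shape (num_pillars, max_num)
    print(mask)
    # tensor([[ True,  True, False, False],
    #         [ True,  True,  True,  True],
    #         [False, False, False, False]])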
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zerei.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
import os
import pytest
from ray.tests.conftest import * # noqa
@pytest.fixture
def enable_test_module():
os.environ["RAY_DASHBOARD_MODULE_TEST"] = "true"
yield
os.environ.pop("RAY_DASHBOARD_MODULE_TEST", None)
|
import MySQLdb
conn = MySQLdb.Connect(
host = '127.0.0.1',
port = 3306,
user = 'root',
passwd = '123456',
db = 'softcup',
charset = 'utf8'
)
cursor = conn.cursor()
sql = "select title from douban_press01 where isbn13='9787010009292'"
cursor.execute(sql)
catalog = cursor.fetchall()
conn.commit()
conn.close()
print(catalog[0][0])
with open('title.txt', 'a', encoding='utf-8') as t:
    t.write(catalog[0][0])
|
"""
Harness to manage optimisation domains.
-- kandasamy@cs.cmu.edu,
kkorovin@cs.cmu.edu
"""
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=arguments-differ
# pylint: disable=abstract-class-not-used
import numpy as np
# Local
from explore.explorer import ga_opt_args, ga_optimise_from_args
from gp.kernel import SEKernel
from utils.oper_utils import random_maximise, direct_ft_maximise
from utils.option_handler import load_options
from utils.reporters import get_reporter
from chemist_opt.worker_manager import SyntheticWorkerManager
_EUCLIDEAN_DFLT_OPT_METHOD = 'rand'
_NN_DFLT_OPT_METHOD = 'ga'
class Domain(object):
""" Domain class. An abstract class which implements domains. """
def __init__(self, dflt_domain_opt_method):
""" Constructor. """
super(Domain, self).__init__()
self.dflt_domain_opt_method = dflt_domain_opt_method
def maximise_obj(self, opt_method, obj, num_evals, *args, **kwargs):
""" Optimises the objective and returns it. """
if opt_method == 'dflt_domain_opt_method':
opt_method = self.dflt_domain_opt_method
print("MYSELF:", self)
return self._child_maximise_obj(opt_method, obj, num_evals, *args, **kwargs)
def _child_maximise_obj(self, opt_method, obj, num_evals, *args, **kwargs):
""" Child class implementation for optimising an objective. """
raise NotImplementedError('Implement in a child class.')
def get_default_kernel(self, *args, **kwargs):
""" Get the default kernel for this domain. """
raise NotImplementedError('Implement in a child class.')
def get_type(self):
""" Returns the type of the domain. """
raise NotImplementedError('Implement in a child class.')
def get_dim(self):
""" Returns the dimension of the space. """
raise NotImplementedError('Implement in a child class.')
# For euclidean spaces ---------------------------------------------------------------
class EuclideanDomain(Domain):
""" Domain for Euclidean spaces. """
def __init__(self, bounds):
""" Constructor. """
self.bounds = np.array(bounds)
self._dim = len(bounds)
super(EuclideanDomain, self).__init__('rand')
def _child_maximise_obj(self, opt_method, obj, num_evals):
""" Child class implementation for optimising an objective. """
if opt_method == 'rand':
return self._rand_maximise_obj(obj, num_evals)
elif opt_method == 'direct':
return self._direct_maximise_obj(obj, num_evals)
else:
raise ValueError('Unknown opt_method=%s for EuclideanDomain'%(opt_method))
def _rand_maximise_obj(self, obj, num_evals):
""" Maximise with random evaluations. """
if num_evals is None:
lead_const = 10 * min(5, self._dim)**2
num_evals = lambda t: np.clip(lead_const * np.sqrt(min(t, 1000)), 2000, 3e4)
opt_val, opt_pt = random_maximise(obj, self.bounds, num_evals)
return opt_val, opt_pt
def _direct_maximise_obj(self, obj, num_evals):
""" Maximise with direct. """
if num_evals is None:
lead_const = 10 * min(5, self._dim)**2
num_evals = lambda t: np.clip(lead_const * np.sqrt(min(t, 1000)), 2000, 3e4)
lb = self.bounds[:, 0]
ub = self.bounds[:, 1]
opt_val, opt_pt, _ = direct_ft_maximise(obj, lb, ub, num_evals)
return opt_val, opt_pt
def get_default_kernel(self, range_Y):
""" Returns the default (SE) kernel. """
return SEKernel(self._dim, range_Y/4.0, dim_bandwidths=0.05*np.sqrt(self._dim))
def get_type(self):
""" Returns the type of the domain. """
return 'euclidean'
def get_dim(self):
""" Return the dimensions. """
return self._dim
# For molecules. ---------------------------------------------------------------
class MolDomain(Domain):
""" Domain for Molecules. """
def __init__(self, constraint_checker=None):
# this may check validity of molecules:
self.constraint_checker = constraint_checker
super(MolDomain, self).__init__('ga')
@staticmethod
def maximise_obj(opt_method, obj, num_evals, *args, **kwargs):
""" Optimises the objective and returns it. """
opt_pt, opt_val = ga_optimise_from_args(obj, num_evals)
return opt_val, opt_pt
def _child_maximise_obj(self, opt_method, obj, num_evals, *args, **kwargs):
""" Child class implementation for optimising an objective. """
if opt_method == 'ga':
return self._ga_maximise(obj, num_evals, *args, **kwargs)
elif opt_method == 'rand_ga':
return self._rand_ga_maximise(obj, num_evals)
else:
raise ValueError('Unknown method=%s for MolDomain'%(opt_method))
def _ga_maximise(self, obj, num_evals, mutation_op,
init_pool, init_pool_vals=None,
expects_inputs_to_be_iterable=False):
""" Maximise with genetic algorithms.
If expects_inputs_to_be_iterable is True, the function expects its inputs
to be iterable by default.
Arguments:
obj - target function to optimize (*objective*)
"""
# Optimization happens here:
opt_pt, opt_val = ga_optimise_from_args(obj, num_evals)
return opt_val, opt_pt
###### TODO: call like the following: #######
# ga_optimise_from_args(func_caller, worker_manager, num_evals, 'asy', mutation_op,
# options=options, reporter=reporter)
def _rand_ga_maximise(self, obj, num_evals):
""" Maximise over the space of neural networks via rand_ga. """
raise NotImplementedError('Not implemented rand_ga for MolDomain yet.')
def get_default_kernel(self, tp_comp, mislabel_coeffs, struct_coefs, powers,
dist_type, range_Y):
""" Returns the default (SE) kernel. """
raise NotImplementedError("TODO.")
def get_dim(self):
""" Return the dimensions. """
raise NotImplementedError("TODO.")
|
from ..factory import Type
class messageVoiceNote(Type):
voice_note = None # type: "voiceNote"
caption = None # type: "formattedText"
is_listened = None # type: "Bool"
|
from django.test import tag
from unittest.mock import patch
from CMS.test.mocks.institution_mocks import InstitutionMocks
from CMS.test.utils import UniSimpleTestCase
from errors.models import ApiError
from institutions.models import Institution
@tag('azure')
class InstitutionsModelsTests(UniSimpleTestCase):
@patch('institutions.request_handler.load_institution_data',
return_value=InstitutionMocks.get_unsuccessful_institution_load_response())
def test_institution_find_returns_api_error_if_institution_not_found(self, mock_response):
institution, error = Institution.find(1, 'en')
self.assertIsNone(institution)
self.assertIsNotNone(error)
self.assertEquals(type(error), ApiError)
def test_institution_find_returns_a_institution_object_if_institution_found(self):
institution, error = Institution.find(1, 1)
self.assertIsNone(error)
self.assertIsNotNone(institution)
self.assertEquals(type(institution), Institution)
|
"""Auto-generated file, do not edit by hand. AZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AZ = PhoneMetadata(id='AZ', country_code=994, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='(?:(?:(?:[12457]\\d|60|88)\\d|365)\\d{3}|900200)\\d{3}', possible_length=(9,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:(?:1[28]\\d|2(?:[045]2|1[24]|2[2-4]|33|6[23]))\\d\\d|365(?:[0-46-9]\\d|5[0-35-9]))\\d{4}', example_number='123123456', possible_length=(9,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:36554|(?:4[04]|5[015]|60|7[07])\\d{3})\\d{4}', example_number='401234567', possible_length=(9,)),
toll_free=PhoneNumberDesc(national_number_pattern='88\\d{7}', example_number='881234567', possible_length=(9,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900200\\d{3}', example_number='900200123', possible_length=(9,)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3', leading_digits_pattern=['[1-9]']),
NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['9'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[12]|365', '[12]|365', '[12]|365(?:[0-46-9]|5[0-35-9])'], national_prefix_formatting_rule='(0\\1)'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[3-8]'], national_prefix_formatting_rule='0\\1')],
intl_number_format=[NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['9']),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[12]|365', '[12]|365', '[12]|365(?:[0-46-9]|5[0-35-9])']),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[3-8]'])],
mobile_number_portable_region=True)
|
# -*- coding: utf-8 -*-
"""
@description:
@author:XuMing
"""
from __future__ import print_function  # Python 3 compatible print function
from __future__ import unicode_literals  # Python 3 compatible string handling
import base64
import binascii
import json
import logging
import re
import time
import requests
import rsa
try:
    from urllib.parse import quote_plus  # Python 3
except ImportError:
    from urllib import quote_plus  # Python 2
class WeiBoLogin(object):
"""
Class for WeiBoLogin, used to log in to weibo.com.
"""
def __init__(self):
"""
constructor
"""
self.user_name = None
self.pass_word = None
self.user_uniqueid = None
self.user_nick = None
self.session = requests.Session()
self.session.headers.update({"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0"})
self.session.get("http://weibo.com/login.php")
return
def login(self, user_name, pass_word):
"""
login weibo.com, return True or False
"""
self.user_name = user_name
self.pass_word = pass_word
self.user_uniqueid = None
self.user_nick = None
# get json data
s_user_name = self.get_username()
json_data = self.get_json_data(su_value=s_user_name)
if not json_data:
return False
s_pass_word = self.get_password(json_data["servertime"], json_data["nonce"], json_data["pubkey"])
# make post_data
post_data = {
"entry": "weibo",
"gateway": "1",
"from": "",
"savestate": "7",
"userticket": "1",
"vsnf": "1",
"service": "miniblog",
"encoding": "UTF-8",
"pwencode": "rsa2",
"sr": "1280*800",
"prelt": "529",
"url": "http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack",
"rsakv": json_data["rsakv"],
"servertime": json_data["servertime"],
"nonce": json_data["nonce"],
"su": s_user_name,
"sp": s_pass_word,
"returntype": "TEXT",
}
# get captcha code
if json_data["showpin"] == 1:
url = "http://login.sina.com.cn/cgi/pin.php?r=%d&s=0&p=%s" % (int(time.time()), json_data["pcid"])
with open("captcha.jpeg", "wb") as file_out:
file_out.write(self.session.get(url).content)
code = input("请输入验证码:")
post_data["pcid"] = json_data["pcid"]
post_data["door"] = code
# login weibo.com
login_url_1 = "http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)&_=%d" % int(time.time())
json_data_1 = self.session.post(login_url_1, data=post_data).json()
if json_data_1["retcode"] == "0":
params = {
"callback": "sinaSSOController.callbackLoginStatus",
"client": "ssologin.js(v1.4.18)",
"ticket": json_data_1["ticket"],
"ssosavestate": int(time.time()),
"_": int(time.time()*1000),
}
response = self.session.get("https://passport.weibo.com/wbsso/login", params=params)
json_data_2 = json.loads(re.search(r"\((?P<result>.*)\)", response.text).group("result"))
if json_data_2["result"] is True:
self.user_uniqueid = json_data_2["userinfo"]["uniqueid"]
self.user_nick = json_data_2["userinfo"]["displayname"]
logging.warning("WeiBoLogin succeed: %s", json_data_2)
else:
logging.warning("WeiBoLogin failed: %s", json_data_2)
else:
logging.warning("WeiBoLogin failed: %s", json_data_1)
return True if self.user_uniqueid and self.user_nick else False
def get_username(self):
"""
get legal username
"""
username_quote = quote_plus(self.user_name)
username_base64 = base64.b64encode(username_quote.encode("utf-8"))
return username_base64.decode("utf-8")
def get_json_data(self, su_value):
"""
get the value of "servertime", "nonce", "pubkey", "rsakv" and "showpin", etc
"""
params = {
"entry": "weibo",
"callback": "sinaSSOController.preloginCallBack",
"rsakt": "mod",
"checkpin": "1",
"client": "ssologin.js(v1.4.18)",
"su": su_value,
"_": int(time.time()*1000),
}
try:
response = self.session.get("http://login.sina.com.cn/sso/prelogin.php", params=params)
json_data = json.loads(re.search(r"\((?P<data>.*)\)", response.text).group("data"))
except Exception as excep:
json_data = {}
logging.error("WeiBoLogin get_json_data error: %s", excep)
logging.debug("WeiBoLogin get_json_data: %s", json_data)
return json_data
def get_password(self, servertime, nonce, pubkey):
"""
get legal password
"""
string = (str(servertime) + "\t" + str(nonce) + "\n" + str(self.pass_word)).encode("utf-8")
public_key = rsa.PublicKey(int(pubkey, 16), int("10001", 16))
password = rsa.encrypt(string, public_key)
password = binascii.b2a_hex(password)
return password.decode()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s\t%(levelname)s\t%(message)s")
weibo = WeiBoLogin()
weibo.login("username", "password")
|
"""
This is a sample of how to define custom components.
You can make a repo out of this file, having one custom component per file
"""
import os
import shutil
import pytest
import pp
from pp.add_padding import add_padding_to_grid
from pp.add_termination import add_gratings_and_loop_back
from pp.autoplacer.yaml_placer import place_from_yaml
from pp.components.spiral_inner_io import spiral_inner_io_euler
from pp.config import CONFIG
from pp.generate_does import generate_does
from pp.mask.merge_metadata import merge_metadata
from pp.routing.connect import connect_strip_way_points
def _route_filter(*args, **kwargs):
return connect_strip_way_points(
*args, taper_factory=None, start_straight=5.0, end_straight=5.0, **kwargs
)
def add_te(component, **kwargs):
c = pp.routing.add_fiber_array(
component,
grating_coupler=pp.c.grating_coupler_elliptical_te,
route_filter=_route_filter,
**kwargs,
)
c.test = "passive_optical_te"
c = add_padding_to_grid(c)
return c
def add_tm(component, **kwargs):
c = pp.routing.add_fiber_array(
component,
grating_coupler=pp.c.grating_coupler_elliptical_tm,
route_filter=_route_filter,
bend_radius=20,
**kwargs,
)
c = add_padding_to_grid(c)
return c
@pp.cell
def coupler_te(gap, length, wg_width=0.5, nominal_wg_width=0.5):
""" sample of component cutback """
c = pp.c.coupler(wg_width=wg_width, gap=gap, length=length)
cc = add_te(c)
return cc
@pp.cell
def spiral_te(wg_width=0.5, length=2):
""" sample of component cutback
Args:
wg_width: um
length: mm
"""
c = spiral_inner_io_euler(wg_width=wg_width, length=length)
cc = add_gratings_and_loop_back(
component=c,
grating_coupler=pp.c.grating_coupler_elliptical_te,
bend_factory=pp.c.bend_circular,
)
return cc
@pp.cell
def spiral_tm(wg_width=0.5, length=2):
""" sample of component cutback """
c = spiral_inner_io_euler(wg_width=wg_width, length=length, dx=10, dy=10, N=5)
cc = add_gratings_and_loop_back(
component=c,
grating_coupler=pp.c.grating_coupler_elliptical_tm,
bend_factory=pp.c.bend_circular,
)
return cc
component_factory = dict(
spiral_te=spiral_te, spiral_tm=spiral_tm, coupler_te=coupler_te
)
@pytest.fixture
def cleandir():
build_folder = CONFIG["samples_path"] / "mask_custom" / "build"
if build_folder.exists():
shutil.rmtree(build_folder)
@pytest.fixture
def chdir():
workspace_folder = CONFIG["samples_path"] / "mask_custom"
os.chdir(workspace_folder)
@pytest.mark.usefixtures("cleandir")
def test_mask(precision=2e-9):
workspace_folder = CONFIG["samples_path"] / "mask_custom"
build_path = workspace_folder / "build"
doe_root_path = build_path / "cache_doe"
doe_metadata_path = build_path / "doe"
mask_path = build_path / "mask"
does_yml = workspace_folder / "does.yml"
mask_path.mkdir(parents=True, exist_ok=True)
gdspath = mask_path / "sample_mask.gds"
markdown_path = gdspath.with_suffix(".md")
json_path = gdspath.with_suffix(".json")
test_metadata_path = gdspath.with_suffix(".tp.json")
generate_does(
str(does_yml),
component_factory=component_factory,
precision=precision,
doe_root_path=doe_root_path,
doe_metadata_path=doe_metadata_path,
)
top_level = place_from_yaml(does_yml, precision=precision, root_does=doe_root_path)
top_level.write(str(gdspath))
merge_metadata(gdspath=gdspath)
assert gdspath.exists()
assert markdown_path.exists()
assert json_path.exists()
assert test_metadata_path.exists()
report = open(markdown_path).read()
assert report.count("#") == 2, f" only {report.count('#')} DOEs in {markdown_path}"
return gdspath
if __name__ == "__main__":
# from pprint import pprint
# pprint(component_factory)
c = test_mask()
pp.klive.show(c)
# c = coupler_te(gap=0.3, length=20)
# pp.show(c)
|
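# Reads IP addresses from the 'access' table of the SQLite database given as
# argv[1], geolocates each one with the GeoIP2 city database given as argv[2],
# and prints the country ISO code, subdivision and city for every address that
# can be resolved.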
import sys
import sqlite3
import geoip2.database
db = sqlite3.connect(sys.argv[1])
geoip_reader = geoip2.database.Reader(sys.argv[2])
geoip_lookup = geoip_reader.city
with db as cur:
for row in db.execute('select ip from access'):
ip = row[0]
try:
r = geoip_lookup(ip)
except geoip2.errors.AddressNotFoundError:
continue
try:
subdiv = r.subdivisions[0].names['en']
except (KeyError, IndexError):
subdiv = None
try:
city = r.city.names['en']
except Exception:
city = None
print(r.country.iso_code, subdiv, city)
|
import sys
import re
import os
import argparse
from collections import deque
FLAG = None
def read_classify_list(filename):
classify_dic = {}
with open(filename,'r') as f:
for line in f:
l_sp = line.rstrip().split(' ')
ID = l_sp[0]
start_frame = l_sp[1]
cont_frame = l_sp[2]
word_id = int(l_sp[3])
if ID not in classify_dic:
classify_dic[ID] = []
classify_dic[ID].append((start_frame,cont_frame,word_id))
return classify_dic
def mkdir(frame_num_list, path):
for i in frame_num_list:
if not os.path.exists(path+'/'+str(i)):
os.mkdir(path+'/'+str(i))
return
def classify(frame_num, frame_num_list):
for i in range(len(frame_num_list)):
if i==0 and frame_num <= frame_num_list[i]:
return frame_num_list[i]
if frame_num <= frame_num_list[i] and frame_num > frame_num_list[i-1] :
return frame_num_list[i]
return None
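# Example (illustrative): with frame_num_list = [70], classify(50, [70]) returns 70
# (an utterance of at most 70 frames is assigned to the 70-frame bucket and later
# zero-padded up to that length), while classify(80, [70]) returns None and the
# utterance is skipped by the caller.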
### write the features to path/classify_num.ark ###
def read_and_save_feat(prons, filename, classify_dic, frame_num_list, path, feat_dim, filtered_prons):
import csv
import numpy as np
counter_dic = {}
for i in frame_num_list:
counter_dic[i] = 0
filtered_lines = []
with open(filename,'r') as f:
line_id = 0
for line in f:
if '[' in line:
# print (line_id)
ID = line.strip().split(' ')[0]
### temp_list contains all the utterance feature ###
temp_list = []
for lines in f:
flag = False
if ']' in lines:
lines = lines.replace(']',' ')
flag = True
feat_str = lines.strip().split(' ')
feat_l = [float(i) for i in feat_str]
temp_list.append(feat_l)
if flag :
break
if ID not in classify_dic:
continue
for start, cont, word_id in classify_dic[ID]:
if word_id == 0:
line_id += 1
continue
cls = classify(int(cont),frame_num_list)
if cls is None:
line_id += 1
continue
filtered_lines.append(line_id)
line_id += 1
new_frames = [ temp_list[i] for i in \
range(int(start),int(start)+int(cont))]
### padding zero ###
#print (cont, cls)
if int(cont) < cls :
new_frames += [[0. for j in range(feat_dim)]for i in range(cls -
int(cont))]
# new_frames = np.array(new_frames, dtype=np.float32)
# x = np.arange(cls)
# xp = np.linspace(0, cls - 1, new_frames.shape[0])
# new_frames = np.vstack([np.interp(x, xp, new_frames[:, i]) for i in \
# range(new_frames.shape[1])]).T
np_new_frames = np.reshape(np.array(new_frames),-1)
np_new_frames = np.append(np_new_frames,[word_id])
np_new_frames = np.append(np_new_frames,[ID])
#print (np_new_frames[0])
# with open(path+'/'+str(cls)+'/'+str(int(counter_dic[cls]/FLAG.num_in_ark)) + '.ark','a') as csvfile:
with open(path+'/'+str(cls)+'/'+ID.split('-')[0] + '.ark','a') as csvfile:
counter_dic[cls] += 1
# csvfile.write(ID+' ')
for i in range(len(np_new_frames)):
if i != len(np_new_frames)-1:
csvfile.write(str(np_new_frames[i])+',')
else:
csvfile.write(str(np_new_frames[i])+'\n')
with open(prons, 'r') as fin:
with open(filtered_prons, 'w') as fout:
count = 0
for i, idx in enumerate(filtered_lines):
while True:
line = fin.readline()
if line == '':
break
line = line[:-1]
if count == idx:
fout.write(line + '\n')
count += 1
break
count += 1
def main():
classify_list = [70]
path=FLAG.store_path
mkdir(classify_list, path)
classify_dic = read_classify_list(FLAG.prons)
read_and_save_feat(FLAG.prons, FLAG.feat_ark, classify_dic, classify_list, path, FLAG.feat_dim, FLAG.filtered_prons)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='get the feat csv through all_prons')
parser.add_argument('prons',
help='the pronunciation file')
parser.add_argument('feat_ark',
help='the feat ark')
parser.add_argument('store_path',
help='the directory to store the feat arks.')
parser.add_argument('filtered_prons',
help='the prons file filtered by num of frames')
parser.add_argument('--feat_dim', type=int,
default=39,
help='the feat dimension, default=39')
FLAG = parser.parse_args()
main()
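# --- Hedged usage note (illustrative only; the script and file names below are
# hypothetical placeholders) ---
# python classify_feats.py all_prons.txt feats.ark out_dir filtered_prons.txt --feat_dim 39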
|
import six
import warnings
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
format_environment, normalize_links, parse_bytes, parse_devices,
split_command, version_gte, version_lt,
)
from .base import DictType
from .healthcheck import Healthcheck
class LogConfigTypesEnum(object):
_values = (
'json-file',
'syslog',
'journald',
'gelf',
'fluentd',
'none'
)
JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
class LogConfig(DictType):
types = LogConfigTypesEnum
def __init__(self, **kwargs):
log_driver_type = kwargs.get('type', kwargs.get('Type'))
config = kwargs.get('config', kwargs.get('Config')) or {}
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super(LogConfig, self).__init__({
'Type': log_driver_type,
'Config': config
})
@property
def type(self):
return self['Type']
@type.setter
def type(self, value):
self['Type'] = value
@property
def config(self):
return self['Config']
def set_config_value(self, key, value):
self.config[key] = value
def unset_config(self, key):
if key in self.config:
del self.config[key]
class Ulimit(DictType):
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, six.string_types):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super(Ulimit, self).__init__({
'Name': name,
'Soft': soft,
'Hard': hard
})
@property
def name(self):
return self['Name']
@name.setter
def name(self, value):
self['Name'] = value
@property
def soft(self):
return self.get('Soft')
@soft.setter
def soft(self, value):
self['Soft'] = value
@property
def hard(self):
return self.get('Hard')
@hard.setter
def hard(self, value):
self['Hard'] = value
class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
privileged=False, dns=None, dns_search=None,
volumes_from=None, network_mode=None, restart_policy=None,
cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None, log_config=None,
mem_limit=None, memswap_limit=None, mem_reservation=None,
kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
group_add=None, cpu_quota=None, cpu_period=None,
blkio_weight=None, blkio_weight_device=None,
device_read_bps=None, device_write_bps=None,
device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
cpuset_cpus=None, userns_mode=None, pids_limit=None,
isolation=None, auto_remove=False, storage_opt=None,
init=None, init_path=None, volume_driver=None,
cpu_count=None, cpu_percent=None, nano_cpus=None,
cpuset_mems=None, runtime=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
if memswap_limit is not None:
self['MemorySwap'] = parse_bytes(memswap_limit)
if mem_reservation:
if version_lt(version, '1.21'):
raise host_config_version_error('mem_reservation', '1.21')
self['MemoryReservation'] = parse_bytes(mem_reservation)
if kernel_memory:
if version_lt(version, '1.21'):
raise host_config_version_error('kernel_memory', '1.21')
self['KernelMemory'] = parse_bytes(kernel_memory)
if mem_swappiness is not None:
if version_lt(version, '1.20'):
raise host_config_version_error('mem_swappiness', '1.20')
if not isinstance(mem_swappiness, int):
raise host_config_type_error(
'mem_swappiness', mem_swappiness, 'int'
)
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
if isinstance(shm_size, six.string_types):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
if pid_mode:
if version_lt(version, '1.24') and pid_mode != 'host':
raise host_config_value_error('pid_mode', pid_mode)
self['PidMode'] = pid_mode
if ipc_mode:
self['IpcMode'] = ipc_mode
if privileged:
self['Privileged'] = privileged
if oom_kill_disable:
if version_lt(version, '1.20'):
raise host_config_version_error('oom_kill_disable', '1.20')
self['OomKillDisable'] = oom_kill_disable
if oom_score_adj:
if version_lt(version, '1.22'):
raise host_config_version_error('oom_score_adj', '1.22')
if not isinstance(oom_score_adj, int):
raise host_config_type_error(
'oom_score_adj', oom_score_adj, 'int'
)
self['OomScoreAdj'] = oom_score_adj
if publish_all_ports:
self['PublishAllPorts'] = publish_all_ports
if read_only is not None:
self['ReadonlyRootfs'] = read_only
if dns_search:
self['DnsSearch'] = dns_search
if network_mode:
self['NetworkMode'] = network_mode
elif network_mode is None and version_gte(version, '1.20'):
self['NetworkMode'] = 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
raise host_config_type_error(
'restart_policy', restart_policy, 'dict'
)
self['RestartPolicy'] = restart_policy
if cap_add:
self['CapAdd'] = cap_add
if cap_drop:
self['CapDrop'] = cap_drop
if devices:
self['Devices'] = parse_devices(devices)
if group_add:
if version_lt(version, '1.20'):
raise host_config_version_error('group_add', '1.20')
self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
if dns_opt is not None:
if version_lt(version, '1.21'):
raise host_config_version_error('dns_opt', '1.21')
self['DnsOptions'] = dns_opt
if security_opt is not None:
if not isinstance(security_opt, list):
raise host_config_type_error(
'security_opt', security_opt, 'list'
)
self['SecurityOpt'] = security_opt
if sysctls:
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
for k, v in six.iteritems(sysctls):
self['Sysctls'][k] = six.text_type(v)
if volumes_from is not None:
if isinstance(volumes_from, six.string_types):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
if binds is not None:
self['Binds'] = convert_volume_binds(binds)
if port_bindings is not None:
self['PortBindings'] = convert_port_bindings(port_bindings)
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
extra_hosts = [
'{0}:{1}'.format(k, v)
for k, v in sorted(six.iteritems(extra_hosts))
]
self['ExtraHosts'] = extra_hosts
if links is not None:
self['Links'] = normalize_links(links)
if isinstance(lxc_conf, dict):
formatted = []
for k, v in six.iteritems(lxc_conf):
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
if lxc_conf is not None:
self['LxcConf'] = lxc_conf
if cgroup_parent is not None:
self['CgroupParent'] = cgroup_parent
if ulimits is not None:
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = []
for l in ulimits:
if not isinstance(l, Ulimit):
l = Ulimit(**l)
self['Ulimits'].append(l)
if log_config is not None:
if not isinstance(log_config, LogConfig):
if not isinstance(log_config, dict):
raise host_config_type_error(
'log_config', log_config, 'LogConfig'
)
log_config = LogConfig(**log_config)
self['LogConfig'] = log_config
if cpu_quota:
if not isinstance(cpu_quota, int):
raise host_config_type_error('cpu_quota', cpu_quota, 'int')
if version_lt(version, '1.19'):
raise host_config_version_error('cpu_quota', '1.19')
self['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
raise host_config_type_error('cpu_period', cpu_period, 'int')
if version_lt(version, '1.19'):
raise host_config_version_error('cpu_period', '1.19')
self['CpuPeriod'] = cpu_period
if cpu_shares:
if version_lt(version, '1.18'):
raise host_config_version_error('cpu_shares', '1.18')
if not isinstance(cpu_shares, int):
raise host_config_type_error('cpu_shares', cpu_shares, 'int')
self['CpuShares'] = cpu_shares
if cpuset_cpus:
if version_lt(version, '1.18'):
raise host_config_version_error('cpuset_cpus', '1.18')
self['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
if version_lt(version, '1.19'):
raise host_config_version_error('cpuset_mems', '1.19')
if not isinstance(cpuset_mems, str):
raise host_config_type_error(
'cpuset_mems', cpuset_mems, 'str'
)
self['CpusetMems'] = cpuset_mems
if blkio_weight:
if not isinstance(blkio_weight, int):
raise host_config_type_error(
'blkio_weight', blkio_weight, 'int'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight', '1.22')
self["BlkioWeight"] = blkio_weight
if blkio_weight_device:
if not isinstance(blkio_weight_device, list):
raise host_config_type_error(
'blkio_weight_device', blkio_weight_device, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight_device', '1.22')
self["BlkioWeightDevice"] = blkio_weight_device
if device_read_bps:
if not isinstance(device_read_bps, list):
raise host_config_type_error(
'device_read_bps', device_read_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_bps', '1.22')
self["BlkioDeviceReadBps"] = device_read_bps
if device_write_bps:
if not isinstance(device_write_bps, list):
raise host_config_type_error(
'device_write_bps', device_write_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_bps', '1.22')
self["BlkioDeviceWriteBps"] = device_write_bps
if device_read_iops:
if not isinstance(device_read_iops, list):
raise host_config_type_error(
'device_read_iops', device_read_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_iops', '1.22')
self["BlkioDeviceReadIOps"] = device_read_iops
if device_write_iops:
if not isinstance(device_write_iops, list):
raise host_config_type_error(
'device_write_iops', device_write_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_iops', '1.22')
self["BlkioDeviceWriteIOps"] = device_write_iops
if tmpfs:
if version_lt(version, '1.22'):
raise host_config_version_error('tmpfs', '1.22')
self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
if userns_mode:
if version_lt(version, '1.23'):
raise host_config_version_error('userns_mode', '1.23')
if userns_mode != "host":
raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
if version_lt(version, '1.23'):
raise host_config_version_error('pids_limit', '1.23')
self["PidsLimit"] = pids_limit
if isolation:
if not isinstance(isolation, six.string_types):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
self['Isolation'] = isolation
if auto_remove:
if version_lt(version, '1.25'):
raise host_config_version_error('auto_remove', '1.25')
self['AutoRemove'] = auto_remove
if storage_opt is not None:
if version_lt(version, '1.24'):
raise host_config_version_error('storage_opt', '1.24')
self['StorageOpt'] = storage_opt
if init is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init', '1.25')
self['Init'] = init
if init_path is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init_path', '1.25')
if version_gte(version, '1.29'):
# https://github.com/moby/moby/pull/32470
raise host_config_version_error('init_path', '1.29', False)
self['InitPath'] = init_path
if volume_driver is not None:
if version_lt(version, '1.21'):
raise host_config_version_error('volume_driver', '1.21')
self['VolumeDriver'] = volume_driver
if cpu_count:
if not isinstance(cpu_count, int):
raise host_config_type_error('cpu_count', cpu_count, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_count', '1.25')
self['CpuCount'] = cpu_count
if cpu_percent:
if not isinstance(cpu_percent, int):
raise host_config_type_error('cpu_percent', cpu_percent, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_percent', '1.25')
self['CpuPercent'] = cpu_percent
if nano_cpus:
if not isinstance(nano_cpus, six.integer_types):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
self['NanoCpus'] = nano_cpus
if runtime:
if version_lt(version, '1.25'):
raise host_config_version_error('runtime', '1.25')
self['Runtime'] = runtime
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
return TypeError(error_msg.format(param, expected, type(param_value)))
def host_config_version_error(param, version, less_than=True):
operator = '<' if less_than else '>'
error_msg = '{0} param is not supported in API versions {1} {2}'
return errors.InvalidVersion(error_msg.format(param, operator, version))
def host_config_value_error(param, param_value):
error_msg = 'Invalid value for {0} param: {1}'
return ValueError(error_msg.format(param, param_value))
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
stdin_open=False, tty=False, mem_limit=None, ports=None, dns=None,
environment=None, volumes=None, volumes_from=None,
network_disabled=False, entrypoint=None, cpu_shares=None,
working_dir=None, domainname=None, memswap_limit=None, cpuset=None,
host_config=None, mac_address=None, labels=None, volume_driver=None,
stop_signal=None, networking_config=None, healthcheck=None,
stop_timeout=None, runtime=None
):
if version_gte(version, '1.10'):
message = ('{0!r} parameter has no effect on create_container().'
' It has been moved to host_config')
if dns is not None:
raise errors.InvalidVersion(message.format('dns'))
if volumes_from is not None:
raise errors.InvalidVersion(message.format('volumes_from'))
if version_lt(version, '1.18'):
if labels is not None:
raise errors.InvalidVersion(
'labels were only introduced in API version 1.18'
)
else:
if cpuset is not None or cpu_shares is not None:
warnings.warn(
'The cpuset_cpus and cpu_shares options have been moved to'
' host_config in API version 1.18, and will be removed',
DeprecationWarning
)
if version_lt(version, '1.19'):
if volume_driver is not None:
raise errors.InvalidVersion(
'Volume drivers were only introduced in API version 1.19'
)
mem_limit = mem_limit if mem_limit is not None else 0
memswap_limit = memswap_limit if memswap_limit is not None else 0
else:
if mem_limit is not None:
raise errors.InvalidVersion(
'mem_limit has been moved to host_config in API version'
' 1.19'
)
if memswap_limit is not None:
raise errors.InvalidVersion(
'memswap_limit has been moved to host_config in API '
'version 1.19'
)
if version_lt(version, '1.21'):
if stop_signal is not None:
raise errors.InvalidVersion(
'stop_signal was only introduced in API version 1.21'
)
else:
if volume_driver is not None:
warnings.warn(
'The volume_driver option has been moved to'
' host_config in API version 1.21, and will be removed',
DeprecationWarning
)
if stop_timeout is not None and version_lt(version, '1.25'):
raise errors.InvalidVersion(
'stop_timeout was only introduced in API version 1.25'
)
if healthcheck is not None:
if version_lt(version, '1.24'):
raise errors.InvalidVersion(
'Health options were only introduced in API version 1.24'
)
if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
raise errors.InvalidVersion(
'healthcheck start period was introduced in API '
'version 1.29'
)
if isinstance(command, six.string_types):
command = split_command(command)
if isinstance(entrypoint, six.string_types):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
labels = dict((lbl, six.text_type('')) for lbl in labels)
if mem_limit is not None:
mem_limit = parse_bytes(mem_limit)
if memswap_limit is not None:
memswap_limit = parse_bytes(memswap_limit)
if isinstance(ports, list):
exposed_ports = {}
for port_definition in ports:
port = port_definition
proto = 'tcp'
if isinstance(port_definition, tuple):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
exposed_ports['{0}/{1}'.format(port, proto)] = {}
ports = exposed_ports
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
if isinstance(volumes, list):
volumes_dict = {}
for vol in volumes:
volumes_dict[vol] = {}
volumes = volumes_dict
if volumes_from:
if not isinstance(volumes_from, six.string_types):
volumes_from = ','.join(volumes_from)
else:
# Force None, an empty list or dict causes client.start to fail
volumes_from = None
if healthcheck and isinstance(healthcheck, dict):
healthcheck = Healthcheck(**healthcheck)
attach_stdin = False
attach_stdout = False
attach_stderr = False
stdin_once = False
if not detach:
attach_stdout = True
attach_stderr = True
if stdin_open:
attach_stdin = True
stdin_once = True
self.update({
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
'User': six.text_type(user) if user else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
'Memory': mem_limit,
'AttachStdin': attach_stdin,
'AttachStdout': attach_stdout,
'AttachStderr': attach_stderr,
'Env': environment,
'Cmd': command,
'Dns': dns,
'Image': image,
'Volumes': volumes,
'VolumesFrom': volumes_from,
'NetworkDisabled': network_disabled,
'Entrypoint': entrypoint,
'CpuShares': cpu_shares,
'Cpuset': cpuset,
'CpusetCpus': cpuset,
'WorkingDir': working_dir,
'MemorySwap': memswap_limit,
'HostConfig': host_config,
'NetworkingConfig': networking_config,
'MacAddress': mac_address,
'Labels': labels,
'VolumeDriver': volume_driver,
'StopSignal': stop_signal,
'Healthcheck': healthcheck,
'StopTimeout': stop_timeout,
'Runtime': runtime
})
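# --- Hedged usage sketch (illustrative only, not part of docker-py itself) ---
# Exercises the dict-backed helper types defined above; the values are made up.
if __name__ == '__main__':
    lc = LogConfig(type=LogConfigTypesEnum.JSON, config={'max-size': '1g'})
    lc.set_config_value('max-file', '3')
    ul = Ulimit(name='nofile', soft=1024, hard=2048)
    print(lc.type, lc.config)         # json-file {'max-size': '1g', 'max-file': '3'}
    print(ul.name, ul.soft, ul.hard)  # nofile 1024 2048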
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import difflib
import argparse
import argcomplete
import azure.cli.core.telemetry as telemetry
from azure.cli.core.azlogging import CommandLoggerContext
from azure.cli.core.extension import get_extension
from azure.cli.core.commands import ExtensionCommandSource
from azure.cli.core.commands import AzCliCommandInvoker
from azure.cli.core.commands.events import EVENT_INVOKER_ON_TAB_COMPLETION
from azure.cli.core.command_recommender import CommandRecommender
from azure.cli.core.azclierror import UnrecognizedArgumentError
from azure.cli.core.azclierror import RequiredArgumentMissingError
from azure.cli.core.azclierror import InvalidArgumentValueError
from azure.cli.core.azclierror import ArgumentUsageError
from azure.cli.core.azclierror import CommandNotFoundError
from azure.cli.core.azclierror import ValidationError
from knack.log import get_logger
from knack.parser import CLICommandParser
from knack.util import CLIError
logger = get_logger(__name__)
EXTENSION_REFERENCE = ("If the command is from an extension, "
"please make sure the corresponding extension is installed. "
"To learn more about extensions, please visit "
"'https://docs.microsoft.com/cli/azure/azure-cli-extensions-overview'")
OVERVIEW_REFERENCE = ("Still stuck? Run '{command} --help' to view all commands or go to "
"'https://aka.ms/cli_ref' to learn more")
class IncorrectUsageError(CLIError):
'''Raised when a command is incorrectly used and the usage should be
displayed to the user.
'''
pass # pylint: disable=unnecessary-pass
class AzCompletionFinder(argcomplete.CompletionFinder):
def _get_completions(self, comp_words, cword_prefix, cword_prequote, last_wordbreak_pos):
external_completions = []
self._parser.cli_ctx.raise_event(EVENT_INVOKER_ON_TAB_COMPLETION,
external_completions=external_completions,
parser=self._parser,
comp_words=comp_words,
cword_prefix=cword_prefix,
cword_prequote=cword_prequote,
last_wordbreak_pos=last_wordbreak_pos)
return external_completions + super(AzCompletionFinder, self)._get_completions(comp_words,
cword_prefix,
cword_prequote,
last_wordbreak_pos)
class AzCliCommandParser(CLICommandParser):
"""ArgumentParser implementation specialized for the Azure CLI utility."""
def __init__(self, cli_ctx=None, cli_help=None, **kwargs):
self.command_source = kwargs.pop('_command_source', None)
self._raw_arguments = None
self._namespace = None
self._suggestion_msg = []
self.subparser_map = {}
self.specified_arguments = []
super(AzCliCommandParser, self).__init__(cli_ctx, cli_help=cli_help, **kwargs)
def load_command_table(self, command_loader):
"""Load a command table into our parser."""
# If we haven't already added a subparser, we
# better do it.
cmd_tbl = command_loader.command_table
grp_tbl = command_loader.command_group_table
if not self.subparsers:
sp = self.add_subparsers(dest='_command_package')
sp.required = True
self.subparsers = {(): sp}
for command_name, metadata in cmd_tbl.items():
subparser = self._get_subparser(command_name.split(), grp_tbl)
deprecate_info = metadata.deprecate_info
if not subparser or (deprecate_info and deprecate_info.expired()):
continue
command_verb = command_name.split()[-1]
# To work around http://bugs.python.org/issue9253, we artificially add any new
# parsers we add to the "choices" section of the subparser.
subparser.choices[command_verb] = command_verb
# inject command_module designer's help formatter -- default is HelpFormatter
fc = metadata.formatter_class or argparse.HelpFormatter
command_parser = subparser.add_parser(command_verb,
description=metadata.description,
parents=self.parents,
conflict_handler='error',
help_file=metadata.help,
formatter_class=fc,
cli_help=self.cli_help,
_command_source=metadata.command_source)
self.subparser_map[command_name] = command_parser
command_parser.cli_ctx = self.cli_ctx
command_validator = metadata.validator
argument_validators = []
argument_groups = {}
for _, arg in metadata.arguments.items():
# don't add deprecated arguments to the parser
deprecate_info = arg.type.settings.get('deprecate_info', None)
if deprecate_info and deprecate_info.expired():
continue
if arg.validator:
argument_validators.append(arg.validator)
try:
if arg.arg_group:
try:
group = argument_groups[arg.arg_group]
except KeyError:
# group not found so create
group_name = '{} Arguments'.format(arg.arg_group)
group = command_parser.add_argument_group(arg.arg_group, group_name)
argument_groups[arg.arg_group] = group
param = AzCliCommandParser._add_argument(group, arg)
else:
param = AzCliCommandParser._add_argument(command_parser, arg)
except argparse.ArgumentError as ex:
raise CLIError("command authoring error for '{}': '{}' {}".format(
command_name, ex.args[0].dest, ex.message)) # pylint: disable=no-member
param.completer = arg.completer
param.deprecate_info = arg.deprecate_info
param.preview_info = arg.preview_info
param.experimental_info = arg.experimental_info
param.default_value_source = arg.default_value_source
command_parser.set_defaults(
func=metadata,
command=command_name,
_cmd=metadata,
_command_validator=command_validator,
_argument_validators=argument_validators,
_parser=command_parser)
def validation_error(self, message):
az_error = ValidationError(message)
az_error.print_error()
az_error.send_telemetry()
self.exit(2)
def error(self, message):
# Get a recommended command from the CommandRecommender
command_arguments = self._get_failure_recovery_arguments()
cli_ctx = self.cli_ctx or (self.cli_help.cli_ctx if self.cli_help else None)
recommender = CommandRecommender(*command_arguments, message, cli_ctx)
recommender.set_help_examples(self.get_examples(self.prog))
recommendation = recommender.recommend_a_command()
az_error = ArgumentUsageError(message)
if 'unrecognized arguments' in message:
az_error = UnrecognizedArgumentError(message)
elif 'arguments are required' in message:
az_error = RequiredArgumentMissingError(message)
elif 'invalid' in message:
az_error = InvalidArgumentValueError(message)
if '--query' in message:
from azure.cli.core.util import QUERY_REFERENCE
az_error.set_recommendation(QUERY_REFERENCE)
elif recommendation:
az_error.set_recommendation("Try this: '{}'".format(recommendation))
az_error.set_recommendation(OVERVIEW_REFERENCE.format(command=self.prog))
az_error.print_error()
az_error.send_telemetry()
self.exit(2)
def format_help(self):
extension_version = None
extension_name = None
try:
if isinstance(self.command_source, ExtensionCommandSource):
extension_name = self.command_source.extension_name
extension_version = get_extension(self.command_source.extension_name).version
except Exception: # pylint: disable=broad-except
pass
telemetry.set_command_details(
command=self.prog[3:],
extension_name=extension_name,
extension_version=extension_version)
telemetry.set_success(summary='show help')
super(AzCliCommandParser, self).format_help()
def get_examples(self, command):
if not self.cli_help:
return []
is_group = self.is_group()
return self.cli_help.get_examples(command,
self._actions[-1] if is_group else self,
is_group)
def enable_autocomplete(self):
argcomplete.autocomplete = AzCompletionFinder()
argcomplete.autocomplete(self, validator=lambda c, p: c.lower().startswith(p.lower()),
default_completer=lambda *args, **kwargs: ())
def _get_failure_recovery_arguments(self, action=None):
# Strip the leading "az " and any extraneous whitespace.
command = self.prog[3:].strip()
parameters = []
parameter_set = set()
raw_arguments = None
extension = None
# Extract only parameter names to ensure GDPR compliance
def extract_safe_params(parameters):
return AzCliCommandInvoker._extract_parameter_names(parameters) # pylint: disable=protected-access
# Check for extension name attribute
def has_extension_name(command_source):
is_extension_command_source = isinstance(command_source, ExtensionCommandSource)
has_extension_name = False
if is_extension_command_source:
has_extension_name = hasattr(command_source, 'extension_name')
return is_extension_command_source and has_extension_name
# If the arguments have been processed into a namespace...
if self._namespace:
# Select the parsed command.
if hasattr(self._namespace, 'command'):
command = self._namespace.command
# Parse parameter names from user input.
if self._raw_arguments:
raw_arguments = self._raw_arguments
parameters = extract_safe_params(self._raw_arguments)
for parameter in parameters:
parameter_set.add(parameter)
# If we can retrieve the extension from the current parser's command source...
if has_extension_name(self.command_source):
extension = self.command_source.extension_name
# Otherwise, the command may have not been in a command group. The command source will not be
# set in this case.
elif action and action.dest in ('_subcommand', '_command_package'):
# Get all parsers in the set of possible actions.
parsers = list(action.choices.values())
parser = parsers[0] if parsers else None
# If the first parser comes from an extension...
if parser and has_extension_name(parser.command_source):
# We're looking for a subcommand under an extension command group. Set the
# extension to reflect this.
extension = parser.command_source.extension_name
# Extend the command if the first raw argument is not a parameter.
if raw_arguments and raw_arguments[0] not in parameter_set:
command = '{cmd} {arg}'.format(cmd=command, arg=raw_arguments[0])
# Otherwise, only set the extension if every subparser comes from an extension. This occurs
# when an unrecognized argument is passed to a command from an extension.
elif isinstance(self.subparser_map, dict):
for _, subparser in self.subparser_map.items():
if isinstance(subparser.command_source, ExtensionCommandSource):
extension = subparser.command_source.extension_name
else:
extension = None
break
return command, self._raw_arguments, extension
def _get_values(self, action, arg_strings):
value = super(AzCliCommandParser, self)._get_values(action, arg_strings)
if action.dest and isinstance(action.dest, str) and not action.dest.startswith('_'):
self.specified_arguments.append(action.dest)
return value
def parse_known_args(self, args=None, namespace=None):
# retrieve the raw argument list in case parsing known arguments fails.
self._raw_arguments = args
# if parsing known arguments succeeds, get the command namespace and the argument list
self._namespace, self._raw_arguments = super().parse_known_args(args=args, namespace=namespace)
return self._namespace, self._raw_arguments
def _get_extension_command_tree(self):
from azure.cli.core._session import EXT_CMD_TREE
import os
VALID_SECOND = 3600 * 24 * 10
# self.cli_ctx is None when self.prog is beyond 'az', such as 'az iot'.
# use cli_ctx from cli_help which is not lost.
cli_ctx = self.cli_ctx or (self.cli_help.cli_ctx if self.cli_help else None)
if not cli_ctx:
return None
EXT_CMD_TREE.load(os.path.join(cli_ctx.config.config_dir, 'extensionCommandTree.json'), VALID_SECOND)
if not EXT_CMD_TREE.data:
import requests
from azure.cli.core.util import should_disable_connection_verify
try:
response = requests.get(
'https://azurecliextensionsync.blob.core.windows.net/cmd-index/extensionCommandTree.json',
verify=(not should_disable_connection_verify()),
timeout=10)
except Exception as ex: # pylint: disable=broad-except
logger.info("Request failed for extension command tree: %s", str(ex))
return None
if response.status_code == 200:
EXT_CMD_TREE.data = response.json()
EXT_CMD_TREE.save_with_retry()
else:
logger.info("Error when retrieving extension command tree. Response code: %s", response.status_code)
return None
return EXT_CMD_TREE
def _get_all_extensions(self, cmd_chain, ext_set=None):
"""Find all the extension names in cmd_chain (dict of extension command subtree).
An example of cmd_chain may look like (a command sub tree of the 'aks' command group):
{
"create": "aks-preview",
"update": "aks-preview",
"app": {
"up": "deploy-to-azure"
},
"use-dev-spaces": "dev-spaces"
}
Then the resulting ext_set is {'aks-preview', 'deploy-to-azure', 'dev-spaces'}
"""
ext_set = set() if ext_set is None else ext_set
for key in cmd_chain:
if isinstance(cmd_chain[key], str):
ext_set.add(cmd_chain[key])
else:
self._get_all_extensions(cmd_chain[key], ext_set)
return ext_set
def _search_in_extension_commands(self, command_str):
"""Search the command in an extension commands dict which mimics a prefix tree.
If the value of the dict item is a string, then the key represents the end of a complete command
and the value is the name of the extension that the command belongs to.
An example of the dict read from extensionCommandTree.json:
{
"aks": {
"create": "aks-preview",
"update": "aks-preview",
"app": {
"up": "deploy-to-azure"
},
"use-dev-spaces": "dev-spaces"
},
...
}
"""
cmd_chain = self._get_extension_command_tree()
if not cmd_chain:
return None
for part in command_str.split():
try:
if isinstance(cmd_chain[part], str):
return cmd_chain[part]
cmd_chain = cmd_chain[part]
except KeyError:
return None
# command_str is prefix of one or more complete commands.
all_exts = self._get_all_extensions(cmd_chain)
return list(all_exts) if all_exts else None
def _get_extension_use_dynamic_install_config(self):
cli_ctx = self.cli_ctx or (self.cli_help.cli_ctx if self.cli_help else None)
default_value = 'yes_prompt'
use_dynamic_install = cli_ctx.config.get(
'extension', 'use_dynamic_install', default_value).lower() if cli_ctx else default_value
if use_dynamic_install not in ['no', 'yes_prompt', 'yes_without_prompt']:
use_dynamic_install = default_value
return use_dynamic_install
def _get_extension_run_after_dynamic_install_config(self):
cli_ctx = self.cli_ctx or (self.cli_help.cli_ctx if self.cli_help else None)
default_value = True
run_after_extension_installed = cli_ctx.config.getboolean('extension',
'run_after_dynamic_install',
default_value) if cli_ctx else default_value
return run_after_extension_installed
def _check_value(self, action, value): # pylint: disable=too-many-statements, too-many-locals, too-many-branches
# Override to customize the error message when an argument is not among the available choices
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices: # pylint: disable=too-many-nested-blocks
# self.cli_ctx is None when self.prog is beyond 'az', such as 'az iot'.
# use cli_ctx from cli_help which is not lost.
cli_ctx = self.cli_ctx or (self.cli_help.cli_ctx if self.cli_help else None)
caused_by_extension_not_installed = False
command_name_inferred = self.prog
error_msg = None
if not self.command_source:
candidates = []
args = self.prog.split() + self._raw_arguments
use_dynamic_install = self._get_extension_use_dynamic_install_config()
if use_dynamic_install != 'no':
# Check if the command is from an extension
from azure.cli.core.util import roughly_parse_command
command_str = roughly_parse_command(args[1:])
ext_name = self._search_in_extension_commands(command_str)
# The input command matches the prefix of one or more extension commands
if isinstance(ext_name, list):
if len(ext_name) > 1:
from knack.prompting import prompt_choice_list, NoTTYException
try:
prompt_msg = "The command requires the latest version of one of the following " \
"extensions. You need to pick one to install:"
choice_idx = prompt_choice_list(prompt_msg, ext_name)
ext_name = ext_name[choice_idx]
use_dynamic_install = 'yes_without_prompt'
except NoTTYException:
error_msg = "{}{}\nUnable to prompt for selection as no tty available. Please " \
"update or install the extension with 'az extension add --upgrade -n " \
"<extension-name>'.".format(prompt_msg, ext_name)
logger.error(error_msg)
telemetry.set_user_fault(error_msg)
self.exit(2)
else:
ext_name = ext_name[0]
if ext_name:
caused_by_extension_not_installed = True
telemetry.set_command_details(command_str,
parameters=AzCliCommandInvoker._extract_parameter_names(args), # pylint: disable=protected-access
extension_name=ext_name)
run_after_extension_installed = self._get_extension_run_after_dynamic_install_config()
prompt_info = ""
if use_dynamic_install == 'yes_without_prompt':
logger.warning('The command requires the extension %s. '
'It will be installed first.', ext_name)
go_on = True
else:
from knack.prompting import prompt_y_n, NoTTYException
prompt_msg = 'The command requires the extension {}. ' \
'Do you want to install it now?'.format(ext_name)
if run_after_extension_installed:
prompt_msg = '{} The command will continue to run after the extension is installed.' \
.format(prompt_msg)
NO_PROMPT_CONFIG_MSG = "Run 'az config set extension.use_dynamic_install=" \
"yes_without_prompt' to allow installing extensions without prompt."
try:
go_on = prompt_y_n(prompt_msg, default='y')
if go_on:
prompt_info = " with prompt"
logger.warning(NO_PROMPT_CONFIG_MSG)
except NoTTYException:
error_msg = "The command requires the extension {}. " \
"Unable to prompt for extension install confirmation as no tty " \
"available. {}".format(ext_name, NO_PROMPT_CONFIG_MSG)
go_on = False
if go_on:
from azure.cli.core.extension.operations import add_extension
add_extension(cli_ctx=cli_ctx, extension_name=ext_name, upgrade=True)
if run_after_extension_installed:
import subprocess
import platform
exit_code = subprocess.call(args, shell=platform.system() == 'Windows')
error_msg = ("Extension {} dynamically installed{} and commands will be "
"rerun automatically.").format(ext_name, prompt_info)
telemetry.set_user_fault(error_msg)
self.exit(exit_code)
else:
with CommandLoggerContext(logger):
error_msg = 'Extension {} installed{}. Please rerun your command.' \
.format(ext_name, prompt_info)
logger.error(error_msg)
telemetry.set_user_fault(error_msg)
self.exit(2)
else:
error_msg = "The command requires the latest version of extension {ext_name}. " \
"To install, run 'az extension add --upgrade -n {ext_name}'.".format(
ext_name=ext_name) if not error_msg else error_msg
if not error_msg:
# parser has no `command_source`, value is part of command itself
error_msg = "'{value}' is misspelled or not recognized by the system.".format(value=value)
az_error = CommandNotFoundError(error_msg)
if not caused_by_extension_not_installed:
candidates = difflib.get_close_matches(value, action.choices, cutoff=0.7)
if candidates:
# use the most likely candidate to replace the misspelled command
args_inferred = [item if item != value else candidates[0] for item in args]
command_name_inferred = ' '.join(args_inferred).split('-')[0]
else:
# `command_source` indicates command values have been parsed, value is an argument
parameter = action.option_strings[0] if action.option_strings else action.dest
error_msg = "{prog}: '{value}' is not a valid value for '{param}'.".format(
prog=self.prog, value=value, param=parameter)
candidates = difflib.get_close_matches(value, action.choices, cutoff=0.7)
az_error = InvalidArgumentValueError(error_msg)
command_arguments = self._get_failure_recovery_arguments(action)
if candidates:
az_error.set_recommendation("Did you mean '{}' ?".format(candidates[0]))
# recommend a command for user
if not caused_by_extension_not_installed:
recommender = CommandRecommender(*command_arguments, error_msg, cli_ctx)
recommender.set_help_examples(self.get_examples(command_name_inferred))
recommended_command = recommender.recommend_a_command()
if recommended_command:
az_error.set_recommendation("Try this: '{}'".format(recommended_command))
# remind user to check extensions if we can not find a command to recommend
if isinstance(az_error, CommandNotFoundError) \
and not az_error.recommendations and self.prog == 'az' \
and use_dynamic_install == 'no':
az_error.set_recommendation(EXTENSION_REFERENCE)
az_error.set_recommendation(OVERVIEW_REFERENCE.format(command=self.prog))
az_error.print_error()
az_error.send_telemetry()
self.exit(2)
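# --- Hedged illustration (not part of azure-cli) ---
# A standalone sketch of the prefix-tree lookup performed by
# _search_in_extension_commands above, using the example 'aks' subtree from its
# docstring. The tree and the command string below are example data only.
def _demo_extension_tree_lookup(command_str='aks app up'):
    tree = {'aks': {'create': 'aks-preview', 'app': {'up': 'deploy-to-azure'}}}
    node = tree
    for part in command_str.split():
        if part not in node:
            return None                  # unknown command
        node = node[part]
        if isinstance(node, str):
            return node                  # complete command: value names the owning extension
    return node                          # command_str is only a prefix: remaining subtree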
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_1_0.models.ndmp_logs_node import NdmpLogsNode # noqa: F401,E501
from isi_sdk_8_1_0.models.node_drives_purposelist_error import NodeDrivesPurposelistError # noqa: F401,E501
class NdmpLogs(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'errors': 'list[NodeDrivesPurposelistError]',
'nodes': 'list[NdmpLogsNode]',
'total': 'int'
}
attribute_map = {
'errors': 'errors',
'nodes': 'nodes',
'total': 'total'
}
def __init__(self, errors=None, nodes=None, total=None): # noqa: E501
"""NdmpLogs - a model defined in Swagger""" # noqa: E501
self._errors = None
self._nodes = None
self._total = None
self.discriminator = None
if errors is not None:
self.errors = errors
if nodes is not None:
self.nodes = nodes
if total is not None:
self.total = total
@property
def errors(self):
"""Gets the errors of this NdmpLogs. # noqa: E501
A list of errors encountered by the individual nodes involved in this request, or an empty list if there were no errors. # noqa: E501
:return: The errors of this NdmpLogs. # noqa: E501
:rtype: list[NodeDrivesPurposelistError]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this NdmpLogs.
A list of errors encountered by the individual nodes involved in this request, or an empty list if there were no errors. # noqa: E501
:param errors: The errors of this NdmpLogs. # noqa: E501
:type: list[NodeDrivesPurposelistError]
"""
self._errors = errors
@property
def nodes(self):
"""Gets the nodes of this NdmpLogs. # noqa: E501
The responses from the individual nodes involved in this request. # noqa: E501
:return: The nodes of this NdmpLogs. # noqa: E501
:rtype: list[NdmpLogsNode]
"""
return self._nodes
@nodes.setter
def nodes(self, nodes):
"""Sets the nodes of this NdmpLogs.
The responses from the individual nodes involved in this request. # noqa: E501
:param nodes: The nodes of this NdmpLogs. # noqa: E501
:type: list[NdmpLogsNode]
"""
self._nodes = nodes
@property
def total(self):
"""Gets the total of this NdmpLogs. # noqa: E501
The total number of nodes responding. # noqa: E501
:return: The total of this NdmpLogs. # noqa: E501
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this NdmpLogs.
The total number of nodes responding. # noqa: E501
:param total: The total of this NdmpLogs. # noqa: E501
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NdmpLogs):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
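# Hedged illustration (not part of the generated model): constructing an NdmpLogs
# instance and round-tripping it through to_dict(). The nodes list is left empty so
# that nothing is assumed about the NdmpLogsNode constructor.
if __name__ == '__main__':
    logs = NdmpLogs(errors=[], nodes=[], total=0)
    print(logs.to_dict())                                   # {'errors': [], 'nodes': [], 'total': 0}
    print(logs == NdmpLogs(errors=[], nodes=[], total=0))   # True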
|
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlWegdekvoegType(KeuzelijstField):
"""Vormen van wegdekvoeg."""
naam = 'KlWegdekvoegType'
label = 'Voeg type'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlWegdekvoegType'
definition = 'Vormen van wegdekvoeg.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlWegdekvoegType'
options = {
'DGB-compoundvoeg': KeuzelijstWaarde(invulwaarde='DGB-compoundvoeg',
label='DGB compoundvoeg',
definitie='Een voeg die het uitzetten en krimpen van materialen, ook wel werking genoemd, opvangt.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/DGB-compoundvoeg'),
'dwarse-werkvoeg': KeuzelijstWaarde(invulwaarde='dwarse-werkvoeg',
label='dwarse werkvoeg',
definitie='Een dwarse voeg die het uitzetten en krimpen van materialen, ook wel werking genoemd, opvangt.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/dwarse-werkvoeg'),
'geëxtrudeerde-voegband': KeuzelijstWaarde(invulwaarde='geëxtrudeerde-voegband',
label='geëxtrudeerde voegband',
definitie='Een geëxtrudeerde voegband die het uitzetten en krimpen van materialen, ook wel werking genoemd, opvangt.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/geëxtrudeerde-voegband'),
'isolatievoeg-tussen-bestaande-constructie-en-betonverharding': KeuzelijstWaarde(invulwaarde='isolatievoeg-tussen-bestaande-constructie-en-betonverharding',
label='isolatievoeg tussen bestaande constructie en betonverharding',
definitie='isolatievoeg tussen bestaande constructie en betonverharding',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/isolatievoeg-tussen-bestaande-constructie-en-betonverharding'),
'langse-buigingsvoeg': KeuzelijstWaarde(invulwaarde='langse-buigingsvoeg',
label='langse buigingsvoeg',
definitie='Een zaagsnede om de verharding toe te laten te scharnieren volgens de lengteas en om de spanningen ingevolge de thermische gradiënt te beperken.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/langse-buigingsvoeg'),
'langse-werkvoeg': KeuzelijstWaarde(invulwaarde='langse-werkvoeg',
label='langse werkvoeg',
definitie='Een langse voeg die het uitzetten en krimpen van materialen, ook wel werking genoemd, opvangt.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/langse-werkvoeg'),
'langsvoeg-tussen-asfalt-en-beton': KeuzelijstWaarde(invulwaarde='langsvoeg-tussen-asfalt-en-beton',
label='langsvoeg tussen asfalt en beton',
definitie='Een doorgaande voeg in de lengterichting tussen asfalt en beton.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/langsvoeg-tussen-asfalt-en-beton'),
'langsvoeg-tussen-fietspad-en-betonverharding': KeuzelijstWaarde(invulwaarde='langsvoeg-tussen-fietspad-en-betonverharding',
label='langsvoeg tussen fietspad en betonverharding',
definitie='Een doorgaande voeg in de lengterichting tussen een fietspad en betonverharding.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/langsvoeg-tussen-fietspad-en-betonverharding'),
'langsvoeg-tussen-lijnvormig-element-en-betonverharding': KeuzelijstWaarde(invulwaarde='langsvoeg-tussen-lijnvormig-element-en-betonverharding',
label='langsvoeg tussen lijnvormig element en betonverharding',
definitie='Een doorgaande voeg in de lengterichting tussen een lijnvormig element en betonverharding.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/langsvoeg-tussen-lijnvormig-element-en-betonverharding'),
'langsvoeg-tussen-lijnvormig-element-en-bitumineuze-verharding': KeuzelijstWaarde(invulwaarde='langsvoeg-tussen-lijnvormig-element-en-bitumineuze-verharding',
label='langsvoeg tussen lijnvormig element en bitumineuze verharding',
definitie='langsvoeg tussen lijnvormig element en bitumineuze verharding',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/langsvoeg-tussen-lijnvormig-element-en-bitumineuze-verharding'),
'uitzettingsvoeg': KeuzelijstWaarde(invulwaarde='uitzettingsvoeg',
label='uitzettingsvoeg',
definitie='Een voeg die het uitzetten en krimpen van materialen, ook wel werking genoemd, opvangt.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/uitzettingsvoeg'),
'voorgevormde-voegband': KeuzelijstWaarde(invulwaarde='voorgevormde-voegband',
label='voorgevormde voegband',
definitie='Een voorgevormde voegband die het uitzetten en krimpen van materialen, ook wel werking genoemd, opvangt.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWegdekvoegType/voorgevormde-voegband')
}
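# Hedged illustration (not part of the generated file): iterating over the options of
# this keuzelijst. It assumes KeuzelijstWaarde exposes its constructor arguments
# (invulwaarde, label) as attributes.
if __name__ == '__main__':
    for waarde in KlWegdekvoegType.options.values():
        print(waarde.invulwaarde, '->', waarde.label)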
|
import torch
import torch.nn as nn
import torch.nn.init as init
from transformer.modules import Linear
from transformer.modules import ScaledDotProductAttention
from transformer.modules import LayerNormalization
class _MultiHeadAttention(nn.Module):
def __init__(self, d_k, d_v, d_model, n_heads, dropout):
super(_MultiHeadAttention, self).__init__()
self.d_k = d_k
self.d_v = d_v
self.d_model = d_model
self.n_heads = n_heads
self.w_q = nn.Parameter((-.5 - .5) * torch.rand(n_heads, d_model, d_k) + .5, requires_grad = True)
self.w_k = nn.Parameter((-.5 - .5) * torch.rand(n_heads, d_model, d_k) + .5, requires_grad = True)
self.w_v = nn.Parameter((-.5 - .5) * torch.rand(n_heads, d_model, d_k) + .5, requires_grad = True)
self.attention = ScaledDotProductAttention(d_k, dropout)
init.xavier_normal_(self.w_q)
init.xavier_normal_(self.w_k)
init.xavier_normal_(self.w_v)
def forward(self, q, k, v, attn_mask):
(d_k, d_v, d_model, n_heads) = (self.d_k, self.d_v, self.d_model, self.n_heads)
b_size = k.size(0)
q_s = q.repeat(n_heads, 1, 1).view(n_heads, -1, d_model) # [n_heads x b_size * len_q x d_model]
k_s = k.repeat(n_heads, 1, 1).view(n_heads, -1, d_model) # [n_heads x b_size * len_k x d_model]
v_s = v.repeat(n_heads, 1, 1).view(n_heads, -1, d_model) # [n_heads x b_size * len_v x d_model]
# print(q_s.size(), k_s.size(), v_s.size())
q_s = torch.bmm(q_s, self.w_q).view(b_size * n_heads, -1, d_k) # [b_size * n_heads x len_q x d_k]
k_s = torch.bmm(k_s, self.w_k).view(b_size * n_heads, -1, d_k) # [b_size * n_heads x len_k x d_k]
v_s = torch.bmm(v_s, self.w_v).view(b_size * n_heads, -1, d_v) # [b_size * n_heads x len_v x d_v]
# print(q_s.size(), k_s.size(), v_s.size())
# perform attention, result_size = [b_size * n_heads x len_q x d_v]
outputs, attn = self.attention(q_s, k_s, v_s, attn_mask=attn_mask.repeat(n_heads, 1, 1))
# return a list of tensors of shape [b_size x len_q x d_v] (length: n_heads)
return torch.split(outputs, b_size, dim=0), attn
class MultiHeadAttention(nn.Module):
def __init__(self, d_k, d_v, d_model, n_heads, dropout):
super(MultiHeadAttention, self).__init__()
self.attention = _MultiHeadAttention(d_k, d_v, d_model, n_heads, dropout)
self.proj = Linear(n_heads * d_k, d_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = LayerNormalization(d_model)
def forward(self, q, k, v, attn_mask):
# q: [b_size x len_q x d_model]
# k: [b_size x len_k x d_model]
# v: [b_size x len_v x d_model] note (len_k == len_v)
# residual = q
# outputs: a list of tensors of shape [b_size x len_q x d_v] (length: n_heads)
outputs, attn = self.attention(q, k, v, attn_mask=attn_mask)
# print(len(outputs), outputs[0].size()) # 8 torch.Size([16, 31, 64])
# concatenate 'n_heads' multi-head attentions
outputs = torch.cat(outputs, dim=-1) #torch.Size([16, 31, 512])
# project back to residual size, result_size = [b_size x len_q x d_model]
# outputs = self.proj(outputs) # torch.Size([16, 31, 300])
# outputs = self.dropout(outputs)
return outputs, attn # layer Norm
class MultiBranchAttention(nn.Module):
def __init__(self, d_k, d_v, d_model, d_ff, n_branches, dropout):
super(MultiBranchAttention, self).__init__()
self.d_k = d_k
self.d_v = d_v
self.d_model = d_model
self.d_ff = d_ff
self.n_branches = n_branches
self.attention = _MultiHeadAttention(d_k, d_v, d_model, n_branches, dropout)
# additional weights for BranchedAttention
self.w_o = nn.Parameter(torch.FloatTensor(n_branches, d_v, d_model)) # 8x64x300
self.w_kp = torch.rand(n_branches)
self.w_kp = nn.Parameter(self.w_kp/self.w_kp.sum())
self.w_a = torch.rand(n_branches)
self.w_a = nn.Parameter(self.w_a/self.w_a.sum())
self.pos_ffn = nn.ModuleList([
PoswiseFeedForwardNet(d_model, d_ff//n_branches, dropout) for _ in range(n_branches)])
self.dropout = nn.Dropout(dropout)
self.layer_norm = LayerNormalization(d_model)
init.xavier_normal_(self.w_o)
def forward(self, q, k, v, attn_mask):
# q: [b_size x len_q x d_model]
# k: [b_size x len_k x d_model]
# v: [b_size x len_v x d_model] note (len_k == len_v)
d_v, d_model, n_branches = self.d_v, self.d_model, self.n_branches
residual = q
b_size = k.size(0)
# outputs: a list of tensors of shape [b_size x len_q x d_v] (length: n_heads)
outputs, attn = self.attention(q, k, v, attn_mask=attn_mask)
# print(len(outputs), outputs[0].size()) # 8 torch.Size([16, 31, 64])
# print(torch.cat(outputs, dim=0).size()) # torch.Size([128, 31, 64])
outputs = torch.cat(outputs, dim=0).view(n_branches, -1, d_v) # 8, 496, 64
#outputs = outputs.view(b_size, -1, d_model)
# print(torch.bmm(outputs, self.w_o).size()) # 8,496,300
#print(outputs.size(), self.w_o.size())
outputs = torch.bmm(outputs, self.w_o).sum(dim=0).view(b_size, -1, d_model) # 16, 31 ,300
outputs = self.layer_norm(self.dropout(outputs) + residual) # [b_size x len_q x d_model]
# print(self.w_kp.size(), outputs.size()) # torch.Size([8]) torch.Size([16, 31, 300])
outputs = [kappa * outputs for kappa in self.w_kp]
# print(len(outputs)) [8] each has torch.Size([16, 31, 300])
# for pos_ffn in self.pos_ffn:
# x = pos_ffn(outputs[0])
# print(x.size()) # 16x31x300
outputs = torch.cat([pos_ffn(output) for output, pos_ffn \
in zip(outputs, self.pos_ffn)], dim=0).view(n_branches, -1, d_model)
# print(outputs.size()) # 128x31x300 reshaped to torch.Size([8, 496, 300])
outputs = self.w_a.view(-1, 1, 1) * outputs # [n_branches x b_size * len_q x d_model] torch.Size([8, 496, 300])
outputs = torch.sum(outputs, dim=0).view(b_size, -1, d_model) # [b_size x len_q x d_model] # 1x496x300 --> 16x31x300
return outputs, attn
class PoswiseFeedForwardNet(nn.Module):
def __init__(self, d_model, d_ff, dropout=0.1):
super(PoswiseFeedForwardNet, self).__init__()
self.relu = nn.ReLU()
self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
self.dropout = nn.Dropout(dropout)
self.layer_norm = LayerNormalization(d_model)
def forward(self, inputs):
residual = inputs # inputs: [b_size x len_q x d_model]
outputs = self.relu(self.conv1(inputs.transpose(1, 2)))
outputs = self.conv2(outputs).transpose(1, 2) # outputs: [b_size x len_q x d_model]
outputs = self.dropout(outputs)
return self.layer_norm(residual + outputs)
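# Hedged illustration (not part of the original module): a minimal scaled dot-product
# attention computed with plain torch ops, to show the shapes the classes above work
# with. It does not use transformer.modules, and the sizes are arbitrary examples.
if __name__ == '__main__':
    b_size, len_q, d_model = 2, 5, 16
    q = torch.rand(b_size, len_q, d_model)
    k = torch.rand(b_size, len_q, d_model)
    v = torch.rand(b_size, len_q, d_model)
    scores = torch.bmm(q, k.transpose(1, 2)) / (d_model ** 0.5)   # [b_size x len_q x len_q]
    attn = torch.softmax(scores, dim=-1)
    out = torch.bmm(attn, v)                                      # [b_size x len_q x d_model]
    print(out.size())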
|
############################################################################
# Copyright 2015 Valerio Morsella #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import logging
from exception import IllegalMethodCallException, FileNotFoundError
from galenpy.galen_webdriver import GalenRemoteWebDriver
from galenpy.thrift_client import ThriftClient
from pythrift.ttypes import SpecNotFoundException
logger = logging.getLogger()
class Galen(object):
"""
Galen API class.
Example usage.
driver = GalenWebDriver("http://localhost:4444/wd/hub", desired_capabilities=CHROME)
driver.get("http://example.com")
driver.set_window_size(720, 1024)
galen_api = Galen()
errors = galen_api.check_layout(driver, 'homePage.spec', ['phone'], None)
if errors != 0:
galen_api.generate_report("target/galen")
"""
def __init__(self, thrift_client=None):
self.thrift_client = thrift_client
def check_layout(self, driver, spec, included_tags, excluded_tags):
#TODO add multiple specs.
"""
Main validation method.
:param driver: An instance of GalenWebDriver.
:param spec: Specs to be run on the page under test.
:param included_tags: list of tags included in the check.
:param excluded_tags: list of tags excluded from check.
:return: CheckLayoutReport mapping info from the generated LayoutReport object in the Galen Server.
"""
if not isinstance(driver, GalenRemoteWebDriver):
raise ValueError("Provided driver object is not an instance of GalenWebDriver")
self.thrift_client = driver.thrift_client
try:
return self.thrift_client.check_layout(driver.session_id, spec, included_tags, excluded_tags)
except SpecNotFoundException as e:
raise IOError("Spec was not found: " + str(e.message))
def generate_report(self, report_folder):
"""
Generate Galen reports in the provided folder.
:param report_folder: target folder.
"""
if not self.thrift_client:
raise IllegalMethodCallException("generate_report() must be called after check_layout()")
logger.info("Generating reports in " + report_folder)
self.thrift_client.generate_report(report_folder)
self.thrift_client.quit_service_if_inactive()
def generate_galen_report(report_folder):
thrift_client = ThriftClient()
Galen(thrift_client).generate_report(report_folder)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import code
import warnings
import string
import argparse
from flask import _request_ctx_stack
from .cli import prompt, prompt_pass, prompt_bool, prompt_choices
class InvalidCommand(Exception):
pass
class Group(object):
"""
Stores argument groups and mutually exclusive groups for
`ArgumentParser.add_argument_group <http://argparse.googlecode.com/svn/trunk/doc/other-methods.html#argument-groups>`
or `ArgumentParser.add_mutually_exclusive_group <http://argparse.googlecode.com/svn/trunk/doc/other-methods.html#add_mutually_exclusive_group>`.
Note: The title and description params cannot be used with the exclusive
or required params.
:param options: A list of Option classes to add to this group
:param title: A string to use as the title of the argument group
:param description: A string to use as the description of the argument
group
:param exclusive: A boolean indicating if this is an argument group or a
mutually exclusive group
:param required: A boolean indicating if this mutually exclusive group
must have an option selected
"""
def __init__(self, *options, **kwargs):
self.option_list = options
self.title = kwargs.pop("title", None)
self.description = kwargs.pop("description", None)
self.exclusive = kwargs.pop("exclusive", None)
self.required = kwargs.pop("required", None)
if ((self.title or self.description) and
(self.required or self.exclusive)):
raise TypeError("title and/or description cannot be used with "
"required and/or exclusive.")
super(Group, self).__init__(**kwargs)
def get_options(self):
"""
By default, returns self.option_list. Override if you
need to do instance-specific configuration.
"""
return self.option_list
class Option(object):
"""
Stores positional and optional arguments for `ArgumentParser.add_argument
<http://argparse.googlecode.com/svn/trunk/doc/add_argument.html>`_.
:param name_or_flags: Either a name or a list of option strings,
e.g. foo or -f, --foo
:param action: The basic type of action to be taken when this argument
is encountered at the command-line.
:param nargs: The number of command-line arguments that should be consumed.
:param const: A constant value required by some action and nargs selections.
:param default: The value produced if the argument is absent from
the command-line.
:param type: The type to which the command-line arg should be converted.
:param choices: A container of the allowable values for the argument.
:param required: Whether or not the command-line option may be omitted
(optionals only).
:param help: A brief description of what the argument does.
:param metavar: A name for the argument in usage messages.
:param dest: The name of the attribute to be added to the object
returned by parse_args().
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class Command(object):
"""
Base class for creating commands.
"""
option_list = []
@property
def description(self):
description = self.__doc__ or ''
return description.strip()
def add_option(self, option):
"""
Adds Option to option list.
"""
self.option_list.append(option)
def get_options(self):
"""
By default, returns self.option_list. Override if you
need to do instance-specific configuration.
"""
return self.option_list
def create_parser(self, *args, **kwargs):
parser = argparse.ArgumentParser(*args, **kwargs)
for option in self.get_options():
if isinstance(option, Group):
if option.exclusive:
group = parser.add_mutually_exclusive_group(
required=option.required,
)
else:
group = parser.add_argument_group(
title=option.title,
description=option.description,
)
for opt in option.get_options():
group.add_argument(*opt.args, **opt.kwargs)
else:
parser.add_argument(*option.args, **option.kwargs)
parser.set_defaults(func_handle=self.handle)
return parser
def handle(self, app, *args, **kwargs):
"""
Handles the command with given app. Default behaviour is to call within
a test request context.
"""
with app.test_request_context():
return self.run(*args, **kwargs)
def run(self):
"""
Runs a command. This must be implemented by the subclass. Should take
arguments as configured by the Command options.
"""
raise NotImplementedError
def prompt(self, name, default=None):
warnings.warn_explicit(
"Command.prompt is deprecated, use prompt() function instead")
prompt(name, default)
def prompt_pass(self, name, default=None):
warnings.warn_explicit(
"Command.prompt_pass is deprecated, use prompt_pass() function "
"instead")
prompt_pass(name, default)
def prompt_bool(self, name, default=False):
warnings.warn_explicit(
"Command.prompt_bool is deprecated, use prompt_bool() function "
"instead")
prompt_bool(name, default)
def prompt_choices(self, name, choices, default=None):
warnings.warn_explicit(
"Command.choices is deprecated, use prompt_choices() function "
"instead")
prompt_choices(name, choices, default)
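# Hedged illustration (not part of the original module): a minimal Command subclass
# showing how option_list and run() fit together. The command and its option are
# hypothetical; in normal use the class would be registered on a Manager elsewhere.
class _HelloExample(Command):
    "Prints a greeting"
    option_list = (
        Option('--name', '-n', dest='name', default='world'),
    )
    def run(self, name):
        print('hello %s' % name)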
class Shell(Command):
"""
Runs a Python shell inside Flask application context.
:param banner: banner appearing at top of shell when started
:param make_context: a callable returning a dict of variables
used in the shell namespace. By default
returns a dict consisting of just the app.
:param use_bpython: use BPython shell if available, ignore if not.
The BPython shell can be turned off in command
line by passing the **--no-bpython** flag.
:param use_ipython: use IPython shell if available, ignore if not.
The IPython shell can be turned off in command
line by passing the **--no-ipython** flag.
"""
banner = ''
help = description = 'Runs a Python shell inside Flask application context.'
def __init__(self, banner=None, make_context=None, use_ipython=True,
use_bpython=True):
self.banner = banner or self.banner
self.use_ipython = use_ipython
self.use_bpython = use_bpython
if make_context is None:
make_context = lambda: dict(app=_request_ctx_stack.top.app)
self.make_context = make_context
def get_options(self):
return (
Option('--no-ipython',
action="store_true",
dest='no_ipython',
default=not(self.use_ipython)),
Option('--no-bpython',
action="store_true",
dest='no_bpython',
default=not(self.use_bpython))
)
def get_context(self):
"""
Returns a dict of context variables added to the shell namespace.
"""
return self.make_context()
def run(self, no_ipython, no_bpython):
"""
Runs the shell. If no_bpython is False or use_bpython is True, then
a BPython shell is run (if installed). Else, if no_ipython is False or
use_ipython is True then an IPython shell is run (if installed).
"""
context = self.get_context()
if not no_bpython:
# Try BPython
try:
from bpython import embed
embed(banner=self.banner, locals_=context)
return
except ImportError:
pass
if not no_ipython:
# Try IPython
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=self.banner)
ipshell(global_ns=dict(), local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=self.banner, user_ns=context)
return
except ImportError:
pass
# Use basic python shell
code.interact(self.banner, local=context)
class Server(Command):
"""
Runs the Flask development server i.e. app.run()
:param host: server host
:param port: server port
:param use_debugger: if False, will no longer use Werkzeug debugger.
This can be overridden in the command line
by passing the **-d** flag.
:param use_reloader: if False, will no longer use auto-reloader.
This can be overridden in the command line by
passing the **-r** flag.
:param threaded: should the process handle each request in a separate
thread?
:param processes: number of processes to spawn
:param passthrough_errors: disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.)
:param options: :func:`werkzeug.run_simple` options.
"""
help = description = 'Runs the Flask development server i.e. app.run()'
def __init__(self, host='127.0.0.1', port=5000, use_debugger=True,
use_reloader=True, threaded=False, processes=1,
passthrough_errors=False, **options):
self.port = port
self.host = host
self.use_debugger = use_debugger
self.use_reloader = use_reloader
self.server_options = options
self.threaded = threaded
self.processes = processes
self.passthrough_errors = passthrough_errors
def get_options(self):
options = (
Option('-t', '--host',
dest='host',
default=self.host),
Option('-p', '--port',
dest='port',
type=int,
default=self.port),
Option('--threaded',
dest='threaded',
action='store_true',
default=self.threaded),
Option('--processes',
dest='processes',
type=int,
default=self.processes),
Option('--passthrough-errors',
action='store_true',
dest='passthrough_errors',
default=self.passthrough_errors),
)
if self.use_debugger:
options += (Option('-d', '--no-debug',
action='store_false',
dest='use_debugger',
default=self.use_debugger),)
else:
options += (Option('-d', '--debug',
action='store_true',
dest='use_debugger',
default=self.use_debugger),)
if self.use_reloader:
options += (Option('-r', '--no-reload',
action='store_false',
dest='use_reloader',
default=self.use_reloader),)
else:
options += (Option('-r', '--reload',
action='store_true',
dest='use_reloader',
default=self.use_reloader),)
return options
def handle(self, app, host, port, use_debugger, use_reloader,
threaded, processes, passthrough_errors):
# we don't need to run the server in request context
# so just run it directly
app.run(host=host,
port=port,
debug=use_debugger,
use_debugger=use_debugger,
use_reloader=use_reloader,
threaded=threaded,
processes=processes,
passthrough_errors=passthrough_errors,
**self.server_options)
class Clean(Command):
"Remove *.pyc and *.pyo files recursively starting at current directory"
def run(self):
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.pyc') or filename.endswith('.pyo'):
full_pathname = os.path.join(dirpath, filename)
print('Removing %s' % full_pathname)
os.remove(full_pathname)
class ShowUrls(Command):
"""
Displays all of the url matching routes for the project
"""
def __init__(self, order='rule'):
self.order = order
def get_options(self):
options = super(ShowUrls, self).get_options()
options += Option('url',
nargs='?',
help='Url to test (ex. /static/image.png)',
),
options += Option('--order',
dest='order',
default=self.order,
help='Property on Rule to order by (default: %s)' % self.order,
),
return options
def run(self, url, order):
from flask import current_app
from werkzeug.exceptions import NotFound, MethodNotAllowed
rows = []
column_length = 0
column_headers = ('Rule', 'Endpoint', 'Arguments')
if url:
try:
rule, arguments = current_app.url_map \
.bind('localhost') \
.match(url, return_rule=True)
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append(("<%s>" % e, None, None))
column_length = 1
else:
rules = sorted(current_app.url_map.iter_rules(), key=lambda rule: getattr(rule, order))
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ''
table_width = 0
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += '%-' + str(max_rule_length) + 's'
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
# max_endpoint_length = max(rows, key=len)
max_endpoint_length = max_endpoint_length if max_endpoint_length > 8 else 8
str_template += ' %-' + str(max_endpoint_length) + 's'
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = max_arguments_length if max_arguments_length > 9 else 9
str_template += ' %-' + str(max_arguments_length) + 's'
table_width += 2 + max_arguments_length
print(str_template % (column_headers[:column_length]))
print('-' * table_width)
for row in rows:
print(str_template % row[:column_length])
|
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class AppServicePHPVersion(BaseResourceValueCheck):
def __init__(self):
name = "Ensure that 'PHP version' is the latest, if used to run the web app"
id = "CKV_AZURE_81"
supported_resources = ['azurerm_app_service']
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,
missing_block_result=CheckResult.PASSED)
def get_inspected_key(self):
return "site_config/[0]/php_version/[0]"
def get_expected_value(self):
return '7.4'
check = AppServicePHPVersion()
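# Hedged illustration (not part of the check): a Terraform resource that this check
# would evaluate as PASSED, assuming "7.4" is the expected latest PHP version above.
# Other required arguments of azurerm_app_service are omitted for brevity.
#
# resource "azurerm_app_service" "example" {
#   name = "example-app-service"
#   site_config {
#     php_version = "7.4"
#   }
# }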
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reverb-based adders."""
from typing import Dict, Mapping, Sequence
from acme import types
from acme.adders.reverb import base
from acme.tf import utils as tf2_utils
import numpy as np
import tree
def final_step_like(step: base.Step,
next_observation: types.NestedArray) -> base.Step:
"""Return a list of steps with the final step zero-filled."""
# Make zero-filled components so we can fill out the last step.
zero_action, zero_reward, zero_discount, zero_extras = tree.map_structure(
np.zeros_like, (step.action, step.reward, step.discount, step.extras))
# Return a final step that only has next_observation.
return base.Step(
observation=next_observation,
action=zero_action,
reward=zero_reward,
discount=zero_discount,
extras=zero_extras)
def calculate_priorities(priority_fns: Mapping[str, base.PriorityFn],
steps: Sequence[base.Step]) -> Dict[str, float]:
"""Helper used to calculate the priority of a sequence of steps.
This converts the sequence of steps into a PriorityFnInput tuple where the
components of each step (actions, observations, etc.) are stacked along the
time dimension.
Priorities are calculated for the sequence or transition that starts from
step[0].next_observation. As a result, the stack of observations comes from
steps[0:] whereas all other components (e.g. actions, rewards, discounts,
extras) corresponds to steps[1:].
Note: this means that all components other than the observation will be
ignored from step[0]. This also means that step[0] is allowed to correspond to
an "initial step" in which case the action, reward, discount, and extras are
each None, which is handled properly by this function.
Args:
priority_fns: a mapping from table names to priority functions (i.e. a
callable of type PriorityFn). The given function will be used to generate
the priority (a float) for the given table.
steps: a list of Step objects used to compute the priorities.
Returns:
A dictionary mapping from table names to the priority (a float) for the
given collection of steps.
"""
# Stack the steps and wrap them as PriorityFnInput.
fn_input = base.PriorityFnInput(*tf2_utils.stack_sequence_fields(steps))
return {
table: priority_fn(fn_input)
for table, priority_fn in priority_fns.items()
}
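# Hedged illustration (not part of the original module): building a zero-filled final
# step from a toy Step, mirroring the base.Step construction used in final_step_like.
# The field values below are arbitrary example data.
if __name__ == '__main__':
    toy_step = base.Step(
        observation=np.zeros(3, dtype=np.float32),
        action=np.ones(2, dtype=np.float32),
        reward=np.float32(1.0),
        discount=np.float32(0.99),
        extras={})
    final = final_step_like(toy_step, next_observation=np.ones(3, dtype=np.float32))
    print(final.reward, final.discount)   # both zero-filled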
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json # NOQA
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.compute.base import NodeImage
from libcloud.compute.drivers.digitalocean import DigitalOceanNodeDriver
from libcloud.test import LibcloudTestCase, MockHttpTestCase
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import DIGITALOCEAN_v1_PARAMS
# class DigitalOceanTests(unittest.TestCase, TestCaseMixin):
class DigitalOcean_v1_Tests(LibcloudTestCase):
def setUp(self):
DigitalOceanNodeDriver.connectionCls.conn_classes = \
(None, DigitalOceanMockHttp)
DigitalOceanMockHttp.type = None
self.driver = DigitalOceanNodeDriver(*DIGITALOCEAN_v1_PARAMS,
api_version='v1')
def test_authentication(self):
DigitalOceanMockHttp.type = 'UNAUTHORIZED_CLIENT'
self.assertRaises(InvalidCredsError, self.driver.list_nodes)
def test_list_images_success(self):
images = self.driver.list_images()
self.assertTrue(len(images) >= 1)
image = images[0]
self.assertTrue(image.id is not None)
self.assertTrue(image.name is not None)
def test_list_sizes_success(self):
sizes = self.driver.list_sizes()
self.assertTrue(len(sizes) >= 1)
size = sizes[0]
self.assertTrue(size.id is not None)
self.assertEqual(size.name, '512MB')
self.assertEqual(size.ram, 512)
size = sizes[4]
self.assertTrue(size.id is not None)
self.assertEqual(size.name, '8GB')
self.assertEqual(size.ram, 8 * 1024)
def test_list_locations_success(self):
locations = self.driver.list_locations()
self.assertTrue(len(locations) >= 1)
location = locations[0]
self.assertEqual(location.id, '1')
self.assertEqual(location.name, 'New York 1')
def test_list_nodes_success(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].name, 'test-2')
self.assertEqual(nodes[0].public_ips, [])
self.assertEqual(nodes[0].extra['image_id'], 1601)
self.assertEqual(nodes[0].extra['size_id'], 66)
def test_create_node_invalid_size(self):
image = NodeImage(id='invalid', name=None, driver=self.driver)
size = self.driver.list_sizes()[0]
location = self.driver.list_locations()[0]
DigitalOceanMockHttp.type = 'INVALID_IMAGE'
expected_msg = r'You specified an invalid image for Droplet creation. \(code: 404\)'
self.assertRaisesRegexp(Exception, expected_msg,
self.driver.create_node,
name='test', size=size, image=image,
location=location)
def test_reboot_node_success(self):
node = self.driver.list_nodes()[0]
result = self.driver.reboot_node(node)
self.assertTrue(result)
def test_destroy_node_success(self):
node = self.driver.list_nodes()[0]
result = self.driver.destroy_node(node)
self.assertTrue(result)
def test_ex_rename_node_success(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_rename_node(node, 'fedora helios')
self.assertTrue(result)
def test_ex_list_ssh_keys(self):
keys = self.driver.ex_list_ssh_keys()
self.assertEqual(len(keys), 1)
self.assertEqual(keys[0].id, 7717)
self.assertEqual(keys[0].name, 'test1')
self.assertEqual(keys[0].pub_key, None)
def test_ex_destroy_ssh_key(self):
key = self.driver.ex_list_ssh_keys()[0]
result = self.driver.ex_destroy_ssh_key(key.id)
self.assertTrue(result)
class DigitalOceanMockHttp(MockHttpTestCase):
fixtures = ComputeFileFixtures('digitalocean_v1')
def _v1_regions(self, method, url, body, headers):
body = self.fixtures.load('list_locations.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_images(self, method, url, body, headers):
body = self.fixtures.load('list_images.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_sizes(self, method, url, body, headers):
body = self.fixtures.load('list_sizes.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_droplets(self, method, url, body, headers):
body = self.fixtures.load('list_nodes.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_droplets_new_INVALID_IMAGE(self, method, url, body, headers):
# create_node (invalid image)
body = self.fixtures.load('error_invalid_image.json')
return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND])
def _v1_droplets_119461_reboot(self, method, url, body, headers):
# reboot_node
body = self.fixtures.load('reboot_node.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_droplets_119461_destroy(self, method, url, body, headers):
# destroy_node
self.assertUrlContainsQueryParams(url, {'scrub_data': '1'})
body = self.fixtures.load('destroy_node.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_droplets_119461_rename(self, method, url, body, headers):
# ex_rename_node
self.assertUrlContainsQueryParams(url, {'name': 'fedora helios'})
body = self.fixtures.load('ex_rename_node.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_ssh_keys(self, method, url, body, headers):
body = self.fixtures.load('ex_list_ssh_keys.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_ssh_keys_7717_destroy(self, method, url, body, headers):
# destroy_ssh_key
body = self.fixtures.load('ex_destroy_ssh_key.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_droplets_UNAUTHORIZED_CLIENT(self, method, url, body, headers):
body = self.fixtures.load('error.txt')
return (httplib.FOUND, body, {}, httplib.responses[httplib.FOUND])
if __name__ == '__main__':
sys.exit(unittest.main())
|
import argparse
import os
import pickle as pkl
import numpy as np
import torch
from statsmodels.tsa.arima_process import ArmaProcess
from attribution.mask_group import MaskGroup
from attribution.perturbation import GaussianBlur
from baselines.explainers import FO, FP, IG, SVS
from utils.losses import mse
explainers = ["dynamask", "fo", "fp", "ig", "shap"]
def run_experiment(
cv: int = 0,
N_ex: int = 10,
T: int = 50,
N_features: int = 50,
N_select: int = 5,
save_dir: str = "experiments/results/rare_time/",
):
"""Run experiment.
Args:
cv: Do the experiment with different cv to obtain error bars.
N_ex: Number of time series to generate.
T: Length of each time series.
N_features: Number of features in each time series.
N_select: Number of time steps that are truly salient.
save_dir: Directory where the results should be saved.
Return:
None
"""
# Create the saving directory if it does not exist
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Initialize useful variables
random_seed = cv
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.manual_seed(random_seed)
np.random.seed(random_seed)
pert = GaussianBlur(device=device) # We use a Gaussian Blur perturbation operator
# Generate the input data
ar = np.array([2, 0.5, 0.2, 0.1]) # AR coefficients
ma = np.array([2]) # MA coefficients
data_arima = ArmaProcess(ar=ar, ma=ma).generate_sample(nsample=(N_ex, T, N_features), axis=1)
X = torch.tensor(data_arima, device=device, dtype=torch.float32)
# Initialize the saliency tensors
true_saliency = torch.zeros(size=(N_ex, T, N_features), device=device, dtype=torch.int64)
dynamask_saliency = torch.zeros(size=true_saliency.shape, device=device)
fo_saliency = torch.zeros(size=true_saliency.shape, device=device)
fp_saliency = torch.zeros(size=true_saliency.shape, device=device)
ig_saliency = torch.zeros(size=true_saliency.shape, device=device)
shap_saliency = torch.zeros(size=true_saliency.shape, device=device)
for k in range(N_ex): # We compute the attribution for each individual time series
print(f"Now working on example {k + 1}/{N_ex}.")
# The truly salient times are selected randomly
t_rand = np.random.randint(low=0, high=T - N_select)
true_saliency[k, t_rand : t_rand + N_select, int(N_features / 4) : int(3 * N_features / 4)] = 1
x = X[k, :, :]
# The white box only depends on the truly salient features
def f(input):
output = torch.zeros(input.shape, device=input.device)
output[t_rand : t_rand + N_select, int(N_features / 4) : int(3 * N_features / 4)] = input[
t_rand : t_rand + N_select, int(N_features / 4) : int(3 * N_features / 4)
]
output = (output ** 2).sum(dim=-1)
return output
# Dynamask attribution
mask_group = MaskGroup(perturbation=pert, device=device, random_seed=random_seed, verbose=False)
mask_group.fit(
f=f,
X=x,
area_list=np.arange(0.001, 0.051, 0.001),
loss_function=mse,
n_epoch=1000,
size_reg_factor_dilation=1000,
size_reg_factor_init=1,
learning_rate=1,
)
mask = mask_group.get_best_mask()
dynamask_attr = mask.mask_tensor.clone().detach()
dynamask_saliency[k, :, :] = dynamask_attr
# Feature Occlusion attribution
fo = FO(f=f)
fo_attr = fo.attribute(x)
fo_saliency[k, :, :] = fo_attr
# Feature Permutation attribution
fp = FP(f=f)
fp_attr = fp.attribute(x)
fp_saliency[k, :, :] = fp_attr
# Integrated Gradient attribution
ig = IG(f=f)
ig_attr = ig.attribute(x)
ig_saliency[k, :, :] = ig_attr
# Sampling Shapley Value attribution
shap = SVS(f=f)
shap_attr = shap.attribute(x)
shap_saliency[k, :, :] = shap_attr
# Save everything in the directory
with open(os.path.join(save_dir, f"true_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(true_saliency, file)
with open(os.path.join(save_dir, f"dynamask_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(dynamask_saliency, file)
with open(os.path.join(save_dir, f"fo_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(fo_saliency, file)
with open(os.path.join(save_dir, f"fp_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(fp_saliency, file)
with open(os.path.join(save_dir, f"ig_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(ig_saliency, file)
with open(os.path.join(save_dir, f"shap_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(shap_saliency, file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--cv", default=0, type=int)
args = parser.parse_args()
run_experiment(cv=args.cv)
|
"""
Assorted utilities for working with neural networks in AllenNLP.
"""
# pylint: disable=too-many-lines
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar
import logging
import copy
import math
import torch
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
T = TypeVar('T')
def has_tensor(obj) -> bool:
"""
Given a possibly complex data structure,
check if it has any torch.Tensors in it.
"""
if isinstance(obj, torch.Tensor):
return True
elif isinstance(obj, dict):
return any(has_tensor(value) for value in obj.values())
elif isinstance(obj, (list, tuple)):
return any(has_tensor(item) for item in obj)
else:
return False
def move_to_device(obj, cuda_device: int):
"""
Given a structure (possibly) containing Tensors on the CPU,
move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
"""
if cuda_device < 0 or not has_tensor(obj):
return obj
elif isinstance(obj, torch.Tensor):
return obj.cuda(cuda_device)
elif isinstance(obj, dict):
return {key: move_to_device(value, cuda_device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, cuda_device) for item in obj]
elif isinstance(obj, tuple):
return tuple([move_to_device(item, cuda_device) for item in obj])
else:
return obj
def clamp_tensor(tensor, minimum, maximum):
"""
Supports sparse and dense tensors.
Returns a tensor with values clamped between the provided minimum and maximum,
without modifying the original tensor.
"""
if tensor.is_sparse:
coalesced_tensor = tensor.coalesce()
# pylint: disable=protected-access
coalesced_tensor._values().clamp_(minimum, maximum)
return coalesced_tensor
else:
return tensor.clamp(minimum, maximum)
def batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]],
remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]:
"""
Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys,
and returns a single dictionary with all tensors with the same key batched together.
Parameters
----------
tensor_dicts : ``List[Dict[str, torch.Tensor]]``
The list of tensor dictionaries to batch.
remove_trailing_dimension : ``bool``
If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being
batched, and remove it if we find it.
"""
key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list)
for tensor_dict in tensor_dicts:
for key, tensor in tensor_dict.items():
key_to_tensors[key].append(tensor)
batched_tensors = {}
for key, tensor_list in key_to_tensors.items():
batched_tensor = torch.stack(tensor_list)
if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list):
batched_tensor = batched_tensor.squeeze(-1)
batched_tensors[key] = batched_tensor
return batched_tensors
def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):
"""
Compute sequence lengths for each batch element in a tensor using a
binary mask.
Parameters
----------
mask : torch.Tensor, required.
A 2D binary mask of shape (batch_size, sequence_length) to
calculate the per-batch sequence lengths from.
Returns
-------
A torch.LongTensor of shape (batch_size,) representing the lengths
of the sequences in the batch.
"""
return mask.long().sum(-1)
def get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:
"""
Given a variable of shape ``(batch_size,)`` that represents the sequence lengths of each batch
element, this function returns a ``(batch_size, max_length)`` mask variable. For example, if
our input was ``[2, 2, 3]``, with a ``max_length`` of 4, we'd return
``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.
We require ``max_length`` here instead of just computing it from the input ``sequence_lengths``
because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
that we can use it to construct a new tensor.
"""
# (batch_size, max_length)
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long()
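# Illustrative usage sketch (added for clarity, not part of the original module);
# the _demo_* helper below is hypothetical and mirrors the docstring example.
def _demo_get_mask_from_sequence_lengths():
    lengths = torch.tensor([2, 2, 3])
    mask = get_mask_from_sequence_lengths(lengths, max_length=4)
    # mask == tensor([[1, 1, 0, 0],
    #                 [1, 1, 0, 0],
    #                 [1, 1, 1, 0]])
    return mask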
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
"""
Sort a batch first tensor by some specified lengths.
Parameters
----------
tensor : torch.FloatTensor, required.
A batch first Pytorch tensor.
sequence_lengths : torch.LongTensor, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
Returns
-------
sorted_tensor : torch.FloatTensor
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : torch.LongTensor
The original sequence_lengths sorted by decreasing size.
restoration_indices : torch.LongTensor
Indices into the sorted_tensor such that
``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
    permutation_index : torch.LongTensor
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
"""
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
sorted_tensor = tensor.index_select(0, permutation_index)
index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device)
# This is the equivalent of zipping with index, sorting by the original
# sequence lengths and returning the now sorted indices.
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
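# Illustrative usage sketch (added for clarity, not part of the original module):
# sorting a hypothetical batch by length and undoing the sort with restoration_indices.
def _demo_sort_batch_by_length():
    tensor = torch.randn(3, 5, 7)
    lengths = torch.tensor([2, 5, 3])
    sorted_tensor, sorted_lengths, restoration_indices, _ = sort_batch_by_length(tensor, lengths)
    # sorted_lengths == tensor([5, 3, 2]); index_select restores the original order.
    assert torch.equal(sorted_tensor.index_select(0, restoration_indices), tensor)
    return sorted_tensor, sorted_lengths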
def get_final_encoder_states(encoder_outputs: torch.Tensor,
mask: torch.Tensor,
bidirectional: bool = False) -> torch.Tensor:
"""
Given the output from a ``Seq2SeqEncoder``, with shape ``(batch_size, sequence_length,
encoding_dim)``, this method returns the final hidden state for each element of the batch,
giving a tensor of shape ``(batch_size, encoding_dim)``. This is not as simple as
``encoder_outputs[:, -1]``, because the sequences could have different lengths. We use the
mask (which has shape ``(batch_size, sequence_length)``) to find the final state for each batch
instance.
Additionally, if ``bidirectional`` is ``True``, we will split the final dimension of the
``encoder_outputs`` into two and assume that the first half is for the forward direction of the
encoder and the second half is for the backward direction. We will concatenate the last state
for each encoder dimension, giving ``encoder_outputs[:, -1, :encoding_dim/2]`` concatenated with
``encoder_outputs[:, 0, encoding_dim/2:]``.
"""
# These are the indices of the last words in the sequences (i.e. length sans padding - 1). We
# are assuming sequences are right padded.
# Shape: (batch_size,)
last_word_indices = mask.sum(1).long() - 1
batch_size, _, encoder_output_dim = encoder_outputs.size()
expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)
# Shape: (batch_size, 1, encoder_output_dim)
final_encoder_output = encoder_outputs.gather(1, expanded_indices)
final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)
if bidirectional:
final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]
final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]
final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)
return final_encoder_output
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):
"""
Computes and returns an element-wise dropout mask for a given tensor, where
each element in the mask is dropped out with probability dropout_probability.
Note that the mask is NOT applied to the tensor - the tensor is passed to retain
the correct CUDA tensor type for the mask.
Parameters
----------
dropout_probability : float, required.
Probability of dropping a dimension of the input.
tensor_for_masking : torch.Tensor, required.
Returns
-------
A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
This scaling ensures expected values and variances of the output of applying this mask
and the original tensor are the same.
"""
binary_mask = (torch.rand(tensor_for_masking.size()) > dropout_probability).to(tensor_for_masking.device)
# Scale mask by 1/keep_prob to preserve output statistics.
dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
return dropout_mask
def masked_softmax(vector: torch.Tensor,
mask: torch.Tensor,
dim: int = -1,
memory_efficient: bool = False,
mask_fill_value: float = -1e32) -> torch.Tensor:
"""
``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
If ``memory_efficient`` is set to true, we will simply use a very large negative number for those
masked positions so that the probabilities of those positions would be approximately 0.
This is not accurate in math, but works for most cases and consumes less memory.
In the case that the input vector is completely masked and ``memory_efficient`` is false, this function
returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of
a model that uses categorical cross-entropy loss. Instead, if ``memory_efficient`` is true, this function
will treat every element as equal, and do softmax over equal numbers.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill((1 - mask).byte(), mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result
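# Illustrative usage sketch (added for clarity, not part of the original module):
# masked positions receive (approximately) zero probability.
def _demo_masked_softmax():
    scores = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[1, 1, 0]])
    return masked_softmax(scores, mask)  # ~= tensor([[0.2689, 0.7311, 0.0000]])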
def masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
"""
``torch.nn.functional.log_softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a log_softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular log_softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, the return value of this function is
arbitrary, but not ``nan``. You should be masking the result of whatever computation comes out
of this in that case, anyway, so the specific values returned shouldn't matter. Also, the way
that we deal with this case relies on having single-precision floats; mixing half-precision
floats with fully-masked vectors will likely give you ``nans``.
If your logits are all extremely negative (i.e., the max value in your logit vector is -50 or
lower), the way we handle masking here could mess you up. But if you've got logit values that
extreme, you've got bigger problems than this.
"""
if mask is not None:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
# vector + mask.log() is an easy way to zero out masked elements in logspace, but it
# results in nans when the whole vector is masked. We need a very small value instead of a
# zero in the mask for these cases. log(1 + 1e-45) is still basically 0, so we can safely
# just add 1e-45 before calling mask.log(). We use 1e-45 because 1e-46 is so small it
# becomes 0 - this is just the smallest value we can actually use.
vector = vector + (mask + 1e-45).log()
return torch.nn.functional.log_softmax(vector, dim=dim)
def masked_max(vector: torch.Tensor,
mask: torch.Tensor,
dim: int,
keepdim: bool = False,
min_val: float = -1e7) -> torch.Tensor:
"""
To calculate max along certain dimensions on masked values
Parameters
----------
vector : ``torch.Tensor``
The vector to calculate max, assume unmasked parts are already zeros
mask : ``torch.Tensor``
The mask of the vector. It must be broadcastable with vector.
dim : ``int``
The dimension to calculate max
keepdim : ``bool``
Whether to keep dimension
min_val : ``float``
The minimal value for paddings
Returns
-------
    A ``torch.Tensor`` containing the maximum values.
"""
one_minus_mask = (1.0 - mask).byte()
replaced_vector = vector.masked_fill(one_minus_mask, min_val)
max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim)
return max_value
def masked_mean(vector: torch.Tensor,
mask: torch.Tensor,
dim: int,
keepdim: bool = False,
eps: float = 1e-8) -> torch.Tensor:
"""
To calculate mean along certain dimensions on masked values
Parameters
----------
vector : ``torch.Tensor``
The vector to calculate mean.
mask : ``torch.Tensor``
The mask of the vector. It must be broadcastable with vector.
dim : ``int``
The dimension to calculate mean
keepdim : ``bool``
Whether to keep dimension
eps : ``float``
A small value to avoid zero division problem.
Returns
-------
    A ``torch.Tensor`` containing the mean values.
"""
one_minus_mask = (1.0 - mask).byte()
replaced_vector = vector.masked_fill(one_minus_mask, 0.0)
value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim)
value_count = torch.sum(mask.float(), dim=dim, keepdim=keepdim)
return value_sum / value_count.clamp(min=eps)
def masked_flip(padded_sequence: torch.Tensor,
sequence_lengths: List[int]) -> torch.Tensor:
"""
Flips a padded tensor along the time dimension without affecting masked entries.
Parameters
----------
padded_sequence : ``torch.Tensor``
The tensor to flip along the time dimension.
Assumed to be of dimensions (batch size, num timesteps, ...)
    sequence_lengths : ``List[int]``
A list containing the lengths of each unpadded sequence in the batch.
Returns
-------
A ``torch.Tensor`` of the same shape as padded_sequence.
"""
assert padded_sequence.size(0) == len(sequence_lengths), \
        f'sequence_lengths length {len(sequence_lengths)} does not match batch size {padded_sequence.size(0)}'
num_timesteps = padded_sequence.size(1)
flipped_padded_sequence = torch.flip(padded_sequence, [1])
sequences = [flipped_padded_sequence[i, num_timesteps - length:] for i, length in enumerate(sequence_lengths)]
return torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)
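# Illustrative usage sketch (added for clarity, not part of the original module):
# only the unpadded prefix of each sequence is reversed.
def _demo_masked_flip():
    padded = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 0.0]]).unsqueeze(-1)
    flipped = masked_flip(padded, [3, 2])
    # flipped[..., 0] == tensor([[3., 2., 1.], [5., 4., 0.]])
    return flipped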
def viterbi_decode(tag_sequence: torch.Tensor,
transition_matrix: torch.Tensor,
tag_observations: Optional[List[int]] = None):
"""
Perform Viterbi decoding in log space over a sequence given a transition matrix
specifying pairwise (transition) potentials between tags and a matrix of shape
(sequence_length, num_tags) specifying unary potentials for possible tags per
timestep.
Parameters
----------
tag_sequence : torch.Tensor, required.
A tensor of shape (sequence_length, num_tags) representing scores for
a set of tags over a given sequence.
transition_matrix : torch.Tensor, required.
A tensor of shape (num_tags, num_tags) representing the binary potentials
for transitioning between a given pair of tags.
tag_observations : Optional[List[int]], optional, (default = None)
A list of length ``sequence_length`` containing the class ids of observed
elements in the sequence, with unobserved elements being set to -1. Note that
it is possible to provide evidence which results in degenerate labelings if
the sequences of tags you provide as evidence cannot transition between each
other, or those transitions are extremely unlikely. In this situation we log a
warning, but the responsibility for providing self-consistent evidence ultimately
lies with the user.
Returns
-------
viterbi_path : List[int]
The tag indices of the maximum likelihood tag sequence.
viterbi_score : torch.Tensor
The score of the viterbi path.
"""
sequence_length, num_tags = list(tag_sequence.size())
if tag_observations:
if len(tag_observations) != sequence_length:
raise ConfigurationError("Observations were provided, but they were not the same length "
"as the sequence. Found sequence of length: {} and evidence: {}"
.format(sequence_length, tag_observations))
else:
tag_observations = [-1 for _ in range(sequence_length)]
path_scores = []
path_indices = []
if tag_observations[0] != -1:
one_hot = torch.zeros(num_tags)
one_hot[tag_observations[0]] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[0, :])
# Evaluate the scores for all possible paths.
for timestep in range(1, sequence_length):
# Add pairwise potentials to current scores.
summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix
scores, paths = torch.max(summed_potentials, 0)
# If we have an observation for this timestep, use it
# instead of the distribution over tags.
observation = tag_observations[timestep]
# Warn the user if they have passed
# invalid/extremely unlikely evidence.
if tag_observations[timestep - 1] != -1:
if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
logger.warning("The pairwise potential between tags you have passed as "
"observations is extremely unlikely. Double check your evidence "
"or transition potentials!")
if observation != -1:
one_hot = torch.zeros(num_tags)
one_hot[observation] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[timestep, :] + scores.squeeze())
path_indices.append(paths.squeeze())
# Construct the most likely sequence backwards.
viterbi_score, best_path = torch.max(path_scores[-1], 0)
viterbi_path = [int(best_path.numpy())]
for backward_timestep in reversed(path_indices):
viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))
# Reverse the backward path.
viterbi_path.reverse()
return viterbi_path, viterbi_score
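# Illustrative usage sketch (added for clarity, not part of the original module):
# a tiny two-tag decode with hypothetical emission scores and zero transition potentials.
def _demo_viterbi_decode():
    emissions = torch.tensor([[5.0, 0.0], [0.0, 5.0], [5.0, 0.0]])
    transitions = torch.zeros(2, 2)
    path, score = viterbi_decode(emissions, transitions)
    # path == [0, 1, 0]; score == tensor(15.)
    return path, score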
def get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor],
num_wrapping_dims: int = 0) -> torch.LongTensor:
"""
Takes the dictionary of tensors produced by a ``TextField`` and returns a mask
with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields``
wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields``
is given by ``num_wrapping_dims``.
If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``.
If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra
dimensions, so the shape will be ``(batch_size, ..., num_tokens)``.
There could be several entries in the tensor dictionary with different shapes (e.g., one for
word ids, one for character ids). In order to get a token mask, we use the tensor in
the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``,
if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``,
and use it for the mask. If instead it has three dimensions, we assume it has shape
``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce
the mask. Most frequently this will be a character id tensor, but it could also be a
featurized representation of each token, etc.
If the input ``text_field_tensors`` contains the "mask" key, this is returned instead of inferring the mask.
TODO(joelgrus): can we change this?
NOTE: Our functions for generating masks create torch.LongTensors, because using
torch.ByteTensors makes it easy to run into overflow errors
when doing mask manipulation, such as summing to get the lengths of sequences - see below.
>>> mask = torch.ones([260]).byte()
>>> mask.sum() # equals 260.
    >>> var_mask = torch.autograd.Variable(mask)
>>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
"""
if "mask" in text_field_tensors:
return text_field_tensors["mask"]
tensor_dims = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()]
tensor_dims.sort(key=lambda x: x[0])
smallest_dim = tensor_dims[0][0] - num_wrapping_dims
if smallest_dim == 2:
token_tensor = tensor_dims[0][1]
return (token_tensor != 0).long()
elif smallest_dim == 3:
character_tensor = tensor_dims[0][1]
return ((character_tensor > 0).long().sum(dim=-1) > 0).long()
else:
raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim))
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
"""
Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an
"attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical
computation performed after an attention mechanism.
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle
higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we
assume that all dimensions in the "matrix" prior to the last dimension are matched in the
"vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`.
For example, say I have a "matrix" with dimensions ``(batch_size, num_queries, num_words,
embedding_dim)``. The attention "vector" then must have at least those dimensions, and could
have more. Both:
- ``(batch_size, num_queries, num_words)`` (distribution over words for each query)
- ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a
query for each document)
are valid input "vectors", producing tensors of shape:
``(batch_size, num_queries, embedding_dim)`` and
``(batch_size, num_documents, num_queries, embedding_dim)`` respectively.
"""
# We'll special-case a few settings here, where there are efficient (but poorly-named)
# operations in pytorch that already do the computation we need.
if attention.dim() == 2 and matrix.dim() == 3:
return attention.unsqueeze(1).bmm(matrix).squeeze(1)
if attention.dim() == 3 and matrix.dim() == 3:
return attention.bmm(matrix)
if matrix.dim() - 1 < attention.dim():
expanded_size = list(matrix.size())
for i in range(attention.dim() - matrix.dim() + 1):
matrix = matrix.unsqueeze(1)
expanded_size.insert(i + 1, attention.size(i + 1))
matrix = matrix.expand(*expanded_size)
intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
return intermediate.sum(dim=-2)
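# Illustrative usage sketch (added for clarity, not part of the original module):
# the standard attention read, a distribution over rows times the row vectors.
def _demo_weighted_sum():
    matrix = torch.randn(2, 5, 8)                          # (batch_size, num_words, embedding_dim)
    attention = torch.softmax(torch.randn(2, 5), dim=-1)   # (batch_size, num_words)
    context = weighted_sum(matrix, attention)
    assert context.size() == (2, 8)
    return context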
def sequence_cross_entropy_with_logits(logits: torch.FloatTensor,
targets: torch.LongTensor,
weights: torch.FloatTensor,
average: str = "batch",
label_smoothing: float = None) -> torch.FloatTensor:
"""
Computes the cross entropy loss of a sequence, weighted with respect to
some user provided weights. Note that the weighting here is not the same as
in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting
classes; here we are weighting the loss contribution from particular elements
in the sequence. This allows loss computations for models which use padding.
Parameters
----------
logits : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
which contains the unnormalized probability for each class.
targets : ``torch.LongTensor``, required.
A ``torch.LongTensor`` of size (batch, sequence_length) which contains the
index of the true class for each corresponding step.
weights : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch, sequence_length)
average: str, optional (default = "batch")
If "batch", average the loss across the batches. If "token", average
the loss across each item in the input. If ``None``, return a vector
of losses per batch element.
label_smoothing : ``float``, optional (default = None)
Whether or not to apply label smoothing to the cross-entropy loss.
For example, with a label smoothing value of 0.2, a 4 class classification
target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was
the correct label.
Returns
-------
A torch.FloatTensor representing the cross entropy loss.
If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar.
If ``average is None``, the returned loss is a vector of shape (batch_size,).
"""
if average not in {None, "token", "batch"}:
raise ValueError("Got average f{average}, expected one of "
"None, 'token', or 'batch'")
# shape : (batch * sequence_length, num_classes)
logits_flat = logits.view(-1, logits.size(-1))
# shape : (batch * sequence_length, num_classes)
log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
# shape : (batch * max_len, 1)
targets_flat = targets.view(-1, 1).long()
if label_smoothing is not None and label_smoothing > 0.0:
num_classes = logits.size(-1)
smoothing_value = label_smoothing / num_classes
# Fill all the correct indices with 1 - smoothing value.
one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)
smoothed_targets = one_hot_targets + smoothing_value
negative_log_likelihood_flat = - log_probs_flat * smoothed_targets
negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
else:
# Contribution to the negative log likelihood only comes from the exact indices
# of the targets, as the target distributions are one-hot. Here we use torch.gather
# to extract the indices of the num_classes dimension which contribute to the loss.
# shape : (batch * sequence_length, 1)
negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood * weights.float()
if average == "batch":
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)
return per_batch_loss.sum() / num_non_empty_sequences
elif average == "token":
return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)
else:
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
return per_batch_loss
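# Illustrative usage sketch (added for clarity, not part of the original module):
# padded positions (weight 0) contribute nothing to the loss.
def _demo_sequence_cross_entropy_with_logits():
    logits = torch.randn(2, 4, 5)
    targets = torch.randint(0, 5, (2, 4))
    weights = torch.tensor([[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]])
    return sequence_cross_entropy_with_logits(logits, targets, weights)  # scalar loss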
def replace_masked_values(tensor: torch.Tensor, mask: torch.Tensor, replace_with: float) -> torch.Tensor:
"""
Replaces all masked values in ``tensor`` with ``replace_with``. ``mask`` must be broadcastable
to the same shape as ``tensor``. We require that ``tensor.dim() == mask.dim()``, as otherwise we
won't know which dimensions of the mask to unsqueeze.
This just does ``tensor.masked_fill()``, except the pytorch method fills in things with a mask
value of 1, where we want the opposite. You can do this in your own code with
``tensor.masked_fill((1 - mask).byte(), replace_with)``.
"""
if tensor.dim() != mask.dim():
raise ConfigurationError("tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim()))
return tensor.masked_fill((1 - mask).byte(), replace_with)
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
"""
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests.
"""
# pylint: disable=too-many-return-statements
if isinstance(tensor1, (list, tuple)):
if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
return False
return all([tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2)])
elif isinstance(tensor1, dict):
if not isinstance(tensor2, dict):
return False
if tensor1.keys() != tensor2.keys():
return False
return all([tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1])
elif isinstance(tensor1, torch.Tensor):
if not isinstance(tensor2, torch.Tensor):
return False
if tensor1.size() != tensor2.size():
return False
return ((tensor1 - tensor2).abs().float() < tolerance).all()
else:
try:
return tensor1 == tensor2
except RuntimeError:
print(type(tensor1), type(tensor2))
raise
def device_mapping(cuda_device: int):
"""
In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
you have to supply a `map_location` function. Call this with
the desired `cuda_device` to get the function that `torch.load()` needs.
"""
def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage: # pylint: disable=unused-argument
if cuda_device >= 0:
return storage.cuda(cuda_device)
else:
return storage
return inner_device_mapping
def combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
"""
Combines a list of tensors using element-wise operations and concatenation, specified by a
``combination`` string. The string refers to (1-indexed) positions in the input tensor list,
and looks like ``"1,2,1+2,3-1"``.
We allow the following kinds of combinations: ``x``, ``x*y``, ``x+y``, ``x-y``, and ``x/y``,
where ``x`` and ``y`` are positive integers less than or equal to ``len(tensors)``. Each of
the binary operations is performed elementwise. You can give as many combinations as you want
in the ``combination`` string. For example, for the input string ``"1,2,1*2"``, the result
would be ``[1;2;1*2]``, as you would expect, where ``[;]`` is concatenation along the last
dimension.
If you have a fixed, known way to combine tensors that you use in a model, you should probably
just use something like ``torch.cat([x_tensor, y_tensor, x_tensor * y_tensor])``. This
function adds some complexity that is only necessary if you want the specific combination used
to be `configurable`.
If you want to do any element-wise operations, the tensors involved in each element-wise
operation must have the same shape.
This function also accepts ``x`` and ``y`` in place of ``1`` and ``2`` in the combination
string.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
to_concatenate = [_get_combination(piece, tensors) for piece in combination.split(',')]
return torch.cat(to_concatenate, dim=-1)
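# Illustrative usage sketch (added for clarity, not part of the original module):
# "1,2,1*2" concatenates x, y and their elementwise product along the last dimension.
def _demo_combine_tensors():
    x = torch.randn(4, 3)
    y = torch.randn(4, 3)
    combined = combine_tensors("1,2,1*2", [x, y])
    assert combined.size() == (4, 9)
    return combined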
def _rindex(sequence: Sequence[T], obj: T) -> int:
"""
Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a
ValueError if there is no such item.
Parameters
----------
sequence : ``Sequence[T]``
obj : ``T``
Returns
-------
zero-based index associated to the position of the last item equal to obj
"""
for i in range(len(sequence) - 1, -1, -1):
if sequence[i] == obj:
return i
raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
def _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return tensors[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == '*':
return first_tensor * second_tensor
elif operation == '/':
return first_tensor / second_tensor
elif operation == '+':
return first_tensor + second_tensor
elif operation == '-':
return first_tensor - second_tensor
else:
raise ConfigurationError("Invalid operation: " + operation)
def combine_tensors_and_multiply(combination: str,
tensors: List[torch.Tensor],
weights: torch.nn.Parameter) -> torch.Tensor:
"""
Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.
This is a separate function from ``combine_tensors`` because we try to avoid instantiating
large intermediate tensors during the combination, which is possible because we know that we're
going to be multiplying by a weight vector in the end.
Parameters
----------
combination : ``str``
Same as in :func:`combine_tensors`
tensors : ``List[torch.Tensor]``
A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : ``torch.nn.Parameter``
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by :func:`get_combined_dim`.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
pieces = combination.split(',')
tensor_dims = [tensor.size(-1) for tensor in tensors]
combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
dims_so_far = 0
to_sum = []
for piece, combination_dim in zip(pieces, combination_dims):
weight = weights[dims_so_far:(dims_so_far + combination_dim)]
dims_so_far += combination_dim
to_sum.append(_get_combination_and_multiply(piece, tensors, weight))
result = to_sum[0]
for result_piece in to_sum[1:]:
result = result + result_piece
return result
def _get_combination_and_multiply(combination: str,
tensors: List[torch.Tensor],
weight: torch.nn.Parameter) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return torch.matmul(tensors[index], weight)
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == '*':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == '/':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == '+':
return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
elif operation == '-':
return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
else:
raise ConfigurationError("Invalid operation: " + operation)
def get_combined_dim(combination: str, tensor_dims: List[int]) -> int:
"""
For use with :func:`combine_tensors`. This function computes the resultant dimension when
calling ``combine_tensors(combination, tensors)``, when the tensor dimension is known. This is
necessary for knowing the sizes of weight matrices when building models that use
``combine_tensors``.
Parameters
----------
combination : ``str``
A comma-separated list of combination pieces, like ``"1,2,1*2"``, specified identically to
``combination`` in :func:`combine_tensors`.
tensor_dims : ``List[int]``
A list of tensor dimensions, where each dimension is from the `last axis` of the tensors
that will be input to :func:`combine_tensors`.
"""
if len(tensor_dims) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
return sum([_get_combination_dim(piece, tensor_dims) for piece in combination.split(',')])
def _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:
if combination.isdigit():
index = int(combination) - 1
return tensor_dims[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor_dim = _get_combination_dim(combination[0], tensor_dims)
second_tensor_dim = _get_combination_dim(combination[2], tensor_dims)
operation = combination[1]
if first_tensor_dim != second_tensor_dim:
raise ConfigurationError("Tensor dims must match for operation \"{}\"".format(operation))
return first_tensor_dim
def logsumexp(tensor: torch.Tensor,
dim: int = -1,
keepdim: bool = False) -> torch.Tensor:
"""
A numerically stable computation of logsumexp. This is mathematically equivalent to
`tensor.exp().sum(dim, keep=keepdim).log()`. This function is typically used for summing log
probabilities.
Parameters
----------
tensor : torch.FloatTensor, required.
A tensor of arbitrary size.
dim : int, optional (default = -1)
The dimension of the tensor to apply the logsumexp to.
keepdim: bool, optional (default = False)
Whether to retain a dimension of size one at the dimension we reduce over.
"""
max_score, _ = tensor.max(dim, keepdim=keepdim)
if keepdim:
stable_vec = tensor - max_score
else:
stable_vec = tensor - max_score.unsqueeze(dim)
return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
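# Illustrative usage sketch (added for clarity, not part of the original module):
# summing two probabilities of 0.5 in log space gives log(1.0) = 0.
def _demo_logsumexp():
    log_probs = torch.tensor([0.5, 0.5]).log()
    return logsumexp(log_probs)  # ~= tensor(0.)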
def get_device_of(tensor: torch.Tensor) -> int:
"""
Returns the device of the tensor.
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device()
def flatten_and_batch_shift_indices(indices: torch.Tensor,
sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for :func:`~batched_index_select`. The given ``indices`` of size
``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor, which has size
``(batch_size, sequence_length, embedding_size)``. This function returns a vector that
correctly indexes into the flattened target. The sequence length of the target must be
provided to compute the appropriate offsets.
.. code-block:: python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
assert shifted_indices == [1, 1, 1, 11, 11, 11]
Parameters
----------
indices : ``torch.LongTensor``, required.
sequence_length : ``int``, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
Returns
-------
offset_indices : ``torch.LongTensor``
"""
# Shape: (batch_size)
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices
def batched_index_select(target: torch.Tensor,
indices: torch.LongTensor,
flattened_indices: Optional[torch.LongTensor] = None) -> torch.Tensor:
"""
The given ``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into the sequence
dimension (dimension 2) of the target, which has size ``(batch_size, sequence_length,
embedding_size)``.
This function returns selected values in the target with respect to the provided indices, which
have size ``(batch_size, d_1, ..., d_n, embedding_size)``. This can use the optionally
precomputed :func:`~flattened_indices` with size ``(batch_size * d_1 * ... * d_n)`` if given.
An example use case of this function is looking up the start and end indices of spans in a
sequence tensor. This is used in the
:class:`~allennlp.models.coreference_resolution.CoreferenceResolver`. Model to select
contextual word representations corresponding to the start and end indices of mentions. The key
reason this can't be done with basic torch functions is that we want to be able to use look-up
tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know
a-priori how many spans we are looking up).
Parameters
----------
target : ``torch.Tensor``, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : ``torch.LongTensor``
A tensor of shape (batch_size, ...), where each element is an index into the
``sequence_length`` dimension of the ``target`` tensor.
flattened_indices : Optional[torch.Tensor], optional (default = None)
An optional tensor representing the result of calling :func:~`flatten_and_batch_shift_indices`
on ``indices``. This is helpful in the case that the indices can be flattened once and
cached for many batch lookups.
Returns
-------
selected_targets : ``torch.Tensor``
A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices
extracted from the batch flattened target tensor.
"""
if flattened_indices is None:
# Shape: (batch_size * d_1 * ... * d_n)
flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
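# Illustrative usage sketch (added for clarity, not part of the original module):
# gathering span endpoints from a sequence tensor with hypothetical shapes.
def _demo_batched_index_select():
    target = torch.randn(2, 10, 8)              # (batch_size, sequence_length, embedding_size)
    indices = torch.tensor([[0, 3], [5, 9]])    # (batch_size, num_spans)
    selected = batched_index_select(target, indices)
    assert selected.size() == (2, 2, 8)
    return selected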
def flattened_index_select(target: torch.Tensor,
indices: torch.LongTensor) -> torch.Tensor:
"""
The given ``indices`` of size ``(set_size, subset_size)`` specifies subsets of the ``target``
that each of the set_size rows should select. The `target` has size
``(batch_size, sequence_length, embedding_size)``, and the resulting selected tensor has size
``(batch_size, set_size, subset_size, embedding_size)``.
Parameters
----------
target : ``torch.Tensor``, required.
A Tensor of shape (batch_size, sequence_length, embedding_size).
indices : ``torch.LongTensor``, required.
A LongTensor of shape (set_size, subset_size). All indices must be < sequence_length
as this tensor is an index into the sequence_length dimension of the target.
Returns
-------
selected : ``torch.Tensor``, required.
A Tensor of shape (batch_size, set_size, subset_size, embedding_size).
"""
if indices.dim() != 2:
raise ConfigurationError("Indices passed to flattened_index_select had shape {} but "
"only 2 dimensional inputs are supported.".format(indices.size()))
# Shape: (batch_size, set_size * subset_size, embedding_size)
flattened_selected = target.index_select(1, indices.view(-1))
# Shape: (batch_size, set_size, subset_size, embedding_size)
selected = flattened_selected.view(target.size(0), indices.size(0), indices.size(1), -1)
return selected
def get_range_vector(size: int, device: int) -> torch.Tensor:
"""
Returns a range vector with the desired size, starting at 0. The CUDA implementation
is meant to avoid copy data from CPU to GPU.
"""
if device > -1:
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
def bucket_values(distances: torch.Tensor,
num_identity_buckets: int = 4,
num_total_buckets: int = 10) -> torch.Tensor:
"""
    Places the given values (designed for distances) into ``num_total_buckets`` semi-logscale
buckets, with ``num_identity_buckets`` of these capturing single values.
The default settings will bucket values into the following buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
Parameters
----------
distances : ``torch.Tensor``, required.
A Tensor of any size, to be bucketed.
num_identity_buckets: int, optional (default = 4).
The number of identity buckets (those only holding a single value).
num_total_buckets : int, (default = 10)
The total number of buckets to bucket values into.
Returns
-------
A tensor of the same shape as the input, containing the indices of the buckets
the values were placed in.
"""
# Chunk the values into semi-logscale buckets using .floor().
# This is a semi-logscale bucketing because we divide by log(2) after taking the log.
# We do this to make the buckets more granular in the initial range, where we expect
# most values to fall. We then add (num_identity_buckets - 1) because we want these indices
# to start _after_ the fixed number of buckets which we specified would only hold single values.
logspace_index = (distances.float().log() / math.log(2)).floor().long() + (num_identity_buckets - 1)
# create a mask for values which will go into single number buckets (i.e not a range).
use_identity_mask = (distances <= num_identity_buckets).long()
use_buckets_mask = 1 + (-1 * use_identity_mask)
# Use the original values if they are less than num_identity_buckets, otherwise
# use the logspace indices.
combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index
# Clamp to put anything > num_total_buckets into the final bucket.
return combined_index.clamp(0, num_total_buckets - 1)
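# Illustrative usage sketch (added for clarity, not part of the original module):
# with the default settings, distances 1, 4, 6 and 70 land in buckets 1, 4, 5 and 9.
def _demo_bucket_values():
    distances = torch.tensor([1, 4, 6, 70])
    return bucket_values(distances)  # tensor([1, 4, 5, 9])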
def add_sentence_boundary_token_ids(tensor: torch.Tensor,
mask: torch.Tensor,
sentence_begin_token: Any,
sentence_end_token: Any) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Add begin/end of sentence tokens to the batch of sentences.
Given a batch of sentences with size ``(batch_size, timesteps)`` or
``(batch_size, timesteps, dim)`` this returns a tensor of shape
``(batch_size, timesteps + 2)`` or ``(batch_size, timesteps + 2, dim)`` respectively.
Returns both the new tensor and updated mask.
Parameters
----------
tensor : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps)`` or ``(batch_size, timesteps, dim)``
mask : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps)``
sentence_begin_token: Any (anything that can be broadcast in torch for assignment)
For 2D input, a scalar with the <S> id. For 3D input, a tensor with length dim.
sentence_end_token: Any (anything that can be broadcast in torch for assignment)
For 2D input, a scalar with the </S> id. For 3D input, a tensor with length dim.
Returns
-------
tensor_with_boundary_tokens : ``torch.Tensor``
The tensor with the appended and prepended boundary tokens. If the input was 2D,
it has shape (batch_size, timesteps + 2) and if the input was 3D, it has shape
(batch_size, timesteps + 2, dim).
new_mask : ``torch.Tensor``
The new mask for the tensor, taking into account the appended tokens
marking the beginning and end of the sentence.
"""
# TODO: matthewp, profile this transfer
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] + 2
tensor_with_boundary_tokens = tensor.new_zeros(*new_shape)
if len(tensor_shape) == 2:
tensor_with_boundary_tokens[:, 1:-1] = tensor
tensor_with_boundary_tokens[:, 0] = sentence_begin_token
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, j + 1] = sentence_end_token
new_mask = (tensor_with_boundary_tokens != 0).long()
elif len(tensor_shape) == 3:
tensor_with_boundary_tokens[:, 1:-1, :] = tensor
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, 0, :] = sentence_begin_token
tensor_with_boundary_tokens[i, j + 1, :] = sentence_end_token
new_mask = ((tensor_with_boundary_tokens > 0).long().sum(dim=-1) > 0).long()
else:
raise ValueError("add_sentence_boundary_token_ids only accepts 2D and 3D input")
return tensor_with_boundary_tokens, new_mask
def remove_sentence_boundaries(tensor: torch.Tensor,
mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Remove begin/end of sentence embeddings from the batch of sentences.
Given a batch of sentences with size ``(batch_size, timesteps, dim)``
this returns a tensor of shape ``(batch_size, timesteps - 2, dim)`` after removing
the beginning and end sentence markers. The sentences are assumed to be padded on the right,
with the beginning of each sentence assumed to occur at index 0 (i.e., ``mask[:, 0]`` is assumed
to be 1).
Returns both the new tensor and updated mask.
This function is the inverse of ``add_sentence_boundary_token_ids``.
Parameters
----------
tensor : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps, dim)``
mask : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps)``
Returns
-------
tensor_without_boundary_tokens : ``torch.Tensor``
The tensor after removing the boundary tokens of shape ``(batch_size, timesteps - 2, dim)``
new_mask : ``torch.Tensor``
The new mask for the tensor of shape ``(batch_size, timesteps - 2)``.
"""
# TODO: matthewp, profile this transfer
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] - 2
tensor_without_boundary_tokens = tensor.new_zeros(*new_shape)
new_mask = tensor.new_zeros((new_shape[0], new_shape[1]), dtype=torch.long)
for i, j in enumerate(sequence_lengths):
if j > 2:
tensor_without_boundary_tokens[i, :(j - 2), :] = tensor[i, 1:(j - 1), :]
new_mask[i, :(j - 2)] = 1
return tensor_without_boundary_tokens, new_mask
def add_positional_features(tensor: torch.Tensor,
min_timescale: float = 1.0,
max_timescale: float = 1.0e4):
# pylint: disable=line-too-long
"""
Implements the frequency-based positional encoding described
in `Attention is all you Need
<https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .
Adds sinusoids of different frequencies to a ``Tensor``. A sinusoid of a
different frequency and phase is added to each dimension of the input ``Tensor``.
This allows the attention heads to use absolute and relative positions.
The number of timescales is equal to hidden_dim / 2 within the range
(min_timescale, max_timescale). For each timescale, the two sinusoidal
signals sin(timestep / timescale) and cos(timestep / timescale) are
generated and concatenated along the hidden_dim dimension.
Parameters
----------
tensor : ``torch.Tensor``
a Tensor with shape (batch_size, timesteps, hidden_dim).
min_timescale : ``float``, optional (default = 1.0)
The smallest timescale to use.
max_timescale : ``float``, optional (default = 1.0e4)
The largest timescale to use.
Returns
-------
The input tensor augmented with the sinusoidal frequencies.
"""
_, timesteps, hidden_dim = tensor.size()
timestep_range = get_range_vector(timesteps, get_device_of(tensor)).data.float()
# We're generating both cos and sin frequencies,
# so half for each.
num_timescales = hidden_dim // 2
timescale_range = get_range_vector(num_timescales, get_device_of(tensor)).data.float()
log_timescale_increments = math.log(float(max_timescale) / float(min_timescale)) / float(num_timescales - 1)
inverse_timescales = min_timescale * torch.exp(timescale_range * -log_timescale_increments)
# Broadcasted multiplication - shape (timesteps, num_timescales)
scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)
# shape (timesteps, 2 * num_timescales)
sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)
if hidden_dim % 2 != 0:
# if the number of dimensions is odd, the cos and sin
# timescales had size (hidden_dim - 1) / 2, so we need
# to add a row of zeros to make up the difference.
sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)
return tensor + sinusoids.unsqueeze(0)
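# Illustrative usage sketch (added for clarity, not part of the original module):
# the sinusoidal features are added in place of a learned position embedding and
# preserve the input shape.
def _demo_add_positional_features():
    embeddings = torch.zeros(2, 7, 16)  # (batch_size, timesteps, hidden_dim)
    with_positions = add_positional_features(embeddings)
    assert with_positions.size() == (2, 7, 16)
    return with_positions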
def clone(module: torch.nn.Module, num_copies: int) -> torch.nn.ModuleList:
"""Produce N identical layers."""
return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])
def combine_initial_dims(tensor: torch.Tensor) -> torch.Tensor:
"""
Given a (possibly higher order) tensor of ids with shape
(d1, ..., dn, sequence_length)
Return a view that's (d1 * ... * dn, sequence_length).
If original tensor is 1-d or 2-d, return it as is.
"""
if tensor.dim() <= 2:
return tensor
else:
return tensor.view(-1, tensor.size(-1))
def uncombine_initial_dims(tensor: torch.Tensor, original_size: torch.Size) -> torch.Tensor:
"""
Given a tensor of embeddings with shape
(d1 * ... * dn, sequence_length, embedding_dim)
and the original shape
(d1, ..., dn, sequence_length),
return the reshaped tensor of embeddings with shape
(d1, ..., dn, sequence_length, embedding_dim).
If original size is 1-d or 2-d, return it as is.
"""
if len(original_size) <= 2:
return tensor
else:
view_args = list(original_size) + [tensor.size(-1)]
return tensor.view(*view_args)
|
import os
import cv2
import numpy as np
import sys
import pickle
from optparse import OptionParser
import time
from keras_frcnn import config
import keras_frcnn.resnet as nn
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from keras_frcnn import roi_helpers
from keras_frcnn import data_generators
from sklearn.metrics import average_precision_score
def get_map(pred, gt, f):
T = {}
P = {}
fx, fy = f
for bbox in gt:
bbox['bbox_matched'] = False
pred_probs = np.array([s['prob'] for s in pred])
box_idx_sorted_by_prob = np.argsort(pred_probs)[::-1]
for box_idx in box_idx_sorted_by_prob:
pred_box = pred[box_idx]
pred_class = pred_box['class']
pred_x1 = pred_box['x1']
pred_x2 = pred_box['x2']
pred_y1 = pred_box['y1']
pred_y2 = pred_box['y2']
pred_prob = pred_box['prob']
if pred_class not in P:
P[pred_class] = []
T[pred_class] = []
P[pred_class].append(pred_prob)
found_match = False
for gt_box in gt:
gt_class = gt_box['class']
gt_x1 = gt_box['x1']/fx
gt_x2 = gt_box['x2']/fx
gt_y1 = gt_box['y1']/fy
gt_y2 = gt_box['y2']/fy
gt_seen = gt_box['bbox_matched']
if gt_class != pred_class:
continue
if gt_seen:
continue
iou = data_generators.iou((pred_x1, pred_y1, pred_x2, pred_y2), (gt_x1, gt_y1, gt_x2, gt_y2))
if iou >= 0.5:
found_match = True
gt_box['bbox_matched'] = True
break
else:
continue
T[pred_class].append(int(found_match))
for gt_box in gt:
if not gt_box['bbox_matched'] and not gt_box['difficult']:
if gt_box['class'] not in P:
P[gt_box['class']] = []
T[gt_box['class']] = []
T[gt_box['class']].append(1)
P[gt_box['class']].append(0)
#import pdb
#pdb.set_trace()
return T, P
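# Illustrative usage sketch (added for clarity, not part of the original script):
# get_map expects detections and ground truth as lists of dicts; the example boxes
# below are hypothetical and overlap with IoU > 0.5, so the detection is a match.
def _demo_get_map():
    pred = [{'x1': 10, 'y1': 10, 'x2': 50, 'y2': 50, 'class': 'car', 'prob': 0.9}]
    gt = [{'x1': 12, 'y1': 11, 'x2': 48, 'y2': 52, 'class': 'car', 'difficult': False}]
    T, P = get_map(pred, gt, (1.0, 1.0))
    # T == {'car': [1]} and P == {'car': [0.9]}; these later feed average_precision_score.
    return T, P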
sys.setrecursionlimit(40000)
parser = OptionParser()
parser.add_option("-p", "--path", dest="test_path", help="Path to test data.")
parser.add_option("-n", "--num_rois", dest="num_rois",
help="Number of ROIs per iteration. Higher means more memory use.", default=32)
parser.add_option("--config_filename", dest="config_filename", help=
"Location to read the metadata related to the training (generated when training).",
default="config.pickle")
parser.add_option("-o", "--parser", dest="parser", help="Parser to use. One of simple or pascal_voc",
default="pascal_voc"),
(options, args) = parser.parse_args()
if not options.test_path: # if filename is not given
parser.error('Error: path to test data must be specified. Pass --path to command line')
if options.parser == 'pascal_voc':
from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple':
from keras_frcnn.simple_parser import get_data
else:
raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")
config_output_filename = options.config_filename
with open(config_output_filename, 'rb') as f_in:
C = pickle.load(f_in)
# turn off any data augmentation at test time
C.use_horizontal_flips = False
C.use_vertical_flips = False
C.rot_90 = False
img_path = options.test_path
def format_img(img, C):
img_min_side = float(C.im_size)
(height,width,_) = img.shape
if width <= height:
f = img_min_side/width
new_height = int(f * height)
new_width = int(img_min_side)
else:
f = img_min_side/height
new_width = int(f * width)
new_height = int(img_min_side)
fx = width/float(new_width)
fy = height/float(new_height)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
img = img[:, :, (2, 1, 0)]
img = img.astype(np.float32)
img[:, :, 0] -= C.img_channel_mean[0]
img[:, :, 1] -= C.img_channel_mean[1]
img[:, :, 2] -= C.img_channel_mean[2]
img /= C.img_scaling_factor
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
return img, fx, fy
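# Illustrative usage sketch (added for clarity, not part of the original script):
# format_img resizes the shorter side to C.im_size, subtracts the channel means and
# returns a channels-first network input plus the x/y rescaling factors needed to map
# predicted boxes back to the original image. The image path below is hypothetical.
def _demo_format_img(image_path):
    img = cv2.imread(image_path)
    X, fx, fy = format_img(img, C)
    return X.shape, fx, fy  # e.g. (1, 3, new_height, new_width), fx, fy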
class_mapping = C.class_mapping
if 'bg' not in class_mapping:
class_mapping['bg'] = len(class_mapping)
class_mapping = {v: k for k, v in class_mapping.items()}
print(class_mapping)
class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
C.num_rois = int(options.num_rois)
if K.image_data_format() == 'channels_first':
input_shape_img = (3, None, None)
input_shape_features = (1024, None, None)
else:
input_shape_img = (None, None, 3)
input_shape_features = (None, None, 1024)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(C.num_rois, 4))
feature_map_input = Input(shape=input_shape_features)
# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn_layers = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True)
model_rpn = Model(img_input, rpn_layers)
model_classifier_only = Model([feature_map_input, roi_input], classifier)
model_classifier = Model([feature_map_input, roi_input], classifier)
model_rpn.load_weights(C.model_path, by_name=True)
model_classifier.load_weights(C.model_path, by_name=True)
model_rpn.compile(optimizer='sgd', loss='mse')
model_classifier.compile(optimizer='sgd', loss='mse')
all_imgs, _, _ = get_data(options.test_path)
test_imgs = [s for s in all_imgs if s['imageset'] == 'test']
T = {}
P = {}
for idx, img_data in enumerate(test_imgs):
print('{}/{}'.format(idx,len(test_imgs)))
st = time.time()
filepath = img_data['filepath']
img = cv2.imread(filepath)
X, fx, fy = format_img(img, C)
	if K.image_data_format() == 'channels_last':
		X = np.transpose(X, (0, 2, 3, 1))
# get the feature maps and output from the RPN
[Y1, Y2, F] = model_rpn.predict(X)
R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_data_format(), overlap_thresh=0.7)
# convert from (x1,y1,x2,y2) to (x,y,w,h)
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
# apply the spatial pyramid pooling to the proposed regions
bboxes = {}
probs = {}
for jk in range(R.shape[0] // C.num_rois + 1):
ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)
if ROIs.shape[1] == 0:
break
if jk == R.shape[0] // C.num_rois:
# pad R
curr_shape = ROIs.shape
target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
ROIs_padded[:, :curr_shape[1], :] = ROIs
ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
ROIs = ROIs_padded
[P_cls, P_regr] = model_classifier_only.predict([F, ROIs])
for ii in range(P_cls.shape[1]):
if np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
continue
cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
if cls_name not in bboxes:
bboxes[cls_name] = []
probs[cls_name] = []
(x, y, w, h) = ROIs[0, ii, :]
cls_num = np.argmax(P_cls[0, ii, :])
try:
(tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
tx /= C.classifier_regr_std[0]
ty /= C.classifier_regr_std[1]
tw /= C.classifier_regr_std[2]
th /= C.classifier_regr_std[3]
x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
except:
pass
bboxes[cls_name].append([16 * x, 16 * y, 16 * (x + w), 16 * (y + h)])
probs[cls_name].append(np.max(P_cls[0, ii, :]))
all_dets = []
for key in bboxes:
bbox = np.array(bboxes[key])
new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5)
for jk in range(new_boxes.shape[0]):
(x1, y1, x2, y2) = new_boxes[jk, :]
det = {'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': key, 'prob': new_probs[jk]}
all_dets.append(det)
print('Elapsed time = {}'.format(time.time() - st))
t, p = get_map(all_dets, img_data['bboxes'], (fx, fy))
for key in t.keys():
if key not in T:
T[key] = []
P[key] = []
T[key].extend(t[key])
P[key].extend(p[key])
all_aps = []
for key in T.keys():
ap = average_precision_score(T[key], P[key])
print('{} AP: {}'.format(key, ap))
all_aps.append(ap)
print('mAP = {}'.format(np.mean(np.array(all_aps))))
#print(T)
#print(P)
|
from logger_base import logger
class SubClass:
def __init__(self, QID=None, label=None):
self.__QID = QID
self.__label = label
def __str__(self):
return (
f'QID: {self.__QID}, '
f'label: {self.__label}'
)
def getQID(self):
return self.__QID
def setQID(self, QID):
self.__QID = QID
def getItemLabel(self):
return self.__label
def setItemLabel(self, label):
self.__label = label
if __name__ == '__main__':
subclass = SubClass(QID='Q4564', label='Gomez')
logger.debug(subclass)
|
import os
import time
import six
import uuid
import amostra.client.commands as acc
import conftrak.client.commands as ccc
from analysisstore.client.commands import AnalysisClient
import conftrak.exceptions
import logging
logger = logging.getLogger(__name__)
#12/19 - Skinner inherited this from Hugo, who inherited it from Matt. Arman wrote the underlying DB and left BNL in 2018.
# TODO: get the beamline_id from parameter
BEAMLINE_ID = '17ID1'
sample_ref = None
container_ref = None
request_ref = None
configuration_ref = None
mds_ref = None
analysis_ref = None
main_server = os.environ['MONGODB_HOST']
services_config = {
'amostra': {'host': main_server, 'port': '7770'},
'conftrak': {'host': main_server, 'port': '7771'},
'metadataservice': {'host': main_server, 'port': '7772'},
'analysisstore': {'host': main_server, 'port': '7773'}
}
def db_connect(params=services_config):
"""Instantiate the client connections for the amostra, conftrak, and analysisstore services."""
global sample_ref, container_ref, request_ref, configuration_ref, analysis_ref
sample_ref = acc.SampleReference(**params['amostra'])
container_ref = acc.ContainerReference(**params['amostra'])
request_ref = acc.RequestReference(**params['amostra'])
configuration_ref = ccc.ConfigurationReference(**services_config['conftrak'])
analysis_ref = AnalysisClient(services_config['analysisstore'])
logger.info(analysis_ref)
# should be in config :(
primaryDewarName = 'primaryDewarJohn'
#connect on import
db_connect()
def setCurrentUser(beamline,userName): #for now username, although these should be unique
setBeamlineConfigParam(beamline,"user",userName)
def getCurrentUser(beamline): #for now username, although these should be unique
return getBeamlineConfigParam(beamline,"user")
def setPrimaryDewarName(dewarName):
global primaryDewarName
primaryDewarName = dewarName
def searchBeamline(**kwargs):
try:
return list(configuration_ref.find(key="beamline", **kwargs))
except StopIteration:
return None
def getBeamlineByNumber(num):
"""eg. 17id1, 17id2, 16id1"""
try:
return list(configuration_ref.find(key="beamline", number=num))
except StopIteration:
return None
def createContainer(name, capacity, owner, kind, **kwargs): #16_pin_puck, automounterDewar, shippingDewar
"""
container_name: string, name for the new container, required
kwargs: passed to constructor
"""
if capacity is not None:
kwargs['content'] = [""]*capacity
uid = container_ref.create(name=name, owner=owner, kind=kind, **kwargs)
return uid
def updateContainer(cont_info): #really updating the contents
cont = cont_info['uid']
q = {'uid': cont_info.pop('uid', '')}
cont_info.pop('time', '')
container_ref.update(q, {'content':cont_info['content']})
return cont
def createSample(sample_name, owner, kind, proposalID=None, **kwargs):
"""
sample_name: string, name for the new sample, required
kwargs: passed to constructor
"""
# initialize request count to zero
if 'request_count' not in kwargs:
kwargs['request_count'] = 0
uid = sample_ref.create(name=sample_name, owner=owner,kind=kind,proposalID=proposalID,**kwargs)
return uid
def incrementSampleRequestCount(sample_id):
"""
increment the 'request_count' attribute of the specified sample by 1
"""
# potential for race here?
#skinner - I don't understand this line sample_ref.update(query={'uid': sample_id}, update={'$inc': {'request_count': 1}})
reqCount = getSampleRequestCount(sample_id)+1
sample_ref.update({'uid': sample_id},{'request_count':reqCount})
return getSampleRequestCount(sample_id)
def getSampleRequestCount(sample_id):
"""
get the 'request_count' attribute of the specified sample
"""
s = getSampleByID(sample_id)
return s['request_count']
def getRequestsBySampleID(sample_id, active_only=True):
"""
return a list of request dictionaries for the given sample_id
"""
params = {'sample': sample_id}
if active_only:
params['state'] = "active"
reqs = list(request_ref.find(**params))
return reqs
def getSampleByID(sample_id):
"""
sample_id: required, integer
"""
s = list(sample_ref.find(uid=sample_id))
if (s):
return s[0]
else:
return {}
def getSampleNamebyID(sample_id):
"""
sample_id: required, integer
"""
s = getSampleByID(sample_id)
if not s:  # getSampleByID returns {} when no sample matches
return ''
else:
return s['name']
def getSamplesbyOwner(owner): #skinner
s = sample_ref.find(owner=owner)
return [samp['uid'] for samp in s]
def getSampleIDbyName(sampleName,owner):
"""
sample_id: required, integer
"""
samples = list(sample_ref.find(owner=owner,name=sampleName))
if (samples != []):
return samples[0]["uid"]
else:
return ""
def getContainerIDbyName(container_name,owner):
containers = list(container_ref.find(owner=owner,name=container_name))
if (containers != []):
return containers[0]["uid"]
else:
return ""
def getContainerNameByID(container_id):
"""
container_id: required, integer
"""
c = list(container_ref.find(uid=container_id))
return c[0]['name']
def createResult(result_type, owner,request_id=None, sample_id=None, result_obj=None, proposalID=None,
**kwargs):
"""
result_type: string
request_id: int
sample_id: int
result_obj: dict to attach
"""
header = analysis_ref.insert_analysis_header(result_type=result_type,owner=owner, uid=str(uuid.uuid4()),
sample=sample_id, request=request_id,
provenance={'lsdc':1}, result_obj=result_obj,proposalID=proposalID,time=time.time(),**kwargs)
logger.info("uuid of result inserted into analysisstore: %s" % header)
return header
def getResult(result_id):
"""
result_id: required, uid string (a uuid4)
"""
header = list(analysis_ref.find_analysis_header(uid=result_id))
return header[0]
def getResultsforRequest(request_id):
"""
Takes an integer request_id and returns a list of matching results or [].
"""
resultGen = analysis_ref.find_analysis_header(request=request_id)
if (resultGen != None):
headers = list(resultGen)
return headers
else:
return []
def getResultsforSample(sample_id):
"""
Takes a sample_id and returns its list of result headers or [].
"""
headers = list(analysis_ref.find_analysis_header(sample=sample_id))
return headers
def getRequestByID(request_id, active_only=True):
"""
return a list of request dictionaries for the given request_id
"""
params = {'uid': request_id}
if active_only:
params['state'] = "active"
req = list(request_ref.find(**params))[0]
return req
def addResultforRequest(result_type, request_id, owner,result_obj=None, **kwargs):
"""
like createResult, but also adds it to the resultList of result['sample_id']
"""
sample = getRequestByID(request_id)['sample']
r = createResult(owner=owner,result_type=result_type, request_id=request_id, sample_id=sample, result_obj=result_obj, **kwargs)
return r
def addResulttoSample(result_type, sample_id, owner,result_obj=None, as_mongo_obj=False, proposalID=None,**kwargs):
"""
like addResulttoRequest, but without a request
"""
r = createResult(owner=owner,result_type=result_type, request_id=None, sample_id=sample_id, result_obj=result_obj, proposalID=proposalID,**kwargs)
return r
def addResulttoBL(result_type, beamline_id, owner,result_obj=None, proposalID=None,**kwargs):
"""
add result to beamline
beamline_id: the integer, 'beamline_id' field of the beamline entry
other fields are as for createRequest
"""
r = createResult(owner=owner,result_type=result_type, request_id=None, sample_id=None, result_obj=result_obj, beamline_id=beamline_id, proposalID=proposalID,**kwargs)
return r
def getResultsforBL(id=None, name=None, number=None):
"""
Retrieve results using either BL id, name, or number (tried in that order)
Returns a generator of results
"""
if id is None:
if name is None:
key = 'number'
val = number
else:
key = 'name'
val = name
query = {key: val}
b = searchBeamline(**query)
if not b:
yield None
return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
id = b[0]['uid']  # searchBeamline returns a list of matching beamline documents
if id is None:
yield None
return
results = list(analysis_ref.find_analysis_header(beamline_id=id))
for r in results:
yield r
def addFile(data=None, filename=None):
"""
Put the file data into the GenericFile collection,
return the _id for use as an id or ReferenceField.
If a filename kwarg is given, read data from the file.
If a data kwarg is given or data is the 1st arg, store the data.
If both or neither is given, raise an error.
"""
#TODO: Decide what to do with this method
raise NotImplementedError
'''
if filename is not None:
if data is not None:
raise ValueError('both filename and data kwargs given. can only use one.')
else:
with open(filename, 'r') as file: # do we need 'b' for binary?
data = file.read() # is this blocking? might not always get everything at once?!
elif data is None:
raise ValueError('neither filename or data kwargs given. need one.')
f = GenericFile(data=data)
f.save()
f.reload() # to fetch generated id
return f.to_dbref()
'''
def getFile(_id):
"""
Retrieve the data from the GenericFile collection
for the given _id or db_ref
Returns the data in Binary. If you know it's a txt file and want a string,
convert with str()
Maybe this will be automatically deref'd most of the time?
Only if they're mongoengine ReferenceFields...
"""
#TODO: Decide what to do with this method
raise NotImplementedError
'''
try:
_id = _id.id
except AttributeError:
pass
f = GenericFile.objects(__raw__={'_id': _id}) # yes it's '_id' here but just 'id' below, gofigure
return _try0_dict_key(f, 'file', 'id', _id, None,
dict_key='data')
'''
def createRequest(request_type, owner, request_obj=None, as_mongo_obj=False, proposalID=None, **kwargs):
"""
request_type: required, name (string) of request type, dbref to it's db entry, or a Type object
request_obj: optional, stored as is, could be a dict of collection parameters, or whatever
priority: optional, integer priority level
anything else (priority, sample_id) can either be embedded in the
request_object or passed in as keyword args to get saved at the
top level.
"""
kwargs['request_type'] = request_type
kwargs['request_obj'] = request_obj
kwargs['owner'] = owner
kwargs['proposalID']=proposalID
uid = request_ref.create(**kwargs)
return uid
def addRequesttoSample(sample_id, request_type, owner,request_obj=None, as_mongo_obj=False, proposalID=None,**kwargs):
"""
sample_id: required, integer sample id
request_type: required, name (string) of request type, dbref to it's db entry, or a Type object
request_obj: optional, stored as is, could be a dict of collection parameters, or whatever
anything else (priority, sample_id) can either be embedded in the
request_object or passed in as keyword args to get saved at the
top level.
"""
kwargs['sample'] = sample_id
s = time.time()
r = createRequest(request_type, owner, request_obj=request_obj, as_mongo_obj=True, proposalID=proposalID,**kwargs)
t = time.time()-s
logger.info("add req = " + str(t))
return r
def insertIntoContainer(container_name, owner, position, itemID):
c = getContainerByName(container_name,owner)
if c is not None:
cnt = c['content']
cnt[position - 1] = itemID # most people don't zero index things
c['content'] = cnt
updateContainer(c)
return True
else:
logger.error("bad container name %s" % container_name)
return False
def emptyContainer(uid):
c = getContainerByID(uid)
if c is not None:
cnt = c['content']
for i in range (len(cnt)):
cnt[i] = ''
c['content'] = cnt
updateContainer(c)
return True
else:
logger.error("container not found")
return False
def getContainers(filters=None):
"""get *all* containers"""
if filters is not None:
c = list(container_ref.find(**filters)) #skinner - seems to break on compound filter
else:
c = list(container_ref.find())
return c
def getContainersByType(type_name, owner):
#TODO: group_name was not being used kept for compatibility
return getContainers(filters={"kind": type_name,"owner":owner})
def getAllPucks(owner): #shouldn't this be for owner?
# find all the types desended from 'puck'?
# and then we could do this?
return getContainersByType("16_pin_puck", owner)
def getPrimaryDewar(beamline):
"""
returns the mongo object for a container with a name matching
the global variable 'primaryDewarName'
"""
return getContainerByName(primaryDewarName,beamline)
def getContainerByName(container_name,owner):
c = getContainers(filters={'name': container_name,'owner':owner})[0] #skinner, this should return only one, not a list
return c
def getContainerByID(container_id):
c = getContainers(filters={'uid': container_id})[0]
return c
def getQueue(beamlineName):
"""
returns a list of request dicts for all the samples in the container
named by the global variable 'primaryDewarName'
"""
# seems like this would be a lot simpler if it weren't for the Nones?
ret_list = []
# try to only retrieve what we need...
# Use .first() instead of [0] here because when the query returns nothing,
# .first() returns None while [0] generates an IndexError
# Nah... [0] is faster and catch Exception...
DewarItems = []
try:
DewarItems = getPrimaryDewar(beamlineName)['content']
except (IndexError, AttributeError):
raise ValueError('could not find container: "{0}"!'.format(primaryDewarName))
items = []
for item in DewarItems:
if (item != ""):
items.append(item)
sample_list = []
contents = [getContainerByID(uid)['content'] for uid in items]
for samp in contents:
if (samp != ""):
sample_list += samp
for s in sample_list:
reqs = getRequestsBySampleID(s, active_only=True)
for request in reqs:
yield request
def getQueueUnorderedObsolete(beamlineName):
"""
returns a list of request dicts for all the samples in the container
named by the global variable 'primaryDewarName'
"""
# seems like this would be a lot simpler if it weren't for the Nones?
ret_list = []
# try to only retrieve what we need...
# Use .first() instead of [0] here because when the query returns nothing,
# .first() returns None while [0] generates an IndexError
# Nah... [0] is faster and catch Exception...
try:
items = getPrimaryDewar(beamlineName)['content']
except (IndexError, AttributeError):
raise ValueError('could not find container: "{0}"!'.format(primaryDewarName))
items = set(items)
items.discard("") # skip empty positions
sample_list = []
contents = [getContainerByID(uid)['content'] for uid in items]
for samp in contents:
sil = set(samp)
sil.discard("")
sample_list += sil
for s in sample_list:
reqs = getRequestsBySampleID(s, active_only=True)
for request in reqs:
yield request
def queueDone(beamlineName):
ql = list(getQueue(beamlineName))
for i in range (0,len(ql)):
if (ql[i]['priority'] > 0):
return 0
return 1
def getCoordsfromSampleID(beamline,sample_id):
"""
returns the container position within the dewar and position in
that container for a sample with the given id in one of the
containers in the container named by the global variable
'primaryDewarName'
"""
try:
primary_dewar_item_list = getPrimaryDewar(beamline)['content']
except (IndexError, AttributeError):
raise ValueError('could not find container: "{0}"!'.format(primaryDewarName))
#john try:
# eliminate empty item_list slots
pdil_set = set(primary_dewar_item_list)
pdil_set.discard("")  # discard() mutates the set in place and returns None
# find container in the primary_dewar_item_list (pdil) which has the sample
filters = {'$and': [{'uid': {'$in':list(pdil_set)}}, {'content': {'$in':[sample_id]}}]}
c = getContainers(filters=filters)
# get the index of the found container in the primary dewar
i = primary_dewar_item_list.index(c[0]['uid'])
# get the index of the sample in the found container item_list
j = c[0]['content'].index(sample_id)
# get the container_id of the found container
puck_id = c[0]['uid']
return (i, j, puck_id)
def popNextRequest(beamlineName):
"""
this just gives you the next one, it doesn't
actually pop it off the stack
"""
orderedRequests = getOrderedRequestList(beamlineName)
try:
if (orderedRequests[0]["priority"] != 99999):
if orderedRequests[0]["priority"] > 0:
return orderedRequests[0]
else: #99999 priority means it's running, try next
if orderedRequests[1]["priority"] > 0:
return orderedRequests[1]
except IndexError:
pass
return {}
def getRequestObsolete(reqID): # need to get this from searching the dewar I guess
#skinner - no idea reqID = int(reqID)
"""
request_id: required, integer id
"""
r = getRequestByID(reqID)
return r
def updateRequest(request_dict):
"""
This is not recommended once results are recorded for a request!
Using a new request instead would keep the apparent history
complete and intuitive. Although it won't hurt anything if you've
also recorded the request params used inside the results and query
against that, making requests basically ephemerally useful objects.
"""
if 'uid' in request_dict:
r_uid = request_dict.pop('uid', '')
s_time = request_dict.pop('time', '')
r = request_ref.update({'uid':r_uid},request_dict)
request_dict["uid"] = r_uid
request_dict["time"] = s_time
def deleteRequest(r_id):
"""
reqObj should be a dictionary with a 'uid' field
and optionally a 'sample_uid' field.
"""
r = getRequestByID(r_id)
r['state'] = "inactive"
updateRequest(r)
def updateSample(sampleObj):
if 'uid' in sampleObj:
s_uid = sampleObj.pop('uid','')
s_time = sampleObj.pop('time','')
s = sample_ref.update({'uid': s_uid}, sampleObj)
def deleteSample(sample_uid):
s = getSampleByID(sample_uid)
s['state'] = "inactive"  # mark the sample inactive, mirroring deleteRequest
updateSample(s)
def removePuckFromDewar(beamline,dewarPos):
dewar = getPrimaryDewar(beamline)
dewar['content'][dewarPos] = ''
updateContainer(dewar)
def updatePriority(request_id, priority):
r = getRequestByID(request_id)
r['priority'] = priority
updateRequest(r)
def getPriorityMap(beamlineName):
"""
returns a dictionary with priorities as keys and lists of requests
having those priorities as values
"""
priority_map = {}
for request in getQueue(beamlineName):
try:
priority_map[request['priority']].append(request)
except KeyError:
priority_map[request['priority']] = [request]
return priority_map
def getOrderedRequestList(beamlineName):
"""
returns a list of requests sorted by priority
"""
orderedRequestsList = []
priority_map = getPriorityMap(beamlineName)
for priority in sorted(six.iterkeys(priority_map), reverse=True):
orderedRequestsList += priority_map[priority]
#for request in priority_map[priority]:
# yield request
# or if we want this to be a generator could it be more efficient
# with itertools.chain?
# foo=['abc','def','ghi']
# [a for a in itertools.chain(*foo)]
# ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
# or [a for a in itertools.chain.from_iterable(foo)]
return orderedRequestsList
def createBeamline(bl_name, bl_num): #createBeamline("fmx", "17id1")
data = {"key": "beamline", "name": bl_name, "number": bl_num}
uid = configuration_ref.create(beamline_id=bl_num, **data)
return uid
def beamlineInfo(beamline_id, info_name, info_dict=None):
"""
to write info: beamlineInfo('x25', 'det', info_dict={'vendor':'adsc','model':'q315r'})
to fetch info: info = beamlineInfo('x25', 'det')
"""
# if it exists it's a query or update
try:
bli = list(configuration_ref.find(key='beamline_info', beamline_id=beamline_id, info_name=info_name))[0] #hugo put the [0]
if info_dict is None: # this is a query
return bli['info']
# else it's an update
bli_uid = bli.pop('uid', '')
configuration_ref.update({'uid': bli_uid},{'info':info_dict})
# else it's a create
except conftrak.exceptions.ConfTrakNotFoundException:
# edge case for 1st create in fresh database
# in which case this is actually a query
if info_dict is None:
return {}
# normal create
data = {'key': 'beamline_info', 'info_name':info_name, 'info': info_dict}
uid = configuration_ref.create(beamline_id,**data)
def setBeamlineConfigParams(paramDict, searchParams):
# get current config
beamlineConfig = beamlineInfo(**searchParams)
# update with given param dict and last_modified
paramDict['last_modified'] = time.time()
beamlineConfig.update(paramDict)
# save
beamlineInfo(info_dict=beamlineConfig, **searchParams)
def setBeamlineConfigParam(beamline_id, paramName, paramVal):
beamlineInfo(beamline_id,paramName,{"val":paramVal})
def getBeamlineConfigParam(beamline_id, paramName):
return beamlineInfo(beamline_id,paramName)["val"]
def getAllBeamlineConfigParams(beamline_id):
g = configuration_ref.find(key='beamline_info', beamline_id=beamline_id)
configList = list(g)
return configList
def printAllBeamlineConfigParams(beamline_id):
configList = getAllBeamlineConfigParams(beamline_id)
for i in range (0,len(configList)):
try:
logger.info(configList[i]['info_name'] + " " + str(configList[i]['info']['val']))
except KeyError:
pass
def deleteCompletedRequestsforSample(sid):
return #short circuit, not what they wanted
logger.info("delete request " + sid)
requestList=getRequestsBySampleID(sid)
for i in range (0,len(requestList)):
if (requestList[i]["priority"] == -1): #good to clean up completed requests after unmount
if (requestList[i]["protocol"] == "raster" or requestList[i]["protocol"] == "vector"):
deleteRequest(requestList[i]['uid'])
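# A minimal usage sketch (hypothetical names and values; assumes db_connect() succeeded on
# import and the amostra/conftrak/analysisstore services are reachable): create a puck and a
# sample, place the sample in the puck, attach a request, then walk the queue for a beamline.
# Note the puck would also need to be placed in the beamline's primary dewar before it shows
# up in getQueue().
#
#   puck_uid = createContainer("puck1", 16, owner="jdoe", kind="16_pin_puck")
#   samp_uid = createSample("lysozyme_1", owner="jdoe", kind="pin")
#   insertIntoContainer("puck1", "jdoe", position=1, itemID=samp_uid)
#   addRequesttoSample(samp_uid, "standard", owner="jdoe", request_obj={"exposure": 0.02}, priority=1)
#   for req in getQueue("fmx"):
#       print(req["uid"], req["priority"])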
|
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import numpy.random as npr
from model.utils.config import cfg
from generate_anchors import generate_anchors, generate_anchors_all_pyramids
from bbox_transform import clip_boxes, bbox_overlaps_batch, bbox_transform_batch
import pdb
DEBUG = False
class _AnchorTargetLayer_DECONV(nn.Module):
"""
Assign anchors to ground-truth targets. Produces anchor classification
labels and bounding-box regression targets.
"""
def __init__(self, feat_stride, scales, ratios):
super(_AnchorTargetLayer_DECONV, self).__init__()
self._anchor_ratios = ratios
self._feat_stride = feat_stride
self._fpn_scales = np.array(cfg.FPN_ANCHOR_SCALES)
self._fpn_feature_strides = np.array(cfg.FPN_FEAT_STRIDES)
self._fpn_anchor_stride = cfg.FPN_ANCHOR_STRIDE
# allow boxes to sit over the edge by a small amount
self._allowed_border = 0 # default is 0
def forward(self, input):
# Algorithm:
#
# for each (H, W) location i
# generate 9 anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the 9 anchors
# filter out-of-image anchors
scores = input[0]
gt_boxes = input[1]
im_info = input[2]
num_boxes = input[3]
feat_shapes = input[4]
# NOTE: need to change
# height, width = scores.size(2), scores.size(3)
height, width = 0, 0
batch_size = gt_boxes.size(0)
anchors = torch.from_numpy(generate_anchors_all_pyramids(self._fpn_scales, self._anchor_ratios,
feat_shapes, self._fpn_feature_strides, self._fpn_anchor_stride)).type_as(scores)
total_anchors = anchors.size(0)
keep = ((anchors[:, 0] >= -self._allowed_border) &
(anchors[:, 1] >= -self._allowed_border) &
(anchors[:, 2] < int(im_info[0][1]) + self._allowed_border) &
(anchors[:, 3] < int(im_info[0][0]) + self._allowed_border))
inds_inside = torch.nonzero(keep).view(-1)
# keep only inside anchors
anchors = anchors[inds_inside, :]
# label: 1 is positive, 0 is negative, -1 is dont care
labels = gt_boxes.new(batch_size, inds_inside.size(0)).fill_(-1)
bbox_inside_weights = gt_boxes.new(batch_size, inds_inside.size(0)).zero_()
bbox_outside_weights = gt_boxes.new(batch_size, inds_inside.size(0)).zero_()
overlaps = bbox_overlaps_batch(anchors, gt_boxes)
max_overlaps, argmax_overlaps = torch.max(overlaps, 2)
gt_max_overlaps, _ = torch.max(overlaps, 1)
if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
gt_max_overlaps[gt_max_overlaps==0] = 1e-5
keep = torch.sum(overlaps.eq(gt_max_overlaps.view(batch_size,1,-1).expand_as(overlaps)), 2)
if torch.sum(keep) > 0:
labels[keep>0] = 1
# fg label: above threshold IOU
labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
sum_fg = torch.sum((labels == 1).int(), 1)
sum_bg = torch.sum((labels == 0).int(), 1)
for i in range(batch_size):
# subsample positive labels if we have too many
if sum_fg[i] > num_fg:
fg_inds = torch.nonzero(labels[i] == 1).view(-1)
# torch.randperm seems has a bug on multi-gpu setting that cause the segfault.
# See https://github.com/pytorch/pytorch/issues/1868 for more details.
# use numpy instead.
#rand_num = torch.randperm(fg_inds.size(0)).type_as(gt_boxes).long()
rand_num = torch.from_numpy(np.random.permutation(fg_inds.size(0))).type_as(gt_boxes).long()
disable_inds = fg_inds[rand_num[:fg_inds.size(0)-num_fg]]
labels[i][disable_inds] = -1
num_bg = cfg.TRAIN.RPN_BATCHSIZE - sum_fg[i]
# subsample negative labels if we have too many
if sum_bg[i] > num_bg:
bg_inds = torch.nonzero(labels[i] == 0).view(-1)
#rand_num = torch.randperm(bg_inds.size(0)).type_as(gt_boxes).long()
rand_num = torch.from_numpy(np.random.permutation(bg_inds.size(0))).type_as(gt_boxes).long()
disable_inds = bg_inds[rand_num[:bg_inds.size(0)-num_bg]]
labels[i][disable_inds] = -1
offset = torch.arange(0, batch_size)*gt_boxes.size(1)
argmax_overlaps = argmax_overlaps + offset.view(batch_size, 1).type_as(argmax_overlaps)
bbox_targets = _compute_targets_batch(anchors, gt_boxes.view(-1,5)[argmax_overlaps.view(-1), :].view(batch_size, -1, 5))
# use a single value instead of 4 values for easy index.
bbox_inside_weights[labels==1] = cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS[0]
if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
num_examples = torch.sum(labels[i] >= 0)
positive_weights = 1.0 / num_examples.item()
#positive_weights = 1.0 / num_examples
#negative_weights = 1.0 / num_examples
negative_weights = 1.0 / num_examples.item()
else:
assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &
(cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
bbox_outside_weights[labels == 1] = positive_weights
bbox_outside_weights[labels == 0] = negative_weights
labels = _unmap(labels, total_anchors, inds_inside, batch_size, fill=-1)
bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, batch_size, fill=0)
bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, batch_size, fill=0)
bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, batch_size, fill=0)
outputs = []
# labels = labels.view(batch_size, height, width, A).permute(0,3,1,2).contiguous()
# labels = labels.view(batch_size, 1, A * height, width)
outputs.append(labels)
# bbox_targets = bbox_targets.view(batch_size, height, width, A*4).permute(0,3,1,2).contiguous()
outputs.append(bbox_targets)
# anchors_count = bbox_inside_weights.size(1)
# bbox_inside_weights = bbox_inside_weights.view(batch_size,anchors_count,1).expand(batch_size, anchors_count, 4)
# bbox_inside_weights = bbox_inside_weights.contiguous().view(batch_size, height, width, 4*A)\
# .permute(0,3,1,2).contiguous()
outputs.append(bbox_inside_weights)
# bbox_outside_weights = bbox_outside_weights.view(batch_size,anchors_count,1).expand(batch_size, anchors_count, 4)
# bbox_outside_weights = bbox_outside_weights.contiguous().view(batch_size, height, width, 4*A)\
# .permute(0,3,1,2).contiguous()
outputs.append(bbox_outside_weights)
return outputs
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _unmap(data, count, inds, batch_size, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if data.dim() == 2:
ret = torch.Tensor(batch_size, count).fill_(fill).type_as(data)
ret[:, inds] = data
else:
ret = torch.Tensor(batch_size, count, data.size(2)).fill_(fill).type_as(data)
ret[:, inds,:] = data
return ret
def _compute_targets_batch(ex_rois, gt_rois):
"""Compute bounding-box regression targets for an image."""
return bbox_transform_batch(ex_rois, gt_rois[:, :, :4])
|
"""
A number of definitions get re-used frequently; this module attempts to centralize and deduplicate them (a little; some of this still duplicates from Doxygen's source).
Not in love with how this works...
"""
from collections import namedtuple
from . import exceptions
from . import loggle
constants = namedtuple("constants", ("compound_kinds", "member_kinds", "relations"))
c = constants(
compound_kinds={
"category",
"class",
"dir",
"enum",
"example",
"exception",
"file",
"group",
"interface",
"library",
"module",
"namespace",
"package",
"page",
"protocol",
"service",
"singleton",
"struct",
"type",
"union",
"unknown",
"",
},
member_kinds={
"macro definition",
"function",
"variable",
"typedef",
"enumeration",
"enumvalue",
"signal",
"slot",
"friend",
"dcop",
"property",
"event",
"interface",
"service",
},
relations={
"reimplemented",
"reimplements",
"outercompounds",
"innercompounds",
"outerpages",
"innerpages",
"outerdirs",
"innerdirs",
"outerfiles",
"innerfiles",
"outerclasses",
"innerclasses",
"outernamespaces",
"innernamespaces",
"outergroups",
"innergroups",
"members",
"compounds",
"subclasses",
"superclasses",
"links_in",
"links_out",
"argument_links_in",
"argument_links_out",
"initializer_links_in",
"initializer_links_out",
},
)
class Defs(object):
"""Scaffold for type-specific singletons."""
defs = None
template = None
def __init__(self):
self.defs = {}
self.extra_setup()
def extra_setup(self):
pass
def on_missing(self, name, exception):
raise NotImplementedError()
def get(self, name):
try:
return self.defs[name]
except KeyError as e:
self.on_missing(name, e)
def names(self):
return self.defs.keys()
def define(self, name, *arg):
self.defs[name] = self.template(name, *arg)
class Types(Defs):
"""
Scaffold for pre-defining our record types.
The core purpose is defining namedtuples that will wrap rows returned from different sqlite3 queries.
However, for consistency and clarity, we also use this same process to define a few types that this module uses to wrap its own returnables, including 'manuals', 'sections', and 'searches'.
"""
cursor_type_cache = None
def extra_setup(self):
self.cursor_type_cache = {}
def on_missing(self, name, exception):
raise exceptions.RequiredTypeMissing("Required type not defined") from exception
def cols(self, name):
return self.get(name)._fields
def define(self, name, fields):
tupledef = []
for column in fields:
# This is the sqlite3/dbapi column descriptor format; we duplicate it so that we can use any given cursor's descriptor as a cache key.
tupledef.append((column, None, None, None, None, None, None))
typedef = namedtuple(name, fields)
self.defs[tuple(tupledef)] = typedef
self.defs[name] = typedef
def _implicit(self, fields):
typedef = namedtuple("_implicit", (x[0] for x in fields))
self.defs[fields] = typedef
return typedef
def row_factory(self):
def namedtuple_factory(cursor, row):
"""Returns sqlite rows as named tuples."""
if cursor not in self.cursor_type_cache:
try:
self.cursor_type_cache[cursor] = self.defs[cursor.description]
except KeyError:
loggle.info(
"No pre-defined type found; generating implicit type for",
cursor,
row,
)
self.cursor_type_cache[cursor] = self._implicit(cursor.description)
return self.cursor_type_cache[cursor](*row)
return namedtuple_factory
def default_types():
compound_cols = (
"rowid",
"kind",
"name",
"title",
"file_id",
"briefdescription",
"detaileddescription",
)
member_cols = (
"rowid",
"name",
"kind",
"definition",
"type",
"argsstring",
"scope",
"inline",
"bodystart",
"bodyend",
"bodyfile_id",
"line",
"detaileddescription",
"briefdescription",
"inbodydescription",
)
types = Types()
types.define(
"metadata",
(
"doxygen_version",
"schema_version",
"generated_at",
"generated_on",
"project_name",
"project_number",
"project_brief",
),
)
types.define("stub", ("rowid", "refid", "kind", "name", "summary"))
types.define("compound", compound_cols)
types.define("compound_rel", compound_cols + ("relations",))
types.define("member", member_cols)
types.define("member_rel", member_cols + ("relations",))
types.define("section", ("summary", "children", "type", "root"))
types.define("manual", ("root", "documents", "sections", "meta"))
types.define("search", ("results",))
# I want to limit noise here to types a consumer might want to leverage, so the system will implicitly create some internal-only types (like _relations and _distinct kinds) on first use.
return types
class RelationAtoms(Defs):
"""
Scaffold for pre-defining our relation atoms.
Relation atoms are a way to help DRY up some code for handling a common Doxygen relationship-table pattern. A relation atom's tuple follows this format: (
table_name,
outside rowid prefix,
inside rowid prefix,
)
The rowid foreign-key columns follow a format like <name>_rowid, so we just specify the unique part.
This discussion continues at the Relations class, and goes into more detail at View._build_relation.
"""
template = namedtuple(
"relation_atom", ("name", "table", "parent_col_prefix", "child_col_prefix")
)
def on_missing(self, name, exception):
raise exceptions.RequiredRelationAtomMissing(
"Required relation atom '{}' not defined".format(name)
) from exception
def default_atoms():
atoms = RelationAtoms()
atoms.define("references", "inline_xrefs", "dst", "src")
atoms.define("argument_references", "argument_xrefs", "dst", "src")
atoms.define("initializer_references", "initializer_xrefs", "dst", "src")
atoms.define("compounds", "contains", "outer", "inner")
atoms.define("members", "member", "scope", "memberdef")
atoms.define("inherits", "compoundref", "base", "derived")
atoms.define("reimplementing", "reimplements", "memberdef", "reimplemented")
return atoms
class Relations(Defs):
"""
Scaffold for pre-defining our relations.
Relations build on the relation atoms. The tuple format specifies: (
the name of this relation on the originating object,
child|parent (i.e., whether this relation points to the origin record's child, or parent),
the relation atom to build from
)
Relation definitions come in parent/child pairs.
For an example, let's say we've got two methods a::example() and b::example(), where b::example is a re-implementation of a::example.
a::example will have a 'reimplemented' relation TOWARDS the parent/re-implementing b::example.
b::example will have a 'reimplements' relation TOWARDS the child/re-implemented a::example.
This discussion goes into more detail at View._build_relation.
"""
defs = None
template = namedtuple("relation", ("name", "direction", "atom", "kinds"))
def __init__(self):
self.defs = {}
def on_missing(self, name, exception):
raise exceptions.RequiredRelationMissing(
"Required relation '{}' not defined".format(name)
) from exception
def default_relations():
rels = Relations()
# core Doxygen rels
rels.define("reimplemented", "parent", "reimplementing", None)
rels.define("reimplements", "child", "reimplementing", None)
rels.define("outercompounds", "parent", "compounds", None)
rels.define("innercompounds", "child", "compounds", None)
rels.define("outerpages", "parent", "compounds", ("page",))
rels.define("innerpages", "child", "compounds", ("page",))
rels.define("outerdirs", "parent", "compounds", ("dir",))
rels.define("innerdirs", "child", "compounds", ("dir",))
rels.define("outerfiles", "parent", "compounds", ("file",))
rels.define("innerfiles", "child", "compounds", ("file",))
rels.define(
"outerclasses",
"parent",
"compounds",
(
"category",
"class",
"enum",
"exception",
"interface",
"module",
"protocol",
"service",
"singleton",
"struct",
"type",
"union",
),
)
rels.define(
"innerclasses",
"child",
"compounds",
(
"category",
"class",
"enum",
"exception",
"interface",
"module",
"protocol",
"service",
"singleton",
"struct",
"type",
"union",
),
)
rels.define("outernamespaces", "parent", "compounds", ("namespace",))
rels.define("innernamespaces", "child", "compounds", ("namespace",))
rels.define("outergroups", "parent", "compounds", ("group",))
rels.define("innergroups", "child", "compounds", ("group",))
rels.define("members", "child", "members", None)
rels.define("compounds", "parent", "members", None)
rels.define("subclasses", "child", "inherits", None)
rels.define("superclasses", "parent", "inherits", None)
rels.define("links_in", "child", "references", None)
rels.define("links_out", "parent", "references", None)
rels.define("argument_links_in", "child", "argument_references", None)
rels.define("argument_links_out", "parent", "argument_references", None)
rels.define("initializer_links_in", "child", "initializer_references", None)
rels.define("initializer_links_out", "parent", "initializer_references", None)
# Additional rels for common tasks
rels.define("subpages", "child", "compounds", ("page",))
rels.define("methods", "child", "members", ("function",))
rels.define("properties", "child", "members", ("variable",))
return rels
defaults = dict(
type_factory=default_types,
atom_factory=default_atoms,
relation_factory=default_relations,
)
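# A minimal usage sketch (hypothetical; assumes a Doxygen-generated sqlite3 database at
# 'doxygen_sqlite3.db'): installing the Types row factory makes query results come back as
# the namedtuples defined above, or as implicit types generated on first use.
#
#   import sqlite3
#   types = default_types()
#   conn = sqlite3.connect("doxygen_sqlite3.db")
#   conn.row_factory = types.row_factory()
#   for member in conn.execute("SELECT rowid, name, kind FROM memberdef LIMIT 5"):
#       print(member.name, member.kind)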
|
from simple_salesforce import Salesforce
from dotenv import load_dotenv
import os
import time
import random
BASE_DIR='./'
load_dotenv(os.path.join(BASE_DIR, '.env.iotxporg'))
USERNAME=os.getenv('USERNAME')
PASSWORD=os.getenv('PASSWORD')
SECURITY_TOKEN=os.getenv('SECURITY_TOKEN')
print("uname %s pw %s token %s" % (USERNAME, PASSWORD, SECURITY_TOKEN))
sf = Salesforce(username=USERNAME, password=PASSWORD, security_token=SECURITY_TOKEN)
print(sf)
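# A minimal .env.iotxporg sketch (placeholder values; the real credentials come from your
# Salesforce org and are read by load_dotenv above):
#
#   USERNAME=user@example.com
#   PASSWORD=your-password
#   SECURITY_TOKEN=your-security-token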
def read_temp():
temp_c = 27.0 + (25*random.random())
temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c
while True:
print(read_temp())
data = [{'serial_no__c': '1001','door_open__c': 'false','temp__c':read_temp()}]
print("Data sent: ")
for x in data: print("data item: ", x)
sf.Refrigerator_Event__e.create(data[0])
time.sleep(15)
print("Platform Event Sent: " )
|
from typing import Union
import os
try:
import objc
import Foundation
# This import is required for NSImage
import AppKit # noqa: F401
except ImportError:
raise Exception(
"""To use native notifications, you need to install the following dependencies:
- pyobjc-core
- pyobjc-framework-NotificationCenter
- pyobjc-framework-UserNotifications
- pyobjc-framework-UserNotificationsUI
If you are unable to do so, import and use ApplescriptNotification instead:
from aquaui.notification.fallback_notification import ApplescriptNotification
"""
)
from .fallback_notification import ApplescriptNotification
NSUserNotification = objc.lookUpClass("NSUserNotification") # type: ignore
NSUserNotificationCenter = objc.lookUpClass("NSUserNotificationCenter") # type: ignore
NSUrl = objc.lookUpClass("NSURL") # type: ignore
NSImage = objc.lookUpClass("NSImage") # type: ignore
class Notification:
"""Show a notification with a title, subtitle, info text, image, delay, and sound"""
def __init__(self, text: Union[str, None] = None) -> None:
"""
info text is the third (last) line
"""
self.notification = NSUserNotification.alloc().init()
if text is not None:
self.notification.setInformativeText_(text)
self.text = text
def with_subtitle(self, subtitle: str):
"""
subtitle is in the second line
"""
self.notification.setSubtitle_(subtitle)
self.subtitle = subtitle
return self
def with_title(self, title: str):
"""
title is the large text at the top of the notification, not required
"""
self.notification.setTitle_(title)
self.title = title
return self
def _create_image(self, image_path: str):
"""Create an image for identity of content image"""
path = f"file:{os.getcwd()}/{image_path}"
url = NSUrl.alloc().initWithString_(path)
image = NSImage.alloc().initWithContentsOfURL_(url)
return image
def with_identity_image(self, identity_image_path: Union[str, None] = None):
"""Image on the right side of the notification"""
if identity_image_path is not None:
image = self._create_image(identity_image_path)
self.notification.set_identityImage_(image)
return self
def _with_content_image(self, content_image_path: Union[str, None] = None):
"""Image on the left side of the notification, but does not seem to be working on Big Sur"""
if content_image_path is not None:
image = self._create_image(content_image_path)
self.notification.setContentImage_(image)
return self
def with_delay(self, delay: int = 0):
"""The delay in second between .send() and the notification being shown"""
self.notification.setDeliveryDate_(
Foundation.NSDate.dateWithTimeInterval_sinceDate_(delay, Foundation.NSDate.date()) # type: ignore
)
return self
def send(self) -> Union[None, str]:
try:
NSUserNotificationCenter.defaultUserNotificationCenter().scheduleNotification_(self.notification)
except Exception:
# fall back to the AppleScript-based notification if the notification-center call fails
return ApplescriptNotification(self.text).with_subtitle(self.subtitle).with_title(self.title).send()
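# A minimal usage sketch (strings are placeholders): the builder methods return self, so a
# notification can be configured and delivered in one chained expression.
#
#   Notification("Backup finished").with_title("aquaui").with_subtitle("3 files copied").with_delay(2).send()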
|
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import Normalize
import numpy as np
import keras
from IPython.display import clear_output
import matplotlib as mpl
#plot function for sample images
def plot_tile(samples):
num_samples, x_dim, y_dim, _ = samples.shape
axes = (np.round(np.sqrt(num_samples))).astype(int)
fig = plt.figure(figsize=(axes, axes))
gs = gridspec.GridSpec(axes, axes)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_aspect('equal')
plt.imshow(sample, cmap=plt.get_cmap('viridis'), aspect='auto')
#visualize the generated signals (for training dataset)
def plot_signals(y_reg_train, labels):
fig, ax = plt.subplots(1,1, figsize = (16, 7))
my_cmap = cm.get_cmap('jet')
my_norm = Normalize(vmin=0, vmax=9)
cs = my_cmap(my_norm(labels))
for j in range(10):
plt.subplot(2, 5, j+1)
for i in range(500):
if (labels[i] == j):
plt.plot(y_reg_train[i, :], c=cs[i], alpha=0.5)
plt.ylim([0, 1])
plt.title('digit '+str(j))
return fig
#function to view training and validation losses
class PlotLosses(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
self.i += 1
clear_output(wait=True)
plt.plot(self.x, self.losses, label="loss", c = 'green')
plt.plot(self.x, self.val_losses, label="val_loss", c = 'red')
plt.legend()
plt.show()
#function to view multiple losses
def plotAllLosses(loss1, loss2):
N, m1f = loss1.shape
_, m2f = loss2.shape
print(loss1.shape)
print(loss2.shape)
fig = plt.figure(figsize=(6, 12))
plt.subplot(2, 1, 1)
plt.plot(loss1[:, 0], label='loss1_check1')
plt.plot(loss1[:, 1], label='loss1_check2')
plt.plot(loss1[:, 2], label='loss1_check3')
plt.plot(loss1[:, 3], label='loss1_check4')
plt.plot(loss1[:, 4], label='loss1_check5')
plt.plot(loss1[:, 5], label='loss1_check6')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(loss2[:, 0], label='loss2_check1')
plt.plot(loss2[:, 1], label='loss2_check2')
plt.legend()
return fig
def normalize(arr):
arr_min = np.min(arr)
return (arr-arr_min)/(np.max(arr)-arr_min)
def plot_cube(cube, angle=320):
'''cube must be 3d
'''
cube = normalize(cube)
cube = np.expand_dims(cube, axis=-1) #28x28x1
facecolors = cm.GnBu(cube) #28x28x4
filled = np.ones(cube.shape) #28x28x1
x, y, z = np.indices(np.array(cube.shape)+1) #29x29x2 for each axis
fig = plt.figure(figsize=[6, 6])
ax = fig.gca(projection='3d')
ax.view_init(30, angle)
ax.set_axis_off()
ax.set_box_aspect((cube.shape[0], cube.shape[1], 20))
ax.voxels(x, y, z, filled = filled, facecolors=facecolors, linewidth=0.0001)
plt.show()
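# A minimal usage sketch (hypothetical model and data names): PlotLosses is a standard Keras
# callback, so it is passed to fit() and redraws the loss/val_loss curves after every epoch.
#
#   plot_losses = PlotLosses()
#   model.fit(x_train, y_train, validation_split=0.1, epochs=20, callbacks=[plot_losses])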
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import numpy as np
from fairseq.data import data_utils
class WordNoising(object):
"""Generate a noisy version of a sentence, without changing words themselves."""
def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None):
self.dictionary = dictionary
self.bpe_end = None
if bpe_cont_marker:
self.bpe_end = np.array([
not self.dictionary[i].endswith(bpe_cont_marker)
for i in range(len(self.dictionary))
])
elif bpe_end_marker:
self.bpe_end = np.array([
self.dictionary[i].endswith(bpe_end_marker)
for i in range(len(self.dictionary))
])
self.get_word_idx = (
self._get_bpe_word_idx
if self.bpe_end is not None
else self._get_token_idx
)
def noising(self, x, lengths, noising_prob=0.0):
raise NotImplementedError()
def _get_bpe_word_idx(self, x):
"""
Given a list of BPE tokens, for every index in the tokens list,
return the index of the word grouping that it belongs to.
For example, for input x corresponding to ["how", "are", "y@@", "ou"],
return [[0], [1], [2], [2]].
"""
# x: (T x B)
bpe_end = self.bpe_end[x]
if (x.size(0) == 1 and x.size(1) == 1):
# Special case when we only have one word in x. If x = [[N]],
# bpe_end is a scalar (bool) instead of a 2-dim array of bools,
# which makes the sum operation below fail.
return np.array([[0]])
# do a reduce front sum to generate word ids
word_idx = bpe_end[::-1].cumsum(0)[::-1]
word_idx = word_idx.max(0)[None, :] - word_idx
return word_idx
def _get_token_idx(self, x):
"""
This is to extend noising functions to be able to apply to non-bpe
tokens, e.g. word or characters.
"""
x = torch.t(x)
word_idx = np.array([range(len(x_i)) for x_i in x])
return np.transpose(word_idx)
class WordDropout(WordNoising):
"""Randomly drop input words. If not passing blank_idx (default is None),
then dropped words will be removed. Otherwise, it will be replaced by the
blank_idx."""
def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None):
super().__init__(dictionary, bpe_cont_marker, bpe_end_marker)
def noising(self, x, lengths, dropout_prob=0.1, blank_idx=None):
# x: (T x B), lengths: B
if dropout_prob == 0:
return x, lengths
assert 0 < dropout_prob < 1
# be sure to drop entire words
word_idx = self.get_word_idx(x)
sentences = []
modified_lengths = []
for i in range(lengths.size(0)):
# Since dropout probabilities need to apply over non-pad tokens,
# it is not trivial to generate the keep mask without considering
# input lengths; otherwise, this could be done outside the loop
# We want to drop whole words based on word_idx grouping
num_words = max(word_idx[:, i]) + 1
# ith example: [x0, x1, ..., eos, pad, ..., pad]
# We should only generate keep probs for non-EOS tokens. Thus if the
# input sentence ends in EOS, the last word idx is not included in
# the dropout mask generation and we append True to always keep EOS.
# Otherwise, just generate the dropout mask for all word idx
# positions.
has_eos = x[lengths[i] - 1, i] == self.dictionary.eos()
if has_eos: # has eos?
keep = np.random.rand(num_words - 1) >= dropout_prob
keep = np.append(keep, [True]) # keep EOS symbol
else:
keep = np.random.rand(num_words) >= dropout_prob
words = x[:lengths[i], i].tolist()
# TODO: speed up the following loop
# drop words from the input according to keep
new_s = [
w if keep[word_idx[j, i]] else blank_idx
for j, w in enumerate(words)
]
new_s = [w for w in new_s if w is not None]
# we need to have at least one word in the sentence (more than the
# start / end sentence symbols)
if len(new_s) <= 1:
# insert at beginning in case the only token left is EOS
# EOS should be at end of list.
new_s.insert(0, words[np.random.randint(0, len(words))])
assert len(new_s) >= 1 and (
not has_eos # Either don't have EOS at end or last token is EOS
or (len(new_s) >= 2 and new_s[-1] == self.dictionary.eos())
), "New sentence is invalid."
sentences.append(new_s)
modified_lengths.append(len(new_s))
# re-construct input
modified_lengths = torch.LongTensor(modified_lengths)
modified_x = torch.LongTensor(
modified_lengths.max(),
modified_lengths.size(0)
).fill_(self.dictionary.pad())
for i in range(modified_lengths.size(0)):
modified_x[:modified_lengths[i], i].copy_(torch.LongTensor(sentences[i]))
return modified_x, modified_lengths
class WordShuffle(WordNoising):
"""Shuffle words by no more than k positions."""
def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None):
super().__init__(dictionary, bpe_cont_marker, bpe_end_marker)
def noising(self, x, lengths, max_shuffle_distance=3):
# x: (T x B), lengths: B
if max_shuffle_distance == 0:
return x, lengths
# max_shuffle_distance < 1 will return the same sequence
assert max_shuffle_distance > 1
# define noise word scores
noise = np.random.uniform(
0,
max_shuffle_distance,
size=(x.size(0), x.size(1)),
)
noise[0] = -1 # do not move start sentence symbol
# be sure to shuffle entire words
word_idx = self.get_word_idx(x)
x2 = x.clone()
for i in range(lengths.size(0)):
length_no_eos = lengths[i]
if x[lengths[i] - 1, i] == self.dictionary.eos():
length_no_eos = lengths[i] - 1
# generate a random permutation
scores = word_idx[:length_no_eos, i] + noise[word_idx[:length_no_eos, i], i]
# ensure no reordering inside a word
scores += 1e-6 * np.arange(length_no_eos)
permutation = scores.argsort()
# shuffle words
x2[:length_no_eos, i].copy_(
x2[:length_no_eos, i][torch.from_numpy(permutation)]
)
return x2, lengths
class UnsupervisedMTNoising(WordNoising):
"""
Implements the default configuration for noising in UnsupervisedMT
(github.com/facebookresearch/UnsupervisedMT)
"""
def __init__(
self,
dictionary,
max_word_shuffle_distance,
word_dropout_prob,
word_blanking_prob,
bpe_cont_marker="@@",
bpe_end_marker=None,
):
super().__init__(dictionary)
self.max_word_shuffle_distance = max_word_shuffle_distance
self.word_dropout_prob = word_dropout_prob
self.word_blanking_prob = word_blanking_prob
self.word_dropout = WordDropout(
dictionary=dictionary,
bpe_cont_marker=bpe_cont_marker,
bpe_end_marker=bpe_end_marker,
)
self.word_shuffle = WordShuffle(
dictionary=dictionary,
bpe_cont_marker=bpe_cont_marker,
bpe_end_marker=bpe_end_marker,
)
def noising(self, x, lengths):
# 1. Word Shuffle
noisy_src_tokens, noisy_src_lengths = self.word_shuffle.noising(
x=x,
lengths=lengths,
max_shuffle_distance=self.max_word_shuffle_distance,
)
# 2. Word Dropout
noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising(
x=noisy_src_tokens,
lengths=noisy_src_lengths,
dropout_prob=self.word_dropout_prob,
)
# 3. Word Blanking
noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising(
x=noisy_src_tokens,
lengths=noisy_src_lengths,
dropout_prob=self.word_blanking_prob,
blank_idx=self.dictionary.unk(),
)
return noisy_src_tokens
class NoisingDataset(torch.utils.data.Dataset):
def __init__(
self,
src_dataset,
src_dict,
seed,
noiser=None,
noising_class=UnsupervisedMTNoising,
**kwargs,
):
"""
Sets up a noising dataset which takes a src batch, generates
a noisy src using a noising config, and returns the
corresponding {noisy src, original src} batch
Args:
src_dataset: dataset which will be used to build self.src_dataset --
a LanguagePairDataset with src dataset as the source dataset and
None as the target dataset. Should NOT have padding so that
src_lengths are accurately calculated by language_pair_dataset
collate function.
We use language_pair_dataset here to encapsulate the tgt_dataset
so we can re-use the LanguagePairDataset collater to format the
batches in the structure that SequenceGenerator expects.
src_dict: src dictionary
seed: seed to use when generating random noise
noiser: a pre-initialized noiser. If this is None, a noiser will
be created using noising_class and kwargs.
noising_class: class to use when initializing noiser
kwargs: noising args for configuring noising to apply
Note that there is no equivalent argparse code for these args
anywhere in our top level train scripts yet. Integration is
still in progress. You can still, however, test out this dataset
functionality with the appropriate args as in the corresponding
unittest: test_noising_dataset.
"""
self.src_dataset = src_dataset
self.src_dict = src_dict
self.noiser = noiser if noiser is not None else noising_class(
dictionary=src_dict, **kwargs,
)
self.seed = seed
def __getitem__(self, index):
"""
Returns a single noisy sample. Multiple samples are fed to the collater
to create a noising dataset batch.
"""
src_tokens = self.src_dataset[index]
src_lengths = torch.LongTensor([len(src_tokens)])
src_tokens = src_tokens.unsqueeze(0)
# Transpose src tokens to fit expected shape of x in noising function
# (batch size, sequence length) -> (sequence length, batch size)
src_tokens_t = torch.t(src_tokens)
with data_utils.numpy_seed(self.seed + index):
noisy_src_tokens = self.noiser.noising(src_tokens_t, src_lengths)
# Transpose back to expected src_tokens format
# (sequence length, 1) -> (1, sequence length)
noisy_src_tokens = torch.t(noisy_src_tokens)
return noisy_src_tokens[0]
def __len__(self):
"""
The length of the noising dataset is the length of src.
"""
return len(self.src_dataset)
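# A minimal usage sketch (hypothetical; assumes `token_dataset` is an indexable dataset of
# LongTensor sentences encoded with `src_dict`): wrapping it in NoisingDataset makes each
# __getitem__ return a shuffled / dropped / blanked copy of the original sentence.
#
#   noisy_dataset = NoisingDataset(
#       src_dataset=token_dataset,
#       src_dict=src_dict,
#       seed=42,
#       max_word_shuffle_distance=3,
#       word_dropout_prob=0.1,
#       word_blanking_prob=0.1,
#   )
#   noisy_sentence = noisy_dataset[0]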
|
import torch.utils.data
import os
from PIL import Image
import numpy as np
class SearchDataset(torch.utils.data.Dataset):
def __init__(
self,
root_dir=os.path.join(os.path.dirname(__file__), "data/train"),
transform=None,
):
self.transform = transform
# Implement additional initialization logic if needed
self.root_dir = root_dir
self.samples = []
for i in os.listdir(root_dir):
if i in ["positive", "negative"]:
folder = os.path.join(root_dir, i)
target = folder.split("/")[-1]
for label in os.listdir(folder):
filepath = os.path.join(folder, label)
self.samples.append((target, filepath))
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
# Implement logic to get an image and its label using the received index.
#
# `image` should be a NumPy array with the shape [height, width, num_channels].
# If an image contains three color channels, it should use an RGB color scheme.
#
# `label` should be an integer in the range [0, model.num_classes - 1] where `model.num_classes`
# is a value set in the `search.yaml` file.
# get the filepath of the image based on the index and converts it to
# only RGB channels and then into a numpy array
image = np.array(Image.open(self.samples[index][1]).convert("RGB"))
# maps a label to an integer value
label_to_int = {"positive": 1, "negative": 0}
label = label_to_int[self.samples[index][0]]
if self.transform is not None:
transformed = self.transform(image=image)
image = transformed["image"]
return image, label
# print(os.path.join(os.path.dirname(__file__), "data/train"))
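# A minimal usage sketch (assumes the data/train/{positive,negative} layout described above and,
# optionally, an albumentations-style transform that returns {"image": ...}):
#
#   dataset = SearchDataset()
#   image, label = dataset[0]  # numpy HWC image, integer label (1=positive, 0=negative)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)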
|
"""Latin scansion app."""
import functools
import unicodedata
import flask
import wtforms # type: ignore
import yaml
import latin_scansion
import pynini
CONFIG = "config.yaml"
## Startup.
# Creates app object.
app = flask.Flask(__name__)
# Loads configs.
with open(CONFIG, "r") as source:
app.config.update(yaml.safe_load(source))
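# A minimal config.yaml sketch (the path is a placeholder; at minimum the "far_path" key
# referenced below is required to load the compiled scansion grammars):
#
#   far_path: /path/to/latin_scansion.far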
## Forms.
class ScansionForm(wtforms.Form):
string = wtforms.StringField(
"string", [wtforms.validators.Length(min=1, max=32768)]
)
show_text = wtforms.BooleanField("show_text")
show_norm = wtforms.BooleanField("show_norm")
show_raw_pron = wtforms.BooleanField("show_raw_pron")
show_var_pron = wtforms.BooleanField("show_var_pron")
show_feet = wtforms.BooleanField("show_feet")
show_syllables = wtforms.BooleanField("show_syllables")
## Curries functions.
with pynini.Far(app.config["far_path"], "r") as far:
scan_document = functools.partial(
latin_scansion.scan_document,
far["NORMALIZE"],
far["PRONOUNCE"],
far["VARIABLE"],
far["SYLLABLE"],
far["WEIGHT"],
far["HEXAMETER"],
)
## Routes.
@app.route("/")
def index() -> str:
form = ScansionForm() # noqa: F841
return flask.render_template("index.html")
@app.route("/result.html", methods=["POST"])
def result() -> str:
form = ScansionForm(flask.request.form)
if form.validate():
lines = unicodedata.normalize(
"NFC", form.string.data.strip()
).splitlines()
return flask.render_template(
"result.html",
document=scan_document(lines, "<webapp input>"),
# TODO: Is there a way to make these auto-convert to bool
# in the form specification?
show_text=bool(form.show_text.data),
show_norm=bool(form.show_norm.data),
show_raw_pron=bool(form.show_raw_pron.data),
show_var_pron=bool(form.show_var_pron.data),
show_feet=bool(form.show_feet.data),
show_syllables=bool(form.show_syllables.data),
)
return "<p>Form validation failed.</p>"
if __name__ == "__main__":
app.run()
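# A sketch of the expected config.yaml (an assumption based on the keys read
# above; far_path is the only key this module references). The FAR archive is
# expected to contain the NORMALIZE, PRONOUNCE, VARIABLE, SYLLABLE, WEIGHT,
# and HEXAMETER FSTs used when currying scan_document:
#
#   far_path: /path/to/latin_scansion.far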
|
"""
Ory Kratos API
    Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administrative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.10.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import unittest
import ory_kratos_client
from ory_kratos_client.api.v0alpha2_api import V0alpha2Api # noqa: E501
class TestV0alpha2Api(unittest.TestCase):
"""V0alpha2Api unit test stubs"""
def setUp(self):
self.api = V0alpha2Api() # noqa: E501
def tearDown(self):
pass
def test_admin_create_identity(self):
"""Test case for admin_create_identity
Create an Identity # noqa: E501
"""
pass
def test_admin_create_self_service_recovery_link(self):
"""Test case for admin_create_self_service_recovery_link
Create a Recovery Link # noqa: E501
"""
pass
def test_admin_delete_identity(self):
"""Test case for admin_delete_identity
Delete an Identity # noqa: E501
"""
pass
def test_admin_delete_identity_sessions(self):
"""Test case for admin_delete_identity_sessions
Calling this endpoint irrecoverably and permanently deletes and invalidates all sessions that belong to the given Identity. # noqa: E501
"""
pass
def test_admin_extend_session(self):
"""Test case for admin_extend_session
Calling this endpoint extends the given session ID. If `session.earliest_possible_extend` is set it will only extend the session after the specified time has passed. # noqa: E501
"""
pass
def test_admin_get_identity(self):
"""Test case for admin_get_identity
Get an Identity # noqa: E501
"""
pass
def test_admin_list_identities(self):
"""Test case for admin_list_identities
List Identities # noqa: E501
"""
pass
def test_admin_list_identity_sessions(self):
"""Test case for admin_list_identity_sessions
This endpoint returns all sessions that belong to the given Identity. # noqa: E501
"""
pass
def test_admin_update_identity(self):
"""Test case for admin_update_identity
Update an Identity # noqa: E501
"""
pass
def test_create_self_service_logout_flow_url_for_browsers(self):
"""Test case for create_self_service_logout_flow_url_for_browsers
Create a Logout URL for Browsers # noqa: E501
"""
pass
def test_get_json_schema(self):
"""Test case for get_json_schema
"""
pass
def test_get_self_service_error(self):
"""Test case for get_self_service_error
Get Self-Service Errors # noqa: E501
"""
pass
def test_get_self_service_login_flow(self):
"""Test case for get_self_service_login_flow
Get Login Flow # noqa: E501
"""
pass
def test_get_self_service_recovery_flow(self):
"""Test case for get_self_service_recovery_flow
Get Recovery Flow # noqa: E501
"""
pass
def test_get_self_service_registration_flow(self):
"""Test case for get_self_service_registration_flow
Get Registration Flow # noqa: E501
"""
pass
def test_get_self_service_settings_flow(self):
"""Test case for get_self_service_settings_flow
Get Settings Flow # noqa: E501
"""
pass
def test_get_self_service_verification_flow(self):
"""Test case for get_self_service_verification_flow
Get Verification Flow # noqa: E501
"""
pass
def test_get_web_authn_java_script(self):
"""Test case for get_web_authn_java_script
Get WebAuthn JavaScript # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_for_browsers(self):
"""Test case for initialize_self_service_login_flow_for_browsers
Initialize Login Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_without_browser(self):
"""Test case for initialize_self_service_login_flow_without_browser
Initialize Login Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_for_browsers(self):
"""Test case for initialize_self_service_recovery_flow_for_browsers
Initialize Recovery Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_without_browser(self):
"""Test case for initialize_self_service_recovery_flow_without_browser
Initialize Recovery Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_for_browsers(self):
"""Test case for initialize_self_service_registration_flow_for_browsers
Initialize Registration Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_without_browser(self):
"""Test case for initialize_self_service_registration_flow_without_browser
Initialize Registration Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_for_browsers(self):
"""Test case for initialize_self_service_settings_flow_for_browsers
Initialize Settings Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_without_browser(self):
"""Test case for initialize_self_service_settings_flow_without_browser
Initialize Settings Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_for_browsers(self):
"""Test case for initialize_self_service_verification_flow_for_browsers
Initialize Verification Flow for Browser Clients # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_without_browser(self):
"""Test case for initialize_self_service_verification_flow_without_browser
Initialize Verification Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_list_identity_schemas(self):
"""Test case for list_identity_schemas
"""
pass
def test_list_sessions(self):
"""Test case for list_sessions
        This endpoint returns all other active sessions that belong to the logged-in user. The current session can be retrieved by calling the `/sessions/whoami` endpoint. # noqa: E501
"""
pass
def test_revoke_session(self):
"""Test case for revoke_session
Calling this endpoint invalidates the specified session. The current session cannot be revoked. Session data are not deleted. # noqa: E501
"""
pass
def test_revoke_sessions(self):
"""Test case for revoke_sessions
Calling this endpoint invalidates all except the current session that belong to the logged-in user. Session data are not deleted. # noqa: E501
"""
pass
def test_submit_self_service_login_flow(self):
"""Test case for submit_self_service_login_flow
Submit a Login Flow # noqa: E501
"""
pass
def test_submit_self_service_logout_flow(self):
"""Test case for submit_self_service_logout_flow
Complete Self-Service Logout # noqa: E501
"""
pass
def test_submit_self_service_logout_flow_without_browser(self):
"""Test case for submit_self_service_logout_flow_without_browser
Perform Logout for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_submit_self_service_recovery_flow(self):
"""Test case for submit_self_service_recovery_flow
Complete Recovery Flow # noqa: E501
"""
pass
def test_submit_self_service_registration_flow(self):
"""Test case for submit_self_service_registration_flow
Submit a Registration Flow # noqa: E501
"""
pass
def test_submit_self_service_settings_flow(self):
"""Test case for submit_self_service_settings_flow
Complete Settings Flow # noqa: E501
"""
pass
def test_submit_self_service_verification_flow(self):
"""Test case for submit_self_service_verification_flow
Complete Verification Flow # noqa: E501
"""
pass
def test_to_session(self):
"""Test case for to_session
Check Who the Current HTTP Session Belongs To # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from base64 import b64encode
from airflow.gcp.hooks.kms import GoogleCloudKMSHook
from tests.compat import mock
BASE_STRING = 'airflow.contrib.hooks.gcp_api_base_hook.{}'
KMS_STRING = 'airflow.gcp.hooks.kms.{}'
TEST_PROJECT = 'test-project'
TEST_LOCATION = 'global'
TEST_KEY_RING = 'test-key-ring'
TEST_KEY = 'test-key'
TEST_KEY_ID = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
TEST_PROJECT, TEST_LOCATION, TEST_KEY_RING, TEST_KEY)
def mock_init(self, gcp_conn_id, delegate_to=None): # pylint: disable=unused-argument
pass
class TestGoogleCloudKMSHook(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleCloudBaseHook.__init__'),
new=mock_init):
self.kms_hook = GoogleCloudKMSHook(gcp_conn_id='test')
@mock.patch(KMS_STRING.format('GoogleCloudKMSHook.get_conn'))
def test_encrypt(self, mock_service):
plaintext = b'Test plaintext'
ciphertext = 'Test ciphertext'
plaintext_b64 = b64encode(plaintext).decode('ascii')
body = {'plaintext': plaintext_b64}
response = {'ciphertext': ciphertext}
encrypt_method = (mock_service.return_value
.projects.return_value
.locations.return_value
.keyRings.return_value
.cryptoKeys.return_value
.encrypt)
execute_method = encrypt_method.return_value.execute
execute_method.return_value = response
ret_val = self.kms_hook.encrypt(TEST_KEY_ID, plaintext)
encrypt_method.assert_called_with(name=TEST_KEY_ID,
body=body)
execute_method.assert_called_with(num_retries=mock.ANY)
self.assertEqual(ciphertext, ret_val)
@mock.patch(KMS_STRING.format('GoogleCloudKMSHook.get_conn'))
def test_encrypt_authdata(self, mock_service):
plaintext = b'Test plaintext'
auth_data = b'Test authdata'
ciphertext = 'Test ciphertext'
plaintext_b64 = b64encode(plaintext).decode('ascii')
auth_data_b64 = b64encode(auth_data).decode('ascii')
body = {
'plaintext': plaintext_b64,
'additionalAuthenticatedData': auth_data_b64
}
response = {'ciphertext': ciphertext}
encrypt_method = (mock_service.return_value
.projects.return_value
.locations.return_value
.keyRings.return_value
.cryptoKeys.return_value
.encrypt)
execute_method = encrypt_method.return_value.execute
execute_method.return_value = response
ret_val = self.kms_hook.encrypt(TEST_KEY_ID, plaintext,
authenticated_data=auth_data)
encrypt_method.assert_called_with(name=TEST_KEY_ID,
body=body)
execute_method.assert_called_with(num_retries=mock.ANY)
self.assertEqual(ciphertext, ret_val)
@mock.patch(KMS_STRING.format('GoogleCloudKMSHook.get_conn'))
def test_decrypt(self, mock_service):
plaintext = b'Test plaintext'
ciphertext = 'Test ciphertext'
plaintext_b64 = b64encode(plaintext).decode('ascii')
body = {'ciphertext': ciphertext}
response = {'plaintext': plaintext_b64}
decrypt_method = (mock_service.return_value
.projects.return_value
.locations.return_value
.keyRings.return_value
.cryptoKeys.return_value
.decrypt)
execute_method = decrypt_method.return_value.execute
execute_method.return_value = response
ret_val = self.kms_hook.decrypt(TEST_KEY_ID, ciphertext)
decrypt_method.assert_called_with(name=TEST_KEY_ID,
body=body)
execute_method.assert_called_with(num_retries=mock.ANY)
self.assertEqual(plaintext, ret_val)
@mock.patch(KMS_STRING.format('GoogleCloudKMSHook.get_conn'))
def test_decrypt_authdata(self, mock_service):
plaintext = b'Test plaintext'
auth_data = b'Test authdata'
ciphertext = 'Test ciphertext'
plaintext_b64 = b64encode(plaintext).decode('ascii')
auth_data_b64 = b64encode(auth_data).decode('ascii')
body = {
'ciphertext': ciphertext,
'additionalAuthenticatedData': auth_data_b64
}
response = {'plaintext': plaintext_b64}
decrypt_method = (mock_service.return_value
.projects.return_value
.locations.return_value
.keyRings.return_value
.cryptoKeys.return_value
.decrypt)
execute_method = decrypt_method.return_value.execute
execute_method.return_value = response
ret_val = self.kms_hook.decrypt(TEST_KEY_ID, ciphertext,
authenticated_data=auth_data)
decrypt_method.assert_called_with(name=TEST_KEY_ID,
body=body)
execute_method.assert_called_with(num_retries=mock.ANY)
self.assertEqual(plaintext, ret_val)
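# A minimal non-test usage sketch (assumption: a working GCP connection is
# configured in Airflow under the chosen gcp_conn_id; the encrypt/decrypt
# signatures mirror the calls exercised in the tests above):
#
#   hook = GoogleCloudKMSHook(gcp_conn_id='google_cloud_default')
#   ciphertext = hook.encrypt(TEST_KEY_ID, b'secret payload')
#   plaintext = hook.decrypt(TEST_KEY_ID, ciphertext)
#
# Allow running this module directly, mirroring the other test modules here.
if __name__ == '__main__':
    unittest.main()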
|
"""
Support for ZigBee Home Automation devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import collections
import enum
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant import const as ha_const
from homeassistant.helpers import discovery, entity
from homeassistant.util import slugify
REQUIREMENTS = [
'bellows==0.6.0',
'zigpy==0.1.0',
'zigpy-xbee==0.1.1',
]
DOMAIN = 'zha'
class RadioType(enum.Enum):
"""Possible options for radio type in config."""
ezsp = 'ezsp'
xbee = 'xbee'
CONF_BAUDRATE = 'baudrate'
CONF_DATABASE = 'database_path'
CONF_DEVICE_CONFIG = 'device_config'
CONF_RADIO_TYPE = 'radio_type'
CONF_USB_PATH = 'usb_path'
DATA_DEVICE_CONFIG = 'zha_device_config'
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema({
vol.Optional(ha_const.CONF_TYPE): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_RADIO_TYPE, default='ezsp'): cv.enum(RadioType),
CONF_USB_PATH: cv.string,
vol.Optional(CONF_BAUDRATE, default=57600): cv.positive_int,
CONF_DATABASE: cv.string,
vol.Optional(CONF_DEVICE_CONFIG, default={}):
vol.Schema({cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}),
})
}, extra=vol.ALLOW_EXTRA)
ATTR_DURATION = 'duration'
ATTR_IEEE = 'ieee_address'
SERVICE_PERMIT = 'permit'
SERVICE_REMOVE = 'remove'
SERVICE_SCHEMAS = {
SERVICE_PERMIT: vol.Schema({
vol.Optional(ATTR_DURATION, default=60):
vol.All(vol.Coerce(int), vol.Range(1, 254)),
}),
SERVICE_REMOVE: vol.Schema({
vol.Required(ATTR_IEEE): cv.string,
}),
}
# ZigBee definitions
CENTICELSIUS = 'C-100'
# Key in hass.data dict containing discovery info
DISCOVERY_KEY = 'zha_discovery_info'
# Internal definitions
APPLICATION_CONTROLLER = None
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up ZHA.
Will automatically load components to support devices found on the network.
"""
global APPLICATION_CONTROLLER
usb_path = config[DOMAIN].get(CONF_USB_PATH)
baudrate = config[DOMAIN].get(CONF_BAUDRATE)
radio_type = config[DOMAIN].get(CONF_RADIO_TYPE)
if radio_type == RadioType.ezsp:
import bellows.ezsp
from bellows.zigbee.application import ControllerApplication
radio = bellows.ezsp.EZSP()
elif radio_type == RadioType.xbee:
import zigpy_xbee.api
from zigpy_xbee.zigbee.application import ControllerApplication
radio = zigpy_xbee.api.XBee()
await radio.connect(usb_path, baudrate)
database = config[DOMAIN].get(CONF_DATABASE)
APPLICATION_CONTROLLER = ControllerApplication(radio, database)
listener = ApplicationListener(hass, config)
APPLICATION_CONTROLLER.add_listener(listener)
await APPLICATION_CONTROLLER.startup(auto_form=True)
for device in APPLICATION_CONTROLLER.devices.values():
hass.async_add_job(listener.async_device_initialized(device, False))
async def permit(service):
"""Allow devices to join this network."""
duration = service.data.get(ATTR_DURATION)
_LOGGER.info("Permitting joins for %ss", duration)
await APPLICATION_CONTROLLER.permit(duration)
hass.services.async_register(DOMAIN, SERVICE_PERMIT, permit,
schema=SERVICE_SCHEMAS[SERVICE_PERMIT])
async def remove(service):
"""Remove a node from the network."""
from bellows.types import EmberEUI64, uint8_t
ieee = service.data.get(ATTR_IEEE)
ieee = EmberEUI64([uint8_t(p, base=16) for p in ieee.split(':')])
_LOGGER.info("Removing node %s", ieee)
await APPLICATION_CONTROLLER.remove(ieee)
hass.services.async_register(DOMAIN, SERVICE_REMOVE, remove,
schema=SERVICE_SCHEMAS[SERVICE_REMOVE])
return True
class ApplicationListener:
"""All handlers for events that happen on the ZigBee application."""
def __init__(self, hass, config):
"""Initialize the listener."""
self._hass = hass
self._config = config
self._device_registry = collections.defaultdict(list)
hass.data[DISCOVERY_KEY] = hass.data.get(DISCOVERY_KEY, {})
def device_joined(self, device):
"""Handle device joined.
At this point, no information about the device is known other than its
address
"""
# Wait for device_initialized, instead
pass
def raw_device_initialized(self, device):
"""Handle a device initialization without quirks loaded."""
# Wait for device_initialized, instead
pass
def device_initialized(self, device):
"""Handle device joined and basic information discovered."""
self._hass.async_add_job(self.async_device_initialized(device, True))
def device_left(self, device):
"""Handle device leaving the network."""
pass
def device_removed(self, device):
"""Handle device being removed from the network."""
for device_entity in self._device_registry[device.ieee]:
self._hass.async_add_job(device_entity.async_remove())
async def async_device_initialized(self, device, join):
"""Handle device joined and basic information discovered (async)."""
import zigpy.profiles
import homeassistant.components.zha.const as zha_const
zha_const.populate_data()
for endpoint_id, endpoint in device.endpoints.items():
if endpoint_id == 0: # ZDO
continue
discovered_info = await _discover_endpoint_info(endpoint)
component = None
profile_clusters = ([], [])
device_key = "{}-{}".format(device.ieee, endpoint_id)
node_config = self._config[DOMAIN][CONF_DEVICE_CONFIG].get(
device_key, {})
if endpoint.profile_id in zigpy.profiles.PROFILES:
profile = zigpy.profiles.PROFILES[endpoint.profile_id]
if zha_const.DEVICE_CLASS.get(endpoint.profile_id,
{}).get(endpoint.device_type,
None):
profile_clusters = profile.CLUSTERS[endpoint.device_type]
profile_info = zha_const.DEVICE_CLASS[endpoint.profile_id]
component = profile_info[endpoint.device_type]
if ha_const.CONF_TYPE in node_config:
component = node_config[ha_const.CONF_TYPE]
profile_clusters = zha_const.COMPONENT_CLUSTERS[component]
if component:
in_clusters = [endpoint.in_clusters[c]
for c in profile_clusters[0]
if c in endpoint.in_clusters]
out_clusters = [endpoint.out_clusters[c]
for c in profile_clusters[1]
if c in endpoint.out_clusters]
discovery_info = {
'application_listener': self,
'endpoint': endpoint,
'in_clusters': {c.cluster_id: c for c in in_clusters},
'out_clusters': {c.cluster_id: c for c in out_clusters},
'new_join': join,
'unique_id': device_key,
}
discovery_info.update(discovered_info)
self._hass.data[DISCOVERY_KEY][device_key] = discovery_info
await discovery.async_load_platform(
self._hass,
component,
DOMAIN,
{'discovery_key': device_key},
self._config,
)
for cluster in endpoint.in_clusters.values():
await self._attempt_single_cluster_device(
endpoint,
cluster,
profile_clusters[0],
device_key,
zha_const.SINGLE_INPUT_CLUSTER_DEVICE_CLASS,
'in_clusters',
discovered_info,
join,
)
for cluster in endpoint.out_clusters.values():
await self._attempt_single_cluster_device(
endpoint,
cluster,
profile_clusters[1],
device_key,
zha_const.SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS,
'out_clusters',
discovered_info,
join,
)
def register_entity(self, ieee, entity_obj):
"""Record the creation of a hass entity associated with ieee."""
self._device_registry[ieee].append(entity_obj)
async def _attempt_single_cluster_device(self, endpoint, cluster,
profile_clusters, device_key,
device_classes, discovery_attr,
entity_info, is_new_join):
"""Try to set up an entity from a "bare" cluster."""
if cluster.cluster_id in profile_clusters:
return
component = None
for cluster_type, candidate_component in device_classes.items():
if isinstance(cluster, cluster_type):
component = candidate_component
break
if component is None:
return
cluster_key = "{}-{}".format(device_key, cluster.cluster_id)
discovery_info = {
'application_listener': self,
'endpoint': endpoint,
'in_clusters': {},
'out_clusters': {},
'new_join': is_new_join,
'unique_id': cluster_key,
'entity_suffix': '_{}'.format(cluster.cluster_id),
}
discovery_info[discovery_attr] = {cluster.cluster_id: cluster}
discovery_info.update(entity_info)
self._hass.data[DISCOVERY_KEY][cluster_key] = discovery_info
await discovery.async_load_platform(
self._hass,
component,
DOMAIN,
{'discovery_key': cluster_key},
self._config,
)
class Entity(entity.Entity):
"""A base class for ZHA entities."""
_domain = None # Must be overridden by subclasses
def __init__(self, endpoint, in_clusters, out_clusters, manufacturer,
model, application_listener, unique_id, **kwargs):
"""Init ZHA entity."""
self._device_state_attributes = {}
ieee = endpoint.device.ieee
ieeetail = ''.join(['%02x' % (o, ) for o in ieee[-4:]])
if manufacturer and model is not None:
self.entity_id = "{}.{}_{}_{}_{}{}".format(
self._domain,
slugify(manufacturer),
slugify(model),
ieeetail,
endpoint.endpoint_id,
kwargs.get('entity_suffix', ''),
)
self._device_state_attributes['friendly_name'] = "{} {}".format(
manufacturer,
model,
)
else:
self.entity_id = "{}.zha_{}_{}{}".format(
self._domain,
ieeetail,
endpoint.endpoint_id,
kwargs.get('entity_suffix', ''),
)
self._endpoint = endpoint
self._in_clusters = in_clusters
self._out_clusters = out_clusters
self._state = None
self._unique_id = unique_id
# Normally the entity itself is the listener. Sub-classes may set this
# to a dict of cluster ID -> listener to receive messages for specific
# clusters separately
self._in_listeners = {}
self._out_listeners = {}
application_listener.register_entity(ieee, self)
async def async_added_to_hass(self):
"""Callback once the entity is added to hass.
It is now safe to update the entity state
"""
for cluster_id, cluster in self._in_clusters.items():
cluster.add_listener(self._in_listeners.get(cluster_id, self))
for cluster_id, cluster in self._out_clusters.items():
cluster.add_listener(self._out_listeners.get(cluster_id, self))
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return self._device_state_attributes
def attribute_updated(self, attribute, value):
"""Handle an attribute updated on this cluster."""
pass
def zdo_command(self, tsn, command_id, args):
"""Handle a ZDO command received on this cluster."""
pass
async def _discover_endpoint_info(endpoint):
"""Find some basic information about an endpoint."""
extra_info = {
'manufacturer': None,
'model': None,
}
if 0 not in endpoint.in_clusters:
return extra_info
async def read(attributes):
"""Read attributes and update extra_info convenience function."""
result, _ = await endpoint.in_clusters[0].read_attributes(
attributes,
allow_cache=True,
)
extra_info.update(result)
await read(['manufacturer', 'model'])
if extra_info['manufacturer'] is None or extra_info['model'] is None:
# Some devices fail at returning multiple results. Attempt separately.
await read(['manufacturer'])
await read(['model'])
for key, value in extra_info.items():
if isinstance(value, bytes):
try:
extra_info[key] = value.decode('ascii').strip()
except UnicodeDecodeError:
# Unsure what the best behaviour here is. Unset the key?
pass
return extra_info
def get_discovery_info(hass, discovery_info):
"""Get the full discovery info for a device.
Some of the info that needs to be passed to platforms is not JSON
serializable, so it cannot be put in the discovery_info dictionary. This
component places that info we need to pass to the platform in hass.data,
and this function is a helper for platforms to retrieve the complete
discovery info.
"""
if discovery_info is None:
return
discovery_key = discovery_info.get('discovery_key', None)
all_discovery_info = hass.data.get(DISCOVERY_KEY, {})
return all_discovery_info.get(discovery_key, None)
async def safe_read(cluster, attributes, allow_cache=True):
"""Swallow all exceptions from network read.
    If we throw during initialization, setup fails. We would rather have an
    entity that exists, but may be in a wrong state, than no entity at all.
    This method should probably only be used during initialization.
"""
try:
result, _ = await cluster.read_attributes(
attributes,
allow_cache=allow_cache,
)
return result
except Exception: # pylint: disable=broad-except
return {}
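# A sketch of how a zha platform would typically consume the helpers above
# (an assumption, not part of this component; 'on_off' is only an illustrative
# attribute name):
#
#   async def async_setup_platform(hass, config, async_add_devices,
#                                  discovery_info=None):
#       discovery_info = get_discovery_info(hass, discovery_info)
#       if discovery_info is None:
#           return
#       cluster = next(iter(discovery_info['in_clusters'].values()))
#       initial = await safe_read(cluster, ['on_off'])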
|
# --------------------------------------------------------
# PyTorch WSDDN
# Copyright 2018. Seungkwan Lee
# Licensed under The MIT License [see LICENSE for details]
# Written by Seungkwan Lee
# Some parts of this implementation are based on code from Ross Girshick, Jiasen Lu, and Jianwei Yang
# --------------------------------------------------------
import torch
def element_wise_iou(boxes_a, boxes_b):
"""
    Compute the element-wise IoU.
    :param boxes_a: (n, 4) minmax form boxes
    :param boxes_b: (n, 4) minmax form boxes
    :return: (n) iou
"""
max_xy = torch.min(boxes_a[:, 2:], boxes_b[:, 2:])
min_xy = torch.max(boxes_a[:, :2], boxes_b[:, :2])
inter_wh = torch.clamp((max_xy - min_xy + 1), min=0)
I = inter_wh[:, 0] * inter_wh[:, 1]
A = (boxes_a[:, 2] - boxes_a[:, 0] + 1) * (boxes_a[:, 3] - boxes_a[:, 1] + 1)
B = (boxes_b[:, 2] - boxes_b[:, 0] + 1) * (boxes_b[:, 3] - boxes_b[:, 1] + 1)
U = A + B - I
return I / U
def all_pair_iou(boxes_a, boxes_b):
"""
Compute the IoU of all pairs.
:param boxes_a: (n, 4) minmax form boxes
:param boxes_b: (m, 4) minmax form boxes
:return: (n, m) iou of all pairs of two set
"""
N = boxes_a.size(0)
M = boxes_b.size(0)
max_xy = torch.min(boxes_a[:, 2:].unsqueeze(1).expand(N, M, 2), boxes_b[:, 2:].unsqueeze(0).expand(N, M, 2))
min_xy = torch.max(boxes_a[:, :2].unsqueeze(1).expand(N, M, 2), boxes_b[:, :2].unsqueeze(0).expand(N, M, 2))
inter_wh = torch.clamp((max_xy - min_xy + 1), min=0)
I = inter_wh[:, :, 0] * inter_wh[:, :, 1]
A = ((boxes_a[:, 2] - boxes_a[:, 0] + 1) * (boxes_a[:, 3] - boxes_a[:, 1] + 1)).unsqueeze(1).expand_as(I)
B = ((boxes_b[:, 2] - boxes_b[:, 0] + 1) * (boxes_b[:, 3] - boxes_b[:, 1] + 1)).unsqueeze(0).expand_as(I)
U = A + B - I
return I / U
def transform(boxes, transform_param):
"""
transform boxes
:param boxes: (n, 4) tensor, (cx, cy, w, h) form.
:param transform_param: (n, 4) tensor.
:return: (n, 4) transformed boxes, (cx, cy, w, h) form.
"""
cx = boxes[:, 0] + transform_param[:, 0] * boxes[:, 2]
cy = boxes[:, 1] + transform_param[:, 1] * boxes[:, 3]
w = boxes[:, 2] * torch.exp(transform_param[:, 2])
h = boxes[:, 3] * torch.exp(transform_param[:, 3])
return torch.stack([cx, cy, w, h], 1)
def to_cwh_form(boxes):
"""
:param boxes: (n, 4) tensor, (cx, cy, w, h) form.
:return: (n, 4) tensor, (xmin, ymin, xmax, ymax) form
"""
cx = (boxes[:, 0] + boxes[:, 2]) / 2
cy = (boxes[:, 1] + boxes[:, 3]) / 2
w = boxes[:, 2] - boxes[:, 0] + 1
h = boxes[:, 3] - boxes[:, 1] + 1
return torch.stack([cx, cy, w, h], 1)
def to_minmax_form(boxes):
"""
:param boxes: (n, 4) tensor, (xmin, ymin, xmax, ymax) form.
:return: (n, 4) tensor, (cx, cy, w, h) form
"""
xmin = boxes[:, 0] - boxes[:, 2] / 2 + 0.5
ymin = boxes[:, 1] - boxes[:, 3] / 2 + 0.5
xmax = boxes[:, 0] + boxes[:, 2] / 2 - 0.5
ymax = boxes[:, 1] + boxes[:, 3] / 2 - 0.5
return torch.stack([xmin, ymin, xmax, ymax], 1)
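# A minimal sanity check (not part of the original module): IoU of two boxes in
# (xmin, ymin, xmax, ymax) form, and a round trip through the two converters.
if __name__ == '__main__':
    a = torch.tensor([[0., 0., 9., 9.], [5., 5., 14., 14.]])
    b = torch.tensor([[0., 0., 9., 9.]])
    print(all_pair_iou(a, b))              # shape (2, 1); first entry is 1.0
    print(to_minmax_form(to_cwh_form(a)))  # recovers the original boxes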
|
from typing import List
from ..ex.relational import IRelationalRow
from . import xivrow, XivRow, IXivSheet
@xivrow
class GatheringPoint(XivRow):
@property
def base(self) -> "GatheringPointBase":
from .gathering_point_base import GatheringPointBase
return self.as_T(GatheringPointBase)
@property
def territory_type(self) -> "TerritoryType":
from .territory_type import TerritoryType
return self.as_T(TerritoryType)
@property
def place_name(self) -> "PlaceName":
from .placename import PlaceName
return self.as_T(PlaceName)
@property
def gathering_point_bonus(self) -> List["GatheringPointBonus"]:
if self.__bonuses is None:
self.__bonuses = self.__build_gathering_point_bonus()
return self.__bonuses
@property
def gathering_sub_category(self) -> "GatheringSubCategory":
from .gathering_sub_category import GatheringSubCategory
return self.as_T(GatheringSubCategory)
def __init__(self, sheet: IXivSheet, source_row: IRelationalRow):
super(GatheringPoint, self).__init__(sheet, source_row)
self.__bonuses = None
def __build_gathering_point_bonus(self):
from .gathering_point_bonus import GatheringPointBonus
COUNT = 2
bonuses = []
for i in range(COUNT):
bonus = self.as_T(GatheringPointBonus, None, i)
if bonus.key != 0:
bonuses.append(bonus)
return bonuses
|
import datetime
from django import forms
from django.contrib import admin, messages
from django.contrib.admin.util import unquote
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.decorators import permission_required
from django.core.exceptions import ValidationError, PermissionDenied
from django.db.models import Q
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.decorators import method_decorator
from django.utils.encoding import force_unicode
from django.utils.functional import update_wrapper
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ungettext
from django.views.decorators.csrf import csrf_protect
from metashare import settings
from metashare.accounts.models import EditorGroup, EditorGroupManagers
from metashare.repository.editor.editorutils import FilteredChangeList
from metashare.repository.editor.forms import StorageObjectUploadForm
from metashare.repository.editor.inlines import ReverseInlineFormSet, \
ReverseInlineModelAdmin
from metashare.repository.editor.lookups import MembershipDummyLookup
from metashare.repository.editor.schemamodel_mixin import encode_as_inline
from metashare.repository.editor.superadmin import SchemaModelAdmin
from metashare.repository.editor.widgets import OneToManyWidget
from metashare.repository.models import resourceComponentTypeType_model, \
corpusInfoType_model, languageDescriptionInfoType_model, \
lexicalConceptualResourceInfoType_model, toolServiceInfoType_model, \
corpusMediaTypeType_model, languageDescriptionMediaTypeType_model, \
lexicalConceptualResourceMediaTypeType_model, resourceInfoType_model, \
licenceInfoType_model, User
from metashare.repository.supermodel import SchemaModel
from metashare.stats.model_utils import saveLRStats, UPDATE_STAT, INGEST_STAT, DELETE_STAT
from metashare.storage.models import PUBLISHED, INGESTED, INTERNAL, \
ALLOWED_ARCHIVE_EXTENSIONS
from metashare.utils import verify_subclass, create_breadcrumb_template_params
from metashare.repository import pids
csrf_protect_m = method_decorator(csrf_protect)
class ResourceComponentInlineFormSet(ReverseInlineFormSet):
'''
A formset with custom save logic for resources.
'''
def clean(self):
actual_instance = self.get_actual_resourceComponentType()
error_list = ''
if isinstance(actual_instance, corpusInfoType_model):
error_list = error_list + self.clean_corpus(actual_instance)
elif isinstance(actual_instance, languageDescriptionInfoType_model):
error_list = error_list + self.clean_langdesc(actual_instance)
elif isinstance(actual_instance, lexicalConceptualResourceInfoType_model):
error_list = error_list + self.clean_lexicon(actual_instance)
elif isinstance(actual_instance, toolServiceInfoType_model):
error_list = error_list + self.clean_toolservice(actual_instance)
else:
            raise Exception("unexpected resource component class type: {}".format(actual_instance.__class__.__name__))
try:
actual_instance.full_clean()
except ValidationError:
#raise ValidationError('The content of the {} general info is not valid.'.format(self.get_actual_resourceComponentType()._meta.verbose_name))
#raise AssertionError("Meaningful error message for general info")
error_list = error_list + 'The content of the {} general info is not valid.'.format(self.get_actual_resourceComponentType()._meta.verbose_name)
if error_list != '':
raise ValidationError(error_list)
super(ResourceComponentInlineFormSet, self).clean()
def clean_media(self, parent, fieldnames):
'''
Clean the list of media data in the XXMediaType parent object.
'''
error = ''
for modelfieldname in fieldnames:
if modelfieldname not in self.data:
continue
value = self.data[modelfieldname]
if not value:
error = error + format(modelfieldname) + ' error. '
return error
def clean_corpus_one2many(self, corpusmediatype):
error = ''
media = 'corpusTextInfo'
flag = 'showCorpusTextInfo'
if flag in self.data and self.data[flag]:
num_infos = corpusmediatype.corpustextinfotype_model_set.all().count()
if num_infos == 0:
error += media + ' error. '
media = 'corpusVideoInfo'
flag = 'showCorpusVideoInfo'
if flag in self.data and self.data[flag]:
num_infos = corpusmediatype.corpusvideoinfotype_model_set.all().count()
if num_infos == 0:
error += media + ' error. '
return error
def clean_corpus(self, corpus):
return self.clean_corpus_one2many(corpus.corpusMediaType) \
+ self.clean_media(corpus.corpusMediaType, \
('corpusAudioInfo', 'corpusImageInfo', 'corpusTextNumericalInfo', 'corpusTextNgramInfo'))
def clean_langdesc(self, langdesc):
return self.clean_media(langdesc.languageDescriptionMediaType, \
('languageDescriptionTextInfo', 'languageDescriptionVideoInfo', 'languageDescriptionImageInfo'))
def clean_lexicon(self, lexicon):
return self.clean_media(lexicon.lexicalConceptualResourceMediaType, \
('lexicalConceptualResourceTextInfo', 'lexicalConceptualResourceAudioInfo', \
'lexicalConceptualResourceVideoInfo', 'lexicalConceptualResourceImageInfo'))
def clean_toolservice(self, tool):
return ''
def save_media(self, parent, fieldnames):
'''
Save the list of media data in the XXMediaType parent object.
'''
for modelfieldname in fieldnames:
if modelfieldname not in self.data:
continue
value = self.data[modelfieldname]
if not value:
continue
modelfield = parent._meta.get_field(modelfieldname)
child_id = int(value)
child = modelfield.rel.to.objects.get(pk=child_id)
setattr(parent, modelfieldname, child)
parent.save()
def save_corpus(self, corpus, commit):
self.save_media(corpus.corpusMediaType, \
('corpusAudioInfo', 'corpusImageInfo', 'corpusTextNumericalInfo', 'corpusTextNgramInfo'))
def save_langdesc(self, langdesc, commit):
self.save_media(langdesc.languageDescriptionMediaType, \
('languageDescriptionTextInfo', 'languageDescriptionVideoInfo', 'languageDescriptionImageInfo'))
def save_lexicon(self, lexicon, commit):
self.save_media(lexicon.lexicalConceptualResourceMediaType, \
('lexicalConceptualResourceTextInfo', 'lexicalConceptualResourceAudioInfo', \
'lexicalConceptualResourceVideoInfo', 'lexicalConceptualResourceImageInfo'))
def save_toolservice(self, tool, commit):
pass
def get_actual_resourceComponentType(self):
if not (self.forms and self.forms[0].instance):
            raise Exception("Cannot save for a non-existing instance")
if self.forms[0].instance.pk is not None:
actual_instance = self.forms[0].instance
else:
actual_instance = resourceComponentTypeType_model.objects.get(pk=self.data['resourceComponentId'])
self.forms[0].instance = actual_instance # we need to use the resourceComponentType we created earlier
actual_instance = actual_instance.as_subclass()
return actual_instance
def save(self, commit=True):
actual_instance = self.get_actual_resourceComponentType()
if isinstance(actual_instance, corpusInfoType_model):
self.save_corpus(actual_instance, commit)
elif isinstance(actual_instance, languageDescriptionInfoType_model):
self.save_langdesc(actual_instance, commit)
elif isinstance(actual_instance, lexicalConceptualResourceInfoType_model):
self.save_lexicon(actual_instance, commit)
elif isinstance(actual_instance, toolServiceInfoType_model):
self.save_toolservice(actual_instance, commit)
else:
            raise Exception("unexpected resource component class type: {}".format(actual_instance.__class__.__name__))
super(ResourceComponentInlineFormSet, self).save(commit)
return (actual_instance,)
# pylint: disable-msg=R0901
class ResourceComponentInline(ReverseInlineModelAdmin):
formset = ResourceComponentInlineFormSet
def __init__(self,
parent_model,
parent_fk_name,
model, admin_site,
inline_type):
super(ResourceComponentInline, self). \
__init__(parent_model, parent_fk_name, model, admin_site, inline_type)
self.template = 'repository/editor/resourceComponentInline.html'
class IdentificationForm(forms.ModelForm):
def save(self, commit=True):
return super(IdentificationForm, self).save(commit)
# pylint: disable-msg=R0901
class IdentificationInline(ReverseInlineModelAdmin):
form = IdentificationForm
readonly_fields = ('metaShareId', 'identifier')
def change_resource_status(resource, status, precondition_status=None):
'''
Change the status of the given resource to the new status given.
If precondition_status is not None, then apply the change ONLY IF the
current status of the resource is precondition_status; otherwise do nothing.
The status of non-master copy resources is never changed.
'''
if not hasattr(resource, 'storage_object'):
        raise NotImplementedError("{0} has no storage object".format(resource))
if resource.storage_object.master_copy and \
(precondition_status is None \
or precondition_status == resource.storage_object.publication_status):
resource.storage_object.publication_status = status
resource.storage_object.save()
# explicitly write metadata XML and storage object to the storage folder
resource.storage_object.update_storage()
return True
return False
def has_edit_permission(request, res_obj):
"""
Returns `True` if the given request has permission to edit the metadata
for the current resource, `False` otherwise.
"""
return request.user.is_active and (request.user.is_superuser \
or request.user in res_obj.owners.all() \
or res_obj.editor_groups.filter(name__in=
request.user.groups.values_list('name', flat=True)).count() != 0)
def has_publish_permission(request, queryset):
"""
Returns `True` if the given request has permission to change the publication
status of all given language resources, `False` otherwise.
"""
if not request.user.is_superuser:
for obj in queryset:
res_groups = obj.editor_groups.all()
# we only allow a user to ingest/publish/unpublish a resource if she
# is a manager of one of the resource's `EditorGroup`s
if not any(res_group.name == mgr_group.managed_group.name
for res_group in res_groups
for mgr_group in EditorGroupManagers.objects.filter(name__in=
request.user.groups.values_list('name', flat=True))):
return False
return True
class MetadataForm(forms.ModelForm):
def save(self, commit=True):
today = datetime.date.today()
if not self.instance.metadataCreationDate:
self.instance.metadataCreationDate = today
self.instance.metadataLastDateUpdated = today
return super(MetadataForm, self).save(commit)
class MetadataInline(ReverseInlineModelAdmin):
form = MetadataForm
readonly_fields = ('metadataCreationDate', 'metadataLastDateUpdated',)
class ResourceModelAdmin(SchemaModelAdmin):
inline_type = 'stacked'
custom_one2one_inlines = {'identificationInfo':IdentificationInline,
'resourceComponentType':ResourceComponentInline,
'metadataInfo':MetadataInline, }
content_fields = ('resourceComponentType',)
list_display = ('__unicode__', 'resource_type', 'publication_status', 'resource_Owners', 'editor_Groups',)
list_filter = ('storage_object__publication_status',)
actions = ('publish_action', 'unpublish_action', 'ingest_action',
'export_xml_action', 'delete', 'add_group', 'remove_group',
'add_owner', 'remove_owner')
hidden_fields = ('storage_object', 'owners', 'editor_groups',)
def publish_action(self, request, queryset):
if has_publish_permission(request, queryset):
successful = 0
for obj in queryset:
if change_resource_status(obj, status=PUBLISHED,
precondition_status=INGESTED):
successful += 1
saveLRStats(obj, UPDATE_STAT, request)
if successful > 0:
messages.info(request, ungettext(
'Successfully published %(ingested)s ingested resource.',
'Successfully published %(ingested)s ingested resources.',
successful) % {'ingested': successful})
else:
messages.error(request,
_('Only ingested resources can be published.'))
else:
messages.error(request, _('You do not have the permission to ' \
'perform this action for all selected resources.'))
publish_action.short_description = _("Publish selected ingested resources")
def unpublish_action(self, request, queryset):
if has_publish_permission(request, queryset):
successful = 0
for obj in queryset:
if change_resource_status(obj, status=INGESTED,
precondition_status=PUBLISHED):
successful += 1
saveLRStats(obj, INGEST_STAT, request)
if successful > 0:
messages.info(request, ungettext(
'Successfully unpublished %s published resource.',
'Successfully unpublished %s published resources.',
successful) % (successful,))
else:
messages.error(request,
_('Only published resources can be unpublished.'))
else:
messages.error(request, _('You do not have the permission to ' \
'perform this action for all selected resources.'))
unpublish_action.short_description = \
_("Unpublish selected published resources")
def ingest_action(self, request, queryset):
if has_publish_permission(request, queryset):
successful = 0
for obj in queryset:
if change_resource_status(obj, status=INGESTED,
precondition_status=INTERNAL):
successful += 1
saveLRStats(obj, INGEST_STAT, request)
pids.add_pid_to_resource(obj.id)
if successful > 0:
messages.info(request, ungettext(
'Successfully ingested %(internal)s internal resource.',
'Successfully ingested %(internal)s internal resources.',
successful) % {'internal': successful})
else:
messages.error(request,
_('Only internal resources can be ingested.'))
else:
messages.error(request, _('You do not have the permission to ' \
'perform this action for all selected resources.'))
ingest_action.short_description = _("Ingest selected internal resources")
def export_xml_action(self, request, queryset):
from StringIO import StringIO
from zipfile import ZipFile
from metashare.xml_utils import to_xml_string
from django import http
zipfilename = "resources_export.zip"
in_memory = StringIO()
with ZipFile(in_memory, 'w') as zipfile:
for obj in queryset:
try:
xml_string = to_xml_string(obj.export_to_elementtree(),
encoding="utf-8").encode("utf-8")
resource_filename = \
'resource-{0}.xml'.format(obj.storage_object.id)
zipfile.writestr(resource_filename, xml_string)
except Exception:
raise Http404(_('Could not export resource "%(name)s" '
'with primary key %(key)s.') \
% {'name': force_unicode(obj),
'key': escape(obj.storage_object.id)})
zipfile.close()
response = http.HttpResponse(mimetype='application/zip')
response['Content-Disposition'] = \
'attachment; filename=%s' % (zipfilename)
in_memory.seek(0)
response.write(in_memory.read())
return response
export_xml_action.short_description = \
_("Export selected resource descriptions to XML")
def resource_Owners(self, obj):
"""
Method used for changelist view for resources.
"""
owners = obj.owners.all()
if owners.count() == 0:
return None
owners_list = ''
for owner in owners.all():
owners_list += owner.username + ', '
owners_list = owners_list.rstrip(', ')
return owners_list
def editor_Groups(self, obj):
"""
Method used for changelist view for resources.
"""
editor_groups = obj.editor_groups.all()
if editor_groups.count() == 0:
return None
groups_list = ''
for group in editor_groups.all():
groups_list += group.name + ', '
groups_list = groups_list.rstrip(', ')
return groups_list
class ConfirmDeleteForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
class IntermediateMultiSelectForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
def __init__(self, choices = None, *args, **kwargs):
super(ResourceModelAdmin.IntermediateMultiSelectForm, self).__init__(*args, **kwargs)
if choices is not None:
self.choices = choices
self.fields['multifield'] = forms.ModelMultipleChoiceField(self.choices)
@csrf_protect_m
def delete(self, request, queryset):
"""
        Form to mark a resource as deleted.
"""
if not self.has_delete_permission(request):
raise PermissionDenied
if 'cancel' in request.POST:
self.message_user(request,
_('Cancelled deleting the selected resources.'))
return
can_be_deleted = []
cannot_be_deleted = []
for resource in queryset:
if self.has_delete_permission(request, resource):
can_be_deleted.append(resource)
else:
cannot_be_deleted.append(resource)
if 'delete' in request.POST:
form = self.ConfirmDeleteForm(request.POST)
if form.is_valid():
for resource in can_be_deleted:
self.delete_model(request, resource)
count = len(can_be_deleted)
messages.success(request,
ungettext('Successfully deleted %d resource.',
'Successfully deleted %d resources.', count)
% (count,))
return HttpResponseRedirect(request.get_full_path())
else:
form = self.ConfirmDeleteForm(initial={admin.ACTION_CHECKBOX_NAME:
request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
dictionary = {
'title': _('Are you sure?'),
'can_be_deleted': can_be_deleted,
'cannot_be_deleted': cannot_be_deleted,
'selected_resources': queryset,
'form': form,
'path': request.get_full_path()
}
dictionary.update(create_breadcrumb_template_params(self.model, _('Delete resource')))
return render_to_response('admin/repository/resourceinfotype_model/delete_selected_confirmation.html',
dictionary,
context_instance=RequestContext(request))
delete.short_description = _("Mark selected resources as deleted")
@csrf_protect_m
def add_group(self, request, queryset):
"""
Form to add an editor group to a resource.
"""
if 'cancel' in request.POST:
self.message_user(request, _('Cancelled adding editor groups.'))
return
elif 'add_editor_group' in request.POST:
_addable_groups = \
ResourceModelAdmin._get_addable_editor_groups(request.user)
form = self.IntermediateMultiSelectForm(_addable_groups,
request.POST)
if form.is_valid():
_successes = 0
# actually this should be in the form validation but we just
# make sure here that only addable groups are actually added
groups = [g for g in form.cleaned_data['multifield']
if g in _addable_groups]
for obj in queryset:
if request.user.is_superuser or obj.owners.filter(
username=request.user.username).count():
obj.editor_groups.add(*groups)
obj.save()
_successes += 1
_failures = queryset.count() - _successes
if _failures:
messages.warning(request, _('Successfully added editor ' \
'groups to %i of the selected resources. %i resource ' \
'editor groups were left unchanged due to missing ' \
'permissions.') % (_successes, _failures))
else:
messages.success(request, _('Successfully added editor ' \
'groups to all selected resources.'))
return HttpResponseRedirect(request.get_full_path())
else:
form = self.IntermediateMultiSelectForm(
ResourceModelAdmin._get_addable_editor_groups(request.user),
initial={admin.ACTION_CHECKBOX_NAME:
request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
dictionary = {
'selected_resources': queryset,
'form': form,
'path': request.get_full_path()
}
dictionary.update(create_breadcrumb_template_params(self.model, _('Add editor group')))
return render_to_response('admin/repository/resourceinfotype_model/add_editor_group.html',
dictionary,
context_instance=RequestContext(request))
add_group.short_description = _("Add editor groups to selected resources")
@staticmethod
def _get_addable_editor_groups(user):
"""
Returns a queryset of the `EditorGroup` objects that the given user is
allowed to add to a resource.
Superusers can add all editor groups. Other users can only add those
editor groups of which they are a member or a manager.
"""
if user.is_superuser:
return EditorGroup.objects.all()
else:
return EditorGroup.objects.filter(
# either a group member
Q(name__in=user.groups.values_list('name', flat=True))
# or a manager of the editor group
| Q(name__in=EditorGroupManagers.objects.filter(name__in=
user.groups.values_list('name', flat=True)) \
.values_list('managed_group__name', flat=True)))
@csrf_protect_m
def remove_group(self, request, queryset):
"""
Form to remove an editor group from a resource.
"""
if not request.user.is_superuser:
raise PermissionDenied
if 'cancel' in request.POST:
self.message_user(request,
_('Cancelled removing editor groups.'))
return
elif 'remove_editor_group' in request.POST:
query = EditorGroup.objects.all()
form = self.IntermediateMultiSelectForm(query, request.POST)
if form.is_valid():
groups = form.cleaned_data['multifield']
for obj in queryset:
obj.editor_groups.remove(*groups)
obj.save()
self.message_user(request, _('Successfully removed ' \
'editor groups from the selected resources.'))
return HttpResponseRedirect(request.get_full_path())
else:
form = self.IntermediateMultiSelectForm(EditorGroup.objects.all(),
initial={admin.ACTION_CHECKBOX_NAME:
request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
dictionary = {
'selected_resources': queryset,
'form': form,
'path': request.get_full_path()
}
dictionary.update(create_breadcrumb_template_params(self.model, _('Remove editor group')))
return render_to_response('admin/repository/resourceinfotype_model/'
'remove_editor_group.html',
dictionary,
context_instance=RequestContext(request))
remove_group.short_description = _("Remove editor groups from selected " \
"resources")
@csrf_protect_m
def add_owner(self, request, queryset):
"""
Form to add an owner to a resource.
"""
if 'cancel' in request.POST:
self.message_user(request, _('Cancelled adding owners.'))
return
elif 'add_owner' in request.POST:
form = self.IntermediateMultiSelectForm(
User.objects.filter(is_active=True), request.POST)
if form.is_valid():
_successes = 0
owners = form.cleaned_data['multifield']
for obj in queryset:
if request.user.is_superuser or obj.owners.filter(
username=request.user.username).count():
obj.owners.add(*owners)
obj.save()
_successes += 1
_failures = queryset.count() - _successes
if _failures:
messages.warning(request, _('Successfully added owners ' \
'to %i of the selected resources. %i resource owners ' \
'were left unchanged due to missing permissions.')
% (_successes, _failures))
else:
messages.success(request, _('Successfully added owners ' \
'to all selected resources.'))
return HttpResponseRedirect(request.get_full_path())
else:
form = self.IntermediateMultiSelectForm(
User.objects.filter(is_active=True),
initial={admin.ACTION_CHECKBOX_NAME:
request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
dictionary = {
'selected_resources': queryset,
'form': form,
'path': request.get_full_path()
}
dictionary.update(create_breadcrumb_template_params(self.model, _('Add owner')))
return render_to_response('admin/repository/resourceinfotype_model/add_owner.html',
dictionary,
context_instance=RequestContext(request))
add_owner.short_description = _("Add owners to selected resources")
@csrf_protect_m
def remove_owner(self, request, queryset):
"""
Form to remove an owner from a resource.
"""
if not request.user.is_superuser:
raise PermissionDenied
if 'cancel' in request.POST:
self.message_user(request, _('Cancelled removing owners.'))
return
elif 'remove_owner' in request.POST:
form = self.IntermediateMultiSelectForm(
User.objects.filter(is_active=True), request.POST)
if form.is_valid():
owners = form.cleaned_data['multifield']
for obj in queryset:
obj.owners.remove(*owners)
obj.save()
self.message_user(request, _('Successfully removed owners ' \
'from the selected resources.'))
return HttpResponseRedirect(request.get_full_path())
else:
form = self.IntermediateMultiSelectForm(
User.objects.filter(is_active=True),
initial={admin.ACTION_CHECKBOX_NAME:
request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
dictionary = {
'selected_resources': queryset,
'form': form,
'path': request.get_full_path()
}
dictionary.update(create_breadcrumb_template_params(self.model, _('Remove owner')))
return render_to_response('admin/repository/resourceinfotype_model/remove_owner.html',
dictionary,
context_instance=RequestContext(request))
remove_owner.short_description = _("Remove owners from selected resources")
def get_urls(self):
from django.conf.urls import patterns, url
urlpatterns = super(ResourceModelAdmin, self).get_urls()
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.module_name
urlpatterns = patterns('',
url(r'^(.+)/upload-data/$',
wrap(self.uploaddata_view),
name='%s_%s_uploaddata' % info),
url(r'^my/$',
wrap(self.changelist_view_filtered),
name='%s_%s_myresources' % info),
url(r'^(.+)/export-xml/$',
wrap(self.exportxml),
name='%s_%s_exportxml' % info),
) + urlpatterns
return urlpatterns
@csrf_protect_m
def changelist_view_filtered(self, request, extra_context=None):
'''
The filtered changelist view for My Resources.
We reuse the generic django changelist_view and squeeze in our wish to
show the filtered view in two places:
1. we patch request.POST to insert a parameter 'myresources'='true',
which will be interpreted in get_changelist to show the filtered
version;
        2. we pass an extra_context variable 'myresources' which will be
interpreted in the template change_list.html.
'''
_post = request.POST.copy()
_post['myresources'] = 'true'
request.POST = _post
_extra_context = extra_context or {}
_extra_context.update({'myresources':True})
return self.changelist_view(request, _extra_context)
def get_changelist(self, request, **kwargs):
"""
Returns the ChangeList class for use on the changelist page.
"""
if 'myresources' in request.POST:
return FilteredChangeList
else:
return ChangeList
@csrf_protect_m
def uploaddata_view(self, request, object_id, extra_context=None):
"""
The 'upload data' admin view for resourceInfoType_model instances.
"""
model = self.model
opts = model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)s does not exist.') \
% {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
storage_object = obj.storage_object
if storage_object is None:
raise Http404(_('%(name)s object with primary key %(key)s does not have a StorageObject attached.') \
% {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if not storage_object.master_copy:
raise Http404(_('%(name)s object with primary key %(key)s is not a master-copy.') \
% {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
existing_download = storage_object.get_download()
storage_folder = storage_object._storage_folder()
if request.method == 'POST':
form = StorageObjectUploadForm(request.POST, request.FILES)
form_validated = form.is_valid()
if form_validated:
# Check if a new file has been uploaded to resource.
resource = request.FILES['resource']
_extension = None
for _allowed_extension in ALLOWED_ARCHIVE_EXTENSIONS:
if resource.name.endswith(_allowed_extension):
_extension = _allowed_extension
break
# We can assert that an extension has been found as the form
            # validation would have raised a ValidationError otherwise;
# still, we raise an AssertionError if anything goes wrong!
assert(_extension in ALLOWED_ARCHIVE_EXTENSIONS)
if _extension:
_storage_folder = storage_object._storage_folder()
_out_filename = '{}/archive.{}'.format(_storage_folder,
_extension)
# Copy uploaded file to storage folder for this object.
with open(_out_filename, 'wb') as _out_file:
# pylint: disable-msg=E1101
for _chunk in resource.chunks():
_out_file.write(_chunk)
# Update the corresponding StorageObject to update its
# download data checksum.
obj.storage_object.compute_checksum()
obj.storage_object.save()
change_message = 'Uploaded "{}" to "{}" in {}.'.format(
resource.name, storage_object._storage_folder(),
storage_object)
self.log_change(request, obj, change_message)
return self.response_change(request, obj)
else:
form = StorageObjectUploadForm()
context = {
'title': _('Upload resource: "%s"') % force_unicode(obj),
'form': form,
'storage_folder': storage_folder,
'existing_download': existing_download,
'object_id': object_id,
'original': obj,
# 'root_path': self.admin_site.root_path,
'root_path': '/{}admin/'.format(settings.DJANGO_BASE),
'app_label': opts.app_label,
}
context.update(extra_context or {})
context_instance = RequestContext(request,
current_app=self.admin_site.name)
return render_to_response(
['admin/repository/resourceinfotype_model/upload_resource.html'], context,
context_instance)
@csrf_protect_m
def exportxml(self, request, object_id, extra_context=None):
"""
Export the XML description for one single resource
"""
model = self.model
opts = model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)s does not exist.') \
% {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if obj.storage_object is None:
raise Http404(_('%(name)s object with primary key %(key)s does not have a StorageObject attached.') \
% {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
elif obj.storage_object.deleted:
raise Http404(_('%(name)s object with primary key %(key)s does not exist anymore.') \
% {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
from metashare.xml_utils import to_xml_string
from django import http
try:
root_node = obj.export_to_elementtree()
xml_string = to_xml_string(root_node, encoding="utf-8").encode('utf-8')
resource_filename = 'resource-{0}.xml'.format(object_id)
response = http.HttpResponse(xml_string, mimetype='text/xml')
response['Content-Disposition'] = 'attachment; filename=%s' % (resource_filename)
return response
except Exception:
raise Http404(_('Could not export resource "%(name)s" with primary key %(key)s.') \
% {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
def build_fieldsets_from_schema(self, include_inlines=False, inlines=()):
"""
Builds fieldsets using SchemaModel.get_fields().
"""
# pylint: disable-msg=E1101
verify_subclass(self.model, SchemaModel)
exclusion_list = set(self.get_excluded_fields() + self.get_hidden_fields() + self.get_non_editable_fields())
_fieldsets = []
_content_fieldsets = []
# pylint: disable-msg=E1101
_fields = self.model.get_fields()
_has_content_fields = hasattr(self, 'content_fields')
for _field_status in ('required', 'recommended', 'optional'):
_visible_fields = []
_visible_fields_verbose_names = []
_visible_content_fields = []
# pylint: disable-msg=C0103
_visible_content_fields_verbose_names = []
for _field_name in _fields[_field_status]:
_is_visible = False
if self.is_visible_as_normal_field(_field_name, exclusion_list):
_is_visible = True
_fieldname_to_append = _field_name
elif self.is_visible_as_inline(_field_name, include_inlines, inlines):
_is_visible = True
_fieldname_to_append = encode_as_inline(_field_name)
# Now, where to show the field: in administrative or in content fieldset:
if _has_content_fields and _field_name in self.content_fields:
_relevant_fields = _visible_content_fields
_verbose_names = _visible_content_fields_verbose_names
else:
_relevant_fields = _visible_fields
_verbose_names = _visible_fields_verbose_names
# And now put the field where it belongs:
if _is_visible:
_relevant_fields.append(_fieldname_to_append)
_verbose_names.append(self.model.get_verbose_name(_field_name))
if len(_visible_fields) > 0:
_detail = ', '.join(_visible_fields_verbose_names)
_caption = '{0} administration information: {1}'.format(_field_status.capitalize(), _detail)
_fieldset = {'fields': _visible_fields}
_fieldsets.append((_caption, _fieldset))
if len(_visible_content_fields) > 0:
                _detail = ', '.join(_visible_content_fields_verbose_names)
                _caption = '{0} content information: {1}'.format(_field_status.capitalize(), _detail)
_fieldset = {'fields': _visible_content_fields}
_content_fieldsets.append((_caption, _fieldset))
_fieldsets += _content_fieldsets
_hidden_fields = self.get_hidden_fields()
if _hidden_fields:
_fieldsets.append((None, {'fields': _hidden_fields, 'classes':('display_none',)}))
return _fieldsets
def resource_type_selection_view(self, request, form_url, extra_context):
opts = self.model._meta
media = self.media or []
context = {
'title': 'Add %s' % force_unicode(opts.verbose_name),
'show_delete': False,
# 'root_path': self.admin_site.root_path,
'root_path': '/{}admin/'.format(settings.DJANGO_BASE),
'app_label': opts.app_label,
'media': mark_safe(media),
'add': True,
'has_add_permission': self.has_add_permission(request),
'opts': opts,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'kb_link': settings.KNOWLEDGE_BASE_URL,
'comp_name': _('%s') % force_unicode(opts.verbose_name),
}
if extra_context:
context.update(extra_context)
return render_to_response("repository/editor/select_resource_type.html", context, RequestContext(request))
def copy_show_media(self, post):
showtags = ('showCorpusTextInfo', 'showCorpusAudioInfo', 'showCorpusVideoInfo', 'showCorpusImageInfo', 'showCorpusTextNumericalInfo',
'showCorpusTextNgramInfo',
'showLangdescTextInfo', 'showLangdescVideoInfo', 'showLangdescImageInfo',
'showLexiconTextInfo', 'showLexiconAudioInfo', 'showLexiconVideoInfo', 'showLexiconImageInfo',
)
out = {}
for item in showtags:
if item in post:
out[item] = True
return out
def queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site.
This is used by changelist_view, for example, but also for determining
whether the current user may edit a resource or not.
"""
result = super(ResourceModelAdmin, self).queryset(request)
# filter results marked as deleted:
result = result.distinct().filter(storage_object__deleted=False)
# all users but the superusers may only see resources for which they are
# either owner or editor group member:
if not request.user.is_superuser:
result = result.distinct().filter(Q(owners=request.user)
| Q(editor_groups__name__in=
request.user.groups.values_list('name', flat=True)))
return result
def has_delete_permission(self, request, obj=None):
"""
Returns `True` if the given request has permission to change the given
Django model instance.
"""
result = super(ResourceModelAdmin, self) \
.has_delete_permission(request, obj)
if result and obj:
if request.user.is_superuser:
return True
# in addition to the default delete permission determination, we
# only allow a user to delete a resource if either:
# (1) she is owner of the resource and the resource has not been
# ingested, yet
# (2) she is a manager of one of the resource's `EditorGroup`s
res_groups = obj.editor_groups.all()
return (request.user in obj.owners.all()
and obj.storage_object.publication_status == INTERNAL) \
or any(res_group.name == mgr_group.managed_group.name
for res_group in res_groups
for mgr_group in EditorGroupManagers.objects.filter(name__in=
request.user.groups.values_list('name', flat=True)))
return result
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
`ModelAdmin` to a tuple of (callable, name, description) for each
action.
"""
result = super(ResourceModelAdmin, self).get_actions(request)
# always remove the standard Django bulk delete action for resources (if
# it hasn't previously been removed, yet)
if 'delete_selected' in result:
del result['delete_selected']
if not request.user.is_superuser:
del result['remove_group']
del result['remove_owner']
if not 'myresources' in request.POST:
del result['add_group']
del result['add_owner']
# only users with delete permissions can see the delete action:
if not self.has_delete_permission(request):
del result['delete']
# only users who are the manager of some group can see the
# ingest/publish/unpublish actions:
if EditorGroupManagers.objects.filter(name__in=
request.user.groups.values_list('name', flat=True)) \
.count() == 0:
for action in (self.publish_action, self.unpublish_action,
self.ingest_action):
del result[action.__name__]
return result
def create_hidden_structures(self, request):
'''
For a new resource of the given resource type, create the
hidden structures needed and return them as a dict.
'''
resource_type = request.POST['resourceType']
structures = {}
if resource_type == 'corpus':
corpus_media_type = corpusMediaTypeType_model.objects.create()
corpus_info = corpusInfoType_model.objects.create(corpusMediaType=corpus_media_type)
structures['resourceComponentType'] = corpus_info
structures['corpusMediaType'] = corpus_media_type
elif resource_type == 'langdesc':
language_description_media_type = languageDescriptionMediaTypeType_model.objects.create()
langdesc_info = languageDescriptionInfoType_model.objects.create(languageDescriptionMediaType=language_description_media_type)
structures['resourceComponentType'] = langdesc_info
structures['languageDescriptionMediaType'] = language_description_media_type
elif resource_type == 'lexicon':
lexicon_media_type = lexicalConceptualResourceMediaTypeType_model.objects.create()
lexicon_info = lexicalConceptualResourceInfoType_model.objects.create(lexicalConceptualResourceMediaType=lexicon_media_type)
structures['resourceComponentType'] = lexicon_info
structures['lexicalConceptualResourceMediaType'] = lexicon_media_type
elif resource_type == 'toolservice':
tool_info = toolServiceInfoType_model.objects.create()
structures['resourceComponentType'] = tool_info
structures['toolServiceInfoId'] = tool_info.pk
else:
            raise NotImplementedError("Cannot deal with '{}' resource types just yet".format(resource_type))
return structures
def get_hidden_structures(self, request, resource_id=None):
'''
For a resource with existing hidden structures,
fill a dict with the hidden objects.
'''
def get_mediatype_id(media_type_name, media_type_field):
if media_type_name in request.POST:
return request.POST[media_type_name]
if media_type_field:
return media_type_field.pk
return ''
resource_component_id = self.get_resource_component_id(request, resource_id)
structures = {}
resource_component = resourceComponentTypeType_model.objects.get(pk=resource_component_id)
content_info = resource_component.as_subclass()
structures['resourceComponentType'] = content_info
if isinstance(content_info, corpusInfoType_model):
structures['corpusMediaType'] = content_info.corpusMediaType
structures['corpusAudioInfoId'] = get_mediatype_id('corpusAudioInfo', \
content_info.corpusMediaType.corpusAudioInfo)
structures['corpusImageInfoId'] = get_mediatype_id('corpusImageInfo', \
content_info.corpusMediaType.corpusImageInfo)
structures['corpusTextNumericalInfoId'] = get_mediatype_id('corpusTextNumericalInfo', \
content_info.corpusMediaType.corpusTextNumericalInfo)
structures['corpusTextNgramInfoId'] = get_mediatype_id('corpusTextNgramInfo', \
content_info.corpusMediaType.corpusTextNgramInfo)
elif isinstance(content_info, languageDescriptionInfoType_model):
structures['langdescTextInfoId'] = get_mediatype_id('languageDescriptionTextInfo', \
content_info.languageDescriptionMediaType.languageDescriptionTextInfo)
structures['langdescVideoInfoId'] = get_mediatype_id('languageDescriptionVideoInfo', \
content_info.languageDescriptionMediaType.languageDescriptionVideoInfo)
structures['langdescImageInfoId'] = get_mediatype_id('languageDescriptionImageInfo', \
content_info.languageDescriptionMediaType.languageDescriptionImageInfo)
elif isinstance(content_info, lexicalConceptualResourceInfoType_model):
structures['lexiconTextInfoId'] = get_mediatype_id('lexicalConceptualResourceTextInfo', \
content_info.lexicalConceptualResourceMediaType.lexicalConceptualResourceTextInfo)
structures['lexiconAudioInfoId'] = get_mediatype_id('lexicalConceptualResourceAudioInfo', \
content_info.lexicalConceptualResourceMediaType.lexicalConceptualResourceAudioInfo)
structures['lexiconVideoInfoId'] = get_mediatype_id('lexicalConceptualResourceVideoInfo', \
content_info.lexicalConceptualResourceMediaType.lexicalConceptualResourceVideoInfo)
structures['lexiconImageInfoId'] = get_mediatype_id('lexicalConceptualResourceImageInfo', \
content_info.lexicalConceptualResourceMediaType.lexicalConceptualResourceImageInfo)
elif isinstance(content_info, toolServiceInfoType_model):
structures['toolServiceInfoId'] = content_info.pk
else:
            raise NotImplementedError("Cannot deal with '{}' resource types just yet".format(content_info.__class__.__name__))
return structures
def get_resource_component_id(self, request, resource_id=None):
'''
For the given resource (if any) and request, try to get a resource component ID.
'''
if resource_id is not None:
resource = resourceInfoType_model.objects.get(pk=resource_id)
return resource.resourceComponentType.pk
if request.method == 'POST':
return request.POST['resourceComponentId']
return None
def add_user_to_resource_owners(self, request):
'''
Add the current user to the list of owners for the current resource and
the user's `EditorGroup`s to the resource' editor_groups list.
Due to the validation logic of django admin, we add the user/groups to
the form's clean_data object rather than the resource object's m2m
fields; the actual fields will be filled in save_m2m().
'''
# Preconditions:
if not request.user or not request.POST:
return
user_id = str(request.user.pk)
owners = request.POST.getlist('owners')
# Target state already met:
if user_id in owners:
return
# Get UserProfile instance corresponding to the current user.
profile = request.user.get_profile()
# Need to add user to owners and groups to editor_groups
owners.append(user_id)
editor_groups = request.POST.getlist('editor_groups')
editor_groups.extend(EditorGroup.objects \
.filter(name__in=profile.default_editor_groups.values_list('name', flat=True))
.values_list('pk', flat=True))
_post = request.POST.copy()
_post.setlist('owners', owners)
_post.setlist('editor_groups', editor_groups)
request.POST = _post
@method_decorator(permission_required('repository.add_resourceinfotype_model'))
def add_view(self, request, form_url='', extra_context=None):
_extra_context = extra_context or {}
_extra_context.update({'DJANGO_BASE':settings.DJANGO_BASE})
# First, we show the resource type selection view:
if not request.POST:
return self.resource_type_selection_view(request, form_url, extra_context)
# When we get that one back, we create any hidden structures:
_extra_context.update(self.copy_show_media(request.POST))
if 'resourceType' in request.POST:
_structures = self.create_hidden_structures(request)
_extra_context.update(_structures)
request.method = 'GET' # simulate a first call to add/
else:
_structures = self.get_hidden_structures(request)
_extra_context.update(_structures)
# We add the current user to the resource owners:
self.add_user_to_resource_owners(request)
# And in any case, we serve the usual change form if we have a post request
return super(ResourceModelAdmin, self).add_view(request, form_url, _extra_context)
def save_model(self, request, obj, form, change):
super(ResourceModelAdmin, self).save_model(request, obj, form, change)
# update statistics
if hasattr(obj, 'storage_object') and obj.storage_object is not None:
saveLRStats(obj, UPDATE_STAT, request)
def delete_model(self, request, obj):
obj.storage_object.deleted = True
obj.storage_object.save()
# explicitly write metadata XML and storage object to the storage folder
obj.storage_object.update_storage()
# update statistics
saveLRStats(obj, DELETE_STAT, request)
def change_view(self, request, object_id, extra_context=None):
_extra_context = extra_context or {}
_extra_context.update({'DJANGO_BASE':settings.DJANGO_BASE})
_structures = self.get_hidden_structures(request, object_id)
_extra_context.update(_structures)
return super(ResourceModelAdmin, self).change_view(request, object_id, extra_context=_extra_context)
class LicenceForm(forms.ModelForm):
class Meta:
model = licenceInfoType_model
widgets = {'membershipInfo': OneToManyWidget(lookup_class=MembershipDummyLookup)}
class LicenceModelAdmin(SchemaModelAdmin):
form = LicenceForm
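
# Registration sketch (illustrative only; the actual admin wiring for this model
# lives elsewhere in the project and may differ):
#   from django.contrib import admin
#   admin.site.register(licenceInfoType_model, LicenceModelAdmin)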
|
import os
import sys
import requests
import json
from datetime import datetime, timezone, timedelta
from django.core.management.base import BaseCommand
from api.models import AppealType, AppealStatus, Appeal, Region, Country, DisasterType, Event
from api.fixtures.dtype_map import DISASTER_TYPE_MAPPING
from api.logger import logger
dtype_keys = [a.lower() for a in DISASTER_TYPE_MAPPING.keys()]
dtype_vals = [a.lower() for a in DISASTER_TYPE_MAPPING.values()]
region2country = {'JAK': 'ID', #Jakarta Country Cluster Office: Indonesia
'SAM': 'AR', #South Cone and Brazil Country Cluster Office: Argentina
'TEG': 'HN', #Tegucigalpa Country Cluster Office: Honduras
'AFR': 'KE', #Africa regional office: Kenya
'EAF': 'KE', #Eastern Africa country cluster: Kenya
'WAF': 'NG', #Western Africa country cluster: Nigeria
'CAF': 'CM', #Central Africa country cluster: Cameroon
'SAF': 'ZA', #Southern Africa country cluster: South Africa
'CAM': 'HT', #Latin Caribbean Country Cluster Office: Haiti
'CAR': 'TT', #Caribbean Country Cluster: Trinidad and Tobago
'NAM': 'PA', #Americas regional office: Panama
'AME': 'PA', #Americas regional office: Panama
'ASI': 'MY', #Asia Pacific regional office / New Delhi country cluster: Malaysia
'EEU': 'HU', #Europe Regional Office: Hungary
'EUR': 'HU', #Europe Regional Office: Hungary
'WEU': 'CH', #(Western) Europe regional office: Switzerland
'NAF': 'TN', #MENA regional office / Tunis country cluster: Tunisia
                  'MEA': 'GE', #MENA Regional Office / Southern Caucasus country cluster: Georgia
'OCE': 'FJ', #Suva Country Cluster Office: Fiji
                  'WAF': 'SG', #Sahel country cluster: Senegal (NOTE: duplicate key; this overrides the 'NG' entry above)
                  'WRD': 'CH', #IFRC Headquarters: Switzerland
                  'SAM': 'PE', #Andean Country Cluster Office: Peru (NOTE: duplicate key; this overrides the 'AR' entry above)
'SEA': 'TH', #Bangkok Country Cluster Office: Thailand
'SAS': 'IN', #Southern Asia Country Cluster Office: India
'EAS': 'CN', #Beijing Country Cluster Office: China
'CAS': 'KZ', #Central Asia country cluster: Kazakhstan
'HK': 'CN', #Hong Kong: China
'TW': 'CN', #Taiwan: China
'XK': 'RS', #Kosovo: Serbia
}
class Command(BaseCommand):
help = 'Add new entries from Access database file'
def parse_date(self, date_string):
timeformat = '%Y-%m-%dT%H:%M:%S'
return datetime.strptime(date_string[:18], timeformat).replace(tzinfo=timezone.utc)
def get_new_or_modified_appeals(self):
        use_local_file = os.getenv('DJANGO_DB_NAME') == 'test' and os.path.exists('appeals.json')
new = []
modified = []
if use_local_file:
# read from static file for development
logger.info('Using local appeals.json file')
with open('appeals.json') as f:
modified = json.loads(f.read())
logger.info('Using local appealbilaterals.json file')
with open('appealbilaterals.json') as f:
records = json.loads(f.read())
bilaterals = {}
            for r in records: # code duplication: same aggregation as in the API branch below
if r['APP_Code'] and r['AmountCHF']:
if r['APP_Code'] in bilaterals.keys():
bilaterals[r['APP_Code']] += r['AmountCHF']
else:
bilaterals[r['APP_Code']] = r['AmountCHF']
else:
# get latest BILATERALS
logger.info('Querying appeals API for new appeals data')
url = 'http://go-api.ifrc.org/api/appealbilaterals'
auth = (os.getenv('APPEALS_USER'), os.getenv('APPEALS_PASS'))
response = requests.get(url, auth=auth)
if response.status_code != 200:
logger.error('Error querying AppealBilaterals API')
raise Exception('Error querying AppealBilaterals API')
records = response.json()
# write the current record file to local disk
with open('appealbilaterals.json', 'w') as outfile:
json.dump(records, outfile)
bilaterals = {}
            for r in records: # code duplication: same aggregation as in the local-file branch above
if r['APP_Code'] and r['AmountCHF']:
if r['APP_Code'] in bilaterals.keys():
bilaterals[r['APP_Code']] += r['AmountCHF']
else:
bilaterals[r['APP_Code']] = r['AmountCHF']
# get latest APPEALS
logger.info('Querying appeals API for new appeals data')
url = 'http://go-api.ifrc.org/api/appeals'
auth = (os.getenv('APPEALS_USER'), os.getenv('APPEALS_PASS'))
response = requests.get(url, auth=auth)
if response.status_code != 200:
logger.error('Error querying Appeals API')
raise Exception('Error querying Appeals API')
records = response.json()
# write the current record file to local disk
with open('appeals.json', 'w') as outfile:
json.dump(records, outfile)
codes = [a.code for a in Appeal.objects.all()]
for r in records:
# Temporary filtering, the manual version should be kept:
if r['APP_code'] in ['MDR65002', 'MDR00001', 'MDR00004']:
continue
#if r['APP_code'] != 'MDRMZ014': # Debug to test bilateral additions or other specific appeals
# continue
if not r['APP_code'] in codes:
new.append(r)
# We use all records, do NOT check if last_modified > since_last_checked
modified.append(r)
return new, modified, bilaterals
def parse_disaster_name(self, dname):
if dname in dtype_keys:
idx = dtype_keys.index(dname)
disaster_name = DISASTER_TYPE_MAPPING[list(DISASTER_TYPE_MAPPING)[idx]]
elif dname in dtype_vals:
idx = dtype_vals.index(dname)
disaster_name = list(DISASTER_TYPE_MAPPING.values())[idx]
else:
disaster_name = 'Other'
dtype = DisasterType.objects.get(name=disaster_name)
return dtype
def parse_country(self, iso_code, country_name):
if iso_code in region2country:
iso_code = region2country[iso_code]
if len(iso_code) == 2:
country = Country.objects.filter(iso=iso_code.lower())
else:
country = Country.objects.filter(name=country_name)
if country.count() == 0:
country = None
#print(iso_code + ' ' + country_name) # Debug: for the "orphan" iso_codes
else:
country = country.first()
return country
def parse_appeal_record(self, r, **options):
# get the disaster type mapping
dname = '' if not r['ADT_name'] else r['ADT_name'].lower()
# sometimes for some reason the string starts with a period
if dname and dname[0] == '.':
dname = dname[1:]
dtype = self.parse_disaster_name(dname)
# get the country mapping
iso_code = r['GEC_code']
country_name = r['OSC_name']
country = self.parse_country(iso_code, country_name)
# get the region mapping, using the country if possible
if country is not None and country.region is not None:
region = Region.objects.get(pk=country.region.pk)
else:
regions = {'africa': 0, 'americas': 1, 'asia pacific': 2, 'europe': 3, 'middle east and north africa': 4}
region_name = r['OSR_name'].lower().strip()
if not region_name in regions:
region = None
else:
region = Region.objects.get(name=regions[region_name])
# get the most recent appeal detail, using the appeal start date
        # if there is more than one detail, the start date should be the earliest one
if len(r['Details']) == 1:
detail = r['Details'][0]
start_date = self.parse_date(detail['APD_startDate'])
else:
details = sorted(r['Details'], reverse=True, key=lambda x: self.parse_date(x['APD_startDate']))
detail = details[0]
start_date = self.parse_date(details[-1]['APD_startDate'])
atypes = {66: AppealType.DREF, 64: AppealType.APPEAL, 1537: AppealType.INTL}
atype = atypes[detail['APD_TYP_Id']]
if atype == AppealType.DREF:
            # DREF appeals are always fully-funded
amount_funded = detail['APD_amountCHF']
else:
amount_funded = 0 if detail['ContributionAmount'] is None else detail['ContributionAmount']
end_date = self.parse_date(detail['APD_endDate'])
# for new, open appeals, if we have a country, try to guess what emergency it belongs to.
# only consider emergencies within the past 90 days
event = None
if options['is_new_appeal'] and country is not None and end_date > datetime.utcnow().replace(tzinfo=timezone.utc):
            ninety_days = datetime.utcnow().replace(tzinfo=timezone.utc) - timedelta(days=90)
            event = Event.objects.exclude(created_at__lt=ninety_days).filter(countries__in=[country]).filter(dtype=dtype).order_by('-created_at')
if event.count():
event = event.first()
else:
event = None
fields = {
'aid': r['APP_Id'],
'name': r['APP_name'],
'dtype': dtype,
'atype': atype,
'country': country,
'region': region,
'sector': r['OSS_name'],
'code': r['APP_code'],
'status': {'Active': 0, 'Closed': 1, 'Frozen': 2, 'Archived': 3}[r['APP_status']],
'start_date': start_date,
'end_date': end_date,
'num_beneficiaries': detail['APD_noBeneficiaries'],
'amount_requested': detail['APD_amountCHF'],
'amount_funded': amount_funded,
}
if event is not None:
fields['event'] = event
fields['needs_confirmation'] = True
return fields
def handle(self, *args, **options):
logger.info('Starting appeals ingest')
new, modified, bilaterals = self.get_new_or_modified_appeals()
logger.info('%s current appeals' % Appeal.objects.all().count())
logger.info('Creating %s new appeals' % len(new))
logger.info('Updating %s existing appeals that have been modified' % len(modified))
num_created = 0
for i, r in enumerate(new):
fields = self.parse_appeal_record(r, is_new_appeal=True)
if fields['code'] in bilaterals: # correction of the appeal record due to appealbilaterals api
fields['amount_funded'] += round(bilaterals[fields['code']],1)
try:
Appeal.objects.create(**fields)
except Exception as e:
logger.error(str(e)[:100])
logger.error('Could not create appeal with code %s' % fields['code'])
continue
num_created = num_created + 1
num_updated = 0
for i, r in enumerate(modified):
fields = self.parse_appeal_record(r, is_new_appeal=False)
if fields['code'] in bilaterals: # correction of the appeal record due to appealbilaterals api
fields['amount_funded'] += round(bilaterals[fields['code']],1)
try:
appeal, created = Appeal.objects.update_or_create(code=fields['code'], defaults=fields)
except Exception as e:
logger.error(str(e)[:100])
logger.error('Could not update appeal with code %s' % fields['code'])
continue
num_updated = num_updated + 1
logger.info('%s appeals created' % num_created)
logger.info('%s appeals updated' % num_updated)
logger.info('%s total appeals' % Appeal.objects.all().count())
logger.info('Appeals ingest completed')
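
# Invocation sketch: as a Django management command this is run via manage.py.
# The command's module name is not shown in this snippet, so "ingest_appeals"
# below is a hypothetical example:
#   python manage.py ingest_appeals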
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfgan.python.features.clip_weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.gan.python.features.python import clip_weights_impl as clip_weights
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training
class ClipWeightsTest(test.TestCase):
"""Tests for `discriminator_weight_clip`."""
def setUp(self):
self.variables = [variables.Variable(2.0)]
self.tuple = collections.namedtuple(
'VarTuple', ['discriminator_variables'])(self.variables)
def _test_weight_clipping_helper(self, use_tuple):
loss = self.variables[0] * 2.0
opt = training.GradientDescentOptimizer(1.0)
        if use_tuple:
            opt_clip = clip_weights.clip_discriminator_weights(opt, self.tuple, 0.1)
        else:
            opt_clip = clip_weights.clip_weights(opt, self.variables, 0.1)
train_op1 = opt.minimize(loss, var_list=self.variables)
train_op2 = opt_clip.minimize(loss, var_list=self.variables)
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(2.0, self.variables[0].eval())
sess.run(train_op1)
self.assertLess(0.1, self.variables[0].eval())
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(2.0, self.variables[0].eval())
sess.run(train_op2)
self.assertNear(0.1, self.variables[0].eval(), 1e-7)
def test_weight_clipping_argsonly(self):
self._test_weight_clipping_helper(False)
def test_weight_clipping_ganmodel(self):
self._test_weight_clipping_helper(True)
def _test_incorrect_weight_clip_value_helper(self, use_tuple):
opt = training.GradientDescentOptimizer(1.0)
if use_tuple:
with self.assertRaisesRegexp(ValueError, 'must be positive'):
clip_weights.clip_discriminator_weights(opt, self.tuple, weight_clip=-1)
else:
with self.assertRaisesRegexp(ValueError, 'must be positive'):
clip_weights.clip_weights(opt, self.variables, weight_clip=-1)
def test_incorrect_weight_clip_value_argsonly(self):
self._test_incorrect_weight_clip_value_helper(False)
def test_incorrect_weight_clip_value_tuple(self):
self._test_incorrect_weight_clip_value_helper(True)
|
from typing import List

class Solution:
def maxProfit(self, prices: List[int]) -> int:
maximumProfit = 0
for i in range(1, len(prices)):
if prices[i] > prices[i - 1]:
maximumProfit += (prices[i] - prices[i - 1])
return maximumProfit
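
# Illustrative usage of the greedy approach (sums every positive day-to-day gain):
#   Solution().maxProfit([7, 1, 5, 3, 6, 4])  # -> 7  (buy at 1 / sell at 5, buy at 3 / sell at 6)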
|
#!/usr/bin/env python
__all__ = ['miaopai_download']
from ..common import *
import urllib.error
def miaopai_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
'''Source: Android mobile'''
if re.match(r'http://video.weibo.com/show\?fid=(\d{4}:\w{32})\w*', url):
fake_headers_mobile = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'
}
webpage_url = re.search(r'(http://video.weibo.com/show\?fid=\d{4}:\w{32})\w*', url).group(1) + '&type=mp4' #mobile
#grab download URL
        a = get_content(webpage_url, headers=fake_headers_mobile, decoded=True)
url = match1(a, r'<video src="(.*?)\"\W')
#grab title
b = get_content(webpage_url) #normal
title = match1(b, r'<meta name="description" content="(.*?)\"\W')
type_, ext, size = url_info(url)
print_info(site_info, title, type_, size)
if not info_only:
download_urls([url], title, ext, total_size=None, output_dir=output_dir, merge=merge)
site_info = "miaopai"
download = miaopai_download
download_playlist = playlist_not_supported('miaopai')
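
# Illustrative call (the fid below is a placeholder, not a real 32-character id):
#   miaopai_download('http://video.weibo.com/show?fid=1034:<32-char-id>', info_only=True)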
|
from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as vcapi, raster as rcapi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class Driver(GDALBase):
"""
Wraps a GDAL/OGR Data Source Driver.
For more information, see the C API source code:
http://www.gdal.org/gdal_8h.html - http://www.gdal.org/ogr__api_8h.html
"""
# Case-insensitive aliases for some GDAL/OGR Drivers.
# For a complete list of original driver names see
# http://www.gdal.org/ogr_formats.html (vector)
# http://www.gdal.org/formats_list.html (raster)
_alias = {
# vector
'esri': 'ESRI Shapefile',
'shp': 'ESRI Shapefile',
'shape': 'ESRI Shapefile',
'tiger': 'TIGER',
'tiger/line': 'TIGER',
# raster
'tiff': 'GTiff',
'tif': 'GTiff',
'jpeg': 'JPEG',
'jpg': 'JPEG',
}
def __init__(self, dr_input):
"""
        Initializes a GDAL/OGR driver on either a string or integer input.
"""
if isinstance(dr_input, six.string_types):
# If a string name of the driver was passed in
self.ensure_registered()
# Checking the alias dictionary (case-insensitive) to see if an
# alias exists for the given driver.
if dr_input.lower() in self._alias:
name = self._alias[dr_input.lower()]
else:
name = dr_input
# Attempting to get the GDAL/OGR driver by the string name.
for iface in (vcapi, rcapi):
driver = iface.get_driver_by_name(force_bytes(name))
if driver:
break
elif isinstance(dr_input, int):
self.ensure_registered()
for iface in (vcapi, rcapi):
driver = iface.get_driver(dr_input)
if driver:
break
elif isinstance(dr_input, c_void_p):
driver = dr_input
else:
raise GDALException('Unrecognized input type for GDAL/OGR Driver: %s' % str(type(dr_input)))
# Making sure we get a valid pointer to the OGR Driver
if not driver:
raise GDALException('Could not initialize GDAL/OGR Driver on input: %s' % str(dr_input))
self.ptr = driver
def __str__(self):
return self.name
@classmethod
def ensure_registered(cls):
"""
Attempts to register all the data source drivers.
"""
# Only register all if the driver counts are 0 (or else all drivers
# will be registered over and over again)
if not vcapi.get_driver_count():
vcapi.register_all()
if not rcapi.get_driver_count():
rcapi.register_all()
@classmethod
def driver_count(cls):
"""
Returns the number of GDAL/OGR data source drivers registered.
"""
return vcapi.get_driver_count() + rcapi.get_driver_count()
@property
def name(self):
"""
Returns description/name string for this driver.
"""
return force_text(rcapi.get_driver_description(self.ptr))
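
# Illustrative usage (alias lookup is case-insensitive, per the _alias table above;
# expected values are based on that table):
#   Driver('shp').name      # -> 'ESRI Shapefile'
#   Driver('tiff').name     # -> 'GTiff'
#   Driver.driver_count()   # total registered vector + raster drivers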
|
import arcpy
from arcpy import env
env.overwriteOutput = True
env.workspace = "C:/Temp"
fcs = arcpy.ListFeatureClasses("","point")
if arcpy.Exists("roads.shp"):
for buff in fcs:
        arcpy.Buffer_analysis(buff, "Results/Buffer" + buff, "0.25 MILES")
else:
    print "the file does not exist"
|
# --------------------------------------------------------------------
###
# Supercell class:
# Methods
# get_data(): reads in rmc6f file from the set file path
# orthonormalise_cell(): converts atomic coordinates to an orthonormal basis
###
# --------------------------------------------------------------------
import re
import numpy as np
class SuperCell():
def __init__(self, file_path):
self.file_path = file_path
self.cell_parameters = None
self.elements = None
self.atom_list = None
self.supercell_size = None
self.density = None
self.volume = None
self.matrix = None
self.orth_header = None
self.orth_labels = None
self.orth_positions = None
self.orth_pos_lbl = None
def get_data(self):
with open(self.file_path, 'r') as f:
rmc_data = f.readlines()
atom_list_lines = []
# Loop through file to find matching strings
for line in rmc_data:
# Elements
if line.find('Atom types present:') >= 0:
line_elements = line
            # Number density
if line.find('Number density') >= 0:
line_density = line
# Supercell dimensions
if line.find('Supercell') >= 0:
line_supercell_size = line
# Supercell parameters
if line.find('Cell') >= 0:
line_cell = line
# Atom list
if line.find('[1]') >= 0:
atom_list_lines.append(line)
# Put element, atom no. and atomic positions etc. into lists
# Elements
key = 'Atom types present:'
lead_str, keyword, elements_str = line_elements.partition(key)
elements_str = elements_str.strip()
        elements = re.sub(r"[^\w]", " ", elements_str).split()
        # Density
        temp = re.findall(r'\d+\.\d+', line_density)
        density = [float(i) for i in temp]
        # supercell dimensions
        temp = re.findall(r'[-+]?\d*\.\d+|\d+', line_supercell_size)
        supercell_size = [int(i) for i in temp]
        # supercell parameters: a, b, c, alpha, beta, gamma
        temp = re.findall(r'[-+]?\d*\.\d+|\d+', line_cell)
        cell_parameters = [float(i) for i in temp]
# Create atom list of lists, format:
# [element, number, position x, position y, position z]
atom_list = []
for line in atom_list_lines:
temp_list = []
split = line.split()
element = split[1]
number = int(split[0])
pos_x = float(split[3])
pos_y = float(split[4])
pos_z = float(split[5])
temp_list.append(element)
temp_list.append(number)
temp_list.append(pos_x)
temp_list.append(pos_y)
temp_list.append(pos_z)
atom_list.append(temp_list)
self.cell_parameters = cell_parameters
self.elements = elements
self.atom_list = atom_list
self.supercell_size = supercell_size
self.density = density
def orthonormalise_cell(self):
"""
Orthonormalisation of a set of 3D coordinates.
Coordinates taken from <atom_list>
        Original basis taken from <cell_parameters>
"""
######################################################
# Initialise transformation matrix M
###
a = self.cell_parameters[0]
b = self.cell_parameters[1]
c = self.cell_parameters[2]
al = np.deg2rad(self.cell_parameters[3])
be = np.deg2rad(self.cell_parameters[4])
ga = np.deg2rad(self.cell_parameters[5])
volume = (a * b * c *
(1 - np.cos(al)**2
- np.cos(be)**2
- np.cos(ga)**2
+ 2 * np.cos(al)
* np.cos(be)
* np.cos(ga))
** 0.5)
a1 = a
a2 = 0
a3 = 0
b1 = b * np.cos(ga)
b2 = b * np.sin(ga)
b3 = 0
c1 = c * np.cos(be)
c2 = c * (np.cos(al) - (np.cos(be) *
np.cos(ga))) / np.sin(ga)
c3 = volume / (a * b * np.sin(ga))
M = np.array([[a1, b1, c1], [a2, b2, c2], [a3, b3, c3]])
self.supercell_volume = volume
self.average_cell_volume = volume / np.prod(self.supercell_size)
self.matrix = M
###
###
######################################################
######################################################
        # Orthonormalisation calculation
###
atom_positions = np.array(self.atom_list)[:, 2:5]
# Make dtype float64
atom_positions = np.float64(atom_positions)
orth_positions = np.dot(M, atom_positions.T).T
# Round to make very small values (e.g. ~10e-16), zero.
orth_positions = np.around(orth_positions, 8)
self.orth_positions = orth_positions
self.orth_header = ['Element', 'ID', 'x', 'y', 'z']
self.orth_labels = np.array(self.atom_list)[:, :2].tolist()
lbl_list = self.orth_labels
orth_list = self.orth_positions.tolist()
opl = [lbl + pos for lbl, pos in zip(lbl_list, orth_list)]
self.orth_pos_lbl = opl
###
###
######################################################
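
# Minimal usage sketch (the rmc6f path below is hypothetical):
#   cell = SuperCell('example.rmc6f')
#   cell.get_data()
#   cell.orthonormalise_cell()
#   print(cell.orth_header)       # ['Element', 'ID', 'x', 'y', 'z']
#   print(cell.orth_pos_lbl[0])   # e.g. ['O', 1, 1.234, 5.678, 9.012]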
|
from rest_framework.serializers import ModelSerializer
from rest_framework.fields import SerializerMethodField
from .models import PlayerClanRule, PlayerClanRuleGoal
class PlayerClanRuleSerializer(ModelSerializer):
description = SerializerMethodField()
filtered_column_type = SerializerMethodField()
def get_description(self, obj):
return obj.humanize()
def get_filtered_column_type(self, obj):
return obj.filtered_column_type
class Meta:
model = PlayerClanRule
fields = (
'goal',
'field',
'operator',
'value',
'value_bound',
'value_type',
'predicate',
'is_promoting_rule',
'description',
'filtered_column_type',
)
class PlayerClanRuleGoalSerializer(ModelSerializer):
class Meta:
model = PlayerClanRuleGoal
fields = ('id', 'name', 'description', 'applies_to')
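
# Illustrative usage in a DRF view or a shell (queryset and filter are examples only):
#   rules = PlayerClanRule.objects.filter(is_promoting_rule=True)
#   data = PlayerClanRuleSerializer(rules, many=True).data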
|
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['googlenet_cinic']
NUM_CLASSES = 10
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
# 1x1 conv branch
self.b1 = nn.Sequential(
nn.Conv2d(in_planes, n1x1, kernel_size=1),
nn.BatchNorm2d(n1x1),
nn.ReLU(True),
)
# 1x1 conv -> 3x3 conv branch
self.b2 = nn.Sequential(
nn.Conv2d(in_planes, n3x3red, kernel_size=1),
nn.BatchNorm2d(n3x3red),
nn.ReLU(True),
nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
nn.BatchNorm2d(n3x3),
nn.ReLU(True),
)
        # 1x1 conv -> 5x5 conv branch (the 5x5 is factorized into two stacked 3x3 convs)
self.b3 = nn.Sequential(
nn.Conv2d(in_planes, n5x5red, kernel_size=1),
nn.BatchNorm2d(n5x5red),
nn.ReLU(True),
nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
)
# 3x3 pool -> 1x1 conv branch
self.b4 = nn.Sequential(
nn.MaxPool2d(3, stride=1, padding=1),
nn.Conv2d(in_planes, pool_planes, kernel_size=1),
nn.BatchNorm2d(pool_planes),
nn.ReLU(True),
)
def forward(self, x):
y1 = self.b1(x)
y2 = self.b2(x)
y3 = self.b3(x)
y4 = self.b4(x)
return torch.cat([y1,y2,y3,y4], 1)
class GoogLeNet(nn.Module):
def __init__(self):
super(GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=3, padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, NUM_CLASSES)
def forward(self, x):
out = self.pre_layers(x)
out = self.a3(out)
out = self.b3(out)
out = self.maxpool(out)
out = self.a4(out)
out = self.b4(out)
out = self.c4(out)
out = self.d4(out)
out = self.e4(out)
out = self.maxpool(out)
out = self.a5(out)
out = self.b5(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def googlenet_cinic(**kwargs):
return GoogLeNet()
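
# Quick shape check (CINIC-10 images are 3x32x32, which matches the 8x8 average
# pool after the two stride-2 max pools):
#   model = googlenet_cinic()
#   out = model(torch.randn(1, 3, 32, 32))
#   assert out.shape == (1, NUM_CLASSES)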
|
"""Phased LSTM implementation based on the version in tensorflow contrib.
See: https://github.com/tensorflow/tensorflow/blob/r1.15/tensorflow/contrib/rnn/python/ops/rnn_cell.py#L1915-L2064
Due to restructurings in tensorflow some adaptations were required. This
implementation does not use global naming of variables and thus is compatible
with the new Keras-style paradigm.
"""
from collections.abc import Sequence
from collections import namedtuple
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.nn.rnn_cell import LSTMStateTuple
from .delta_t_utils import get_delta_t
PhasedLSTMInput = namedtuple('PhasedLSTMInput', ['times', 'x'])
def _random_exp_initializer(minval, maxval, seed=None, dtype=tf.float32):
"""Return an exponential distribution initializer.
Args:
minval: float or a scalar float Tensor. With value > 0. Lower bound of the
range of random values to generate.
maxval: float or a scalar float Tensor. With value > minval. Upper bound of
the range of random values to generate.
seed: An integer. Used to create random seeds.
dtype: The data type.
Returns:
An initializer that generates tensors with an exponential distribution.
"""
def _initializer(shape, dtype=dtype, partition_info=None):
del partition_info # Unused.
return tf.math.exp(tf.random.uniform(
shape, tf.math.log(minval), tf.math.log(maxval), dtype, seed=seed))
return _initializer
class PhasedLSTMCell(tf.keras.layers.Layer):
"""Phased LSTM recurrent network cell.
https://arxiv.org/pdf/1610.09513v1.pdf
"""
def __init__(self, num_units, use_peepholes=False, leak=0.001,
ratio_on=0.1, trainable_ratio_on=True, period_init_min=0.5,
period_init_max=1000.0):
"""Initialize the Phased LSTM cell.
Args:
num_units: int, The number of units in the Phased LSTM cell.
use_peepholes: bool, set True to enable peephole connections.
leak: float or scalar float Tensor with value in [0, 1]. Leak applied
during training.
ratio_on: float or scalar float Tensor with value in [0, 1]. Ratio of
the period during which the gates are open.
            trainable_ratio_on: bool, whether ratio_on is trainable.
period_init_min: float or scalar float Tensor. With value > 0.
Minimum value of the initialized period.
The period values are initialized by drawing from the
distribution: e^U(log(period_init_min), log(period_init_max))
Where U(.,.) is the uniform distribution.
period_init_max: float or scalar float Tensor.
With value > period_init_min. Maximum value of the initialized
period.
"""
super().__init__()
self._num_units = num_units
self._use_peepholes = use_peepholes
self._leak = leak
self._ratio_on = ratio_on
self._trainable_ratio_on = trainable_ratio_on
self._period_init_min = period_init_min
self._period_init_max = period_init_max
self.linear1 = Dense(
2 * self._num_units, use_bias=True, activation='sigmoid',
name='MaskGates')
self.linear2 = Dense(
self._num_units, use_bias=True, activation='tanh')
self.linear3 = Dense(
self._num_units, use_bias=True, activation='sigmoid')
self.period = self.add_weight(
'period', shape=[self._num_units],
initializer=_random_exp_initializer(
self._period_init_min, self._period_init_max))
self.phase = self.add_weight(
'phase', shape=[self._num_units],
initializer=tf.compat.v1.initializers.random_uniform(0., self.period.initial_value))
self.ratio_on = self.add_weight(
"ratio_on", [self._num_units],
initializer=tf.compat.v1.constant_initializer(self._ratio_on),
trainable=self._trainable_ratio_on)
def build(self, input_shapes):
time_shape, x_shape = input_shapes.times, input_shapes.x
x_dim = x_shape[-1]
        if self._use_peepholes:
            mask_gate_and_output_gate_dims = 2 * self._num_units + x_dim
        else:
            mask_gate_and_output_gate_dims = self._num_units + x_dim
        self.linear1.build((time_shape[0], mask_gate_and_output_gate_dims))
        self.linear2.build((time_shape[0], self._num_units + x_dim))
        self.linear3.build((time_shape[0], mask_gate_and_output_gate_dims))
super().build(input_shapes)
@property
def state_size(self):
return tf.compat.v1.nn.rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _mod(self, x, y):
"""Modulo function that propagates x gradients."""
return tf.stop_gradient(tf.math.mod(x, y) - x) + x
def _get_cycle_ratio(self, time):
"""Compute the cycle ratio in the dtype of the time."""
phase = tf.cast(self.phase, dtype=time.dtype)
period = tf.cast(self.period, dtype=time.dtype)
shifted_time = time - phase
cycle_ratio = self._mod(shifted_time, period) / period
return tf.cast(cycle_ratio, dtype=tf.float32)
def call(self, inputs, state):
"""Phased LSTM Cell.
Args:
inputs: A tuple of 2 Tensor.
The first Tensor has shape [batch, 1], and type float32 or float64.
It stores the time.
The second Tensor has shape [batch, features_size], and type float32.
It stores the features.
state: rnn_cell_impl.LSTMStateTuple, state from previous timestep.
Returns:
A tuple containing:
- A Tensor of float32, and shape [batch_size, num_units], representing the
output of the cell.
- A rnn_cell_impl.LSTMStateTuple, containing 2 Tensors of float32, shape
[batch_size, num_units], representing the new state and the output.
"""
(c_prev, h_prev) = state
time, x = inputs.times, inputs.x
if self._use_peepholes:
input_mask_and_output_gate = tf.concat(
[x, h_prev, c_prev], axis=-1)
else:
input_mask_and_output_gate = tf.concat([x, h_prev], axis=-1)
mask_gates = self.linear1(input_mask_and_output_gate)
input_gate, forget_gate = tf.split(
mask_gates, axis=1, num_or_size_splits=2)
new_input = self.linear2(tf.concat([x, h_prev], axis=-1))
new_c = (c_prev * forget_gate + input_gate * new_input)
output_gate = self.linear3(input_mask_and_output_gate)
new_h = tf.tanh(new_c) * output_gate
cycle_ratio = self._get_cycle_ratio(time)
k_up = 2 * cycle_ratio / self.ratio_on
k_down = 2 - k_up
k_closed = self._leak * cycle_ratio
k = tf.compat.v1.where(cycle_ratio < self.ratio_on, k_down, k_closed)
k = tf.compat.v1.where(cycle_ratio < 0.5 * self.ratio_on, k_up, k)
new_c = k * new_c + (1 - k) * c_prev
new_h = k * new_h + (1 - k) * h_prev
new_state = tf.compat.v1.nn.rnn_cell.LSTMStateTuple(new_c, new_h)
return new_h, new_state
class PhasedLSTMModel(tf.keras.Model):
def __init__(self, output_activation, output_dims, n_units, use_peepholes,
leak, period_init_max):
self._config = {
name: val for name, val in locals().items()
if name not in ['self', '__class__']
}
super().__init__()
self.demo_encoder = tf.keras.Sequential(
[
tf.keras.layers.Dense(n_units, activation='relu'),
tf.keras.layers.Dense(2*n_units)
],
name='demo_encoder'
)
if isinstance(output_dims, Sequence):
# We have an online prediction scenario
assert output_dims[0] is None
self.return_sequences = True
output_dims = output_dims[1]
else:
self.return_sequences = False
self.rnn = tf.keras.layers.RNN(
PhasedLSTMCell(
n_units, use_peepholes=use_peepholes,
leak=leak, period_init_max=period_init_max
),
return_sequences=self.return_sequences
)
self.output_layer = tf.keras.layers.Dense(
output_dims, activation=output_activation)
def call(self, inputs):
demo, times, values, measurements, dt, lengths = inputs
demo_encoded = self.demo_encoder(demo)
initial_state = LSTMStateTuple(*tf.split(demo_encoded, 2, axis=-1))
values = tf.concat(
(values, tf.cast(measurements, tf.float32), dt), axis=-1)
mask = tf.sequence_mask(tf.squeeze(lengths, axis=-1), name='mask')
out = self.rnn(
PhasedLSTMInput(times=times, x=values),
mask=mask,
initial_state=initial_state
)
return self.output_layer(out)
def data_preprocessing_fn(self):
def add_delta_t_tensor(ts, label):
demo, times, values, measurement_indicators, length = ts
times = tf.expand_dims(times, -1)
dt = get_delta_t(times, values, measurement_indicators)
return (
(demo, times, values, measurement_indicators, dt, length),
label
)
return add_delta_t_tensor
@classmethod
def get_hyperparameters(cls):
import tensorboard.plugins.hparams.api as hp
from ..training_utils import HParamWithDefault
return [
HParamWithDefault(
'n_units', hp.Discrete([32, 64, 128, 256, 512, 1024]),
default=32),
HParamWithDefault(
'use_peepholes', hp.Discrete([True, False]), default=False),
HParamWithDefault(
'leak', hp.Discrete([0.001, 0.005, 0.01]), default=0.001),
HParamWithDefault(
'period_init_max', hp.Discrete([10., 100., 1000.]),
default=1000.)
]
@classmethod
def from_hyperparameter_dict(cls, task, hparams):
return cls(
output_activation=task.output_activation,
output_dims=task.n_outputs,
n_units=hparams['n_units'],
use_peepholes=hparams['use_peepholes'],
leak=hparams['leak'],
period_init_max=hparams['period_init_max']
)
@classmethod
def from_config(cls, config):
return cls(**config)
def get_config(self):
return self._config
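
# Minimal usage sketch for the cell on its own, mirroring how PhasedLSTMModel
# wraps it above (shapes are illustrative only):
#   cell = PhasedLSTMCell(num_units=32)
#   rnn = tf.keras.layers.RNN(cell)
#   times = tf.random.uniform((4, 20, 1))          # (batch, steps, 1) timestamps
#   x = tf.random.normal((4, 20, 8))               # (batch, steps, features)
#   out = rnn(PhasedLSTMInput(times=times, x=x))   # -> (4, 32)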
|
# (c) 2019–2020, Ansible by Red Hat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Utils related to inline skipping of rules."""
import logging
from functools import lru_cache
from itertools import product
from typing import TYPE_CHECKING, Any, Generator, List, Sequence
import ruamel.yaml
from ansiblelint.config import used_old_tags
from ansiblelint.constants import RENAMED_TAGS
from ansiblelint.file_utils import Lintable
if TYPE_CHECKING:
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
_logger = logging.getLogger(__name__)
# playbook: Sequence currently expects only instances of one of the two
# classes below but we should consider avoiding this chimera.
# ruamel.yaml.comments.CommentedSeq
# ansible.parsing.yaml.objects.AnsibleSequence
def get_rule_skips_from_line(line: str) -> List:
"""Return list of rule ids skipped via comment on the line of yaml."""
_before_noqa, _noqa_marker, noqa_text = line.partition("# noqa")
noqa_text = noqa_text.lstrip(" :")
return noqa_text.split()
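# Example (illustrative):
#   get_rule_skips_from_line("command: echo hi  # noqa: 301 305")
#   -> ['301', '305']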
def append_skipped_rules(
pyyaml_data: "AnsibleBaseYAMLObject", lintable: Lintable
) -> Sequence:
"""Append 'skipped_rules' to individual tasks or single metadata block.
For a file, uses 2nd parser (ruamel.yaml) to pull comments out of
yaml subsets, check for '# noqa' skipped rules, and append any skips to the
original parser (pyyaml) data relied on by remainder of ansible-lint.
:param pyyaml_data: file text parsed via ansible and pyyaml.
    :param lintable: the Lintable whose raw content provides the comments.
:returns: original pyyaml_data altered with a 'skipped_rules' list added
to individual tasks, or added to the single metadata block.
"""
try:
yaml_skip = _append_skipped_rules(pyyaml_data, lintable)
except RuntimeError:
# Notify user of skip error, do not stop, do not change exit code
_logger.error('Error trying to append skipped rules', exc_info=True)
return pyyaml_data
return yaml_skip
@lru_cache(maxsize=128)
def load_data(file_text: str) -> Any:
"""Parse `file_text` as yaml and return parsed structure.
    This is the main culprit for slow performance: each rule asks for the yaml to be
    loaded again and again, so ideally the `maxsize` on the decorator above must be
    greater than or equal to the total number of rules.
:param file_text: raw text to parse
:return: Parsed yaml
"""
yaml = ruamel.yaml.YAML()
return yaml.load(file_text)
def _append_skipped_rules(pyyaml_data: Sequence[Any], lintable: Lintable) -> Sequence:
# parse file text using 2nd parser library
ruamel_data = load_data(lintable.content)
if lintable.kind == 'meta':
pyyaml_data[0]['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_data)
return pyyaml_data
# create list of blocks of tasks or nested tasks
if lintable.kind in ('tasks', 'handlers'):
ruamel_task_blocks = ruamel_data
pyyaml_task_blocks = pyyaml_data
elif lintable.kind == 'playbook':
try:
pyyaml_task_blocks = _get_task_blocks_from_playbook(pyyaml_data)
ruamel_task_blocks = _get_task_blocks_from_playbook(ruamel_data)
except (AttributeError, TypeError):
# TODO(awcrosby): running ansible-lint on any .yml file will
# assume it is a playbook, check needs to be added higher in the
# call stack, and can remove this except
return pyyaml_data
elif lintable.kind in ['yaml', 'requirements', 'vars', 'meta', 'reno']:
return pyyaml_data
else:
# For unsupported file types, we return empty skip lists
return []
# get tasks from blocks of tasks
pyyaml_tasks = _get_tasks_from_blocks(pyyaml_task_blocks)
ruamel_tasks = _get_tasks_from_blocks(ruamel_task_blocks)
# append skipped_rules for each task
for ruamel_task, pyyaml_task in zip(ruamel_tasks, pyyaml_tasks):
# ignore empty tasks
if not pyyaml_task and not ruamel_task:
continue
if pyyaml_task.get('name') != ruamel_task.get('name'):
raise RuntimeError('Error in matching skip comment to a task')
pyyaml_task['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_task)
return pyyaml_data
def _get_task_blocks_from_playbook(playbook: Sequence) -> List:
"""Return parts of playbook that contains tasks, and nested tasks.
:param playbook: playbook yaml from yaml parser.
:returns: list of task dictionaries.
"""
PLAYBOOK_TASK_KEYWORDS = [
'tasks',
'pre_tasks',
'post_tasks',
'handlers',
]
task_blocks = []
for play, key in product(playbook, PLAYBOOK_TASK_KEYWORDS):
task_blocks.extend(play.get(key, []))
return task_blocks
def _get_tasks_from_blocks(task_blocks: Sequence) -> Generator:
"""Get list of tasks from list made of tasks and nested tasks."""
NESTED_TASK_KEYS = [
'block',
'always',
'rescue',
]
def get_nested_tasks(task: Any) -> Generator[Any, None, None]:
for k in NESTED_TASK_KEYS:
if task and k in task and task[k]:
for subtask in task[k]:
yield subtask
for task in task_blocks:
for sub_task in get_nested_tasks(task):
yield sub_task
yield task
def _get_rule_skips_from_yaml(yaml_input: Sequence) -> Sequence:
"""Traverse yaml for comments with rule skips and return list of rules."""
yaml_comment_obj_strs = []
def traverse_yaml(obj: Any) -> None:
yaml_comment_obj_strs.append(str(obj.ca.items))
if isinstance(obj, dict):
for key, val in obj.items():
if isinstance(val, (dict, list)):
traverse_yaml(val)
elif isinstance(obj, list):
for e in obj:
if isinstance(e, (dict, list)):
traverse_yaml(e)
else:
return
traverse_yaml(yaml_input)
rule_id_list = []
for comment_obj_str in yaml_comment_obj_strs:
for line in comment_obj_str.split(r'\n'):
rule_id_list.extend(get_rule_skips_from_line(line))
return [normalize_tag(tag) for tag in rule_id_list]
def normalize_tag(tag: str) -> str:
"""Return current name of tag."""
if tag in RENAMED_TAGS:
used_old_tags[tag] = RENAMED_TAGS[tag]
return RENAMED_TAGS[tag]
return tag
|
"""Google Photo API abstraction module"""
import json
import logging
import os.path
import time
from io import open
from datetime import date, datetime, timedelta
import requests
import six
logger = logging.getLogger(__name__)
AUTH_URL = "https://accounts.google.com/o/oauth2/auth"
CLIENT_ID = "834388343680-embh8gpuiavu35801g2564sfrkir3rfb.apps.googleusercontent.com"
CLIENT_SECRET = "jMX0btH5hLlfJgxXF6-bUgf6"
REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob"
TOKEN_URI = "https://accounts.google.com/o/oauth2/token"
URL_PHOTOS = "https://photoslibrary.googleapis.com/v1/mediaItems"
URL_ALBUMS = "https://photoslibrary.googleapis.com/v1/albums"
AUTH_SCOPE = "https://www.googleapis.com/auth/photoslibrary"
class GPhoto:
"""Implement the Google Photo Library API"""
def __init__(self, token_cache=".cache"):
self.token_cache = token_cache
self.token = None
self.refresh_token = None
if not self._refresh_token():
# If we don't have a cached token - get an authorization
url = "{}?client_id={}&redirect_uri={}&scope={}&response_type=code".format(
AUTH_URL, CLIENT_ID, REDIRECT_URI, AUTH_SCOPE
)
code = six.moves.input("URL: {0}\nPaste authorization code: ".format(url))
token_json = requests.post(
TOKEN_URI,
data={
"code": code,
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET,
"redirect_uri": "urn:ietf:wg:oauth:2.0:oob",
"grant_type": "authorization_code",
},
).json()
self.token = token_json["access_token"]
self.refresh_token = token_json["refresh_token"]
self._write_refresh_token()
self.headers = {"Authorization": "Bearer {}".format(self.token)}
def _refresh_token(self):
self._read_refresh_token()
if not self.refresh_token:
return False
token_json = requests.post(
TOKEN_URI,
data={
"refresh_token": self.refresh_token,
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET,
"grant_type": "refresh_token",
},
).json()
self.token = token_json["access_token"]
return True
def _read_refresh_token(self):
if not os.path.exists(self.token_cache):
return False
cache = {}
with open(self.token_cache, "rb") as cache_file:
try:
cache = json.loads(cache_file.read().decode("utf8"))
except json.JSONDecodeError:
pass
self.refresh_token = cache.get("gphoto_refresh_token", None)
return True
def _write_refresh_token(self):
cache = {}
if os.path.exists(self.token_cache):
with open(self.token_cache, "rb") as cache_file:
try:
cache = json.loads(cache_file.read().decode("utf8"))
except json.JSONDecodeError:
pass
cache["gphoto_refresh_token"] = self.refresh_token
with open(self.token_cache, "wb") as cache_file:
cache_file.write(json.dumps(cache).encode("utf8"))
def _extract_albums(self, data):
albums = []
logger.info("Received data about %i albums", len(data.get("albums", [])))
for entry in data.get("albums", []):
logger.debug("Processing: %s", entry)
album = {}
album["name"] = entry.get("title", entry["id"])
album["id"] = entry["id"]
album["user_url"] = entry["productUrl"]
album["count"] = int(entry.get("mediaItemsCount", 0))
albums.append(album)
return albums
    def get_albums(self):
        """Return a list of album dicts with name, id, user_url and item count."""
logger.info("Retrieving album list")
payload = {"pageSize": 50}
data = self._load_new_data(URL_ALBUMS, "get", payload)
albums = self._extract_albums(data)
while "nextPageToken" in data:
payload["page_token"] = data["nextPageToken"]
data = self._load_new_data(URL_ALBUMS, "get", payload)
albums.extend(self._extract_albums(data))
logger.info(
"Retrieving album list - done: found %i albums with %i items",
len(albums),
sum([x["count"] for x in albums]),
)
return albums
    def _load_new_data(self, url, method, payload):
        """Issue a GET or POST against the Photos API and return the decoded JSON body."""
if method == "get":
response = requests.get(url, params=payload, headers=self.headers)
elif method == "post":
response = requests.post(url, data=json.dumps(payload), headers=self.headers)
if response.status_code != 200:
logger.error("Failed call to '%s' on '%s' with payload '%s'", method, url, payload)
response.raise_for_status()
feed = response.text.encode("utf8")
return json.loads(feed)
def _extract_photos(self, data):
logger.debug("Received %i items", len(data.get("mediaItems", [])))
photos = []
for entry in data.get("mediaItems", []):
if not entry.get("mediaType", "image/jpeg").startswith("image"):
continue
logger.debug("Processing: %s", entry)
photos.append(
{
"filename": entry["filename"],
"id": entry["id"],
"description": entry.get("description", entry["filename"]),
"raw": entry,
}
)
return photos
    def get_photos(self, album_id=None, start_date=None, end_date=None):
        """Yield photo dicts for an album, or for a date range when no album_id is given."""
logger.info("Retrieving photos for album %s or time %s-%s", album_id, start_date, end_date)
payload = {"pageSize": 100}
method = "get"
url = URL_PHOTOS
if album_id:
payload["albumId"] = album_id
method = "post"
url += ":search"
elif start_date:
method = "post"
url += ":search"
start_date = {
"year": start_date.year,
"month": start_date.month,
"day": start_date.day,
}
if not end_date:
end_date = date.today()
end_date = {
"year": end_date.year,
"month": end_date.month,
"day": end_date.day,
}
payload["filters"] = {"dateFilter": {"ranges": [{"startDate": start_date, "endDate": end_date}]}}
data = self._load_new_data(url, method, payload)
photos = self._extract_photos(data)
total_count = len(photos)
yield from photos
while "nextPageToken" in data:
payload["page_token"] = data["nextPageToken"]
data = self._load_new_data(url, method, payload)
photos = self._extract_photos(data)
total_count += len(photos)
logger.info("Total photos now retrieved: %s", total_count)
yield from photos
logger.info(
"Retrieving photos - done: found %i photos",
total_count,
)
def read_photo(self, photo):
"""Return a file-like object that can be read() to get photo file data"""
if datetime.now() - photo.get("modified", datetime.now()) > timedelta(minutes=59):
logger.warning("Media URL expired, refreshing")
response = requests.get(URL_PHOTOS + "/" + photo["id"], headers=self.headers)
response.raise_for_status()
feed = response.text.encode("utf8")
            # The re-fetched media item is returned bare; keep it under "raw" so
            # the baseUrl lookup below still works.
            photo["raw"] = json.loads(feed)
response = requests.get(photo["raw"]["baseUrl"] + "=d", headers=self.headers, stream=True)
if response.status_code != 200:
time.sleep(1)
response = requests.get(photo["raw"]["baseUrl"] + "=d", headers=self.headers, stream=True)
if response.status_code != 200:
time.sleep(1)
response = requests.get(photo["raw"]["baseUrl"] + "=d", headers=self.headers, stream=True)
response.raise_for_status()
return response.raw
def create_album(self, title):
raise NotImplementedError
def upload_media(self, filename):
"""Do the media upload step of adding a photo to GPhoto Library - returns a token for batch media creation"""
headers = {
"Content-type": "application/octet-stream",
"X-Goog-Upload-Content-Type": "image/jpeg",
"X-Goog-Upload-Protocol": "raw",
}
headers.update(self.headers)
with open(filename, "rb") as infile:
response = requests.post(
"https://photoslibrary.googleapis.com/v1/uploads", headers=headers, data=infile.read()
)
response.raise_for_status()
return response.text
def create_media(self, data_items):
"""Batch media creation - takes up to 50 items of (filename, upload_token) and creates all at once"""
data = {
"newMediaItems": [],
}
for data_item in data_items:
data["newMediaItems"].append(
{
"simpleMediaItem": {
"fileName": os.path.basename(data_item[0]),
"uploadToken": data_item[1],
}
}
)
response = requests.post(
"https://photoslibrary.googleapis.com/v1/mediaItems:batchCreate", json=data, headers=self.headers
)
if response.status_code == 207:
            for item in response.json().get("newMediaItemResults", []):
if item.get("status", {}).get("message", "Failed") != "Success":
logger.error("Problem with upload: %s", item)
bad_items = [x[0] for x in data_items if x[1] == item.get("uploadToken", "xxx")]
logger.error("Files missed uploads: %s", bad_items)
response.raise_for_status()
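# ---------------------------------------------------------------------------
# Added illustrative sketch, not part of the original module: minimal usage.
# It assumes an interactive OAuth exchange on first run and a writable
# ".cache" token file; the output filename handling is deliberately naive.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    gphoto = GPhoto()
    for album in gphoto.get_albums():
        print("{} ({} items)".format(album["name"], album["count"]))
    # Download the first photo taken in the last week to the working directory.
    for photo in gphoto.get_photos(start_date=date.today() - timedelta(days=7)):
        with open(photo["filename"], "wb") as outfile:
            outfile.write(gphoto.read_photo(photo).read())
        break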
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: job_tasks.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from idcmanager_sdk.model.ops_automation import mail_info_pb2 as idcmanager__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='job_tasks.proto',
package='ops_automation',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automation'),
serialized_pb=_b('\n\x0fjob_tasks.proto\x12\x0eops_automation\x1a\x33idcmanager_sdk/model/ops_automation/mail_info.proto\"\x82\x03\n\x08JobTasks\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05jobId\x18\x02 \x01(\t\x12\x0f\n\x07jobName\x18\x03 \x01(\t\x12\x10\n\x08menuName\x18\x04 \x01(\t\x12\x0e\n\x06\x65xecId\x18\x05 \x01(\t\x12\x14\n\x0cresourceType\x18\x06 \x01(\t\x12\x12\n\nresourceId\x18\x07 \x01(\t\x12\x13\n\x0bresourceVId\x18\x08 \x01(\t\x12\x15\n\rresourceVName\x18\t \x01(\t\x12\x0f\n\x07trigger\x18\n \x01(\t\x12\x10\n\x08\x65xecUser\x18\x0b \x01(\t\x12\r\n\x05hosts\x18\x0c \x03(\t\x12\x0e\n\x06status\x18\r \x01(\t\x12&\n\x04mail\x18\x0e \x01(\x0b\x32\x18.ops_automation.MailInfo\x12\x13\n\x0bsuccessRate\x18\x0f \x01(\x02\x12\r\n\x05\x65rror\x18\x10 \x01(\t\x12\x12\n\ncreateTime\x18\x11 \x01(\t\x12\x12\n\nupdateTime\x18\x12 \x01(\t\x12\x0f\n\x07\x63reator\x18\x13 \x01(\t\x12\x0b\n\x03org\x18\x14 \x01(\x05\x42JZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automationb\x06proto3')
,
dependencies=[idcmanager__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2.DESCRIPTOR,])
_JOBTASKS = _descriptor.Descriptor(
name='JobTasks',
full_name='ops_automation.JobTasks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ops_automation.JobTasks.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jobId', full_name='ops_automation.JobTasks.jobId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jobName', full_name='ops_automation.JobTasks.jobName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='menuName', full_name='ops_automation.JobTasks.menuName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execId', full_name='ops_automation.JobTasks.execId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceType', full_name='ops_automation.JobTasks.resourceType', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceId', full_name='ops_automation.JobTasks.resourceId', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceVId', full_name='ops_automation.JobTasks.resourceVId', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceVName', full_name='ops_automation.JobTasks.resourceVName', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trigger', full_name='ops_automation.JobTasks.trigger', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execUser', full_name='ops_automation.JobTasks.execUser', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hosts', full_name='ops_automation.JobTasks.hosts', index=11,
number=12, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='ops_automation.JobTasks.status', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mail', full_name='ops_automation.JobTasks.mail', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='successRate', full_name='ops_automation.JobTasks.successRate', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='ops_automation.JobTasks.error', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='createTime', full_name='ops_automation.JobTasks.createTime', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateTime', full_name='ops_automation.JobTasks.updateTime', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='ops_automation.JobTasks.creator', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='ops_automation.JobTasks.org', index=19,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=475,
)
_JOBTASKS.fields_by_name['mail'].message_type = idcmanager__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2._MAILINFO
DESCRIPTOR.message_types_by_name['JobTasks'] = _JOBTASKS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
JobTasks = _reflection.GeneratedProtocolMessageType('JobTasks', (_message.Message,), {
'DESCRIPTOR' : _JOBTASKS,
'__module__' : 'job_tasks_pb2'
# @@protoc_insertion_point(class_scope:ops_automation.JobTasks)
})
_sym_db.RegisterMessage(JobTasks)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
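# ---------------------------------------------------------------------------
# Added illustrative sketch, not part of the generated file: building and
# round-tripping a JobTasks message with the standard protobuf message API.
# All field values below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    task = JobTasks(jobId="job-001", jobName="nightly-backup", status="running")
    task.hosts.extend(["10.0.0.1", "10.0.0.2"])
    payload = task.SerializeToString()
    restored = JobTasks()
    restored.ParseFromString(payload)
    assert restored.jobName == "nightly-backup"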
|